xref: /openbmc/linux/mm/rmap.c (revision 1b69c6d0ae90b7f1a4f61d5c8209d5cb7a55f849)
1  /*
2   * mm/rmap.c - physical to virtual reverse mappings
3   *
4   * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5   * Released under the General Public License (GPL).
6   *
7   * Simple, low overhead reverse mapping scheme.
8   * Please try to keep this thing as modular as possible.
9   *
10   * Provides methods for unmapping each kind of mapped page:
11   * the anon methods track anonymous pages, and
12   * the file methods track pages belonging to an inode.
13   *
14   * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15   * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16   * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17   * Contributions by Hugh Dickins 2003, 2004
18   */
19  
20  /*
21   * Lock ordering in mm:
22   *
23   * inode->i_mutex	(while writing or truncating, not reading or faulting)
24   *   mm->mmap_sem
25   *     page->flags PG_locked (lock_page)
26   *       mapping->i_mmap_rwsem
27   *         anon_vma->rwsem
28   *           mm->page_table_lock or pte_lock
29   *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
30   *             swap_lock (in swap_duplicate, swap_info_get)
31   *               mmlist_lock (in mmput, drain_mmlist and others)
32   *               mapping->private_lock (in __set_page_dirty_buffers)
33   *                 mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
34   *                   mapping->tree_lock (widely used)
35   *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
36   *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
37   *                 sb_lock (within inode_lock in fs/fs-writeback.c)
38   *                 mapping->tree_lock (widely used, in set_page_dirty,
39   *                           in arch-dependent flush_dcache_mmap_lock,
40   *                           within bdi.wb->list_lock in __sync_single_inode)
41   *
42   * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
43   *   ->tasklist_lock
44   *     pte map lock
45   */
46  
47  #include <linux/mm.h>
48  #include <linux/pagemap.h>
49  #include <linux/swap.h>
50  #include <linux/swapops.h>
51  #include <linux/slab.h>
52  #include <linux/init.h>
53  #include <linux/ksm.h>
54  #include <linux/rmap.h>
55  #include <linux/rcupdate.h>
56  #include <linux/export.h>
57  #include <linux/memcontrol.h>
58  #include <linux/mmu_notifier.h>
59  #include <linux/migrate.h>
60  #include <linux/hugetlb.h>
61  #include <linux/backing-dev.h>
62  #include <linux/page_idle.h>
63  
64  #include <asm/tlbflush.h>
65  
66  #include <trace/events/tlb.h>
67  
68  #include "internal.h"
69  
70  static struct kmem_cache *anon_vma_cachep;
71  static struct kmem_cache *anon_vma_chain_cachep;
72  
73  static inline struct anon_vma *anon_vma_alloc(void)
74  {
75  	struct anon_vma *anon_vma;
76  
77  	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
78  	if (anon_vma) {
79  		atomic_set(&anon_vma->refcount, 1);
80  		anon_vma->degree = 1;	/* Reference for first vma */
81  		anon_vma->parent = anon_vma;
82  		/*
83  		 * Initialise the anon_vma root to point to itself. If called
84  		 * from fork, the root will be reset to the parent's anon_vma.
85  		 */
86  		anon_vma->root = anon_vma;
87  	}
88  
89  	return anon_vma;
90  }
91  
92  static inline void anon_vma_free(struct anon_vma *anon_vma)
93  {
94  	VM_BUG_ON(atomic_read(&anon_vma->refcount));
95  
96  	/*
97  	 * Synchronize against page_lock_anon_vma_read() such that
98  	 * we can safely hold the lock without the anon_vma getting
99  	 * freed.
100  	 *
101  	 * Relies on the full mb implied by the atomic_dec_and_test() from
102  	 * put_anon_vma() against the acquire barrier implied by
103  	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
104  	 *
105  	 * page_lock_anon_vma_read()	VS	put_anon_vma()
106  	 *   down_read_trylock()		  atomic_dec_and_test()
107  	 *   LOCK				  MB
108  	 *   atomic_read()			  rwsem_is_locked()
109  	 *
110  	 * LOCK should suffice since the actual taking of the lock must
111  	 * happen _before_ what follows.
112  	 */
113  	might_sleep();
114  	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
115  		anon_vma_lock_write(anon_vma);
116  		anon_vma_unlock_write(anon_vma);
117  	}
118  
119  	kmem_cache_free(anon_vma_cachep, anon_vma);
120  }
121  
122  static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
123  {
124  	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
125  }
126  
127  static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
128  {
129  	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
130  }
131  
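/*
 * Link @avc onto @vma's anon_vma_chain and insert it into @anon_vma's
 * interval tree.  All callers in this file hold the anon_vma root lock
 * for write around this (via anon_vma_lock_write() or
 * lock_anon_vma_root()), which is what keeps the rb_root stable.
 */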
132  static void anon_vma_chain_link(struct vm_area_struct *vma,
133  				struct anon_vma_chain *avc,
134  				struct anon_vma *anon_vma)
135  {
136  	avc->vma = vma;
137  	avc->anon_vma = anon_vma;
138  	list_add(&avc->same_vma, &vma->anon_vma_chain);
139  	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
140  }
141  
142  /**
143   * anon_vma_prepare - attach an anon_vma to a memory region
144   * @vma: the memory region in question
145   *
146   * This makes sure the memory mapping described by 'vma' has
147   * an 'anon_vma' attached to it, so that we can associate the
148   * anonymous pages mapped into it with that anon_vma.
149   *
150   * The common case will be that we already have one, but if
151   * not we either need to find an adjacent mapping that we
152   * can re-use the anon_vma from (very common when the only
153   * reason for splitting a vma has been mprotect()), or we
154   * allocate a new one.
155   *
156   * Anon-vma allocations are very subtle, because we may have
157   * optimistically looked up an anon_vma in page_lock_anon_vma_read()
158   * and that may actually touch the rwsem even in the newly
159   * allocated vma (it depends on RCU to make sure that the
160   * anon_vma isn't actually destroyed).
161   *
162   * As a result, we need to do proper anon_vma locking even
163   * for the new allocation. At the same time, we do not want
164   * to do any locking for the common case of already having
165   * an anon_vma.
166   *
167   * This must be called with the mmap_sem held for reading.
168   */
169  int anon_vma_prepare(struct vm_area_struct *vma)
170  {
171  	struct anon_vma *anon_vma = vma->anon_vma;
172  	struct anon_vma_chain *avc;
173  
174  	might_sleep();
175  	if (unlikely(!anon_vma)) {
176  		struct mm_struct *mm = vma->vm_mm;
177  		struct anon_vma *allocated;
178  
179  		avc = anon_vma_chain_alloc(GFP_KERNEL);
180  		if (!avc)
181  			goto out_enomem;
182  
183  		anon_vma = find_mergeable_anon_vma(vma);
184  		allocated = NULL;
185  		if (!anon_vma) {
186  			anon_vma = anon_vma_alloc();
187  			if (unlikely(!anon_vma))
188  				goto out_enomem_free_avc;
189  			allocated = anon_vma;
190  		}
191  
192  		anon_vma_lock_write(anon_vma);
193  		/* page_table_lock to protect against threads */
194  		spin_lock(&mm->page_table_lock);
195  		if (likely(!vma->anon_vma)) {
196  			vma->anon_vma = anon_vma;
197  			anon_vma_chain_link(vma, avc, anon_vma);
198  			/* vma reference or self-parent link for new root */
199  			anon_vma->degree++;
200  			allocated = NULL;
201  			avc = NULL;
202  		}
203  		spin_unlock(&mm->page_table_lock);
204  		anon_vma_unlock_write(anon_vma);
205  
206  		if (unlikely(allocated))
207  			put_anon_vma(allocated);
208  		if (unlikely(avc))
209  			anon_vma_chain_free(avc);
210  	}
211  	return 0;
212  
213   out_enomem_free_avc:
214  	anon_vma_chain_free(avc);
215   out_enomem:
216  	return -ENOMEM;
217  }
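
/*
 * For illustration only (not part of this file): the anonymous fault
 * path typically pairs this with page_add_new_anon_rmap(), roughly as
 * do_anonymous_page() in mm/memory.c does:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */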
218  
219  /*
220   * This is a useful helper function for locking the anon_vma root as
221   * we traverse the vma->anon_vma_chain, looping over anon_vma's that
222   * have the same vma.
223   *
224   * Such anon_vmas should have the same root, so you'd expect to see
225   * just a single down_write of the root rwsem for the whole traversal.
226   */
227  static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
228  {
229  	struct anon_vma *new_root = anon_vma->root;
230  	if (new_root != root) {
231  		if (WARN_ON_ONCE(root))
232  			up_write(&root->rwsem);
233  		root = new_root;
234  		down_write(&root->rwsem);
235  	}
236  	return root;
237  }
238  
239  static inline void unlock_anon_vma_root(struct anon_vma *root)
240  {
241  	if (root)
242  		up_write(&root->rwsem);
243  }
244  
245  /*
246   * Attach the anon_vmas from src to dst.
247   * Returns 0 on success, -ENOMEM on failure.
248   *
249   * If dst->anon_vma is NULL, this function tries to find and reuse an existing
250   * anon_vma which has no vmas and only one child anon_vma. This prevents the
251   * anon_vma hierarchy from degrading into an endless linear chain when a task
252   * forks repeatedly. On the other hand, an anon_vma with more than one child
253   * is not reused even if it has no live vma, so the rmap walker has a good
254   * chance of avoiding a scan of the whole hierarchy when it searches for
255   * where a page is mapped.
256   */
257  int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
258  {
259  	struct anon_vma_chain *avc, *pavc;
260  	struct anon_vma *root = NULL;
261  
262  	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
263  		struct anon_vma *anon_vma;
264  
265  		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
266  		if (unlikely(!avc)) {
267  			unlock_anon_vma_root(root);
268  			root = NULL;
269  			avc = anon_vma_chain_alloc(GFP_KERNEL);
270  			if (!avc)
271  				goto enomem_failure;
272  		}
273  		anon_vma = pavc->anon_vma;
274  		root = lock_anon_vma_root(root, anon_vma);
275  		anon_vma_chain_link(dst, avc, anon_vma);
276  
277  		/*
278  		 * Reuse an existing anon_vma if its degree is lower than two,
279  		 * which means it has no vma and only one anon_vma child.
280  		 *
281  		 * Do not choose the parent anon_vma, otherwise the first child
282  		 * would always reuse it. The root anon_vma is never reused:
283  		 * it has a self-parent reference and at least one child.
284  		 */
285  		if (!dst->anon_vma && anon_vma != src->anon_vma &&
286  				anon_vma->degree < 2)
287  			dst->anon_vma = anon_vma;
288  	}
289  	if (dst->anon_vma)
290  		dst->anon_vma->degree++;
291  	unlock_anon_vma_root(root);
292  	return 0;
293  
294   enomem_failure:
295  	/*
296  	 * dst->anon_vma is dropped here; otherwise its degree can be incorrectly
297  	 * decremented in unlink_anon_vmas().
298  	 * We can safely do this because callers of anon_vma_clone() don't care
299  	 * about dst->anon_vma if anon_vma_clone() failed.
300  	 */
301  	dst->anon_vma = NULL;
302  	unlink_anon_vmas(dst);
303  	return -ENOMEM;
304  }
305  
306  /*
307   * Attach vma to its own anon_vma, as well as to the anon_vmas that
308   * the corresponding VMA in the parent process is attached to.
309   * Returns 0 on success, non-zero on failure.
310   */
311  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
312  {
313  	struct anon_vma_chain *avc;
314  	struct anon_vma *anon_vma;
315  	int error;
316  
317  	/* Don't bother if the parent process has no anon_vma here. */
318  	if (!pvma->anon_vma)
319  		return 0;
320  
321  	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
322  	vma->anon_vma = NULL;
323  
324  	/*
325  	 * First, attach the new VMA to the parent VMA's anon_vmas,
326  	 * so rmap can find non-COWed pages in child processes.
327  	 */
328  	error = anon_vma_clone(vma, pvma);
329  	if (error)
330  		return error;
331  
332  	/* An existing anon_vma has been reused, all done then. */
333  	if (vma->anon_vma)
334  		return 0;
335  
336  	/* Then add our own anon_vma. */
337  	anon_vma = anon_vma_alloc();
338  	if (!anon_vma)
339  		goto out_error;
340  	avc = anon_vma_chain_alloc(GFP_KERNEL);
341  	if (!avc)
342  		goto out_error_free_anon_vma;
343  
344  	/*
345  	 * The root anon_vma's rwsem is the lock actually used when we
346  	 * lock any of the anon_vmas in this anon_vma tree.
347  	 */
348  	anon_vma->root = pvma->anon_vma->root;
349  	anon_vma->parent = pvma->anon_vma;
350  	/*
351  	 * With refcounts, an anon_vma can stay around longer than the
352  	 * process it belongs to. The root anon_vma needs to be pinned until
353  	 * this anon_vma is freed, because the lock lives in the root.
354  	 */
355  	get_anon_vma(anon_vma->root);
356  	/* Mark this anon_vma as the one where our new (COWed) pages go. */
357  	vma->anon_vma = anon_vma;
358  	anon_vma_lock_write(anon_vma);
359  	anon_vma_chain_link(vma, avc, anon_vma);
360  	anon_vma->parent->degree++;
361  	anon_vma_unlock_write(anon_vma);
362  
363  	return 0;
364  
365   out_error_free_anon_vma:
366  	put_anon_vma(anon_vma);
367   out_error:
368  	unlink_anon_vmas(vma);
369  	return -ENOMEM;
370  }
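
/*
 * For context (an illustrative sketch, not verbatim): fork's dup_mmap()
 * in kernel/fork.c calls this once for every VMA it copies, roughly:
 *
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */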
371  
372  void unlink_anon_vmas(struct vm_area_struct *vma)
373  {
374  	struct anon_vma_chain *avc, *next;
375  	struct anon_vma *root = NULL;
376  
377  	/*
378  	 * Unlink each anon_vma chained to the VMA.  This list is ordered
379  	 * from newest to oldest, ensuring the root anon_vma gets freed last.
380  	 */
381  	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
382  		struct anon_vma *anon_vma = avc->anon_vma;
383  
384  		root = lock_anon_vma_root(root, anon_vma);
385  		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
386  
387  		/*
388  		 * Leave empty anon_vmas on the list - we'll need
389  		 * to free them outside the lock.
390  		 */
391  		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
392  			anon_vma->parent->degree--;
393  			continue;
394  		}
395  
396  		list_del(&avc->same_vma);
397  		anon_vma_chain_free(avc);
398  	}
399  	if (vma->anon_vma)
400  		vma->anon_vma->degree--;
401  	unlock_anon_vma_root(root);
402  
403  	/*
404  	 * Iterate the list once more; it now contains only empty and unlinked
405  	 * anon_vmas. Destroy them. We could not do this earlier because
406  	 * __put_anon_vma() needs to write-acquire the anon_vma->root->rwsem.
407  	 */
408  	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
409  		struct anon_vma *anon_vma = avc->anon_vma;
410  
411  		BUG_ON(anon_vma->degree);
412  		put_anon_vma(anon_vma);
413  
414  		list_del(&avc->same_vma);
415  		anon_vma_chain_free(avc);
416  	}
417  }
418  
419  static void anon_vma_ctor(void *data)
420  {
421  	struct anon_vma *anon_vma = data;
422  
423  	init_rwsem(&anon_vma->rwsem);
424  	atomic_set(&anon_vma->refcount, 0);
425  	anon_vma->rb_root = RB_ROOT;
426  }
427  
428  void __init anon_vma_init(void)
429  {
430  	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
431  			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
432  	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
433  }
434  
435  /*
436   * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
437   *
438   * Since there is no serialization whatsoever against page_remove_rmap(),
439   * the best this function can do is return a locked anon_vma that might
440   * have been relevant to this page.
441   *
442   * The page might have been remapped to a different anon_vma or the anon_vma
443   * returned may already be freed (and even reused).
444   *
445   * In case it was remapped to a different anon_vma, the new anon_vma will be a
446   * child of the old anon_vma, and the anon_vma lifetime rules will therefore
447   * ensure that any anon_vma obtained from the page will still be valid for as
448   * long as we observe page_mapped() [ hence all those page_mapped() tests ].
449   *
450   * All users of this function must be very careful when walking the anon_vma
451   * chain and verify that the page in question is indeed mapped in it
452   * [ something equivalent to page_mapped_in_vma() ].
453   *
454   * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
455   * that the anon_vma pointer from page->mapping is valid if there is a
456   * mapcount, we can dereference the anon_vma after observing those.
457   */
458  struct anon_vma *page_get_anon_vma(struct page *page)
459  {
460  	struct anon_vma *anon_vma = NULL;
461  	unsigned long anon_mapping;
462  
463  	rcu_read_lock();
464  	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
465  	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
466  		goto out;
467  	if (!page_mapped(page))
468  		goto out;
469  
470  	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
471  	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
472  		anon_vma = NULL;
473  		goto out;
474  	}
475  
476  	/*
477  	 * If this page is still mapped, then its anon_vma cannot have been
478  	 * freed.  But if it has been unmapped, we have no security against the
479  	 * anon_vma structure being freed and reused (for another anon_vma:
480  	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
481  	 * above cannot corrupt).
482  	 */
483  	if (!page_mapped(page)) {
484  		rcu_read_unlock();
485  		put_anon_vma(anon_vma);
486  		return NULL;
487  	}
488  out:
489  	rcu_read_unlock();
490  
491  	return anon_vma;
492  }
493  
494  /*
495   * Similar to page_get_anon_vma() except it locks the anon_vma.
496   *
497   * It's a little more complex as it tries to keep the fast path to a single
498   * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
499   * reference like with page_get_anon_vma() and then block on the rwsem.
500   */
501  struct anon_vma *page_lock_anon_vma_read(struct page *page)
502  {
503  	struct anon_vma *anon_vma = NULL;
504  	struct anon_vma *root_anon_vma;
505  	unsigned long anon_mapping;
506  
507  	rcu_read_lock();
508  	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
509  	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
510  		goto out;
511  	if (!page_mapped(page))
512  		goto out;
513  
514  	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
515  	root_anon_vma = READ_ONCE(anon_vma->root);
516  	if (down_read_trylock(&root_anon_vma->rwsem)) {
517  		/*
518  		 * If the page is still mapped, then this anon_vma is still
519  		 * its anon_vma, and holding the rwsem ensures that it will
520  		 * not go away, see anon_vma_free().
521  		 */
522  		if (!page_mapped(page)) {
523  			up_read(&root_anon_vma->rwsem);
524  			anon_vma = NULL;
525  		}
526  		goto out;
527  	}
528  
529  	/* trylock failed, we have to sleep */
530  	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
531  		anon_vma = NULL;
532  		goto out;
533  	}
534  
535  	if (!page_mapped(page)) {
536  		rcu_read_unlock();
537  		put_anon_vma(anon_vma);
538  		return NULL;
539  	}
540  
541  	/* we pinned the anon_vma, it's safe to sleep */
542  	rcu_read_unlock();
543  	anon_vma_lock_read(anon_vma);
544  
545  	if (atomic_dec_and_test(&anon_vma->refcount)) {
546  		/*
547  		 * Oops, we held the last refcount, release the lock
548  		 * and bail -- can't simply use put_anon_vma() because
549  		 * we'll deadlock on the anon_vma_lock_write() recursion.
550  		 */
551  		anon_vma_unlock_read(anon_vma);
552  		__put_anon_vma(anon_vma);
553  		anon_vma = NULL;
554  	}
555  
556  	return anon_vma;
557  
558  out:
559  	rcu_read_unlock();
560  	return anon_vma;
561  }
562  
563  void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
564  {
565  	anon_vma_unlock_read(anon_vma);
566  }
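
/*
 * An illustrative caller pattern (memory-failure's collect_procs_anon()
 * is one real user; details vary):
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (!anon_vma)
 *		return;
 *	... walk anon_vma->rb_root under the read lock ...
 *	page_unlock_anon_vma_read(anon_vma);
 */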
567  
568  /*
569   * At what user virtual address is page expected in @vma?
570   */
571  static inline unsigned long
572  __vma_address(struct page *page, struct vm_area_struct *vma)
573  {
574  	pgoff_t pgoff = page_to_pgoff(page);
575  	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
576  }
577  
578  inline unsigned long
579  vma_address(struct page *page, struct vm_area_struct *vma)
580  {
581  	unsigned long address = __vma_address(page, vma);
582  
583  	/* page should be within @vma mapping range */
584  	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
585  
586  	return address;
587  }
588  
589  #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
590  static void percpu_flush_tlb_batch_pages(void *data)
591  {
592  	/*
593  	 * All TLB entries are flushed on the assumption that it is
594  	 * cheaper to flush all TLBs and let them be refilled than
595  	 * flushing individual PFNs. Note that we do not track mm's
596  	 * to flush as that might simply be multiple full TLB flushes
597  	 * for no gain.
598  	 */
599  	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
600  	flush_tlb_local();
601  }
602  
603  /*
604   * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
605   * was dirty when it was unmapped, it is important that it be flushed before
606   * any IO is initiated on the page, to prevent lost writes. Similarly, it
607   * must be flushed before the page is freed, to prevent data leakage.
608   */
609  void try_to_unmap_flush(void)
610  {
611  	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
612  	int cpu;
613  
614  	if (!tlb_ubc->flush_required)
615  		return;
616  
617  	cpu = get_cpu();
618  
619  	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
620  
621  	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
622  		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
623  
624  	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
625  		smp_call_function_many(&tlb_ubc->cpumask,
626  			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
627  	}
628  	cpumask_clear(&tlb_ubc->cpumask);
629  	tlb_ubc->flush_required = false;
630  	tlb_ubc->writable = false;
631  	put_cpu();
632  }
633  
634  /* Flush iff there are potentially writable TLB entries that can race with IO */
635  void try_to_unmap_flush_dirty(void)
636  {
637  	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
638  
639  	if (tlb_ubc->writable)
640  		try_to_unmap_flush();
641  }
642  
643  static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
644  		struct page *page, bool writable)
645  {
646  	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
647  
648  	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
649  	tlb_ubc->flush_required = true;
650  
651  	/*
652  	 * If the PTE was dirty then it's best to assume it's writable. The
653  	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
654  	 * before the page is queued for IO.
655  	 */
656  	if (writable)
657  		tlb_ubc->writable = true;
658  }
659  
660  /*
661   * Returns true if the TLB flush should be deferred to the end of a batch of
662   * unmap operations to reduce IPIs.
663   */
664  static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
665  {
666  	bool should_defer = false;
667  
668  	if (!(flags & TTU_BATCH_FLUSH))
669  		return false;
670  
671  	/* If remote CPUs need to be flushed, defer the flush so it can be batched */
672  	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
673  		should_defer = true;
674  	put_cpu();
675  
676  	return should_defer;
677  }
678  #else
679  static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
680  		struct page *page, bool writable)
681  {
682  }
683  
684  static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
685  {
686  	return false;
687  }
688  #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
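
/*
 * For reference: at the time of writing the only user of TTU_BATCH_FLUSH
 * is reclaim. shrink_page_list() passes the flag to try_to_unmap(), calls
 * try_to_unmap_flush_dirty() before starting writeback on a dirty page,
 * and try_to_unmap_flush() once the whole batch has been processed.
 */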
689  
690  /*
691   * At what user virtual address is page expected in vma?
692   * Caller should check the page is actually part of the vma.
693   */
694  unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
695  {
696  	unsigned long address;
697  	if (PageAnon(page)) {
698  		struct anon_vma *page__anon_vma = page_anon_vma(page);
699  		/*
700  		 * Note: swapoff's unuse_vma() is more efficient with this
701  		 * check, and needs it to match anon_vma when KSM is active.
702  		 */
703  		if (!vma->anon_vma || !page__anon_vma ||
704  		    vma->anon_vma->root != page__anon_vma->root)
705  			return -EFAULT;
706  	} else if (page->mapping) {
707  		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
708  			return -EFAULT;
709  	} else
710  		return -EFAULT;
711  	address = __vma_address(page, vma);
712  	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
713  		return -EFAULT;
714  	return address;
715  }
716  
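/*
 * mm_find_pmd - find the pmd covering @address in @mm.
 *
 * Returns the pmd only if it is present and is not a transparent huge
 * pmd (i.e. it is safe to go on and look up ptes under it); otherwise
 * returns NULL.
 */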
717  pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
718  {
719  	pgd_t *pgd;
720  	pud_t *pud;
721  	pmd_t *pmd = NULL;
722  	pmd_t pmde;
723  
724  	pgd = pgd_offset(mm, address);
725  	if (!pgd_present(*pgd))
726  		goto out;
727  
728  	pud = pud_offset(pgd, address);
729  	if (!pud_present(*pud))
730  		goto out;
731  
732  	pmd = pmd_offset(pud, address);
733  	/*
734  	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
735  	 * without holding the anon_vma lock for write.  So when looking for a
736  	 * genuine pmde (in which to find pte), test present and !THP together.
737  	 */
738  	pmde = *pmd;
739  	barrier();
740  	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
741  		pmd = NULL;
742  out:
743  	return pmd;
744  }
745  
746  /*
747   * Check that @page is mapped at @address into @mm.
748   *
749   * If @sync is false, page_check_address may perform a racy check to avoid
750   * the page table lock when the pte is not present (helpful when reclaiming
751   * highly shared pages).
752   *
753   * On success returns with pte mapped and locked.
754   */
755  pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
756  			  unsigned long address, spinlock_t **ptlp, int sync)
757  {
758  	pmd_t *pmd;
759  	pte_t *pte;
760  	spinlock_t *ptl;
761  
762  	if (unlikely(PageHuge(page))) {
763  		/* when pud is not present, pte will be NULL */
764  		pte = huge_pte_offset(mm, address);
765  		if (!pte)
766  			return NULL;
767  
768  		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
769  		goto check;
770  	}
771  
772  	pmd = mm_find_pmd(mm, address);
773  	if (!pmd)
774  		return NULL;
775  
776  	pte = pte_offset_map(pmd, address);
777  	/* Make a quick check before getting the lock */
778  	if (!sync && !pte_present(*pte)) {
779  		pte_unmap(pte);
780  		return NULL;
781  	}
782  
783  	ptl = pte_lockptr(mm, pmd);
784  check:
785  	spin_lock(ptl);
786  	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
787  		*ptlp = ptl;
788  		return pte;
789  	}
790  	pte_unmap_unlock(pte, ptl);
791  	return NULL;
792  }
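
/*
 * The pte-based rmap_one implementations below (page_referenced_one,
 * page_mkclean_one, try_to_unmap_one) all follow the same pattern with
 * the page_check_address() wrapper, along the lines of:
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return SWAP_AGAIN;
 *	... inspect or modify *pte while holding ptl ...
 *	pte_unmap_unlock(pte, ptl);
 */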
793  
794  /**
795   * page_mapped_in_vma - check whether a page is really mapped in a VMA
796   * @page: the page to test
797   * @vma: the VMA to test
798   *
799   * Returns 1 if the page is mapped into the page tables of the VMA, 0
800   * if the page is not mapped into the page tables of this VMA.  Only
801   * valid for normal file or anonymous VMAs.
802   */
803  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
804  {
805  	unsigned long address;
806  	pte_t *pte;
807  	spinlock_t *ptl;
808  
809  	address = __vma_address(page, vma);
810  	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
811  		return 0;
812  	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
813  	if (!pte)			/* the page is not in this mm */
814  		return 0;
815  	pte_unmap_unlock(pte, ptl);
816  
817  	return 1;
818  }
819  
820  struct page_referenced_arg {
821  	int mapcount;
822  	int referenced;
823  	unsigned long vm_flags;
824  	struct mem_cgroup *memcg;
825  };
826  /*
827   * arg: a struct page_referenced_arg is passed in this argument
828   */
829  static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
830  			unsigned long address, void *arg)
831  {
832  	struct mm_struct *mm = vma->vm_mm;
833  	spinlock_t *ptl;
834  	int referenced = 0;
835  	struct page_referenced_arg *pra = arg;
836  
837  	if (unlikely(PageTransHuge(page))) {
838  		pmd_t *pmd;
839  
840  		/*
841  		 * rmap might return false positives; we must filter
842  		 * these out using page_check_address_pmd().
843  		 */
844  		pmd = page_check_address_pmd(page, mm, address,
845  					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
846  		if (!pmd)
847  			return SWAP_AGAIN;
848  
849  		if (vma->vm_flags & VM_LOCKED) {
850  			spin_unlock(ptl);
851  			pra->vm_flags |= VM_LOCKED;
852  			return SWAP_FAIL; /* To break the loop */
853  		}
854  
855  		/* go ahead even if the pmd is pmd_trans_splitting() */
856  		if (pmdp_clear_flush_young_notify(vma, address, pmd))
857  			referenced++;
858  		spin_unlock(ptl);
859  	} else {
860  		pte_t *pte;
861  
862  		/*
863  		 * rmap might return false positives; we must filter
864  		 * these out using page_check_address().
865  		 */
866  		pte = page_check_address(page, mm, address, &ptl, 0);
867  		if (!pte)
868  			return SWAP_AGAIN;
869  
870  		if (vma->vm_flags & VM_LOCKED) {
871  			pte_unmap_unlock(pte, ptl);
872  			pra->vm_flags |= VM_LOCKED;
873  			return SWAP_FAIL; /* To break the loop */
874  		}
875  
876  		if (ptep_clear_flush_young_notify(vma, address, pte)) {
877  			/*
878  			 * Don't treat a reference through a sequentially read
879  			 * mapping as such.  If the page has been used in
880  			 * another mapping, we will catch it; if this other
881  			 * mapping is already gone, the unmap path will have
882  			 * set PG_referenced or activated the page.
883  			 */
884  			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
885  				referenced++;
886  		}
887  		pte_unmap_unlock(pte, ptl);
888  	}
889  
890  	if (referenced)
891  		clear_page_idle(page);
892  	if (test_and_clear_page_young(page))
893  		referenced++;
894  
895  	if (referenced) {
896  		pra->referenced++;
897  		pra->vm_flags |= vma->vm_flags;
898  	}
899  
900  	pra->mapcount--;
901  	if (!pra->mapcount)
902  		return SWAP_SUCCESS; /* To break the loop */
903  
904  	return SWAP_AGAIN;
905  }
906  
907  static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
908  {
909  	struct page_referenced_arg *pra = arg;
910  	struct mem_cgroup *memcg = pra->memcg;
911  
912  	if (!mm_match_cgroup(vma->vm_mm, memcg))
913  		return true;
914  
915  	return false;
916  }
917  
918  /**
919   * page_referenced - test if the page was referenced
920   * @page: the page to test
921   * @is_locked: caller holds lock on the page
922   * @memcg: target memory cgroup
923   * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
924   *
925   * Quick test_and_clear_referenced for all mappings to a page,
926   * returns the number of ptes which referenced the page.
927   */
928  int page_referenced(struct page *page,
929  		    int is_locked,
930  		    struct mem_cgroup *memcg,
931  		    unsigned long *vm_flags)
932  {
933  	int ret;
934  	int we_locked = 0;
935  	struct page_referenced_arg pra = {
936  		.mapcount = page_mapcount(page),
937  		.memcg = memcg,
938  	};
939  	struct rmap_walk_control rwc = {
940  		.rmap_one = page_referenced_one,
941  		.arg = (void *)&pra,
942  		.anon_lock = page_lock_anon_vma_read,
943  	};
944  
945  	*vm_flags = 0;
946  	if (!page_mapped(page))
947  		return 0;
948  
949  	if (!page_rmapping(page))
950  		return 0;
951  
952  	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
953  		we_locked = trylock_page(page);
954  		if (!we_locked)
955  			return 1;
956  	}
957  
958  	/*
959  	 * If we are reclaiming on behalf of a cgroup, skip
960  	 * counting references from vmas that belong to a
961  	 * different cgroup.
962  	 */
963  	if (memcg) {
964  		rwc.invalid_vma = invalid_page_referenced_vma;
965  	}
966  
967  	ret = rmap_walk(page, &rwc);
968  	*vm_flags = pra.vm_flags;
969  
970  	if (we_locked)
971  		unlock_page(page);
972  
973  	return pra.referenced;
974  }
975  
976  static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
977  			    unsigned long address, void *arg)
978  {
979  	struct mm_struct *mm = vma->vm_mm;
980  	pte_t *pte;
981  	spinlock_t *ptl;
982  	int ret = 0;
983  	int *cleaned = arg;
984  
985  	pte = page_check_address(page, mm, address, &ptl, 1);
986  	if (!pte)
987  		goto out;
988  
989  	if (pte_dirty(*pte) || pte_write(*pte)) {
990  		pte_t entry;
991  
992  		flush_cache_page(vma, address, pte_pfn(*pte));
993  		entry = ptep_clear_flush(vma, address, pte);
994  		entry = pte_wrprotect(entry);
995  		entry = pte_mkclean(entry);
996  		set_pte_at(mm, address, pte, entry);
997  		ret = 1;
998  	}
999  
1000  	pte_unmap_unlock(pte, ptl);
1001  
1002  	if (ret) {
1003  		mmu_notifier_invalidate_page(mm, address);
1004  		(*cleaned)++;
1005  	}
1006  out:
1007  	return SWAP_AGAIN;
1008  }
1009  
1010  static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1011  {
1012  	if (vma->vm_flags & VM_SHARED)
1013  		return false;
1014  
1015  	return true;
1016  }
1017  
1018  int page_mkclean(struct page *page)
1019  {
1020  	int cleaned = 0;
1021  	struct address_space *mapping;
1022  	struct rmap_walk_control rwc = {
1023  		.arg = (void *)&cleaned,
1024  		.rmap_one = page_mkclean_one,
1025  		.invalid_vma = invalid_mkclean_vma,
1026  	};
1027  
1028  	BUG_ON(!PageLocked(page));
1029  
1030  	if (!page_mapped(page))
1031  		return 0;
1032  
1033  	mapping = page_mapping(page);
1034  	if (!mapping)
1035  		return 0;
1036  
1037  	rmap_walk(page, &rwc);
1038  
1039  	return cleaned;
1040  }
1041  EXPORT_SYMBOL_GPL(page_mkclean);
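
/*
 * A typical caller is clear_page_dirty_for_io() in mm/page-writeback.c,
 * which uses page_mkclean() to write-protect all ptes of a file page
 * before writeback, so that a later write must fault and re-dirty it.
 */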
1042  
1043  /**
1044   * page_move_anon_rmap - move a page to our anon_vma
1045   * @page:	the page to move to our anon_vma
1046   * @vma:	the vma the page belongs to
1047   * @address:	the user virtual address mapped
1048   *
1049   * When a page belongs exclusively to one process after a COW event,
1050   * that page can be moved into the anon_vma that belongs to just that
1051   * process, so the rmap code will not search the parent or sibling
1052   * processes.
1053   */
1054  void page_move_anon_rmap(struct page *page,
1055  	struct vm_area_struct *vma, unsigned long address)
1056  {
1057  	struct anon_vma *anon_vma = vma->anon_vma;
1058  
1059  	VM_BUG_ON_PAGE(!PageLocked(page), page);
1060  	VM_BUG_ON_VMA(!anon_vma, vma);
1061  	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
1062  
1063  	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1064  	/*
1065  	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1066  	 * simultaneously, so a concurrent reader (e.g. page_referenced()'s
1067  	 * PageAnon()) will not see one without the other.
1068  	 */
1069  	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1070  }
1071  
1072  /**
1073   * __page_set_anon_rmap - set up new anonymous rmap
1074   * @page:	Page to add to rmap
1075   * @vma:	VM area to add page to.
1076   * @address:	User virtual address of the mapping
1077   * @exclusive:	the page is exclusively owned by the current process
1078   */
1079  static void __page_set_anon_rmap(struct page *page,
1080  	struct vm_area_struct *vma, unsigned long address, int exclusive)
1081  {
1082  	struct anon_vma *anon_vma = vma->anon_vma;
1083  
1084  	BUG_ON(!anon_vma);
1085  
1086  	if (PageAnon(page))
1087  		return;
1088  
1089  	/*
1090  	 * If the page isn't exclusively mapped into this vma,
1091  	 * we must use the _oldest_ possible anon_vma for the
1092  	 * page mapping!
1093  	 */
1094  	if (!exclusive)
1095  		anon_vma = anon_vma->root;
1096  
1097  	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1098  	page->mapping = (struct address_space *) anon_vma;
1099  	page->index = linear_page_index(vma, address);
1100  }
1101  
1102  /**
1103   * __page_check_anon_rmap - sanity check anonymous rmap addition
1104   * @page:	the page to add the mapping to
1105   * @vma:	the vm area in which the mapping is added
1106   * @address:	the user virtual address mapped
1107   */
1108  static void __page_check_anon_rmap(struct page *page,
1109  	struct vm_area_struct *vma, unsigned long address)
1110  {
1111  #ifdef CONFIG_DEBUG_VM
1112  	/*
1113  	 * The page's anon-rmap details (mapping and index) are guaranteed to
1114  	 * be set up correctly at this point.
1115  	 *
1116  	 * We have exclusion against page_add_anon_rmap because the caller
1117  	 * always holds the page locked, except if called from page_dup_rmap,
1118  	 * in which case the page is already known to be set up.
1119  	 *
1120  	 * We have exclusion against page_add_new_anon_rmap because those pages
1121  	 * are initially only visible via the pagetables, and the pte is locked
1122  	 * over the call to page_add_new_anon_rmap.
1123  	 */
1124  	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1125  	BUG_ON(page->index != linear_page_index(vma, address));
1126  #endif
1127  }
1128  
1129  /**
1130   * page_add_anon_rmap - add pte mapping to an anonymous page
1131   * @page:	the page to add the mapping to
1132   * @vma:	the vm area in which the mapping is added
1133   * @address:	the user virtual address mapped
1134   *
1135   * The caller needs to hold the pte lock, and the page must be locked in
1136   * the anon_vma case: to serialize the mapping/index checking after setting,
1137   * and to ensure that PageAnon is not being upgraded racily to PageKsm
1138   * (but PageKsm is never downgraded to PageAnon).
1139   */
1140  void page_add_anon_rmap(struct page *page,
1141  	struct vm_area_struct *vma, unsigned long address)
1142  {
1143  	do_page_add_anon_rmap(page, vma, address, 0);
1144  }
1145  
1146  /*
1147   * Special version of the above for do_swap_page, which often runs
1148   * into pages that are exclusively owned by the current process.
1149   * Everybody else should continue to use page_add_anon_rmap above.
1150   */
1151  void do_page_add_anon_rmap(struct page *page,
1152  	struct vm_area_struct *vma, unsigned long address, int exclusive)
1153  {
1154  	int first = atomic_inc_and_test(&page->_mapcount);
1155  	if (first) {
1156  		/*
1157  		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1158  		 * these counters are not modified in interrupt context, and
1159  		 * the pte lock (a spinlock) is held, which implies preemption
1160  		 * is disabled.
1161  		 */
1162  		if (PageTransHuge(page))
1163  			__inc_zone_page_state(page,
1164  					      NR_ANON_TRANSPARENT_HUGEPAGES);
1165  		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1166  				hpage_nr_pages(page));
1167  	}
1168  	if (unlikely(PageKsm(page)))
1169  		return;
1170  
1171  	VM_BUG_ON_PAGE(!PageLocked(page), page);
1172  	/* address might be in next vma when migration races vma_adjust */
1173  	if (first)
1174  		__page_set_anon_rmap(page, vma, address, exclusive);
1175  	else
1176  		__page_check_anon_rmap(page, vma, address);
1177  }
1178  
1179  /**
1180   * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1181   * @page:	the page to add the mapping to
1182   * @vma:	the vm area in which the mapping is added
1183   * @address:	the user virtual address mapped
1184   *
1185   * Same as page_add_anon_rmap but must only be called on *new* pages.
1186   * This means the inc-and-test can be bypassed.
1187   * Page does not have to be locked.
1188   */
1189  void page_add_new_anon_rmap(struct page *page,
1190  	struct vm_area_struct *vma, unsigned long address)
1191  {
1192  	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1193  	SetPageSwapBacked(page);
1194  	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
1195  	if (PageTransHuge(page))
1196  		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1197  	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1198  			hpage_nr_pages(page));
1199  	__page_set_anon_rmap(page, vma, address, 1);
1200  }
1201  
1202  /**
1203   * page_add_file_rmap - add pte mapping to a file page
1204   * @page: the page to add the mapping to
1205   *
1206   * The caller needs to hold the pte lock.
1207   */
1208  void page_add_file_rmap(struct page *page)
1209  {
1210  	struct mem_cgroup *memcg;
1211  
1212  	memcg = mem_cgroup_begin_page_stat(page);
1213  	if (atomic_inc_and_test(&page->_mapcount)) {
1214  		__inc_zone_page_state(page, NR_FILE_MAPPED);
1215  		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1216  	}
1217  	mem_cgroup_end_page_stat(memcg);
1218  }
1219  
1220  static void page_remove_file_rmap(struct page *page)
1221  {
1222  	struct mem_cgroup *memcg;
1223  
1224  	memcg = mem_cgroup_begin_page_stat(page);
1225  
1226  	/* page still mapped by someone else? */
1227  	if (!atomic_add_negative(-1, &page->_mapcount))
1228  		goto out;
1229  
1230  	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
1231  	if (unlikely(PageHuge(page)))
1232  		goto out;
1233  
1234  	/*
1235  	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1236  	 * these counters are not modified in interrupt context, and
1237  	 * the pte lock (a spinlock) is held, which implies preemption is disabled.
1238  	 */
1239  	__dec_zone_page_state(page, NR_FILE_MAPPED);
1240  	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1241  
1242  	if (unlikely(PageMlocked(page)))
1243  		clear_page_mlock(page);
1244  out:
1245  	mem_cgroup_end_page_stat(memcg);
1246  }
1247  
1248  /**
1249   * page_remove_rmap - take down pte mapping from a page
1250   * @page: page to remove mapping from
1251   *
1252   * The caller needs to hold the pte lock.
1253   */
1254  void page_remove_rmap(struct page *page)
1255  {
1256  	if (!PageAnon(page)) {
1257  		page_remove_file_rmap(page);
1258  		return;
1259  	}
1260  
1261  	/* page still mapped by someone else? */
1262  	if (!atomic_add_negative(-1, &page->_mapcount))
1263  		return;
1264  
1265  	/* Hugepages are not counted in NR_ANON_PAGES for now. */
1266  	if (unlikely(PageHuge(page)))
1267  		return;
1268  
1269  	/*
1270  	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1271  	 * these counters are not modified in interrupt context, and
1272  	 * the pte lock (a spinlock) is held, which implies preemption is disabled.
1273  	 */
1274  	if (PageTransHuge(page))
1275  		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1276  
1277  	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1278  			      -hpage_nr_pages(page));
1279  
1280  	if (unlikely(PageMlocked(page)))
1281  		clear_page_mlock(page);
1282  
1283  	/*
1284  	 * It would be tidy to reset the PageAnon mapping here,
1285  	 * but that might overwrite a racing page_add_anon_rmap
1286  	 * which increments mapcount after us but sets mapping
1287  	 * before us: so leave the reset to free_hot_cold_page,
1288  	 * and remember that it's only reliable while mapped.
1289  	 * Leaving it set also helps swapoff to reinstate ptes
1290  	 * faster for those pages still in swapcache.
1291  	 */
1292  }
1293  
1294  /*
1295   * @arg: an enum ttu_flags value is passed in this argument
1296   */
1297  static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1298  		     unsigned long address, void *arg)
1299  {
1300  	struct mm_struct *mm = vma->vm_mm;
1301  	pte_t *pte;
1302  	pte_t pteval;
1303  	spinlock_t *ptl;
1304  	int ret = SWAP_AGAIN;
1305  	enum ttu_flags flags = (enum ttu_flags)arg;
1306  
1307  	pte = page_check_address(page, mm, address, &ptl, 0);
1308  	if (!pte)
1309  		goto out;
1310  
1311  	/*
1312  	 * If the page is mlock()d, we cannot swap it out.
1313  	 * If it's recently referenced (perhaps page_referenced
1314  	 * skipped over this mm) then we should reactivate it.
1315  	 */
1316  	if (!(flags & TTU_IGNORE_MLOCK)) {
1317  		if (vma->vm_flags & VM_LOCKED)
1318  			goto out_mlock;
1319  
1320  		if (flags & TTU_MUNLOCK)
1321  			goto out_unmap;
1322  	}
1323  	if (!(flags & TTU_IGNORE_ACCESS)) {
1324  		if (ptep_clear_flush_young_notify(vma, address, pte)) {
1325  			ret = SWAP_FAIL;
1326  			goto out_unmap;
1327  		}
1328  	}
1329  
1330  	/* Nuke the page table entry. */
1331  	flush_cache_page(vma, address, page_to_pfn(page));
1332  	if (should_defer_flush(mm, flags)) {
1333  		/*
1334  		 * We clear the PTE but do not flush so potentially a remote
1335  		 * CPU could still be writing to the page. If the entry was
1336  		 * previously clean then the architecture must guarantee that
1337  		 * a clear->dirty transition on a cached TLB entry is written
1338  		 * through and traps if the PTE is unmapped.
1339  		 */
1340  		pteval = ptep_get_and_clear(mm, address, pte);
1341  
1342  		set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
1343  	} else {
1344  		pteval = ptep_clear_flush(vma, address, pte);
1345  	}
1346  
1347  	/* Move the dirty bit to the physical page now the pte is gone. */
1348  	if (pte_dirty(pteval))
1349  		set_page_dirty(page);
1350  
1351  	/* Update high watermark before we lower rss */
1352  	update_hiwater_rss(mm);
1353  
1354  	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1355  		if (!PageHuge(page)) {
1356  			if (PageAnon(page))
1357  				dec_mm_counter(mm, MM_ANONPAGES);
1358  			else
1359  				dec_mm_counter(mm, MM_FILEPAGES);
1360  		}
1361  		set_pte_at(mm, address, pte,
1362  			   swp_entry_to_pte(make_hwpoison_entry(page)));
1363  	} else if (pte_unused(pteval)) {
1364  		/*
1365  		 * The guest indicated that the page content is of no
1366  		 * interest anymore. Simply discard the pte; vmscan
1367  		 * will take care of the rest.
1368  		 */
1369  		if (PageAnon(page))
1370  			dec_mm_counter(mm, MM_ANONPAGES);
1371  		else
1372  			dec_mm_counter(mm, MM_FILEPAGES);
1373  	} else if (PageAnon(page)) {
1374  		swp_entry_t entry = { .val = page_private(page) };
1375  		pte_t swp_pte;
1376  
1377  		if (PageSwapCache(page)) {
1378  			/*
1379  			 * Store the swap location in the pte.
1380  			 * See handle_pte_fault() ...
1381  			 */
1382  			if (swap_duplicate(entry) < 0) {
1383  				set_pte_at(mm, address, pte, pteval);
1384  				ret = SWAP_FAIL;
1385  				goto out_unmap;
1386  			}
1387  			if (list_empty(&mm->mmlist)) {
1388  				spin_lock(&mmlist_lock);
1389  				if (list_empty(&mm->mmlist))
1390  					list_add(&mm->mmlist, &init_mm.mmlist);
1391  				spin_unlock(&mmlist_lock);
1392  			}
1393  			dec_mm_counter(mm, MM_ANONPAGES);
1394  			inc_mm_counter(mm, MM_SWAPENTS);
1395  		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
1396  			/*
1397  			 * Store the pfn of the page in a special migration
1398  			 * pte. do_swap_page() will wait until the migration
1399  			 * pte is removed and then restart fault handling.
1400  			 */
1401  			BUG_ON(!(flags & TTU_MIGRATION));
1402  			entry = make_migration_entry(page, pte_write(pteval));
1403  		}
1404  		swp_pte = swp_entry_to_pte(entry);
1405  		if (pte_soft_dirty(pteval))
1406  			swp_pte = pte_swp_mksoft_dirty(swp_pte);
1407  		set_pte_at(mm, address, pte, swp_pte);
1408  	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
1409  		   (flags & TTU_MIGRATION)) {
1410  		/* Establish migration entry for a file page */
1411  		swp_entry_t entry;
1412  		entry = make_migration_entry(page, pte_write(pteval));
1413  		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1414  	} else
1415  		dec_mm_counter(mm, MM_FILEPAGES);
1416  
1417  	page_remove_rmap(page);
1418  	page_cache_release(page);
1419  
1420  out_unmap:
1421  	pte_unmap_unlock(pte, ptl);
1422  	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
1423  		mmu_notifier_invalidate_page(mm, address);
1424  out:
1425  	return ret;
1426  
1427  out_mlock:
1428  	pte_unmap_unlock(pte, ptl);
1429  
1430  
1431  	/*
1432  	 * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
1433  	 * and can give an unstable result. We also cannot wait here, because
1434  	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
1435  	 * If the trylock fails, the page remains on the evictable lru and
1436  	 * vmscan can later retry moving it to the unevictable lru if the
1437  	 * page really is mlocked.
1438  	 */
1439  	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1440  		if (vma->vm_flags & VM_LOCKED) {
1441  			mlock_vma_page(page);
1442  			ret = SWAP_MLOCK;
1443  		}
1444  		up_read(&vma->vm_mm->mmap_sem);
1445  	}
1446  	return ret;
1447  }
1448  
1449  bool is_vma_temporary_stack(struct vm_area_struct *vma)
1450  {
1451  	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1452  
1453  	if (!maybe_stack)
1454  		return false;
1455  
1456  	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1457  						VM_STACK_INCOMPLETE_SETUP)
1458  		return true;
1459  
1460  	return false;
1461  }
1462  
1463  static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1464  {
1465  	return is_vma_temporary_stack(vma);
1466  }
1467  
1468  static int page_not_mapped(struct page *page)
1469  {
1470  	return !page_mapped(page);
1471  }
1472  
1473  /**
1474   * try_to_unmap - try to remove all page table mappings to a page
1475   * @page: the page to get unmapped
1476   * @flags: action and flags
1477   *
1478   * Tries to remove all the page table entries which are mapping this
1479   * page, used in the pageout path.  Caller must hold the page lock.
1480   * Return values are:
1481   *
1482   * SWAP_SUCCESS	- we succeeded in removing all mappings
1483   * SWAP_AGAIN	- we missed a mapping, try again later
1484   * SWAP_FAIL	- the page is unswappable
1485   * SWAP_MLOCK	- page is mlocked.
1486   */
1487  int try_to_unmap(struct page *page, enum ttu_flags flags)
1488  {
1489  	int ret;
1490  	struct rmap_walk_control rwc = {
1491  		.rmap_one = try_to_unmap_one,
1492  		.arg = (void *)flags,
1493  		.done = page_not_mapped,
1494  		.anon_lock = page_lock_anon_vma_read,
1495  	};
1496  
1497  	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
1498  
1499  	/*
1500  	 * During exec, a temporary VMA is set up and later moved.
1501  	 * The VMA is moved under the anon_vma lock but not the
1502  	 * page tables leading to a race where migration cannot
1503  	 * find the migration ptes. Rather than increasing the
1504  	 * locking requirements of exec(), migration skips
1505  	 * temporary VMAs until after exec() completes.
1506  	 */
1507  	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1508  		rwc.invalid_vma = invalid_migration_vma;
1509  
1510  	ret = rmap_walk(page, &rwc);
1511  
1512  	if (ret != SWAP_MLOCK && !page_mapped(page))
1513  		ret = SWAP_SUCCESS;
1514  	return ret;
1515  }
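
/*
 * For reference, vmscan consumes the return values roughly like this
 * (an illustrative sketch of shrink_page_list(), not verbatim):
 *
 *	switch (try_to_unmap(page, ttu_flags)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	/* try to free the page below */
 *	}
 */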
1516  
1517  /**
1518   * try_to_munlock - try to munlock a page
1519   * @page: the page to be munlocked
1520   *
1521   * Called from munlock code.  Checks all of the VMAs mapping the page
1522   * to make sure nobody else has this page mlocked. The page will be
1523   * returned with PG_mlocked cleared if no other vmas have it mlocked.
1524   *
1525   * Return values are:
1526   *
1527   * SWAP_AGAIN	- no vma is holding page mlocked, or,
1528   * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
1529   * SWAP_FAIL	- page cannot be located at present
1530   * SWAP_MLOCK	- page is now mlocked.
1531   */
1532  int try_to_munlock(struct page *page)
1533  {
1534  	int ret;
1535  	struct rmap_walk_control rwc = {
1536  		.rmap_one = try_to_unmap_one,
1537  		.arg = (void *)TTU_MUNLOCK,
1538  		.done = page_not_mapped,
1539  		.anon_lock = page_lock_anon_vma_read,
1540  
1541  	};
1542  
1543  	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1544  
1545  	ret = rmap_walk(page, &rwc);
1546  	return ret;
1547  }
1548  
1549  void __put_anon_vma(struct anon_vma *anon_vma)
1550  {
1551  	struct anon_vma *root = anon_vma->root;
1552  
1553  	anon_vma_free(anon_vma);
1554  	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1555  		anon_vma_free(root);
1556  }
1557  
1558  static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1559  					struct rmap_walk_control *rwc)
1560  {
1561  	struct anon_vma *anon_vma;
1562  
1563  	if (rwc->anon_lock)
1564  		return rwc->anon_lock(page);
1565  
1566  	/*
1567  	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
1568  	 * because that depends on page_mapped(); but not all its usages
1569  	 * are holding mmap_sem. Users without mmap_sem are required to
1570  	 * take a reference count to prevent the anon_vma from disappearing.
1571  	 */
1572  	anon_vma = page_anon_vma(page);
1573  	if (!anon_vma)
1574  		return NULL;
1575  
1576  	anon_vma_lock_read(anon_vma);
1577  	return anon_vma;
1578  }
1579  
1580  /*
1581   * rmap_walk_anon - do something to anonymous page using the anon_vma-based
1582   * rmap method
1583   * @page: the page to be handled
1584   * @rwc: control variable according to each walk type
1585   *
1586   * Find all the mappings of a page using the mapping pointer and the vma chains
1587   * contained in the anon_vma struct it points to.
1588   *
1589   * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1590   * where the page was found will be held for write.  So, we won't recheck
1591   * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1592   * LOCKED.
1593   */
1594  static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1595  {
1596  	struct anon_vma *anon_vma;
1597  	pgoff_t pgoff;
1598  	struct anon_vma_chain *avc;
1599  	int ret = SWAP_AGAIN;
1600  
1601  	anon_vma = rmap_walk_anon_lock(page, rwc);
1602  	if (!anon_vma)
1603  		return ret;
1604  
1605  	pgoff = page_to_pgoff(page);
1606  	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1607  		struct vm_area_struct *vma = avc->vma;
1608  		unsigned long address = vma_address(page, vma);
1609  
1610  		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1611  			continue;
1612  
1613  		ret = rwc->rmap_one(page, vma, address, rwc->arg);
1614  		if (ret != SWAP_AGAIN)
1615  			break;
1616  		if (rwc->done && rwc->done(page))
1617  			break;
1618  	}
1619  	anon_vma_unlock_read(anon_vma);
1620  	return ret;
1621  }
1622  
1623  /*
1624   * rmap_walk_file - do something to file page using the object-based rmap method
1625   * @page: the page to be handled
1626   * @rwc: control variable according to each walk type
1627   *
1628   * Find all the mappings of a page using the mapping pointer and the vma chains
1629   * contained in the address_space struct it points to.
1630   *
1631   * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1632   * where the page was found will be held for write.  So, we won't recheck
1633   * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1634   * LOCKED.
1635   */
1636  static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1637  {
1638  	struct address_space *mapping = page->mapping;
1639  	pgoff_t pgoff;
1640  	struct vm_area_struct *vma;
1641  	int ret = SWAP_AGAIN;
1642  
1643  	/*
1644  	 * The page lock not only makes sure that page->mapping cannot
1645  	 * suddenly be NULLified by truncation, it makes sure that the
1646  	 * structure at mapping cannot be freed and reused yet,
1647  	 * so we can safely take mapping->i_mmap_rwsem.
1648  	 */
1649  	VM_BUG_ON_PAGE(!PageLocked(page), page);
1650  
1651  	if (!mapping)
1652  		return ret;
1653  
1654  	pgoff = page_to_pgoff(page);
1655  	i_mmap_lock_read(mapping);
1656  	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1657  		unsigned long address = vma_address(page, vma);
1658  
1659  		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1660  			continue;
1661  
1662  		ret = rwc->rmap_one(page, vma, address, rwc->arg);
1663  		if (ret != SWAP_AGAIN)
1664  			goto done;
1665  		if (rwc->done && rwc->done(page))
1666  			goto done;
1667  	}
1668  
1669  done:
1670  	i_mmap_unlock_read(mapping);
1671  	return ret;
1672  }
1673  
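/*
 * rmap_walk - walk all mappings of @page and apply rwc->rmap_one to each.
 *
 * Dispatches to the KSM, anon or file walker depending on the page type;
 * the walker itself takes the appropriate anon_vma or i_mmap lock.
 */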
1674  int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1675  {
1676  	if (unlikely(PageKsm(page)))
1677  		return rmap_walk_ksm(page, rwc);
1678  	else if (PageAnon(page))
1679  		return rmap_walk_anon(page, rwc);
1680  	else
1681  		return rmap_walk_file(page, rwc);
1682  }
1683  
1684  #ifdef CONFIG_HUGETLB_PAGE
1685  /*
1686   * The following three functions are for anonymous (private mapped) hugepages.
1687   * Unlike common anonymous pages, anonymous hugepages have no accounting code
1688   * and no lru code, because we handle hugepages differently from common pages.
1689   */
1690  static void __hugepage_set_anon_rmap(struct page *page,
1691  	struct vm_area_struct *vma, unsigned long address, int exclusive)
1692  {
1693  	struct anon_vma *anon_vma = vma->anon_vma;
1694  
1695  	BUG_ON(!anon_vma);
1696  
1697  	if (PageAnon(page))
1698  		return;
1699  	if (!exclusive)
1700  		anon_vma = anon_vma->root;
1701  
1702  	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1703  	page->mapping = (struct address_space *) anon_vma;
1704  	page->index = linear_page_index(vma, address);
1705  }
1706  
1707  void hugepage_add_anon_rmap(struct page *page,
1708  			    struct vm_area_struct *vma, unsigned long address)
1709  {
1710  	struct anon_vma *anon_vma = vma->anon_vma;
1711  	int first;
1712  
1713  	BUG_ON(!PageLocked(page));
1714  	BUG_ON(!anon_vma);
1715  	/* address might be in next vma when migration races vma_adjust */
1716  	first = atomic_inc_and_test(&page->_mapcount);
1717  	if (first)
1718  		__hugepage_set_anon_rmap(page, vma, address, 0);
1719  }
1720  
1721  void hugepage_add_new_anon_rmap(struct page *page,
1722  			struct vm_area_struct *vma, unsigned long address)
1723  {
1724  	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1725  	atomic_set(&page->_mapcount, 0);
1726  	__hugepage_set_anon_rmap(page, vma, address, 1);
1727  }
1728  #endif /* CONFIG_HUGETLB_PAGE */
1729