1 /*
2  * mm/rmap.c - physical to virtual reverse mappings
3  *
4  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5  * Released under the General Public License (GPL).
6  *
7  * Simple, low overhead reverse mapping scheme.
8  * Please try to keep this thing as modular as possible.
9  *
10  * Provides methods for unmapping each kind of mapped page:
11  * the anon methods track anonymous pages, and
12  * the file methods track pages belonging to an inode.
13  *
14  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17  * Contributions by Hugh Dickins 2003, 2004
18  */
19 
20 /*
21  * Lock ordering in mm:
22  *
23  * inode->i_mutex	(while writing or truncating, not reading or faulting)
24  *   inode->i_alloc_sem (vmtruncate_range)
25  *   mm->mmap_sem
26  *     page->flags PG_locked (lock_page)
27  *       mapping->i_mmap_mutex
28  *         anon_vma->mutex
29  *           mm->page_table_lock or pte_lock
30  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
31  *             swap_lock (in swap_duplicate, swap_info_get)
32  *               mmlist_lock (in mmput, drain_mmlist and others)
33  *               mapping->private_lock (in __set_page_dirty_buffers)
34  *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
35  *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
36  *                 sb_lock (within inode_lock in fs/fs-writeback.c)
37  *                 mapping->tree_lock (widely used, in set_page_dirty,
38  *                           in arch-dependent flush_dcache_mmap_lock,
39  *                           within inode_wb_list_lock in __sync_single_inode)
40  *
41  * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
42  *   ->tasklist_lock
43  *     pte map lock
44  */
45 
46 #include <linux/mm.h>
47 #include <linux/pagemap.h>
48 #include <linux/swap.h>
49 #include <linux/swapops.h>
50 #include <linux/slab.h>
51 #include <linux/init.h>
52 #include <linux/ksm.h>
53 #include <linux/rmap.h>
54 #include <linux/rcupdate.h>
55 #include <linux/module.h>
56 #include <linux/memcontrol.h>
57 #include <linux/mmu_notifier.h>
58 #include <linux/migrate.h>
59 #include <linux/hugetlb.h>
60 
61 #include <asm/tlbflush.h>
62 
63 #include "internal.h"
64 
65 static struct kmem_cache *anon_vma_cachep;
66 static struct kmem_cache *anon_vma_chain_cachep;
67 
68 static inline struct anon_vma *anon_vma_alloc(void)
69 {
70 	struct anon_vma *anon_vma;
71 
72 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
73 	if (anon_vma) {
74 		atomic_set(&anon_vma->refcount, 1);
75 		/*
76 		 * Initialise the anon_vma root to point to itself. If called
77 		 * from fork, the root will be reset to the parent's anon_vma.
78 		 */
79 		anon_vma->root = anon_vma;
80 	}
81 
82 	return anon_vma;
83 }
84 
85 static inline void anon_vma_free(struct anon_vma *anon_vma)
86 {
87 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
88 
89 	/*
90 	 * Synchronize against page_lock_anon_vma() such that
91 	 * we can safely hold the lock without the anon_vma getting
92 	 * freed.
93 	 *
94 	 * Relies on the full mb implied by the atomic_dec_and_test() from
95 	 * put_anon_vma() against the acquire barrier implied by
96 	 * mutex_trylock() from page_lock_anon_vma(). This orders:
97 	 *
98 	 * page_lock_anon_vma()		VS	put_anon_vma()
99 	 *   mutex_trylock()			  atomic_dec_and_test()
100 	 *   LOCK				  MB
101 	 *   atomic_read()			  mutex_is_locked()
102 	 *
103 	 * LOCK should suffice since the actual taking of the lock must
104 	 * happen _before_ what follows.
105 	 */
106 	if (mutex_is_locked(&anon_vma->root->mutex)) {
107 		anon_vma_lock(anon_vma);
108 		anon_vma_unlock(anon_vma);
109 	}
110 
111 	kmem_cache_free(anon_vma_cachep, anon_vma);
112 }
113 
114 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
115 {
116 	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
117 }
118 
119 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
120 {
121 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
122 }
123 
124 /**
125  * anon_vma_prepare - attach an anon_vma to a memory region
126  * @vma: the memory region in question
127  *
128  * This makes sure the memory mapping described by 'vma' has
129  * an 'anon_vma' attached to it, so that we can associate the
130  * anonymous pages mapped into it with that anon_vma.
131  *
132  * The common case will be that we already have one, but if
133  * not we either need to find an adjacent mapping that we
134  * can re-use the anon_vma from (very common when the only
135  * reason for splitting a vma has been mprotect()), or we
136  * allocate a new one.
137  *
138  * Anon-vma allocations are very subtle, because we may have
139  * optimistically looked up an anon_vma in page_lock_anon_vma()
140  * and that may actually touch the spinlock even in the newly
141  * allocated vma (it depends on RCU to make sure that the
142  * anon_vma isn't actually destroyed).
143  *
144  * As a result, we need to do proper anon_vma locking even
145  * for the new allocation. At the same time, we do not want
146  * to do any locking for the common case of already having
147  * an anon_vma.
148  *
149  * This must be called with the mmap_sem held for reading.
150  */
151 int anon_vma_prepare(struct vm_area_struct *vma)
152 {
153 	struct anon_vma *anon_vma = vma->anon_vma;
154 	struct anon_vma_chain *avc;
155 
156 	might_sleep();
157 	if (unlikely(!anon_vma)) {
158 		struct mm_struct *mm = vma->vm_mm;
159 		struct anon_vma *allocated;
160 
161 		avc = anon_vma_chain_alloc(GFP_KERNEL);
162 		if (!avc)
163 			goto out_enomem;
164 
165 		anon_vma = find_mergeable_anon_vma(vma);
166 		allocated = NULL;
167 		if (!anon_vma) {
168 			anon_vma = anon_vma_alloc();
169 			if (unlikely(!anon_vma))
170 				goto out_enomem_free_avc;
171 			allocated = anon_vma;
172 		}
173 
174 		anon_vma_lock(anon_vma);
175 		/* page_table_lock to protect against threads */
176 		spin_lock(&mm->page_table_lock);
177 		if (likely(!vma->anon_vma)) {
178 			vma->anon_vma = anon_vma;
179 			avc->anon_vma = anon_vma;
180 			avc->vma = vma;
181 			list_add(&avc->same_vma, &vma->anon_vma_chain);
182 			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
183 			allocated = NULL;
184 			avc = NULL;
185 		}
186 		spin_unlock(&mm->page_table_lock);
187 		anon_vma_unlock(anon_vma);
188 
189 		if (unlikely(allocated))
190 			put_anon_vma(allocated);
191 		if (unlikely(avc))
192 			anon_vma_chain_free(avc);
193 	}
194 	return 0;
195 
196  out_enomem_free_avc:
197 	anon_vma_chain_free(avc);
198  out_enomem:
199 	return -ENOMEM;
200 }
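
/*
 * Illustrative sketch (not part of rmap.c, never compiled): roughly how an
 * anonymous fault path along the lines of do_anonymous_page() in mm/memory.c
 * is expected to use anon_vma_prepare().  The function name is hypothetical,
 * and error handling and the pte manipulation are elided.
 */
#if 0	/* example only */
static int example_anon_fault(struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;

	/* Make sure vma->anon_vma exists before mapping the first anon page. */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		return VM_FAULT_OOM;

	/* ... set up and install the pte under the pte lock ... */

	/* Brand new page: see page_add_new_anon_rmap() below. */
	page_add_new_anon_rmap(page, vma, address);
	return 0;
}
#endif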
201 
202 /*
203  * This is a useful helper function for locking the anon_vma root as
204  * we traverse the vma->anon_vma_chain, looping over anon_vma's that
205  * have the same vma.
206  *
207  * Such anon_vma's should have the same root, so you'd expect to see
208  * just a single mutex_lock for the whole traversal.
209  */
210 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
211 {
212 	struct anon_vma *new_root = anon_vma->root;
213 	if (new_root != root) {
214 		if (WARN_ON_ONCE(root))
215 			mutex_unlock(&root->mutex);
216 		root = new_root;
217 		mutex_lock(&root->mutex);
218 	}
219 	return root;
220 }
221 
222 static inline void unlock_anon_vma_root(struct anon_vma *root)
223 {
224 	if (root)
225 		mutex_unlock(&root->mutex);
226 }
227 
228 static void anon_vma_chain_link(struct vm_area_struct *vma,
229 				struct anon_vma_chain *avc,
230 				struct anon_vma *anon_vma)
231 {
232 	avc->vma = vma;
233 	avc->anon_vma = anon_vma;
234 	list_add(&avc->same_vma, &vma->anon_vma_chain);
235 
236 	/*
237 	 * It's critical to add new vmas to the tail of the anon_vma,
238 	 * see comment in huge_memory.c:__split_huge_page().
239 	 */
240 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
241 }
242 
243 /*
244  * Attach the anon_vmas from src to dst.
245  * Returns 0 on success, -ENOMEM on failure.
246  */
247 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
248 {
249 	struct anon_vma_chain *avc, *pavc;
250 	struct anon_vma *root = NULL;
251 
252 	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
253 		struct anon_vma *anon_vma;
254 
255 		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
256 		if (unlikely(!avc)) {
257 			unlock_anon_vma_root(root);
258 			root = NULL;
259 			avc = anon_vma_chain_alloc(GFP_KERNEL);
260 			if (!avc)
261 				goto enomem_failure;
262 		}
263 		anon_vma = pavc->anon_vma;
264 		root = lock_anon_vma_root(root, anon_vma);
265 		anon_vma_chain_link(dst, avc, anon_vma);
266 	}
267 	unlock_anon_vma_root(root);
268 	return 0;
269 
270  enomem_failure:
271 	unlink_anon_vmas(dst);
272 	return -ENOMEM;
273 }
274 
275 /*
276  * Attach vma to its own anon_vma, as well as to the anon_vmas that
277  * the corresponding VMA in the parent process is attached to.
278  * Returns 0 on success, non-zero on failure.
279  */
280 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
281 {
282 	struct anon_vma_chain *avc;
283 	struct anon_vma *anon_vma;
284 
285 	/* Don't bother if the parent process has no anon_vma here. */
286 	if (!pvma->anon_vma)
287 		return 0;
288 
289 	/*
290 	 * First, attach the new VMA to the parent VMA's anon_vmas,
291 	 * so rmap can find non-COWed pages in child processes.
292 	 */
293 	if (anon_vma_clone(vma, pvma))
294 		return -ENOMEM;
295 
296 	/* Then add our own anon_vma. */
297 	anon_vma = anon_vma_alloc();
298 	if (!anon_vma)
299 		goto out_error;
300 	avc = anon_vma_chain_alloc(GFP_KERNEL);
301 	if (!avc)
302 		goto out_error_free_anon_vma;
303 
304 	/*
305 	 * The root anon_vma's spinlock is the lock actually used when we
306 	 * lock any of the anon_vmas in this anon_vma tree.
307 	 */
308 	anon_vma->root = pvma->anon_vma->root;
309 	/*
310 	 * With refcounts, an anon_vma can stay around longer than the
311 	 * process it belongs to. The root anon_vma needs to be pinned until
312 	 * this anon_vma is freed, because the lock lives in the root.
313 	 */
314 	get_anon_vma(anon_vma->root);
315 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
316 	vma->anon_vma = anon_vma;
317 	anon_vma_lock(anon_vma);
318 	anon_vma_chain_link(vma, avc, anon_vma);
319 	anon_vma_unlock(anon_vma);
320 
321 	return 0;
322 
323  out_error_free_anon_vma:
324 	put_anon_vma(anon_vma);
325  out_error:
326 	unlink_anon_vmas(vma);
327 	return -ENOMEM;
328 }
329 
330 void unlink_anon_vmas(struct vm_area_struct *vma)
331 {
332 	struct anon_vma_chain *avc, *next;
333 	struct anon_vma *root = NULL;
334 
335 	/*
336 	 * Unlink each anon_vma chained to the VMA.  This list is ordered
337 	 * from newest to oldest, ensuring the root anon_vma gets freed last.
338 	 */
339 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
340 		struct anon_vma *anon_vma = avc->anon_vma;
341 
342 		root = lock_anon_vma_root(root, anon_vma);
343 		list_del(&avc->same_anon_vma);
344 
345 		/*
346 		 * Leave empty anon_vmas on the list - we'll need
347 		 * to free them outside the lock.
348 		 */
349 		if (list_empty(&anon_vma->head))
350 			continue;
351 
352 		list_del(&avc->same_vma);
353 		anon_vma_chain_free(avc);
354 	}
355 	unlock_anon_vma_root(root);
356 
357 	/*
358 	 * Iterate the list once more, it now only contains empty and unlinked
359 	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
360 	 * needing to acquire the anon_vma->root->mutex.
361 	 */
362 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
363 		struct anon_vma *anon_vma = avc->anon_vma;
364 
365 		put_anon_vma(anon_vma);
366 
367 		list_del(&avc->same_vma);
368 		anon_vma_chain_free(avc);
369 	}
370 }
371 
372 static void anon_vma_ctor(void *data)
373 {
374 	struct anon_vma *anon_vma = data;
375 
376 	mutex_init(&anon_vma->mutex);
377 	atomic_set(&anon_vma->refcount, 0);
378 	INIT_LIST_HEAD(&anon_vma->head);
379 }
380 
381 void __init anon_vma_init(void)
382 {
383 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
384 			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
385 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
386 }
387 
388 /*
389  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
390  *
391  * Since there is no serialization whatsoever against page_remove_rmap()
392  * the best this function can do is return a locked anon_vma that might
393  * have been relevant to this page.
394  *
395  * The page might have been remapped to a different anon_vma or the anon_vma
396  * returned may already be freed (and even reused).
397  *
398  * In case it was remapped to a different anon_vma, the new anon_vma will be a
399  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
400  * ensure that any anon_vma obtained from the page will still be valid for as
401  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
402  *
403  * All users of this function must be very careful when walking the anon_vma
404  * chain and verify that the page in question is indeed mapped in it
405  * [ something equivalent to page_mapped_in_vma() ].
406  *
407  * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
408  * that the anon_vma pointer from page->mapping is valid if there is a
409  * mapcount, we can dereference the anon_vma after observing those.
410  */
411 struct anon_vma *page_get_anon_vma(struct page *page)
412 {
413 	struct anon_vma *anon_vma = NULL;
414 	unsigned long anon_mapping;
415 
416 	rcu_read_lock();
417 	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
418 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
419 		goto out;
420 	if (!page_mapped(page))
421 		goto out;
422 
423 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
424 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
425 		anon_vma = NULL;
426 		goto out;
427 	}
428 
429 	/*
430 	 * If this page is still mapped, then its anon_vma cannot have been
431 	 * freed.  But if it has been unmapped, we have no security against the
432 	 * anon_vma structure being freed and reused (for another anon_vma:
433 	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
434 	 * above cannot corrupt).
435 	 */
436 	if (!page_mapped(page)) {
437 		put_anon_vma(anon_vma);
438 		anon_vma = NULL;
439 	}
440 out:
441 	rcu_read_unlock();
442 
443 	return anon_vma;
444 }
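
/*
 * Sketch of the expected calling pattern for page_get_anon_vma()
 * (illustration only; the caller below is hypothetical): take a reference
 * to pin the anon_vma without locking it, use it, then drop the pin with
 * put_anon_vma().
 */
#if 0	/* example only */
static void example_pin_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;		/* not anonymous, or no longer mapped */

	/* ... anon_vma cannot be freed while we hold the reference ... */

	put_anon_vma(anon_vma);
}
#endif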
445 
446 /*
447  * Similar to page_get_anon_vma() except it locks the anon_vma.
448  *
449  * It's a little more complex as it tries to keep the fast path to a single
450  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
451  * reference like with page_get_anon_vma() and then block on the mutex.
452  */
453 struct anon_vma *page_lock_anon_vma(struct page *page)
454 {
455 	struct anon_vma *anon_vma = NULL;
456 	struct anon_vma *root_anon_vma;
457 	unsigned long anon_mapping;
458 
459 	rcu_read_lock();
460 	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
461 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
462 		goto out;
463 	if (!page_mapped(page))
464 		goto out;
465 
466 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
467 	root_anon_vma = ACCESS_ONCE(anon_vma->root);
468 	if (mutex_trylock(&root_anon_vma->mutex)) {
469 		/*
470 		 * If the page is still mapped, then this anon_vma is still
471 		 * its anon_vma, and holding the mutex ensures that it will
472 		 * not go away, see anon_vma_free().
473 		 */
474 		if (!page_mapped(page)) {
475 			mutex_unlock(&root_anon_vma->mutex);
476 			anon_vma = NULL;
477 		}
478 		goto out;
479 	}
480 
481 	/* trylock failed, we've got to sleep */
482 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
483 		anon_vma = NULL;
484 		goto out;
485 	}
486 
487 	if (!page_mapped(page)) {
488 		put_anon_vma(anon_vma);
489 		anon_vma = NULL;
490 		goto out;
491 	}
492 
493 	/* we pinned the anon_vma, it's safe to sleep */
494 	rcu_read_unlock();
495 	anon_vma_lock(anon_vma);
496 
497 	if (atomic_dec_and_test(&anon_vma->refcount)) {
498 		/*
499 		 * Oops, we held the last refcount, release the lock
500 		 * and bail -- can't simply use put_anon_vma() because
501 		 * we'll deadlock on the anon_vma_lock() recursion.
502 		 */
503 		anon_vma_unlock(anon_vma);
504 		__put_anon_vma(anon_vma);
505 		anon_vma = NULL;
506 	}
507 
508 	return anon_vma;
509 
510 out:
511 	rcu_read_unlock();
512 	return anon_vma;
513 }
514 
515 void page_unlock_anon_vma(struct anon_vma *anon_vma)
516 {
517 	anon_vma_unlock(anon_vma);
518 }
519 
520 /*
521  * At what user virtual address is page expected in @vma?
522  * Returns virtual address or -EFAULT if page's index/offset is not
523  * within the range mapped by the @vma.
524  */
525 inline unsigned long
526 vma_address(struct page *page, struct vm_area_struct *vma)
527 {
528 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
529 	unsigned long address;
530 
531 	if (unlikely(is_vm_hugetlb_page(vma)))
532 		pgoff = page->index << huge_page_order(page_hstate(page));
533 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
534 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
535 		/* page should be within @vma mapping range */
536 		return -EFAULT;
537 	}
538 	return address;
539 }
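
/*
 * Worked example of the calculation above (assuming 4K pages): for a vma
 * with vm_start == 0x40000000 and vm_pgoff == 0x10, a page whose ->index
 * is 0x12 yields 0x40000000 + ((0x12 - 0x10) << 12) == 0x40002000.
 */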
540 
541 /*
542  * At what user virtual address is page expected in vma?
543  * Caller should check the page is actually part of the vma.
544  */
545 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
546 {
547 	if (PageAnon(page)) {
548 		struct anon_vma *page__anon_vma = page_anon_vma(page);
549 		/*
550 		 * Note: swapoff's unuse_vma() is more efficient with this
551 		 * check, and needs it to match anon_vma when KSM is active.
552 		 */
553 		if (!vma->anon_vma || !page__anon_vma ||
554 		    vma->anon_vma->root != page__anon_vma->root)
555 			return -EFAULT;
556 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
557 		if (!vma->vm_file ||
558 		    vma->vm_file->f_mapping != page->mapping)
559 			return -EFAULT;
560 	} else
561 		return -EFAULT;
562 	return vma_address(page, vma);
563 }
564 
565 /*
566  * Check that @page is mapped at @address into @mm.
567  *
568  * If @sync is false, page_check_address may perform a racy check to avoid
569  * the page table lock when the pte is not present (helpful when reclaiming
570  * highly shared pages).
571  *
572  * On success returns with pte mapped and locked.
573  */
574 pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
575 			  unsigned long address, spinlock_t **ptlp, int sync)
576 {
577 	pgd_t *pgd;
578 	pud_t *pud;
579 	pmd_t *pmd;
580 	pte_t *pte;
581 	spinlock_t *ptl;
582 
583 	if (unlikely(PageHuge(page))) {
584 		pte = huge_pte_offset(mm, address);
585 		ptl = &mm->page_table_lock;
586 		goto check;
587 	}
588 
589 	pgd = pgd_offset(mm, address);
590 	if (!pgd_present(*pgd))
591 		return NULL;
592 
593 	pud = pud_offset(pgd, address);
594 	if (!pud_present(*pud))
595 		return NULL;
596 
597 	pmd = pmd_offset(pud, address);
598 	if (!pmd_present(*pmd))
599 		return NULL;
600 	if (pmd_trans_huge(*pmd))
601 		return NULL;
602 
603 	pte = pte_offset_map(pmd, address);
604 	/* Make a quick check before getting the lock */
605 	if (!sync && !pte_present(*pte)) {
606 		pte_unmap(pte);
607 		return NULL;
608 	}
609 
610 	ptl = pte_lockptr(mm, pmd);
611 check:
612 	spin_lock(ptl);
613 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
614 		*ptlp = ptl;
615 		return pte;
616 	}
617 	pte_unmap_unlock(pte, ptl);
618 	return NULL;
619 }
620 
621 /**
622  * page_mapped_in_vma - check whether a page is really mapped in a VMA
623  * @page: the page to test
624  * @vma: the VMA to test
625  *
626  * Returns 1 if the page is mapped into the page tables of the VMA, 0
627  * if the page is not mapped into the page tables of this VMA.  Only
628  * valid for normal file or anonymous VMAs.
629  */
630 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
631 {
632 	unsigned long address;
633 	pte_t *pte;
634 	spinlock_t *ptl;
635 
636 	address = vma_address(page, vma);
637 	if (address == -EFAULT)		/* out of vma range */
638 		return 0;
639 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
640 	if (!pte)			/* the page is not in this mm */
641 		return 0;
642 	pte_unmap_unlock(pte, ptl);
643 
644 	return 1;
645 }
646 
647 /*
648  * Subfunctions of page_referenced: page_referenced_one called
649  * repeatedly from either page_referenced_anon or page_referenced_file.
650  */
651 int page_referenced_one(struct page *page, struct vm_area_struct *vma,
652 			unsigned long address, unsigned int *mapcount,
653 			unsigned long *vm_flags)
654 {
655 	struct mm_struct *mm = vma->vm_mm;
656 	int referenced = 0;
657 
658 	if (unlikely(PageTransHuge(page))) {
659 		pmd_t *pmd;
660 
661 		spin_lock(&mm->page_table_lock);
662 		/*
663 		 * rmap might return false positives; we must filter
664 		 * these out using page_check_address_pmd().
665 		 */
666 		pmd = page_check_address_pmd(page, mm, address,
667 					     PAGE_CHECK_ADDRESS_PMD_FLAG);
668 		if (!pmd) {
669 			spin_unlock(&mm->page_table_lock);
670 			goto out;
671 		}
672 
673 		if (vma->vm_flags & VM_LOCKED) {
674 			spin_unlock(&mm->page_table_lock);
675 			*mapcount = 0;	/* break early from loop */
676 			*vm_flags |= VM_LOCKED;
677 			goto out;
678 		}
679 
680 		/* go ahead even if the pmd is pmd_trans_splitting() */
681 		if (pmdp_clear_flush_young_notify(vma, address, pmd))
682 			referenced++;
683 		spin_unlock(&mm->page_table_lock);
684 	} else {
685 		pte_t *pte;
686 		spinlock_t *ptl;
687 
688 		/*
689 		 * rmap might return false positives; we must filter
690 		 * these out using page_check_address().
691 		 */
692 		pte = page_check_address(page, mm, address, &ptl, 0);
693 		if (!pte)
694 			goto out;
695 
696 		if (vma->vm_flags & VM_LOCKED) {
697 			pte_unmap_unlock(pte, ptl);
698 			*mapcount = 0;	/* break early from loop */
699 			*vm_flags |= VM_LOCKED;
700 			goto out;
701 		}
702 
703 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
704 			/*
705 			 * Don't treat a reference through a sequentially read
706 			 * mapping as such.  If the page has been used in
707 			 * another mapping, we will catch it; if this other
708 			 * mapping is already gone, the unmap path will have
709 			 * set PG_referenced or activated the page.
710 			 */
711 			if (likely(!VM_SequentialReadHint(vma)))
712 				referenced++;
713 		}
714 		pte_unmap_unlock(pte, ptl);
715 	}
716 
717 	/* Pretend the page is referenced if the task has the
718 	   swap token and is in the middle of a page fault. */
719 	if (mm != current->mm && has_swap_token(mm) &&
720 			rwsem_is_locked(&mm->mmap_sem))
721 		referenced++;
722 
723 	(*mapcount)--;
724 
725 	if (referenced)
726 		*vm_flags |= vma->vm_flags;
727 out:
728 	return referenced;
729 }
730 
731 static int page_referenced_anon(struct page *page,
732 				struct mem_cgroup *mem_cont,
733 				unsigned long *vm_flags)
734 {
735 	unsigned int mapcount;
736 	struct anon_vma *anon_vma;
737 	struct anon_vma_chain *avc;
738 	int referenced = 0;
739 
740 	anon_vma = page_lock_anon_vma(page);
741 	if (!anon_vma)
742 		return referenced;
743 
744 	mapcount = page_mapcount(page);
745 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
746 		struct vm_area_struct *vma = avc->vma;
747 		unsigned long address = vma_address(page, vma);
748 		if (address == -EFAULT)
749 			continue;
750 		/*
751 		 * If we are reclaiming on behalf of a cgroup, skip
752 		 * counting on behalf of references from different
753 		 * cgroups
754 		 */
755 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
756 			continue;
757 		referenced += page_referenced_one(page, vma, address,
758 						  &mapcount, vm_flags);
759 		if (!mapcount)
760 			break;
761 	}
762 
763 	page_unlock_anon_vma(anon_vma);
764 	return referenced;
765 }
766 
767 /**
768  * page_referenced_file - referenced check for object-based rmap
769  * @page: the page we're checking references on.
770  * @mem_cont: target memory controller
771  * @vm_flags: collect encountered vma->vm_flags of the vmas that actually referenced the page
772  *
773  * For an object-based mapped page, find all the places it is mapped and
774  * check/clear the referenced flag.  This is done by following the page->mapping
775  * pointer, then walking the chain of vmas it holds.  It returns the number
776  * of references it found.
777  *
778  * This function is only called from page_referenced for object-based pages.
779  */
780 static int page_referenced_file(struct page *page,
781 				struct mem_cgroup *mem_cont,
782 				unsigned long *vm_flags)
783 {
784 	unsigned int mapcount;
785 	struct address_space *mapping = page->mapping;
786 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
787 	struct vm_area_struct *vma;
788 	struct prio_tree_iter iter;
789 	int referenced = 0;
790 
791 	/*
792 	 * The caller's checks on page->mapping and !PageAnon have made
793 	 * sure that this is a file page: the check for page->mapping
794 	 * excludes the case just before it gets set on an anon page.
795 	 */
796 	BUG_ON(PageAnon(page));
797 
798 	/*
799 	 * The page lock not only makes sure that page->mapping cannot
800 	 * suddenly be NULLified by truncation, it makes sure that the
801 	 * structure at mapping cannot be freed and reused yet,
802 	 * so we can safely take mapping->i_mmap_mutex.
803 	 */
804 	BUG_ON(!PageLocked(page));
805 
806 	mutex_lock(&mapping->i_mmap_mutex);
807 
808 	/*
809 	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
810 	 * is more likely to be accurate if we note it after spinning.
811 	 */
812 	mapcount = page_mapcount(page);
813 
814 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
815 		unsigned long address = vma_address(page, vma);
816 		if (address == -EFAULT)
817 			continue;
818 		/*
819 		 * If we are reclaiming on behalf of a cgroup, skip
820 		 * counting on behalf of references from different
821 		 * cgroups
822 		 */
823 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
824 			continue;
825 		referenced += page_referenced_one(page, vma, address,
826 						  &mapcount, vm_flags);
827 		if (!mapcount)
828 			break;
829 	}
830 
831 	mutex_unlock(&mapping->i_mmap_mutex);
832 	return referenced;
833 }
834 
835 /**
836  * page_referenced - test if the page was referenced
837  * @page: the page to test
838  * @is_locked: caller holds lock on the page
839  * @mem_cont: target memory controller
840  * @vm_flags: collect encountered vma->vm_flags of the vmas that actually referenced the page
841  *
842  * Quick test_and_clear_referenced for all mappings to a page,
843  * returns the number of ptes which referenced the page.
844  */
845 int page_referenced(struct page *page,
846 		    int is_locked,
847 		    struct mem_cgroup *mem_cont,
848 		    unsigned long *vm_flags)
849 {
850 	int referenced = 0;
851 	int we_locked = 0;
852 
853 	*vm_flags = 0;
854 	if (page_mapped(page) && page_rmapping(page)) {
855 		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
856 			we_locked = trylock_page(page);
857 			if (!we_locked) {
858 				referenced++;
859 				goto out;
860 			}
861 		}
862 		if (unlikely(PageKsm(page)))
863 			referenced += page_referenced_ksm(page, mem_cont,
864 								vm_flags);
865 		else if (PageAnon(page))
866 			referenced += page_referenced_anon(page, mem_cont,
867 								vm_flags);
868 		else if (page->mapping)
869 			referenced += page_referenced_file(page, mem_cont,
870 								vm_flags);
871 		if (we_locked)
872 			unlock_page(page);
873 	}
874 out:
875 	if (page_test_and_clear_young(page_to_pfn(page)))
876 		referenced++;
877 
878 	return referenced;
879 }
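
/*
 * Sketch of how reclaim is expected to consult page_referenced(), loosely
 * modelled on the reference checks in mm/vmscan.c (illustration only; the
 * helper name is hypothetical and the real policy is more involved):
 */
#if 0	/* example only */
static int example_recently_used(struct page *page)
{
	unsigned long vm_flags;
	int referenced;

	/* The page is locked at this point, hence is_locked == 1. */
	referenced = page_referenced(page, 1, NULL, &vm_flags);

	/*
	 * vm_flags now holds the vm_flags (e.g. VM_EXEC, VM_LOCKED) of the
	 * vmas that referenced the page; the real reclaim policy inspects
	 * these as well.
	 */
	return referenced != 0;
}
#endif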
880 
881 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
882 			    unsigned long address)
883 {
884 	struct mm_struct *mm = vma->vm_mm;
885 	pte_t *pte;
886 	spinlock_t *ptl;
887 	int ret = 0;
888 
889 	pte = page_check_address(page, mm, address, &ptl, 1);
890 	if (!pte)
891 		goto out;
892 
893 	if (pte_dirty(*pte) || pte_write(*pte)) {
894 		pte_t entry;
895 
896 		flush_cache_page(vma, address, pte_pfn(*pte));
897 		entry = ptep_clear_flush_notify(vma, address, pte);
898 		entry = pte_wrprotect(entry);
899 		entry = pte_mkclean(entry);
900 		set_pte_at(mm, address, pte, entry);
901 		ret = 1;
902 	}
903 
904 	pte_unmap_unlock(pte, ptl);
905 out:
906 	return ret;
907 }
908 
909 static int page_mkclean_file(struct address_space *mapping, struct page *page)
910 {
911 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
912 	struct vm_area_struct *vma;
913 	struct prio_tree_iter iter;
914 	int ret = 0;
915 
916 	BUG_ON(PageAnon(page));
917 
918 	mutex_lock(&mapping->i_mmap_mutex);
919 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
920 		if (vma->vm_flags & VM_SHARED) {
921 			unsigned long address = vma_address(page, vma);
922 			if (address == -EFAULT)
923 				continue;
924 			ret += page_mkclean_one(page, vma, address);
925 		}
926 	}
927 	mutex_unlock(&mapping->i_mmap_mutex);
928 	return ret;
929 }
930 
931 int page_mkclean(struct page *page)
932 {
933 	int ret = 0;
934 
935 	BUG_ON(!PageLocked(page));
936 
937 	if (page_mapped(page)) {
938 		struct address_space *mapping = page_mapping(page);
939 		if (mapping) {
940 			ret = page_mkclean_file(mapping, page);
941 			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
942 				ret = 1;
943 		}
944 	}
945 
946 	return ret;
947 }
948 EXPORT_SYMBOL_GPL(page_mkclean);
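
/*
 * Typical page_mkclean() call site, in the style of
 * clear_page_dirty_for_io() in mm/page-writeback.c (illustration only):
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *
 * i.e. any dirty or writable pte found while write-protecting the shared
 * mappings transfers the dirtiness to the struct page before writeback
 * decides what to do with it.
 */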
949 
950 /**
951  * page_move_anon_rmap - move a page to our anon_vma
952  * @page:	the page to move to our anon_vma
953  * @vma:	the vma the page belongs to
954  * @address:	the user virtual address mapped
955  *
956  * When a page belongs exclusively to one process after a COW event,
957  * that page can be moved into the anon_vma that belongs to just that
958  * process, so the rmap code will not search the parent or sibling
959  * processes.
960  */
961 void page_move_anon_rmap(struct page *page,
962 	struct vm_area_struct *vma, unsigned long address)
963 {
964 	struct anon_vma *anon_vma = vma->anon_vma;
965 
966 	VM_BUG_ON(!PageLocked(page));
967 	VM_BUG_ON(!anon_vma);
968 	VM_BUG_ON(page->index != linear_page_index(vma, address));
969 
970 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
971 	page->mapping = (struct address_space *) anon_vma;
972 }
973 
974 /**
975  * __page_set_anon_rmap - set up new anonymous rmap
976  * @page:	Page to add to rmap
977  * @vma:	VM area to add page to.
978  * @address:	User virtual address of the mapping
979  * @exclusive:	the page is exclusively owned by the current process
980  */
981 static void __page_set_anon_rmap(struct page *page,
982 	struct vm_area_struct *vma, unsigned long address, int exclusive)
983 {
984 	struct anon_vma *anon_vma = vma->anon_vma;
985 
986 	BUG_ON(!anon_vma);
987 
988 	if (PageAnon(page))
989 		return;
990 
991 	/*
992 	 * If the page isn't exclusively mapped into this vma,
993 	 * we must use the _oldest_ possible anon_vma for the
994 	 * page mapping!
995 	 */
996 	if (!exclusive)
997 		anon_vma = anon_vma->root;
998 
999 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1000 	page->mapping = (struct address_space *) anon_vma;
1001 	page->index = linear_page_index(vma, address);
1002 }
1003 
1004 /**
1005  * __page_check_anon_rmap - sanity check anonymous rmap addition
1006  * @page:	the page to add the mapping to
1007  * @vma:	the vm area in which the mapping is added
1008  * @address:	the user virtual address mapped
1009  */
1010 static void __page_check_anon_rmap(struct page *page,
1011 	struct vm_area_struct *vma, unsigned long address)
1012 {
1013 #ifdef CONFIG_DEBUG_VM
1014 	/*
1015 	 * The page's anon-rmap details (mapping and index) are guaranteed to
1016 	 * be set up correctly at this point.
1017 	 *
1018 	 * We have exclusion against page_add_anon_rmap because the caller
1019 	 * always holds the page locked, except if called from page_dup_rmap,
1020 	 * in which case the page is already known to be setup.
1021 	 *
1022 	 * We have exclusion against page_add_new_anon_rmap because those pages
1023 	 * are initially only visible via the pagetables, and the pte is locked
1024 	 * over the call to page_add_new_anon_rmap.
1025 	 */
1026 	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1027 	BUG_ON(page->index != linear_page_index(vma, address));
1028 #endif
1029 }
1030 
1031 /**
1032  * page_add_anon_rmap - add pte mapping to an anonymous page
1033  * @page:	the page to add the mapping to
1034  * @vma:	the vm area in which the mapping is added
1035  * @address:	the user virtual address mapped
1036  *
1037  * The caller needs to hold the pte lock, and the page must be locked in
1038  * the anon_vma case: to serialize mapping,index checking after setting,
1039  * and to ensure that PageAnon is not being upgraded racily to PageKsm
1040  * (but PageKsm is never downgraded to PageAnon).
1041  */
1042 void page_add_anon_rmap(struct page *page,
1043 	struct vm_area_struct *vma, unsigned long address)
1044 {
1045 	do_page_add_anon_rmap(page, vma, address, 0);
1046 }
1047 
1048 /*
1049  * Special version of the above for do_swap_page, which often runs
1050  * into pages that are exclusively owned by the current process.
1051  * Everybody else should continue to use page_add_anon_rmap above.
1052  */
1053 void do_page_add_anon_rmap(struct page *page,
1054 	struct vm_area_struct *vma, unsigned long address, int exclusive)
1055 {
1056 	int first = atomic_inc_and_test(&page->_mapcount);
1057 	if (first) {
1058 		if (!PageTransHuge(page))
1059 			__inc_zone_page_state(page, NR_ANON_PAGES);
1060 		else
1061 			__inc_zone_page_state(page,
1062 					      NR_ANON_TRANSPARENT_HUGEPAGES);
1063 	}
1064 	if (unlikely(PageKsm(page)))
1065 		return;
1066 
1067 	VM_BUG_ON(!PageLocked(page));
1068 	/* address might be in next vma when migration races vma_adjust */
1069 	if (first)
1070 		__page_set_anon_rmap(page, vma, address, exclusive);
1071 	else
1072 		__page_check_anon_rmap(page, vma, address);
1073 }
1074 
1075 /**
1076  * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1077  * @page:	the page to add the mapping to
1078  * @vma:	the vm area in which the mapping is added
1079  * @address:	the user virtual address mapped
1080  *
1081  * Same as page_add_anon_rmap but must only be called on *new* pages.
1082  * This means the inc-and-test can be bypassed.
1083  * Page does not have to be locked.
1084  */
1085 void page_add_new_anon_rmap(struct page *page,
1086 	struct vm_area_struct *vma, unsigned long address)
1087 {
1088 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1089 	SetPageSwapBacked(page);
1090 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
1091 	if (!PageTransHuge(page))
1092 		__inc_zone_page_state(page, NR_ANON_PAGES);
1093 	else
1094 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1095 	__page_set_anon_rmap(page, vma, address, 1);
1096 	if (page_evictable(page, vma))
1097 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
1098 	else
1099 		add_page_to_unevictable_list(page);
1100 }
1101 
1102 /**
1103  * page_add_file_rmap - add pte mapping to a file page
1104  * @page: the page to add the mapping to
1105  *
1106  * The caller needs to hold the pte lock.
1107  */
1108 void page_add_file_rmap(struct page *page)
1109 {
1110 	if (atomic_inc_and_test(&page->_mapcount)) {
1111 		__inc_zone_page_state(page, NR_FILE_MAPPED);
1112 		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
1113 	}
1114 }
1115 
1116 /**
1117  * page_remove_rmap - take down pte mapping from a page
1118  * @page: page to remove mapping from
1119  *
1120  * The caller needs to hold the pte lock.
1121  */
1122 void page_remove_rmap(struct page *page)
1123 {
1124 	/* page still mapped by someone else? */
1125 	if (!atomic_add_negative(-1, &page->_mapcount))
1126 		return;
1127 
1128 	/*
1129 	 * Now that the last pte has gone, s390 must transfer dirty
1130 	 * flag from storage key to struct page.  We can usually skip
1131 	 * this if the page is anon, so about to be freed; but perhaps
1132 	 * not if it's in swapcache - there might be another pte slot
1133 	 * containing the swap entry, but page not yet written to swap.
1134 	 */
1135 	if ((!PageAnon(page) || PageSwapCache(page)) &&
1136 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
1137 		set_page_dirty(page);
1138 	/*
1139 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
1140 	 * and not charged by memcg for now.
1141 	 */
1142 	if (unlikely(PageHuge(page)))
1143 		return;
1144 	if (PageAnon(page)) {
1145 		mem_cgroup_uncharge_page(page);
1146 		if (!PageTransHuge(page))
1147 			__dec_zone_page_state(page, NR_ANON_PAGES);
1148 		else
1149 			__dec_zone_page_state(page,
1150 					      NR_ANON_TRANSPARENT_HUGEPAGES);
1151 	} else {
1152 		__dec_zone_page_state(page, NR_FILE_MAPPED);
1153 		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
1154 	}
1155 	/*
1156 	 * It would be tidy to reset the PageAnon mapping here,
1157 	 * but that might overwrite a racing page_add_anon_rmap
1158 	 * which increments mapcount after us but sets mapping
1159 	 * before us: so leave the reset to free_hot_cold_page,
1160 	 * and remember that it's only reliable while mapped.
1161 	 * Leaving it set also helps swapoff to reinstate ptes
1162 	 * faster for those pages still in swapcache.
1163 	 */
1164 }
1165 
1166 /*
1167  * Subfunctions of try_to_unmap: try_to_unmap_one called
1168  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
1169  */
1170 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1171 		     unsigned long address, enum ttu_flags flags)
1172 {
1173 	struct mm_struct *mm = vma->vm_mm;
1174 	pte_t *pte;
1175 	pte_t pteval;
1176 	spinlock_t *ptl;
1177 	int ret = SWAP_AGAIN;
1178 
1179 	pte = page_check_address(page, mm, address, &ptl, 0);
1180 	if (!pte)
1181 		goto out;
1182 
1183 	/*
1184 	 * If the page is mlock()d, we cannot swap it out.
1185 	 * If it's recently referenced (perhaps page_referenced
1186 	 * skipped over this mm) then we should reactivate it.
1187 	 */
1188 	if (!(flags & TTU_IGNORE_MLOCK)) {
1189 		if (vma->vm_flags & VM_LOCKED)
1190 			goto out_mlock;
1191 
1192 		if (TTU_ACTION(flags) == TTU_MUNLOCK)
1193 			goto out_unmap;
1194 	}
1195 	if (!(flags & TTU_IGNORE_ACCESS)) {
1196 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
1197 			ret = SWAP_FAIL;
1198 			goto out_unmap;
1199 		}
1200   	}
1201 
1202 	/* Nuke the page table entry. */
1203 	flush_cache_page(vma, address, page_to_pfn(page));
1204 	pteval = ptep_clear_flush_notify(vma, address, pte);
1205 
1206 	/* Move the dirty bit to the physical page now the pte is gone. */
1207 	if (pte_dirty(pteval))
1208 		set_page_dirty(page);
1209 
1210 	/* Update high watermark before we lower rss */
1211 	update_hiwater_rss(mm);
1212 
1213 	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1214 		if (PageAnon(page))
1215 			dec_mm_counter(mm, MM_ANONPAGES);
1216 		else
1217 			dec_mm_counter(mm, MM_FILEPAGES);
1218 		set_pte_at(mm, address, pte,
1219 				swp_entry_to_pte(make_hwpoison_entry(page)));
1220 	} else if (PageAnon(page)) {
1221 		swp_entry_t entry = { .val = page_private(page) };
1222 
1223 		if (PageSwapCache(page)) {
1224 			/*
1225 			 * Store the swap location in the pte.
1226 			 * See handle_pte_fault() ...
1227 			 */
1228 			if (swap_duplicate(entry) < 0) {
1229 				set_pte_at(mm, address, pte, pteval);
1230 				ret = SWAP_FAIL;
1231 				goto out_unmap;
1232 			}
1233 			if (list_empty(&mm->mmlist)) {
1234 				spin_lock(&mmlist_lock);
1235 				if (list_empty(&mm->mmlist))
1236 					list_add(&mm->mmlist, &init_mm.mmlist);
1237 				spin_unlock(&mmlist_lock);
1238 			}
1239 			dec_mm_counter(mm, MM_ANONPAGES);
1240 			inc_mm_counter(mm, MM_SWAPENTS);
1241 		} else if (PAGE_MIGRATION) {
1242 			/*
1243 			 * Store the pfn of the page in a special migration
1244 			 * pte. do_swap_page() will wait until the migration
1245 			 * pte is removed and then restart fault handling.
1246 			 */
1247 			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
1248 			entry = make_migration_entry(page, pte_write(pteval));
1249 		}
1250 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1251 		BUG_ON(pte_file(*pte));
1252 	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
1253 		/* Establish migration entry for a file page */
1254 		swp_entry_t entry;
1255 		entry = make_migration_entry(page, pte_write(pteval));
1256 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1257 	} else
1258 		dec_mm_counter(mm, MM_FILEPAGES);
1259 
1260 	page_remove_rmap(page);
1261 	page_cache_release(page);
1262 
1263 out_unmap:
1264 	pte_unmap_unlock(pte, ptl);
1265 out:
1266 	return ret;
1267 
1268 out_mlock:
1269 	pte_unmap_unlock(pte, ptl);
1270 
1271 
1272 	/*
1273 	 * We need mmap_sem locking here; otherwise the VM_LOCKED check is
1274 	 * racy and the result unstable.  We also can't wait for the lock,
1275 	 * because we already hold anon_vma->mutex or mapping->i_mmap_mutex.
1276 	 * If the trylock fails, the page stays on the evictable lru and
1277 	 * vmscan may later retry moving it to the unevictable lru if the
1278 	 * page really is mlocked.
1279 	 */
1280 	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1281 		if (vma->vm_flags & VM_LOCKED) {
1282 			mlock_vma_page(page);
1283 			ret = SWAP_MLOCK;
1284 		}
1285 		up_read(&vma->vm_mm->mmap_sem);
1286 	}
1287 	return ret;
1288 }
1289 
1290 /*
1291  * objrmap doesn't work for nonlinear VMAs because the assumption that
1292  * offset-into-file correlates with offset-into-virtual-addresses does not hold.
1293  * Consequently, given a particular page and its ->index, we cannot locate the
1294  * ptes which are mapping that page without an exhaustive linear search.
1295  *
1296  * So what this code does is a mini "virtual scan" of each nonlinear VMA which
1297  * maps the file to which the target page belongs.  The ->vm_private_data field
1298  * holds the current cursor into that scan.  Successive searches will circulate
1299  * around the vma's virtual address space.
1300  *
1301  * So as more replacement pressure is applied to the pages in a nonlinear VMA,
1302  * more scanning pressure is placed against them as well.   Eventually pages
1303  * will become fully unmapped and be eligible for eviction.
1304  *
1305  * For very sparsely populated VMAs this is a little inefficient - chances are
1306  * there won't be many ptes located within the scan cluster.  In this case
1307  * maybe we could scan further - to the end of the pte page, perhaps.
1308  *
1309  * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
1310  * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
1311  * rather than unmapping them.  If we encounter the "check_page" that vmscan is
1312  * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
1313  */
1314 #define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
1315 #define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
1316 
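/*
 * Example (assuming 4K pages and 2M PMDs, e.g. x86_64): CLUSTER_SIZE is
 * min(32 * 4K, 2M) = 128K, so each try_to_unmap_cluster() call covers one
 * 128K-aligned window of 32 ptes, and CLUSTER_MASK rounds the scan cursor
 * down to the start of that window.
 */
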
1317 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1318 		struct vm_area_struct *vma, struct page *check_page)
1319 {
1320 	struct mm_struct *mm = vma->vm_mm;
1321 	pgd_t *pgd;
1322 	pud_t *pud;
1323 	pmd_t *pmd;
1324 	pte_t *pte;
1325 	pte_t pteval;
1326 	spinlock_t *ptl;
1327 	struct page *page;
1328 	unsigned long address;
1329 	unsigned long end;
1330 	int ret = SWAP_AGAIN;
1331 	int locked_vma = 0;
1332 
1333 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
1334 	end = address + CLUSTER_SIZE;
1335 	if (address < vma->vm_start)
1336 		address = vma->vm_start;
1337 	if (end > vma->vm_end)
1338 		end = vma->vm_end;
1339 
1340 	pgd = pgd_offset(mm, address);
1341 	if (!pgd_present(*pgd))
1342 		return ret;
1343 
1344 	pud = pud_offset(pgd, address);
1345 	if (!pud_present(*pud))
1346 		return ret;
1347 
1348 	pmd = pmd_offset(pud, address);
1349 	if (!pmd_present(*pmd))
1350 		return ret;
1351 
1352 	/*
1353 	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
1354 	 * keep the sem while scanning the cluster for mlocking pages.
1355 	 */
1356 	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1357 		locked_vma = (vma->vm_flags & VM_LOCKED);
1358 		if (!locked_vma)
1359 			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
1360 	}
1361 
1362 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1363 
1364 	/* Update high watermark before we lower rss */
1365 	update_hiwater_rss(mm);
1366 
1367 	for (; address < end; pte++, address += PAGE_SIZE) {
1368 		if (!pte_present(*pte))
1369 			continue;
1370 		page = vm_normal_page(vma, address, *pte);
1371 		BUG_ON(!page || PageAnon(page));
1372 
1373 		if (locked_vma) {
1374 			mlock_vma_page(page);   /* no-op if already mlocked */
1375 			if (page == check_page)
1376 				ret = SWAP_MLOCK;
1377 			continue;	/* don't unmap */
1378 		}
1379 
1380 		if (ptep_clear_flush_young_notify(vma, address, pte))
1381 			continue;
1382 
1383 		/* Nuke the page table entry. */
1384 		flush_cache_page(vma, address, pte_pfn(*pte));
1385 		pteval = ptep_clear_flush_notify(vma, address, pte);
1386 
1387 		/* If nonlinear, store the file page offset in the pte. */
1388 		if (page->index != linear_page_index(vma, address))
1389 			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
1390 
1391 		/* Move the dirty bit to the physical page now the pte is gone. */
1392 		if (pte_dirty(pteval))
1393 			set_page_dirty(page);
1394 
1395 		page_remove_rmap(page);
1396 		page_cache_release(page);
1397 		dec_mm_counter(mm, MM_FILEPAGES);
1398 		(*mapcount)--;
1399 	}
1400 	pte_unmap_unlock(pte - 1, ptl);
1401 	if (locked_vma)
1402 		up_read(&vma->vm_mm->mmap_sem);
1403 	return ret;
1404 }
1405 
1406 bool is_vma_temporary_stack(struct vm_area_struct *vma)
1407 {
1408 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1409 
1410 	if (!maybe_stack)
1411 		return false;
1412 
1413 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1414 						VM_STACK_INCOMPLETE_SETUP)
1415 		return true;
1416 
1417 	return false;
1418 }
1419 
1420 /**
1421  * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
1422  * rmap method
1423  * @page: the page to unmap/unlock
1424  * @flags: action and flags
1425  *
1426  * Find all the mappings of a page using the mapping pointer and the vma chains
1427  * contained in the anon_vma struct it points to.
1428  *
1429  * This function is only called from try_to_unmap/try_to_munlock for
1430  * anonymous pages.
1431  * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1432  * where the page was found will be held for write.  So, we won't recheck
1433  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1434  * VM_LOCKED.
1435  */
1436 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1437 {
1438 	struct anon_vma *anon_vma;
1439 	struct anon_vma_chain *avc;
1440 	int ret = SWAP_AGAIN;
1441 
1442 	anon_vma = page_lock_anon_vma(page);
1443 	if (!anon_vma)
1444 		return ret;
1445 
1446 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1447 		struct vm_area_struct *vma = avc->vma;
1448 		unsigned long address;
1449 
1450 		/*
1451 		 * During exec, a temporary VMA is setup and later moved.
1452 		 * The VMA is moved under the anon_vma lock but not the
1453 		 * page tables leading to a race where migration cannot
1454 		 * find the migration ptes. Rather than increasing the
1455 		 * locking requirements of exec(), migration skips
1456 		 * temporary VMAs until after exec() completes.
1457 		 */
1458 		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
1459 				is_vma_temporary_stack(vma))
1460 			continue;
1461 
1462 		address = vma_address(page, vma);
1463 		if (address == -EFAULT)
1464 			continue;
1465 		ret = try_to_unmap_one(page, vma, address, flags);
1466 		if (ret != SWAP_AGAIN || !page_mapped(page))
1467 			break;
1468 	}
1469 
1470 	page_unlock_anon_vma(anon_vma);
1471 	return ret;
1472 }
1473 
1474 /**
1475  * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
1476  * @page: the page to unmap/unlock
1477  * @flags: action and flags
1478  *
1479  * Find all the mappings of a page using the mapping pointer and the vma chains
1480  * contained in the address_space struct it points to.
1481  *
1482  * This function is only called from try_to_unmap/try_to_munlock for
1483  * object-based pages.
1484  * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1485  * where the page was found will be held for write.  So, we won't recheck
1486  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1487  * VM_LOCKED.
1488  */
1489 static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1490 {
1491 	struct address_space *mapping = page->mapping;
1492 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1493 	struct vm_area_struct *vma;
1494 	struct prio_tree_iter iter;
1495 	int ret = SWAP_AGAIN;
1496 	unsigned long cursor;
1497 	unsigned long max_nl_cursor = 0;
1498 	unsigned long max_nl_size = 0;
1499 	unsigned int mapcount;
1500 
1501 	mutex_lock(&mapping->i_mmap_mutex);
1502 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1503 		unsigned long address = vma_address(page, vma);
1504 		if (address == -EFAULT)
1505 			continue;
1506 		ret = try_to_unmap_one(page, vma, address, flags);
1507 		if (ret != SWAP_AGAIN || !page_mapped(page))
1508 			goto out;
1509 	}
1510 
1511 	if (list_empty(&mapping->i_mmap_nonlinear))
1512 		goto out;
1513 
1514 	/*
1515 	 * We don't bother to try to find the munlocked page in nonlinears.
1516 	 * It's costly. Instead, later, page reclaim logic may call
1517 	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
1518 	 */
1519 	if (TTU_ACTION(flags) == TTU_MUNLOCK)
1520 		goto out;
1521 
1522 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1523 						shared.vm_set.list) {
1524 		cursor = (unsigned long) vma->vm_private_data;
1525 		if (cursor > max_nl_cursor)
1526 			max_nl_cursor = cursor;
1527 		cursor = vma->vm_end - vma->vm_start;
1528 		if (cursor > max_nl_size)
1529 			max_nl_size = cursor;
1530 	}
1531 
1532 	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
1533 		ret = SWAP_FAIL;
1534 		goto out;
1535 	}
1536 
1537 	/*
1538 	 * We don't try to search for this page in the nonlinear vmas,
1539 	 * and page_referenced wouldn't have found it anyway.  Instead
1540 	 * just walk the nonlinear vmas trying to age and unmap some.
1541 	 * The mapcount of the page we came in with is irrelevant,
1542 	 * but even so use it as a guide to how hard we should try?
1543 	 */
1544 	mapcount = page_mapcount(page);
1545 	if (!mapcount)
1546 		goto out;
1547 	cond_resched();
1548 
1549 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1550 	if (max_nl_cursor == 0)
1551 		max_nl_cursor = CLUSTER_SIZE;
1552 
1553 	do {
1554 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1555 						shared.vm_set.list) {
1556 			cursor = (unsigned long) vma->vm_private_data;
1557 			while ( cursor < max_nl_cursor &&
1558 				cursor < vma->vm_end - vma->vm_start) {
1559 				if (try_to_unmap_cluster(cursor, &mapcount,
1560 						vma, page) == SWAP_MLOCK)
1561 					ret = SWAP_MLOCK;
1562 				cursor += CLUSTER_SIZE;
1563 				vma->vm_private_data = (void *) cursor;
1564 				if ((int)mapcount <= 0)
1565 					goto out;
1566 			}
1567 			vma->vm_private_data = (void *) max_nl_cursor;
1568 		}
1569 		cond_resched();
1570 		max_nl_cursor += CLUSTER_SIZE;
1571 	} while (max_nl_cursor <= max_nl_size);
1572 
1573 	/*
1574 	 * Don't loop forever (perhaps all the remaining pages are
1575 	 * in locked vmas).  Reset cursor on all unreserved nonlinear
1576 	 * vmas, now forgetting on which ones it had fallen behind.
1577 	 */
1578 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1579 		vma->vm_private_data = NULL;
1580 out:
1581 	mutex_unlock(&mapping->i_mmap_mutex);
1582 	return ret;
1583 }
1584 
1585 /**
1586  * try_to_unmap - try to remove all page table mappings to a page
1587  * @page: the page to get unmapped
1588  * @flags: action and flags
1589  *
1590  * Tries to remove all the page table entries which are mapping this
1591  * page, used in the pageout path.  Caller must hold the page lock.
1592  * Return values are:
1593  *
1594  * SWAP_SUCCESS	- we succeeded in removing all mappings
1595  * SWAP_AGAIN	- we missed a mapping, try again later
1596  * SWAP_FAIL	- the page is unswappable
1597  * SWAP_MLOCK	- page is mlocked.
1598  */
1599 int try_to_unmap(struct page *page, enum ttu_flags flags)
1600 {
1601 	int ret;
1602 
1603 	BUG_ON(!PageLocked(page));
1604 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
1605 
1606 	if (unlikely(PageKsm(page)))
1607 		ret = try_to_unmap_ksm(page, flags);
1608 	else if (PageAnon(page))
1609 		ret = try_to_unmap_anon(page, flags);
1610 	else
1611 		ret = try_to_unmap_file(page, flags);
1612 	if (ret != SWAP_MLOCK && !page_mapped(page))
1613 		ret = SWAP_SUCCESS;
1614 	return ret;
1615 }
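
/*
 * Sketch of a try_to_unmap() caller dispatching on the return value, in
 * the style of shrink_page_list() in mm/vmscan.c (illustration only; the
 * labels are hypothetical):
 */
#if 0	/* example only */
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_FAIL:
		goto activate_locked;	/* unswappable: keep the page active */
	case SWAP_AGAIN:
		goto keep_locked;	/* missed a mapping: try again later */
	case SWAP_MLOCK:
		goto cull_mlocked;	/* mlocked: move to the unevictable list */
	case SWAP_SUCCESS:
		break;			/* fully unmapped: page can be reclaimed */
	}
#endif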
1616 
1617 /**
1618  * try_to_munlock - try to munlock a page
1619  * @page: the page to be munlocked
1620  *
1621  * Called from munlock code.  Checks all of the VMAs mapping the page
1622  * to make sure nobody else has this page mlocked. The page will be
1623  * returned with PG_mlocked cleared if no other vmas have it mlocked.
1624  *
1625  * Return values are:
1626  *
1627  * SWAP_AGAIN	- no vma is holding page mlocked, or,
1628  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
1629  * SWAP_FAIL	- page cannot be located at present
1630  * SWAP_MLOCK	- page is now mlocked.
1631  */
1632 int try_to_munlock(struct page *page)
1633 {
1634 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
1635 
1636 	if (unlikely(PageKsm(page)))
1637 		return try_to_unmap_ksm(page, TTU_MUNLOCK);
1638 	else if (PageAnon(page))
1639 		return try_to_unmap_anon(page, TTU_MUNLOCK);
1640 	else
1641 		return try_to_unmap_file(page, TTU_MUNLOCK);
1642 }
1643 
1644 void __put_anon_vma(struct anon_vma *anon_vma)
1645 {
1646 	struct anon_vma *root = anon_vma->root;
1647 
1648 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1649 		anon_vma_free(root);
1650 
1651 	anon_vma_free(anon_vma);
1652 }
1653 
1654 #ifdef CONFIG_MIGRATION
1655 /*
1656  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
1657  * Called by migrate.c to remove migration ptes, but might be used more later.
1658  */
1659 static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1660 		struct vm_area_struct *, unsigned long, void *), void *arg)
1661 {
1662 	struct anon_vma *anon_vma;
1663 	struct anon_vma_chain *avc;
1664 	int ret = SWAP_AGAIN;
1665 
1666 	/*
1667 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
1668 	 * because that depends on page_mapped(); but not all its usages
1669 	 * are holding mmap_sem. Users without mmap_sem are required to
1670 	 * take a reference count to prevent the anon_vma disappearing
1671 	 */
1672 	anon_vma = page_anon_vma(page);
1673 	if (!anon_vma)
1674 		return ret;
1675 	anon_vma_lock(anon_vma);
1676 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1677 		struct vm_area_struct *vma = avc->vma;
1678 		unsigned long address = vma_address(page, vma);
1679 		if (address == -EFAULT)
1680 			continue;
1681 		ret = rmap_one(page, vma, address, arg);
1682 		if (ret != SWAP_AGAIN)
1683 			break;
1684 	}
1685 	anon_vma_unlock(anon_vma);
1686 	return ret;
1687 }
1688 
1689 static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1690 		struct vm_area_struct *, unsigned long, void *), void *arg)
1691 {
1692 	struct address_space *mapping = page->mapping;
1693 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1694 	struct vm_area_struct *vma;
1695 	struct prio_tree_iter iter;
1696 	int ret = SWAP_AGAIN;
1697 
1698 	if (!mapping)
1699 		return ret;
1700 	mutex_lock(&mapping->i_mmap_mutex);
1701 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1702 		unsigned long address = vma_address(page, vma);
1703 		if (address == -EFAULT)
1704 			continue;
1705 		ret = rmap_one(page, vma, address, arg);
1706 		if (ret != SWAP_AGAIN)
1707 			break;
1708 	}
1709 	/*
1710 	 * No nonlinear handling: being always shared, nonlinear vmas
1711 	 * never contain migration ptes.  Decide what to do about this
1712 	 * limitation to linear when we need rmap_walk() on nonlinear.
1713 	 */
1714 	mutex_unlock(&mapping->i_mmap_mutex);
1715 	return ret;
1716 }
1717 
1718 int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
1719 		struct vm_area_struct *, unsigned long, void *), void *arg)
1720 {
1721 	VM_BUG_ON(!PageLocked(page));
1722 
1723 	if (unlikely(PageKsm(page)))
1724 		return rmap_walk_ksm(page, rmap_one, arg);
1725 	else if (PageAnon(page))
1726 		return rmap_walk_anon(page, rmap_one, arg);
1727 	else
1728 		return rmap_walk_file(page, rmap_one, arg);
1729 }
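
/*
 * Sketch of an rmap_walk() user (illustration only; the callback below is
 * hypothetical).  Migration passes remove_migration_pte() as @rmap_one; a
 * caller holding the page lock would invoke
 * rmap_walk(page, example_rmap_one, NULL).  Returning SWAP_AGAIN keeps the
 * walk going; any other value stops it early.
 */
#if 0	/* example only */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	/* ... inspect or fix up the mapping of @page at @address ... */
	return SWAP_AGAIN;
}
#endif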
1730 #endif /* CONFIG_MIGRATION */
1731 
1732 #ifdef CONFIG_HUGETLB_PAGE
1733 /*
1734  * The following three functions are for anonymous (private mapped) hugepages.
1735  * Unlike common anonymous pages, anonymous hugepages have no accounting code
1736  * and no lru code, because we handle hugepages differently from common pages.
1737  */
1738 static void __hugepage_set_anon_rmap(struct page *page,
1739 	struct vm_area_struct *vma, unsigned long address, int exclusive)
1740 {
1741 	struct anon_vma *anon_vma = vma->anon_vma;
1742 
1743 	BUG_ON(!anon_vma);
1744 
1745 	if (PageAnon(page))
1746 		return;
1747 	if (!exclusive)
1748 		anon_vma = anon_vma->root;
1749 
1750 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1751 	page->mapping = (struct address_space *) anon_vma;
1752 	page->index = linear_page_index(vma, address);
1753 }
1754 
1755 void hugepage_add_anon_rmap(struct page *page,
1756 			    struct vm_area_struct *vma, unsigned long address)
1757 {
1758 	struct anon_vma *anon_vma = vma->anon_vma;
1759 	int first;
1760 
1761 	BUG_ON(!PageLocked(page));
1762 	BUG_ON(!anon_vma);
1763 	/* address might be in next vma when migration races vma_adjust */
1764 	first = atomic_inc_and_test(&page->_mapcount);
1765 	if (first)
1766 		__hugepage_set_anon_rmap(page, vma, address, 0);
1767 }
1768 
1769 void hugepage_add_new_anon_rmap(struct page *page,
1770 			struct vm_area_struct *vma, unsigned long address)
1771 {
1772 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1773 	atomic_set(&page->_mapcount, 0);
1774 	__hugepage_set_anon_rmap(page, vma, address, 1);
1775 }
1776 #endif /* CONFIG_HUGETLB_PAGE */
1777