/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs while writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
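
/*
 * Illustrative sketch (not part of the original file): a pageout-style
 * walk that respects the ordering above.  The calls name functions
 * defined later in this file; error handling is elided.
 *
 *	lock_page(page);			-- page->flags PG_locked
 *	spin_lock(&mapping->i_mmap_lock);	-- mapping->i_mmap_lock
 *	pte = page_check_address(page, mm, address, &ptl);  -- pte lock
 *	if (pte)
 *		pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */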

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
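
/*
 * Illustrative sketch (an assumption about the caller, not a quote of
 * mm/memory.c): the anonymous fault path calls anon_vma_prepare before
 * it maps a new page, so that page_add_anon_rmap below always finds
 * vma->anon_vma set.
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		goto oom;
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *	...
 *	page_add_anon_rmap(page, vma, address);
 */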

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}
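
/*
 * Typical usage (sketch; page_referenced_anon and try_to_unmap_anon
 * below follow exactly this pattern): a non-NULL return means
 * anon_vma->lock is held, so the caller walks the list and unlocks.
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ret;
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *		... process one vma ...
 *	spin_unlock(&anon_vma->lock);
 */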

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
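
/*
 * Worked example (illustrative numbers, assuming 4K pages so
 * PAGE_SHIFT == 12 and pgoff == page->index): a vma with
 * vm_start == 0x40000000 and vm_pgoff == 0x10 maps file offset 0x10000
 * at its start, so a page with index 0x18 is expected at
 *
 *	0x40000000 + ((0x18 - 0x10) << 12) == 0x40008000
 *
 * which passes the range check provided vm_end > 0x40008000.
 */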

/*
 * At what user virtual address is page expected in vma?  Also checks
 * that the page matches the vma: currently only used by unuse_process,
 * on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
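
/*
 * Typical usage (sketch): on success the pte is mapped and its lock is
 * held, so the caller must finish with pte_unmap_unlock, as
 * page_referenced_one and try_to_unmap_one below do.
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		... inspect or clear *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */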

/*
 * Subfunctions of page_referenced: page_referenced_one is called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/*
	 * Pretend the page is referenced if the task has the
	 * swap token and is in the middle of a page fault.
	 */
	if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the
 * page->mapping pointer, then walking the chain of vmas it holds.  It
 * returns the number of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @ignore_token: when set, don't credit the swap token holder
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
{
	int referenced = 0;

	if (!swap_token_default_timeout)
		ignore_token = 1;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, ignore_token);
		else if (is_locked)
			referenced += page_referenced_file(page, ignore_token);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page,
								ignore_token);
			unlock_page(page);
		}
	}
	return referenced;
}
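
/*
 * Illustrative caller (a sketch of the reclaim path, cf. mm/vmscan.c;
 * the exact condition there may differ): a non-zero return keeps the
 * page on the active list instead of letting it be unmapped.
 *
 *	if (page_referenced(page, 1, sc->priority <= 0))
 *		goto activate_locked;
 */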

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		struct anon_vma *anon_vma = vma->anon_vma;

		BUG_ON(!anon_vma);
		anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
		page->mapping = (struct address_space *) anon_vma;

		page->index = linear_page_index(vma, address);

		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}
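
/*
 * The encoding above stores the anon_vma pointer in page->mapping with
 * the PAGE_MAPPING_ANON low bit set; PageAnon tests that bit, and
 * page_lock_anon_vma above decodes it again (sketch):
 *
 *	mapping = (unsigned long) page->mapping;
 *	if (mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
 */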

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	BUG_ON(!pfn_valid(page_to_pfn(page)));

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one is called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 *
	 * Pages belonging to VM_RESERVED regions should not end up here.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			if (list_empty(&mm->mmlist))
				list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and become eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
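
/*
 * Worked example (illustrative, assuming 4K pages and PMD_SIZE >= 128K,
 * so CLUSTER_SIZE == 128K and CLUSTER_MASK == ~0x1ffff): for a vma at
 * vm_start == 0x40000000 with cursor == 0x23456,
 *
 *	address = (0x40000000 + 0x23456) & ~0x1ffff == 0x40020000
 *	end     = 0x40020000 + 0x20000              == 0x40040000
 *
 * i.e. each call to try_to_unmap_cluster below scans one aligned
 * 128K window of the vma.
 */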

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, *pte, address);
			continue;
		}

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pfn);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
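
/*
 * Illustrative caller (a sketch of the pageout path, cf. shrink_list
 * in mm/vmscan.c; the labels are hypothetical): the return value
 * decides whether the page can go on to be freed.
 *
 *	switch (try_to_unmap(page)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;		-- fall through to pageout and freeing
 *	}
 */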