/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
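/*
 * Overview (descriptive note): an anon_vma ties together all the vmas that
 * might map a given anonymous page.  A PageAnon page points at its anon_vma
 * through page->mapping with the PAGE_MAPPING_ANON bit set (see
 * __page_set_anon_rmap() below), and the anon rmap walkers iterate the
 * anon_vma's list of vmas to find every pte mapping the page.
 */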
/*
 * Attach an anon_vma to the vma, reusing a mergeable neighbour's anon_vma
 * when possible.  This must be called under the mmap_sem.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 * The anon_vma cache is created with SLAB_DESTROY_BY_RCU above, so the
 * structure cannot be freed and reused for something else while we hold
 * rcu_read_lock(), which makes the spin_lock below safe.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
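/*
 * Worked example for vma_address() below (illustrative numbers, assuming
 * 4K pages): a vma with vm_start == 0x40000000 and vm_pgoff == 0x10 maps
 * the file from offset 0x10000, so a page with ->index == 0x13 is expected
 * at 0x40000000 + ((0x13 - 0x10) << PAGE_SHIFT) == 0x40003000.
 */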
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? Checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
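/*
 * Typical calling pattern for page_check_address() (a sketch of how the
 * functions below use it, not a separate API):
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		... examine or modify *pte under the pte lock ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 *
 * On success the pte is returned mapped and locked, and the caller is
 * responsible for the pte_unmap_unlock().
 */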
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/*
 * page_mkclean - clean and write-protect all shared mappings of a page, so
 * that subsequent writes fault again and can be tracked as dirtying the
 * page.  Returns nonzero if any pte was dirty or writable, or if the
 * page's storage key was dirty.
 */
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			ret = 1;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
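/*
 * Note on the _mapcount convention used by the functions below: a page's
 * _mapcount starts at -1 ("no mappings"), so atomic_inc_and_test() returns
 * true exactly when the first pte mapping is added, and
 * atomic_add_negative(-1, ...) in page_remove_rmap() returns true when the
 * last mapping goes away.
 */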
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area whose pte mapped the page (used only for the
 *       diagnostic printout below)
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG " page->flags = %lx\n", page->flags);
			printk (KERN_EMERG " page->count = %x\n", page_count(page));
			printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops)
				print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}
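/*
 * Return value convention for the unmap functions below: SWAP_AGAIN is
 * returned both when a pte was successfully cleared and when no pte was
 * found for this page in the vma; SWAP_FAIL is returned only when the vma
 * is mlock()ed or the pte was recently referenced (and we are not
 * migrating).  try_to_unmap() therefore checks page_mapped() at the end to
 * decide whether the page really is fully unmapped (SWAP_SUCCESS).
 */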
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and be eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
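/*
 * Example of the cluster arithmetic below (illustrative, assuming 4K pages
 * and a 2M PMD): CLUSTER_SIZE is min(32 * 4K, 2M) == 128K, so each call to
 * try_to_unmap_cluster() scans up to 32 ptes, starting at the cursor
 * rounded down to a 128K boundary by CLUSTER_MASK.
 */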
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * try_to_unmap_anon - unmap an anonymous page by walking the vmas on its
 * anon_vma list, calling try_to_unmap_one() on each until the page is no
 * longer mapped (or a vma refuses with SWAP_FAIL).
 */
static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: set when the caller is migrating the page rather than
 *             reclaiming it
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
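/*
 * Illustrative caller sketch (the real code lives in mm/vmscan.c and
 * mm/migrate.c, not here): the pageout path calls try_to_unmap(page, 0)
 * on a locked page and treats SWAP_FAIL as "reactivate the page",
 * SWAP_AGAIN as "keep it and retry on a later pass", and SWAP_SUCCESS as
 * "all ptes are gone, the page may now be written back and/or freed";
 * page migration passes migration == 1 so that even mlock()ed and
 * recently referenced ptes are replaced with migration entries.
 */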