vmscan.c (c3cc99ff5d24e2eeaf7ec2032e720681916990e3) vs. vmscan.c (e286781d5f2e9c846e012a39653a166e9d31777d)
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 377 unchanged lines hidden ---

386 inc_zone_page_state(page, NR_VMSCAN_WRITE);
387 return PAGE_SUCCESS;
388 }
389
390 return PAGE_CLEAN;
391}
392
393/*
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 377 unchanged lines hidden ---

386 inc_zone_page_state(page, NR_VMSCAN_WRITE);
387 return PAGE_SUCCESS;
388 }
389
390 return PAGE_CLEAN;
391}
392
393/*
394 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
395 * someone else has a ref on the page, abort and return 0. If it was
396 * successfully detached, return 1. Assumes the caller has a single ref on
397 * this page.
394 * Same as remove_mapping, but if the page is removed from the mapping, it
395 * gets returned with a refcount of 0.
398 */
396 */
399int remove_mapping(struct address_space *mapping, struct page *page)
397static int __remove_mapping(struct address_space *mapping, struct page *page)
400{
401 BUG_ON(!PageLocked(page));
402 BUG_ON(mapping != page_mapping(page));
403
404 write_lock_irq(&mapping->tree_lock);
405 /*
406 * The non racy check for a busy page.
407 *

--- 14 unchanged lines hidden ---

422 *
423 * Reversing the order of the tests ensures such a situation cannot
424 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
425 * load is not satisfied before that of page->_count.
426 *
427 * Note that if SetPageDirty is always performed via set_page_dirty,
428 * and thus under tree_lock, then this ordering is not required.
429 */
398{
399 BUG_ON(!PageLocked(page));
400 BUG_ON(mapping != page_mapping(page));
401
402 write_lock_irq(&mapping->tree_lock);
403 /*
404 * The non racy check for a busy page.
405 *

--- 14 unchanged lines hidden ---

420 *
421 * Reversing the order of the tests ensures such a situation cannot
422 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
423 * load is not satisfied before that of page->_count.
424 *
425 * Note that if SetPageDirty is always performed via set_page_dirty,
426 * and thus under tree_lock, then this ordering is not required.
427 */
430 if (unlikely(page_count(page) != 2))
428 if (!page_freeze_refs(page, 2))
431 goto cannot_free;
429 goto cannot_free;
432 smp_rmb();
433 if (unlikely(PageDirty(page)))
430 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
431 if (unlikely(PageDirty(page))) {
432 page_unfreeze_refs(page, 2);
434 goto cannot_free;
433 goto cannot_free;
434 }
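The hunk above is the heart of the change: instead of checking page_count(page) != 2 and then issuing an explicit smp_rmb(), the new code calls page_freeze_refs(page, 2), which is essentially an atomic cmpxchg of page->_count from the expected value down to 0. Freezing the count to zero means a concurrent lockless (speculative) pagecache lookup can no longer take a new reference mid-removal, the cmpxchg doubles as the read barrier, and a dirty page is backed out with page_unfreeze_refs(page, 2). A minimal userspace sketch of that freezing idea, using C11 atomics and made-up names (fake_page, freeze_refs, try_remove are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	atomic_int refcount;            /* stands in for page->_count */
	bool dirty;                     /* stands in for PageDirty()  */
};

/* Succeeds only if exactly `expected` references exist; on success the
 * count becomes 0, so get_page_unless_zero()-style lockless lookups can
 * no longer take a reference. */
static bool freeze_refs(struct fake_page *page, int expected)
{
	int old = expected;
	return atomic_compare_exchange_strong(&page->refcount, &old, 0);
}

/* Undo a freeze, restoring `count` references. */
static void unfreeze_refs(struct fake_page *page, int count)
{
	atomic_store(&page->refcount, count);
}

/* Mirrors the __remove_mapping() flow above: freeze, re-check dirtiness,
 * back out if the page must stay in the cache. */
static bool try_remove(struct fake_page *page)
{
	if (!freeze_refs(page, 2))
		return false;           /* someone else holds a reference */
	/* the cmpxchg ordered the count check before the dirty check */
	if (page->dirty) {
		unfreeze_refs(page, 2); /* put the references back */
		return false;
	}
	return true;                    /* page stays frozen at 0 for the caller */
}

int main(void)
{
	struct fake_page p = { .refcount = 2, .dirty = false };
	printf("removed: %d, refcount now %d\n",
	       try_remove(&p), atomic_load(&p.refcount));
	return 0;
}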
435
436 if (PageSwapCache(page)) {
437 swp_entry_t swap = { .val = page_private(page) };
438 __delete_from_swap_cache(page);
439 write_unlock_irq(&mapping->tree_lock);
440 swap_free(swap);
435
436 if (PageSwapCache(page)) {
437 swp_entry_t swap = { .val = page_private(page) };
438 __delete_from_swap_cache(page);
439 write_unlock_irq(&mapping->tree_lock);
440 swap_free(swap);
441 __put_page(page); /* The pagecache ref */
442 return 1;
441 } else {
442 __remove_from_page_cache(page);
443 write_unlock_irq(&mapping->tree_lock);
443 }
444
444 }
445
445 __remove_from_page_cache(page);
446 write_unlock_irq(&mapping->tree_lock);
447 __put_page(page);
448 return 1;
449
450cannot_free:
451 write_unlock_irq(&mapping->tree_lock);
452 return 0;
453}
454
455/*
446 return 1;
447
448cannot_free:
449 write_unlock_irq(&mapping->tree_lock);
450 return 0;
451}
452
453/*
454 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
455 * someone else has a ref on the page, abort and return 0. If it was
456 * successfully detached, return 1. Assumes the caller has a single ref on
457 * this page.
458 */
459int remove_mapping(struct address_space *mapping, struct page *page)
460{
461 if (__remove_mapping(mapping, page)) {
462 /*
463 * Unfreezing the refcount with 1 rather than 2 effectively
464 * drops the pagecache ref for us without requiring another
465 * atomic operation.
466 */
467 page_unfreeze_refs(page, 1);
468 return 1;
469 }
470 return 0;
471}
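The wrapper above keeps the old external contract: __remove_mapping() hands back a detached page whose reference count is still frozen at zero, and unfreezing it to 1 rather than 2 drops the pagecache reference on the spot, leaving only the caller's, with no extra atomic operation. Continuing the userspace sketch from the earlier hunk (same illustrative helpers, not kernel API), the wrapper pattern is roughly:

/* On success the caller is left holding exactly one reference (its own);
 * the pagecache reference vanishes as part of the unfreeze. */
static bool remove_mapping_sketch(struct fake_page *page)
{
	if (try_remove(page)) {
		unfreeze_refs(page, 1); /* 1, not 2: pagecache ref dropped */
		return true;
	}
	return false;
}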
472
473/*
456 * shrink_page_list() returns the number of reclaimed pages
457 */
458static unsigned long shrink_page_list(struct list_head *page_list,
459 struct scan_control *sc,
460 enum pageout_io sync_writeback)
461{
462 LIST_HEAD(ret_pages);
463 struct pagevec freed_pvec;

--- 129 unchanged lines hidden ---

593 * truncate_complete_page(). We try to drop those buffers here
594 * and if that worked, and the page is no longer mapped into
595 * process address space (page_count == 1) it can be freed.
596 * Otherwise, leave the page on the LRU so it is swappable.
597 */
598 if (PagePrivate(page)) {
599 if (!try_to_release_page(page, sc->gfp_mask))
600 goto activate_locked;
474 * shrink_page_list() returns the number of reclaimed pages
475 */
476static unsigned long shrink_page_list(struct list_head *page_list,
477 struct scan_control *sc,
478 enum pageout_io sync_writeback)
479{
480 LIST_HEAD(ret_pages);
481 struct pagevec freed_pvec;

--- 129 unchanged lines hidden ---

611 * truncate_complete_page(). We try to drop those buffers here
612 * and if that worked, and the page is no longer mapped into
613 * process address space (page_count == 1) it can be freed.
614 * Otherwise, leave the page on the LRU so it is swappable.
615 */
616 if (PagePrivate(page)) {
617 if (!try_to_release_page(page, sc->gfp_mask))
618 goto activate_locked;
601 if (!mapping && page_count(page) == 1)
602 goto free_it;
619 if (!mapping && page_count(page) == 1) {
620 unlock_page(page);
621 if (put_page_testzero(page))
622 goto free_it;
623 else {
624 /*
625 * rare race with speculative reference.
626 * the speculative reference will free
627 * this page shortly, so we may
628 * increment nr_reclaimed here (and
629 * leave it off the LRU).
630 */
631 nr_reclaimed++;
632 continue;
633 }
634 }
603 }
604
635 }
636
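For buffer-only pages (no mapping) the old code could jump straight to free_it once try_to_release_page() succeeded and page_count() was 1, but with lockless pagecache lookups that count is no longer stable. The new code therefore unlocks the page and drops its own reference with put_page_testzero(), freeing the page only if that reference was the last one; if a speculative reference sneaked in, its holder will free the page shortly, so reclaim just counts the page and moves on. A small self-contained model of that decision, using C11 atomics and a made-up name (put_ref_testzero is illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Drop one reference and report whether it was the last one, the rough
 * analogue of the kernel's put_page_testzero(). */
static bool put_ref_testzero(atomic_int *refcount)
{
	return atomic_fetch_sub(refcount, 1) == 1;
}

int main(void)
{
	atomic_int refcount = 2;        /* reclaim's ref plus a speculative one */
	int nr_reclaimed = 0;

	if (put_ref_testzero(&refcount)) {
		puts("last reference dropped here: free the page now");
		nr_reclaimed++;
	} else {
		/* Rare race: the speculative holder will drop the final
		 * reference and free the page shortly, so count it as
		 * reclaimed and move on without freeing it ourselves. */
		puts("racing holder will free the page");
		nr_reclaimed++;
	}
	printf("nr_reclaimed = %d\n", nr_reclaimed);
	return 0;
}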
605 if (!mapping || !remove_mapping(mapping, page))
637 if (!mapping || !__remove_mapping(mapping, page))
606 goto keep_locked;
607
638 goto keep_locked;
639
608free_it:
609 unlock_page(page);
640 unlock_page(page);
641free_it:
610 nr_reclaimed++;
642 nr_reclaimed++;
611 if (!pagevec_add(&freed_pvec, page))
612 __pagevec_release_nonlru(&freed_pvec);
643 if (!pagevec_add(&freed_pvec, page)) {
644 __pagevec_free(&freed_pvec);
645 pagevec_reinit(&freed_pvec);
646 }
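Because pages now reach the free path with their last reference already gone (the count was frozen to zero or dropped by put_page_testzero()), the batch is handed straight back to the page allocator with __pagevec_free() and the pagevec is reset by hand with pagevec_reinit(), rather than going through __pagevec_release_nonlru(), which puts pages that are assumed to still hold a reference. The batching pattern itself, as a short self-contained C sketch with made-up names and an arbitrary batch size:

#include <stdlib.h>

#define BATCH 16                        /* illustrative; not PAGEVEC_SIZE */

struct batch_free {
	unsigned int nr;
	void *pages[BATCH];
};

/* Hand every buffered page back to the allocator, then reset the buffer
 * for reuse: the moral equivalent of __pagevec_free() + pagevec_reinit(). */
static void batch_flush(struct batch_free *b)
{
	for (unsigned int i = 0; i < b->nr; i++)
		free(b->pages[i]);
	b->nr = 0;
}

/* Queue one already-unreferenced page, flushing when the buffer fills,
 * much like pagevec_add() returning 0 when no space is left. */
static void batch_add(struct batch_free *b, void *page)
{
	b->pages[b->nr++] = page;
	if (b->nr == BATCH)
		batch_flush(b);
}

int main(void)
{
	struct batch_free b = { .nr = 0 };
	for (int i = 0; i < 40; i++)
		batch_add(&b, malloc(64));  /* stand-ins for reclaimed pages */
	batch_flush(&b);                    /* drain the final partial batch */
	return 0;
}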
613 continue;
614
615activate_locked:
616 SetPageActive(page);
617 pgactivate++;
618keep_locked:
619 unlock_page(page);
620keep:
621 list_add(&page->lru, &ret_pages);
622 VM_BUG_ON(PageLRU(page));
623 }
624 list_splice(&ret_pages, page_list);
625 if (pagevec_count(&freed_pvec))
647 continue;
648
649activate_locked:
650 SetPageActive(page);
651 pgactivate++;
652keep_locked:
653 unlock_page(page);
654keep:
655 list_add(&page->lru, &ret_pages);
656 VM_BUG_ON(PageLRU(page));
657 }
658 list_splice(&ret_pages, page_list);
659 if (pagevec_count(&freed_pvec))
626 __pagevec_release_nonlru(&freed_pvec);
660 __pagevec_free(&freed_pvec);
627 count_vm_events(PGACTIVATE, pgactivate);
628 return nr_reclaimed;
629}
630
631/* LRU Isolation modes. */
632#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
633#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
634#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */

--- 1462 unchanged lines hidden ---
661 count_vm_events(PGACTIVATE, pgactivate);
662 return nr_reclaimed;
663}
664
665/* LRU Isolation modes. */
666#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
667#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
668#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */

--- 1462 unchanged lines hidden ---