/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	unsigned long nr_mapped;	/* From page_state */

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};
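/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): roughly how a direct-reclaim caller might fill in a
 * scan_control before handing it to the shrinking functions below.
 * The exact field values here are assumptions for the example.
 */
#if 0	/* example only */
static void example_setup_scan_control(gfp_t gfp_mask)
{
	struct scan_control sc = {
		.gfp_mask	  = gfp_mask,
		.may_writepage	  = 1,	/* allowed to issue writeback */
		.may_swap	  = 1,	/* allowed to swap anon pages */
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.nr_mapped	  = read_page_state(nr_mapped),
	};
	/* ... pass &sc down to the reclaim functions ... */
}
#endif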
/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
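/*
 * Illustrative sketch (editor's addition): registering a shrinker for
 * a hypothetical cache.  The callback contract, visible in
 * shrink_slab() below, is: when called with nr_to_scan == 0, return
 * the current object count; otherwise free up to nr_to_scan objects
 * and return the remaining count, or -1 if nothing can be done under
 * this gfp_mask.  The example_* helpers are hypothetical.
 */
#if 0	/* example only */
static int example_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (!nr_to_scan)
		return example_cache_count();	/* hypothetical helper */
	return example_cache_trim(nr_to_scan);	/* hypothetical helper */
}

static struct shrinker *example_shrinker;

static int __init example_init(void)
{
	example_shrinker = set_shrinker(DEFAULT_SEEKS, example_cache_shrink);
	return example_shrinker ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	remove_shrinker(example_shrinker);
}
#endif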
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object. With this in mind we age equal
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU, it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}
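/*
 * Worked example of the pressure arithmetic above (editor's addition;
 * the numbers are illustrative).  With scanned = 1024 LRU pages,
 * seeks = 2, max_pass = 10000 cache objects and lru_pages = 100000:
 *
 *	delta = (4 * 1024) / 2      = 2048
 *	delta = 2048 * 10000        = 20480000
 *	delta / (100000 + 1)       ~= 204 objects to scan
 *
 * i.e. the cache is asked to scan roughly the same fraction of its
 * objects (204/10000 ~= 2%) as the fraction of the LRU that was just
 * scanned (1024/100000 ~= 1%), doubled by the 4/seeks factor.
 */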
/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.	(pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
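/*
 * Worked reference count for the check above (editor's addition):
 * a free-able page is held by exactly two parties,
 *
 *	pagecache (radix tree) ref	1
 *	the reclaiming caller's ref	1
 *					---
 *	page_count(page)		2
 *
 * Any third reference -- a racing lookup, O_DIRECT, a pagevec --
 * makes the page busy, so remove_mapping() backs off rather than
 * freeing a page somebody else is about to touch.
 */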
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			/*
			 * No unmapping if we do not swap
			 */
			if (!sc->may_swap)
				goto keep_locked;

			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	return nr_reclaimed;
}
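/*
 * Illustrative sketch (editor's addition): the pagevec pattern used in
 * shrink_page_list() above batches page releases so the expensive
 * release path runs once per PAGEVEC_SIZE pages rather than once per
 * page.  The general shape, with a hypothetical input list:
 */
#if 0	/* example only */
static void example_drop_pages(struct list_head *pages)
{
	struct pagevec pvec;
	struct page *page, *next;

	pagevec_init(&pvec, 1);			/* 1 => treat pages as cold */
	list_for_each_entry_safe(page, next, pages, lru) {
		list_del(&page->lru);
		if (!pagevec_add(&pvec, page))	/* 0 => pvec now full */
			__pagevec_release_nonlru(&pvec);
	}
	if (pagevec_count(&pvec))
		__pagevec_release_nonlru(&pvec);
}
#endif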

#ifdef CONFIG_MIGRATION
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
unsigned long putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	unsigned long count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);

/*
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> Permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
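/*
 * Illustrative sketch (editor's addition): a filesystem whose pages
 * carry no private data can opt into migration simply by pointing its
 * address_space_operations at migrate_page(), which is exactly the
 * ->migratepage hook that migrate_pages() below prefers.  The struct
 * and example_* callbacks here are hypothetical.
 */
#if 0	/* example only */
static struct address_space_operations example_aops = {
	.readpage	= example_readpage,	/* hypothetical */
	.writepage	= example_writepage,	/* hypothetical */
	.migratepage	= migrate_page,		/* generic migration */
};
#endif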

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the "to" list has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
unsigned long migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	unsigned long retry;
	unsigned long nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later
		 * we use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers.  As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
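/*
 * Illustrative sketch (editor's addition): how a caller might drive
 * migrate_pages().  Pages are first isolated from the LRU onto a
 * private list, target pages are allocated onto another, and both the
 * migrated originals and the permanent failures are returned to the
 * LRU afterwards.  All list names below are hypothetical.
 */
#if 0	/* example only */
LIST_HEAD(pagelist);	/* filled via isolate_lru_page() */
LIST_HEAD(newlist);	/* filled with freshly allocated target pages */
LIST_HEAD(moved);
LIST_HEAD(failed);
unsigned long nr_left;

nr_left = migrate_pages(&pagelist, &newlist, &moved, &failed);
putback_lru_pages(&moved);	/* old pages whose contents moved */
putback_lru_pages(&failed);	/* permanent failures, back untouched */
/* nr_left counts pages not migrated because "to" ran empty */
#endif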

/*
 * Isolate one page from the LRU lists and put it on the
 * indicated list with elevated refcount.
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page)
{
	int ret = 0;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 1;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	return ret;
}
#endif
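/*
 * Illustrative sketch (editor's addition): isolate_lru_page() and
 * putback_lru_pages() are used as a pair; between the two calls the
 * caller owns the page (elevated refcount, off the LRU).  The list
 * name is hypothetical.
 */
#if 0	/* example only */
LIST_HEAD(private_list);

if (isolate_lru_page(page))
	list_add_tail(&page->lru, &private_list);
/* ... operate on the isolated pages ... */
putback_lru_pages(&private_list);	/* drops the extra refcounts */
#endif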

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct list_head *target;
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		BUG_ON(!PageLRU(page));

		list_del(&page->lru);
		target = src;
		if (likely(get_page_unless_zero(page))) {
			/*
			 * Be careful not to clear PageLRU until after we're
			 * sure the page is not being freed elsewhere -- the
			 * page release code relies on it.
			 */
			ClearPageLRU(page);
			target = dst;
			nr_taken++;
		} /* else it is being freed elsewhere */

		list_add(&page->lru, target);
	}

	*scanned = scan;
	return nr_taken;
}
11521da177e4SLinus Torvalds */ 11531da177e4SLinus Torvalds while (!list_empty(&page_list)) { 11541da177e4SLinus Torvalds page = lru_to_page(&page_list); 11558d438f96SNick Piggin BUG_ON(PageLRU(page)); 11568d438f96SNick Piggin SetPageLRU(page); 11571da177e4SLinus Torvalds list_del(&page->lru); 11581da177e4SLinus Torvalds if (PageActive(page)) 11591da177e4SLinus Torvalds add_page_to_active_list(zone, page); 11601da177e4SLinus Torvalds else 11611da177e4SLinus Torvalds add_page_to_inactive_list(zone, page); 11621da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 11631da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 11641da177e4SLinus Torvalds __pagevec_release(&pvec); 11651da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 11661da177e4SLinus Torvalds } 11671da177e4SLinus Torvalds } 116869e05944SAndrew Morton } while (nr_scanned < max_scan); 1169*fb8d14e1SWu Fengguang spin_unlock(&zone->lru_lock); 11701da177e4SLinus Torvalds done: 1171*fb8d14e1SWu Fengguang local_irq_enable(); 11721da177e4SLinus Torvalds pagevec_release(&pvec); 117305ff5137SAndrew Morton return nr_reclaimed; 11741da177e4SLinus Torvalds } 11751da177e4SLinus Torvalds 11761da177e4SLinus Torvalds /* 11771da177e4SLinus Torvalds * This moves pages from the active list to the inactive list. 11781da177e4SLinus Torvalds * 11791da177e4SLinus Torvalds * We move them the other way if the page is referenced by one or more 11801da177e4SLinus Torvalds * processes, from rmap. 11811da177e4SLinus Torvalds * 11821da177e4SLinus Torvalds * If the pages are mostly unmapped, the processing is fast and it is 11831da177e4SLinus Torvalds * appropriate to hold zone->lru_lock across the whole operation. But if 11841da177e4SLinus Torvalds * the pages are mapped, the processing is slow (page_referenced()) so we 11851da177e4SLinus Torvalds * should drop zone->lru_lock around each page. It's impossible to balance 11861da177e4SLinus Torvalds * this, so instead we remove the pages from the LRU while processing them. 11871da177e4SLinus Torvalds * It is safe to rely on PG_active against the non-LRU pages in here because 11881da177e4SLinus Torvalds * nobody will play with that bit on a non-LRU page. 11891da177e4SLinus Torvalds * 11901da177e4SLinus Torvalds * The downside is that we have to touch page->_count against each page. 11911da177e4SLinus Torvalds * But we had to alter page->flags anyway. 
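 * (Taking one reference per page up front is what lets us drop
 * zone->lru_lock for the slow page_referenced() pass instead of
 * holding it across every page.)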
11921da177e4SLinus Torvalds */ 11931742f19fSAndrew Morton static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 119469e05944SAndrew Morton struct scan_control *sc) 11951da177e4SLinus Torvalds { 119669e05944SAndrew Morton unsigned long pgmoved; 11971da177e4SLinus Torvalds int pgdeactivate = 0; 119869e05944SAndrew Morton unsigned long pgscanned; 11991da177e4SLinus Torvalds LIST_HEAD(l_hold); /* The pages which were snipped off */ 12001da177e4SLinus Torvalds LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ 12011da177e4SLinus Torvalds LIST_HEAD(l_active); /* Pages to go onto the active_list */ 12021da177e4SLinus Torvalds struct page *page; 12031da177e4SLinus Torvalds struct pagevec pvec; 12041da177e4SLinus Torvalds int reclaim_mapped = 0; 12052903fb16SChristoph Lameter 12062903fb16SChristoph Lameter if (unlikely(sc->may_swap)) { 12071da177e4SLinus Torvalds long mapped_ratio; 12081da177e4SLinus Torvalds long distress; 12091da177e4SLinus Torvalds long swap_tendency; 12101da177e4SLinus Torvalds 12112903fb16SChristoph Lameter /* 12122903fb16SChristoph Lameter * `distress' is a measure of how much trouble we're having 12132903fb16SChristoph Lameter * reclaiming pages. 0 -> no problems. 100 -> great trouble. 12142903fb16SChristoph Lameter */ 12152903fb16SChristoph Lameter distress = 100 >> zone->prev_priority; 12162903fb16SChristoph Lameter 12172903fb16SChristoph Lameter /* 12182903fb16SChristoph Lameter * The point of this algorithm is to decide when to start 12192903fb16SChristoph Lameter * reclaiming mapped memory instead of just pagecache. Work out 12202903fb16SChristoph Lameter * how much memory 12212903fb16SChristoph Lameter * is mapped. 12222903fb16SChristoph Lameter */ 12232903fb16SChristoph Lameter mapped_ratio = (sc->nr_mapped * 100) / total_memory; 12242903fb16SChristoph Lameter 12252903fb16SChristoph Lameter /* 12262903fb16SChristoph Lameter * Now decide how much we really want to unmap some pages. The 12272903fb16SChristoph Lameter * mapped ratio is downgraded - just because there's a lot of 12282903fb16SChristoph Lameter * mapped memory doesn't necessarily mean that page reclaim 12292903fb16SChristoph Lameter * isn't succeeding. 12302903fb16SChristoph Lameter * 12312903fb16SChristoph Lameter * The distress ratio is important - we don't want to start 12322903fb16SChristoph Lameter * going oom. 12332903fb16SChristoph Lameter * 12342903fb16SChristoph Lameter * A 100% value of vm_swappiness overrides this algorithm 12352903fb16SChristoph Lameter * altogether. 12362903fb16SChristoph Lameter */ 12372903fb16SChristoph Lameter swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; 12382903fb16SChristoph Lameter 12392903fb16SChristoph Lameter /* 12402903fb16SChristoph Lameter * Now use this metric to decide whether to start moving mapped 12412903fb16SChristoph Lameter * memory onto the inactive list. 
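 *
 * A worked example (illustrative numbers, assuming the default
 * vm_swappiness of 60): at prev_priority 2, distress = 100 >> 2 = 25;
 * if 40% of memory is mapped, mapped_ratio / 2 = 20, so swap_tendency
 * = 20 + 25 + 60 = 105 >= 100 and mapped pages become fair game. With
 * no reclaim trouble (prev_priority 12, distress = 0) the same
 * workload gives 20 + 0 + 60 = 80 < 100 and we stick to pagecache.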
12422903fb16SChristoph Lameter */ 12432903fb16SChristoph Lameter if (swap_tendency >= 100) 12442903fb16SChristoph Lameter reclaim_mapped = 1; 12452903fb16SChristoph Lameter } 12462903fb16SChristoph Lameter 12471da177e4SLinus Torvalds lru_add_drain(); 12481da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12491da177e4SLinus Torvalds pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, 12501da177e4SLinus Torvalds &l_hold, &pgscanned); 12511da177e4SLinus Torvalds zone->pages_scanned += pgscanned; 12521da177e4SLinus Torvalds zone->nr_active -= pgmoved; 12531da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12541da177e4SLinus Torvalds 12551da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 12561da177e4SLinus Torvalds cond_resched(); 12571da177e4SLinus Torvalds page = lru_to_page(&l_hold); 12581da177e4SLinus Torvalds list_del(&page->lru); 12591da177e4SLinus Torvalds if (page_mapped(page)) { 12601da177e4SLinus Torvalds if (!reclaim_mapped || 12611da177e4SLinus Torvalds (total_swap_pages == 0 && PageAnon(page)) || 1262f7b7fd8fSRik van Riel page_referenced(page, 0)) { 12631da177e4SLinus Torvalds list_add(&page->lru, &l_active); 12641da177e4SLinus Torvalds continue; 12651da177e4SLinus Torvalds } 12661da177e4SLinus Torvalds } 12671da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 12681da177e4SLinus Torvalds } 12691da177e4SLinus Torvalds 12701da177e4SLinus Torvalds pagevec_init(&pvec, 1); 12711da177e4SLinus Torvalds pgmoved = 0; 12721da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12731da177e4SLinus Torvalds while (!list_empty(&l_inactive)) { 12741da177e4SLinus Torvalds page = lru_to_page(&l_inactive); 12751da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_inactive, flags); 12768d438f96SNick Piggin BUG_ON(PageLRU(page)); 12778d438f96SNick Piggin SetPageLRU(page); 12784c84cacfSNick Piggin BUG_ON(!PageActive(page)); 12794c84cacfSNick Piggin ClearPageActive(page); 12804c84cacfSNick Piggin 12811da177e4SLinus Torvalds list_move(&page->lru, &zone->inactive_list); 12821da177e4SLinus Torvalds pgmoved++; 12831da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 12841da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 12851da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12861da177e4SLinus Torvalds pgdeactivate += pgmoved; 12871da177e4SLinus Torvalds pgmoved = 0; 12881da177e4SLinus Torvalds if (buffer_heads_over_limit) 12891da177e4SLinus Torvalds pagevec_strip(&pvec); 12901da177e4SLinus Torvalds __pagevec_release(&pvec); 12911da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12921da177e4SLinus Torvalds } 12931da177e4SLinus Torvalds } 12941da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 12951da177e4SLinus Torvalds pgdeactivate += pgmoved; 12961da177e4SLinus Torvalds if (buffer_heads_over_limit) { 12971da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12981da177e4SLinus Torvalds pagevec_strip(&pvec); 12991da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 13001da177e4SLinus Torvalds } 13011da177e4SLinus Torvalds 13021da177e4SLinus Torvalds pgmoved = 0; 13031da177e4SLinus Torvalds while (!list_empty(&l_active)) { 13041da177e4SLinus Torvalds page = lru_to_page(&l_active); 13051da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_active, flags); 13068d438f96SNick Piggin BUG_ON(PageLRU(page)); 13078d438f96SNick Piggin SetPageLRU(page); 13081da177e4SLinus Torvalds BUG_ON(!PageActive(page)); 13091da177e4SLinus Torvalds list_move(&page->lru, &zone->active_list); 13101da177e4SLinus Torvalds pgmoved++; 13111da177e4SLinus Torvalds if 
(!pagevec_add(&pvec, page)) { 13121da177e4SLinus Torvalds zone->nr_active += pgmoved; 13131da177e4SLinus Torvalds pgmoved = 0; 13141da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 13151da177e4SLinus Torvalds __pagevec_release(&pvec); 13161da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds } 13191da177e4SLinus Torvalds zone->nr_active += pgmoved; 1320a74609faSNick Piggin spin_unlock(&zone->lru_lock); 13211da177e4SLinus Torvalds 1322a74609faSNick Piggin __mod_page_state_zone(zone, pgrefill, pgscanned); 1323a74609faSNick Piggin __mod_page_state(pgdeactivate, pgdeactivate); 1324a74609faSNick Piggin local_irq_enable(); 1325a74609faSNick Piggin 1326a74609faSNick Piggin pagevec_release(&pvec); 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds /* 13301da177e4SLinus Torvalds * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 13311da177e4SLinus Torvalds */ 133205ff5137SAndrew Morton static unsigned long shrink_zone(int priority, struct zone *zone, 133369e05944SAndrew Morton struct scan_control *sc) 13341da177e4SLinus Torvalds { 13351da177e4SLinus Torvalds unsigned long nr_active; 13361da177e4SLinus Torvalds unsigned long nr_inactive; 13378695949aSChristoph Lameter unsigned long nr_to_scan; 133805ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 13391da177e4SLinus Torvalds 134053e9a615SMartin Hicks atomic_inc(&zone->reclaim_in_progress); 134153e9a615SMartin Hicks 13421da177e4SLinus Torvalds /* 13431da177e4SLinus Torvalds * Add one to `nr_to_scan' just to make sure that the kernel will 13441da177e4SLinus Torvalds * slowly sift through the active list. 13451da177e4SLinus Torvalds */ 13468695949aSChristoph Lameter zone->nr_scan_active += (zone->nr_active >> priority) + 1; 13471da177e4SLinus Torvalds nr_active = zone->nr_scan_active; 13481da177e4SLinus Torvalds if (nr_active >= sc->swap_cluster_max) 13491da177e4SLinus Torvalds zone->nr_scan_active = 0; 13501da177e4SLinus Torvalds else 13511da177e4SLinus Torvalds nr_active = 0; 13521da177e4SLinus Torvalds 13538695949aSChristoph Lameter zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1; 13541da177e4SLinus Torvalds nr_inactive = zone->nr_scan_inactive; 13551da177e4SLinus Torvalds if (nr_inactive >= sc->swap_cluster_max) 13561da177e4SLinus Torvalds zone->nr_scan_inactive = 0; 13571da177e4SLinus Torvalds else 13581da177e4SLinus Torvalds nr_inactive = 0; 13591da177e4SLinus Torvalds 13601da177e4SLinus Torvalds while (nr_active || nr_inactive) { 13611da177e4SLinus Torvalds if (nr_active) { 13628695949aSChristoph Lameter nr_to_scan = min(nr_active, 13631da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 13648695949aSChristoph Lameter nr_active -= nr_to_scan; 13651742f19fSAndrew Morton shrink_active_list(nr_to_scan, zone, sc); 13661da177e4SLinus Torvalds } 13671da177e4SLinus Torvalds 13681da177e4SLinus Torvalds if (nr_inactive) { 13698695949aSChristoph Lameter nr_to_scan = min(nr_inactive, 13701da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 13718695949aSChristoph Lameter nr_inactive -= nr_to_scan; 13721742f19fSAndrew Morton nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, 13731742f19fSAndrew Morton sc); 13741da177e4SLinus Torvalds } 13751da177e4SLinus Torvalds } 13761da177e4SLinus Torvalds 13771da177e4SLinus Torvalds throttle_vm_writeout(); 137853e9a615SMartin Hicks 137953e9a615SMartin Hicks atomic_dec(&zone->reclaim_in_progress); 138005ff5137SAndrew Morton return nr_reclaimed; 
13811da177e4SLinus Torvalds } 13821da177e4SLinus Torvalds 13831da177e4SLinus Torvalds /* 13841da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 13851da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 13861da177e4SLinus Torvalds * request. 13871da177e4SLinus Torvalds * 13881da177e4SLinus Torvalds * We reclaim from a zone even if that zone is over pages_high. Because: 13891da177e4SLinus Torvalds * a) The caller may be trying to free *extra* pages to satisfy a higher-order 13901da177e4SLinus Torvalds * allocation or 13911da177e4SLinus Torvalds * b) The zones may be over pages_high but they must go *over* pages_high to 13921da177e4SLinus Torvalds * satisfy the `incremental min' zone defense algorithm. 13931da177e4SLinus Torvalds * 13941da177e4SLinus Torvalds * Returns the number of reclaimed pages. 13951da177e4SLinus Torvalds * 13961da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 13971da177e4SLinus Torvalds * scan and then give up on it. 13981da177e4SLinus Torvalds */ 13991742f19fSAndrew Morton static unsigned long shrink_zones(int priority, struct zone **zones, 140069e05944SAndrew Morton struct scan_control *sc) 14011da177e4SLinus Torvalds { 140205ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 14031da177e4SLinus Torvalds int i; 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 14061da177e4SLinus Torvalds struct zone *zone = zones[i]; 14071da177e4SLinus Torvalds 1408f3fe6512SCon Kolivas if (!populated_zone(zone)) 14091da177e4SLinus Torvalds continue; 14101da177e4SLinus Torvalds 14119bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 14121da177e4SLinus Torvalds continue; 14131da177e4SLinus Torvalds 14148695949aSChristoph Lameter zone->temp_priority = priority; 14158695949aSChristoph Lameter if (zone->prev_priority > priority) 14168695949aSChristoph Lameter zone->prev_priority = priority; 14171da177e4SLinus Torvalds 14188695949aSChristoph Lameter if (zone->all_unreclaimable && priority != DEF_PRIORITY) 14191da177e4SLinus Torvalds continue; /* Let kswapd poll it */ 14201da177e4SLinus Torvalds 142105ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, sc); 14221da177e4SLinus Torvalds } 142305ff5137SAndrew Morton return nr_reclaimed; 14241da177e4SLinus Torvalds } 14251da177e4SLinus Torvalds 14261da177e4SLinus Torvalds /* 14271da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 14281da177e4SLinus Torvalds * 14291da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 14301da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 14311da177e4SLinus Torvalds * 14321da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 14331da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 14341da177e4SLinus Torvalds * caller can't do much about. We kick pdflush and take explicit naps in the 14351da177e4SLinus Torvalds * hope that some of these pages can be written. But if the allocating task 14361da177e4SLinus Torvalds * holds filesystem locks which prevent writeout this might not work, and the 14371da177e4SLinus Torvalds * allocation attempt will fail. 
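 *
 * The priority loop below makes up to DEF_PRIORITY + 1 passes,
 * scanning list_size >> priority pages of each zone's LRUs per pass
 * (see shrink_zone()): with DEF_PRIORITY at 12, that is roughly
 * 1/4096th of each list on the first pass, the whole list by
 * priority 0.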
14381da177e4SLinus Torvalds */ 143969e05944SAndrew Morton unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) 14401da177e4SLinus Torvalds { 14411da177e4SLinus Torvalds int priority; 14421da177e4SLinus Torvalds int ret = 0; 144369e05944SAndrew Morton unsigned long total_scanned = 0; 144405ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 14451da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 14461da177e4SLinus Torvalds unsigned long lru_pages = 0; 14471da177e4SLinus Torvalds int i; 1448179e9639SAndrew Morton struct scan_control sc = { 1449179e9639SAndrew Morton .gfp_mask = gfp_mask, 1450179e9639SAndrew Morton .may_writepage = !laptop_mode, 1451179e9639SAndrew Morton .swap_cluster_max = SWAP_CLUSTER_MAX, 1452179e9639SAndrew Morton .may_swap = 1, 1453179e9639SAndrew Morton }; 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds inc_page_state(allocstall); 14561da177e4SLinus Torvalds 14571da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 14581da177e4SLinus Torvalds struct zone *zone = zones[i]; 14591da177e4SLinus Torvalds 14609bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 14611da177e4SLinus Torvalds continue; 14621da177e4SLinus Torvalds 14631da177e4SLinus Torvalds zone->temp_priority = DEF_PRIORITY; 14641da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 14651da177e4SLinus Torvalds } 14661da177e4SLinus Torvalds 14671da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 14681da177e4SLinus Torvalds sc.nr_mapped = read_page_state(nr_mapped); 14691da177e4SLinus Torvalds sc.nr_scanned = 0; 1470f7b7fd8fSRik van Riel if (!priority) 1471f7b7fd8fSRik van Riel disable_swap_token(); 14721742f19fSAndrew Morton nr_reclaimed += shrink_zones(priority, zones, &sc); 14731da177e4SLinus Torvalds shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); 14741da177e4SLinus Torvalds if (reclaim_state) { 147505ff5137SAndrew Morton nr_reclaimed += reclaim_state->reclaimed_slab; 14761da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 14771da177e4SLinus Torvalds } 14781da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 147905ff5137SAndrew Morton if (nr_reclaimed >= sc.swap_cluster_max) { 14801da177e4SLinus Torvalds ret = 1; 14811da177e4SLinus Torvalds goto out; 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds /* 14851da177e4SLinus Torvalds * Try to write back as many pages as we just scanned. This 14861da177e4SLinus Torvalds * tends to cause slow streaming writers to write data to the 14871da177e4SLinus Torvalds * disk smoothly, at the dirtying rate, which is nice. But 14881da177e4SLinus Torvalds * that's undesirable in laptop mode, where we *want* lumpy 14891da177e4SLinus Torvalds * writeout. So in laptop mode, write out the whole world. 14901da177e4SLinus Torvalds */ 1491179e9639SAndrew Morton if (total_scanned > sc.swap_cluster_max + 1492179e9639SAndrew Morton sc.swap_cluster_max / 2) { 1493687a21ceSPekka J Enberg wakeup_pdflush(laptop_mode ? 
0 : total_scanned); 14941da177e4SLinus Torvalds sc.may_writepage = 1; 14951da177e4SLinus Torvalds } 14961da177e4SLinus Torvalds 14971da177e4SLinus Torvalds /* Take a nap, wait for some writeback to complete */ 14981da177e4SLinus Torvalds if (sc.nr_scanned && priority < DEF_PRIORITY - 2) 14991da177e4SLinus Torvalds blk_congestion_wait(WRITE, HZ/10); 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds out: 15021da177e4SLinus Torvalds for (i = 0; zones[i] != 0; i++) { 15031da177e4SLinus Torvalds struct zone *zone = zones[i]; 15041da177e4SLinus Torvalds 15059bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 15061da177e4SLinus Torvalds continue; 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds zone->prev_priority = zone->temp_priority; 15091da177e4SLinus Torvalds } 15101da177e4SLinus Torvalds return ret; 15111da177e4SLinus Torvalds } 15121da177e4SLinus Torvalds 15131da177e4SLinus Torvalds /* 15141da177e4SLinus Torvalds * For kswapd, balance_pgdat() will work across all this node's zones until 15151da177e4SLinus Torvalds * they are all at pages_high. 15161da177e4SLinus Torvalds * 15171da177e4SLinus Torvalds * If `nr_pages' is non-zero then it is the number of pages which are to be 15181da177e4SLinus Torvalds * reclaimed, regardless of the zone occupancies. This is a software suspend 15191da177e4SLinus Torvalds * special. 15201da177e4SLinus Torvalds * 15211da177e4SLinus Torvalds * Returns the number of pages which were actually freed. 15221da177e4SLinus Torvalds * 15231da177e4SLinus Torvalds * There is special handling here for zones which are full of pinned pages. 15241da177e4SLinus Torvalds * This can happen if the pages are all mlocked, or if they are all used by 15251da177e4SLinus Torvalds * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 15261da177e4SLinus Torvalds * What we do is to detect the case where all pages in the zone have been 15271da177e4SLinus Torvalds * scanned twice and there has been zero successful reclaim. Mark the zone as 15281da177e4SLinus Torvalds * dead and from now on, only perform a short scan. Basically we're polling 15291da177e4SLinus Torvalds * the zone for when the problem goes away. 15301da177e4SLinus Torvalds * 15311da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 15321da177e4SLinus Torvalds * zones which have free_pages > pages_high, but once a zone is found to have 15331da177e4SLinus Torvalds * free_pages <= pages_high, we scan that zone and the lower zones regardless 15341da177e4SLinus Torvalds * of the number of free pages in the lower zones. This interoperates with 15351da177e4SLinus Torvalds * the page allocator fallback scheme to ensure that aging of pages is balanced 15361da177e4SLinus Torvalds * across the zones. 
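 *
 * As an illustration: on a node with DMA, NORMAL and HIGHMEM zones,
 * if NORMAL is the highest zone found below pages_high, kswapd scans
 * DMA and then NORMAL, and a healthy HIGHMEM is left alone.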
15371da177e4SLinus Torvalds */ 153869e05944SAndrew Morton static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, 153969e05944SAndrew Morton int order) 15401da177e4SLinus Torvalds { 154169e05944SAndrew Morton unsigned long to_free = nr_pages; 15421da177e4SLinus Torvalds int all_zones_ok; 15431da177e4SLinus Torvalds int priority; 15441da177e4SLinus Torvalds int i; 154569e05944SAndrew Morton unsigned long total_scanned; 154605ff5137SAndrew Morton unsigned long nr_reclaimed; 15471da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 1548179e9639SAndrew Morton struct scan_control sc = { 1549179e9639SAndrew Morton .gfp_mask = GFP_KERNEL, 1550179e9639SAndrew Morton .may_swap = 1, 1551179e9639SAndrew Morton .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX, 1552179e9639SAndrew Morton }; 15531da177e4SLinus Torvalds 15541da177e4SLinus Torvalds loop_again: 15551da177e4SLinus Torvalds total_scanned = 0; 155605ff5137SAndrew Morton nr_reclaimed = 0; 1557179e9639SAndrew Morton sc.may_writepage = !laptop_mode, 15581da177e4SLinus Torvalds sc.nr_mapped = read_page_state(nr_mapped); 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds inc_page_state(pageoutrun); 15611da177e4SLinus Torvalds 15621da177e4SLinus Torvalds for (i = 0; i < pgdat->nr_zones; i++) { 15631da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 15641da177e4SLinus Torvalds 15651da177e4SLinus Torvalds zone->temp_priority = DEF_PRIORITY; 15661da177e4SLinus Torvalds } 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 15691da177e4SLinus Torvalds int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 15701da177e4SLinus Torvalds unsigned long lru_pages = 0; 15711da177e4SLinus Torvalds 1572f7b7fd8fSRik van Riel /* The swap token gets in the way of swapout... 
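 * (page_referenced() treats the token holder's pages as referenced
 * while it is in a page fault, shielding them from deactivation;
 * dropping the token at priority 0 removes that shield.)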
*/ 1573f7b7fd8fSRik van Riel if (!priority) 1574f7b7fd8fSRik van Riel disable_swap_token(); 1575f7b7fd8fSRik van Riel 15761da177e4SLinus Torvalds all_zones_ok = 1; 15771da177e4SLinus Torvalds 15781da177e4SLinus Torvalds if (nr_pages == 0) { 15791da177e4SLinus Torvalds /* 15801da177e4SLinus Torvalds * Scan in the highmem->dma direction for the highest 15811da177e4SLinus Torvalds * zone which needs scanning 15821da177e4SLinus Torvalds */ 15831da177e4SLinus Torvalds for (i = pgdat->nr_zones - 1; i >= 0; i--) { 15841da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 15851da177e4SLinus Torvalds 1586f3fe6512SCon Kolivas if (!populated_zone(zone)) 15871da177e4SLinus Torvalds continue; 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds if (zone->all_unreclaimable && 15901da177e4SLinus Torvalds priority != DEF_PRIORITY) 15911da177e4SLinus Torvalds continue; 15921da177e4SLinus Torvalds 15931da177e4SLinus Torvalds if (!zone_watermark_ok(zone, order, 15947fb1d9fcSRohit Seth zone->pages_high, 0, 0)) { 15951da177e4SLinus Torvalds end_zone = i; 15961da177e4SLinus Torvalds goto scan; 15971da177e4SLinus Torvalds } 15981da177e4SLinus Torvalds } 15991da177e4SLinus Torvalds goto out; 16001da177e4SLinus Torvalds } else { 16011da177e4SLinus Torvalds end_zone = pgdat->nr_zones - 1; 16021da177e4SLinus Torvalds } 16031da177e4SLinus Torvalds scan: 16041da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 16051da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds 16101da177e4SLinus Torvalds /* 16111da177e4SLinus Torvalds * Now scan the zone in the dma->highmem direction, stopping 16121da177e4SLinus Torvalds * at the last zone which needs scanning. 16131da177e4SLinus Torvalds * 16141da177e4SLinus Torvalds * We do this because the page allocator works in the opposite 16151da177e4SLinus Torvalds * direction. This prevents the page allocator from allocating 16161da177e4SLinus Torvalds * pages behind kswapd's direction of progress, which would 16171da177e4SLinus Torvalds * cause too much scanning of the lower zones. 
16181da177e4SLinus Torvalds */ 16191da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 16201da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 1621b15e0905Sakpm@osdl.org int nr_slab; 16221da177e4SLinus Torvalds 1623f3fe6512SCon Kolivas if (!populated_zone(zone)) 16241da177e4SLinus Torvalds continue; 16251da177e4SLinus Torvalds 16261da177e4SLinus Torvalds if (zone->all_unreclaimable && priority != DEF_PRIORITY) 16271da177e4SLinus Torvalds continue; 16281da177e4SLinus Torvalds 16291da177e4SLinus Torvalds if (nr_pages == 0) { /* Not software suspend */ 16301da177e4SLinus Torvalds if (!zone_watermark_ok(zone, order, 16317fb1d9fcSRohit Seth zone->pages_high, end_zone, 0)) 16321da177e4SLinus Torvalds all_zones_ok = 0; 16331da177e4SLinus Torvalds } 16341da177e4SLinus Torvalds zone->temp_priority = priority; 16351da177e4SLinus Torvalds if (zone->prev_priority > priority) 16361da177e4SLinus Torvalds zone->prev_priority = priority; 16371da177e4SLinus Torvalds sc.nr_scanned = 0; 163805ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, &sc); 16391da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 1640b15e0905Sakpm@osdl.org nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1641b15e0905Sakpm@osdl.org lru_pages); 164205ff5137SAndrew Morton nr_reclaimed += reclaim_state->reclaimed_slab; 16431da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 16441da177e4SLinus Torvalds if (zone->all_unreclaimable) 16451da177e4SLinus Torvalds continue; 1646b15e0905Sakpm@osdl.org if (nr_slab == 0 && zone->pages_scanned >= 1647b15e0905Sakpm@osdl.org (zone->nr_active + zone->nr_inactive) * 4) 16481da177e4SLinus Torvalds zone->all_unreclaimable = 1; 16491da177e4SLinus Torvalds /* 16501da177e4SLinus Torvalds * If we've done a decent amount of scanning and 16511da177e4SLinus Torvalds * the reclaim ratio is low, start doing writepage 16521da177e4SLinus Torvalds * even in laptop mode 16531da177e4SLinus Torvalds */ 16541da177e4SLinus Torvalds if (total_scanned > SWAP_CLUSTER_MAX * 2 && 165505ff5137SAndrew Morton total_scanned > nr_reclaimed + nr_reclaimed / 2) 16561da177e4SLinus Torvalds sc.may_writepage = 1; 16571da177e4SLinus Torvalds } 165805ff5137SAndrew Morton if (nr_pages && to_free > nr_reclaimed) 16591da177e4SLinus Torvalds continue; /* swsusp: need to do more work */ 16601da177e4SLinus Torvalds if (all_zones_ok) 16611da177e4SLinus Torvalds break; /* kswapd: all done */ 16621da177e4SLinus Torvalds /* 16631da177e4SLinus Torvalds * OK, kswapd is getting into trouble. Take a nap, then take 16641da177e4SLinus Torvalds * another pass across the zones. 16651da177e4SLinus Torvalds */ 16661da177e4SLinus Torvalds if (total_scanned && priority < DEF_PRIORITY - 2) 16671da177e4SLinus Torvalds blk_congestion_wait(WRITE, HZ/10); 16681da177e4SLinus Torvalds 16691da177e4SLinus Torvalds /* 16701da177e4SLinus Torvalds * We do this so kswapd doesn't build up large priorities for 16711da177e4SLinus Torvalds * example when it is freeing in parallel with allocators. It 16721da177e4SLinus Torvalds * matches the direct reclaim path behaviour in terms of impact 16731da177e4SLinus Torvalds * on zone->*_priority. 
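 *
 * That is: once a pass has reclaimed SWAP_CLUSTER_MAX pages (and this
 * is not a software-suspend run), stop cranking up the pressure; if
 * some zones are still below pages_high we restart from DEF_PRIORITY
 * via loop_again instead.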
16741da177e4SLinus Torvalds */ 167505ff5137SAndrew Morton if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) 16761da177e4SLinus Torvalds break; 16771da177e4SLinus Torvalds } 16781da177e4SLinus Torvalds out: 16791da177e4SLinus Torvalds for (i = 0; i < pgdat->nr_zones; i++) { 16801da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 16811da177e4SLinus Torvalds 16821da177e4SLinus Torvalds zone->prev_priority = zone->temp_priority; 16831da177e4SLinus Torvalds } 16841da177e4SLinus Torvalds if (!all_zones_ok) { 16851da177e4SLinus Torvalds cond_resched(); 16861da177e4SLinus Torvalds goto loop_again; 16871da177e4SLinus Torvalds } 16881da177e4SLinus Torvalds 168905ff5137SAndrew Morton return nr_reclaimed; 16901da177e4SLinus Torvalds } 16911da177e4SLinus Torvalds 16921da177e4SLinus Torvalds /* 16931da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 16941da177e4SLinus Torvalds * from the init process. 16951da177e4SLinus Torvalds * 16961da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 16971da177e4SLinus Torvalds * free memory available even if there is no other activity 16981da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 16991da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 17001da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 17011da177e4SLinus Torvalds * 17021da177e4SLinus Torvalds * If there are applications that are active memory-allocators 17031da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 17041da177e4SLinus Torvalds */ 17051da177e4SLinus Torvalds static int kswapd(void *p) 17061da177e4SLinus Torvalds { 17071da177e4SLinus Torvalds unsigned long order; 17081da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 17091da177e4SLinus Torvalds struct task_struct *tsk = current; 17101da177e4SLinus Torvalds DEFINE_WAIT(wait); 17111da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 17121da177e4SLinus Torvalds .reclaimed_slab = 0, 17131da177e4SLinus Torvalds }; 17141da177e4SLinus Torvalds cpumask_t cpumask; 17151da177e4SLinus Torvalds 17161da177e4SLinus Torvalds daemonize("kswapd%d", pgdat->node_id); 17171da177e4SLinus Torvalds cpumask = node_to_cpumask(pgdat->node_id); 17181da177e4SLinus Torvalds if (!cpus_empty(cpumask)) 17191da177e4SLinus Torvalds set_cpus_allowed(tsk, cpumask); 17201da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 17211da177e4SLinus Torvalds 17221da177e4SLinus Torvalds /* 17231da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 17241da177e4SLinus Torvalds * and that if we need more memory we should get access to it 17251da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 17261da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 17271da177e4SLinus Torvalds * 17281da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 17291da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 17301da177e4SLinus Torvalds * page out something else, and this flag essentially protects 17311da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 17321da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 
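 *
 * PF_MEMALLOC lets kswapd dip into the page reserves, PF_SWAPWRITE
 * lets it write pages to swap, and PF_KSWAPD is what makes
 * current_is_kswapd() true (used e.g. for the pgscan_kswapd
 * accounting in shrink_inactive_list()).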
17331da177e4SLinus Torvalds */ 1734930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 17351da177e4SLinus Torvalds 17361da177e4SLinus Torvalds order = 0; 17371da177e4SLinus Torvalds for ( ; ; ) { 17381da177e4SLinus Torvalds unsigned long new_order; 17393e1d1d28SChristoph Lameter 17403e1d1d28SChristoph Lameter try_to_freeze(); 17411da177e4SLinus Torvalds 17421da177e4SLinus Torvalds prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 17431da177e4SLinus Torvalds new_order = pgdat->kswapd_max_order; 17441da177e4SLinus Torvalds pgdat->kswapd_max_order = 0; 17451da177e4SLinus Torvalds if (order < new_order) { 17461da177e4SLinus Torvalds /* 17471da177e4SLinus Torvalds * Don't sleep if someone wants a larger 'order' 17481da177e4SLinus Torvalds * allocation 17491da177e4SLinus Torvalds */ 17501da177e4SLinus Torvalds order = new_order; 17511da177e4SLinus Torvalds } else { 17521da177e4SLinus Torvalds schedule(); 17531da177e4SLinus Torvalds order = pgdat->kswapd_max_order; 17541da177e4SLinus Torvalds } 17551da177e4SLinus Torvalds finish_wait(&pgdat->kswapd_wait, &wait); 17561da177e4SLinus Torvalds 17571da177e4SLinus Torvalds balance_pgdat(pgdat, 0, order); 17581da177e4SLinus Torvalds } 17591da177e4SLinus Torvalds return 0; 17601da177e4SLinus Torvalds } 17611da177e4SLinus Torvalds 17621da177e4SLinus Torvalds /* 17631da177e4SLinus Torvalds * A zone is low on free memory, so wake its kswapd task to service it. 17641da177e4SLinus Torvalds */ 17651da177e4SLinus Torvalds void wakeup_kswapd(struct zone *zone, int order) 17661da177e4SLinus Torvalds { 17671da177e4SLinus Torvalds pg_data_t *pgdat; 17681da177e4SLinus Torvalds 1769f3fe6512SCon Kolivas if (!populated_zone(zone)) 17701da177e4SLinus Torvalds return; 17711da177e4SLinus Torvalds 17721da177e4SLinus Torvalds pgdat = zone->zone_pgdat; 17737fb1d9fcSRohit Seth if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) 17741da177e4SLinus Torvalds return; 17751da177e4SLinus Torvalds if (pgdat->kswapd_max_order < order) 17761da177e4SLinus Torvalds pgdat->kswapd_max_order = order; 17779bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 17781da177e4SLinus Torvalds return; 17798d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 17801da177e4SLinus Torvalds return; 17818d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 17821da177e4SLinus Torvalds } 17831da177e4SLinus Torvalds 17841da177e4SLinus Torvalds #ifdef CONFIG_PM 17851da177e4SLinus Torvalds /* 17861da177e4SLinus Torvalds * Try to free `nr_pages' of memory, system-wide. Returns the number of freed 17871da177e4SLinus Torvalds * pages. 
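 *
 * This is the software-suspend path: walk every node and ask
 * balance_pgdat() for the still-outstanding part of `nr_pages',
 * stopping early once the target has been met.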
17881da177e4SLinus Torvalds */ 178969e05944SAndrew Morton unsigned long shrink_all_memory(unsigned long nr_pages) 17901da177e4SLinus Torvalds { 17911da177e4SLinus Torvalds pg_data_t *pgdat; 179269e05944SAndrew Morton unsigned long nr_to_free = nr_pages; 179369e05944SAndrew Morton unsigned long ret = 0; 17941da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 17951da177e4SLinus Torvalds .reclaimed_slab = 0, 17961da177e4SLinus Torvalds }; 17971da177e4SLinus Torvalds 17981da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 17991da177e4SLinus Torvalds for_each_pgdat(pgdat) { 180069e05944SAndrew Morton unsigned long freed; 180169e05944SAndrew Morton 18021da177e4SLinus Torvalds freed = balance_pgdat(pgdat, nr_to_free, 0); 18031da177e4SLinus Torvalds ret += freed; 18041da177e4SLinus Torvalds nr_to_free -= freed; 180569e05944SAndrew Morton if ((long)nr_to_free <= 0) 18061da177e4SLinus Torvalds break; 18071da177e4SLinus Torvalds } 18081da177e4SLinus Torvalds current->reclaim_state = NULL; 18091da177e4SLinus Torvalds return ret; 18101da177e4SLinus Torvalds } 18111da177e4SLinus Torvalds #endif 18121da177e4SLinus Torvalds 18131da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU 18141da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 18151da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 18161da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 18171da177e4SLinus Torvalds restore their cpu bindings. */ 18181da177e4SLinus Torvalds static int __devinit cpu_callback(struct notifier_block *nfb, 181969e05944SAndrew Morton unsigned long action, void *hcpu) 18201da177e4SLinus Torvalds { 18211da177e4SLinus Torvalds pg_data_t *pgdat; 18221da177e4SLinus Torvalds cpumask_t mask; 18231da177e4SLinus Torvalds 18241da177e4SLinus Torvalds if (action == CPU_ONLINE) { 18251da177e4SLinus Torvalds for_each_pgdat(pgdat) { 18261da177e4SLinus Torvalds mask = node_to_cpumask(pgdat->node_id); 18271da177e4SLinus Torvalds if (any_online_cpu(mask) != NR_CPUS) 18281da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 18291da177e4SLinus Torvalds set_cpus_allowed(pgdat->kswapd, mask); 18301da177e4SLinus Torvalds } 18311da177e4SLinus Torvalds } 18321da177e4SLinus Torvalds return NOTIFY_OK; 18331da177e4SLinus Torvalds } 18341da177e4SLinus Torvalds #endif /* CONFIG_HOTPLUG_CPU */ 18351da177e4SLinus Torvalds 18361da177e4SLinus Torvalds static int __init kswapd_init(void) 18371da177e4SLinus Torvalds { 18381da177e4SLinus Torvalds pg_data_t *pgdat; 183969e05944SAndrew Morton 18401da177e4SLinus Torvalds swap_setup(); 184169e05944SAndrew Morton for_each_pgdat(pgdat) { 184269e05944SAndrew Morton pid_t pid; 184369e05944SAndrew Morton 184469e05944SAndrew Morton pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); 184569e05944SAndrew Morton BUG_ON(pid < 0); 184669e05944SAndrew Morton pgdat->kswapd = find_task_by_pid(pid); 184769e05944SAndrew Morton } 18481da177e4SLinus Torvalds total_memory = nr_free_pagecache_pages(); 18491da177e4SLinus Torvalds hotcpu_notifier(cpu_callback, 0); 18501da177e4SLinus Torvalds return 0; 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds module_init(kswapd_init) 18549eeff239SChristoph Lameter 18559eeff239SChristoph Lameter #ifdef CONFIG_NUMA 18569eeff239SChristoph Lameter /* 18579eeff239SChristoph Lameter * Zone reclaim mode 18589eeff239SChristoph Lameter * 18599eeff239SChristoph Lameter * If non-zero call zone_reclaim when the number of 
free pages falls below 18609eeff239SChristoph Lameter * the watermarks. 18619eeff239SChristoph Lameter * 18629eeff239SChristoph Lameter * In the future we may add flags to the mode. However, the page allocator 18639eeff239SChristoph Lameter * should only have to check that zone_reclaim_mode != 0 before calling 18649eeff239SChristoph Lameter * zone_reclaim(). 18659eeff239SChristoph Lameter */ 18669eeff239SChristoph Lameter int zone_reclaim_mode __read_mostly; 18679eeff239SChristoph Lameter 18681b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 18691b2ffb78SChristoph Lameter #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */ 18701b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 18711b2ffb78SChristoph Lameter #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 18722a16e3f4SChristoph Lameter #define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */ 18731b2ffb78SChristoph Lameter 18749eeff239SChristoph Lameter /* 18759eeff239SChristoph Lameter * Mininum time between zone reclaim scans 18769eeff239SChristoph Lameter */ 18772a11ff06SChristoph Lameter int zone_reclaim_interval __read_mostly = 30*HZ; 1878a92f7126SChristoph Lameter 1879a92f7126SChristoph Lameter /* 1880a92f7126SChristoph Lameter * Priority for ZONE_RECLAIM. This determines the fraction of pages 1881a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 1882a92f7126SChristoph Lameter * a zone. 1883a92f7126SChristoph Lameter */ 1884a92f7126SChristoph Lameter #define ZONE_RECLAIM_PRIORITY 4 1885a92f7126SChristoph Lameter 18869eeff239SChristoph Lameter /* 18879eeff239SChristoph Lameter * Try to free up some pages from this zone through reclaim. 18889eeff239SChristoph Lameter */ 1889179e9639SAndrew Morton static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 18909eeff239SChristoph Lameter { 18917fb2d46dSChristoph Lameter /* Minimum pages needed in order to stay on node */ 189269e05944SAndrew Morton const unsigned long nr_pages = 1 << order; 18939eeff239SChristoph Lameter struct task_struct *p = current; 18949eeff239SChristoph Lameter struct reclaim_state reclaim_state; 18958695949aSChristoph Lameter int priority; 189605ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 1897179e9639SAndrew Morton struct scan_control sc = { 1898179e9639SAndrew Morton .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 1899179e9639SAndrew Morton .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), 1900179e9639SAndrew Morton .nr_mapped = read_page_state(nr_mapped), 190169e05944SAndrew Morton .swap_cluster_max = max_t(unsigned long, nr_pages, 190269e05944SAndrew Morton SWAP_CLUSTER_MAX), 1903179e9639SAndrew Morton .gfp_mask = gfp_mask, 1904179e9639SAndrew Morton }; 19059eeff239SChristoph Lameter 19069eeff239SChristoph Lameter disable_swap_token(); 19079eeff239SChristoph Lameter cond_resched(); 1908d4f7796eSChristoph Lameter /* 1909d4f7796eSChristoph Lameter * We need to be able to allocate from the reserves for RECLAIM_SWAP 1910d4f7796eSChristoph Lameter * and we also need to be able to write out pages for RECLAIM_WRITE 1911d4f7796eSChristoph Lameter * and RECLAIM_SWAP. 
1912d4f7796eSChristoph Lameter */ 1913d4f7796eSChristoph Lameter p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 19149eeff239SChristoph Lameter reclaim_state.reclaimed_slab = 0; 19159eeff239SChristoph Lameter p->reclaim_state = &reclaim_state; 1916c84db23cSChristoph Lameter 1917a92f7126SChristoph Lameter /* 1918a92f7126SChristoph Lameter * Free memory by calling shrink zone with increasing priorities 1919a92f7126SChristoph Lameter * until we have enough memory freed. 1920a92f7126SChristoph Lameter */ 19218695949aSChristoph Lameter priority = ZONE_RECLAIM_PRIORITY; 1922a92f7126SChristoph Lameter do { 192305ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, &sc); 19248695949aSChristoph Lameter priority--; 192505ff5137SAndrew Morton } while (priority >= 0 && nr_reclaimed < nr_pages); 1926a92f7126SChristoph Lameter 192705ff5137SAndrew Morton if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { 19282a16e3f4SChristoph Lameter /* 19297fb2d46dSChristoph Lameter * shrink_slab() does not currently allow us to determine how 19307fb2d46dSChristoph Lameter * many pages were freed in this zone. So we just shake the slab 19317fb2d46dSChristoph Lameter * a bit and then go off node for this particular allocation 19327fb2d46dSChristoph Lameter * despite possibly having freed enough memory to allocate in 19337fb2d46dSChristoph Lameter * this zone. If we freed local memory then the next 19347fb2d46dSChristoph Lameter * allocations will be local again. 19352a16e3f4SChristoph Lameter * 19362a16e3f4SChristoph Lameter * shrink_slab will free memory on all zones and may take 19372a16e3f4SChristoph Lameter * a long time. 19382a16e3f4SChristoph Lameter */ 19392a16e3f4SChristoph Lameter shrink_slab(sc.nr_scanned, gfp_mask, order); 19402a16e3f4SChristoph Lameter } 19412a16e3f4SChristoph Lameter 19429eeff239SChristoph Lameter p->reclaim_state = NULL; 1943d4f7796eSChristoph Lameter current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 19449eeff239SChristoph Lameter 19457fb2d46dSChristoph Lameter if (nr_reclaimed == 0) { 19467fb2d46dSChristoph Lameter /* 19477fb2d46dSChristoph Lameter * We were unable to reclaim enough pages to stay on node. We 19487fb2d46dSChristoph Lameter * now allow off node accesses for a certain time period before 19497fb2d46dSChristoph Lameter * trying again to reclaim pages from the local zone. 19507fb2d46dSChristoph Lameter */ 19519eeff239SChristoph Lameter zone->last_unsuccessful_zone_reclaim = jiffies; 19527fb2d46dSChristoph Lameter } 19539eeff239SChristoph Lameter 195405ff5137SAndrew Morton return nr_reclaimed >= nr_pages; 19559eeff239SChristoph Lameter } 1956179e9639SAndrew Morton 1957179e9639SAndrew Morton int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 1958179e9639SAndrew Morton { 1959179e9639SAndrew Morton cpumask_t mask; 1960179e9639SAndrew Morton int node_id; 1961179e9639SAndrew Morton 1962179e9639SAndrew Morton /* 1963179e9639SAndrew Morton * Do not reclaim if there was a recent unsuccessful attempt at zone 1964179e9639SAndrew Morton * reclaim. In that case we let allocations go off node for the 1965179e9639SAndrew Morton * zone_reclaim_interval. Otherwise we would scan for each off-node 1966179e9639SAndrew Morton * page allocation. 
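 *
 * For example, with the default zone_reclaim_interval of 30 * HZ, a
 * zone whose last zone_reclaim() freed nothing at time T is simply
 * skipped, and allocations fall back off node, until T + 30 seconds.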
1967179e9639SAndrew Morton */ 1968179e9639SAndrew Morton if (time_before(jiffies, 1969179e9639SAndrew Morton zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) 1970179e9639SAndrew Morton return 0; 1971179e9639SAndrew Morton 1972179e9639SAndrew Morton /* 1973179e9639SAndrew Morton * Avoid concurrent zone reclaims; do not reclaim in a zone that does 1974179e9639SAndrew Morton * not have reclaimable pages; and if we should not delay the allocation, 1975179e9639SAndrew Morton * then do not scan. 1976179e9639SAndrew Morton */ 1977179e9639SAndrew Morton if (!(gfp_mask & __GFP_WAIT) || 1978179e9639SAndrew Morton zone->all_unreclaimable || 1979179e9639SAndrew Morton atomic_read(&zone->reclaim_in_progress) > 0 || 1980179e9639SAndrew Morton (current->flags & PF_MEMALLOC)) 1981179e9639SAndrew Morton return 0; 1982179e9639SAndrew Morton 1983179e9639SAndrew Morton /* 1984179e9639SAndrew Morton * Only run zone reclaim on the local zone or on zones that do not 1985179e9639SAndrew Morton * have associated processors. This will favor the local processor 1986179e9639SAndrew Morton * over remote processors and spread off node memory allocations 1987179e9639SAndrew Morton * as widely as possible. 1988179e9639SAndrew Morton */ 1989179e9639SAndrew Morton node_id = zone->zone_pgdat->node_id; 1990179e9639SAndrew Morton mask = node_to_cpumask(node_id); 1991179e9639SAndrew Morton if (!cpus_empty(mask) && node_id != numa_node_id()) 1992179e9639SAndrew Morton return 0; 1993179e9639SAndrew Morton return __zone_reclaim(zone, gfp_mask, order); 1994179e9639SAndrew Morton } 19959eeff239SChristoph Lameter #endif