11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/mm/vmscan.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie. 71da177e4SLinus Torvalds * kswapd added: 7.1.96 sct 81da177e4SLinus Torvalds * Removed kswapd_ctl limits, and swap out as many pages as needed 91da177e4SLinus Torvalds * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 101da177e4SLinus Torvalds * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 111da177e4SLinus Torvalds * Multiqueue VM started 5.8.00, Rik van Riel. 121da177e4SLinus Torvalds */ 131da177e4SLinus Torvalds 141da177e4SLinus Torvalds #include <linux/mm.h> 151da177e4SLinus Torvalds #include <linux/module.h> 161da177e4SLinus Torvalds #include <linux/slab.h> 171da177e4SLinus Torvalds #include <linux/kernel_stat.h> 181da177e4SLinus Torvalds #include <linux/swap.h> 191da177e4SLinus Torvalds #include <linux/pagemap.h> 201da177e4SLinus Torvalds #include <linux/init.h> 211da177e4SLinus Torvalds #include <linux/highmem.h> 22e129b5c2SAndrew Morton #include <linux/vmstat.h> 231da177e4SLinus Torvalds #include <linux/file.h> 241da177e4SLinus Torvalds #include <linux/writeback.h> 251da177e4SLinus Torvalds #include <linux/blkdev.h> 261da177e4SLinus Torvalds #include <linux/buffer_head.h> /* for try_to_release_page(), 271da177e4SLinus Torvalds buffer_heads_over_limit */ 281da177e4SLinus Torvalds #include <linux/mm_inline.h> 291da177e4SLinus Torvalds #include <linux/pagevec.h> 301da177e4SLinus Torvalds #include <linux/backing-dev.h> 311da177e4SLinus Torvalds #include <linux/rmap.h> 321da177e4SLinus Torvalds #include <linux/topology.h> 331da177e4SLinus Torvalds #include <linux/cpu.h> 341da177e4SLinus Torvalds #include <linux/cpuset.h> 351da177e4SLinus Torvalds #include <linux/notifier.h> 361da177e4SLinus Torvalds #include <linux/rwsem.h> 37248a0301SRafael J. Wysocki #include <linux/delay.h> 383218ae14SYasunori Goto #include <linux/kthread.h> 391da177e4SLinus Torvalds 401da177e4SLinus Torvalds #include <asm/tlbflush.h> 411da177e4SLinus Torvalds #include <asm/div64.h> 421da177e4SLinus Torvalds 431da177e4SLinus Torvalds #include <linux/swapops.h> 441da177e4SLinus Torvalds 450f8053a5SNick Piggin #include "internal.h" 460f8053a5SNick Piggin 471da177e4SLinus Torvalds struct scan_control { 481da177e4SLinus Torvalds /* Incremented by the number of inactive pages that were scanned */ 491da177e4SLinus Torvalds unsigned long nr_scanned; 501da177e4SLinus Torvalds 511da177e4SLinus Torvalds /* This context's GFP mask */ 526daa0e28SAl Viro gfp_t gfp_mask; 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds int may_writepage; 551da177e4SLinus Torvalds 56f1fd1067SChristoph Lameter /* Can pages be swapped as part of reclaim? */ 57f1fd1067SChristoph Lameter int may_swap; 58f1fd1067SChristoph Lameter 591da177e4SLinus Torvalds /* This context's SWAP_CLUSTER_MAX. If freeing memory for 601da177e4SLinus Torvalds * suspend, we effectively ignore SWAP_CLUSTER_MAX. 611da177e4SLinus Torvalds * In this context, it doesn't matter that we scan the 621da177e4SLinus Torvalds * whole list at once. */ 631da177e4SLinus Torvalds int swap_cluster_max; 64d6277db4SRafael J. Wysocki 65d6277db4SRafael J. Wysocki int swappiness; 66408d8544SNick Piggin 67408d8544SNick Piggin int all_unreclaimable; 681da177e4SLinus Torvalds }; 691da177e4SLinus Torvalds 701da177e4SLinus Torvalds /* 711da177e4SLinus Torvalds * The list of shrinker callbacks used by to apply pressure to 721da177e4SLinus Torvalds * ageable caches. 731da177e4SLinus Torvalds */ 741da177e4SLinus Torvalds struct shrinker { 751da177e4SLinus Torvalds shrinker_t shrinker; 761da177e4SLinus Torvalds struct list_head list; 771da177e4SLinus Torvalds int seeks; /* seeks to recreate an obj */ 781da177e4SLinus Torvalds long nr; /* objs pending delete */ 791da177e4SLinus Torvalds }; 801da177e4SLinus Torvalds 811da177e4SLinus Torvalds #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 821da177e4SLinus Torvalds 831da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCH 841da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) \ 851da177e4SLinus Torvalds do { \ 861da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 871da177e4SLinus Torvalds struct page *prev; \ 881da177e4SLinus Torvalds \ 891da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 901da177e4SLinus Torvalds prefetch(&prev->_field); \ 911da177e4SLinus Torvalds } \ 921da177e4SLinus Torvalds } while (0) 931da177e4SLinus Torvalds #else 941da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 951da177e4SLinus Torvalds #endif 961da177e4SLinus Torvalds 971da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCHW 981da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) \ 991da177e4SLinus Torvalds do { \ 1001da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 1011da177e4SLinus Torvalds struct page *prev; \ 1021da177e4SLinus Torvalds \ 1031da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 1041da177e4SLinus Torvalds prefetchw(&prev->_field); \ 1051da177e4SLinus Torvalds } \ 1061da177e4SLinus Torvalds } while (0) 1071da177e4SLinus Torvalds #else 1081da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 1091da177e4SLinus Torvalds #endif 1101da177e4SLinus Torvalds 1111da177e4SLinus Torvalds /* 1121da177e4SLinus Torvalds * From 0 .. 100. Higher means more swappy. 1131da177e4SLinus Torvalds */ 1141da177e4SLinus Torvalds int vm_swappiness = 60; 115bd1e22b8SAndrew Morton long vm_total_pages; /* The total number of pages which the VM controls */ 1161da177e4SLinus Torvalds 1171da177e4SLinus Torvalds static LIST_HEAD(shrinker_list); 1181da177e4SLinus Torvalds static DECLARE_RWSEM(shrinker_rwsem); 1191da177e4SLinus Torvalds 1201da177e4SLinus Torvalds /* 1211da177e4SLinus Torvalds * Add a shrinker callback to be called from the vm 1221da177e4SLinus Torvalds */ 1231da177e4SLinus Torvalds struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) 1241da177e4SLinus Torvalds { 1251da177e4SLinus Torvalds struct shrinker *shrinker; 1261da177e4SLinus Torvalds 1271da177e4SLinus Torvalds shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL); 1281da177e4SLinus Torvalds if (shrinker) { 1291da177e4SLinus Torvalds shrinker->shrinker = theshrinker; 1301da177e4SLinus Torvalds shrinker->seeks = seeks; 1311da177e4SLinus Torvalds shrinker->nr = 0; 1321da177e4SLinus Torvalds down_write(&shrinker_rwsem); 1331da177e4SLinus Torvalds list_add_tail(&shrinker->list, &shrinker_list); 1341da177e4SLinus Torvalds up_write(&shrinker_rwsem); 1351da177e4SLinus Torvalds } 1361da177e4SLinus Torvalds return shrinker; 1371da177e4SLinus Torvalds } 1381da177e4SLinus Torvalds EXPORT_SYMBOL(set_shrinker); 1391da177e4SLinus Torvalds 1401da177e4SLinus Torvalds /* 1411da177e4SLinus Torvalds * Remove one 1421da177e4SLinus Torvalds */ 1431da177e4SLinus Torvalds void remove_shrinker(struct shrinker *shrinker) 1441da177e4SLinus Torvalds { 1451da177e4SLinus Torvalds down_write(&shrinker_rwsem); 1461da177e4SLinus Torvalds list_del(&shrinker->list); 1471da177e4SLinus Torvalds up_write(&shrinker_rwsem); 1481da177e4SLinus Torvalds kfree(shrinker); 1491da177e4SLinus Torvalds } 1501da177e4SLinus Torvalds EXPORT_SYMBOL(remove_shrinker); 1511da177e4SLinus Torvalds 1521da177e4SLinus Torvalds #define SHRINK_BATCH 128 1531da177e4SLinus Torvalds /* 1541da177e4SLinus Torvalds * Call the shrink functions to age shrinkable caches 1551da177e4SLinus Torvalds * 1561da177e4SLinus Torvalds * Here we assume it costs one seek to replace a lru page and that it also 1571da177e4SLinus Torvalds * takes a seek to recreate a cache object. With this in mind we age equal 1581da177e4SLinus Torvalds * percentages of the lru and ageable caches. This should balance the seeks 1591da177e4SLinus Torvalds * generated by these structures. 1601da177e4SLinus Torvalds * 1611da177e4SLinus Torvalds * If the vm encounted mapped pages on the LRU it increase the pressure on 1621da177e4SLinus Torvalds * slab to avoid swapping. 1631da177e4SLinus Torvalds * 1641da177e4SLinus Torvalds * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 1651da177e4SLinus Torvalds * 1661da177e4SLinus Torvalds * `lru_pages' represents the number of on-LRU pages in all the zones which 1671da177e4SLinus Torvalds * are eligible for the caller's allocation attempt. It is used for balancing 1681da177e4SLinus Torvalds * slab reclaim versus page reclaim. 169b15e0905Sakpm@osdl.org * 170b15e0905Sakpm@osdl.org * Returns the number of slab objects which we shrunk. 1711da177e4SLinus Torvalds */ 17269e05944SAndrew Morton unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 17369e05944SAndrew Morton unsigned long lru_pages) 1741da177e4SLinus Torvalds { 1751da177e4SLinus Torvalds struct shrinker *shrinker; 17669e05944SAndrew Morton unsigned long ret = 0; 1771da177e4SLinus Torvalds 1781da177e4SLinus Torvalds if (scanned == 0) 1791da177e4SLinus Torvalds scanned = SWAP_CLUSTER_MAX; 1801da177e4SLinus Torvalds 1811da177e4SLinus Torvalds if (!down_read_trylock(&shrinker_rwsem)) 182b15e0905Sakpm@osdl.org return 1; /* Assume we'll be able to shrink next time */ 1831da177e4SLinus Torvalds 1841da177e4SLinus Torvalds list_for_each_entry(shrinker, &shrinker_list, list) { 1851da177e4SLinus Torvalds unsigned long long delta; 1861da177e4SLinus Torvalds unsigned long total_scan; 187ea164d73SAndrea Arcangeli unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); 1881da177e4SLinus Torvalds 1891da177e4SLinus Torvalds delta = (4 * scanned) / shrinker->seeks; 190ea164d73SAndrea Arcangeli delta *= max_pass; 1911da177e4SLinus Torvalds do_div(delta, lru_pages + 1); 1921da177e4SLinus Torvalds shrinker->nr += delta; 193ea164d73SAndrea Arcangeli if (shrinker->nr < 0) { 194ea164d73SAndrea Arcangeli printk(KERN_ERR "%s: nr=%ld\n", 195ea164d73SAndrea Arcangeli __FUNCTION__, shrinker->nr); 196ea164d73SAndrea Arcangeli shrinker->nr = max_pass; 197ea164d73SAndrea Arcangeli } 198ea164d73SAndrea Arcangeli 199ea164d73SAndrea Arcangeli /* 200ea164d73SAndrea Arcangeli * Avoid risking looping forever due to too large nr value: 201ea164d73SAndrea Arcangeli * never try to free more than twice the estimate number of 202ea164d73SAndrea Arcangeli * freeable entries. 203ea164d73SAndrea Arcangeli */ 204ea164d73SAndrea Arcangeli if (shrinker->nr > max_pass * 2) 205ea164d73SAndrea Arcangeli shrinker->nr = max_pass * 2; 2061da177e4SLinus Torvalds 2071da177e4SLinus Torvalds total_scan = shrinker->nr; 2081da177e4SLinus Torvalds shrinker->nr = 0; 2091da177e4SLinus Torvalds 2101da177e4SLinus Torvalds while (total_scan >= SHRINK_BATCH) { 2111da177e4SLinus Torvalds long this_scan = SHRINK_BATCH; 2121da177e4SLinus Torvalds int shrink_ret; 213b15e0905Sakpm@osdl.org int nr_before; 2141da177e4SLinus Torvalds 215b15e0905Sakpm@osdl.org nr_before = (*shrinker->shrinker)(0, gfp_mask); 2161da177e4SLinus Torvalds shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); 2171da177e4SLinus Torvalds if (shrink_ret == -1) 2181da177e4SLinus Torvalds break; 219b15e0905Sakpm@osdl.org if (shrink_ret < nr_before) 220b15e0905Sakpm@osdl.org ret += nr_before - shrink_ret; 221f8891e5eSChristoph Lameter count_vm_events(SLABS_SCANNED, this_scan); 2221da177e4SLinus Torvalds total_scan -= this_scan; 2231da177e4SLinus Torvalds 2241da177e4SLinus Torvalds cond_resched(); 2251da177e4SLinus Torvalds } 2261da177e4SLinus Torvalds 2271da177e4SLinus Torvalds shrinker->nr += total_scan; 2281da177e4SLinus Torvalds } 2291da177e4SLinus Torvalds up_read(&shrinker_rwsem); 230b15e0905Sakpm@osdl.org return ret; 2311da177e4SLinus Torvalds } 2321da177e4SLinus Torvalds 2331da177e4SLinus Torvalds /* Called without lock on whether page is mapped, so answer is unstable */ 2341da177e4SLinus Torvalds static inline int page_mapping_inuse(struct page *page) 2351da177e4SLinus Torvalds { 2361da177e4SLinus Torvalds struct address_space *mapping; 2371da177e4SLinus Torvalds 2381da177e4SLinus Torvalds /* Page is in somebody's page tables. */ 2391da177e4SLinus Torvalds if (page_mapped(page)) 2401da177e4SLinus Torvalds return 1; 2411da177e4SLinus Torvalds 2421da177e4SLinus Torvalds /* Be more reluctant to reclaim swapcache than pagecache */ 2431da177e4SLinus Torvalds if (PageSwapCache(page)) 2441da177e4SLinus Torvalds return 1; 2451da177e4SLinus Torvalds 2461da177e4SLinus Torvalds mapping = page_mapping(page); 2471da177e4SLinus Torvalds if (!mapping) 2481da177e4SLinus Torvalds return 0; 2491da177e4SLinus Torvalds 2501da177e4SLinus Torvalds /* File is mmap'd by somebody? */ 2511da177e4SLinus Torvalds return mapping_mapped(mapping); 2521da177e4SLinus Torvalds } 2531da177e4SLinus Torvalds 2541da177e4SLinus Torvalds static inline int is_page_cache_freeable(struct page *page) 2551da177e4SLinus Torvalds { 2561da177e4SLinus Torvalds return page_count(page) - !!PagePrivate(page) == 2; 2571da177e4SLinus Torvalds } 2581da177e4SLinus Torvalds 2591da177e4SLinus Torvalds static int may_write_to_queue(struct backing_dev_info *bdi) 2601da177e4SLinus Torvalds { 261930d9152SChristoph Lameter if (current->flags & PF_SWAPWRITE) 2621da177e4SLinus Torvalds return 1; 2631da177e4SLinus Torvalds if (!bdi_write_congested(bdi)) 2641da177e4SLinus Torvalds return 1; 2651da177e4SLinus Torvalds if (bdi == current->backing_dev_info) 2661da177e4SLinus Torvalds return 1; 2671da177e4SLinus Torvalds return 0; 2681da177e4SLinus Torvalds } 2691da177e4SLinus Torvalds 2701da177e4SLinus Torvalds /* 2711da177e4SLinus Torvalds * We detected a synchronous write error writing a page out. Probably 2721da177e4SLinus Torvalds * -ENOSPC. We need to propagate that into the address_space for a subsequent 2731da177e4SLinus Torvalds * fsync(), msync() or close(). 2741da177e4SLinus Torvalds * 2751da177e4SLinus Torvalds * The tricky part is that after writepage we cannot touch the mapping: nothing 2761da177e4SLinus Torvalds * prevents it from being freed up. But we have a ref on the page and once 2771da177e4SLinus Torvalds * that page is locked, the mapping is pinned. 2781da177e4SLinus Torvalds * 2791da177e4SLinus Torvalds * We're allowed to run sleeping lock_page() here because we know the caller has 2801da177e4SLinus Torvalds * __GFP_FS. 2811da177e4SLinus Torvalds */ 2821da177e4SLinus Torvalds static void handle_write_error(struct address_space *mapping, 2831da177e4SLinus Torvalds struct page *page, int error) 2841da177e4SLinus Torvalds { 2851da177e4SLinus Torvalds lock_page(page); 2861da177e4SLinus Torvalds if (page_mapping(page) == mapping) { 2871da177e4SLinus Torvalds if (error == -ENOSPC) 2881da177e4SLinus Torvalds set_bit(AS_ENOSPC, &mapping->flags); 2891da177e4SLinus Torvalds else 2901da177e4SLinus Torvalds set_bit(AS_EIO, &mapping->flags); 2911da177e4SLinus Torvalds } 2921da177e4SLinus Torvalds unlock_page(page); 2931da177e4SLinus Torvalds } 2941da177e4SLinus Torvalds 29504e62a29SChristoph Lameter /* possible outcome of pageout() */ 29604e62a29SChristoph Lameter typedef enum { 29704e62a29SChristoph Lameter /* failed to write page out, page is locked */ 29804e62a29SChristoph Lameter PAGE_KEEP, 29904e62a29SChristoph Lameter /* move page to the active list, page is locked */ 30004e62a29SChristoph Lameter PAGE_ACTIVATE, 30104e62a29SChristoph Lameter /* page has been sent to the disk successfully, page is unlocked */ 30204e62a29SChristoph Lameter PAGE_SUCCESS, 30304e62a29SChristoph Lameter /* page is clean and locked */ 30404e62a29SChristoph Lameter PAGE_CLEAN, 30504e62a29SChristoph Lameter } pageout_t; 30604e62a29SChristoph Lameter 3071da177e4SLinus Torvalds /* 3081742f19fSAndrew Morton * pageout is called by shrink_page_list() for each dirty page. 3091742f19fSAndrew Morton * Calls ->writepage(). 3101da177e4SLinus Torvalds */ 31104e62a29SChristoph Lameter static pageout_t pageout(struct page *page, struct address_space *mapping) 3121da177e4SLinus Torvalds { 3131da177e4SLinus Torvalds /* 3141da177e4SLinus Torvalds * If the page is dirty, only perform writeback if that write 3151da177e4SLinus Torvalds * will be non-blocking. To prevent this allocation from being 3161da177e4SLinus Torvalds * stalled by pagecache activity. But note that there may be 3171da177e4SLinus Torvalds * stalls if we need to run get_block(). We could test 3181da177e4SLinus Torvalds * PagePrivate for that. 3191da177e4SLinus Torvalds * 3201da177e4SLinus Torvalds * If this process is currently in generic_file_write() against 3211da177e4SLinus Torvalds * this page's queue, we can perform writeback even if that 3221da177e4SLinus Torvalds * will block. 3231da177e4SLinus Torvalds * 3241da177e4SLinus Torvalds * If the page is swapcache, write it back even if that would 3251da177e4SLinus Torvalds * block, for some throttling. This happens by accident, because 3261da177e4SLinus Torvalds * swap_backing_dev_info is bust: it doesn't reflect the 3271da177e4SLinus Torvalds * congestion state of the swapdevs. Easy to fix, if needed. 3281da177e4SLinus Torvalds * See swapfile.c:page_queue_congested(). 3291da177e4SLinus Torvalds */ 3301da177e4SLinus Torvalds if (!is_page_cache_freeable(page)) 3311da177e4SLinus Torvalds return PAGE_KEEP; 3321da177e4SLinus Torvalds if (!mapping) { 3331da177e4SLinus Torvalds /* 3341da177e4SLinus Torvalds * Some data journaling orphaned pages can have 3351da177e4SLinus Torvalds * page->mapping == NULL while being dirty with clean buffers. 3361da177e4SLinus Torvalds */ 337323aca6cSakpm@osdl.org if (PagePrivate(page)) { 3381da177e4SLinus Torvalds if (try_to_free_buffers(page)) { 3391da177e4SLinus Torvalds ClearPageDirty(page); 3401da177e4SLinus Torvalds printk("%s: orphaned page\n", __FUNCTION__); 3411da177e4SLinus Torvalds return PAGE_CLEAN; 3421da177e4SLinus Torvalds } 3431da177e4SLinus Torvalds } 3441da177e4SLinus Torvalds return PAGE_KEEP; 3451da177e4SLinus Torvalds } 3461da177e4SLinus Torvalds if (mapping->a_ops->writepage == NULL) 3471da177e4SLinus Torvalds return PAGE_ACTIVATE; 3481da177e4SLinus Torvalds if (!may_write_to_queue(mapping->backing_dev_info)) 3491da177e4SLinus Torvalds return PAGE_KEEP; 3501da177e4SLinus Torvalds 3511da177e4SLinus Torvalds if (clear_page_dirty_for_io(page)) { 3521da177e4SLinus Torvalds int res; 3531da177e4SLinus Torvalds struct writeback_control wbc = { 3541da177e4SLinus Torvalds .sync_mode = WB_SYNC_NONE, 3551da177e4SLinus Torvalds .nr_to_write = SWAP_CLUSTER_MAX, 356111ebb6eSOGAWA Hirofumi .range_start = 0, 357111ebb6eSOGAWA Hirofumi .range_end = LLONG_MAX, 3581da177e4SLinus Torvalds .nonblocking = 1, 3591da177e4SLinus Torvalds .for_reclaim = 1, 3601da177e4SLinus Torvalds }; 3611da177e4SLinus Torvalds 3621da177e4SLinus Torvalds SetPageReclaim(page); 3631da177e4SLinus Torvalds res = mapping->a_ops->writepage(page, &wbc); 3641da177e4SLinus Torvalds if (res < 0) 3651da177e4SLinus Torvalds handle_write_error(mapping, page, res); 366994fc28cSZach Brown if (res == AOP_WRITEPAGE_ACTIVATE) { 3671da177e4SLinus Torvalds ClearPageReclaim(page); 3681da177e4SLinus Torvalds return PAGE_ACTIVATE; 3691da177e4SLinus Torvalds } 3701da177e4SLinus Torvalds if (!PageWriteback(page)) { 3711da177e4SLinus Torvalds /* synchronous write or broken a_ops? */ 3721da177e4SLinus Torvalds ClearPageReclaim(page); 3731da177e4SLinus Torvalds } 374e129b5c2SAndrew Morton inc_zone_page_state(page, NR_VMSCAN_WRITE); 3751da177e4SLinus Torvalds return PAGE_SUCCESS; 3761da177e4SLinus Torvalds } 3771da177e4SLinus Torvalds 3781da177e4SLinus Torvalds return PAGE_CLEAN; 3791da177e4SLinus Torvalds } 3801da177e4SLinus Torvalds 381a649fd92SAndrew Morton /* 382a649fd92SAndrew Morton * Attempt to detach a locked page from its ->mapping. If it is dirty or if 383a649fd92SAndrew Morton * someone else has a ref on the page, abort and return 0. If it was 384a649fd92SAndrew Morton * successfully detached, return 1. Assumes the caller has a single ref on 385a649fd92SAndrew Morton * this page. 386a649fd92SAndrew Morton */ 387b20a3503SChristoph Lameter int remove_mapping(struct address_space *mapping, struct page *page) 38849d2e9ccSChristoph Lameter { 38928e4d965SNick Piggin BUG_ON(!PageLocked(page)); 39028e4d965SNick Piggin BUG_ON(mapping != page_mapping(page)); 39149d2e9ccSChristoph Lameter 39249d2e9ccSChristoph Lameter write_lock_irq(&mapping->tree_lock); 39349d2e9ccSChristoph Lameter /* 3940fd0e6b0SNick Piggin * The non racy check for a busy page. 3950fd0e6b0SNick Piggin * 3960fd0e6b0SNick Piggin * Must be careful with the order of the tests. When someone has 3970fd0e6b0SNick Piggin * a ref to the page, it may be possible that they dirty it then 3980fd0e6b0SNick Piggin * drop the reference. So if PageDirty is tested before page_count 3990fd0e6b0SNick Piggin * here, then the following race may occur: 4000fd0e6b0SNick Piggin * 4010fd0e6b0SNick Piggin * get_user_pages(&page); 4020fd0e6b0SNick Piggin * [user mapping goes away] 4030fd0e6b0SNick Piggin * write_to(page); 4040fd0e6b0SNick Piggin * !PageDirty(page) [good] 4050fd0e6b0SNick Piggin * SetPageDirty(page); 4060fd0e6b0SNick Piggin * put_page(page); 4070fd0e6b0SNick Piggin * !page_count(page) [good, discard it] 4080fd0e6b0SNick Piggin * 4090fd0e6b0SNick Piggin * [oops, our write_to data is lost] 4100fd0e6b0SNick Piggin * 4110fd0e6b0SNick Piggin * Reversing the order of the tests ensures such a situation cannot 4120fd0e6b0SNick Piggin * escape unnoticed. The smp_rmb is needed to ensure the page->flags 4130fd0e6b0SNick Piggin * load is not satisfied before that of page->_count. 4140fd0e6b0SNick Piggin * 4150fd0e6b0SNick Piggin * Note that if SetPageDirty is always performed via set_page_dirty, 4160fd0e6b0SNick Piggin * and thus under tree_lock, then this ordering is not required. 41749d2e9ccSChristoph Lameter */ 41849d2e9ccSChristoph Lameter if (unlikely(page_count(page) != 2)) 41949d2e9ccSChristoph Lameter goto cannot_free; 42049d2e9ccSChristoph Lameter smp_rmb(); 42149d2e9ccSChristoph Lameter if (unlikely(PageDirty(page))) 42249d2e9ccSChristoph Lameter goto cannot_free; 42349d2e9ccSChristoph Lameter 42449d2e9ccSChristoph Lameter if (PageSwapCache(page)) { 42549d2e9ccSChristoph Lameter swp_entry_t swap = { .val = page_private(page) }; 42649d2e9ccSChristoph Lameter __delete_from_swap_cache(page); 42749d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 42849d2e9ccSChristoph Lameter swap_free(swap); 42949d2e9ccSChristoph Lameter __put_page(page); /* The pagecache ref */ 43049d2e9ccSChristoph Lameter return 1; 43149d2e9ccSChristoph Lameter } 43249d2e9ccSChristoph Lameter 43349d2e9ccSChristoph Lameter __remove_from_page_cache(page); 43449d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 43549d2e9ccSChristoph Lameter __put_page(page); 43649d2e9ccSChristoph Lameter return 1; 43749d2e9ccSChristoph Lameter 43849d2e9ccSChristoph Lameter cannot_free: 43949d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 44049d2e9ccSChristoph Lameter return 0; 44149d2e9ccSChristoph Lameter } 44249d2e9ccSChristoph Lameter 4431da177e4SLinus Torvalds /* 4441742f19fSAndrew Morton * shrink_page_list() returns the number of reclaimed pages 4451da177e4SLinus Torvalds */ 4461742f19fSAndrew Morton static unsigned long shrink_page_list(struct list_head *page_list, 44769e05944SAndrew Morton struct scan_control *sc) 4481da177e4SLinus Torvalds { 4491da177e4SLinus Torvalds LIST_HEAD(ret_pages); 4501da177e4SLinus Torvalds struct pagevec freed_pvec; 4511da177e4SLinus Torvalds int pgactivate = 0; 45205ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 4531da177e4SLinus Torvalds 4541da177e4SLinus Torvalds cond_resched(); 4551da177e4SLinus Torvalds 4561da177e4SLinus Torvalds pagevec_init(&freed_pvec, 1); 4571da177e4SLinus Torvalds while (!list_empty(page_list)) { 4581da177e4SLinus Torvalds struct address_space *mapping; 4591da177e4SLinus Torvalds struct page *page; 4601da177e4SLinus Torvalds int may_enter_fs; 4611da177e4SLinus Torvalds int referenced; 4621da177e4SLinus Torvalds 4631da177e4SLinus Torvalds cond_resched(); 4641da177e4SLinus Torvalds 4651da177e4SLinus Torvalds page = lru_to_page(page_list); 4661da177e4SLinus Torvalds list_del(&page->lru); 4671da177e4SLinus Torvalds 4681da177e4SLinus Torvalds if (TestSetPageLocked(page)) 4691da177e4SLinus Torvalds goto keep; 4701da177e4SLinus Torvalds 471725d704eSNick Piggin VM_BUG_ON(PageActive(page)); 4721da177e4SLinus Torvalds 4731da177e4SLinus Torvalds sc->nr_scanned++; 47480e43426SChristoph Lameter 47580e43426SChristoph Lameter if (!sc->may_swap && page_mapped(page)) 47680e43426SChristoph Lameter goto keep_locked; 47780e43426SChristoph Lameter 4781da177e4SLinus Torvalds /* Double the slab pressure for mapped and swapcache pages */ 4791da177e4SLinus Torvalds if (page_mapped(page) || PageSwapCache(page)) 4801da177e4SLinus Torvalds sc->nr_scanned++; 4811da177e4SLinus Torvalds 4821da177e4SLinus Torvalds if (PageWriteback(page)) 4831da177e4SLinus Torvalds goto keep_locked; 4841da177e4SLinus Torvalds 485f7b7fd8fSRik van Riel referenced = page_referenced(page, 1); 4861da177e4SLinus Torvalds /* In active use or really unfreeable? Activate it. */ 4871da177e4SLinus Torvalds if (referenced && page_mapping_inuse(page)) 4881da177e4SLinus Torvalds goto activate_locked; 4891da177e4SLinus Torvalds 4901da177e4SLinus Torvalds #ifdef CONFIG_SWAP 4911da177e4SLinus Torvalds /* 4921da177e4SLinus Torvalds * Anonymous process memory has backing store? 4931da177e4SLinus Torvalds * Try to allocate it some swap space here. 4941da177e4SLinus Torvalds */ 4956e5ef1a9SChristoph Lameter if (PageAnon(page) && !PageSwapCache(page)) 4961480a540SChristoph Lameter if (!add_to_swap(page, GFP_ATOMIC)) 4971da177e4SLinus Torvalds goto activate_locked; 4981da177e4SLinus Torvalds #endif /* CONFIG_SWAP */ 4991da177e4SLinus Torvalds 5001da177e4SLinus Torvalds mapping = page_mapping(page); 5011da177e4SLinus Torvalds may_enter_fs = (sc->gfp_mask & __GFP_FS) || 5021da177e4SLinus Torvalds (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 5031da177e4SLinus Torvalds 5041da177e4SLinus Torvalds /* 5051da177e4SLinus Torvalds * The page is mapped into the page tables of one or more 5061da177e4SLinus Torvalds * processes. Try to unmap it here. 5071da177e4SLinus Torvalds */ 5081da177e4SLinus Torvalds if (page_mapped(page) && mapping) { 509a48d07afSChristoph Lameter switch (try_to_unmap(page, 0)) { 5101da177e4SLinus Torvalds case SWAP_FAIL: 5111da177e4SLinus Torvalds goto activate_locked; 5121da177e4SLinus Torvalds case SWAP_AGAIN: 5131da177e4SLinus Torvalds goto keep_locked; 5141da177e4SLinus Torvalds case SWAP_SUCCESS: 5151da177e4SLinus Torvalds ; /* try to free the page below */ 5161da177e4SLinus Torvalds } 5171da177e4SLinus Torvalds } 5181da177e4SLinus Torvalds 5191da177e4SLinus Torvalds if (PageDirty(page)) { 5201da177e4SLinus Torvalds if (referenced) 5211da177e4SLinus Torvalds goto keep_locked; 5221da177e4SLinus Torvalds if (!may_enter_fs) 5231da177e4SLinus Torvalds goto keep_locked; 52452a8363eSChristoph Lameter if (!sc->may_writepage) 5251da177e4SLinus Torvalds goto keep_locked; 5261da177e4SLinus Torvalds 5271da177e4SLinus Torvalds /* Page is dirty, try to write it out here */ 5281da177e4SLinus Torvalds switch(pageout(page, mapping)) { 5291da177e4SLinus Torvalds case PAGE_KEEP: 5301da177e4SLinus Torvalds goto keep_locked; 5311da177e4SLinus Torvalds case PAGE_ACTIVATE: 5321da177e4SLinus Torvalds goto activate_locked; 5331da177e4SLinus Torvalds case PAGE_SUCCESS: 5341da177e4SLinus Torvalds if (PageWriteback(page) || PageDirty(page)) 5351da177e4SLinus Torvalds goto keep; 5361da177e4SLinus Torvalds /* 5371da177e4SLinus Torvalds * A synchronous write - probably a ramdisk. Go 5381da177e4SLinus Torvalds * ahead and try to reclaim the page. 5391da177e4SLinus Torvalds */ 5401da177e4SLinus Torvalds if (TestSetPageLocked(page)) 5411da177e4SLinus Torvalds goto keep; 5421da177e4SLinus Torvalds if (PageDirty(page) || PageWriteback(page)) 5431da177e4SLinus Torvalds goto keep_locked; 5441da177e4SLinus Torvalds mapping = page_mapping(page); 5451da177e4SLinus Torvalds case PAGE_CLEAN: 5461da177e4SLinus Torvalds ; /* try to free the page below */ 5471da177e4SLinus Torvalds } 5481da177e4SLinus Torvalds } 5491da177e4SLinus Torvalds 5501da177e4SLinus Torvalds /* 5511da177e4SLinus Torvalds * If the page has buffers, try to free the buffer mappings 5521da177e4SLinus Torvalds * associated with this page. If we succeed we try to free 5531da177e4SLinus Torvalds * the page as well. 5541da177e4SLinus Torvalds * 5551da177e4SLinus Torvalds * We do this even if the page is PageDirty(). 5561da177e4SLinus Torvalds * try_to_release_page() does not perform I/O, but it is 5571da177e4SLinus Torvalds * possible for a page to have PageDirty set, but it is actually 5581da177e4SLinus Torvalds * clean (all its buffers are clean). This happens if the 5591da177e4SLinus Torvalds * buffers were written out directly, with submit_bh(). ext3 5601da177e4SLinus Torvalds * will do this, as well as the blockdev mapping. 5611da177e4SLinus Torvalds * try_to_release_page() will discover that cleanness and will 5621da177e4SLinus Torvalds * drop the buffers and mark the page clean - it can be freed. 5631da177e4SLinus Torvalds * 5641da177e4SLinus Torvalds * Rarely, pages can have buffers and no ->mapping. These are 5651da177e4SLinus Torvalds * the pages which were not successfully invalidated in 5661da177e4SLinus Torvalds * truncate_complete_page(). We try to drop those buffers here 5671da177e4SLinus Torvalds * and if that worked, and the page is no longer mapped into 5681da177e4SLinus Torvalds * process address space (page_count == 1) it can be freed. 5691da177e4SLinus Torvalds * Otherwise, leave the page on the LRU so it is swappable. 5701da177e4SLinus Torvalds */ 5711da177e4SLinus Torvalds if (PagePrivate(page)) { 5721da177e4SLinus Torvalds if (!try_to_release_page(page, sc->gfp_mask)) 5731da177e4SLinus Torvalds goto activate_locked; 5741da177e4SLinus Torvalds if (!mapping && page_count(page) == 1) 5751da177e4SLinus Torvalds goto free_it; 5761da177e4SLinus Torvalds } 5771da177e4SLinus Torvalds 57828e4d965SNick Piggin if (!mapping || !remove_mapping(mapping, page)) 57949d2e9ccSChristoph Lameter goto keep_locked; 5801da177e4SLinus Torvalds 5811da177e4SLinus Torvalds free_it: 5821da177e4SLinus Torvalds unlock_page(page); 58305ff5137SAndrew Morton nr_reclaimed++; 5841da177e4SLinus Torvalds if (!pagevec_add(&freed_pvec, page)) 5851da177e4SLinus Torvalds __pagevec_release_nonlru(&freed_pvec); 5861da177e4SLinus Torvalds continue; 5871da177e4SLinus Torvalds 5881da177e4SLinus Torvalds activate_locked: 5891da177e4SLinus Torvalds SetPageActive(page); 5901da177e4SLinus Torvalds pgactivate++; 5911da177e4SLinus Torvalds keep_locked: 5921da177e4SLinus Torvalds unlock_page(page); 5931da177e4SLinus Torvalds keep: 5941da177e4SLinus Torvalds list_add(&page->lru, &ret_pages); 595725d704eSNick Piggin VM_BUG_ON(PageLRU(page)); 5961da177e4SLinus Torvalds } 5971da177e4SLinus Torvalds list_splice(&ret_pages, page_list); 5981da177e4SLinus Torvalds if (pagevec_count(&freed_pvec)) 5991da177e4SLinus Torvalds __pagevec_release_nonlru(&freed_pvec); 600f8891e5eSChristoph Lameter count_vm_events(PGACTIVATE, pgactivate); 60105ff5137SAndrew Morton return nr_reclaimed; 6021da177e4SLinus Torvalds } 6031da177e4SLinus Torvalds 60449d2e9ccSChristoph Lameter /* 6051da177e4SLinus Torvalds * zone->lru_lock is heavily contended. Some of the functions that 6061da177e4SLinus Torvalds * shrink the lists perform better by taking out a batch of pages 6071da177e4SLinus Torvalds * and working on them outside the LRU lock. 6081da177e4SLinus Torvalds * 6091da177e4SLinus Torvalds * For pagecache intensive workloads, this function is the hottest 6101da177e4SLinus Torvalds * spot in the kernel (apart from copy_*_user functions). 6111da177e4SLinus Torvalds * 6121da177e4SLinus Torvalds * Appropriate locks must be held before calling this function. 6131da177e4SLinus Torvalds * 6141da177e4SLinus Torvalds * @nr_to_scan: The number of pages to look through on the list. 6151da177e4SLinus Torvalds * @src: The LRU list to pull pages off. 6161da177e4SLinus Torvalds * @dst: The temp list to put pages on to. 6171da177e4SLinus Torvalds * @scanned: The number of pages that were scanned. 6181da177e4SLinus Torvalds * 6191da177e4SLinus Torvalds * returns how many pages were moved onto *@dst. 6201da177e4SLinus Torvalds */ 62169e05944SAndrew Morton static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 62269e05944SAndrew Morton struct list_head *src, struct list_head *dst, 62369e05944SAndrew Morton unsigned long *scanned) 6241da177e4SLinus Torvalds { 62569e05944SAndrew Morton unsigned long nr_taken = 0; 6261da177e4SLinus Torvalds struct page *page; 627c9b02d97SWu Fengguang unsigned long scan; 6281da177e4SLinus Torvalds 629c9b02d97SWu Fengguang for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 6307c8ee9a8SNick Piggin struct list_head *target; 6311da177e4SLinus Torvalds page = lru_to_page(src); 6321da177e4SLinus Torvalds prefetchw_prev_lru_page(page, src, flags); 6331da177e4SLinus Torvalds 634725d704eSNick Piggin VM_BUG_ON(!PageLRU(page)); 6358d438f96SNick Piggin 636053837fcSNick Piggin list_del(&page->lru); 6377c8ee9a8SNick Piggin target = src; 6387c8ee9a8SNick Piggin if (likely(get_page_unless_zero(page))) { 639053837fcSNick Piggin /* 6407c8ee9a8SNick Piggin * Be careful not to clear PageLRU until after we're 6417c8ee9a8SNick Piggin * sure the page is not being freed elsewhere -- the 6427c8ee9a8SNick Piggin * page release code relies on it. 64346453a6eSNick Piggin */ 6448d438f96SNick Piggin ClearPageLRU(page); 6457c8ee9a8SNick Piggin target = dst; 646053837fcSNick Piggin nr_taken++; 6477c8ee9a8SNick Piggin } /* else it is being freed elsewhere */ 6487c8ee9a8SNick Piggin 6497c8ee9a8SNick Piggin list_add(&page->lru, target); 6501da177e4SLinus Torvalds } 6511da177e4SLinus Torvalds 6521da177e4SLinus Torvalds *scanned = scan; 6531da177e4SLinus Torvalds return nr_taken; 6541da177e4SLinus Torvalds } 6551da177e4SLinus Torvalds 6561da177e4SLinus Torvalds /* 6571742f19fSAndrew Morton * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 6581742f19fSAndrew Morton * of reclaimed pages 6591da177e4SLinus Torvalds */ 6601742f19fSAndrew Morton static unsigned long shrink_inactive_list(unsigned long max_scan, 6611742f19fSAndrew Morton struct zone *zone, struct scan_control *sc) 6621da177e4SLinus Torvalds { 6631da177e4SLinus Torvalds LIST_HEAD(page_list); 6641da177e4SLinus Torvalds struct pagevec pvec; 66569e05944SAndrew Morton unsigned long nr_scanned = 0; 66605ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 6671da177e4SLinus Torvalds 6681da177e4SLinus Torvalds pagevec_init(&pvec, 1); 6691da177e4SLinus Torvalds 6701da177e4SLinus Torvalds lru_add_drain(); 6711da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 67269e05944SAndrew Morton do { 6731da177e4SLinus Torvalds struct page *page; 67469e05944SAndrew Morton unsigned long nr_taken; 67569e05944SAndrew Morton unsigned long nr_scan; 67669e05944SAndrew Morton unsigned long nr_freed; 6771da177e4SLinus Torvalds 6781da177e4SLinus Torvalds nr_taken = isolate_lru_pages(sc->swap_cluster_max, 6791da177e4SLinus Torvalds &zone->inactive_list, 6801da177e4SLinus Torvalds &page_list, &nr_scan); 6811da177e4SLinus Torvalds zone->nr_inactive -= nr_taken; 6821da177e4SLinus Torvalds zone->pages_scanned += nr_scan; 6831da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 6841da177e4SLinus Torvalds 68569e05944SAndrew Morton nr_scanned += nr_scan; 6861742f19fSAndrew Morton nr_freed = shrink_page_list(&page_list, sc); 68705ff5137SAndrew Morton nr_reclaimed += nr_freed; 688a74609faSNick Piggin local_irq_disable(); 689a74609faSNick Piggin if (current_is_kswapd()) { 690f8891e5eSChristoph Lameter __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); 691f8891e5eSChristoph Lameter __count_vm_events(KSWAPD_STEAL, nr_freed); 692a74609faSNick Piggin } else 693f8891e5eSChristoph Lameter __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); 694f8891e5eSChristoph Lameter __count_vm_events(PGACTIVATE, nr_freed); 695a74609faSNick Piggin 696fb8d14e1SWu Fengguang if (nr_taken == 0) 697fb8d14e1SWu Fengguang goto done; 698fb8d14e1SWu Fengguang 699a74609faSNick Piggin spin_lock(&zone->lru_lock); 7001da177e4SLinus Torvalds /* 7011da177e4SLinus Torvalds * Put back any unfreeable pages. 7021da177e4SLinus Torvalds */ 7031da177e4SLinus Torvalds while (!list_empty(&page_list)) { 7041da177e4SLinus Torvalds page = lru_to_page(&page_list); 705725d704eSNick Piggin VM_BUG_ON(PageLRU(page)); 7068d438f96SNick Piggin SetPageLRU(page); 7071da177e4SLinus Torvalds list_del(&page->lru); 7081da177e4SLinus Torvalds if (PageActive(page)) 7091da177e4SLinus Torvalds add_page_to_active_list(zone, page); 7101da177e4SLinus Torvalds else 7111da177e4SLinus Torvalds add_page_to_inactive_list(zone, page); 7121da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 7131da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 7141da177e4SLinus Torvalds __pagevec_release(&pvec); 7151da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 7161da177e4SLinus Torvalds } 7171da177e4SLinus Torvalds } 71869e05944SAndrew Morton } while (nr_scanned < max_scan); 719fb8d14e1SWu Fengguang spin_unlock(&zone->lru_lock); 7201da177e4SLinus Torvalds done: 721fb8d14e1SWu Fengguang local_irq_enable(); 7221da177e4SLinus Torvalds pagevec_release(&pvec); 72305ff5137SAndrew Morton return nr_reclaimed; 7241da177e4SLinus Torvalds } 7251da177e4SLinus Torvalds 726*3bb1a852SMartin Bligh /* 727*3bb1a852SMartin Bligh * We are about to scan this zone at a certain priority level. If that priority 728*3bb1a852SMartin Bligh * level is smaller (ie: more urgent) than the previous priority, then note 729*3bb1a852SMartin Bligh * that priority level within the zone. This is done so that when the next 730*3bb1a852SMartin Bligh * process comes in to scan this zone, it will immediately start out at this 731*3bb1a852SMartin Bligh * priority level rather than having to build up its own scanning priority. 732*3bb1a852SMartin Bligh * Here, this priority affects only the reclaim-mapped threshold. 733*3bb1a852SMartin Bligh */ 734*3bb1a852SMartin Bligh static inline void note_zone_scanning_priority(struct zone *zone, int priority) 735*3bb1a852SMartin Bligh { 736*3bb1a852SMartin Bligh if (priority < zone->prev_priority) 737*3bb1a852SMartin Bligh zone->prev_priority = priority; 738*3bb1a852SMartin Bligh } 739*3bb1a852SMartin Bligh 7404ff1ffb4SNick Piggin static inline int zone_is_near_oom(struct zone *zone) 7414ff1ffb4SNick Piggin { 7424ff1ffb4SNick Piggin return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3; 7434ff1ffb4SNick Piggin } 7444ff1ffb4SNick Piggin 7451da177e4SLinus Torvalds /* 7461da177e4SLinus Torvalds * This moves pages from the active list to the inactive list. 7471da177e4SLinus Torvalds * 7481da177e4SLinus Torvalds * We move them the other way if the page is referenced by one or more 7491da177e4SLinus Torvalds * processes, from rmap. 7501da177e4SLinus Torvalds * 7511da177e4SLinus Torvalds * If the pages are mostly unmapped, the processing is fast and it is 7521da177e4SLinus Torvalds * appropriate to hold zone->lru_lock across the whole operation. But if 7531da177e4SLinus Torvalds * the pages are mapped, the processing is slow (page_referenced()) so we 7541da177e4SLinus Torvalds * should drop zone->lru_lock around each page. It's impossible to balance 7551da177e4SLinus Torvalds * this, so instead we remove the pages from the LRU while processing them. 7561da177e4SLinus Torvalds * It is safe to rely on PG_active against the non-LRU pages in here because 7571da177e4SLinus Torvalds * nobody will play with that bit on a non-LRU page. 7581da177e4SLinus Torvalds * 7591da177e4SLinus Torvalds * The downside is that we have to touch page->_count against each page. 7601da177e4SLinus Torvalds * But we had to alter page->flags anyway. 7611da177e4SLinus Torvalds */ 7621742f19fSAndrew Morton static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 76369e05944SAndrew Morton struct scan_control *sc) 7641da177e4SLinus Torvalds { 76569e05944SAndrew Morton unsigned long pgmoved; 7661da177e4SLinus Torvalds int pgdeactivate = 0; 76769e05944SAndrew Morton unsigned long pgscanned; 7681da177e4SLinus Torvalds LIST_HEAD(l_hold); /* The pages which were snipped off */ 7691da177e4SLinus Torvalds LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ 7701da177e4SLinus Torvalds LIST_HEAD(l_active); /* Pages to go onto the active_list */ 7711da177e4SLinus Torvalds struct page *page; 7721da177e4SLinus Torvalds struct pagevec pvec; 7731da177e4SLinus Torvalds int reclaim_mapped = 0; 7742903fb16SChristoph Lameter 7756e5ef1a9SChristoph Lameter if (sc->may_swap) { 7761da177e4SLinus Torvalds long mapped_ratio; 7771da177e4SLinus Torvalds long distress; 7781da177e4SLinus Torvalds long swap_tendency; 7791da177e4SLinus Torvalds 7804ff1ffb4SNick Piggin if (zone_is_near_oom(zone)) 7814ff1ffb4SNick Piggin goto force_reclaim_mapped; 7824ff1ffb4SNick Piggin 7832903fb16SChristoph Lameter /* 7842903fb16SChristoph Lameter * `distress' is a measure of how much trouble we're having 7852903fb16SChristoph Lameter * reclaiming pages. 0 -> no problems. 100 -> great trouble. 7862903fb16SChristoph Lameter */ 7872903fb16SChristoph Lameter distress = 100 >> zone->prev_priority; 7882903fb16SChristoph Lameter 7892903fb16SChristoph Lameter /* 7902903fb16SChristoph Lameter * The point of this algorithm is to decide when to start 7912903fb16SChristoph Lameter * reclaiming mapped memory instead of just pagecache. Work out 7922903fb16SChristoph Lameter * how much memory 7932903fb16SChristoph Lameter * is mapped. 7942903fb16SChristoph Lameter */ 795f3dbd344SChristoph Lameter mapped_ratio = ((global_page_state(NR_FILE_MAPPED) + 796f3dbd344SChristoph Lameter global_page_state(NR_ANON_PAGES)) * 100) / 797bf02cf4bSChristoph Lameter vm_total_pages; 7982903fb16SChristoph Lameter 7992903fb16SChristoph Lameter /* 8002903fb16SChristoph Lameter * Now decide how much we really want to unmap some pages. The 8012903fb16SChristoph Lameter * mapped ratio is downgraded - just because there's a lot of 8022903fb16SChristoph Lameter * mapped memory doesn't necessarily mean that page reclaim 8032903fb16SChristoph Lameter * isn't succeeding. 8042903fb16SChristoph Lameter * 8052903fb16SChristoph Lameter * The distress ratio is important - we don't want to start 8062903fb16SChristoph Lameter * going oom. 8072903fb16SChristoph Lameter * 8082903fb16SChristoph Lameter * A 100% value of vm_swappiness overrides this algorithm 8092903fb16SChristoph Lameter * altogether. 8102903fb16SChristoph Lameter */ 811d6277db4SRafael J. Wysocki swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; 8122903fb16SChristoph Lameter 8132903fb16SChristoph Lameter /* 8142903fb16SChristoph Lameter * Now use this metric to decide whether to start moving mapped 8152903fb16SChristoph Lameter * memory onto the inactive list. 8162903fb16SChristoph Lameter */ 8172903fb16SChristoph Lameter if (swap_tendency >= 100) 8184ff1ffb4SNick Piggin force_reclaim_mapped: 8192903fb16SChristoph Lameter reclaim_mapped = 1; 8202903fb16SChristoph Lameter } 8212903fb16SChristoph Lameter 8221da177e4SLinus Torvalds lru_add_drain(); 8231da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 8241da177e4SLinus Torvalds pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, 8251da177e4SLinus Torvalds &l_hold, &pgscanned); 8261da177e4SLinus Torvalds zone->pages_scanned += pgscanned; 8271da177e4SLinus Torvalds zone->nr_active -= pgmoved; 8281da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 8291da177e4SLinus Torvalds 8301da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 8311da177e4SLinus Torvalds cond_resched(); 8321da177e4SLinus Torvalds page = lru_to_page(&l_hold); 8331da177e4SLinus Torvalds list_del(&page->lru); 8341da177e4SLinus Torvalds if (page_mapped(page)) { 8351da177e4SLinus Torvalds if (!reclaim_mapped || 8361da177e4SLinus Torvalds (total_swap_pages == 0 && PageAnon(page)) || 837f7b7fd8fSRik van Riel page_referenced(page, 0)) { 8381da177e4SLinus Torvalds list_add(&page->lru, &l_active); 8391da177e4SLinus Torvalds continue; 8401da177e4SLinus Torvalds } 8411da177e4SLinus Torvalds } 8421da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 8431da177e4SLinus Torvalds } 8441da177e4SLinus Torvalds 8451da177e4SLinus Torvalds pagevec_init(&pvec, 1); 8461da177e4SLinus Torvalds pgmoved = 0; 8471da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 8481da177e4SLinus Torvalds while (!list_empty(&l_inactive)) { 8491da177e4SLinus Torvalds page = lru_to_page(&l_inactive); 8501da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_inactive, flags); 851725d704eSNick Piggin VM_BUG_ON(PageLRU(page)); 8528d438f96SNick Piggin SetPageLRU(page); 853725d704eSNick Piggin VM_BUG_ON(!PageActive(page)); 8544c84cacfSNick Piggin ClearPageActive(page); 8554c84cacfSNick Piggin 8561da177e4SLinus Torvalds list_move(&page->lru, &zone->inactive_list); 8571da177e4SLinus Torvalds pgmoved++; 8581da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 8591da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 8601da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 8611da177e4SLinus Torvalds pgdeactivate += pgmoved; 8621da177e4SLinus Torvalds pgmoved = 0; 8631da177e4SLinus Torvalds if (buffer_heads_over_limit) 8641da177e4SLinus Torvalds pagevec_strip(&pvec); 8651da177e4SLinus Torvalds __pagevec_release(&pvec); 8661da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 8671da177e4SLinus Torvalds } 8681da177e4SLinus Torvalds } 8691da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 8701da177e4SLinus Torvalds pgdeactivate += pgmoved; 8711da177e4SLinus Torvalds if (buffer_heads_over_limit) { 8721da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 8731da177e4SLinus Torvalds pagevec_strip(&pvec); 8741da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 8751da177e4SLinus Torvalds } 8761da177e4SLinus Torvalds 8771da177e4SLinus Torvalds pgmoved = 0; 8781da177e4SLinus Torvalds while (!list_empty(&l_active)) { 8791da177e4SLinus Torvalds page = lru_to_page(&l_active); 8801da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_active, flags); 881725d704eSNick Piggin VM_BUG_ON(PageLRU(page)); 8828d438f96SNick Piggin SetPageLRU(page); 883725d704eSNick Piggin VM_BUG_ON(!PageActive(page)); 8841da177e4SLinus Torvalds list_move(&page->lru, &zone->active_list); 8851da177e4SLinus Torvalds pgmoved++; 8861da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 8871da177e4SLinus Torvalds zone->nr_active += pgmoved; 8881da177e4SLinus Torvalds pgmoved = 0; 8891da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 8901da177e4SLinus Torvalds __pagevec_release(&pvec); 8911da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 8921da177e4SLinus Torvalds } 8931da177e4SLinus Torvalds } 8941da177e4SLinus Torvalds zone->nr_active += pgmoved; 8951da177e4SLinus Torvalds 896f8891e5eSChristoph Lameter __count_zone_vm_events(PGREFILL, zone, pgscanned); 897f8891e5eSChristoph Lameter __count_vm_events(PGDEACTIVATE, pgdeactivate); 898f8891e5eSChristoph Lameter spin_unlock_irq(&zone->lru_lock); 899a74609faSNick Piggin 900a74609faSNick Piggin pagevec_release(&pvec); 9011da177e4SLinus Torvalds } 9021da177e4SLinus Torvalds 9031da177e4SLinus Torvalds /* 9041da177e4SLinus Torvalds * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 9051da177e4SLinus Torvalds */ 90605ff5137SAndrew Morton static unsigned long shrink_zone(int priority, struct zone *zone, 90769e05944SAndrew Morton struct scan_control *sc) 9081da177e4SLinus Torvalds { 9091da177e4SLinus Torvalds unsigned long nr_active; 9101da177e4SLinus Torvalds unsigned long nr_inactive; 9118695949aSChristoph Lameter unsigned long nr_to_scan; 91205ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 9131da177e4SLinus Torvalds 91453e9a615SMartin Hicks atomic_inc(&zone->reclaim_in_progress); 91553e9a615SMartin Hicks 9161da177e4SLinus Torvalds /* 9171da177e4SLinus Torvalds * Add one to `nr_to_scan' just to make sure that the kernel will 9181da177e4SLinus Torvalds * slowly sift through the active list. 9191da177e4SLinus Torvalds */ 9208695949aSChristoph Lameter zone->nr_scan_active += (zone->nr_active >> priority) + 1; 9211da177e4SLinus Torvalds nr_active = zone->nr_scan_active; 9221da177e4SLinus Torvalds if (nr_active >= sc->swap_cluster_max) 9231da177e4SLinus Torvalds zone->nr_scan_active = 0; 9241da177e4SLinus Torvalds else 9251da177e4SLinus Torvalds nr_active = 0; 9261da177e4SLinus Torvalds 9278695949aSChristoph Lameter zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1; 9281da177e4SLinus Torvalds nr_inactive = zone->nr_scan_inactive; 9291da177e4SLinus Torvalds if (nr_inactive >= sc->swap_cluster_max) 9301da177e4SLinus Torvalds zone->nr_scan_inactive = 0; 9311da177e4SLinus Torvalds else 9321da177e4SLinus Torvalds nr_inactive = 0; 9331da177e4SLinus Torvalds 9341da177e4SLinus Torvalds while (nr_active || nr_inactive) { 9351da177e4SLinus Torvalds if (nr_active) { 9368695949aSChristoph Lameter nr_to_scan = min(nr_active, 9371da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 9388695949aSChristoph Lameter nr_active -= nr_to_scan; 9391742f19fSAndrew Morton shrink_active_list(nr_to_scan, zone, sc); 9401da177e4SLinus Torvalds } 9411da177e4SLinus Torvalds 9421da177e4SLinus Torvalds if (nr_inactive) { 9438695949aSChristoph Lameter nr_to_scan = min(nr_inactive, 9441da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 9458695949aSChristoph Lameter nr_inactive -= nr_to_scan; 9461742f19fSAndrew Morton nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, 9471742f19fSAndrew Morton sc); 9481da177e4SLinus Torvalds } 9491da177e4SLinus Torvalds } 9501da177e4SLinus Torvalds 9511da177e4SLinus Torvalds throttle_vm_writeout(); 95253e9a615SMartin Hicks 95353e9a615SMartin Hicks atomic_dec(&zone->reclaim_in_progress); 95405ff5137SAndrew Morton return nr_reclaimed; 9551da177e4SLinus Torvalds } 9561da177e4SLinus Torvalds 9571da177e4SLinus Torvalds /* 9581da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 9591da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 9601da177e4SLinus Torvalds * request. 9611da177e4SLinus Torvalds * 9621da177e4SLinus Torvalds * We reclaim from a zone even if that zone is over pages_high. Because: 9631da177e4SLinus Torvalds * a) The caller may be trying to free *extra* pages to satisfy a higher-order 9641da177e4SLinus Torvalds * allocation or 9651da177e4SLinus Torvalds * b) The zones may be over pages_high but they must go *over* pages_high to 9661da177e4SLinus Torvalds * satisfy the `incremental min' zone defense algorithm. 9671da177e4SLinus Torvalds * 9681da177e4SLinus Torvalds * Returns the number of reclaimed pages. 9691da177e4SLinus Torvalds * 9701da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 9711da177e4SLinus Torvalds * scan then give up on it. 9721da177e4SLinus Torvalds */ 9731742f19fSAndrew Morton static unsigned long shrink_zones(int priority, struct zone **zones, 97469e05944SAndrew Morton struct scan_control *sc) 9751da177e4SLinus Torvalds { 97605ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 9771da177e4SLinus Torvalds int i; 9781da177e4SLinus Torvalds 979408d8544SNick Piggin sc->all_unreclaimable = 1; 9801da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 9811da177e4SLinus Torvalds struct zone *zone = zones[i]; 9821da177e4SLinus Torvalds 983f3fe6512SCon Kolivas if (!populated_zone(zone)) 9841da177e4SLinus Torvalds continue; 9851da177e4SLinus Torvalds 9869bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 9871da177e4SLinus Torvalds continue; 9881da177e4SLinus Torvalds 989*3bb1a852SMartin Bligh note_zone_scanning_priority(zone, priority); 9901da177e4SLinus Torvalds 9918695949aSChristoph Lameter if (zone->all_unreclaimable && priority != DEF_PRIORITY) 9921da177e4SLinus Torvalds continue; /* Let kswapd poll it */ 9931da177e4SLinus Torvalds 994408d8544SNick Piggin sc->all_unreclaimable = 0; 995408d8544SNick Piggin 99605ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, sc); 9971da177e4SLinus Torvalds } 99805ff5137SAndrew Morton return nr_reclaimed; 9991da177e4SLinus Torvalds } 10001da177e4SLinus Torvalds 10011da177e4SLinus Torvalds /* 10021da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 10031da177e4SLinus Torvalds * 10041da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 10051da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 10061da177e4SLinus Torvalds * 10071da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 10081da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 10091da177e4SLinus Torvalds * caller can't do much about. We kick pdflush and take explicit naps in the 10101da177e4SLinus Torvalds * hope that some of these pages can be written. But if the allocating task 10111da177e4SLinus Torvalds * holds filesystem locks which prevent writeout this might not work, and the 10121da177e4SLinus Torvalds * allocation attempt will fail. 10131da177e4SLinus Torvalds */ 101469e05944SAndrew Morton unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) 10151da177e4SLinus Torvalds { 10161da177e4SLinus Torvalds int priority; 10171da177e4SLinus Torvalds int ret = 0; 101869e05944SAndrew Morton unsigned long total_scanned = 0; 101905ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 10201da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 10211da177e4SLinus Torvalds unsigned long lru_pages = 0; 10221da177e4SLinus Torvalds int i; 1023179e9639SAndrew Morton struct scan_control sc = { 1024179e9639SAndrew Morton .gfp_mask = gfp_mask, 1025179e9639SAndrew Morton .may_writepage = !laptop_mode, 1026179e9639SAndrew Morton .swap_cluster_max = SWAP_CLUSTER_MAX, 1027179e9639SAndrew Morton .may_swap = 1, 1028d6277db4SRafael J. Wysocki .swappiness = vm_swappiness, 1029179e9639SAndrew Morton }; 10301da177e4SLinus Torvalds 1031f8891e5eSChristoph Lameter count_vm_event(ALLOCSTALL); 10321da177e4SLinus Torvalds 10331da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 10341da177e4SLinus Torvalds struct zone *zone = zones[i]; 10351da177e4SLinus Torvalds 10369bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 10371da177e4SLinus Torvalds continue; 10381da177e4SLinus Torvalds 10391da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 10401da177e4SLinus Torvalds } 10411da177e4SLinus Torvalds 10421da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 10431da177e4SLinus Torvalds sc.nr_scanned = 0; 1044f7b7fd8fSRik van Riel if (!priority) 1045f7b7fd8fSRik van Riel disable_swap_token(); 10461742f19fSAndrew Morton nr_reclaimed += shrink_zones(priority, zones, &sc); 10471da177e4SLinus Torvalds shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); 10481da177e4SLinus Torvalds if (reclaim_state) { 104905ff5137SAndrew Morton nr_reclaimed += reclaim_state->reclaimed_slab; 10501da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 10511da177e4SLinus Torvalds } 10521da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 105305ff5137SAndrew Morton if (nr_reclaimed >= sc.swap_cluster_max) { 10541da177e4SLinus Torvalds ret = 1; 10551da177e4SLinus Torvalds goto out; 10561da177e4SLinus Torvalds } 10571da177e4SLinus Torvalds 10581da177e4SLinus Torvalds /* 10591da177e4SLinus Torvalds * Try to write back as many pages as we just scanned. This 10601da177e4SLinus Torvalds * tends to cause slow streaming writers to write data to the 10611da177e4SLinus Torvalds * disk smoothly, at the dirtying rate, which is nice. But 10621da177e4SLinus Torvalds * that's undesirable in laptop mode, where we *want* lumpy 10631da177e4SLinus Torvalds * writeout. So in laptop mode, write out the whole world. 10641da177e4SLinus Torvalds */ 1065179e9639SAndrew Morton if (total_scanned > sc.swap_cluster_max + 1066179e9639SAndrew Morton sc.swap_cluster_max / 2) { 1067687a21ceSPekka J Enberg wakeup_pdflush(laptop_mode ? 0 : total_scanned); 10681da177e4SLinus Torvalds sc.may_writepage = 1; 10691da177e4SLinus Torvalds } 10701da177e4SLinus Torvalds 10711da177e4SLinus Torvalds /* Take a nap, wait for some writeback to complete */ 10721da177e4SLinus Torvalds if (sc.nr_scanned && priority < DEF_PRIORITY - 2) 10733fcfab16SAndrew Morton congestion_wait(WRITE, HZ/10); 10741da177e4SLinus Torvalds } 1075408d8544SNick Piggin /* top priority shrink_caches still had more to do? don't OOM, then */ 1076408d8544SNick Piggin if (!sc.all_unreclaimable) 1077408d8544SNick Piggin ret = 1; 10781da177e4SLinus Torvalds out: 1079*3bb1a852SMartin Bligh /* 1080*3bb1a852SMartin Bligh * Now that we've scanned all the zones at this priority level, note 1081*3bb1a852SMartin Bligh * that level within the zone so that the next thread which performs 1082*3bb1a852SMartin Bligh * scanning of this zone will immediately start out at this priority 1083*3bb1a852SMartin Bligh * level. This affects only the decision whether or not to bring 1084*3bb1a852SMartin Bligh * mapped pages onto the inactive list. 1085*3bb1a852SMartin Bligh */ 1086*3bb1a852SMartin Bligh if (priority < 0) 1087*3bb1a852SMartin Bligh priority = 0; 10881da177e4SLinus Torvalds for (i = 0; zones[i] != 0; i++) { 10891da177e4SLinus Torvalds struct zone *zone = zones[i]; 10901da177e4SLinus Torvalds 10919bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 10921da177e4SLinus Torvalds continue; 10931da177e4SLinus Torvalds 1094*3bb1a852SMartin Bligh zone->prev_priority = priority; 10951da177e4SLinus Torvalds } 10961da177e4SLinus Torvalds return ret; 10971da177e4SLinus Torvalds } 10981da177e4SLinus Torvalds 10991da177e4SLinus Torvalds /* 11001da177e4SLinus Torvalds * For kswapd, balance_pgdat() will work across all this node's zones until 11011da177e4SLinus Torvalds * they are all at pages_high. 11021da177e4SLinus Torvalds * 11031da177e4SLinus Torvalds * Returns the number of pages which were actually freed. 11041da177e4SLinus Torvalds * 11051da177e4SLinus Torvalds * There is special handling here for zones which are full of pinned pages. 11061da177e4SLinus Torvalds * This can happen if the pages are all mlocked, or if they are all used by 11071da177e4SLinus Torvalds * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 11081da177e4SLinus Torvalds * What we do is to detect the case where all pages in the zone have been 11091da177e4SLinus Torvalds * scanned twice and there has been zero successful reclaim. Mark the zone as 11101da177e4SLinus Torvalds * dead and from now on, only perform a short scan. Basically we're polling 11111da177e4SLinus Torvalds * the zone for when the problem goes away. 11121da177e4SLinus Torvalds * 11131da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 11141da177e4SLinus Torvalds * zones which have free_pages > pages_high, but once a zone is found to have 11151da177e4SLinus Torvalds * free_pages <= pages_high, we scan that zone and the lower zones regardless 11161da177e4SLinus Torvalds * of the number of free pages in the lower zones. This interoperates with 11171da177e4SLinus Torvalds * the page allocator fallback scheme to ensure that aging of pages is balanced 11181da177e4SLinus Torvalds * across the zones. 11191da177e4SLinus Torvalds */ 1120d6277db4SRafael J. Wysocki static unsigned long balance_pgdat(pg_data_t *pgdat, int order) 11211da177e4SLinus Torvalds { 11221da177e4SLinus Torvalds int all_zones_ok; 11231da177e4SLinus Torvalds int priority; 11241da177e4SLinus Torvalds int i; 112569e05944SAndrew Morton unsigned long total_scanned; 112605ff5137SAndrew Morton unsigned long nr_reclaimed; 11271da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 1128179e9639SAndrew Morton struct scan_control sc = { 1129179e9639SAndrew Morton .gfp_mask = GFP_KERNEL, 1130179e9639SAndrew Morton .may_swap = 1, 1131d6277db4SRafael J. Wysocki .swap_cluster_max = SWAP_CLUSTER_MAX, 1132d6277db4SRafael J. Wysocki .swappiness = vm_swappiness, 1133179e9639SAndrew Morton }; 1134*3bb1a852SMartin Bligh /* 1135*3bb1a852SMartin Bligh * temp_priority is used to remember the scanning priority at which 1136*3bb1a852SMartin Bligh * this zone was successfully refilled to free_pages == pages_high. 1137*3bb1a852SMartin Bligh */ 1138*3bb1a852SMartin Bligh int temp_priority[MAX_NR_ZONES]; 11391da177e4SLinus Torvalds 11401da177e4SLinus Torvalds loop_again: 11411da177e4SLinus Torvalds total_scanned = 0; 114205ff5137SAndrew Morton nr_reclaimed = 0; 1143c0bbbc73SChristoph Lameter sc.may_writepage = !laptop_mode; 1144f8891e5eSChristoph Lameter count_vm_event(PAGEOUTRUN); 11451da177e4SLinus Torvalds 1146*3bb1a852SMartin Bligh for (i = 0; i < pgdat->nr_zones; i++) 1147*3bb1a852SMartin Bligh temp_priority[i] = DEF_PRIORITY; 11481da177e4SLinus Torvalds 11491da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 11501da177e4SLinus Torvalds int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 11511da177e4SLinus Torvalds unsigned long lru_pages = 0; 11521da177e4SLinus Torvalds 1153f7b7fd8fSRik van Riel /* The swap token gets in the way of swapout... */ 1154f7b7fd8fSRik van Riel if (!priority) 1155f7b7fd8fSRik van Riel disable_swap_token(); 1156f7b7fd8fSRik van Riel 11571da177e4SLinus Torvalds all_zones_ok = 1; 11581da177e4SLinus Torvalds 11591da177e4SLinus Torvalds /* 11601da177e4SLinus Torvalds * Scan in the highmem->dma direction for the highest 11611da177e4SLinus Torvalds * zone which needs scanning 11621da177e4SLinus Torvalds */ 11631da177e4SLinus Torvalds for (i = pgdat->nr_zones - 1; i >= 0; i--) { 11641da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 11651da177e4SLinus Torvalds 1166f3fe6512SCon Kolivas if (!populated_zone(zone)) 11671da177e4SLinus Torvalds continue; 11681da177e4SLinus Torvalds 1169d6277db4SRafael J. Wysocki if (zone->all_unreclaimable && priority != DEF_PRIORITY) 11701da177e4SLinus Torvalds continue; 11711da177e4SLinus Torvalds 1172d6277db4SRafael J. Wysocki if (!zone_watermark_ok(zone, order, zone->pages_high, 1173d6277db4SRafael J. Wysocki 0, 0)) { 11741da177e4SLinus Torvalds end_zone = i; 11751da177e4SLinus Torvalds goto scan; 11761da177e4SLinus Torvalds } 11771da177e4SLinus Torvalds } 11781da177e4SLinus Torvalds goto out; 11791da177e4SLinus Torvalds scan: 11801da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 11811da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 11821da177e4SLinus Torvalds 11831da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 11841da177e4SLinus Torvalds } 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds /* 11871da177e4SLinus Torvalds * Now scan the zone in the dma->highmem direction, stopping 11881da177e4SLinus Torvalds * at the last zone which needs scanning. 11891da177e4SLinus Torvalds * 11901da177e4SLinus Torvalds * We do this because the page allocator works in the opposite 11911da177e4SLinus Torvalds * direction. This prevents the page allocator from allocating 11921da177e4SLinus Torvalds * pages behind kswapd's direction of progress, which would 11931da177e4SLinus Torvalds * cause too much scanning of the lower zones. 11941da177e4SLinus Torvalds */ 11951da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 11961da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 1197b15e0905Sakpm@osdl.org int nr_slab; 11981da177e4SLinus Torvalds 1199f3fe6512SCon Kolivas if (!populated_zone(zone)) 12001da177e4SLinus Torvalds continue; 12011da177e4SLinus Torvalds 12021da177e4SLinus Torvalds if (zone->all_unreclaimable && priority != DEF_PRIORITY) 12031da177e4SLinus Torvalds continue; 12041da177e4SLinus Torvalds 1205d6277db4SRafael J. Wysocki if (!zone_watermark_ok(zone, order, zone->pages_high, 1206d6277db4SRafael J. Wysocki end_zone, 0)) 12071da177e4SLinus Torvalds all_zones_ok = 0; 1208*3bb1a852SMartin Bligh temp_priority[i] = priority; 12091da177e4SLinus Torvalds sc.nr_scanned = 0; 1210*3bb1a852SMartin Bligh note_zone_scanning_priority(zone, priority); 121105ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, &sc); 12121da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 1213b15e0905Sakpm@osdl.org nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1214b15e0905Sakpm@osdl.org lru_pages); 121505ff5137SAndrew Morton nr_reclaimed += reclaim_state->reclaimed_slab; 12161da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 12171da177e4SLinus Torvalds if (zone->all_unreclaimable) 12181da177e4SLinus Torvalds continue; 1219b15e0905Sakpm@osdl.org if (nr_slab == 0 && zone->pages_scanned >= 12204ff1ffb4SNick Piggin (zone->nr_active + zone->nr_inactive) * 6) 12211da177e4SLinus Torvalds zone->all_unreclaimable = 1; 12221da177e4SLinus Torvalds /* 12231da177e4SLinus Torvalds * If we've done a decent amount of scanning and 12241da177e4SLinus Torvalds * the reclaim ratio is low, start doing writepage 12251da177e4SLinus Torvalds * even in laptop mode 12261da177e4SLinus Torvalds */ 12271da177e4SLinus Torvalds if (total_scanned > SWAP_CLUSTER_MAX * 2 && 122805ff5137SAndrew Morton total_scanned > nr_reclaimed + nr_reclaimed / 2) 12291da177e4SLinus Torvalds sc.may_writepage = 1; 12301da177e4SLinus Torvalds } 12311da177e4SLinus Torvalds if (all_zones_ok) 12321da177e4SLinus Torvalds break; /* kswapd: all done */ 12331da177e4SLinus Torvalds /* 12341da177e4SLinus Torvalds * OK, kswapd is getting into trouble. Take a nap, then take 12351da177e4SLinus Torvalds * another pass across the zones. 12361da177e4SLinus Torvalds */ 12371da177e4SLinus Torvalds if (total_scanned && priority < DEF_PRIORITY - 2) 12383fcfab16SAndrew Morton congestion_wait(WRITE, HZ/10); 12391da177e4SLinus Torvalds 12401da177e4SLinus Torvalds /* 12411da177e4SLinus Torvalds * We do this so kswapd doesn't build up large priorities for 12421da177e4SLinus Torvalds * example when it is freeing in parallel with allocators. It 12431da177e4SLinus Torvalds * matches the direct reclaim path behaviour in terms of impact 12441da177e4SLinus Torvalds * on zone->*_priority. 12451da177e4SLinus Torvalds */ 1246d6277db4SRafael J. Wysocki if (nr_reclaimed >= SWAP_CLUSTER_MAX) 12471da177e4SLinus Torvalds break; 12481da177e4SLinus Torvalds } 12491da177e4SLinus Torvalds out: 1250*3bb1a852SMartin Bligh /* 1251*3bb1a852SMartin Bligh * Note within each zone the priority level at which this zone was 1252*3bb1a852SMartin Bligh * brought into a happy state. So that the next thread which scans this 1253*3bb1a852SMartin Bligh * zone will start out at that priority level. 1254*3bb1a852SMartin Bligh */ 12551da177e4SLinus Torvalds for (i = 0; i < pgdat->nr_zones; i++) { 12561da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 12571da177e4SLinus Torvalds 1258*3bb1a852SMartin Bligh zone->prev_priority = temp_priority[i]; 12591da177e4SLinus Torvalds } 12601da177e4SLinus Torvalds if (!all_zones_ok) { 12611da177e4SLinus Torvalds cond_resched(); 12621da177e4SLinus Torvalds goto loop_again; 12631da177e4SLinus Torvalds } 12641da177e4SLinus Torvalds 126505ff5137SAndrew Morton return nr_reclaimed; 12661da177e4SLinus Torvalds } 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds /* 12691da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 12701da177e4SLinus Torvalds * from the init process. 12711da177e4SLinus Torvalds * 12721da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 12731da177e4SLinus Torvalds * free memory available even if there is no other activity 12741da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 12751da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 12761da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 12771da177e4SLinus Torvalds * 12781da177e4SLinus Torvalds * If there are applications that are active memory-allocators 12791da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 12801da177e4SLinus Torvalds */ 12811da177e4SLinus Torvalds static int kswapd(void *p) 12821da177e4SLinus Torvalds { 12831da177e4SLinus Torvalds unsigned long order; 12841da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 12851da177e4SLinus Torvalds struct task_struct *tsk = current; 12861da177e4SLinus Torvalds DEFINE_WAIT(wait); 12871da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 12881da177e4SLinus Torvalds .reclaimed_slab = 0, 12891da177e4SLinus Torvalds }; 12901da177e4SLinus Torvalds cpumask_t cpumask; 12911da177e4SLinus Torvalds 12921da177e4SLinus Torvalds cpumask = node_to_cpumask(pgdat->node_id); 12931da177e4SLinus Torvalds if (!cpus_empty(cpumask)) 12941da177e4SLinus Torvalds set_cpus_allowed(tsk, cpumask); 12951da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds /* 12981da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 12991da177e4SLinus Torvalds * and that if we need more memory we should get access to it 13001da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 13011da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 13021da177e4SLinus Torvalds * 13031da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 13041da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 13051da177e4SLinus Torvalds * page out something else, and this flag essentially protects 13061da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 13071da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 13081da177e4SLinus Torvalds */ 1309930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 13101da177e4SLinus Torvalds 13111da177e4SLinus Torvalds order = 0; 13121da177e4SLinus Torvalds for ( ; ; ) { 13131da177e4SLinus Torvalds unsigned long new_order; 13143e1d1d28SChristoph Lameter 13153e1d1d28SChristoph Lameter try_to_freeze(); 13161da177e4SLinus Torvalds 13171da177e4SLinus Torvalds prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 13181da177e4SLinus Torvalds new_order = pgdat->kswapd_max_order; 13191da177e4SLinus Torvalds pgdat->kswapd_max_order = 0; 13201da177e4SLinus Torvalds if (order < new_order) { 13211da177e4SLinus Torvalds /* 13221da177e4SLinus Torvalds * Don't sleep if someone wants a larger 'order' 13231da177e4SLinus Torvalds * allocation 13241da177e4SLinus Torvalds */ 13251da177e4SLinus Torvalds order = new_order; 13261da177e4SLinus Torvalds } else { 13271da177e4SLinus Torvalds schedule(); 13281da177e4SLinus Torvalds order = pgdat->kswapd_max_order; 13291da177e4SLinus Torvalds } 13301da177e4SLinus Torvalds finish_wait(&pgdat->kswapd_wait, &wait); 13311da177e4SLinus Torvalds 1332d6277db4SRafael J. Wysocki balance_pgdat(pgdat, order); 13331da177e4SLinus Torvalds } 13341da177e4SLinus Torvalds return 0; 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds 13371da177e4SLinus Torvalds /* 13381da177e4SLinus Torvalds * A zone is low on free memory, so wake its kswapd task to service it. 13391da177e4SLinus Torvalds */ 13401da177e4SLinus Torvalds void wakeup_kswapd(struct zone *zone, int order) 13411da177e4SLinus Torvalds { 13421da177e4SLinus Torvalds pg_data_t *pgdat; 13431da177e4SLinus Torvalds 1344f3fe6512SCon Kolivas if (!populated_zone(zone)) 13451da177e4SLinus Torvalds return; 13461da177e4SLinus Torvalds 13471da177e4SLinus Torvalds pgdat = zone->zone_pgdat; 13487fb1d9fcSRohit Seth if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) 13491da177e4SLinus Torvalds return; 13501da177e4SLinus Torvalds if (pgdat->kswapd_max_order < order) 13511da177e4SLinus Torvalds pgdat->kswapd_max_order = order; 13529bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 13531da177e4SLinus Torvalds return; 13548d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 13551da177e4SLinus Torvalds return; 13568d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds 13591da177e4SLinus Torvalds #ifdef CONFIG_PM 13601da177e4SLinus Torvalds /* 1361d6277db4SRafael J. Wysocki * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages 1362d6277db4SRafael J. Wysocki * from LRU lists system-wide, for given pass and priority, and returns the 1363d6277db4SRafael J. Wysocki * number of reclaimed pages 1364d6277db4SRafael J. Wysocki * 1365d6277db4SRafael J. Wysocki * For pass > 3 we also try to shrink the LRU lists that contain a few pages 1366d6277db4SRafael J. Wysocki */ 1367d6277db4SRafael J. Wysocki static unsigned long shrink_all_zones(unsigned long nr_pages, int pass, 1368d6277db4SRafael J. Wysocki int prio, struct scan_control *sc) 1369d6277db4SRafael J. Wysocki { 1370d6277db4SRafael J. Wysocki struct zone *zone; 1371d6277db4SRafael J. Wysocki unsigned long nr_to_scan, ret = 0; 1372d6277db4SRafael J. Wysocki 1373d6277db4SRafael J. Wysocki for_each_zone(zone) { 1374d6277db4SRafael J. Wysocki 1375d6277db4SRafael J. Wysocki if (!populated_zone(zone)) 1376d6277db4SRafael J. Wysocki continue; 1377d6277db4SRafael J. Wysocki 1378d6277db4SRafael J. Wysocki if (zone->all_unreclaimable && prio != DEF_PRIORITY) 1379d6277db4SRafael J. Wysocki continue; 1380d6277db4SRafael J. Wysocki 1381d6277db4SRafael J. Wysocki /* For pass = 0 we don't shrink the active list */ 1382d6277db4SRafael J. Wysocki if (pass > 0) { 1383d6277db4SRafael J. Wysocki zone->nr_scan_active += (zone->nr_active >> prio) + 1; 1384d6277db4SRafael J. Wysocki if (zone->nr_scan_active >= nr_pages || pass > 3) { 1385d6277db4SRafael J. Wysocki zone->nr_scan_active = 0; 1386d6277db4SRafael J. Wysocki nr_to_scan = min(nr_pages, zone->nr_active); 1387d6277db4SRafael J. Wysocki shrink_active_list(nr_to_scan, zone, sc); 1388d6277db4SRafael J. Wysocki } 1389d6277db4SRafael J. Wysocki } 1390d6277db4SRafael J. Wysocki 1391d6277db4SRafael J. Wysocki zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1; 1392d6277db4SRafael J. Wysocki if (zone->nr_scan_inactive >= nr_pages || pass > 3) { 1393d6277db4SRafael J. Wysocki zone->nr_scan_inactive = 0; 1394d6277db4SRafael J. Wysocki nr_to_scan = min(nr_pages, zone->nr_inactive); 1395d6277db4SRafael J. Wysocki ret += shrink_inactive_list(nr_to_scan, zone, sc); 1396d6277db4SRafael J. Wysocki if (ret >= nr_pages) 1397d6277db4SRafael J. Wysocki return ret; 1398d6277db4SRafael J. Wysocki } 1399d6277db4SRafael J. Wysocki } 1400d6277db4SRafael J. Wysocki 1401d6277db4SRafael J. Wysocki return ret; 1402d6277db4SRafael J. Wysocki } 1403d6277db4SRafael J. Wysocki 1404d6277db4SRafael J. Wysocki /* 1405d6277db4SRafael J. Wysocki * Try to free `nr_pages' of memory, system-wide, and return the number of 1406d6277db4SRafael J. Wysocki * freed pages. 1407d6277db4SRafael J. Wysocki * 1408d6277db4SRafael J. Wysocki * Rather than trying to age LRUs the aim is to preserve the overall 1409d6277db4SRafael J. Wysocki * LRU order by reclaiming preferentially 1410d6277db4SRafael J. Wysocki * inactive > active > active referenced > active mapped 14111da177e4SLinus Torvalds */ 141269e05944SAndrew Morton unsigned long shrink_all_memory(unsigned long nr_pages) 14131da177e4SLinus Torvalds { 1414d6277db4SRafael J. Wysocki unsigned long lru_pages, nr_slab; 141569e05944SAndrew Morton unsigned long ret = 0; 1416d6277db4SRafael J. Wysocki int pass; 1417d6277db4SRafael J. Wysocki struct reclaim_state reclaim_state; 1418d6277db4SRafael J. Wysocki struct zone *zone; 1419d6277db4SRafael J. Wysocki struct scan_control sc = { 1420d6277db4SRafael J. Wysocki .gfp_mask = GFP_KERNEL, 1421d6277db4SRafael J. Wysocki .may_swap = 0, 1422d6277db4SRafael J. Wysocki .swap_cluster_max = nr_pages, 1423d6277db4SRafael J. Wysocki .may_writepage = 1, 1424d6277db4SRafael J. Wysocki .swappiness = vm_swappiness, 14251da177e4SLinus Torvalds }; 14261da177e4SLinus Torvalds 14271da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 142869e05944SAndrew Morton 1429d6277db4SRafael J. Wysocki lru_pages = 0; 1430d6277db4SRafael J. Wysocki for_each_zone(zone) 1431d6277db4SRafael J. Wysocki lru_pages += zone->nr_active + zone->nr_inactive; 1432d6277db4SRafael J. Wysocki 1433972d1a7bSChristoph Lameter nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); 1434d6277db4SRafael J. Wysocki /* If slab caches are huge, it's better to hit them first */ 1435d6277db4SRafael J. Wysocki while (nr_slab >= lru_pages) { 1436d6277db4SRafael J. Wysocki reclaim_state.reclaimed_slab = 0; 1437d6277db4SRafael J. Wysocki shrink_slab(nr_pages, sc.gfp_mask, lru_pages); 1438d6277db4SRafael J. Wysocki if (!reclaim_state.reclaimed_slab) 14391da177e4SLinus Torvalds break; 1440d6277db4SRafael J. Wysocki 1441d6277db4SRafael J. Wysocki ret += reclaim_state.reclaimed_slab; 1442d6277db4SRafael J. Wysocki if (ret >= nr_pages) 1443d6277db4SRafael J. Wysocki goto out; 1444d6277db4SRafael J. Wysocki 1445d6277db4SRafael J. Wysocki nr_slab -= reclaim_state.reclaimed_slab; 14461da177e4SLinus Torvalds } 1447d6277db4SRafael J. Wysocki 1448d6277db4SRafael J. Wysocki /* 1449d6277db4SRafael J. Wysocki * We try to shrink LRUs in 5 passes: 1450d6277db4SRafael J. Wysocki * 0 = Reclaim from inactive_list only 1451d6277db4SRafael J. Wysocki * 1 = Reclaim from active list but don't reclaim mapped 1452d6277db4SRafael J. Wysocki * 2 = 2nd pass of type 1 1453d6277db4SRafael J. Wysocki * 3 = Reclaim mapped (normal reclaim) 1454d6277db4SRafael J. Wysocki * 4 = 2nd pass of type 3 1455d6277db4SRafael J. Wysocki */ 1456d6277db4SRafael J. Wysocki for (pass = 0; pass < 5; pass++) { 1457d6277db4SRafael J. Wysocki int prio; 1458d6277db4SRafael J. Wysocki 1459d6277db4SRafael J. Wysocki /* Needed for shrinking slab caches later on */ 1460d6277db4SRafael J. Wysocki if (!lru_pages) 1461d6277db4SRafael J. Wysocki for_each_zone(zone) { 1462d6277db4SRafael J. Wysocki lru_pages += zone->nr_active; 1463d6277db4SRafael J. Wysocki lru_pages += zone->nr_inactive; 1464248a0301SRafael J. Wysocki } 1465d6277db4SRafael J. Wysocki 1466d6277db4SRafael J. Wysocki /* Force reclaiming mapped pages in the passes #3 and #4 */ 1467d6277db4SRafael J. Wysocki if (pass > 2) { 1468d6277db4SRafael J. Wysocki sc.may_swap = 1; 1469d6277db4SRafael J. Wysocki sc.swappiness = 100; 1470d6277db4SRafael J. Wysocki } 1471d6277db4SRafael J. Wysocki 1472d6277db4SRafael J. Wysocki for (prio = DEF_PRIORITY; prio >= 0; prio--) { 1473d6277db4SRafael J. Wysocki unsigned long nr_to_scan = nr_pages - ret; 1474d6277db4SRafael J. Wysocki 1475d6277db4SRafael J. Wysocki sc.nr_scanned = 0; 1476d6277db4SRafael J. Wysocki ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); 1477d6277db4SRafael J. Wysocki if (ret >= nr_pages) 1478d6277db4SRafael J. Wysocki goto out; 1479d6277db4SRafael J. Wysocki 1480d6277db4SRafael J. Wysocki reclaim_state.reclaimed_slab = 0; 1481d6277db4SRafael J. Wysocki shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages); 1482d6277db4SRafael J. Wysocki ret += reclaim_state.reclaimed_slab; 1483d6277db4SRafael J. Wysocki if (ret >= nr_pages) 1484d6277db4SRafael J. Wysocki goto out; 1485d6277db4SRafael J. Wysocki 1486d6277db4SRafael J. Wysocki if (sc.nr_scanned && prio < DEF_PRIORITY - 2) 14873fcfab16SAndrew Morton congestion_wait(WRITE, HZ / 10); 1488d6277db4SRafael J. Wysocki } 1489d6277db4SRafael J. Wysocki 1490d6277db4SRafael J. Wysocki lru_pages = 0; 1491d6277db4SRafael J. Wysocki } 1492d6277db4SRafael J. Wysocki 1493d6277db4SRafael J. Wysocki /* 1494d6277db4SRafael J. Wysocki * If ret = 0, we could not shrink LRUs, but there may be something 1495d6277db4SRafael J. Wysocki * in slab caches 1496d6277db4SRafael J. Wysocki */ 1497d6277db4SRafael J. Wysocki if (!ret) 1498d6277db4SRafael J. Wysocki do { 1499d6277db4SRafael J. Wysocki reclaim_state.reclaimed_slab = 0; 1500d6277db4SRafael J. Wysocki shrink_slab(nr_pages, sc.gfp_mask, lru_pages); 1501d6277db4SRafael J. Wysocki ret += reclaim_state.reclaimed_slab; 1502d6277db4SRafael J. Wysocki } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); 1503d6277db4SRafael J. Wysocki 1504d6277db4SRafael J. Wysocki out: 15051da177e4SLinus Torvalds current->reclaim_state = NULL; 1506d6277db4SRafael J. Wysocki 15071da177e4SLinus Torvalds return ret; 15081da177e4SLinus Torvalds } 15091da177e4SLinus Torvalds #endif 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU 15121da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 15131da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 15141da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 15151da177e4SLinus Torvalds restore their cpu bindings. */ 15169c7b216dSChandra Seetharaman static int __devinit cpu_callback(struct notifier_block *nfb, 151769e05944SAndrew Morton unsigned long action, void *hcpu) 15181da177e4SLinus Torvalds { 15191da177e4SLinus Torvalds pg_data_t *pgdat; 15201da177e4SLinus Torvalds cpumask_t mask; 15211da177e4SLinus Torvalds 15221da177e4SLinus Torvalds if (action == CPU_ONLINE) { 1523ec936fc5SKAMEZAWA Hiroyuki for_each_online_pgdat(pgdat) { 15241da177e4SLinus Torvalds mask = node_to_cpumask(pgdat->node_id); 15251da177e4SLinus Torvalds if (any_online_cpu(mask) != NR_CPUS) 15261da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 15271da177e4SLinus Torvalds set_cpus_allowed(pgdat->kswapd, mask); 15281da177e4SLinus Torvalds } 15291da177e4SLinus Torvalds } 15301da177e4SLinus Torvalds return NOTIFY_OK; 15311da177e4SLinus Torvalds } 15321da177e4SLinus Torvalds #endif /* CONFIG_HOTPLUG_CPU */ 15331da177e4SLinus Torvalds 15343218ae14SYasunori Goto /* 15353218ae14SYasunori Goto * This kswapd start function will be called by init and node-hot-add. 15363218ae14SYasunori Goto * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 15373218ae14SYasunori Goto */ 15383218ae14SYasunori Goto int kswapd_run(int nid) 15393218ae14SYasunori Goto { 15403218ae14SYasunori Goto pg_data_t *pgdat = NODE_DATA(nid); 15413218ae14SYasunori Goto int ret = 0; 15423218ae14SYasunori Goto 15433218ae14SYasunori Goto if (pgdat->kswapd) 15443218ae14SYasunori Goto return 0; 15453218ae14SYasunori Goto 15463218ae14SYasunori Goto pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 15473218ae14SYasunori Goto if (IS_ERR(pgdat->kswapd)) { 15483218ae14SYasunori Goto /* failure at boot is fatal */ 15493218ae14SYasunori Goto BUG_ON(system_state == SYSTEM_BOOTING); 15503218ae14SYasunori Goto printk("Failed to start kswapd on node %d\n",nid); 15513218ae14SYasunori Goto ret = -1; 15523218ae14SYasunori Goto } 15533218ae14SYasunori Goto return ret; 15543218ae14SYasunori Goto } 15553218ae14SYasunori Goto 15561da177e4SLinus Torvalds static int __init kswapd_init(void) 15571da177e4SLinus Torvalds { 15583218ae14SYasunori Goto int nid; 155969e05944SAndrew Morton 15601da177e4SLinus Torvalds swap_setup(); 15613218ae14SYasunori Goto for_each_online_node(nid) 15623218ae14SYasunori Goto kswapd_run(nid); 15631da177e4SLinus Torvalds hotcpu_notifier(cpu_callback, 0); 15641da177e4SLinus Torvalds return 0; 15651da177e4SLinus Torvalds } 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds module_init(kswapd_init) 15689eeff239SChristoph Lameter 15699eeff239SChristoph Lameter #ifdef CONFIG_NUMA 15709eeff239SChristoph Lameter /* 15719eeff239SChristoph Lameter * Zone reclaim mode 15729eeff239SChristoph Lameter * 15739eeff239SChristoph Lameter * If non-zero call zone_reclaim when the number of free pages falls below 15749eeff239SChristoph Lameter * the watermarks. 15759eeff239SChristoph Lameter */ 15769eeff239SChristoph Lameter int zone_reclaim_mode __read_mostly; 15779eeff239SChristoph Lameter 15781b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 15791b2ffb78SChristoph Lameter #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */ 15801b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 15811b2ffb78SChristoph Lameter #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 15821b2ffb78SChristoph Lameter 15839eeff239SChristoph Lameter /* 1584a92f7126SChristoph Lameter * Priority for ZONE_RECLAIM. This determines the fraction of pages 1585a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 1586a92f7126SChristoph Lameter * a zone. 1587a92f7126SChristoph Lameter */ 1588a92f7126SChristoph Lameter #define ZONE_RECLAIM_PRIORITY 4 1589a92f7126SChristoph Lameter 15909eeff239SChristoph Lameter /* 15919614634fSChristoph Lameter * Percentage of pages in a zone that must be unmapped for zone_reclaim to 15929614634fSChristoph Lameter * occur. 15939614634fSChristoph Lameter */ 15949614634fSChristoph Lameter int sysctl_min_unmapped_ratio = 1; 15959614634fSChristoph Lameter 15969614634fSChristoph Lameter /* 15970ff38490SChristoph Lameter * If the number of slab pages in a zone grows beyond this percentage then 15980ff38490SChristoph Lameter * slab reclaim needs to occur. 15990ff38490SChristoph Lameter */ 16000ff38490SChristoph Lameter int sysctl_min_slab_ratio = 5; 16010ff38490SChristoph Lameter 16020ff38490SChristoph Lameter /* 16039eeff239SChristoph Lameter * Try to free up some pages from this zone through reclaim. 16049eeff239SChristoph Lameter */ 1605179e9639SAndrew Morton static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 16069eeff239SChristoph Lameter { 16077fb2d46dSChristoph Lameter /* Minimum pages needed in order to stay on node */ 160869e05944SAndrew Morton const unsigned long nr_pages = 1 << order; 16099eeff239SChristoph Lameter struct task_struct *p = current; 16109eeff239SChristoph Lameter struct reclaim_state reclaim_state; 16118695949aSChristoph Lameter int priority; 161205ff5137SAndrew Morton unsigned long nr_reclaimed = 0; 1613179e9639SAndrew Morton struct scan_control sc = { 1614179e9639SAndrew Morton .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 1615179e9639SAndrew Morton .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), 161669e05944SAndrew Morton .swap_cluster_max = max_t(unsigned long, nr_pages, 161769e05944SAndrew Morton SWAP_CLUSTER_MAX), 1618179e9639SAndrew Morton .gfp_mask = gfp_mask, 1619d6277db4SRafael J. Wysocki .swappiness = vm_swappiness, 1620179e9639SAndrew Morton }; 162183e33a47SChristoph Lameter unsigned long slab_reclaimable; 16229eeff239SChristoph Lameter 16239eeff239SChristoph Lameter disable_swap_token(); 16249eeff239SChristoph Lameter cond_resched(); 1625d4f7796eSChristoph Lameter /* 1626d4f7796eSChristoph Lameter * We need to be able to allocate from the reserves for RECLAIM_SWAP 1627d4f7796eSChristoph Lameter * and we also need to be able to write out pages for RECLAIM_WRITE 1628d4f7796eSChristoph Lameter * and RECLAIM_SWAP. 1629d4f7796eSChristoph Lameter */ 1630d4f7796eSChristoph Lameter p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 16319eeff239SChristoph Lameter reclaim_state.reclaimed_slab = 0; 16329eeff239SChristoph Lameter p->reclaim_state = &reclaim_state; 1633c84db23cSChristoph Lameter 16340ff38490SChristoph Lameter if (zone_page_state(zone, NR_FILE_PAGES) - 16350ff38490SChristoph Lameter zone_page_state(zone, NR_FILE_MAPPED) > 16360ff38490SChristoph Lameter zone->min_unmapped_pages) { 1637a92f7126SChristoph Lameter /* 16380ff38490SChristoph Lameter * Free memory by calling shrink zone with increasing 16390ff38490SChristoph Lameter * priorities until we have enough memory freed. 1640a92f7126SChristoph Lameter */ 16418695949aSChristoph Lameter priority = ZONE_RECLAIM_PRIORITY; 1642a92f7126SChristoph Lameter do { 1643*3bb1a852SMartin Bligh note_zone_scanning_priority(zone, priority); 164405ff5137SAndrew Morton nr_reclaimed += shrink_zone(priority, zone, &sc); 16458695949aSChristoph Lameter priority--; 164605ff5137SAndrew Morton } while (priority >= 0 && nr_reclaimed < nr_pages); 16470ff38490SChristoph Lameter } 1648a92f7126SChristoph Lameter 164983e33a47SChristoph Lameter slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 165083e33a47SChristoph Lameter if (slab_reclaimable > zone->min_slab_pages) { 16512a16e3f4SChristoph Lameter /* 16527fb2d46dSChristoph Lameter * shrink_slab() does not currently allow us to determine how 16530ff38490SChristoph Lameter * many pages were freed in this zone. So we take the current 16540ff38490SChristoph Lameter * number of slab pages and shake the slab until it is reduced 16550ff38490SChristoph Lameter * by the same nr_pages that we used for reclaiming unmapped 16560ff38490SChristoph Lameter * pages. 16572a16e3f4SChristoph Lameter * 16580ff38490SChristoph Lameter * Note that shrink_slab will free memory on all zones and may 16590ff38490SChristoph Lameter * take a long time. 16602a16e3f4SChristoph Lameter */ 16610ff38490SChristoph Lameter while (shrink_slab(sc.nr_scanned, gfp_mask, order) && 166283e33a47SChristoph Lameter zone_page_state(zone, NR_SLAB_RECLAIMABLE) > 166383e33a47SChristoph Lameter slab_reclaimable - nr_pages) 16640ff38490SChristoph Lameter ; 166583e33a47SChristoph Lameter 166683e33a47SChristoph Lameter /* 166783e33a47SChristoph Lameter * Update nr_reclaimed by the number of slab pages we 166883e33a47SChristoph Lameter * reclaimed from this zone. 166983e33a47SChristoph Lameter */ 167083e33a47SChristoph Lameter nr_reclaimed += slab_reclaimable - 167183e33a47SChristoph Lameter zone_page_state(zone, NR_SLAB_RECLAIMABLE); 16722a16e3f4SChristoph Lameter } 16732a16e3f4SChristoph Lameter 16749eeff239SChristoph Lameter p->reclaim_state = NULL; 1675d4f7796eSChristoph Lameter current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 167605ff5137SAndrew Morton return nr_reclaimed >= nr_pages; 16779eeff239SChristoph Lameter } 1678179e9639SAndrew Morton 1679179e9639SAndrew Morton int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 1680179e9639SAndrew Morton { 1681179e9639SAndrew Morton cpumask_t mask; 1682179e9639SAndrew Morton int node_id; 1683179e9639SAndrew Morton 1684179e9639SAndrew Morton /* 16850ff38490SChristoph Lameter * Zone reclaim reclaims unmapped file backed pages and 16860ff38490SChristoph Lameter * slab pages if we are over the defined limits. 168734aa1330SChristoph Lameter * 16889614634fSChristoph Lameter * A small portion of unmapped file backed pages is needed for 16899614634fSChristoph Lameter * file I/O otherwise pages read by file I/O will be immediately 16909614634fSChristoph Lameter * thrown out if the zone is overallocated. So we do not reclaim 16919614634fSChristoph Lameter * if less than a specified percentage of the zone is used by 16929614634fSChristoph Lameter * unmapped file backed pages. 1693179e9639SAndrew Morton */ 169434aa1330SChristoph Lameter if (zone_page_state(zone, NR_FILE_PAGES) - 16950ff38490SChristoph Lameter zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages 16960ff38490SChristoph Lameter && zone_page_state(zone, NR_SLAB_RECLAIMABLE) 16970ff38490SChristoph Lameter <= zone->min_slab_pages) 1698179e9639SAndrew Morton return 0; 1699179e9639SAndrew Morton 1700179e9639SAndrew Morton /* 1701179e9639SAndrew Morton * Avoid concurrent zone reclaims, do not reclaim in a zone that does 1702179e9639SAndrew Morton * not have reclaimable pages and if we should not delay the allocation 1703179e9639SAndrew Morton * then do not scan. 1704179e9639SAndrew Morton */ 1705179e9639SAndrew Morton if (!(gfp_mask & __GFP_WAIT) || 1706179e9639SAndrew Morton zone->all_unreclaimable || 1707179e9639SAndrew Morton atomic_read(&zone->reclaim_in_progress) > 0 || 1708179e9639SAndrew Morton (current->flags & PF_MEMALLOC)) 1709179e9639SAndrew Morton return 0; 1710179e9639SAndrew Morton 1711179e9639SAndrew Morton /* 1712179e9639SAndrew Morton * Only run zone reclaim on the local zone or on zones that do not 1713179e9639SAndrew Morton * have associated processors. This will favor the local processor 1714179e9639SAndrew Morton * over remote processors and spread off node memory allocations 1715179e9639SAndrew Morton * as wide as possible. 1716179e9639SAndrew Morton */ 171789fa3024SChristoph Lameter node_id = zone_to_nid(zone); 1718179e9639SAndrew Morton mask = node_to_cpumask(node_id); 1719179e9639SAndrew Morton if (!cpus_empty(mask) && node_id != numa_node_id()) 1720179e9639SAndrew Morton return 0; 1721179e9639SAndrew Morton return __zone_reclaim(zone, gfp_mask, order); 1722179e9639SAndrew Morton } 17239eeff239SChristoph Lameter #endif 1724