11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/mm/vmscan.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie. 71da177e4SLinus Torvalds * kswapd added: 7.1.96 sct 81da177e4SLinus Torvalds * Removed kswapd_ctl limits, and swap out as many pages as needed 91da177e4SLinus Torvalds * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 101da177e4SLinus Torvalds * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 111da177e4SLinus Torvalds * Multiqueue VM started 5.8.00, Rik van Riel. 121da177e4SLinus Torvalds */ 131da177e4SLinus Torvalds 141da177e4SLinus Torvalds #include <linux/mm.h> 151da177e4SLinus Torvalds #include <linux/module.h> 161da177e4SLinus Torvalds #include <linux/slab.h> 171da177e4SLinus Torvalds #include <linux/kernel_stat.h> 181da177e4SLinus Torvalds #include <linux/swap.h> 191da177e4SLinus Torvalds #include <linux/pagemap.h> 201da177e4SLinus Torvalds #include <linux/init.h> 211da177e4SLinus Torvalds #include <linux/highmem.h> 221da177e4SLinus Torvalds #include <linux/file.h> 231da177e4SLinus Torvalds #include <linux/writeback.h> 241da177e4SLinus Torvalds #include <linux/blkdev.h> 251da177e4SLinus Torvalds #include <linux/buffer_head.h> /* for try_to_release_page(), 261da177e4SLinus Torvalds buffer_heads_over_limit */ 271da177e4SLinus Torvalds #include <linux/mm_inline.h> 281da177e4SLinus Torvalds #include <linux/pagevec.h> 291da177e4SLinus Torvalds #include <linux/backing-dev.h> 301da177e4SLinus Torvalds #include <linux/rmap.h> 311da177e4SLinus Torvalds #include <linux/topology.h> 321da177e4SLinus Torvalds #include <linux/cpu.h> 331da177e4SLinus Torvalds #include <linux/cpuset.h> 341da177e4SLinus Torvalds #include <linux/notifier.h> 351da177e4SLinus Torvalds #include <linux/rwsem.h> 361da177e4SLinus Torvalds 371da177e4SLinus Torvalds #include <asm/tlbflush.h> 381da177e4SLinus Torvalds #include <asm/div64.h> 391da177e4SLinus Torvalds 401da177e4SLinus Torvalds #include <linux/swapops.h> 411da177e4SLinus Torvalds 421da177e4SLinus Torvalds /* possible outcome of pageout() */ 431da177e4SLinus Torvalds typedef enum { 441da177e4SLinus Torvalds /* failed to write page out, page is locked */ 451da177e4SLinus Torvalds PAGE_KEEP, 461da177e4SLinus Torvalds /* move page to the active list, page is locked */ 471da177e4SLinus Torvalds PAGE_ACTIVATE, 481da177e4SLinus Torvalds /* page has been sent to the disk successfully, page is unlocked */ 491da177e4SLinus Torvalds PAGE_SUCCESS, 501da177e4SLinus Torvalds /* page is clean and locked */ 511da177e4SLinus Torvalds PAGE_CLEAN, 521da177e4SLinus Torvalds } pageout_t; 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds struct scan_control { 551da177e4SLinus Torvalds /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */ 561da177e4SLinus Torvalds unsigned long nr_to_scan; 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds /* Incremented by the number of inactive pages that were scanned */ 591da177e4SLinus Torvalds unsigned long nr_scanned; 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds /* Incremented by the number of pages reclaimed */ 621da177e4SLinus Torvalds unsigned long nr_reclaimed; 631da177e4SLinus Torvalds 641da177e4SLinus Torvalds unsigned long nr_mapped; /* From page_state */ 651da177e4SLinus Torvalds 661da177e4SLinus Torvalds /* Ask shrink_caches, or shrink_zone to scan at this priority */ 671da177e4SLinus Torvalds unsigned int priority; 681da177e4SLinus Torvalds 691da177e4SLinus Torvalds /* This context's GFP mask */ 706daa0e28SAl Viro gfp_t gfp_mask; 711da177e4SLinus Torvalds 721da177e4SLinus Torvalds int may_writepage; 731da177e4SLinus Torvalds 74f1fd1067SChristoph Lameter /* Can pages be swapped as part of reclaim? */ 75f1fd1067SChristoph Lameter int may_swap; 76f1fd1067SChristoph Lameter 771da177e4SLinus Torvalds /* This context's SWAP_CLUSTER_MAX. If freeing memory for 781da177e4SLinus Torvalds * suspend, we effectively ignore SWAP_CLUSTER_MAX. 791da177e4SLinus Torvalds * In this context, it doesn't matter that we scan the 801da177e4SLinus Torvalds * whole list at once. */ 811da177e4SLinus Torvalds int swap_cluster_max; 821da177e4SLinus Torvalds }; 831da177e4SLinus Torvalds 841da177e4SLinus Torvalds /* 851da177e4SLinus Torvalds * The list of shrinker callbacks used by to apply pressure to 861da177e4SLinus Torvalds * ageable caches. 871da177e4SLinus Torvalds */ 881da177e4SLinus Torvalds struct shrinker { 891da177e4SLinus Torvalds shrinker_t shrinker; 901da177e4SLinus Torvalds struct list_head list; 911da177e4SLinus Torvalds int seeks; /* seeks to recreate an obj */ 921da177e4SLinus Torvalds long nr; /* objs pending delete */ 931da177e4SLinus Torvalds }; 941da177e4SLinus Torvalds 951da177e4SLinus Torvalds #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 961da177e4SLinus Torvalds 971da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCH 981da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) \ 991da177e4SLinus Torvalds do { \ 1001da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 1011da177e4SLinus Torvalds struct page *prev; \ 1021da177e4SLinus Torvalds \ 1031da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 1041da177e4SLinus Torvalds prefetch(&prev->_field); \ 1051da177e4SLinus Torvalds } \ 1061da177e4SLinus Torvalds } while (0) 1071da177e4SLinus Torvalds #else 1081da177e4SLinus Torvalds #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 1091da177e4SLinus Torvalds #endif 1101da177e4SLinus Torvalds 1111da177e4SLinus Torvalds #ifdef ARCH_HAS_PREFETCHW 1121da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) \ 1131da177e4SLinus Torvalds do { \ 1141da177e4SLinus Torvalds if ((_page)->lru.prev != _base) { \ 1151da177e4SLinus Torvalds struct page *prev; \ 1161da177e4SLinus Torvalds \ 1171da177e4SLinus Torvalds prev = lru_to_page(&(_page->lru)); \ 1181da177e4SLinus Torvalds prefetchw(&prev->_field); \ 1191da177e4SLinus Torvalds } \ 1201da177e4SLinus Torvalds } while (0) 1211da177e4SLinus Torvalds #else 1221da177e4SLinus Torvalds #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 1231da177e4SLinus Torvalds #endif 1241da177e4SLinus Torvalds 1251da177e4SLinus Torvalds /* 1261da177e4SLinus Torvalds * From 0 .. 100. Higher means more swappy. 1271da177e4SLinus Torvalds */ 1281da177e4SLinus Torvalds int vm_swappiness = 60; 1291da177e4SLinus Torvalds static long total_memory; 1301da177e4SLinus Torvalds 1311da177e4SLinus Torvalds static LIST_HEAD(shrinker_list); 1321da177e4SLinus Torvalds static DECLARE_RWSEM(shrinker_rwsem); 1331da177e4SLinus Torvalds 1341da177e4SLinus Torvalds /* 1351da177e4SLinus Torvalds * Add a shrinker callback to be called from the vm 1361da177e4SLinus Torvalds */ 1371da177e4SLinus Torvalds struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) 1381da177e4SLinus Torvalds { 1391da177e4SLinus Torvalds struct shrinker *shrinker; 1401da177e4SLinus Torvalds 1411da177e4SLinus Torvalds shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL); 1421da177e4SLinus Torvalds if (shrinker) { 1431da177e4SLinus Torvalds shrinker->shrinker = theshrinker; 1441da177e4SLinus Torvalds shrinker->seeks = seeks; 1451da177e4SLinus Torvalds shrinker->nr = 0; 1461da177e4SLinus Torvalds down_write(&shrinker_rwsem); 1471da177e4SLinus Torvalds list_add_tail(&shrinker->list, &shrinker_list); 1481da177e4SLinus Torvalds up_write(&shrinker_rwsem); 1491da177e4SLinus Torvalds } 1501da177e4SLinus Torvalds return shrinker; 1511da177e4SLinus Torvalds } 1521da177e4SLinus Torvalds EXPORT_SYMBOL(set_shrinker); 1531da177e4SLinus Torvalds 1541da177e4SLinus Torvalds /* 1551da177e4SLinus Torvalds * Remove one 1561da177e4SLinus Torvalds */ 1571da177e4SLinus Torvalds void remove_shrinker(struct shrinker *shrinker) 1581da177e4SLinus Torvalds { 1591da177e4SLinus Torvalds down_write(&shrinker_rwsem); 1601da177e4SLinus Torvalds list_del(&shrinker->list); 1611da177e4SLinus Torvalds up_write(&shrinker_rwsem); 1621da177e4SLinus Torvalds kfree(shrinker); 1631da177e4SLinus Torvalds } 1641da177e4SLinus Torvalds EXPORT_SYMBOL(remove_shrinker); 1651da177e4SLinus Torvalds 1661da177e4SLinus Torvalds #define SHRINK_BATCH 128 1671da177e4SLinus Torvalds /* 1681da177e4SLinus Torvalds * Call the shrink functions to age shrinkable caches 1691da177e4SLinus Torvalds * 1701da177e4SLinus Torvalds * Here we assume it costs one seek to replace a lru page and that it also 1711da177e4SLinus Torvalds * takes a seek to recreate a cache object. With this in mind we age equal 1721da177e4SLinus Torvalds * percentages of the lru and ageable caches. This should balance the seeks 1731da177e4SLinus Torvalds * generated by these structures. 1741da177e4SLinus Torvalds * 1751da177e4SLinus Torvalds * If the vm encounted mapped pages on the LRU it increase the pressure on 1761da177e4SLinus Torvalds * slab to avoid swapping. 1771da177e4SLinus Torvalds * 1781da177e4SLinus Torvalds * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 1791da177e4SLinus Torvalds * 1801da177e4SLinus Torvalds * `lru_pages' represents the number of on-LRU pages in all the zones which 1811da177e4SLinus Torvalds * are eligible for the caller's allocation attempt. It is used for balancing 1821da177e4SLinus Torvalds * slab reclaim versus page reclaim. 183b15e0905Sakpm@osdl.org * 184b15e0905Sakpm@osdl.org * Returns the number of slab objects which we shrunk. 1851da177e4SLinus Torvalds */ 1869d0243bcSAndrew Morton int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) 1871da177e4SLinus Torvalds { 1881da177e4SLinus Torvalds struct shrinker *shrinker; 189b15e0905Sakpm@osdl.org int ret = 0; 1901da177e4SLinus Torvalds 1911da177e4SLinus Torvalds if (scanned == 0) 1921da177e4SLinus Torvalds scanned = SWAP_CLUSTER_MAX; 1931da177e4SLinus Torvalds 1941da177e4SLinus Torvalds if (!down_read_trylock(&shrinker_rwsem)) 195b15e0905Sakpm@osdl.org return 1; /* Assume we'll be able to shrink next time */ 1961da177e4SLinus Torvalds 1971da177e4SLinus Torvalds list_for_each_entry(shrinker, &shrinker_list, list) { 1981da177e4SLinus Torvalds unsigned long long delta; 1991da177e4SLinus Torvalds unsigned long total_scan; 200ea164d73SAndrea Arcangeli unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); 2011da177e4SLinus Torvalds 2021da177e4SLinus Torvalds delta = (4 * scanned) / shrinker->seeks; 203ea164d73SAndrea Arcangeli delta *= max_pass; 2041da177e4SLinus Torvalds do_div(delta, lru_pages + 1); 2051da177e4SLinus Torvalds shrinker->nr += delta; 206ea164d73SAndrea Arcangeli if (shrinker->nr < 0) { 207ea164d73SAndrea Arcangeli printk(KERN_ERR "%s: nr=%ld\n", 208ea164d73SAndrea Arcangeli __FUNCTION__, shrinker->nr); 209ea164d73SAndrea Arcangeli shrinker->nr = max_pass; 210ea164d73SAndrea Arcangeli } 211ea164d73SAndrea Arcangeli 212ea164d73SAndrea Arcangeli /* 213ea164d73SAndrea Arcangeli * Avoid risking looping forever due to too large nr value: 214ea164d73SAndrea Arcangeli * never try to free more than twice the estimate number of 215ea164d73SAndrea Arcangeli * freeable entries. 216ea164d73SAndrea Arcangeli */ 217ea164d73SAndrea Arcangeli if (shrinker->nr > max_pass * 2) 218ea164d73SAndrea Arcangeli shrinker->nr = max_pass * 2; 2191da177e4SLinus Torvalds 2201da177e4SLinus Torvalds total_scan = shrinker->nr; 2211da177e4SLinus Torvalds shrinker->nr = 0; 2221da177e4SLinus Torvalds 2231da177e4SLinus Torvalds while (total_scan >= SHRINK_BATCH) { 2241da177e4SLinus Torvalds long this_scan = SHRINK_BATCH; 2251da177e4SLinus Torvalds int shrink_ret; 226b15e0905Sakpm@osdl.org int nr_before; 2271da177e4SLinus Torvalds 228b15e0905Sakpm@osdl.org nr_before = (*shrinker->shrinker)(0, gfp_mask); 2291da177e4SLinus Torvalds shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); 2301da177e4SLinus Torvalds if (shrink_ret == -1) 2311da177e4SLinus Torvalds break; 232b15e0905Sakpm@osdl.org if (shrink_ret < nr_before) 233b15e0905Sakpm@osdl.org ret += nr_before - shrink_ret; 2341da177e4SLinus Torvalds mod_page_state(slabs_scanned, this_scan); 2351da177e4SLinus Torvalds total_scan -= this_scan; 2361da177e4SLinus Torvalds 2371da177e4SLinus Torvalds cond_resched(); 2381da177e4SLinus Torvalds } 2391da177e4SLinus Torvalds 2401da177e4SLinus Torvalds shrinker->nr += total_scan; 2411da177e4SLinus Torvalds } 2421da177e4SLinus Torvalds up_read(&shrinker_rwsem); 243b15e0905Sakpm@osdl.org return ret; 2441da177e4SLinus Torvalds } 2451da177e4SLinus Torvalds 2461da177e4SLinus Torvalds /* Called without lock on whether page is mapped, so answer is unstable */ 2471da177e4SLinus Torvalds static inline int page_mapping_inuse(struct page *page) 2481da177e4SLinus Torvalds { 2491da177e4SLinus Torvalds struct address_space *mapping; 2501da177e4SLinus Torvalds 2511da177e4SLinus Torvalds /* Page is in somebody's page tables. */ 2521da177e4SLinus Torvalds if (page_mapped(page)) 2531da177e4SLinus Torvalds return 1; 2541da177e4SLinus Torvalds 2551da177e4SLinus Torvalds /* Be more reluctant to reclaim swapcache than pagecache */ 2561da177e4SLinus Torvalds if (PageSwapCache(page)) 2571da177e4SLinus Torvalds return 1; 2581da177e4SLinus Torvalds 2591da177e4SLinus Torvalds mapping = page_mapping(page); 2601da177e4SLinus Torvalds if (!mapping) 2611da177e4SLinus Torvalds return 0; 2621da177e4SLinus Torvalds 2631da177e4SLinus Torvalds /* File is mmap'd by somebody? */ 2641da177e4SLinus Torvalds return mapping_mapped(mapping); 2651da177e4SLinus Torvalds } 2661da177e4SLinus Torvalds 2671da177e4SLinus Torvalds static inline int is_page_cache_freeable(struct page *page) 2681da177e4SLinus Torvalds { 2691da177e4SLinus Torvalds return page_count(page) - !!PagePrivate(page) == 2; 2701da177e4SLinus Torvalds } 2711da177e4SLinus Torvalds 2721da177e4SLinus Torvalds static int may_write_to_queue(struct backing_dev_info *bdi) 2731da177e4SLinus Torvalds { 274930d9152SChristoph Lameter if (current->flags & PF_SWAPWRITE) 2751da177e4SLinus Torvalds return 1; 2761da177e4SLinus Torvalds if (!bdi_write_congested(bdi)) 2771da177e4SLinus Torvalds return 1; 2781da177e4SLinus Torvalds if (bdi == current->backing_dev_info) 2791da177e4SLinus Torvalds return 1; 2801da177e4SLinus Torvalds return 0; 2811da177e4SLinus Torvalds } 2821da177e4SLinus Torvalds 2831da177e4SLinus Torvalds /* 2841da177e4SLinus Torvalds * We detected a synchronous write error writing a page out. Probably 2851da177e4SLinus Torvalds * -ENOSPC. We need to propagate that into the address_space for a subsequent 2861da177e4SLinus Torvalds * fsync(), msync() or close(). 2871da177e4SLinus Torvalds * 2881da177e4SLinus Torvalds * The tricky part is that after writepage we cannot touch the mapping: nothing 2891da177e4SLinus Torvalds * prevents it from being freed up. But we have a ref on the page and once 2901da177e4SLinus Torvalds * that page is locked, the mapping is pinned. 2911da177e4SLinus Torvalds * 2921da177e4SLinus Torvalds * We're allowed to run sleeping lock_page() here because we know the caller has 2931da177e4SLinus Torvalds * __GFP_FS. 2941da177e4SLinus Torvalds */ 2951da177e4SLinus Torvalds static void handle_write_error(struct address_space *mapping, 2961da177e4SLinus Torvalds struct page *page, int error) 2971da177e4SLinus Torvalds { 2981da177e4SLinus Torvalds lock_page(page); 2991da177e4SLinus Torvalds if (page_mapping(page) == mapping) { 3001da177e4SLinus Torvalds if (error == -ENOSPC) 3011da177e4SLinus Torvalds set_bit(AS_ENOSPC, &mapping->flags); 3021da177e4SLinus Torvalds else 3031da177e4SLinus Torvalds set_bit(AS_EIO, &mapping->flags); 3041da177e4SLinus Torvalds } 3051da177e4SLinus Torvalds unlock_page(page); 3061da177e4SLinus Torvalds } 3071da177e4SLinus Torvalds 3081da177e4SLinus Torvalds /* 3091da177e4SLinus Torvalds * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). 3101da177e4SLinus Torvalds */ 3111da177e4SLinus Torvalds static pageout_t pageout(struct page *page, struct address_space *mapping) 3121da177e4SLinus Torvalds { 3131da177e4SLinus Torvalds /* 3141da177e4SLinus Torvalds * If the page is dirty, only perform writeback if that write 3151da177e4SLinus Torvalds * will be non-blocking. To prevent this allocation from being 3161da177e4SLinus Torvalds * stalled by pagecache activity. But note that there may be 3171da177e4SLinus Torvalds * stalls if we need to run get_block(). We could test 3181da177e4SLinus Torvalds * PagePrivate for that. 3191da177e4SLinus Torvalds * 3201da177e4SLinus Torvalds * If this process is currently in generic_file_write() against 3211da177e4SLinus Torvalds * this page's queue, we can perform writeback even if that 3221da177e4SLinus Torvalds * will block. 3231da177e4SLinus Torvalds * 3241da177e4SLinus Torvalds * If the page is swapcache, write it back even if that would 3251da177e4SLinus Torvalds * block, for some throttling. This happens by accident, because 3261da177e4SLinus Torvalds * swap_backing_dev_info is bust: it doesn't reflect the 3271da177e4SLinus Torvalds * congestion state of the swapdevs. Easy to fix, if needed. 3281da177e4SLinus Torvalds * See swapfile.c:page_queue_congested(). 3291da177e4SLinus Torvalds */ 3301da177e4SLinus Torvalds if (!is_page_cache_freeable(page)) 3311da177e4SLinus Torvalds return PAGE_KEEP; 3321da177e4SLinus Torvalds if (!mapping) { 3331da177e4SLinus Torvalds /* 3341da177e4SLinus Torvalds * Some data journaling orphaned pages can have 3351da177e4SLinus Torvalds * page->mapping == NULL while being dirty with clean buffers. 3361da177e4SLinus Torvalds */ 337323aca6cSakpm@osdl.org if (PagePrivate(page)) { 3381da177e4SLinus Torvalds if (try_to_free_buffers(page)) { 3391da177e4SLinus Torvalds ClearPageDirty(page); 3401da177e4SLinus Torvalds printk("%s: orphaned page\n", __FUNCTION__); 3411da177e4SLinus Torvalds return PAGE_CLEAN; 3421da177e4SLinus Torvalds } 3431da177e4SLinus Torvalds } 3441da177e4SLinus Torvalds return PAGE_KEEP; 3451da177e4SLinus Torvalds } 3461da177e4SLinus Torvalds if (mapping->a_ops->writepage == NULL) 3471da177e4SLinus Torvalds return PAGE_ACTIVATE; 3481da177e4SLinus Torvalds if (!may_write_to_queue(mapping->backing_dev_info)) 3491da177e4SLinus Torvalds return PAGE_KEEP; 3501da177e4SLinus Torvalds 3511da177e4SLinus Torvalds if (clear_page_dirty_for_io(page)) { 3521da177e4SLinus Torvalds int res; 3531da177e4SLinus Torvalds struct writeback_control wbc = { 3541da177e4SLinus Torvalds .sync_mode = WB_SYNC_NONE, 3551da177e4SLinus Torvalds .nr_to_write = SWAP_CLUSTER_MAX, 3561da177e4SLinus Torvalds .nonblocking = 1, 3571da177e4SLinus Torvalds .for_reclaim = 1, 3581da177e4SLinus Torvalds }; 3591da177e4SLinus Torvalds 3601da177e4SLinus Torvalds SetPageReclaim(page); 3611da177e4SLinus Torvalds res = mapping->a_ops->writepage(page, &wbc); 3621da177e4SLinus Torvalds if (res < 0) 3631da177e4SLinus Torvalds handle_write_error(mapping, page, res); 364994fc28cSZach Brown if (res == AOP_WRITEPAGE_ACTIVATE) { 3651da177e4SLinus Torvalds ClearPageReclaim(page); 3661da177e4SLinus Torvalds return PAGE_ACTIVATE; 3671da177e4SLinus Torvalds } 3681da177e4SLinus Torvalds if (!PageWriteback(page)) { 3691da177e4SLinus Torvalds /* synchronous write or broken a_ops? */ 3701da177e4SLinus Torvalds ClearPageReclaim(page); 3711da177e4SLinus Torvalds } 3721da177e4SLinus Torvalds 3731da177e4SLinus Torvalds return PAGE_SUCCESS; 3741da177e4SLinus Torvalds } 3751da177e4SLinus Torvalds 3761da177e4SLinus Torvalds return PAGE_CLEAN; 3771da177e4SLinus Torvalds } 3781da177e4SLinus Torvalds 37949d2e9ccSChristoph Lameter static int remove_mapping(struct address_space *mapping, struct page *page) 38049d2e9ccSChristoph Lameter { 38149d2e9ccSChristoph Lameter if (!mapping) 38249d2e9ccSChristoph Lameter return 0; /* truncate got there first */ 38349d2e9ccSChristoph Lameter 38449d2e9ccSChristoph Lameter write_lock_irq(&mapping->tree_lock); 38549d2e9ccSChristoph Lameter 38649d2e9ccSChristoph Lameter /* 38749d2e9ccSChristoph Lameter * The non-racy check for busy page. It is critical to check 38849d2e9ccSChristoph Lameter * PageDirty _after_ making sure that the page is freeable and 38949d2e9ccSChristoph Lameter * not in use by anybody. (pagecache + us == 2) 39049d2e9ccSChristoph Lameter */ 39149d2e9ccSChristoph Lameter if (unlikely(page_count(page) != 2)) 39249d2e9ccSChristoph Lameter goto cannot_free; 39349d2e9ccSChristoph Lameter smp_rmb(); 39449d2e9ccSChristoph Lameter if (unlikely(PageDirty(page))) 39549d2e9ccSChristoph Lameter goto cannot_free; 39649d2e9ccSChristoph Lameter 39749d2e9ccSChristoph Lameter if (PageSwapCache(page)) { 39849d2e9ccSChristoph Lameter swp_entry_t swap = { .val = page_private(page) }; 39949d2e9ccSChristoph Lameter __delete_from_swap_cache(page); 40049d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 40149d2e9ccSChristoph Lameter swap_free(swap); 40249d2e9ccSChristoph Lameter __put_page(page); /* The pagecache ref */ 40349d2e9ccSChristoph Lameter return 1; 40449d2e9ccSChristoph Lameter } 40549d2e9ccSChristoph Lameter 40649d2e9ccSChristoph Lameter __remove_from_page_cache(page); 40749d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 40849d2e9ccSChristoph Lameter __put_page(page); 40949d2e9ccSChristoph Lameter return 1; 41049d2e9ccSChristoph Lameter 41149d2e9ccSChristoph Lameter cannot_free: 41249d2e9ccSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 41349d2e9ccSChristoph Lameter return 0; 41449d2e9ccSChristoph Lameter } 41549d2e9ccSChristoph Lameter 4161da177e4SLinus Torvalds /* 4171da177e4SLinus Torvalds * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed 4181da177e4SLinus Torvalds */ 4191da177e4SLinus Torvalds static int shrink_list(struct list_head *page_list, struct scan_control *sc) 4201da177e4SLinus Torvalds { 4211da177e4SLinus Torvalds LIST_HEAD(ret_pages); 4221da177e4SLinus Torvalds struct pagevec freed_pvec; 4231da177e4SLinus Torvalds int pgactivate = 0; 4241da177e4SLinus Torvalds int reclaimed = 0; 4251da177e4SLinus Torvalds 4261da177e4SLinus Torvalds cond_resched(); 4271da177e4SLinus Torvalds 4281da177e4SLinus Torvalds pagevec_init(&freed_pvec, 1); 4291da177e4SLinus Torvalds while (!list_empty(page_list)) { 4301da177e4SLinus Torvalds struct address_space *mapping; 4311da177e4SLinus Torvalds struct page *page; 4321da177e4SLinus Torvalds int may_enter_fs; 4331da177e4SLinus Torvalds int referenced; 4341da177e4SLinus Torvalds 4351da177e4SLinus Torvalds cond_resched(); 4361da177e4SLinus Torvalds 4371da177e4SLinus Torvalds page = lru_to_page(page_list); 4381da177e4SLinus Torvalds list_del(&page->lru); 4391da177e4SLinus Torvalds 4401da177e4SLinus Torvalds if (TestSetPageLocked(page)) 4411da177e4SLinus Torvalds goto keep; 4421da177e4SLinus Torvalds 4431da177e4SLinus Torvalds BUG_ON(PageActive(page)); 4441da177e4SLinus Torvalds 4451da177e4SLinus Torvalds sc->nr_scanned++; 4461da177e4SLinus Torvalds /* Double the slab pressure for mapped and swapcache pages */ 4471da177e4SLinus Torvalds if (page_mapped(page) || PageSwapCache(page)) 4481da177e4SLinus Torvalds sc->nr_scanned++; 4491da177e4SLinus Torvalds 4501da177e4SLinus Torvalds if (PageWriteback(page)) 4511da177e4SLinus Torvalds goto keep_locked; 4521da177e4SLinus Torvalds 453f7b7fd8fSRik van Riel referenced = page_referenced(page, 1); 4541da177e4SLinus Torvalds /* In active use or really unfreeable? Activate it. */ 4551da177e4SLinus Torvalds if (referenced && page_mapping_inuse(page)) 4561da177e4SLinus Torvalds goto activate_locked; 4571da177e4SLinus Torvalds 4581da177e4SLinus Torvalds #ifdef CONFIG_SWAP 4591da177e4SLinus Torvalds /* 4601da177e4SLinus Torvalds * Anonymous process memory has backing store? 4611da177e4SLinus Torvalds * Try to allocate it some swap space here. 4621da177e4SLinus Torvalds */ 463c340010eSLee Schermerhorn if (PageAnon(page) && !PageSwapCache(page)) { 464f1fd1067SChristoph Lameter if (!sc->may_swap) 465f1fd1067SChristoph Lameter goto keep_locked; 4661480a540SChristoph Lameter if (!add_to_swap(page, GFP_ATOMIC)) 4671da177e4SLinus Torvalds goto activate_locked; 4681da177e4SLinus Torvalds } 4691da177e4SLinus Torvalds #endif /* CONFIG_SWAP */ 4701da177e4SLinus Torvalds 4711da177e4SLinus Torvalds mapping = page_mapping(page); 4721da177e4SLinus Torvalds may_enter_fs = (sc->gfp_mask & __GFP_FS) || 4731da177e4SLinus Torvalds (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 4741da177e4SLinus Torvalds 4751da177e4SLinus Torvalds /* 4761da177e4SLinus Torvalds * The page is mapped into the page tables of one or more 4771da177e4SLinus Torvalds * processes. Try to unmap it here. 4781da177e4SLinus Torvalds */ 4791da177e4SLinus Torvalds if (page_mapped(page) && mapping) { 480aa3f18b3SChristoph Lameter /* 481aa3f18b3SChristoph Lameter * No unmapping if we do not swap 482aa3f18b3SChristoph Lameter */ 483aa3f18b3SChristoph Lameter if (!sc->may_swap) 484aa3f18b3SChristoph Lameter goto keep_locked; 485aa3f18b3SChristoph Lameter 486*a48d07afSChristoph Lameter switch (try_to_unmap(page, 0)) { 4871da177e4SLinus Torvalds case SWAP_FAIL: 4881da177e4SLinus Torvalds goto activate_locked; 4891da177e4SLinus Torvalds case SWAP_AGAIN: 4901da177e4SLinus Torvalds goto keep_locked; 4911da177e4SLinus Torvalds case SWAP_SUCCESS: 4921da177e4SLinus Torvalds ; /* try to free the page below */ 4931da177e4SLinus Torvalds } 4941da177e4SLinus Torvalds } 4951da177e4SLinus Torvalds 4961da177e4SLinus Torvalds if (PageDirty(page)) { 4971da177e4SLinus Torvalds if (referenced) 4981da177e4SLinus Torvalds goto keep_locked; 4991da177e4SLinus Torvalds if (!may_enter_fs) 5001da177e4SLinus Torvalds goto keep_locked; 50152a8363eSChristoph Lameter if (!sc->may_writepage) 5021da177e4SLinus Torvalds goto keep_locked; 5031da177e4SLinus Torvalds 5041da177e4SLinus Torvalds /* Page is dirty, try to write it out here */ 5051da177e4SLinus Torvalds switch(pageout(page, mapping)) { 5061da177e4SLinus Torvalds case PAGE_KEEP: 5071da177e4SLinus Torvalds goto keep_locked; 5081da177e4SLinus Torvalds case PAGE_ACTIVATE: 5091da177e4SLinus Torvalds goto activate_locked; 5101da177e4SLinus Torvalds case PAGE_SUCCESS: 5111da177e4SLinus Torvalds if (PageWriteback(page) || PageDirty(page)) 5121da177e4SLinus Torvalds goto keep; 5131da177e4SLinus Torvalds /* 5141da177e4SLinus Torvalds * A synchronous write - probably a ramdisk. Go 5151da177e4SLinus Torvalds * ahead and try to reclaim the page. 5161da177e4SLinus Torvalds */ 5171da177e4SLinus Torvalds if (TestSetPageLocked(page)) 5181da177e4SLinus Torvalds goto keep; 5191da177e4SLinus Torvalds if (PageDirty(page) || PageWriteback(page)) 5201da177e4SLinus Torvalds goto keep_locked; 5211da177e4SLinus Torvalds mapping = page_mapping(page); 5221da177e4SLinus Torvalds case PAGE_CLEAN: 5231da177e4SLinus Torvalds ; /* try to free the page below */ 5241da177e4SLinus Torvalds } 5251da177e4SLinus Torvalds } 5261da177e4SLinus Torvalds 5271da177e4SLinus Torvalds /* 5281da177e4SLinus Torvalds * If the page has buffers, try to free the buffer mappings 5291da177e4SLinus Torvalds * associated with this page. If we succeed we try to free 5301da177e4SLinus Torvalds * the page as well. 5311da177e4SLinus Torvalds * 5321da177e4SLinus Torvalds * We do this even if the page is PageDirty(). 5331da177e4SLinus Torvalds * try_to_release_page() does not perform I/O, but it is 5341da177e4SLinus Torvalds * possible for a page to have PageDirty set, but it is actually 5351da177e4SLinus Torvalds * clean (all its buffers are clean). This happens if the 5361da177e4SLinus Torvalds * buffers were written out directly, with submit_bh(). ext3 5371da177e4SLinus Torvalds * will do this, as well as the blockdev mapping. 5381da177e4SLinus Torvalds * try_to_release_page() will discover that cleanness and will 5391da177e4SLinus Torvalds * drop the buffers and mark the page clean - it can be freed. 5401da177e4SLinus Torvalds * 5411da177e4SLinus Torvalds * Rarely, pages can have buffers and no ->mapping. These are 5421da177e4SLinus Torvalds * the pages which were not successfully invalidated in 5431da177e4SLinus Torvalds * truncate_complete_page(). We try to drop those buffers here 5441da177e4SLinus Torvalds * and if that worked, and the page is no longer mapped into 5451da177e4SLinus Torvalds * process address space (page_count == 1) it can be freed. 5461da177e4SLinus Torvalds * Otherwise, leave the page on the LRU so it is swappable. 5471da177e4SLinus Torvalds */ 5481da177e4SLinus Torvalds if (PagePrivate(page)) { 5491da177e4SLinus Torvalds if (!try_to_release_page(page, sc->gfp_mask)) 5501da177e4SLinus Torvalds goto activate_locked; 5511da177e4SLinus Torvalds if (!mapping && page_count(page) == 1) 5521da177e4SLinus Torvalds goto free_it; 5531da177e4SLinus Torvalds } 5541da177e4SLinus Torvalds 55549d2e9ccSChristoph Lameter if (!remove_mapping(mapping, page)) 55649d2e9ccSChristoph Lameter goto keep_locked; 5571da177e4SLinus Torvalds 5581da177e4SLinus Torvalds free_it: 5591da177e4SLinus Torvalds unlock_page(page); 5601da177e4SLinus Torvalds reclaimed++; 5611da177e4SLinus Torvalds if (!pagevec_add(&freed_pvec, page)) 5621da177e4SLinus Torvalds __pagevec_release_nonlru(&freed_pvec); 5631da177e4SLinus Torvalds continue; 5641da177e4SLinus Torvalds 5651da177e4SLinus Torvalds activate_locked: 5661da177e4SLinus Torvalds SetPageActive(page); 5671da177e4SLinus Torvalds pgactivate++; 5681da177e4SLinus Torvalds keep_locked: 5691da177e4SLinus Torvalds unlock_page(page); 5701da177e4SLinus Torvalds keep: 5711da177e4SLinus Torvalds list_add(&page->lru, &ret_pages); 5721da177e4SLinus Torvalds BUG_ON(PageLRU(page)); 5731da177e4SLinus Torvalds } 5741da177e4SLinus Torvalds list_splice(&ret_pages, page_list); 5751da177e4SLinus Torvalds if (pagevec_count(&freed_pvec)) 5761da177e4SLinus Torvalds __pagevec_release_nonlru(&freed_pvec); 5771da177e4SLinus Torvalds mod_page_state(pgactivate, pgactivate); 5781da177e4SLinus Torvalds sc->nr_reclaimed += reclaimed; 5791da177e4SLinus Torvalds return reclaimed; 5801da177e4SLinus Torvalds } 5811da177e4SLinus Torvalds 5827cbe34cfSChristoph Lameter #ifdef CONFIG_MIGRATION 5838419c318SChristoph Lameter static inline void move_to_lru(struct page *page) 5848419c318SChristoph Lameter { 5858419c318SChristoph Lameter list_del(&page->lru); 5868419c318SChristoph Lameter if (PageActive(page)) { 5878419c318SChristoph Lameter /* 5888419c318SChristoph Lameter * lru_cache_add_active checks that 5898419c318SChristoph Lameter * the PG_active bit is off. 5908419c318SChristoph Lameter */ 5918419c318SChristoph Lameter ClearPageActive(page); 5928419c318SChristoph Lameter lru_cache_add_active(page); 5938419c318SChristoph Lameter } else { 5948419c318SChristoph Lameter lru_cache_add(page); 5958419c318SChristoph Lameter } 5968419c318SChristoph Lameter put_page(page); 5978419c318SChristoph Lameter } 5988419c318SChristoph Lameter 5998419c318SChristoph Lameter /* 600053837fcSNick Piggin * Add isolated pages on the list back to the LRU. 6018419c318SChristoph Lameter * 6028419c318SChristoph Lameter * returns the number of pages put back. 6038419c318SChristoph Lameter */ 6048419c318SChristoph Lameter int putback_lru_pages(struct list_head *l) 6058419c318SChristoph Lameter { 6068419c318SChristoph Lameter struct page *page; 6078419c318SChristoph Lameter struct page *page2; 6088419c318SChristoph Lameter int count = 0; 6098419c318SChristoph Lameter 6108419c318SChristoph Lameter list_for_each_entry_safe(page, page2, l, lru) { 6118419c318SChristoph Lameter move_to_lru(page); 6128419c318SChristoph Lameter count++; 6138419c318SChristoph Lameter } 6148419c318SChristoph Lameter return count; 6158419c318SChristoph Lameter } 6168419c318SChristoph Lameter 6171da177e4SLinus Torvalds /* 61849d2e9ccSChristoph Lameter * swapout a single page 61949d2e9ccSChristoph Lameter * page is locked upon entry, unlocked on exit 62049d2e9ccSChristoph Lameter */ 62149d2e9ccSChristoph Lameter static int swap_page(struct page *page) 62249d2e9ccSChristoph Lameter { 62349d2e9ccSChristoph Lameter struct address_space *mapping = page_mapping(page); 62449d2e9ccSChristoph Lameter 62549d2e9ccSChristoph Lameter if (page_mapped(page) && mapping) 626*a48d07afSChristoph Lameter if (try_to_unmap(page, 0) != SWAP_SUCCESS) 62749d2e9ccSChristoph Lameter goto unlock_retry; 62849d2e9ccSChristoph Lameter 62949d2e9ccSChristoph Lameter if (PageDirty(page)) { 63049d2e9ccSChristoph Lameter /* Page is dirty, try to write it out here */ 63149d2e9ccSChristoph Lameter switch(pageout(page, mapping)) { 63249d2e9ccSChristoph Lameter case PAGE_KEEP: 63349d2e9ccSChristoph Lameter case PAGE_ACTIVATE: 63449d2e9ccSChristoph Lameter goto unlock_retry; 63549d2e9ccSChristoph Lameter 63649d2e9ccSChristoph Lameter case PAGE_SUCCESS: 63749d2e9ccSChristoph Lameter goto retry; 63849d2e9ccSChristoph Lameter 63949d2e9ccSChristoph Lameter case PAGE_CLEAN: 64049d2e9ccSChristoph Lameter ; /* try to free the page below */ 64149d2e9ccSChristoph Lameter } 64249d2e9ccSChristoph Lameter } 64349d2e9ccSChristoph Lameter 64449d2e9ccSChristoph Lameter if (PagePrivate(page)) { 64549d2e9ccSChristoph Lameter if (!try_to_release_page(page, GFP_KERNEL) || 64649d2e9ccSChristoph Lameter (!mapping && page_count(page) == 1)) 64749d2e9ccSChristoph Lameter goto unlock_retry; 64849d2e9ccSChristoph Lameter } 64949d2e9ccSChristoph Lameter 65049d2e9ccSChristoph Lameter if (remove_mapping(mapping, page)) { 65149d2e9ccSChristoph Lameter /* Success */ 65249d2e9ccSChristoph Lameter unlock_page(page); 65349d2e9ccSChristoph Lameter return 0; 65449d2e9ccSChristoph Lameter } 65549d2e9ccSChristoph Lameter 65649d2e9ccSChristoph Lameter unlock_retry: 65749d2e9ccSChristoph Lameter unlock_page(page); 65849d2e9ccSChristoph Lameter 65949d2e9ccSChristoph Lameter retry: 660d0d96328SChristoph Lameter return -EAGAIN; 66149d2e9ccSChristoph Lameter } 662*a48d07afSChristoph Lameter 663*a48d07afSChristoph Lameter /* 664*a48d07afSChristoph Lameter * Page migration was first developed in the context of the memory hotplug 665*a48d07afSChristoph Lameter * project. The main authors of the migration code are: 666*a48d07afSChristoph Lameter * 667*a48d07afSChristoph Lameter * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 668*a48d07afSChristoph Lameter * Hirokazu Takahashi <taka@valinux.co.jp> 669*a48d07afSChristoph Lameter * Dave Hansen <haveblue@us.ibm.com> 670*a48d07afSChristoph Lameter * Christoph Lameter <clameter@sgi.com> 671*a48d07afSChristoph Lameter */ 672*a48d07afSChristoph Lameter 673*a48d07afSChristoph Lameter /* 674*a48d07afSChristoph Lameter * Remove references for a page and establish the new page with the correct 675*a48d07afSChristoph Lameter * basic settings to be able to stop accesses to the page. 676*a48d07afSChristoph Lameter */ 677*a48d07afSChristoph Lameter static int migrate_page_remove_references(struct page *newpage, 678*a48d07afSChristoph Lameter struct page *page, int nr_refs) 679*a48d07afSChristoph Lameter { 680*a48d07afSChristoph Lameter struct address_space *mapping = page_mapping(page); 681*a48d07afSChristoph Lameter struct page **radix_pointer; 682*a48d07afSChristoph Lameter 683*a48d07afSChristoph Lameter /* 684*a48d07afSChristoph Lameter * Avoid doing any of the following work if the page count 685*a48d07afSChristoph Lameter * indicates that the page is in use or truncate has removed 686*a48d07afSChristoph Lameter * the page. 687*a48d07afSChristoph Lameter */ 688*a48d07afSChristoph Lameter if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) 689*a48d07afSChristoph Lameter return 1; 690*a48d07afSChristoph Lameter 691*a48d07afSChristoph Lameter /* 692*a48d07afSChristoph Lameter * Establish swap ptes for anonymous pages or destroy pte 693*a48d07afSChristoph Lameter * maps for files. 694*a48d07afSChristoph Lameter * 695*a48d07afSChristoph Lameter * In order to reestablish file backed mappings the fault handlers 696*a48d07afSChristoph Lameter * will take the radix tree_lock which may then be used to stop 697*a48d07afSChristoph Lameter * processses from accessing this page until the new page is ready. 698*a48d07afSChristoph Lameter * 699*a48d07afSChristoph Lameter * A process accessing via a swap pte (an anonymous page) will take a 700*a48d07afSChristoph Lameter * page_lock on the old page which will block the process until the 701*a48d07afSChristoph Lameter * migration attempt is complete. At that time the PageSwapCache bit 702*a48d07afSChristoph Lameter * will be examined. If the page was migrated then the PageSwapCache 703*a48d07afSChristoph Lameter * bit will be clear and the operation to retrieve the page will be 704*a48d07afSChristoph Lameter * retried which will find the new page in the radix tree. Then a new 705*a48d07afSChristoph Lameter * direct mapping may be generated based on the radix tree contents. 706*a48d07afSChristoph Lameter * 707*a48d07afSChristoph Lameter * If the page was not migrated then the PageSwapCache bit 708*a48d07afSChristoph Lameter * is still set and the operation may continue. 709*a48d07afSChristoph Lameter */ 710*a48d07afSChristoph Lameter try_to_unmap(page, 1); 711*a48d07afSChristoph Lameter 712*a48d07afSChristoph Lameter /* 713*a48d07afSChristoph Lameter * Give up if we were unable to remove all mappings. 714*a48d07afSChristoph Lameter */ 715*a48d07afSChristoph Lameter if (page_mapcount(page)) 716*a48d07afSChristoph Lameter return 1; 717*a48d07afSChristoph Lameter 718*a48d07afSChristoph Lameter write_lock_irq(&mapping->tree_lock); 719*a48d07afSChristoph Lameter 720*a48d07afSChristoph Lameter radix_pointer = (struct page **)radix_tree_lookup_slot( 721*a48d07afSChristoph Lameter &mapping->page_tree, 722*a48d07afSChristoph Lameter page_index(page)); 723*a48d07afSChristoph Lameter 724*a48d07afSChristoph Lameter if (!page_mapping(page) || page_count(page) != nr_refs || 725*a48d07afSChristoph Lameter *radix_pointer != page) { 726*a48d07afSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 727*a48d07afSChristoph Lameter return 1; 728*a48d07afSChristoph Lameter } 729*a48d07afSChristoph Lameter 730*a48d07afSChristoph Lameter /* 731*a48d07afSChristoph Lameter * Now we know that no one else is looking at the page. 732*a48d07afSChristoph Lameter * 733*a48d07afSChristoph Lameter * Certain minimal information about a page must be available 734*a48d07afSChristoph Lameter * in order for other subsystems to properly handle the page if they 735*a48d07afSChristoph Lameter * find it through the radix tree update before we are finished 736*a48d07afSChristoph Lameter * copying the page. 737*a48d07afSChristoph Lameter */ 738*a48d07afSChristoph Lameter get_page(newpage); 739*a48d07afSChristoph Lameter newpage->index = page->index; 740*a48d07afSChristoph Lameter newpage->mapping = page->mapping; 741*a48d07afSChristoph Lameter if (PageSwapCache(page)) { 742*a48d07afSChristoph Lameter SetPageSwapCache(newpage); 743*a48d07afSChristoph Lameter set_page_private(newpage, page_private(page)); 744*a48d07afSChristoph Lameter } 745*a48d07afSChristoph Lameter 746*a48d07afSChristoph Lameter *radix_pointer = newpage; 747*a48d07afSChristoph Lameter __put_page(page); 748*a48d07afSChristoph Lameter write_unlock_irq(&mapping->tree_lock); 749*a48d07afSChristoph Lameter 750*a48d07afSChristoph Lameter return 0; 751*a48d07afSChristoph Lameter } 752*a48d07afSChristoph Lameter 753*a48d07afSChristoph Lameter /* 754*a48d07afSChristoph Lameter * Copy the page to its new location 755*a48d07afSChristoph Lameter */ 756*a48d07afSChristoph Lameter void migrate_page_copy(struct page *newpage, struct page *page) 757*a48d07afSChristoph Lameter { 758*a48d07afSChristoph Lameter copy_highpage(newpage, page); 759*a48d07afSChristoph Lameter 760*a48d07afSChristoph Lameter if (PageError(page)) 761*a48d07afSChristoph Lameter SetPageError(newpage); 762*a48d07afSChristoph Lameter if (PageReferenced(page)) 763*a48d07afSChristoph Lameter SetPageReferenced(newpage); 764*a48d07afSChristoph Lameter if (PageUptodate(page)) 765*a48d07afSChristoph Lameter SetPageUptodate(newpage); 766*a48d07afSChristoph Lameter if (PageActive(page)) 767*a48d07afSChristoph Lameter SetPageActive(newpage); 768*a48d07afSChristoph Lameter if (PageChecked(page)) 769*a48d07afSChristoph Lameter SetPageChecked(newpage); 770*a48d07afSChristoph Lameter if (PageMappedToDisk(page)) 771*a48d07afSChristoph Lameter SetPageMappedToDisk(newpage); 772*a48d07afSChristoph Lameter 773*a48d07afSChristoph Lameter if (PageDirty(page)) { 774*a48d07afSChristoph Lameter clear_page_dirty_for_io(page); 775*a48d07afSChristoph Lameter set_page_dirty(newpage); 776*a48d07afSChristoph Lameter } 777*a48d07afSChristoph Lameter 778*a48d07afSChristoph Lameter ClearPageSwapCache(page); 779*a48d07afSChristoph Lameter ClearPageActive(page); 780*a48d07afSChristoph Lameter ClearPagePrivate(page); 781*a48d07afSChristoph Lameter set_page_private(page, 0); 782*a48d07afSChristoph Lameter page->mapping = NULL; 783*a48d07afSChristoph Lameter 784*a48d07afSChristoph Lameter /* 785*a48d07afSChristoph Lameter * If any waiters have accumulated on the new page then 786*a48d07afSChristoph Lameter * wake them up. 787*a48d07afSChristoph Lameter */ 788*a48d07afSChristoph Lameter if (PageWriteback(newpage)) 789*a48d07afSChristoph Lameter end_page_writeback(newpage); 790*a48d07afSChristoph Lameter } 791*a48d07afSChristoph Lameter 792*a48d07afSChristoph Lameter /* 793*a48d07afSChristoph Lameter * Common logic to directly migrate a single page suitable for 794*a48d07afSChristoph Lameter * pages that do not use PagePrivate. 795*a48d07afSChristoph Lameter * 796*a48d07afSChristoph Lameter * Pages are locked upon entry and exit. 797*a48d07afSChristoph Lameter */ 798*a48d07afSChristoph Lameter int migrate_page(struct page *newpage, struct page *page) 799*a48d07afSChristoph Lameter { 800*a48d07afSChristoph Lameter BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 801*a48d07afSChristoph Lameter 802*a48d07afSChristoph Lameter if (migrate_page_remove_references(newpage, page, 2)) 803*a48d07afSChristoph Lameter return -EAGAIN; 804*a48d07afSChristoph Lameter 805*a48d07afSChristoph Lameter migrate_page_copy(newpage, page); 806*a48d07afSChristoph Lameter 807*a48d07afSChristoph Lameter return 0; 808*a48d07afSChristoph Lameter } 809*a48d07afSChristoph Lameter 81049d2e9ccSChristoph Lameter /* 81149d2e9ccSChristoph Lameter * migrate_pages 81249d2e9ccSChristoph Lameter * 81349d2e9ccSChristoph Lameter * Two lists are passed to this function. The first list 81449d2e9ccSChristoph Lameter * contains the pages isolated from the LRU to be migrated. 81549d2e9ccSChristoph Lameter * The second list contains new pages that the pages isolated 81649d2e9ccSChristoph Lameter * can be moved to. If the second list is NULL then all 81749d2e9ccSChristoph Lameter * pages are swapped out. 81849d2e9ccSChristoph Lameter * 81949d2e9ccSChristoph Lameter * The function returns after 10 attempts or if no pages 82049d2e9ccSChristoph Lameter * are movable anymore because t has become empty 82149d2e9ccSChristoph Lameter * or no retryable pages exist anymore. 82249d2e9ccSChristoph Lameter * 823d0d96328SChristoph Lameter * Return: Number of pages not migrated when "to" ran empty. 82449d2e9ccSChristoph Lameter */ 825d4984711SChristoph Lameter int migrate_pages(struct list_head *from, struct list_head *to, 826d4984711SChristoph Lameter struct list_head *moved, struct list_head *failed) 82749d2e9ccSChristoph Lameter { 82849d2e9ccSChristoph Lameter int retry; 82949d2e9ccSChristoph Lameter int nr_failed = 0; 83049d2e9ccSChristoph Lameter int pass = 0; 83149d2e9ccSChristoph Lameter struct page *page; 83249d2e9ccSChristoph Lameter struct page *page2; 83349d2e9ccSChristoph Lameter int swapwrite = current->flags & PF_SWAPWRITE; 834d0d96328SChristoph Lameter int rc; 83549d2e9ccSChristoph Lameter 83649d2e9ccSChristoph Lameter if (!swapwrite) 83749d2e9ccSChristoph Lameter current->flags |= PF_SWAPWRITE; 83849d2e9ccSChristoph Lameter 83949d2e9ccSChristoph Lameter redo: 84049d2e9ccSChristoph Lameter retry = 0; 84149d2e9ccSChristoph Lameter 842d4984711SChristoph Lameter list_for_each_entry_safe(page, page2, from, lru) { 843*a48d07afSChristoph Lameter struct page *newpage = NULL; 844*a48d07afSChristoph Lameter struct address_space *mapping; 845*a48d07afSChristoph Lameter 84649d2e9ccSChristoph Lameter cond_resched(); 84749d2e9ccSChristoph Lameter 848d0d96328SChristoph Lameter rc = 0; 849d0d96328SChristoph Lameter if (page_count(page) == 1) 850ee27497dSChristoph Lameter /* page was freed from under us. So we are done. */ 851d0d96328SChristoph Lameter goto next; 852d0d96328SChristoph Lameter 853*a48d07afSChristoph Lameter if (to && list_empty(to)) 854*a48d07afSChristoph Lameter break; 855*a48d07afSChristoph Lameter 85649d2e9ccSChristoph Lameter /* 85749d2e9ccSChristoph Lameter * Skip locked pages during the first two passes to give the 8587cbe34cfSChristoph Lameter * functions holding the lock time to release the page. Later we 8597cbe34cfSChristoph Lameter * use lock_page() to have a higher chance of acquiring the 8607cbe34cfSChristoph Lameter * lock. 86149d2e9ccSChristoph Lameter */ 862d0d96328SChristoph Lameter rc = -EAGAIN; 86349d2e9ccSChristoph Lameter if (pass > 2) 86449d2e9ccSChristoph Lameter lock_page(page); 86549d2e9ccSChristoph Lameter else 86649d2e9ccSChristoph Lameter if (TestSetPageLocked(page)) 867d0d96328SChristoph Lameter goto next; 86849d2e9ccSChristoph Lameter 86949d2e9ccSChristoph Lameter /* 87049d2e9ccSChristoph Lameter * Only wait on writeback if we have already done a pass where 87149d2e9ccSChristoph Lameter * we we may have triggered writeouts for lots of pages. 87249d2e9ccSChristoph Lameter */ 8737cbe34cfSChristoph Lameter if (pass > 0) { 87449d2e9ccSChristoph Lameter wait_on_page_writeback(page); 8757cbe34cfSChristoph Lameter } else { 876d0d96328SChristoph Lameter if (PageWriteback(page)) 877d0d96328SChristoph Lameter goto unlock_page; 8787cbe34cfSChristoph Lameter } 87949d2e9ccSChristoph Lameter 880d0d96328SChristoph Lameter /* 881d0d96328SChristoph Lameter * Anonymous pages must have swap cache references otherwise 882d0d96328SChristoph Lameter * the information contained in the page maps cannot be 883d0d96328SChristoph Lameter * preserved. 884d0d96328SChristoph Lameter */ 88549d2e9ccSChristoph Lameter if (PageAnon(page) && !PageSwapCache(page)) { 8861480a540SChristoph Lameter if (!add_to_swap(page, GFP_KERNEL)) { 887d0d96328SChristoph Lameter rc = -ENOMEM; 888d0d96328SChristoph Lameter goto unlock_page; 88949d2e9ccSChristoph Lameter } 89049d2e9ccSChristoph Lameter } 89149d2e9ccSChristoph Lameter 892*a48d07afSChristoph Lameter if (!to) { 893d0d96328SChristoph Lameter rc = swap_page(page); 894d0d96328SChristoph Lameter goto next; 895*a48d07afSChristoph Lameter } 896*a48d07afSChristoph Lameter 897*a48d07afSChristoph Lameter newpage = lru_to_page(to); 898*a48d07afSChristoph Lameter lock_page(newpage); 899*a48d07afSChristoph Lameter 900*a48d07afSChristoph Lameter /* 901*a48d07afSChristoph Lameter * Pages are properly locked and writeback is complete. 902*a48d07afSChristoph Lameter * Try to migrate the page. 903*a48d07afSChristoph Lameter */ 904*a48d07afSChristoph Lameter mapping = page_mapping(page); 905*a48d07afSChristoph Lameter if (!mapping) 906*a48d07afSChristoph Lameter goto unlock_both; 907*a48d07afSChristoph Lameter 908*a48d07afSChristoph Lameter /* 909*a48d07afSChristoph Lameter * Trigger writeout if page is dirty 910*a48d07afSChristoph Lameter */ 911*a48d07afSChristoph Lameter if (PageDirty(page)) { 912*a48d07afSChristoph Lameter switch (pageout(page, mapping)) { 913*a48d07afSChristoph Lameter case PAGE_KEEP: 914*a48d07afSChristoph Lameter case PAGE_ACTIVATE: 915*a48d07afSChristoph Lameter goto unlock_both; 916*a48d07afSChristoph Lameter 917*a48d07afSChristoph Lameter case PAGE_SUCCESS: 918*a48d07afSChristoph Lameter unlock_page(newpage); 919*a48d07afSChristoph Lameter goto next; 920*a48d07afSChristoph Lameter 921*a48d07afSChristoph Lameter case PAGE_CLEAN: 922*a48d07afSChristoph Lameter ; /* try to migrate the page below */ 923*a48d07afSChristoph Lameter } 924*a48d07afSChristoph Lameter } 925*a48d07afSChristoph Lameter /* 926*a48d07afSChristoph Lameter * If we have no buffer or can release the buffer 927*a48d07afSChristoph Lameter * then do a simple migration. 928*a48d07afSChristoph Lameter */ 929*a48d07afSChristoph Lameter if (!page_has_buffers(page) || 930*a48d07afSChristoph Lameter try_to_release_page(page, GFP_KERNEL)) { 931*a48d07afSChristoph Lameter rc = migrate_page(newpage, page); 932*a48d07afSChristoph Lameter goto unlock_both; 933*a48d07afSChristoph Lameter } 934*a48d07afSChristoph Lameter 935*a48d07afSChristoph Lameter /* 936*a48d07afSChristoph Lameter * On early passes with mapped pages simply 937*a48d07afSChristoph Lameter * retry. There may be a lock held for some 938*a48d07afSChristoph Lameter * buffers that may go away. Later 939*a48d07afSChristoph Lameter * swap them out. 940*a48d07afSChristoph Lameter */ 941*a48d07afSChristoph Lameter if (pass > 4) { 942*a48d07afSChristoph Lameter unlock_page(newpage); 943*a48d07afSChristoph Lameter newpage = NULL; 944*a48d07afSChristoph Lameter rc = swap_page(page); 945*a48d07afSChristoph Lameter goto next; 946*a48d07afSChristoph Lameter } 947*a48d07afSChristoph Lameter 948*a48d07afSChristoph Lameter unlock_both: 949*a48d07afSChristoph Lameter unlock_page(newpage); 950d0d96328SChristoph Lameter 951d0d96328SChristoph Lameter unlock_page: 952d0d96328SChristoph Lameter unlock_page(page); 953d0d96328SChristoph Lameter 954d0d96328SChristoph Lameter next: 955d0d96328SChristoph Lameter if (rc == -EAGAIN) { 95649d2e9ccSChristoph Lameter retry++; 957d0d96328SChristoph Lameter } else if (rc) { 958d0d96328SChristoph Lameter /* Permanent failure */ 959d0d96328SChristoph Lameter list_move(&page->lru, failed); 960d0d96328SChristoph Lameter nr_failed++; 961d0d96328SChristoph Lameter } else { 962*a48d07afSChristoph Lameter if (newpage) { 963*a48d07afSChristoph Lameter /* Successful migration. Return page to LRU */ 964*a48d07afSChristoph Lameter move_to_lru(newpage); 965*a48d07afSChristoph Lameter } 966d0d96328SChristoph Lameter list_move(&page->lru, moved); 967d0d96328SChristoph Lameter } 96849d2e9ccSChristoph Lameter } 96949d2e9ccSChristoph Lameter if (retry && pass++ < 10) 97049d2e9ccSChristoph Lameter goto redo; 97149d2e9ccSChristoph Lameter 97249d2e9ccSChristoph Lameter if (!swapwrite) 97349d2e9ccSChristoph Lameter current->flags &= ~PF_SWAPWRITE; 97449d2e9ccSChristoph Lameter 97549d2e9ccSChristoph Lameter return nr_failed + retry; 97649d2e9ccSChristoph Lameter } 9778419c318SChristoph Lameter 9788419c318SChristoph Lameter /* 9798419c318SChristoph Lameter * Isolate one page from the LRU lists and put it on the 980053837fcSNick Piggin * indicated list with elevated refcount. 9818419c318SChristoph Lameter * 9828419c318SChristoph Lameter * Result: 9838419c318SChristoph Lameter * 0 = page not on LRU list 9848419c318SChristoph Lameter * 1 = page removed from LRU list and added to the specified list. 9858419c318SChristoph Lameter */ 9868419c318SChristoph Lameter int isolate_lru_page(struct page *page) 9878419c318SChristoph Lameter { 988053837fcSNick Piggin int ret = 0; 9898419c318SChristoph Lameter 990053837fcSNick Piggin if (PageLRU(page)) { 991053837fcSNick Piggin struct zone *zone = page_zone(page); 9928419c318SChristoph Lameter spin_lock_irq(&zone->lru_lock); 993053837fcSNick Piggin if (TestClearPageLRU(page)) { 994053837fcSNick Piggin ret = 1; 995053837fcSNick Piggin get_page(page); 9968419c318SChristoph Lameter if (PageActive(page)) 9978419c318SChristoph Lameter del_page_from_active_list(zone, page); 9988419c318SChristoph Lameter else 9998419c318SChristoph Lameter del_page_from_inactive_list(zone, page); 10008419c318SChristoph Lameter } 10018419c318SChristoph Lameter spin_unlock_irq(&zone->lru_lock); 10028419c318SChristoph Lameter } 1003053837fcSNick Piggin 1004053837fcSNick Piggin return ret; 10058419c318SChristoph Lameter } 10067cbe34cfSChristoph Lameter #endif 100749d2e9ccSChristoph Lameter 100849d2e9ccSChristoph Lameter /* 10091da177e4SLinus Torvalds * zone->lru_lock is heavily contended. Some of the functions that 10101da177e4SLinus Torvalds * shrink the lists perform better by taking out a batch of pages 10111da177e4SLinus Torvalds * and working on them outside the LRU lock. 10121da177e4SLinus Torvalds * 10131da177e4SLinus Torvalds * For pagecache intensive workloads, this function is the hottest 10141da177e4SLinus Torvalds * spot in the kernel (apart from copy_*_user functions). 10151da177e4SLinus Torvalds * 10161da177e4SLinus Torvalds * Appropriate locks must be held before calling this function. 10171da177e4SLinus Torvalds * 10181da177e4SLinus Torvalds * @nr_to_scan: The number of pages to look through on the list. 10191da177e4SLinus Torvalds * @src: The LRU list to pull pages off. 10201da177e4SLinus Torvalds * @dst: The temp list to put pages on to. 10211da177e4SLinus Torvalds * @scanned: The number of pages that were scanned. 10221da177e4SLinus Torvalds * 10231da177e4SLinus Torvalds * returns how many pages were moved onto *@dst. 10241da177e4SLinus Torvalds */ 10251da177e4SLinus Torvalds static int isolate_lru_pages(int nr_to_scan, struct list_head *src, 10261da177e4SLinus Torvalds struct list_head *dst, int *scanned) 10271da177e4SLinus Torvalds { 10281da177e4SLinus Torvalds int nr_taken = 0; 10291da177e4SLinus Torvalds struct page *page; 10301da177e4SLinus Torvalds int scan = 0; 10311da177e4SLinus Torvalds 10321da177e4SLinus Torvalds while (scan++ < nr_to_scan && !list_empty(src)) { 10331da177e4SLinus Torvalds page = lru_to_page(src); 10341da177e4SLinus Torvalds prefetchw_prev_lru_page(page, src, flags); 10351da177e4SLinus Torvalds 1036053837fcSNick Piggin if (!TestClearPageLRU(page)) 103721eac81fSChristoph Lameter BUG(); 1038053837fcSNick Piggin list_del(&page->lru); 1039053837fcSNick Piggin if (get_page_testone(page)) { 1040053837fcSNick Piggin /* 1041053837fcSNick Piggin * It is being freed elsewhere 1042053837fcSNick Piggin */ 1043053837fcSNick Piggin __put_page(page); 1044053837fcSNick Piggin SetPageLRU(page); 1045053837fcSNick Piggin list_add(&page->lru, src); 1046053837fcSNick Piggin continue; 1047053837fcSNick Piggin } else { 1048053837fcSNick Piggin list_add(&page->lru, dst); 1049053837fcSNick Piggin nr_taken++; 10501da177e4SLinus Torvalds } 10511da177e4SLinus Torvalds } 10521da177e4SLinus Torvalds 10531da177e4SLinus Torvalds *scanned = scan; 10541da177e4SLinus Torvalds return nr_taken; 10551da177e4SLinus Torvalds } 10561da177e4SLinus Torvalds 10571da177e4SLinus Torvalds /* 10581da177e4SLinus Torvalds * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed 10591da177e4SLinus Torvalds */ 10601da177e4SLinus Torvalds static void shrink_cache(struct zone *zone, struct scan_control *sc) 10611da177e4SLinus Torvalds { 10621da177e4SLinus Torvalds LIST_HEAD(page_list); 10631da177e4SLinus Torvalds struct pagevec pvec; 10641da177e4SLinus Torvalds int max_scan = sc->nr_to_scan; 10651da177e4SLinus Torvalds 10661da177e4SLinus Torvalds pagevec_init(&pvec, 1); 10671da177e4SLinus Torvalds 10681da177e4SLinus Torvalds lru_add_drain(); 10691da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 10701da177e4SLinus Torvalds while (max_scan > 0) { 10711da177e4SLinus Torvalds struct page *page; 10721da177e4SLinus Torvalds int nr_taken; 10731da177e4SLinus Torvalds int nr_scan; 10741da177e4SLinus Torvalds int nr_freed; 10751da177e4SLinus Torvalds 10761da177e4SLinus Torvalds nr_taken = isolate_lru_pages(sc->swap_cluster_max, 10771da177e4SLinus Torvalds &zone->inactive_list, 10781da177e4SLinus Torvalds &page_list, &nr_scan); 10791da177e4SLinus Torvalds zone->nr_inactive -= nr_taken; 10801da177e4SLinus Torvalds zone->pages_scanned += nr_scan; 10811da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 10821da177e4SLinus Torvalds 10831da177e4SLinus Torvalds if (nr_taken == 0) 10841da177e4SLinus Torvalds goto done; 10851da177e4SLinus Torvalds 10861da177e4SLinus Torvalds max_scan -= nr_scan; 10871da177e4SLinus Torvalds nr_freed = shrink_list(&page_list, sc); 10881da177e4SLinus Torvalds 1089a74609faSNick Piggin local_irq_disable(); 1090a74609faSNick Piggin if (current_is_kswapd()) { 1091a74609faSNick Piggin __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); 1092a74609faSNick Piggin __mod_page_state(kswapd_steal, nr_freed); 1093a74609faSNick Piggin } else 1094a74609faSNick Piggin __mod_page_state_zone(zone, pgscan_direct, nr_scan); 1095a74609faSNick Piggin __mod_page_state_zone(zone, pgsteal, nr_freed); 1096a74609faSNick Piggin 1097a74609faSNick Piggin spin_lock(&zone->lru_lock); 10981da177e4SLinus Torvalds /* 10991da177e4SLinus Torvalds * Put back any unfreeable pages. 11001da177e4SLinus Torvalds */ 11011da177e4SLinus Torvalds while (!list_empty(&page_list)) { 11021da177e4SLinus Torvalds page = lru_to_page(&page_list); 11031da177e4SLinus Torvalds if (TestSetPageLRU(page)) 11041da177e4SLinus Torvalds BUG(); 11051da177e4SLinus Torvalds list_del(&page->lru); 11061da177e4SLinus Torvalds if (PageActive(page)) 11071da177e4SLinus Torvalds add_page_to_active_list(zone, page); 11081da177e4SLinus Torvalds else 11091da177e4SLinus Torvalds add_page_to_inactive_list(zone, page); 11101da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 11111da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 11121da177e4SLinus Torvalds __pagevec_release(&pvec); 11131da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 11141da177e4SLinus Torvalds } 11151da177e4SLinus Torvalds } 11161da177e4SLinus Torvalds } 11171da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 11181da177e4SLinus Torvalds done: 11191da177e4SLinus Torvalds pagevec_release(&pvec); 11201da177e4SLinus Torvalds } 11211da177e4SLinus Torvalds 11221da177e4SLinus Torvalds /* 11231da177e4SLinus Torvalds * This moves pages from the active list to the inactive list. 11241da177e4SLinus Torvalds * 11251da177e4SLinus Torvalds * We move them the other way if the page is referenced by one or more 11261da177e4SLinus Torvalds * processes, from rmap. 11271da177e4SLinus Torvalds * 11281da177e4SLinus Torvalds * If the pages are mostly unmapped, the processing is fast and it is 11291da177e4SLinus Torvalds * appropriate to hold zone->lru_lock across the whole operation. But if 11301da177e4SLinus Torvalds * the pages are mapped, the processing is slow (page_referenced()) so we 11311da177e4SLinus Torvalds * should drop zone->lru_lock around each page. It's impossible to balance 11321da177e4SLinus Torvalds * this, so instead we remove the pages from the LRU while processing them. 11331da177e4SLinus Torvalds * It is safe to rely on PG_active against the non-LRU pages in here because 11341da177e4SLinus Torvalds * nobody will play with that bit on a non-LRU page. 11351da177e4SLinus Torvalds * 11361da177e4SLinus Torvalds * The downside is that we have to touch page->_count against each page. 11371da177e4SLinus Torvalds * But we had to alter page->flags anyway. 11381da177e4SLinus Torvalds */ 11391da177e4SLinus Torvalds static void 11401da177e4SLinus Torvalds refill_inactive_zone(struct zone *zone, struct scan_control *sc) 11411da177e4SLinus Torvalds { 11421da177e4SLinus Torvalds int pgmoved; 11431da177e4SLinus Torvalds int pgdeactivate = 0; 11441da177e4SLinus Torvalds int pgscanned; 11451da177e4SLinus Torvalds int nr_pages = sc->nr_to_scan; 11461da177e4SLinus Torvalds LIST_HEAD(l_hold); /* The pages which were snipped off */ 11471da177e4SLinus Torvalds LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ 11481da177e4SLinus Torvalds LIST_HEAD(l_active); /* Pages to go onto the active_list */ 11491da177e4SLinus Torvalds struct page *page; 11501da177e4SLinus Torvalds struct pagevec pvec; 11511da177e4SLinus Torvalds int reclaim_mapped = 0; 11521da177e4SLinus Torvalds long mapped_ratio; 11531da177e4SLinus Torvalds long distress; 11541da177e4SLinus Torvalds long swap_tendency; 11551da177e4SLinus Torvalds 11561da177e4SLinus Torvalds lru_add_drain(); 11571da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 11581da177e4SLinus Torvalds pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, 11591da177e4SLinus Torvalds &l_hold, &pgscanned); 11601da177e4SLinus Torvalds zone->pages_scanned += pgscanned; 11611da177e4SLinus Torvalds zone->nr_active -= pgmoved; 11621da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 11631da177e4SLinus Torvalds 11641da177e4SLinus Torvalds /* 11651da177e4SLinus Torvalds * `distress' is a measure of how much trouble we're having reclaiming 11661da177e4SLinus Torvalds * pages. 0 -> no problems. 100 -> great trouble. 11671da177e4SLinus Torvalds */ 11681da177e4SLinus Torvalds distress = 100 >> zone->prev_priority; 11691da177e4SLinus Torvalds 11701da177e4SLinus Torvalds /* 11711da177e4SLinus Torvalds * The point of this algorithm is to decide when to start reclaiming 11721da177e4SLinus Torvalds * mapped memory instead of just pagecache. Work out how much memory 11731da177e4SLinus Torvalds * is mapped. 11741da177e4SLinus Torvalds */ 11751da177e4SLinus Torvalds mapped_ratio = (sc->nr_mapped * 100) / total_memory; 11761da177e4SLinus Torvalds 11771da177e4SLinus Torvalds /* 11781da177e4SLinus Torvalds * Now decide how much we really want to unmap some pages. The mapped 11791da177e4SLinus Torvalds * ratio is downgraded - just because there's a lot of mapped memory 11801da177e4SLinus Torvalds * doesn't necessarily mean that page reclaim isn't succeeding. 11811da177e4SLinus Torvalds * 11821da177e4SLinus Torvalds * The distress ratio is important - we don't want to start going oom. 11831da177e4SLinus Torvalds * 11841da177e4SLinus Torvalds * A 100% value of vm_swappiness overrides this algorithm altogether. 11851da177e4SLinus Torvalds */ 11861da177e4SLinus Torvalds swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; 11871da177e4SLinus Torvalds 11881da177e4SLinus Torvalds /* 11891da177e4SLinus Torvalds * Now use this metric to decide whether to start moving mapped memory 11901da177e4SLinus Torvalds * onto the inactive list. 11911da177e4SLinus Torvalds */ 11921da177e4SLinus Torvalds if (swap_tendency >= 100) 11931da177e4SLinus Torvalds reclaim_mapped = 1; 11941da177e4SLinus Torvalds 11951da177e4SLinus Torvalds while (!list_empty(&l_hold)) { 11961da177e4SLinus Torvalds cond_resched(); 11971da177e4SLinus Torvalds page = lru_to_page(&l_hold); 11981da177e4SLinus Torvalds list_del(&page->lru); 11991da177e4SLinus Torvalds if (page_mapped(page)) { 12001da177e4SLinus Torvalds if (!reclaim_mapped || 12011da177e4SLinus Torvalds (total_swap_pages == 0 && PageAnon(page)) || 1202f7b7fd8fSRik van Riel page_referenced(page, 0)) { 12031da177e4SLinus Torvalds list_add(&page->lru, &l_active); 12041da177e4SLinus Torvalds continue; 12051da177e4SLinus Torvalds } 12061da177e4SLinus Torvalds } 12071da177e4SLinus Torvalds list_add(&page->lru, &l_inactive); 12081da177e4SLinus Torvalds } 12091da177e4SLinus Torvalds 12101da177e4SLinus Torvalds pagevec_init(&pvec, 1); 12111da177e4SLinus Torvalds pgmoved = 0; 12121da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12131da177e4SLinus Torvalds while (!list_empty(&l_inactive)) { 12141da177e4SLinus Torvalds page = lru_to_page(&l_inactive); 12151da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_inactive, flags); 12161da177e4SLinus Torvalds if (TestSetPageLRU(page)) 12171da177e4SLinus Torvalds BUG(); 12181da177e4SLinus Torvalds if (!TestClearPageActive(page)) 12191da177e4SLinus Torvalds BUG(); 12201da177e4SLinus Torvalds list_move(&page->lru, &zone->inactive_list); 12211da177e4SLinus Torvalds pgmoved++; 12221da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 12231da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 12241da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12251da177e4SLinus Torvalds pgdeactivate += pgmoved; 12261da177e4SLinus Torvalds pgmoved = 0; 12271da177e4SLinus Torvalds if (buffer_heads_over_limit) 12281da177e4SLinus Torvalds pagevec_strip(&pvec); 12291da177e4SLinus Torvalds __pagevec_release(&pvec); 12301da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds } 12331da177e4SLinus Torvalds zone->nr_inactive += pgmoved; 12341da177e4SLinus Torvalds pgdeactivate += pgmoved; 12351da177e4SLinus Torvalds if (buffer_heads_over_limit) { 12361da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12371da177e4SLinus Torvalds pagevec_strip(&pvec); 12381da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12391da177e4SLinus Torvalds } 12401da177e4SLinus Torvalds 12411da177e4SLinus Torvalds pgmoved = 0; 12421da177e4SLinus Torvalds while (!list_empty(&l_active)) { 12431da177e4SLinus Torvalds page = lru_to_page(&l_active); 12441da177e4SLinus Torvalds prefetchw_prev_lru_page(page, &l_active, flags); 12451da177e4SLinus Torvalds if (TestSetPageLRU(page)) 12461da177e4SLinus Torvalds BUG(); 12471da177e4SLinus Torvalds BUG_ON(!PageActive(page)); 12481da177e4SLinus Torvalds list_move(&page->lru, &zone->active_list); 12491da177e4SLinus Torvalds pgmoved++; 12501da177e4SLinus Torvalds if (!pagevec_add(&pvec, page)) { 12511da177e4SLinus Torvalds zone->nr_active += pgmoved; 12521da177e4SLinus Torvalds pgmoved = 0; 12531da177e4SLinus Torvalds spin_unlock_irq(&zone->lru_lock); 12541da177e4SLinus Torvalds __pagevec_release(&pvec); 12551da177e4SLinus Torvalds spin_lock_irq(&zone->lru_lock); 12561da177e4SLinus Torvalds } 12571da177e4SLinus Torvalds } 12581da177e4SLinus Torvalds zone->nr_active += pgmoved; 1259a74609faSNick Piggin spin_unlock(&zone->lru_lock); 12601da177e4SLinus Torvalds 1261a74609faSNick Piggin __mod_page_state_zone(zone, pgrefill, pgscanned); 1262a74609faSNick Piggin __mod_page_state(pgdeactivate, pgdeactivate); 1263a74609faSNick Piggin local_irq_enable(); 1264a74609faSNick Piggin 1265a74609faSNick Piggin pagevec_release(&pvec); 12661da177e4SLinus Torvalds } 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds /* 12691da177e4SLinus Torvalds * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 12701da177e4SLinus Torvalds */ 12711da177e4SLinus Torvalds static void 12721da177e4SLinus Torvalds shrink_zone(struct zone *zone, struct scan_control *sc) 12731da177e4SLinus Torvalds { 12741da177e4SLinus Torvalds unsigned long nr_active; 12751da177e4SLinus Torvalds unsigned long nr_inactive; 12761da177e4SLinus Torvalds 127753e9a615SMartin Hicks atomic_inc(&zone->reclaim_in_progress); 127853e9a615SMartin Hicks 12791da177e4SLinus Torvalds /* 12801da177e4SLinus Torvalds * Add one to `nr_to_scan' just to make sure that the kernel will 12811da177e4SLinus Torvalds * slowly sift through the active list. 12821da177e4SLinus Torvalds */ 12831da177e4SLinus Torvalds zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; 12841da177e4SLinus Torvalds nr_active = zone->nr_scan_active; 12851da177e4SLinus Torvalds if (nr_active >= sc->swap_cluster_max) 12861da177e4SLinus Torvalds zone->nr_scan_active = 0; 12871da177e4SLinus Torvalds else 12881da177e4SLinus Torvalds nr_active = 0; 12891da177e4SLinus Torvalds 12901da177e4SLinus Torvalds zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; 12911da177e4SLinus Torvalds nr_inactive = zone->nr_scan_inactive; 12921da177e4SLinus Torvalds if (nr_inactive >= sc->swap_cluster_max) 12931da177e4SLinus Torvalds zone->nr_scan_inactive = 0; 12941da177e4SLinus Torvalds else 12951da177e4SLinus Torvalds nr_inactive = 0; 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds while (nr_active || nr_inactive) { 12981da177e4SLinus Torvalds if (nr_active) { 12991da177e4SLinus Torvalds sc->nr_to_scan = min(nr_active, 13001da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 13011da177e4SLinus Torvalds nr_active -= sc->nr_to_scan; 13021da177e4SLinus Torvalds refill_inactive_zone(zone, sc); 13031da177e4SLinus Torvalds } 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds if (nr_inactive) { 13061da177e4SLinus Torvalds sc->nr_to_scan = min(nr_inactive, 13071da177e4SLinus Torvalds (unsigned long)sc->swap_cluster_max); 13081da177e4SLinus Torvalds nr_inactive -= sc->nr_to_scan; 13091da177e4SLinus Torvalds shrink_cache(zone, sc); 13101da177e4SLinus Torvalds } 13111da177e4SLinus Torvalds } 13121da177e4SLinus Torvalds 13131da177e4SLinus Torvalds throttle_vm_writeout(); 131453e9a615SMartin Hicks 131553e9a615SMartin Hicks atomic_dec(&zone->reclaim_in_progress); 13161da177e4SLinus Torvalds } 13171da177e4SLinus Torvalds 13181da177e4SLinus Torvalds /* 13191da177e4SLinus Torvalds * This is the direct reclaim path, for page-allocating processes. We only 13201da177e4SLinus Torvalds * try to reclaim pages from zones which will satisfy the caller's allocation 13211da177e4SLinus Torvalds * request. 13221da177e4SLinus Torvalds * 13231da177e4SLinus Torvalds * We reclaim from a zone even if that zone is over pages_high. Because: 13241da177e4SLinus Torvalds * a) The caller may be trying to free *extra* pages to satisfy a higher-order 13251da177e4SLinus Torvalds * allocation or 13261da177e4SLinus Torvalds * b) The zones may be over pages_high but they must go *over* pages_high to 13271da177e4SLinus Torvalds * satisfy the `incremental min' zone defense algorithm. 13281da177e4SLinus Torvalds * 13291da177e4SLinus Torvalds * Returns the number of reclaimed pages. 13301da177e4SLinus Torvalds * 13311da177e4SLinus Torvalds * If a zone is deemed to be full of pinned pages then just give it a light 13321da177e4SLinus Torvalds * scan then give up on it. 13331da177e4SLinus Torvalds */ 13341da177e4SLinus Torvalds static void 13351da177e4SLinus Torvalds shrink_caches(struct zone **zones, struct scan_control *sc) 13361da177e4SLinus Torvalds { 13371da177e4SLinus Torvalds int i; 13381da177e4SLinus Torvalds 13391da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 13401da177e4SLinus Torvalds struct zone *zone = zones[i]; 13411da177e4SLinus Torvalds 1342f3fe6512SCon Kolivas if (!populated_zone(zone)) 13431da177e4SLinus Torvalds continue; 13441da177e4SLinus Torvalds 13459bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 13461da177e4SLinus Torvalds continue; 13471da177e4SLinus Torvalds 13481da177e4SLinus Torvalds zone->temp_priority = sc->priority; 13491da177e4SLinus Torvalds if (zone->prev_priority > sc->priority) 13501da177e4SLinus Torvalds zone->prev_priority = sc->priority; 13511da177e4SLinus Torvalds 13521da177e4SLinus Torvalds if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) 13531da177e4SLinus Torvalds continue; /* Let kswapd poll it */ 13541da177e4SLinus Torvalds 13551da177e4SLinus Torvalds shrink_zone(zone, sc); 13561da177e4SLinus Torvalds } 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds 13591da177e4SLinus Torvalds /* 13601da177e4SLinus Torvalds * This is the main entry point to direct page reclaim. 13611da177e4SLinus Torvalds * 13621da177e4SLinus Torvalds * If a full scan of the inactive list fails to free enough memory then we 13631da177e4SLinus Torvalds * are "out of memory" and something needs to be killed. 13641da177e4SLinus Torvalds * 13651da177e4SLinus Torvalds * If the caller is !__GFP_FS then the probability of a failure is reasonably 13661da177e4SLinus Torvalds * high - the zone may be full of dirty or under-writeback pages, which this 13671da177e4SLinus Torvalds * caller can't do much about. We kick pdflush and take explicit naps in the 13681da177e4SLinus Torvalds * hope that some of these pages can be written. But if the allocating task 13691da177e4SLinus Torvalds * holds filesystem locks which prevent writeout this might not work, and the 13701da177e4SLinus Torvalds * allocation attempt will fail. 13711da177e4SLinus Torvalds */ 13726daa0e28SAl Viro int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) 13731da177e4SLinus Torvalds { 13741da177e4SLinus Torvalds int priority; 13751da177e4SLinus Torvalds int ret = 0; 13761da177e4SLinus Torvalds int total_scanned = 0, total_reclaimed = 0; 13771da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 13781da177e4SLinus Torvalds struct scan_control sc; 13791da177e4SLinus Torvalds unsigned long lru_pages = 0; 13801da177e4SLinus Torvalds int i; 13811da177e4SLinus Torvalds 13821da177e4SLinus Torvalds sc.gfp_mask = gfp_mask; 138352a8363eSChristoph Lameter sc.may_writepage = !laptop_mode; 1384f1fd1067SChristoph Lameter sc.may_swap = 1; 13851da177e4SLinus Torvalds 13861da177e4SLinus Torvalds inc_page_state(allocstall); 13871da177e4SLinus Torvalds 13881da177e4SLinus Torvalds for (i = 0; zones[i] != NULL; i++) { 13891da177e4SLinus Torvalds struct zone *zone = zones[i]; 13901da177e4SLinus Torvalds 13919bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 13921da177e4SLinus Torvalds continue; 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds zone->temp_priority = DEF_PRIORITY; 13951da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 13961da177e4SLinus Torvalds } 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 13991da177e4SLinus Torvalds sc.nr_mapped = read_page_state(nr_mapped); 14001da177e4SLinus Torvalds sc.nr_scanned = 0; 14011da177e4SLinus Torvalds sc.nr_reclaimed = 0; 14021da177e4SLinus Torvalds sc.priority = priority; 14031da177e4SLinus Torvalds sc.swap_cluster_max = SWAP_CLUSTER_MAX; 1404f7b7fd8fSRik van Riel if (!priority) 1405f7b7fd8fSRik van Riel disable_swap_token(); 14061da177e4SLinus Torvalds shrink_caches(zones, &sc); 14071da177e4SLinus Torvalds shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); 14081da177e4SLinus Torvalds if (reclaim_state) { 14091da177e4SLinus Torvalds sc.nr_reclaimed += reclaim_state->reclaimed_slab; 14101da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 14111da177e4SLinus Torvalds } 14121da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 14131da177e4SLinus Torvalds total_reclaimed += sc.nr_reclaimed; 14141da177e4SLinus Torvalds if (total_reclaimed >= sc.swap_cluster_max) { 14151da177e4SLinus Torvalds ret = 1; 14161da177e4SLinus Torvalds goto out; 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds /* 14201da177e4SLinus Torvalds * Try to write back as many pages as we just scanned. This 14211da177e4SLinus Torvalds * tends to cause slow streaming writers to write data to the 14221da177e4SLinus Torvalds * disk smoothly, at the dirtying rate, which is nice. But 14231da177e4SLinus Torvalds * that's undesirable in laptop mode, where we *want* lumpy 14241da177e4SLinus Torvalds * writeout. So in laptop mode, write out the whole world. 14251da177e4SLinus Torvalds */ 14261da177e4SLinus Torvalds if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { 1427687a21ceSPekka J Enberg wakeup_pdflush(laptop_mode ? 0 : total_scanned); 14281da177e4SLinus Torvalds sc.may_writepage = 1; 14291da177e4SLinus Torvalds } 14301da177e4SLinus Torvalds 14311da177e4SLinus Torvalds /* Take a nap, wait for some writeback to complete */ 14321da177e4SLinus Torvalds if (sc.nr_scanned && priority < DEF_PRIORITY - 2) 14331da177e4SLinus Torvalds blk_congestion_wait(WRITE, HZ/10); 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds out: 14361da177e4SLinus Torvalds for (i = 0; zones[i] != 0; i++) { 14371da177e4SLinus Torvalds struct zone *zone = zones[i]; 14381da177e4SLinus Torvalds 14399bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 14401da177e4SLinus Torvalds continue; 14411da177e4SLinus Torvalds 14421da177e4SLinus Torvalds zone->prev_priority = zone->temp_priority; 14431da177e4SLinus Torvalds } 14441da177e4SLinus Torvalds return ret; 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds /* 14481da177e4SLinus Torvalds * For kswapd, balance_pgdat() will work across all this node's zones until 14491da177e4SLinus Torvalds * they are all at pages_high. 14501da177e4SLinus Torvalds * 14511da177e4SLinus Torvalds * If `nr_pages' is non-zero then it is the number of pages which are to be 14521da177e4SLinus Torvalds * reclaimed, regardless of the zone occupancies. This is a software suspend 14531da177e4SLinus Torvalds * special. 14541da177e4SLinus Torvalds * 14551da177e4SLinus Torvalds * Returns the number of pages which were actually freed. 14561da177e4SLinus Torvalds * 14571da177e4SLinus Torvalds * There is special handling here for zones which are full of pinned pages. 14581da177e4SLinus Torvalds * This can happen if the pages are all mlocked, or if they are all used by 14591da177e4SLinus Torvalds * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 14601da177e4SLinus Torvalds * What we do is to detect the case where all pages in the zone have been 14611da177e4SLinus Torvalds * scanned twice and there has been zero successful reclaim. Mark the zone as 14621da177e4SLinus Torvalds * dead and from now on, only perform a short scan. Basically we're polling 14631da177e4SLinus Torvalds * the zone for when the problem goes away. 14641da177e4SLinus Torvalds * 14651da177e4SLinus Torvalds * kswapd scans the zones in the highmem->normal->dma direction. It skips 14661da177e4SLinus Torvalds * zones which have free_pages > pages_high, but once a zone is found to have 14671da177e4SLinus Torvalds * free_pages <= pages_high, we scan that zone and the lower zones regardless 14681da177e4SLinus Torvalds * of the number of free pages in the lower zones. This interoperates with 14691da177e4SLinus Torvalds * the page allocator fallback scheme to ensure that aging of pages is balanced 14701da177e4SLinus Torvalds * across the zones. 14711da177e4SLinus Torvalds */ 14721da177e4SLinus Torvalds static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order) 14731da177e4SLinus Torvalds { 14741da177e4SLinus Torvalds int to_free = nr_pages; 14751da177e4SLinus Torvalds int all_zones_ok; 14761da177e4SLinus Torvalds int priority; 14771da177e4SLinus Torvalds int i; 14781da177e4SLinus Torvalds int total_scanned, total_reclaimed; 14791da177e4SLinus Torvalds struct reclaim_state *reclaim_state = current->reclaim_state; 14801da177e4SLinus Torvalds struct scan_control sc; 14811da177e4SLinus Torvalds 14821da177e4SLinus Torvalds loop_again: 14831da177e4SLinus Torvalds total_scanned = 0; 14841da177e4SLinus Torvalds total_reclaimed = 0; 14851da177e4SLinus Torvalds sc.gfp_mask = GFP_KERNEL; 148652a8363eSChristoph Lameter sc.may_writepage = !laptop_mode; 1487f1fd1067SChristoph Lameter sc.may_swap = 1; 14881da177e4SLinus Torvalds sc.nr_mapped = read_page_state(nr_mapped); 14891da177e4SLinus Torvalds 14901da177e4SLinus Torvalds inc_page_state(pageoutrun); 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds for (i = 0; i < pgdat->nr_zones; i++) { 14931da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds zone->temp_priority = DEF_PRIORITY; 14961da177e4SLinus Torvalds } 14971da177e4SLinus Torvalds 14981da177e4SLinus Torvalds for (priority = DEF_PRIORITY; priority >= 0; priority--) { 14991da177e4SLinus Torvalds int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 15001da177e4SLinus Torvalds unsigned long lru_pages = 0; 15011da177e4SLinus Torvalds 1502f7b7fd8fSRik van Riel /* The swap token gets in the way of swapout... */ 1503f7b7fd8fSRik van Riel if (!priority) 1504f7b7fd8fSRik van Riel disable_swap_token(); 1505f7b7fd8fSRik van Riel 15061da177e4SLinus Torvalds all_zones_ok = 1; 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds if (nr_pages == 0) { 15091da177e4SLinus Torvalds /* 15101da177e4SLinus Torvalds * Scan in the highmem->dma direction for the highest 15111da177e4SLinus Torvalds * zone which needs scanning 15121da177e4SLinus Torvalds */ 15131da177e4SLinus Torvalds for (i = pgdat->nr_zones - 1; i >= 0; i--) { 15141da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 15151da177e4SLinus Torvalds 1516f3fe6512SCon Kolivas if (!populated_zone(zone)) 15171da177e4SLinus Torvalds continue; 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds if (zone->all_unreclaimable && 15201da177e4SLinus Torvalds priority != DEF_PRIORITY) 15211da177e4SLinus Torvalds continue; 15221da177e4SLinus Torvalds 15231da177e4SLinus Torvalds if (!zone_watermark_ok(zone, order, 15247fb1d9fcSRohit Seth zone->pages_high, 0, 0)) { 15251da177e4SLinus Torvalds end_zone = i; 15261da177e4SLinus Torvalds goto scan; 15271da177e4SLinus Torvalds } 15281da177e4SLinus Torvalds } 15291da177e4SLinus Torvalds goto out; 15301da177e4SLinus Torvalds } else { 15311da177e4SLinus Torvalds end_zone = pgdat->nr_zones - 1; 15321da177e4SLinus Torvalds } 15331da177e4SLinus Torvalds scan: 15341da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 15351da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 15361da177e4SLinus Torvalds 15371da177e4SLinus Torvalds lru_pages += zone->nr_active + zone->nr_inactive; 15381da177e4SLinus Torvalds } 15391da177e4SLinus Torvalds 15401da177e4SLinus Torvalds /* 15411da177e4SLinus Torvalds * Now scan the zone in the dma->highmem direction, stopping 15421da177e4SLinus Torvalds * at the last zone which needs scanning. 15431da177e4SLinus Torvalds * 15441da177e4SLinus Torvalds * We do this because the page allocator works in the opposite 15451da177e4SLinus Torvalds * direction. This prevents the page allocator from allocating 15461da177e4SLinus Torvalds * pages behind kswapd's direction of progress, which would 15471da177e4SLinus Torvalds * cause too much scanning of the lower zones. 15481da177e4SLinus Torvalds */ 15491da177e4SLinus Torvalds for (i = 0; i <= end_zone; i++) { 15501da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 1551b15e0905Sakpm@osdl.org int nr_slab; 15521da177e4SLinus Torvalds 1553f3fe6512SCon Kolivas if (!populated_zone(zone)) 15541da177e4SLinus Torvalds continue; 15551da177e4SLinus Torvalds 15561da177e4SLinus Torvalds if (zone->all_unreclaimable && priority != DEF_PRIORITY) 15571da177e4SLinus Torvalds continue; 15581da177e4SLinus Torvalds 15591da177e4SLinus Torvalds if (nr_pages == 0) { /* Not software suspend */ 15601da177e4SLinus Torvalds if (!zone_watermark_ok(zone, order, 15617fb1d9fcSRohit Seth zone->pages_high, end_zone, 0)) 15621da177e4SLinus Torvalds all_zones_ok = 0; 15631da177e4SLinus Torvalds } 15641da177e4SLinus Torvalds zone->temp_priority = priority; 15651da177e4SLinus Torvalds if (zone->prev_priority > priority) 15661da177e4SLinus Torvalds zone->prev_priority = priority; 15671da177e4SLinus Torvalds sc.nr_scanned = 0; 15681da177e4SLinus Torvalds sc.nr_reclaimed = 0; 15691da177e4SLinus Torvalds sc.priority = priority; 15701da177e4SLinus Torvalds sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; 15711e7e5a90SMartin Hicks atomic_inc(&zone->reclaim_in_progress); 15721da177e4SLinus Torvalds shrink_zone(zone, &sc); 15731e7e5a90SMartin Hicks atomic_dec(&zone->reclaim_in_progress); 15741da177e4SLinus Torvalds reclaim_state->reclaimed_slab = 0; 1575b15e0905Sakpm@osdl.org nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1576b15e0905Sakpm@osdl.org lru_pages); 15771da177e4SLinus Torvalds sc.nr_reclaimed += reclaim_state->reclaimed_slab; 15781da177e4SLinus Torvalds total_reclaimed += sc.nr_reclaimed; 15791da177e4SLinus Torvalds total_scanned += sc.nr_scanned; 15801da177e4SLinus Torvalds if (zone->all_unreclaimable) 15811da177e4SLinus Torvalds continue; 1582b15e0905Sakpm@osdl.org if (nr_slab == 0 && zone->pages_scanned >= 1583b15e0905Sakpm@osdl.org (zone->nr_active + zone->nr_inactive) * 4) 15841da177e4SLinus Torvalds zone->all_unreclaimable = 1; 15851da177e4SLinus Torvalds /* 15861da177e4SLinus Torvalds * If we've done a decent amount of scanning and 15871da177e4SLinus Torvalds * the reclaim ratio is low, start doing writepage 15881da177e4SLinus Torvalds * even in laptop mode 15891da177e4SLinus Torvalds */ 15901da177e4SLinus Torvalds if (total_scanned > SWAP_CLUSTER_MAX * 2 && 15911da177e4SLinus Torvalds total_scanned > total_reclaimed+total_reclaimed/2) 15921da177e4SLinus Torvalds sc.may_writepage = 1; 15931da177e4SLinus Torvalds } 15941da177e4SLinus Torvalds if (nr_pages && to_free > total_reclaimed) 15951da177e4SLinus Torvalds continue; /* swsusp: need to do more work */ 15961da177e4SLinus Torvalds if (all_zones_ok) 15971da177e4SLinus Torvalds break; /* kswapd: all done */ 15981da177e4SLinus Torvalds /* 15991da177e4SLinus Torvalds * OK, kswapd is getting into trouble. Take a nap, then take 16001da177e4SLinus Torvalds * another pass across the zones. 16011da177e4SLinus Torvalds */ 16021da177e4SLinus Torvalds if (total_scanned && priority < DEF_PRIORITY - 2) 16031da177e4SLinus Torvalds blk_congestion_wait(WRITE, HZ/10); 16041da177e4SLinus Torvalds 16051da177e4SLinus Torvalds /* 16061da177e4SLinus Torvalds * We do this so kswapd doesn't build up large priorities for 16071da177e4SLinus Torvalds * example when it is freeing in parallel with allocators. It 16081da177e4SLinus Torvalds * matches the direct reclaim path behaviour in terms of impact 16091da177e4SLinus Torvalds * on zone->*_priority. 16101da177e4SLinus Torvalds */ 16111da177e4SLinus Torvalds if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) 16121da177e4SLinus Torvalds break; 16131da177e4SLinus Torvalds } 16141da177e4SLinus Torvalds out: 16151da177e4SLinus Torvalds for (i = 0; i < pgdat->nr_zones; i++) { 16161da177e4SLinus Torvalds struct zone *zone = pgdat->node_zones + i; 16171da177e4SLinus Torvalds 16181da177e4SLinus Torvalds zone->prev_priority = zone->temp_priority; 16191da177e4SLinus Torvalds } 16201da177e4SLinus Torvalds if (!all_zones_ok) { 16211da177e4SLinus Torvalds cond_resched(); 16221da177e4SLinus Torvalds goto loop_again; 16231da177e4SLinus Torvalds } 16241da177e4SLinus Torvalds 16251da177e4SLinus Torvalds return total_reclaimed; 16261da177e4SLinus Torvalds } 16271da177e4SLinus Torvalds 16281da177e4SLinus Torvalds /* 16291da177e4SLinus Torvalds * The background pageout daemon, started as a kernel thread 16301da177e4SLinus Torvalds * from the init process. 16311da177e4SLinus Torvalds * 16321da177e4SLinus Torvalds * This basically trickles out pages so that we have _some_ 16331da177e4SLinus Torvalds * free memory available even if there is no other activity 16341da177e4SLinus Torvalds * that frees anything up. This is needed for things like routing 16351da177e4SLinus Torvalds * etc, where we otherwise might have all activity going on in 16361da177e4SLinus Torvalds * asynchronous contexts that cannot page things out. 16371da177e4SLinus Torvalds * 16381da177e4SLinus Torvalds * If there are applications that are active memory-allocators 16391da177e4SLinus Torvalds * (most normal use), this basically shouldn't matter. 16401da177e4SLinus Torvalds */ 16411da177e4SLinus Torvalds static int kswapd(void *p) 16421da177e4SLinus Torvalds { 16431da177e4SLinus Torvalds unsigned long order; 16441da177e4SLinus Torvalds pg_data_t *pgdat = (pg_data_t*)p; 16451da177e4SLinus Torvalds struct task_struct *tsk = current; 16461da177e4SLinus Torvalds DEFINE_WAIT(wait); 16471da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 16481da177e4SLinus Torvalds .reclaimed_slab = 0, 16491da177e4SLinus Torvalds }; 16501da177e4SLinus Torvalds cpumask_t cpumask; 16511da177e4SLinus Torvalds 16521da177e4SLinus Torvalds daemonize("kswapd%d", pgdat->node_id); 16531da177e4SLinus Torvalds cpumask = node_to_cpumask(pgdat->node_id); 16541da177e4SLinus Torvalds if (!cpus_empty(cpumask)) 16551da177e4SLinus Torvalds set_cpus_allowed(tsk, cpumask); 16561da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 16571da177e4SLinus Torvalds 16581da177e4SLinus Torvalds /* 16591da177e4SLinus Torvalds * Tell the memory management that we're a "memory allocator", 16601da177e4SLinus Torvalds * and that if we need more memory we should get access to it 16611da177e4SLinus Torvalds * regardless (see "__alloc_pages()"). "kswapd" should 16621da177e4SLinus Torvalds * never get caught in the normal page freeing logic. 16631da177e4SLinus Torvalds * 16641da177e4SLinus Torvalds * (Kswapd normally doesn't need memory anyway, but sometimes 16651da177e4SLinus Torvalds * you need a small amount of memory in order to be able to 16661da177e4SLinus Torvalds * page out something else, and this flag essentially protects 16671da177e4SLinus Torvalds * us from recursively trying to free more memory as we're 16681da177e4SLinus Torvalds * trying to free the first piece of memory in the first place). 16691da177e4SLinus Torvalds */ 1670930d9152SChristoph Lameter tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 16711da177e4SLinus Torvalds 16721da177e4SLinus Torvalds order = 0; 16731da177e4SLinus Torvalds for ( ; ; ) { 16741da177e4SLinus Torvalds unsigned long new_order; 16753e1d1d28SChristoph Lameter 16763e1d1d28SChristoph Lameter try_to_freeze(); 16771da177e4SLinus Torvalds 16781da177e4SLinus Torvalds prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 16791da177e4SLinus Torvalds new_order = pgdat->kswapd_max_order; 16801da177e4SLinus Torvalds pgdat->kswapd_max_order = 0; 16811da177e4SLinus Torvalds if (order < new_order) { 16821da177e4SLinus Torvalds /* 16831da177e4SLinus Torvalds * Don't sleep if someone wants a larger 'order' 16841da177e4SLinus Torvalds * allocation 16851da177e4SLinus Torvalds */ 16861da177e4SLinus Torvalds order = new_order; 16871da177e4SLinus Torvalds } else { 16881da177e4SLinus Torvalds schedule(); 16891da177e4SLinus Torvalds order = pgdat->kswapd_max_order; 16901da177e4SLinus Torvalds } 16911da177e4SLinus Torvalds finish_wait(&pgdat->kswapd_wait, &wait); 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds balance_pgdat(pgdat, 0, order); 16941da177e4SLinus Torvalds } 16951da177e4SLinus Torvalds return 0; 16961da177e4SLinus Torvalds } 16971da177e4SLinus Torvalds 16981da177e4SLinus Torvalds /* 16991da177e4SLinus Torvalds * A zone is low on free memory, so wake its kswapd task to service it. 17001da177e4SLinus Torvalds */ 17011da177e4SLinus Torvalds void wakeup_kswapd(struct zone *zone, int order) 17021da177e4SLinus Torvalds { 17031da177e4SLinus Torvalds pg_data_t *pgdat; 17041da177e4SLinus Torvalds 1705f3fe6512SCon Kolivas if (!populated_zone(zone)) 17061da177e4SLinus Torvalds return; 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds pgdat = zone->zone_pgdat; 17097fb1d9fcSRohit Seth if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) 17101da177e4SLinus Torvalds return; 17111da177e4SLinus Torvalds if (pgdat->kswapd_max_order < order) 17121da177e4SLinus Torvalds pgdat->kswapd_max_order = order; 17139bf2229fSPaul Jackson if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 17141da177e4SLinus Torvalds return; 17158d0986e2SCon Kolivas if (!waitqueue_active(&pgdat->kswapd_wait)) 17161da177e4SLinus Torvalds return; 17178d0986e2SCon Kolivas wake_up_interruptible(&pgdat->kswapd_wait); 17181da177e4SLinus Torvalds } 17191da177e4SLinus Torvalds 17201da177e4SLinus Torvalds #ifdef CONFIG_PM 17211da177e4SLinus Torvalds /* 17221da177e4SLinus Torvalds * Try to free `nr_pages' of memory, system-wide. Returns the number of freed 17231da177e4SLinus Torvalds * pages. 17241da177e4SLinus Torvalds */ 17251da177e4SLinus Torvalds int shrink_all_memory(int nr_pages) 17261da177e4SLinus Torvalds { 17271da177e4SLinus Torvalds pg_data_t *pgdat; 17281da177e4SLinus Torvalds int nr_to_free = nr_pages; 17291da177e4SLinus Torvalds int ret = 0; 17301da177e4SLinus Torvalds struct reclaim_state reclaim_state = { 17311da177e4SLinus Torvalds .reclaimed_slab = 0, 17321da177e4SLinus Torvalds }; 17331da177e4SLinus Torvalds 17341da177e4SLinus Torvalds current->reclaim_state = &reclaim_state; 17351da177e4SLinus Torvalds for_each_pgdat(pgdat) { 17361da177e4SLinus Torvalds int freed; 17371da177e4SLinus Torvalds freed = balance_pgdat(pgdat, nr_to_free, 0); 17381da177e4SLinus Torvalds ret += freed; 17391da177e4SLinus Torvalds nr_to_free -= freed; 17401da177e4SLinus Torvalds if (nr_to_free <= 0) 17411da177e4SLinus Torvalds break; 17421da177e4SLinus Torvalds } 17431da177e4SLinus Torvalds current->reclaim_state = NULL; 17441da177e4SLinus Torvalds return ret; 17451da177e4SLinus Torvalds } 17461da177e4SLinus Torvalds #endif 17471da177e4SLinus Torvalds 17481da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU 17491da177e4SLinus Torvalds /* It's optimal to keep kswapds on the same CPUs as their memory, but 17501da177e4SLinus Torvalds not required for correctness. So if the last cpu in a node goes 17511da177e4SLinus Torvalds away, we get changed to run anywhere: as the first one comes back, 17521da177e4SLinus Torvalds restore their cpu bindings. */ 17531da177e4SLinus Torvalds static int __devinit cpu_callback(struct notifier_block *nfb, 17541da177e4SLinus Torvalds unsigned long action, 17551da177e4SLinus Torvalds void *hcpu) 17561da177e4SLinus Torvalds { 17571da177e4SLinus Torvalds pg_data_t *pgdat; 17581da177e4SLinus Torvalds cpumask_t mask; 17591da177e4SLinus Torvalds 17601da177e4SLinus Torvalds if (action == CPU_ONLINE) { 17611da177e4SLinus Torvalds for_each_pgdat(pgdat) { 17621da177e4SLinus Torvalds mask = node_to_cpumask(pgdat->node_id); 17631da177e4SLinus Torvalds if (any_online_cpu(mask) != NR_CPUS) 17641da177e4SLinus Torvalds /* One of our CPUs online: restore mask */ 17651da177e4SLinus Torvalds set_cpus_allowed(pgdat->kswapd, mask); 17661da177e4SLinus Torvalds } 17671da177e4SLinus Torvalds } 17681da177e4SLinus Torvalds return NOTIFY_OK; 17691da177e4SLinus Torvalds } 17701da177e4SLinus Torvalds #endif /* CONFIG_HOTPLUG_CPU */ 17711da177e4SLinus Torvalds 17721da177e4SLinus Torvalds static int __init kswapd_init(void) 17731da177e4SLinus Torvalds { 17741da177e4SLinus Torvalds pg_data_t *pgdat; 17751da177e4SLinus Torvalds swap_setup(); 17761da177e4SLinus Torvalds for_each_pgdat(pgdat) 17771da177e4SLinus Torvalds pgdat->kswapd 17781da177e4SLinus Torvalds = find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL)); 17791da177e4SLinus Torvalds total_memory = nr_free_pagecache_pages(); 17801da177e4SLinus Torvalds hotcpu_notifier(cpu_callback, 0); 17811da177e4SLinus Torvalds return 0; 17821da177e4SLinus Torvalds } 17831da177e4SLinus Torvalds 17841da177e4SLinus Torvalds module_init(kswapd_init) 17859eeff239SChristoph Lameter 17869eeff239SChristoph Lameter #ifdef CONFIG_NUMA 17879eeff239SChristoph Lameter /* 17889eeff239SChristoph Lameter * Zone reclaim mode 17899eeff239SChristoph Lameter * 17909eeff239SChristoph Lameter * If non-zero call zone_reclaim when the number of free pages falls below 17919eeff239SChristoph Lameter * the watermarks. 17929eeff239SChristoph Lameter * 17939eeff239SChristoph Lameter * In the future we may add flags to the mode. However, the page allocator 17949eeff239SChristoph Lameter * should only have to check that zone_reclaim_mode != 0 before calling 17959eeff239SChristoph Lameter * zone_reclaim(). 17969eeff239SChristoph Lameter */ 17979eeff239SChristoph Lameter int zone_reclaim_mode __read_mostly; 17989eeff239SChristoph Lameter 17991b2ffb78SChristoph Lameter #define RECLAIM_OFF 0 18001b2ffb78SChristoph Lameter #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */ 18011b2ffb78SChristoph Lameter #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 18021b2ffb78SChristoph Lameter #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 18032a16e3f4SChristoph Lameter #define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */ 18041b2ffb78SChristoph Lameter 18059eeff239SChristoph Lameter /* 18069eeff239SChristoph Lameter * Mininum time between zone reclaim scans 18079eeff239SChristoph Lameter */ 18082a11ff06SChristoph Lameter int zone_reclaim_interval __read_mostly = 30*HZ; 1809a92f7126SChristoph Lameter 1810a92f7126SChristoph Lameter /* 1811a92f7126SChristoph Lameter * Priority for ZONE_RECLAIM. This determines the fraction of pages 1812a92f7126SChristoph Lameter * of a node considered for each zone_reclaim. 4 scans 1/16th of 1813a92f7126SChristoph Lameter * a zone. 1814a92f7126SChristoph Lameter */ 1815a92f7126SChristoph Lameter #define ZONE_RECLAIM_PRIORITY 4 1816a92f7126SChristoph Lameter 18179eeff239SChristoph Lameter /* 18189eeff239SChristoph Lameter * Try to free up some pages from this zone through reclaim. 18199eeff239SChristoph Lameter */ 18209eeff239SChristoph Lameter int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 18219eeff239SChristoph Lameter { 182289288623SChristoph Lameter int nr_pages; 18239eeff239SChristoph Lameter struct task_struct *p = current; 18249eeff239SChristoph Lameter struct reclaim_state reclaim_state; 182589288623SChristoph Lameter struct scan_control sc; 182642c722d4SChristoph Lameter cpumask_t mask; 182742c722d4SChristoph Lameter int node_id; 182889288623SChristoph Lameter 182989288623SChristoph Lameter if (time_before(jiffies, 18302a11ff06SChristoph Lameter zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) 183189288623SChristoph Lameter return 0; 18329eeff239SChristoph Lameter 18339eeff239SChristoph Lameter if (!(gfp_mask & __GFP_WAIT) || 18349eeff239SChristoph Lameter zone->all_unreclaimable || 18359eeff239SChristoph Lameter atomic_read(&zone->reclaim_in_progress) > 0) 18369eeff239SChristoph Lameter return 0; 18379eeff239SChristoph Lameter 183842c722d4SChristoph Lameter node_id = zone->zone_pgdat->node_id; 183942c722d4SChristoph Lameter mask = node_to_cpumask(node_id); 184042c722d4SChristoph Lameter if (!cpus_empty(mask) && node_id != numa_node_id()) 184142c722d4SChristoph Lameter return 0; 184242c722d4SChristoph Lameter 18431b2ffb78SChristoph Lameter sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE); 18441b2ffb78SChristoph Lameter sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP); 184589288623SChristoph Lameter sc.nr_scanned = 0; 184689288623SChristoph Lameter sc.nr_reclaimed = 0; 1847a92f7126SChristoph Lameter sc.priority = ZONE_RECLAIM_PRIORITY + 1; 184889288623SChristoph Lameter sc.nr_mapped = read_page_state(nr_mapped); 184989288623SChristoph Lameter sc.gfp_mask = gfp_mask; 18509eeff239SChristoph Lameter 18519eeff239SChristoph Lameter disable_swap_token(); 18529eeff239SChristoph Lameter 185389288623SChristoph Lameter nr_pages = 1 << order; 18549eeff239SChristoph Lameter if (nr_pages > SWAP_CLUSTER_MAX) 18559eeff239SChristoph Lameter sc.swap_cluster_max = nr_pages; 18569eeff239SChristoph Lameter else 18579eeff239SChristoph Lameter sc.swap_cluster_max = SWAP_CLUSTER_MAX; 18589eeff239SChristoph Lameter 18599eeff239SChristoph Lameter cond_resched(); 18609eeff239SChristoph Lameter p->flags |= PF_MEMALLOC; 18619eeff239SChristoph Lameter reclaim_state.reclaimed_slab = 0; 18629eeff239SChristoph Lameter p->reclaim_state = &reclaim_state; 1863c84db23cSChristoph Lameter 1864a92f7126SChristoph Lameter /* 1865a92f7126SChristoph Lameter * Free memory by calling shrink zone with increasing priorities 1866a92f7126SChristoph Lameter * until we have enough memory freed. 1867a92f7126SChristoph Lameter */ 1868a92f7126SChristoph Lameter do { 1869a92f7126SChristoph Lameter sc.priority--; 18709eeff239SChristoph Lameter shrink_zone(zone, &sc); 1871c84db23cSChristoph Lameter 1872a92f7126SChristoph Lameter } while (sc.nr_reclaimed < nr_pages && sc.priority > 0); 1873a92f7126SChristoph Lameter 18742a16e3f4SChristoph Lameter if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { 18752a16e3f4SChristoph Lameter /* 18762a16e3f4SChristoph Lameter * shrink_slab does not currently allow us to determine 18772a16e3f4SChristoph Lameter * how many pages were freed in the zone. So we just 18782a16e3f4SChristoph Lameter * shake the slab and then go offnode for a single allocation. 18792a16e3f4SChristoph Lameter * 18802a16e3f4SChristoph Lameter * shrink_slab will free memory on all zones and may take 18812a16e3f4SChristoph Lameter * a long time. 18822a16e3f4SChristoph Lameter */ 18832a16e3f4SChristoph Lameter shrink_slab(sc.nr_scanned, gfp_mask, order); 18842a16e3f4SChristoph Lameter sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */ 18852a16e3f4SChristoph Lameter } 18862a16e3f4SChristoph Lameter 18879eeff239SChristoph Lameter p->reclaim_state = NULL; 18889eeff239SChristoph Lameter current->flags &= ~PF_MEMALLOC; 18899eeff239SChristoph Lameter 18909eeff239SChristoph Lameter if (sc.nr_reclaimed == 0) 18919eeff239SChristoph Lameter zone->last_unsuccessful_zone_reclaim = jiffies; 18929eeff239SChristoph Lameter 1893c84db23cSChristoph Lameter return sc.nr_reclaimed >= nr_pages; 18949eeff239SChristoph Lameter } 18959eeff239SChristoph Lameter #endif 18969eeff239SChristoph Lameter 1897