1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19748446bbSMel Gorman #include "internal.h" 20748446bbSMel Gorman 21010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 22010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 23010fc29aSMinchan Kim { 24010fc29aSMinchan Kim count_vm_event(item); 25010fc29aSMinchan Kim } 26010fc29aSMinchan Kim 27010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 28010fc29aSMinchan Kim { 29010fc29aSMinchan Kim count_vm_events(item, delta); 30010fc29aSMinchan Kim } 31010fc29aSMinchan Kim #else 32010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 33010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 34010fc29aSMinchan Kim #endif 35010fc29aSMinchan Kim 36ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 37ff9543fdSMichal Nazarewicz 38b7aba698SMel Gorman #define CREATE_TRACE_POINTS 39b7aba698SMel Gorman #include <trace/events/compaction.h> 40b7aba698SMel Gorman 41748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 42748446bbSMel Gorman { 43748446bbSMel Gorman struct page *page, *next; 44748446bbSMel Gorman unsigned long count = 0; 45748446bbSMel Gorman 46748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 47748446bbSMel Gorman list_del(&page->lru); 48748446bbSMel Gorman __free_page(page); 49748446bbSMel Gorman count++; 50748446bbSMel Gorman } 51748446bbSMel Gorman 52748446bbSMel Gorman return count; 53748446bbSMel Gorman } 54748446bbSMel Gorman 55ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 56ff9543fdSMichal Nazarewicz { 57ff9543fdSMichal Nazarewicz struct page *page; 58ff9543fdSMichal Nazarewicz 59ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 60ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 61ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 62ff9543fdSMichal Nazarewicz } 63ff9543fdSMichal Nazarewicz } 64ff9543fdSMichal Nazarewicz 6547118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 6647118af0SMichal Nazarewicz { 6747118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 6847118af0SMichal Nazarewicz } 6947118af0SMichal Nazarewicz 70bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 71bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. 
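Skip hints are stored per pageblock; cc->ignore_skip_hint bypasses them entirely.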
*/ 72bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 73bb13ffebSMel Gorman struct page *page) 74bb13ffebSMel Gorman { 75bb13ffebSMel Gorman if (cc->ignore_skip_hint) 76bb13ffebSMel Gorman return true; 77bb13ffebSMel Gorman 78bb13ffebSMel Gorman return !get_pageblock_skip(page); 79bb13ffebSMel Gorman } 80bb13ffebSMel Gorman 81bb13ffebSMel Gorman /* 82bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 83bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 84bb13ffebSMel Gorman * meet. 85bb13ffebSMel Gorman */ 8662997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 87bb13ffebSMel Gorman { 88bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 89108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 90bb13ffebSMel Gorman unsigned long pfn; 91bb13ffebSMel Gorman 92c89511abSMel Gorman zone->compact_cached_migrate_pfn = start_pfn; 93c89511abSMel Gorman zone->compact_cached_free_pfn = end_pfn; 9462997027SMel Gorman zone->compact_blockskip_flush = false; 95bb13ffebSMel Gorman 96bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 97bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 98bb13ffebSMel Gorman struct page *page; 99bb13ffebSMel Gorman 100bb13ffebSMel Gorman cond_resched(); 101bb13ffebSMel Gorman 102bb13ffebSMel Gorman if (!pfn_valid(pfn)) 103bb13ffebSMel Gorman continue; 104bb13ffebSMel Gorman 105bb13ffebSMel Gorman page = pfn_to_page(pfn); 106bb13ffebSMel Gorman if (zone != page_zone(page)) 107bb13ffebSMel Gorman continue; 108bb13ffebSMel Gorman 109bb13ffebSMel Gorman clear_pageblock_skip(page); 110bb13ffebSMel Gorman } 111bb13ffebSMel Gorman } 112bb13ffebSMel Gorman 11362997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 11462997027SMel Gorman { 11562997027SMel Gorman int zoneid; 11662997027SMel Gorman 11762997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 11862997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 11962997027SMel Gorman if (!populated_zone(zone)) 12062997027SMel Gorman continue; 12162997027SMel Gorman 12262997027SMel Gorman /* Only flush if a full compaction finished recently */ 12362997027SMel Gorman if (zone->compact_blockskip_flush) 12462997027SMel Gorman __reset_isolation_suitable(zone); 12562997027SMel Gorman } 12662997027SMel Gorman } 12762997027SMel Gorman 128bb13ffebSMel Gorman /* 129bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 13062997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
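 * It also advances the zone's cached scanner positions past such blocks, in
 * each scanner's direction of travel.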
131bb13ffebSMel Gorman  */
132c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
133c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
134c89511abSMel Gorman 			bool migrate_scanner)
135bb13ffebSMel Gorman {
136c89511abSMel Gorman 	struct zone *zone = cc->zone;
1376815bf3fSJoonsoo Kim 
1386815bf3fSJoonsoo Kim 	if (cc->ignore_skip_hint)
1396815bf3fSJoonsoo Kim 		return;
1406815bf3fSJoonsoo Kim 
141bb13ffebSMel Gorman 	if (!page)
142bb13ffebSMel Gorman 		return;
143bb13ffebSMel Gorman 
144c89511abSMel Gorman 	if (!nr_isolated) {
145c89511abSMel Gorman 		unsigned long pfn = page_to_pfn(page);
146bb13ffebSMel Gorman 		set_pageblock_skip(page);
147c89511abSMel Gorman 
148c89511abSMel Gorman 		/* Update where compaction should restart */
149c89511abSMel Gorman 		if (migrate_scanner) {
150c89511abSMel Gorman 			if (!cc->finished_update_migrate &&
151c89511abSMel Gorman 			    pfn > zone->compact_cached_migrate_pfn)
152c89511abSMel Gorman 				zone->compact_cached_migrate_pfn = pfn;
153c89511abSMel Gorman 		} else {
154c89511abSMel Gorman 			if (!cc->finished_update_free &&
155c89511abSMel Gorman 			    pfn < zone->compact_cached_free_pfn)
156c89511abSMel Gorman 				zone->compact_cached_free_pfn = pfn;
157c89511abSMel Gorman 		}
158c89511abSMel Gorman 	}
159bb13ffebSMel Gorman }
160bb13ffebSMel Gorman #else
161bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
162bb13ffebSMel Gorman 					struct page *page)
163bb13ffebSMel Gorman {
164bb13ffebSMel Gorman 	return true;
165bb13ffebSMel Gorman }
166bb13ffebSMel Gorman 
167c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
168c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
169c89511abSMel Gorman 			bool migrate_scanner)
170bb13ffebSMel Gorman {
171bb13ffebSMel Gorman }
172bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */
173bb13ffebSMel Gorman 
1742a1402aaSMel Gorman static inline bool should_release_lock(spinlock_t *lock)
1752a1402aaSMel Gorman {
1762a1402aaSMel Gorman 	return need_resched() || spin_is_contended(lock);
1772a1402aaSMel Gorman }
1782a1402aaSMel Gorman 
17985aa125fSMichal Nazarewicz /*
180c67fe375SMel Gorman  * Compaction requires the taking of some coarse locks that are potentially
181c67fe375SMel Gorman  * very heavily contended. Check if the process needs to be scheduled or
182c67fe375SMel Gorman  * if the lock is contended. For async compaction, back out in the event
183c67fe375SMel Gorman  * of severe contention. For sync compaction, schedule.
184c67fe375SMel Gorman  *
185c67fe375SMel Gorman  * Returns true if the lock is held.
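 * (either because it was already held or because it was acquired here).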
186c67fe375SMel Gorman * Returns false if the lock is released and compaction should abort 187c67fe375SMel Gorman */ 188c67fe375SMel Gorman static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, 189c67fe375SMel Gorman bool locked, struct compact_control *cc) 190c67fe375SMel Gorman { 1912a1402aaSMel Gorman if (should_release_lock(lock)) { 192c67fe375SMel Gorman if (locked) { 193c67fe375SMel Gorman spin_unlock_irqrestore(lock, *flags); 194c67fe375SMel Gorman locked = false; 195c67fe375SMel Gorman } 196c67fe375SMel Gorman 197c67fe375SMel Gorman /* async aborts if taking too long or contended */ 198c67fe375SMel Gorman if (!cc->sync) { 199e64c5237SShaohua Li cc->contended = true; 200c67fe375SMel Gorman return false; 201c67fe375SMel Gorman } 202c67fe375SMel Gorman 203c67fe375SMel Gorman cond_resched(); 204c67fe375SMel Gorman } 205c67fe375SMel Gorman 206c67fe375SMel Gorman if (!locked) 207c67fe375SMel Gorman spin_lock_irqsave(lock, *flags); 208c67fe375SMel Gorman return true; 209c67fe375SMel Gorman } 210c67fe375SMel Gorman 211c67fe375SMel Gorman static inline bool compact_trylock_irqsave(spinlock_t *lock, 212c67fe375SMel Gorman unsigned long *flags, struct compact_control *cc) 213c67fe375SMel Gorman { 214c67fe375SMel Gorman return compact_checklock_irqsave(lock, flags, false, cc); 215c67fe375SMel Gorman } 216c67fe375SMel Gorman 217f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */ 218f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page) 219f40d1e42SMel Gorman { 2207d348b9eSJoonsoo Kim /* If the page is a large free page, then disallow migration */ 221f40d1e42SMel Gorman if (PageBuddy(page) && page_order(page) >= pageblock_order) 2227d348b9eSJoonsoo Kim return false; 223f40d1e42SMel Gorman 224f40d1e42SMel Gorman /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 2257d348b9eSJoonsoo Kim if (migrate_async_suitable(get_pageblock_migratetype(page))) 226f40d1e42SMel Gorman return true; 227f40d1e42SMel Gorman 228f40d1e42SMel Gorman /* Otherwise skip the block */ 229f40d1e42SMel Gorman return false; 230f40d1e42SMel Gorman } 231f40d1e42SMel Gorman 232c67fe375SMel Gorman /* 2339e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 2349e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 2359e4be470SJerome Marchand * (even though it may still end up isolating some pages). 23685aa125fSMichal Nazarewicz */ 237f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 238f40d1e42SMel Gorman unsigned long blockpfn, 23985aa125fSMichal Nazarewicz unsigned long end_pfn, 24085aa125fSMichal Nazarewicz struct list_head *freelist, 24185aa125fSMichal Nazarewicz bool strict) 242748446bbSMel Gorman { 243b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 244bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 245f40d1e42SMel Gorman unsigned long flags; 246f40d1e42SMel Gorman bool locked = false; 24701ead534SJoonsoo Kim bool checked_pageblock = false; 248748446bbSMel Gorman 249748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 250748446bbSMel Gorman 251f40d1e42SMel Gorman /* Isolate free pages. 
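Buddy pages found are split into order-0 pages via split_free_page() and collected on the private freelist.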
 */
252748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
253748446bbSMel Gorman 		int isolated, i;
254748446bbSMel Gorman 		struct page *page = cursor;
255748446bbSMel Gorman 
256b7aba698SMel Gorman 		nr_scanned++;
257f40d1e42SMel Gorman 		if (!pfn_valid_within(blockpfn))
2582af120bcSLaura Abbott 			goto isolate_fail;
2592af120bcSLaura Abbott 
260bb13ffebSMel Gorman 		if (!valid_page)
261bb13ffebSMel Gorman 			valid_page = page;
262f40d1e42SMel Gorman 		if (!PageBuddy(page))
2632af120bcSLaura Abbott 			goto isolate_fail;
264f40d1e42SMel Gorman 
265f40d1e42SMel Gorman 		/*
266f40d1e42SMel Gorman 		 * The zone lock must be held to isolate freepages.
267f40d1e42SMel Gorman 		 * Unfortunately this is a very coarse lock and can be
268f40d1e42SMel Gorman 		 * heavily contended if there are parallel allocations
269f40d1e42SMel Gorman 		 * or parallel compactions. For async compaction, do not
270f40d1e42SMel Gorman 		 * spin on the lock, and acquire the lock as late as
271f40d1e42SMel Gorman 		 * possible.
272f40d1e42SMel Gorman 		 */
273f40d1e42SMel Gorman 		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
274f40d1e42SMel Gorman 								locked, cc);
275f40d1e42SMel Gorman 		if (!locked)
276f40d1e42SMel Gorman 			break;
277f40d1e42SMel Gorman 
278f40d1e42SMel Gorman 		/* Recheck this is a suitable migration target under lock */
27901ead534SJoonsoo Kim 		if (!strict && !checked_pageblock) {
28001ead534SJoonsoo Kim 			/*
28101ead534SJoonsoo Kim 			 * We need to check the suitability of the pageblock
28201ead534SJoonsoo Kim 			 * only once, and isolate_freepages_block() is called
28301ead534SJoonsoo Kim 			 * with a single pageblock range, so checking once is
28401ead534SJoonsoo Kim 			 * sufficient.
28501ead534SJoonsoo Kim 			 */
28601ead534SJoonsoo Kim 			checked_pageblock = true;
28701ead534SJoonsoo Kim 			if (!suitable_migration_target(page))
288f40d1e42SMel Gorman 				break;
28901ead534SJoonsoo Kim 		}
290f40d1e42SMel Gorman 
291f40d1e42SMel Gorman 		/* Recheck this is a buddy page under lock */
292f40d1e42SMel Gorman 		if (!PageBuddy(page))
2932af120bcSLaura Abbott 			goto isolate_fail;
294748446bbSMel Gorman 
295748446bbSMel Gorman 		/* Found a free page, break it into order-0 pages */
296748446bbSMel Gorman 		isolated = split_free_page(page);
297748446bbSMel Gorman 		total_isolated += isolated;
298748446bbSMel Gorman 		for (i = 0; i < isolated; i++) {
299748446bbSMel Gorman 			list_add(&page->lru, freelist);
300748446bbSMel Gorman 			page++;
301748446bbSMel Gorman 		}
302748446bbSMel Gorman 
303748446bbSMel Gorman 		/* If a page was split, advance to the end of it */
304748446bbSMel Gorman 		if (isolated) {
305748446bbSMel Gorman 			blockpfn += isolated - 1;
306748446bbSMel Gorman 			cursor += isolated - 1;
3072af120bcSLaura Abbott 			continue;
308748446bbSMel Gorman 		}
3092af120bcSLaura Abbott 
3102af120bcSLaura Abbott isolate_fail:
3112af120bcSLaura Abbott 		if (strict)
3122af120bcSLaura Abbott 			break;
3132af120bcSLaura Abbott 		else
3142af120bcSLaura Abbott 			continue;
3152af120bcSLaura Abbott 
316748446bbSMel Gorman 	}
317748446bbSMel Gorman 
318b7aba698SMel Gorman 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
319f40d1e42SMel Gorman 
320f40d1e42SMel Gorman 	/*
321f40d1e42SMel Gorman 	 * If strict isolation is requested by CMA then check that all the
322f40d1e42SMel Gorman 	 * pages requested were isolated. If there were any failures, 0 is
323f40d1e42SMel Gorman 	 * returned and CMA will fail.
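 * The caller (isolate_freepages_range) then releases anything already isolated.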
323f40d1e42SMel Gorman 	 */
3242af120bcSLaura Abbott 	if (strict && blockpfn < end_pfn)
325f40d1e42SMel Gorman 		total_isolated = 0;
326f40d1e42SMel Gorman 
327f40d1e42SMel Gorman 	if (locked)
328f40d1e42SMel Gorman 		spin_unlock_irqrestore(&cc->zone->lock, flags);
329f40d1e42SMel Gorman 
330bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
331bb13ffebSMel Gorman 	if (blockpfn == end_pfn)
332c89511abSMel Gorman 		update_pageblock_skip(cc, valid_page, total_isolated, false);
333bb13ffebSMel Gorman 
334010fc29aSMinchan Kim 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
335397487dbSMel Gorman 	if (total_isolated)
336010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, total_isolated);
337748446bbSMel Gorman 	return total_isolated;
338748446bbSMel Gorman }
339748446bbSMel Gorman 
34085aa125fSMichal Nazarewicz /**
34185aa125fSMichal Nazarewicz  * isolate_freepages_range() - isolate free pages.
34285aa125fSMichal Nazarewicz  * @start_pfn: The first PFN to start isolating.
34385aa125fSMichal Nazarewicz  * @end_pfn: The one-past-last PFN.
34485aa125fSMichal Nazarewicz  *
34585aa125fSMichal Nazarewicz  * Non-free pages, invalid PFNs, or zone boundaries within the
34685aa125fSMichal Nazarewicz  * [start_pfn, end_pfn) range are considered errors and cause the function
34785aa125fSMichal Nazarewicz  * to undo its actions and return zero.
34885aa125fSMichal Nazarewicz  *
34985aa125fSMichal Nazarewicz  * Otherwise, the function returns the one-past-the-last PFN of the isolated
35085aa125fSMichal Nazarewicz  * page (which may be greater than end_pfn if the end fell in the middle of
35185aa125fSMichal Nazarewicz  * a free page).
35285aa125fSMichal Nazarewicz  */
353ff9543fdSMichal Nazarewicz unsigned long
354bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
355bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn)
35685aa125fSMichal Nazarewicz {
357f40d1e42SMel Gorman 	unsigned long isolated, pfn, block_end_pfn;
35885aa125fSMichal Nazarewicz 	LIST_HEAD(freelist);
35985aa125fSMichal Nazarewicz 
36085aa125fSMichal Nazarewicz 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
361bb13ffebSMel Gorman 		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
36285aa125fSMichal Nazarewicz 			break;
36385aa125fSMichal Nazarewicz 
36485aa125fSMichal Nazarewicz 		/*
36585aa125fSMichal Nazarewicz 		 * On subsequent iterations ALIGN() is actually not needed,
36685aa125fSMichal Nazarewicz 		 * but we keep it so as not to complicate the code.
36785aa125fSMichal Nazarewicz 		 */
36885aa125fSMichal Nazarewicz 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
36985aa125fSMichal Nazarewicz 		block_end_pfn = min(block_end_pfn, end_pfn);
37085aa125fSMichal Nazarewicz 
371bb13ffebSMel Gorman 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
37285aa125fSMichal Nazarewicz 						   &freelist, true);
37385aa125fSMichal Nazarewicz 
37485aa125fSMichal Nazarewicz 		/*
37585aa125fSMichal Nazarewicz 		 * In strict mode, isolate_freepages_block() returns 0 if
37685aa125fSMichal Nazarewicz 		 * there are any holes in the block (i.e. invalid PFNs or
37785aa125fSMichal Nazarewicz 		 * non-free pages).
37885aa125fSMichal Nazarewicz 		 */
37985aa125fSMichal Nazarewicz 		if (!isolated)
38085aa125fSMichal Nazarewicz 			break;
38185aa125fSMichal Nazarewicz 
38285aa125fSMichal Nazarewicz 		/*
38385aa125fSMichal Nazarewicz 		 * If we managed to isolate pages, it is always (1 << n) *
38485aa125fSMichal Nazarewicz 		 * pageblock_nr_pages for some non-negative n. (Max order
38585aa125fSMichal Nazarewicz 		 * page may span two pageblocks).
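		 * Advancing pfn by 'isolated' therefore steps the scan in
		 * whole pageblocks.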
38685aa125fSMichal Nazarewicz 		 */
38785aa125fSMichal Nazarewicz 	}
38885aa125fSMichal Nazarewicz 
38985aa125fSMichal Nazarewicz 	/* split_free_page does not map the pages */
39085aa125fSMichal Nazarewicz 	map_pages(&freelist);
39185aa125fSMichal Nazarewicz 
39285aa125fSMichal Nazarewicz 	if (pfn < end_pfn) {
39385aa125fSMichal Nazarewicz 		/* Loop terminated early, cleanup. */
39485aa125fSMichal Nazarewicz 		release_freepages(&freelist);
39585aa125fSMichal Nazarewicz 		return 0;
39685aa125fSMichal Nazarewicz 	}
39785aa125fSMichal Nazarewicz 
39885aa125fSMichal Nazarewicz 	/* We don't use freelists for anything. */
39985aa125fSMichal Nazarewicz 	return pfn;
40085aa125fSMichal Nazarewicz }
40185aa125fSMichal Nazarewicz 
402748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
403c67fe375SMel Gorman static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
404748446bbSMel Gorman {
405748446bbSMel Gorman 	struct page *page;
406b9e84ac1SMinchan Kim 	unsigned int count[2] = { 0, };
407748446bbSMel Gorman 
408b9e84ac1SMinchan Kim 	list_for_each_entry(page, &cc->migratepages, lru)
409b9e84ac1SMinchan Kim 		count[!!page_is_file_cache(page)]++;
410748446bbSMel Gorman 
411c67fe375SMel Gorman 	/* If locked we can use the interrupt unsafe versions */
412c67fe375SMel Gorman 	if (locked) {
413b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
414b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
415c67fe375SMel Gorman 	} else {
416c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
417c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
418c67fe375SMel Gorman 	}
419748446bbSMel Gorman }
420748446bbSMel Gorman 
421748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
422748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
423748446bbSMel Gorman {
424bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
425748446bbSMel Gorman 
426748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
427748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
428bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
429bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
430748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
431748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
432748446bbSMel Gorman 
433bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
434748446bbSMel Gorman }
435748446bbSMel Gorman 
4362fe86e00SMichal Nazarewicz /**
4372fe86e00SMichal Nazarewicz  * isolate_migratepages_range() - isolate all migrate-able pages in range.
4382fe86e00SMichal Nazarewicz  * @zone: Zone pages are in.
4392fe86e00SMichal Nazarewicz  * @cc: Compaction control structure.
4402fe86e00SMichal Nazarewicz  * @low_pfn: The first PFN of the range.
4412fe86e00SMichal Nazarewicz  * @end_pfn: The one-past-the-last PFN of the range.
442e46a2879SMinchan Kim  * @unevictable: true if unevictable pages may also be isolated
4432fe86e00SMichal Nazarewicz  *
4442fe86e00SMichal Nazarewicz  * Isolate all pages that can be migrated from the range specified by
4452fe86e00SMichal Nazarewicz  * [low_pfn, end_pfn). Returns zero if there is a fatal signal
4462fe86e00SMichal Nazarewicz  * pending, otherwise the PFN of the first page that was not scanned
4472fe86e00SMichal Nazarewicz  * (which may be less than, equal to, or greater than end_pfn).
4482fe86e00SMichal Nazarewicz  *
4492fe86e00SMichal Nazarewicz  * Assumes that cc->migratepages is empty and cc->nr_migratepages is
4502fe86e00SMichal Nazarewicz  * zero.
4512fe86e00SMichal Nazarewicz  *
4522fe86e00SMichal Nazarewicz  * Apart from cc->migratepages and cc->nr_migratepages this function
4532fe86e00SMichal Nazarewicz  * does not modify any of cc's fields, in particular it does not modify
4542fe86e00SMichal Nazarewicz  * (or read, for that matter) cc->migrate_pfn.
455748446bbSMel Gorman  */
456ff9543fdSMichal Nazarewicz unsigned long
4572fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
458e46a2879SMinchan Kim 		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
459748446bbSMel Gorman {
4609927af74SMel Gorman 	unsigned long last_pageblock_nr = 0, pageblock_nr;
461b7aba698SMel Gorman 	unsigned long nr_scanned = 0, nr_isolated = 0;
462748446bbSMel Gorman 	struct list_head *migratelist = &cc->migratepages;
463f3fd4a61SKonstantin Khlebnikov 	isolate_mode_t mode = 0;
464fa9add64SHugh Dickins 	struct lruvec *lruvec;
465c67fe375SMel Gorman 	unsigned long flags;
4662a1402aaSMel Gorman 	bool locked = false;
467bb13ffebSMel Gorman 	struct page *page = NULL, *valid_page = NULL;
46850b5b094SVlastimil Babka 	bool skipped_async_unsuitable = false;
469748446bbSMel Gorman 
470748446bbSMel Gorman 	/*
471748446bbSMel Gorman 	 * Ensure that there are not too many pages isolated from the LRU
472748446bbSMel Gorman 	 * list by either parallel reclaimers or compaction. If there are,
473748446bbSMel Gorman 	 * delay for some time until fewer pages are isolated.
474748446bbSMel Gorman 	 */
475748446bbSMel Gorman 	while (unlikely(too_many_isolated(zone))) {
476f9e35b3bSMel Gorman 		/* async migration should just abort */
47768e3e926SLinus Torvalds 		if (!cc->sync)
4782fe86e00SMichal Nazarewicz 			return 0;
479f9e35b3bSMel Gorman 
480748446bbSMel Gorman 		congestion_wait(BLK_RW_ASYNC, HZ/10);
481748446bbSMel Gorman 
482748446bbSMel Gorman 		if (fatal_signal_pending(current))
4832fe86e00SMichal Nazarewicz 			return 0;
484748446bbSMel Gorman 	}
485748446bbSMel Gorman 
486748446bbSMel Gorman 	/* Time to isolate some pages for migration */
487b2eef8c0SAndrea Arcangeli 	cond_resched();
488748446bbSMel Gorman 	for (; low_pfn < end_pfn; low_pfn++) {
489b2eef8c0SAndrea Arcangeli 		/* give a chance to irqs before checking need_resched() */
490*be1aa03bSJoonsoo Kim 		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
4912a1402aaSMel Gorman 			if (should_release_lock(&zone->lru_lock)) {
492c67fe375SMel Gorman 				spin_unlock_irqrestore(&zone->lru_lock, flags);
493b2eef8c0SAndrea Arcangeli 				locked = false;
494b2eef8c0SAndrea Arcangeli 			}
4952a1402aaSMel Gorman 		}
496b2eef8c0SAndrea Arcangeli 
4970bf380bcSMel Gorman 		/*
4980bf380bcSMel Gorman 		 * migrate_pfn does not necessarily start aligned to a
4990bf380bcSMel Gorman 		 * pageblock.
Ensure that pfn_valid is called when moving 5000bf380bcSMel Gorman * into a new MAX_ORDER_NR_PAGES range in case of large 5010bf380bcSMel Gorman * memory holes within the zone 5020bf380bcSMel Gorman */ 5030bf380bcSMel Gorman if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { 5040bf380bcSMel Gorman if (!pfn_valid(low_pfn)) { 5050bf380bcSMel Gorman low_pfn += MAX_ORDER_NR_PAGES - 1; 5060bf380bcSMel Gorman continue; 5070bf380bcSMel Gorman } 5080bf380bcSMel Gorman } 5090bf380bcSMel Gorman 510748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 511748446bbSMel Gorman continue; 512b7aba698SMel Gorman nr_scanned++; 513748446bbSMel Gorman 514dc908600SMel Gorman /* 515dc908600SMel Gorman * Get the page and ensure the page is within the same zone. 516dc908600SMel Gorman * See the comment in isolate_freepages about overlapping 517dc908600SMel Gorman * nodes. It is deliberate that the new zone lock is not taken 518dc908600SMel Gorman * as memory compaction should not move pages between nodes. 519dc908600SMel Gorman */ 520748446bbSMel Gorman page = pfn_to_page(low_pfn); 521dc908600SMel Gorman if (page_zone(page) != zone) 522dc908600SMel Gorman continue; 523dc908600SMel Gorman 524bb13ffebSMel Gorman if (!valid_page) 525bb13ffebSMel Gorman valid_page = page; 526bb13ffebSMel Gorman 527bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 528bb13ffebSMel Gorman pageblock_nr = low_pfn >> pageblock_order; 529bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 530bb13ffebSMel Gorman goto next_pageblock; 531bb13ffebSMel Gorman 5326c14466cSMel Gorman /* 5336c14466cSMel Gorman * Skip if free. page_order cannot be used without zone->lock 5346c14466cSMel Gorman * as nothing prevents parallel allocations or buddy merging. 5356c14466cSMel Gorman */ 536748446bbSMel Gorman if (PageBuddy(page)) 537748446bbSMel Gorman continue; 538748446bbSMel Gorman 5399927af74SMel Gorman /* 5409927af74SMel Gorman * For async migration, also only scan in MOVABLE blocks. Async 5419927af74SMel Gorman * migration is optimistic to see if the minimum amount of work 5429927af74SMel Gorman * satisfies the allocation 5439927af74SMel Gorman */ 54468e3e926SLinus Torvalds if (!cc->sync && last_pageblock_nr != pageblock_nr && 54547118af0SMichal Nazarewicz !migrate_async_suitable(get_pageblock_migratetype(page))) { 546c89511abSMel Gorman cc->finished_update_migrate = true; 54750b5b094SVlastimil Babka skipped_async_unsuitable = true; 5482a1402aaSMel Gorman goto next_pageblock; 5499927af74SMel Gorman } 5509927af74SMel Gorman 551bf6bddf1SRafael Aquini /* 552bf6bddf1SRafael Aquini * Check may be lockless but that's ok as we recheck later. 553bf6bddf1SRafael Aquini * It's possible to migrate LRU pages and balloon pages 554bf6bddf1SRafael Aquini * Skip any other type of page 555bf6bddf1SRafael Aquini */ 556bf6bddf1SRafael Aquini if (!PageLRU(page)) { 557bf6bddf1SRafael Aquini if (unlikely(balloon_page_movable(page))) { 558bf6bddf1SRafael Aquini if (locked && balloon_page_isolate(page)) { 559bf6bddf1SRafael Aquini /* Successfully isolated */ 560bf6bddf1SRafael Aquini cc->finished_update_migrate = true; 561bf6bddf1SRafael Aquini list_add(&page->lru, migratelist); 562bf6bddf1SRafael Aquini cc->nr_migratepages++; 563bf6bddf1SRafael Aquini nr_isolated++; 564bf6bddf1SRafael Aquini goto check_compact_cluster; 565bf6bddf1SRafael Aquini } 566bf6bddf1SRafael Aquini } 567bc835011SAndrea Arcangeli continue; 568bf6bddf1SRafael Aquini } 569bc835011SAndrea Arcangeli 570bc835011SAndrea Arcangeli /* 5712a1402aaSMel Gorman * PageLRU is set. 
lru_lock normally excludes isolation 5722a1402aaSMel Gorman * splitting and collapsing (collapsing has already happened 5732a1402aaSMel Gorman * if PageLRU is set) but the lock is not necessarily taken 5742a1402aaSMel Gorman * here and it is wasteful to take it just to check transhuge. 5752a1402aaSMel Gorman * Check TransHuge without lock and skip the whole pageblock if 5762a1402aaSMel Gorman * it's either a transhuge or hugetlbfs page, as calling 5772a1402aaSMel Gorman * compound_order() without preventing THP from splitting the 5782a1402aaSMel Gorman * page underneath us may return surprising results. 579bc835011SAndrea Arcangeli */ 580bc835011SAndrea Arcangeli if (PageTransHuge(page)) { 5812a1402aaSMel Gorman if (!locked) 5822a1402aaSMel Gorman goto next_pageblock; 5832a1402aaSMel Gorman low_pfn += (1 << compound_order(page)) - 1; 5842a1402aaSMel Gorman continue; 5852a1402aaSMel Gorman } 5862a1402aaSMel Gorman 587119d6d59SDavid Rientjes /* 588119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 589119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 590119d6d59SDavid Rientjes * admittedly racy check. 591119d6d59SDavid Rientjes */ 592119d6d59SDavid Rientjes if (!page_mapping(page) && 593119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 594119d6d59SDavid Rientjes continue; 595119d6d59SDavid Rientjes 5962a1402aaSMel Gorman /* Check if it is ok to still hold the lock */ 5972a1402aaSMel Gorman locked = compact_checklock_irqsave(&zone->lru_lock, &flags, 5982a1402aaSMel Gorman locked, cc); 5992a1402aaSMel Gorman if (!locked || fatal_signal_pending(current)) 6002a1402aaSMel Gorman break; 6012a1402aaSMel Gorman 6022a1402aaSMel Gorman /* Recheck PageLRU and PageTransHuge under lock */ 6032a1402aaSMel Gorman if (!PageLRU(page)) 6042a1402aaSMel Gorman continue; 6052a1402aaSMel Gorman if (PageTransHuge(page)) { 606bc835011SAndrea Arcangeli low_pfn += (1 << compound_order(page)) - 1; 607bc835011SAndrea Arcangeli continue; 608bc835011SAndrea Arcangeli } 609bc835011SAndrea Arcangeli 61068e3e926SLinus Torvalds if (!cc->sync) 611c8244935SMel Gorman mode |= ISOLATE_ASYNC_MIGRATE; 612c8244935SMel Gorman 613e46a2879SMinchan Kim if (unevictable) 614e46a2879SMinchan Kim mode |= ISOLATE_UNEVICTABLE; 615e46a2879SMinchan Kim 616fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 617fa9add64SHugh Dickins 618748446bbSMel Gorman /* Try isolate the page */ 619f3fd4a61SKonstantin Khlebnikov if (__isolate_lru_page(page, mode) != 0) 620748446bbSMel Gorman continue; 621748446bbSMel Gorman 622309381feSSasha Levin VM_BUG_ON_PAGE(PageTransCompound(page), page); 623bc835011SAndrea Arcangeli 624748446bbSMel Gorman /* Successfully isolated */ 625c89511abSMel Gorman cc->finished_update_migrate = true; 626fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 627748446bbSMel Gorman list_add(&page->lru, migratelist); 628748446bbSMel Gorman cc->nr_migratepages++; 629b7aba698SMel Gorman nr_isolated++; 630748446bbSMel Gorman 631bf6bddf1SRafael Aquini check_compact_cluster: 632748446bbSMel Gorman /* Avoid isolating too much */ 63331b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 63431b8384aSHillf Danton ++low_pfn; 635748446bbSMel Gorman break; 636748446bbSMel Gorman } 6372a1402aaSMel Gorman 6382a1402aaSMel Gorman continue; 6392a1402aaSMel Gorman 6402a1402aaSMel Gorman next_pageblock: 641a9aacbccSMel Gorman low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1; 6422a1402aaSMel Gorman 
last_pageblock_nr = pageblock_nr; 64331b8384aSHillf Danton } 644748446bbSMel Gorman 645c67fe375SMel Gorman acct_isolated(zone, locked, cc); 646748446bbSMel Gorman 647c67fe375SMel Gorman if (locked) 648c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags); 649748446bbSMel Gorman 65050b5b094SVlastimil Babka /* 65150b5b094SVlastimil Babka * Update the pageblock-skip information and cached scanner pfn, 65250b5b094SVlastimil Babka * if the whole pageblock was scanned without isolating any page. 65350b5b094SVlastimil Babka * This is not done when pageblock was skipped due to being unsuitable 65450b5b094SVlastimil Babka * for async compaction, so that eventual sync compaction can try. 65550b5b094SVlastimil Babka */ 65650b5b094SVlastimil Babka if (low_pfn == end_pfn && !skipped_async_unsuitable) 657c89511abSMel Gorman update_pageblock_skip(cc, valid_page, nr_isolated, true); 658bb13ffebSMel Gorman 659b7aba698SMel Gorman trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 660b7aba698SMel Gorman 661010fc29aSMinchan Kim count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned); 662397487dbSMel Gorman if (nr_isolated) 663010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 664397487dbSMel Gorman 6652fe86e00SMichal Nazarewicz return low_pfn; 6662fe86e00SMichal Nazarewicz } 6672fe86e00SMichal Nazarewicz 668ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 669ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 670ff9543fdSMichal Nazarewicz /* 671ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 672ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 673ff9543fdSMichal Nazarewicz */ 674ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone, 675ff9543fdSMichal Nazarewicz struct compact_control *cc) 676ff9543fdSMichal Nazarewicz { 677ff9543fdSMichal Nazarewicz struct page *page; 678108bcc96SCody P Schafer unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn; 679ff9543fdSMichal Nazarewicz int nr_freepages = cc->nr_freepages; 680ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 6812fe86e00SMichal Nazarewicz 682ff9543fdSMichal Nazarewicz /* 683ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 684ff9543fdSMichal Nazarewicz * scanned from (or the end of the zone if starting). The low point 685ff9543fdSMichal Nazarewicz * is the end of the pageblock the migration scanner is using. 686ff9543fdSMichal Nazarewicz */ 687ff9543fdSMichal Nazarewicz pfn = cc->free_pfn; 6887ed695e0SVlastimil Babka low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 6892fe86e00SMichal Nazarewicz 690ff9543fdSMichal Nazarewicz /* 691ff9543fdSMichal Nazarewicz * Take care that if the migration scanner is at the end of the zone 692ff9543fdSMichal Nazarewicz * that the free scanner does not accidentally move to the next zone 693ff9543fdSMichal Nazarewicz * in the next isolation cycle. 694ff9543fdSMichal Nazarewicz */ 695ff9543fdSMichal Nazarewicz high_pfn = min(low_pfn, pfn); 696ff9543fdSMichal Nazarewicz 697108bcc96SCody P Schafer z_end_pfn = zone_end_pfn(zone); 698ff9543fdSMichal Nazarewicz 699ff9543fdSMichal Nazarewicz /* 700ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 701ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 702ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 
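 * The free scanner starts at cc->free_pfn and walks one pageblock at a time
 * towards low_pfn.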
703ff9543fdSMichal Nazarewicz 	 */
7047ed695e0SVlastimil Babka 	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
705ff9543fdSMichal Nazarewicz 					pfn -= pageblock_nr_pages) {
706ff9543fdSMichal Nazarewicz 		unsigned long isolated;
707ff9543fdSMichal Nazarewicz 
708f6ea3adbSDavid Rientjes 		/*
709f6ea3adbSDavid Rientjes 		 * This can iterate a massively long zone without finding any
710f6ea3adbSDavid Rientjes 		 * suitable migration targets, so periodically check if we need
711f6ea3adbSDavid Rientjes 		 * to schedule.
712f6ea3adbSDavid Rientjes 		 */
713f6ea3adbSDavid Rientjes 		cond_resched();
714f6ea3adbSDavid Rientjes 
715ff9543fdSMichal Nazarewicz 		if (!pfn_valid(pfn))
716ff9543fdSMichal Nazarewicz 			continue;
717ff9543fdSMichal Nazarewicz 
718ff9543fdSMichal Nazarewicz 		/*
719ff9543fdSMichal Nazarewicz 		 * Check for overlapping nodes/zones. It's possible on some
720ff9543fdSMichal Nazarewicz 		 * configurations to have a setup like
721ff9543fdSMichal Nazarewicz 		 * node0 node1 node0
722ff9543fdSMichal Nazarewicz 		 * i.e. it's possible that not all pages within a zone's range
723ff9543fdSMichal Nazarewicz 		 * belong to a single zone.
724ff9543fdSMichal Nazarewicz 		 */
725ff9543fdSMichal Nazarewicz 		page = pfn_to_page(pfn);
726ff9543fdSMichal Nazarewicz 		if (page_zone(page) != zone)
727ff9543fdSMichal Nazarewicz 			continue;
728ff9543fdSMichal Nazarewicz 
729ff9543fdSMichal Nazarewicz 		/* Check the block is suitable for migration */
73068e3e926SLinus Torvalds 		if (!suitable_migration_target(page))
731ff9543fdSMichal Nazarewicz 			continue;
73268e3e926SLinus Torvalds 
733bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
734bb13ffebSMel Gorman 		if (!isolation_suitable(cc, page))
735bb13ffebSMel Gorman 			continue;
736bb13ffebSMel Gorman 
737f40d1e42SMel Gorman 		/* Found a block suitable for isolating free pages from */
738ff9543fdSMichal Nazarewicz 		isolated = 0;
73960177d31SMel Gorman 
74060177d31SMel Gorman 		/*
74160177d31SMel Gorman 		 * As pfn may not start aligned, pfn + pageblock_nr_pages
74260177d31SMel Gorman 		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
74360177d31SMel Gorman 		 * a pfn_valid check. Ensure isolate_freepages_block()
74460177d31SMel Gorman 		 * only scans within a pageblock.
74560177d31SMel Gorman 		 */
74660177d31SMel Gorman 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
747108bcc96SCody P Schafer 		end_pfn = min(end_pfn, z_end_pfn);
748f40d1e42SMel Gorman 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
749ff9543fdSMichal Nazarewicz 						   freelist, false);
750ff9543fdSMichal Nazarewicz 		nr_freepages += isolated;
751ff9543fdSMichal Nazarewicz 
752ff9543fdSMichal Nazarewicz 		/*
753ff9543fdSMichal Nazarewicz 		 * Record the highest PFN we isolated pages from.
When next 754ff9543fdSMichal Nazarewicz * looking for free pages, the search will restart here as 755ff9543fdSMichal Nazarewicz * page migration may have returned some pages to the allocator 756ff9543fdSMichal Nazarewicz */ 757c89511abSMel Gorman if (isolated) { 758c89511abSMel Gorman cc->finished_update_free = true; 759ff9543fdSMichal Nazarewicz high_pfn = max(high_pfn, pfn); 760ff9543fdSMichal Nazarewicz } 761c89511abSMel Gorman } 762ff9543fdSMichal Nazarewicz 763ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 764ff9543fdSMichal Nazarewicz map_pages(freelist); 765ff9543fdSMichal Nazarewicz 7667ed695e0SVlastimil Babka /* 7677ed695e0SVlastimil Babka * If we crossed the migrate scanner, we want to keep it that way 7687ed695e0SVlastimil Babka * so that compact_finished() may detect this 7697ed695e0SVlastimil Babka */ 7707ed695e0SVlastimil Babka if (pfn < low_pfn) 7717ed695e0SVlastimil Babka cc->free_pfn = max(pfn, zone->zone_start_pfn); 7727ed695e0SVlastimil Babka else 773ff9543fdSMichal Nazarewicz cc->free_pfn = high_pfn; 774ff9543fdSMichal Nazarewicz cc->nr_freepages = nr_freepages; 775748446bbSMel Gorman } 776748446bbSMel Gorman 777748446bbSMel Gorman /* 778748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 779748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 780748446bbSMel Gorman */ 781748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 782748446bbSMel Gorman unsigned long data, 783748446bbSMel Gorman int **result) 784748446bbSMel Gorman { 785748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 786748446bbSMel Gorman struct page *freepage; 787748446bbSMel Gorman 788748446bbSMel Gorman /* Isolate free pages if necessary */ 789748446bbSMel Gorman if (list_empty(&cc->freepages)) { 790748446bbSMel Gorman isolate_freepages(cc->zone, cc); 791748446bbSMel Gorman 792748446bbSMel Gorman if (list_empty(&cc->freepages)) 793748446bbSMel Gorman return NULL; 794748446bbSMel Gorman } 795748446bbSMel Gorman 796748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 797748446bbSMel Gorman list_del(&freepage->lru); 798748446bbSMel Gorman cc->nr_freepages--; 799748446bbSMel Gorman 800748446bbSMel Gorman return freepage; 801748446bbSMel Gorman } 802748446bbSMel Gorman 803748446bbSMel Gorman /* 804748446bbSMel Gorman * We cannot control nr_migratepages and nr_freepages fully when migration is 805748446bbSMel Gorman * running as migrate_pages() has no knowledge of compact_control. When 806748446bbSMel Gorman * migration is complete, we count the number of pages on the lists by hand. 
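 * The refreshed counts feed the migratepages tracepoint in compact_zone().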
807748446bbSMel Gorman */ 808748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc) 809748446bbSMel Gorman { 810748446bbSMel Gorman int nr_migratepages = 0; 811748446bbSMel Gorman int nr_freepages = 0; 812748446bbSMel Gorman struct page *page; 813748446bbSMel Gorman 814748446bbSMel Gorman list_for_each_entry(page, &cc->migratepages, lru) 815748446bbSMel Gorman nr_migratepages++; 816748446bbSMel Gorman list_for_each_entry(page, &cc->freepages, lru) 817748446bbSMel Gorman nr_freepages++; 818748446bbSMel Gorman 819748446bbSMel Gorman cc->nr_migratepages = nr_migratepages; 820748446bbSMel Gorman cc->nr_freepages = nr_freepages; 821748446bbSMel Gorman } 822748446bbSMel Gorman 823ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 824ff9543fdSMichal Nazarewicz typedef enum { 825ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 826ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 827ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 828ff9543fdSMichal Nazarewicz } isolate_migrate_t; 829ff9543fdSMichal Nazarewicz 830ff9543fdSMichal Nazarewicz /* 831ff9543fdSMichal Nazarewicz * Isolate all pages that can be migrated from the block pointed to by 832ff9543fdSMichal Nazarewicz * the migrate scanner within compact_control. 833ff9543fdSMichal Nazarewicz */ 834ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 835ff9543fdSMichal Nazarewicz struct compact_control *cc) 836ff9543fdSMichal Nazarewicz { 837ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 838ff9543fdSMichal Nazarewicz 839ff9543fdSMichal Nazarewicz /* Do not scan outside zone boundaries */ 840ff9543fdSMichal Nazarewicz low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 841ff9543fdSMichal Nazarewicz 842ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 843a9aacbccSMel Gorman end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 844ff9543fdSMichal Nazarewicz 845ff9543fdSMichal Nazarewicz /* Do not cross the free scanner or scan within a memory hole */ 846ff9543fdSMichal Nazarewicz if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 847ff9543fdSMichal Nazarewicz cc->migrate_pfn = end_pfn; 848ff9543fdSMichal Nazarewicz return ISOLATE_NONE; 849ff9543fdSMichal Nazarewicz } 850ff9543fdSMichal Nazarewicz 851ff9543fdSMichal Nazarewicz /* Perform the isolation */ 852e46a2879SMinchan Kim low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false); 853e64c5237SShaohua Li if (!low_pfn || cc->contended) 854ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 855ff9543fdSMichal Nazarewicz 856ff9543fdSMichal Nazarewicz cc->migrate_pfn = low_pfn; 857ff9543fdSMichal Nazarewicz 858ff9543fdSMichal Nazarewicz return ISOLATE_SUCCESS; 859ff9543fdSMichal Nazarewicz } 860ff9543fdSMichal Nazarewicz 861748446bbSMel Gorman static int compact_finished(struct zone *zone, 862748446bbSMel Gorman struct compact_control *cc) 863748446bbSMel Gorman { 8648fb74b9fSMel Gorman unsigned int order; 8655a03b051SAndrea Arcangeli unsigned long watermark; 86656de7263SMel Gorman 867748446bbSMel Gorman if (fatal_signal_pending(current)) 868748446bbSMel Gorman return COMPACT_PARTIAL; 869748446bbSMel Gorman 870753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 871bb13ffebSMel Gorman if (cc->free_pfn <= cc->migrate_pfn) { 87255b7c4c9SVlastimil Babka /* Let the next compaction start anew. 
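Both cached scanner positions are reset to the zone boundaries.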
*/ 87355b7c4c9SVlastimil Babka zone->compact_cached_migrate_pfn = zone->zone_start_pfn; 87455b7c4c9SVlastimil Babka zone->compact_cached_free_pfn = zone_end_pfn(zone); 87555b7c4c9SVlastimil Babka 87662997027SMel Gorman /* 87762997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 87862997027SMel Gorman * by kswapd when it goes to sleep. kswapd does not set the 87962997027SMel Gorman * flag itself as the decision to be clear should be directly 88062997027SMel Gorman * based on an allocation request. 88162997027SMel Gorman */ 88262997027SMel Gorman if (!current_is_kswapd()) 88362997027SMel Gorman zone->compact_blockskip_flush = true; 88462997027SMel Gorman 885748446bbSMel Gorman return COMPACT_COMPLETE; 886bb13ffebSMel Gorman } 887748446bbSMel Gorman 88882478fb7SJohannes Weiner /* 88982478fb7SJohannes Weiner * order == -1 is expected when compacting via 89082478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 89182478fb7SJohannes Weiner */ 89256de7263SMel Gorman if (cc->order == -1) 89356de7263SMel Gorman return COMPACT_CONTINUE; 89456de7263SMel Gorman 8953957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 8963957c776SMichal Hocko watermark = low_wmark_pages(zone); 8973957c776SMichal Hocko watermark += (1 << cc->order); 8983957c776SMichal Hocko 8993957c776SMichal Hocko if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 9003957c776SMichal Hocko return COMPACT_CONTINUE; 9013957c776SMichal Hocko 90256de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 90356de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 9048fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 9058fb74b9fSMel Gorman 90656de7263SMel Gorman /* Job done if page is free of the right migratetype */ 9071fb3f8caSMel Gorman if (!list_empty(&area->free_list[cc->migratetype])) 90856de7263SMel Gorman return COMPACT_PARTIAL; 90956de7263SMel Gorman 91056de7263SMel Gorman /* Job done if allocation would set block type */ 9111fb3f8caSMel Gorman if (cc->order >= pageblock_order && area->nr_free) 91256de7263SMel Gorman return COMPACT_PARTIAL; 91356de7263SMel Gorman } 91456de7263SMel Gorman 915748446bbSMel Gorman return COMPACT_CONTINUE; 916748446bbSMel Gorman } 917748446bbSMel Gorman 9183e7d3449SMel Gorman /* 9193e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 9203e7d3449SMel Gorman * Returns 9213e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 9223e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 9233e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 9243e7d3449SMel Gorman */ 9253e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order) 9263e7d3449SMel Gorman { 9273e7d3449SMel Gorman int fragindex; 9283e7d3449SMel Gorman unsigned long watermark; 9293e7d3449SMel Gorman 9303e7d3449SMel Gorman /* 9313957c776SMichal Hocko * order == -1 is expected when compacting via 9323957c776SMichal Hocko * /proc/sys/vm/compact_memory 9333957c776SMichal Hocko */ 9343957c776SMichal Hocko if (order == -1) 9353957c776SMichal Hocko return COMPACT_CONTINUE; 9363957c776SMichal Hocko 9373957c776SMichal Hocko /* 9383e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 
9393e7d3449SMel Gorman * This is because during migration, copies of pages need to be 9403e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 9413e7d3449SMel Gorman */ 9423e7d3449SMel Gorman watermark = low_wmark_pages(zone) + (2UL << order); 9433e7d3449SMel Gorman if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 9443e7d3449SMel Gorman return COMPACT_SKIPPED; 9453e7d3449SMel Gorman 9463e7d3449SMel Gorman /* 9473e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 9483e7d3449SMel Gorman * low memory or external fragmentation 9493e7d3449SMel Gorman * 950a582a738SShaohua Li * index of -1000 implies allocations might succeed depending on 951a582a738SShaohua Li * watermarks 9523e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 9533e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 9543e7d3449SMel Gorman * 9553e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 9563e7d3449SMel Gorman */ 9573e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 9583e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 9593e7d3449SMel Gorman return COMPACT_SKIPPED; 9603e7d3449SMel Gorman 961a582a738SShaohua Li if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 962a582a738SShaohua Li 0, 0)) 9633e7d3449SMel Gorman return COMPACT_PARTIAL; 9643e7d3449SMel Gorman 9653e7d3449SMel Gorman return COMPACT_CONTINUE; 9663e7d3449SMel Gorman } 9673e7d3449SMel Gorman 968748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 969748446bbSMel Gorman { 970748446bbSMel Gorman int ret; 971c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 972108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 973748446bbSMel Gorman 9743e7d3449SMel Gorman ret = compaction_suitable(zone, cc->order); 9753e7d3449SMel Gorman switch (ret) { 9763e7d3449SMel Gorman case COMPACT_PARTIAL: 9773e7d3449SMel Gorman case COMPACT_SKIPPED: 9783e7d3449SMel Gorman /* Compaction is likely to fail */ 9793e7d3449SMel Gorman return ret; 9803e7d3449SMel Gorman case COMPACT_CONTINUE: 9813e7d3449SMel Gorman /* Fall through to compaction */ 9823e7d3449SMel Gorman ; 9833e7d3449SMel Gorman } 9843e7d3449SMel Gorman 985c89511abSMel Gorman /* 986d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 987d3132e4bSVlastimil Babka * is about to be retried after being deferred. kswapd does not do 988d3132e4bSVlastimil Babka * this reset as it'll reset the cached information when going to sleep. 989d3132e4bSVlastimil Babka */ 990d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) 991d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 992d3132e4bSVlastimil Babka 993d3132e4bSVlastimil Babka /* 994c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 995c89511abSMel Gorman * information on where the scanners should start but check that it 996c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 
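 * Out-of-range values are reset to the zone edges and the cached copies are
 * updated to match.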
997c89511abSMel Gorman */ 998c89511abSMel Gorman cc->migrate_pfn = zone->compact_cached_migrate_pfn; 999c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1000c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { 1001c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); 1002c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1003c89511abSMel Gorman } 1004c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { 1005c89511abSMel Gorman cc->migrate_pfn = start_pfn; 1006c89511abSMel Gorman zone->compact_cached_migrate_pfn = cc->migrate_pfn; 1007c89511abSMel Gorman } 1008748446bbSMel Gorman 10090eb927c0SMel Gorman trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); 10100eb927c0SMel Gorman 1011748446bbSMel Gorman migrate_prep_local(); 1012748446bbSMel Gorman 1013748446bbSMel Gorman while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 1014748446bbSMel Gorman unsigned long nr_migrate, nr_remaining; 10159d502c1cSMinchan Kim int err; 1016748446bbSMel Gorman 1017f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1018f9e35b3bSMel Gorman case ISOLATE_ABORT: 1019f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 10205733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1021e64c5237SShaohua Li cc->nr_migratepages = 0; 1022f9e35b3bSMel Gorman goto out; 1023f9e35b3bSMel Gorman case ISOLATE_NONE: 1024748446bbSMel Gorman continue; 1025f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1026f9e35b3bSMel Gorman ; 1027f9e35b3bSMel Gorman } 1028748446bbSMel Gorman 1029748446bbSMel Gorman nr_migrate = cc->nr_migratepages; 10309d502c1cSMinchan Kim err = migrate_pages(&cc->migratepages, compaction_alloc, 10319c620e2bSHugh Dickins (unsigned long)cc, 10327b2a2d4aSMel Gorman cc->sync ? 
MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
10337b2a2d4aSMel Gorman 				MR_COMPACTION);
1034748446bbSMel Gorman 		update_nr_listpages(cc);
1035748446bbSMel Gorman 		nr_remaining = cc->nr_migratepages;
1036748446bbSMel Gorman 
1037b7aba698SMel Gorman 		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
1038b7aba698SMel Gorman 						nr_remaining);
1039748446bbSMel Gorman 
10405733c7d1SRafael Aquini 		/* Release isolated pages not migrated */
10419d502c1cSMinchan Kim 		if (err) {
10425733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1043748446bbSMel Gorman 			cc->nr_migratepages = 0;
10447ed695e0SVlastimil Babka 			/*
10457ed695e0SVlastimil Babka 			 * migrate_pages() may return -ENOMEM when scanners meet
10467ed695e0SVlastimil Babka 			 * and we want compact_finished() to detect it
10477ed695e0SVlastimil Babka 			 */
10487ed695e0SVlastimil Babka 			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
10494bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
10504bf2bba3SDavid Rientjes 				goto out;
1051748446bbSMel Gorman 			}
10524bf2bba3SDavid Rientjes 		}
1053748446bbSMel Gorman 	}
1054748446bbSMel Gorman 
1055f9e35b3bSMel Gorman out:
1056748446bbSMel Gorman 	/* Release free pages and check accounting */
1057748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
1058748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
1059748446bbSMel Gorman 
10600eb927c0SMel Gorman 	trace_mm_compaction_end(ret);
10610eb927c0SMel Gorman 
1062748446bbSMel Gorman 	return ret;
1063748446bbSMel Gorman }
106476ab0f53SMel Gorman 
1065d43a87e6SKyungmin Park static unsigned long compact_zone_order(struct zone *zone,
106677f1fe6bSMel Gorman 					int order, gfp_t gfp_mask,
10678fb74b9fSMel Gorman 					bool sync, bool *contended)
106856de7263SMel Gorman {
1069e64c5237SShaohua Li 	unsigned long ret;
107056de7263SMel Gorman 	struct compact_control cc = {
107156de7263SMel Gorman 		.nr_freepages = 0,
107256de7263SMel Gorman 		.nr_migratepages = 0,
107356de7263SMel Gorman 		.order = order,
107456de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
107556de7263SMel Gorman 		.zone = zone,
107668e3e926SLinus Torvalds 		.sync = sync,
107756de7263SMel Gorman 	};
107856de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
107956de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
108056de7263SMel Gorman 
1081e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1082e64c5237SShaohua Li 
1083e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1084e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1085e64c5237SShaohua Li 
1086e64c5237SShaohua Li 	*contended = cc.contended;
1087e64c5237SShaohua Li 	return ret;
108856de7263SMel Gorman }
108956de7263SMel Gorman 
10905e771905SMel Gorman int sysctl_extfrag_threshold = 500;
10915e771905SMel Gorman 
109256de7263SMel Gorman /**
109356de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
109456de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
109556de7263SMel Gorman  * @order: The order of the current allocation
109656de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
109756de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
109877f1fe6bSMel Gorman  * @sync: Whether migration is synchronous or not
1099661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
110156de7263SMel Gorman  *
110256de7263SMel Gorman  * This is the main entry point for direct page compaction.
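 * Each eligible zone in the zonelist is compacted in turn and the highest
 * COMPACT_* result observed is returned.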
110356de7263SMel Gorman */ 110456de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist, 110577f1fe6bSMel Gorman int order, gfp_t gfp_mask, nodemask_t *nodemask, 11068fb74b9fSMel Gorman bool sync, bool *contended) 110756de7263SMel Gorman { 110856de7263SMel Gorman enum zone_type high_zoneidx = gfp_zone(gfp_mask); 110956de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 111056de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 111156de7263SMel Gorman struct zoneref *z; 111256de7263SMel Gorman struct zone *zone; 111356de7263SMel Gorman int rc = COMPACT_SKIPPED; 1114d95ea5d1SBartlomiej Zolnierkiewicz int alloc_flags = 0; 111556de7263SMel Gorman 11164ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1117c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 111856de7263SMel Gorman return rc; 111956de7263SMel Gorman 1120010fc29aSMinchan Kim count_compact_event(COMPACTSTALL); 112156de7263SMel Gorman 1122d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA 1123d95ea5d1SBartlomiej Zolnierkiewicz if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 1124d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags |= ALLOC_CMA; 1125d95ea5d1SBartlomiej Zolnierkiewicz #endif 112656de7263SMel Gorman /* Compact each zone in the list */ 112756de7263SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 112856de7263SMel Gorman nodemask) { 112956de7263SMel Gorman int status; 113056de7263SMel Gorman 1131c67fe375SMel Gorman status = compact_zone_order(zone, order, gfp_mask, sync, 11328fb74b9fSMel Gorman contended); 113356de7263SMel Gorman rc = max(status, rc); 113456de7263SMel Gorman 11353e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1136d95ea5d1SBartlomiej Zolnierkiewicz if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 1137d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags)) 113856de7263SMel Gorman break; 113956de7263SMel Gorman } 114056de7263SMel Gorman 114156de7263SMel Gorman return rc; 114256de7263SMel Gorman } 114356de7263SMel Gorman 114456de7263SMel Gorman 114576ab0f53SMel Gorman /* Compact all zones within a node */ 11467103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 114776ab0f53SMel Gorman { 114876ab0f53SMel Gorman int zoneid; 114976ab0f53SMel Gorman struct zone *zone; 115076ab0f53SMel Gorman 115176ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 115276ab0f53SMel Gorman 115376ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 115476ab0f53SMel Gorman if (!populated_zone(zone)) 115576ab0f53SMel Gorman continue; 115676ab0f53SMel Gorman 11577be62de9SRik van Riel cc->nr_freepages = 0; 11587be62de9SRik van Riel cc->nr_migratepages = 0; 11597be62de9SRik van Riel cc->zone = zone; 11607be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 11617be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 116276ab0f53SMel Gorman 1163aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 11647be62de9SRik van Riel compact_zone(zone, cc); 116576ab0f53SMel Gorman 1166aff62249SRik van Riel if (cc->order > 0) { 1167de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1168de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1169de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1170aff62249SRik van Riel /* Currently async compaction is never deferred. 
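Only a failed sync pass increments the defer counter. */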
117976ab0f53SMel Gorman 
11807103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
11817be62de9SRik van Riel {
11827be62de9SRik van Riel struct compact_control cc = {
11837be62de9SRik van Riel .order = order,
118468e3e926SLinus Torvalds .sync = false,
11857be62de9SRik van Riel };
11867be62de9SRik van Riel 
11873a7200afSMel Gorman if (!order)
11883a7200afSMel Gorman return;
11893a7200afSMel Gorman 
11907103f16dSAndrew Morton __compact_pgdat(pgdat, &cc);
11917be62de9SRik van Riel }
11927be62de9SRik van Riel 
11937103f16dSAndrew Morton static void compact_node(int nid)
11947be62de9SRik van Riel {
11957be62de9SRik van Riel struct compact_control cc = {
11967be62de9SRik van Riel .order = -1,
119768e3e926SLinus Torvalds .sync = true,
119891ca9186SDavid Rientjes .ignore_skip_hint = true,
11997be62de9SRik van Riel };
12007be62de9SRik van Riel 
12017103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc);
12027be62de9SRik van Riel }
12037be62de9SRik van Riel 
120476ab0f53SMel Gorman /* Compact all nodes in the system */
12057964c06dSJason Liu static void compact_nodes(void)
120676ab0f53SMel Gorman {
120776ab0f53SMel Gorman int nid;
120876ab0f53SMel Gorman 
12098575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
12108575ec29SHugh Dickins lru_add_drain_all();
12118575ec29SHugh Dickins 
121276ab0f53SMel Gorman for_each_online_node(nid)
121376ab0f53SMel Gorman compact_node(nid);
121476ab0f53SMel Gorman }
121576ab0f53SMel Gorman 
121676ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */
121776ab0f53SMel Gorman int sysctl_compact_memory;
121876ab0f53SMel Gorman 
121976ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
122076ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
122176ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
122276ab0f53SMel Gorman {
122376ab0f53SMel Gorman if (write)
12247964c06dSJason Liu compact_nodes();
122576ab0f53SMel Gorman 
122676ab0f53SMel Gorman return 0;
122776ab0f53SMel Gorman }
1228ed4a6d7fSMel Gorman 
12295e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12305e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
12315e771905SMel Gorman {
12325e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos);
12335e771905SMel Gorman 
12345e771905SMel Gorman return 0;
12355e771905SMel Gorman }
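/*
 * Usage example: both handlers above are wired to /proc/sys/vm. Writing
 * any value to compact_memory compacts every online node, while
 * extfrag_threshold (0-1000, default 500) sets the fragmentation index
 * at or below which compaction is skipped in favour of reclaim:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *	echo 500 > /proc/sys/vm/extfrag_threshold
 */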
12365e771905SMel Gorman 
1237ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
123874e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
123910fbcf4cSKay Sievers struct device_attribute *attr,
1240ed4a6d7fSMel Gorman const char *buf, size_t count)
1241ed4a6d7fSMel Gorman {
12428575ec29SHugh Dickins int nid = dev->id;
12438575ec29SHugh Dickins 
12448575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
12458575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
12468575ec29SHugh Dickins lru_add_drain_all();
12478575ec29SHugh Dickins 
12488575ec29SHugh Dickins compact_node(nid);
12498575ec29SHugh Dickins }
1250ed4a6d7fSMel Gorman 
1251ed4a6d7fSMel Gorman return count;
1252ed4a6d7fSMel Gorman }
125310fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1254ed4a6d7fSMel Gorman 
1255ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1256ed4a6d7fSMel Gorman {
125710fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact);
1258ed4a6d7fSMel Gorman }
1259ed4a6d7fSMel Gorman 
1260ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1261ed4a6d7fSMel Gorman {
126210fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact);
1263ed4a6d7fSMel Gorman }
1264ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1265ff9543fdSMichal Nazarewicz 
1266ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
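/*
 * Usage example: with CONFIG_SYSFS and CONFIG_NUMA, the attribute
 * registered above appears as a write-only per-node trigger, so a
 * single node can be compacted without touching the others:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */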