1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19748446bbSMel Gorman #include "internal.h" 20748446bbSMel Gorman 21010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 22010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 23010fc29aSMinchan Kim { 24010fc29aSMinchan Kim count_vm_event(item); 25010fc29aSMinchan Kim } 26010fc29aSMinchan Kim 27010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 28010fc29aSMinchan Kim { 29010fc29aSMinchan Kim count_vm_events(item, delta); 30010fc29aSMinchan Kim } 31010fc29aSMinchan Kim #else 32010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 33010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 34010fc29aSMinchan Kim #endif 35010fc29aSMinchan Kim 36ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 37ff9543fdSMichal Nazarewicz 38b7aba698SMel Gorman #define CREATE_TRACE_POINTS 39b7aba698SMel Gorman #include <trace/events/compaction.h> 40b7aba698SMel Gorman 41748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 42748446bbSMel Gorman { 43748446bbSMel Gorman struct page *page, *next; 44748446bbSMel Gorman unsigned long count = 0; 45748446bbSMel Gorman 46748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 47748446bbSMel Gorman list_del(&page->lru); 48748446bbSMel Gorman __free_page(page); 49748446bbSMel Gorman count++; 50748446bbSMel Gorman } 51748446bbSMel Gorman 52748446bbSMel Gorman return count; 53748446bbSMel Gorman } 54748446bbSMel Gorman 55ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 56ff9543fdSMichal Nazarewicz { 57ff9543fdSMichal Nazarewicz struct page *page; 58ff9543fdSMichal Nazarewicz 59ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 60ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 61ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 62ff9543fdSMichal Nazarewicz } 63ff9543fdSMichal Nazarewicz } 64ff9543fdSMichal Nazarewicz 6547118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 6647118af0SMichal Nazarewicz { 6747118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 6847118af0SMichal Nazarewicz } 6947118af0SMichal Nazarewicz 70bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 71bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. 
*/ 72bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 73bb13ffebSMel Gorman struct page *page) 74bb13ffebSMel Gorman { 75bb13ffebSMel Gorman if (cc->ignore_skip_hint) 76bb13ffebSMel Gorman return true; 77bb13ffebSMel Gorman 78bb13ffebSMel Gorman return !get_pageblock_skip(page); 79bb13ffebSMel Gorman } 80bb13ffebSMel Gorman 81bb13ffebSMel Gorman /* 82bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 83bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 84bb13ffebSMel Gorman * meet. 85bb13ffebSMel Gorman */ 8662997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 87bb13ffebSMel Gorman { 88bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 89108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 90bb13ffebSMel Gorman unsigned long pfn; 91bb13ffebSMel Gorman 92c89511abSMel Gorman zone->compact_cached_migrate_pfn = start_pfn; 93c89511abSMel Gorman zone->compact_cached_free_pfn = end_pfn; 9462997027SMel Gorman zone->compact_blockskip_flush = false; 95bb13ffebSMel Gorman 96bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 97bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 98bb13ffebSMel Gorman struct page *page; 99bb13ffebSMel Gorman 100bb13ffebSMel Gorman cond_resched(); 101bb13ffebSMel Gorman 102bb13ffebSMel Gorman if (!pfn_valid(pfn)) 103bb13ffebSMel Gorman continue; 104bb13ffebSMel Gorman 105bb13ffebSMel Gorman page = pfn_to_page(pfn); 106bb13ffebSMel Gorman if (zone != page_zone(page)) 107bb13ffebSMel Gorman continue; 108bb13ffebSMel Gorman 109bb13ffebSMel Gorman clear_pageblock_skip(page); 110bb13ffebSMel Gorman } 111bb13ffebSMel Gorman } 112bb13ffebSMel Gorman 11362997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 11462997027SMel Gorman { 11562997027SMel Gorman int zoneid; 11662997027SMel Gorman 11762997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 11862997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 11962997027SMel Gorman if (!populated_zone(zone)) 12062997027SMel Gorman continue; 12162997027SMel Gorman 12262997027SMel Gorman /* Only flush if a full compaction finished recently */ 12362997027SMel Gorman if (zone->compact_blockskip_flush) 12462997027SMel Gorman __reset_isolation_suitable(zone); 12562997027SMel Gorman } 12662997027SMel Gorman } 12762997027SMel Gorman 128bb13ffebSMel Gorman /* 129bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 13062997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
131bb13ffebSMel Gorman */
132c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
133c89511abSMel Gorman struct page *page, unsigned long nr_isolated,
134c89511abSMel Gorman bool migrate_scanner)
135bb13ffebSMel Gorman {
136c89511abSMel Gorman struct zone *zone = cc->zone;
1376815bf3fSJoonsoo Kim
1386815bf3fSJoonsoo Kim if (cc->ignore_skip_hint)
1396815bf3fSJoonsoo Kim return;
1406815bf3fSJoonsoo Kim
141bb13ffebSMel Gorman if (!page)
142bb13ffebSMel Gorman return;
143bb13ffebSMel Gorman
144c89511abSMel Gorman if (!nr_isolated) {
145c89511abSMel Gorman unsigned long pfn = page_to_pfn(page);
146bb13ffebSMel Gorman set_pageblock_skip(page);
147c89511abSMel Gorman
148c89511abSMel Gorman /* Update where compaction should restart */
149c89511abSMel Gorman if (migrate_scanner) {
150c89511abSMel Gorman if (!cc->finished_update_migrate &&
151c89511abSMel Gorman pfn > zone->compact_cached_migrate_pfn)
152c89511abSMel Gorman zone->compact_cached_migrate_pfn = pfn;
153c89511abSMel Gorman } else {
154c89511abSMel Gorman if (!cc->finished_update_free &&
155c89511abSMel Gorman pfn < zone->compact_cached_free_pfn)
156c89511abSMel Gorman zone->compact_cached_free_pfn = pfn;
157c89511abSMel Gorman }
158c89511abSMel Gorman }
159bb13ffebSMel Gorman }
160bb13ffebSMel Gorman #else
161bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
162bb13ffebSMel Gorman struct page *page)
163bb13ffebSMel Gorman {
164bb13ffebSMel Gorman return true;
165bb13ffebSMel Gorman }
166bb13ffebSMel Gorman
167c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
168c89511abSMel Gorman struct page *page, unsigned long nr_isolated,
169c89511abSMel Gorman bool migrate_scanner)
170bb13ffebSMel Gorman {
171bb13ffebSMel Gorman }
172bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */
173bb13ffebSMel Gorman
1742a1402aaSMel Gorman static inline bool should_release_lock(spinlock_t *lock)
1752a1402aaSMel Gorman {
1762a1402aaSMel Gorman return need_resched() || spin_is_contended(lock);
1772a1402aaSMel Gorman }
1782a1402aaSMel Gorman
17985aa125fSMichal Nazarewicz /*
180c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially
181c67fe375SMel Gorman * very heavily contended. Check if the process needs to be scheduled or
182c67fe375SMel Gorman * if the lock is contended. For async compaction, back out in the event
183c67fe375SMel Gorman * of severe contention. For sync compaction, schedule.
184c67fe375SMel Gorman *
185c67fe375SMel Gorman * Returns true if the lock is held.
186c67fe375SMel Gorman * Returns false if the lock is released and compaction should abort 187c67fe375SMel Gorman */ 188c67fe375SMel Gorman static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, 189c67fe375SMel Gorman bool locked, struct compact_control *cc) 190c67fe375SMel Gorman { 1912a1402aaSMel Gorman if (should_release_lock(lock)) { 192c67fe375SMel Gorman if (locked) { 193c67fe375SMel Gorman spin_unlock_irqrestore(lock, *flags); 194c67fe375SMel Gorman locked = false; 195c67fe375SMel Gorman } 196c67fe375SMel Gorman 197c67fe375SMel Gorman /* async aborts if taking too long or contended */ 198c67fe375SMel Gorman if (!cc->sync) { 199e64c5237SShaohua Li cc->contended = true; 200c67fe375SMel Gorman return false; 201c67fe375SMel Gorman } 202c67fe375SMel Gorman 203c67fe375SMel Gorman cond_resched(); 204c67fe375SMel Gorman } 205c67fe375SMel Gorman 206c67fe375SMel Gorman if (!locked) 207c67fe375SMel Gorman spin_lock_irqsave(lock, *flags); 208c67fe375SMel Gorman return true; 209c67fe375SMel Gorman } 210c67fe375SMel Gorman 211c67fe375SMel Gorman static inline bool compact_trylock_irqsave(spinlock_t *lock, 212c67fe375SMel Gorman unsigned long *flags, struct compact_control *cc) 213c67fe375SMel Gorman { 214c67fe375SMel Gorman return compact_checklock_irqsave(lock, flags, false, cc); 215c67fe375SMel Gorman } 216c67fe375SMel Gorman 217f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */ 218f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page) 219f40d1e42SMel Gorman { 220f40d1e42SMel Gorman int migratetype = get_pageblock_migratetype(page); 221f40d1e42SMel Gorman 222f40d1e42SMel Gorman /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 223194159fbSMinchan Kim if (migratetype == MIGRATE_RESERVE) 224194159fbSMinchan Kim return false; 225194159fbSMinchan Kim 226194159fbSMinchan Kim if (is_migrate_isolate(migratetype)) 227f40d1e42SMel Gorman return false; 228f40d1e42SMel Gorman 229f40d1e42SMel Gorman /* If the page is a large free page, then allow migration */ 230f40d1e42SMel Gorman if (PageBuddy(page) && page_order(page) >= pageblock_order) 231f40d1e42SMel Gorman return true; 232f40d1e42SMel Gorman 233f40d1e42SMel Gorman /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 234f40d1e42SMel Gorman if (migrate_async_suitable(migratetype)) 235f40d1e42SMel Gorman return true; 236f40d1e42SMel Gorman 237f40d1e42SMel Gorman /* Otherwise skip the block */ 238f40d1e42SMel Gorman return false; 239f40d1e42SMel Gorman } 240f40d1e42SMel Gorman 241c67fe375SMel Gorman /* 2429e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 2439e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 2449e4be470SJerome Marchand * (even though it may still end up isolating some pages). 
24585aa125fSMichal Nazarewicz */ 246f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 247f40d1e42SMel Gorman unsigned long blockpfn, 24885aa125fSMichal Nazarewicz unsigned long end_pfn, 24985aa125fSMichal Nazarewicz struct list_head *freelist, 25085aa125fSMichal Nazarewicz bool strict) 251748446bbSMel Gorman { 252b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 253bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 254f40d1e42SMel Gorman unsigned long nr_strict_required = end_pfn - blockpfn; 255f40d1e42SMel Gorman unsigned long flags; 256f40d1e42SMel Gorman bool locked = false; 257748446bbSMel Gorman 258748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 259748446bbSMel Gorman 260f40d1e42SMel Gorman /* Isolate free pages. */ 261748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 262748446bbSMel Gorman int isolated, i; 263748446bbSMel Gorman struct page *page = cursor; 264748446bbSMel Gorman 265b7aba698SMel Gorman nr_scanned++; 266f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 267748446bbSMel Gorman continue; 268bb13ffebSMel Gorman if (!valid_page) 269bb13ffebSMel Gorman valid_page = page; 270f40d1e42SMel Gorman if (!PageBuddy(page)) 271f40d1e42SMel Gorman continue; 272f40d1e42SMel Gorman 273f40d1e42SMel Gorman /* 274f40d1e42SMel Gorman * The zone lock must be held to isolate freepages. 275f40d1e42SMel Gorman * Unfortunately this is a very coarse lock and can be 276f40d1e42SMel Gorman * heavily contended if there are parallel allocations 277f40d1e42SMel Gorman * or parallel compactions. For async compaction do not 278f40d1e42SMel Gorman * spin on the lock and we acquire the lock as late as 279f40d1e42SMel Gorman * possible. 280f40d1e42SMel Gorman */ 281f40d1e42SMel Gorman locked = compact_checklock_irqsave(&cc->zone->lock, &flags, 282f40d1e42SMel Gorman locked, cc); 283f40d1e42SMel Gorman if (!locked) 284f40d1e42SMel Gorman break; 285f40d1e42SMel Gorman 286f40d1e42SMel Gorman /* Recheck this is a suitable migration target under lock */ 287f40d1e42SMel Gorman if (!strict && !suitable_migration_target(page)) 288f40d1e42SMel Gorman break; 289f40d1e42SMel Gorman 290f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 291f40d1e42SMel Gorman if (!PageBuddy(page)) 292f40d1e42SMel Gorman continue; 293748446bbSMel Gorman 294748446bbSMel Gorman /* Found a free page, break it into order-0 pages */ 295748446bbSMel Gorman isolated = split_free_page(page); 29685aa125fSMichal Nazarewicz if (!isolated && strict) 297f40d1e42SMel Gorman break; 298748446bbSMel Gorman total_isolated += isolated; 299748446bbSMel Gorman for (i = 0; i < isolated; i++) { 300748446bbSMel Gorman list_add(&page->lru, freelist); 301748446bbSMel Gorman page++; 302748446bbSMel Gorman } 303748446bbSMel Gorman 304748446bbSMel Gorman /* If a page was split, advance to the end of it */ 305748446bbSMel Gorman if (isolated) { 306748446bbSMel Gorman blockpfn += isolated - 1; 307748446bbSMel Gorman cursor += isolated - 1; 308748446bbSMel Gorman } 309748446bbSMel Gorman } 310748446bbSMel Gorman 311b7aba698SMel Gorman trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); 312f40d1e42SMel Gorman 313f40d1e42SMel Gorman /* 314f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 315f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 316f40d1e42SMel Gorman * returned and CMA will fail. 
317f40d1e42SMel Gorman */
3180db63d7eSMel Gorman if (strict && nr_strict_required > total_isolated)
319f40d1e42SMel Gorman total_isolated = 0;
320f40d1e42SMel Gorman
321f40d1e42SMel Gorman if (locked)
322f40d1e42SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags);
323f40d1e42SMel Gorman
324bb13ffebSMel Gorman /* Update the pageblock-skip if the whole pageblock was scanned */
325bb13ffebSMel Gorman if (blockpfn == end_pfn)
326c89511abSMel Gorman update_pageblock_skip(cc, valid_page, total_isolated, false);
327bb13ffebSMel Gorman
328010fc29aSMinchan Kim count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
329397487dbSMel Gorman if (total_isolated)
330010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated);
331748446bbSMel Gorman return total_isolated;
332748446bbSMel Gorman }
333748446bbSMel Gorman
33485aa125fSMichal Nazarewicz /**
33585aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages.
33685aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating.
33785aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN.
33885aa125fSMichal Nazarewicz *
33985aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the
34085aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors and cause the function to
34185aa125fSMichal Nazarewicz * undo its actions and return zero.
34285aa125fSMichal Nazarewicz *
34385aa125fSMichal Nazarewicz * Otherwise, the function returns the one-past-the-last PFN of the isolated page
34485aa125fSMichal Nazarewicz * (which may be greater than end_pfn if the end fell in the middle of
34585aa125fSMichal Nazarewicz * a free page).
34685aa125fSMichal Nazarewicz */
347ff9543fdSMichal Nazarewicz unsigned long
348bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
349bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn)
35085aa125fSMichal Nazarewicz {
351f40d1e42SMel Gorman unsigned long isolated, pfn, block_end_pfn;
35285aa125fSMichal Nazarewicz LIST_HEAD(freelist);
35385aa125fSMichal Nazarewicz
35485aa125fSMichal Nazarewicz for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
355bb13ffebSMel Gorman if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
35685aa125fSMichal Nazarewicz break;
35785aa125fSMichal Nazarewicz
35885aa125fSMichal Nazarewicz /*
35985aa125fSMichal Nazarewicz * On subsequent iterations ALIGN() is actually not needed,
36085aa125fSMichal Nazarewicz * but we keep it that way so as not to complicate the code.
36185aa125fSMichal Nazarewicz */
36285aa125fSMichal Nazarewicz block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
36385aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn);
36485aa125fSMichal Nazarewicz
365bb13ffebSMel Gorman isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
36685aa125fSMichal Nazarewicz &freelist, true);
36785aa125fSMichal Nazarewicz
36885aa125fSMichal Nazarewicz /*
36985aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if
37085aa125fSMichal Nazarewicz * there are any holes in the block (i.e. invalid PFNs or
37185aa125fSMichal Nazarewicz * non-free pages).
37285aa125fSMichal Nazarewicz */
37385aa125fSMichal Nazarewicz if (!isolated)
37485aa125fSMichal Nazarewicz break;
37585aa125fSMichal Nazarewicz
37685aa125fSMichal Nazarewicz /*
37785aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) *
37885aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order
37985aa125fSMichal Nazarewicz * page may span two pageblocks).
38085aa125fSMichal Nazarewicz */
38185aa125fSMichal Nazarewicz }
38285aa125fSMichal Nazarewicz
38385aa125fSMichal Nazarewicz /* split_free_page does not map the pages */
38485aa125fSMichal Nazarewicz map_pages(&freelist);
38585aa125fSMichal Nazarewicz
38685aa125fSMichal Nazarewicz if (pfn < end_pfn) {
38785aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */
38885aa125fSMichal Nazarewicz release_freepages(&freelist);
38985aa125fSMichal Nazarewicz return 0;
39085aa125fSMichal Nazarewicz }
39185aa125fSMichal Nazarewicz
39285aa125fSMichal Nazarewicz /* We don't use freelists for anything. */
39385aa125fSMichal Nazarewicz return pfn;
39485aa125fSMichal Nazarewicz }
39585aa125fSMichal Nazarewicz
396748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
397c67fe375SMel Gorman static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
398748446bbSMel Gorman {
399748446bbSMel Gorman struct page *page;
400b9e84ac1SMinchan Kim unsigned int count[2] = { 0, };
401748446bbSMel Gorman
402b9e84ac1SMinchan Kim list_for_each_entry(page, &cc->migratepages, lru)
403b9e84ac1SMinchan Kim count[!!page_is_file_cache(page)]++;
404748446bbSMel Gorman
405c67fe375SMel Gorman /* If locked we can use the interrupt unsafe versions */
406c67fe375SMel Gorman if (locked) {
407b9e84ac1SMinchan Kim __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
408b9e84ac1SMinchan Kim __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
409c67fe375SMel Gorman } else {
410c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
411c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
412c67fe375SMel Gorman }
413748446bbSMel Gorman }
414748446bbSMel Gorman
415748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
416748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
417748446bbSMel Gorman {
418bc693045SMinchan Kim unsigned long active, inactive, isolated;
419748446bbSMel Gorman
420748446bbSMel Gorman inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
421748446bbSMel Gorman zone_page_state(zone, NR_INACTIVE_ANON);
422bc693045SMinchan Kim active = zone_page_state(zone, NR_ACTIVE_FILE) +
423bc693045SMinchan Kim zone_page_state(zone, NR_ACTIVE_ANON);
424748446bbSMel Gorman isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
425748446bbSMel Gorman zone_page_state(zone, NR_ISOLATED_ANON);
426748446bbSMel Gorman
427bc693045SMinchan Kim return isolated > (inactive + active) / 2;
428748446bbSMel Gorman }
429748446bbSMel Gorman
4302fe86e00SMichal Nazarewicz /**
4312fe86e00SMichal Nazarewicz * isolate_migratepages_range() - isolate all migrate-able pages in range.
4322fe86e00SMichal Nazarewicz * @zone: Zone pages are in.
4332fe86e00SMichal Nazarewicz * @cc: Compaction control structure.
4342fe86e00SMichal Nazarewicz * @low_pfn: The first PFN of the range.
4352fe86e00SMichal Nazarewicz * @end_pfn: The one-past-the-last PFN of the range.
436e46a2879SMinchan Kim * @unevictable: true if unevictable pages may be isolated
4372fe86e00SMichal Nazarewicz *
4382fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by
4392fe86e00SMichal Nazarewicz * [low_pfn, end_pfn). Returns zero if there is a fatal signal
4402fe86e00SMichal Nazarewicz * pending, otherwise the PFN of the first page that was not scanned
4412fe86e00SMichal Nazarewicz * (which may be less than, equal to, or greater than end_pfn).
4422fe86e00SMichal Nazarewicz *
4432fe86e00SMichal Nazarewicz * Assumes that cc->migratepages is empty and cc->nr_migratepages is
4442fe86e00SMichal Nazarewicz * zero.
4452fe86e00SMichal Nazarewicz *
4462fe86e00SMichal Nazarewicz * Apart from cc->migratepages and cc->nr_migratepages this function
4472fe86e00SMichal Nazarewicz * does not modify any of cc's fields, in particular it does not modify
4482fe86e00SMichal Nazarewicz * (or read for that matter) cc->migrate_pfn.
449748446bbSMel Gorman */
450ff9543fdSMichal Nazarewicz unsigned long
4512fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
452e46a2879SMinchan Kim unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
453748446bbSMel Gorman {
4549927af74SMel Gorman unsigned long last_pageblock_nr = 0, pageblock_nr;
455b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0;
456748446bbSMel Gorman struct list_head *migratelist = &cc->migratepages;
457f3fd4a61SKonstantin Khlebnikov isolate_mode_t mode = 0;
458fa9add64SHugh Dickins struct lruvec *lruvec;
459c67fe375SMel Gorman unsigned long flags;
4602a1402aaSMel Gorman bool locked = false;
461bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL;
46250b5b094SVlastimil Babka bool skipped_async_unsuitable = false;
463748446bbSMel Gorman
464748446bbSMel Gorman /*
465748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU
466748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are,
467748446bbSMel Gorman * delay for some time until fewer pages are isolated
468748446bbSMel Gorman */
469748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) {
470f9e35b3bSMel Gorman /* async migration should just abort */
47168e3e926SLinus Torvalds if (!cc->sync)
4722fe86e00SMichal Nazarewicz return 0;
473f9e35b3bSMel Gorman
474748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10);
475748446bbSMel Gorman
476748446bbSMel Gorman if (fatal_signal_pending(current))
4772fe86e00SMichal Nazarewicz return 0;
478748446bbSMel Gorman }
479748446bbSMel Gorman
480748446bbSMel Gorman /* Time to isolate some pages for migration */
481b2eef8c0SAndrea Arcangeli cond_resched();
482748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) {
483b2eef8c0SAndrea Arcangeli /* give a chance to irqs before checking need_resched() */
4842a1402aaSMel Gorman if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
4852a1402aaSMel Gorman if (should_release_lock(&zone->lru_lock)) {
486c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags);
487b2eef8c0SAndrea Arcangeli locked = false;
488b2eef8c0SAndrea Arcangeli }
4892a1402aaSMel Gorman }
490b2eef8c0SAndrea Arcangeli
4910bf380bcSMel Gorman /*
4920bf380bcSMel Gorman * migrate_pfn does not necessarily start aligned to a
4930bf380bcSMel Gorman * pageblock.
Ensure that pfn_valid is called when moving 4940bf380bcSMel Gorman * into a new MAX_ORDER_NR_PAGES range in case of large 4950bf380bcSMel Gorman * memory holes within the zone 4960bf380bcSMel Gorman */ 4970bf380bcSMel Gorman if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { 4980bf380bcSMel Gorman if (!pfn_valid(low_pfn)) { 4990bf380bcSMel Gorman low_pfn += MAX_ORDER_NR_PAGES - 1; 5000bf380bcSMel Gorman continue; 5010bf380bcSMel Gorman } 5020bf380bcSMel Gorman } 5030bf380bcSMel Gorman 504748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 505748446bbSMel Gorman continue; 506b7aba698SMel Gorman nr_scanned++; 507748446bbSMel Gorman 508dc908600SMel Gorman /* 509dc908600SMel Gorman * Get the page and ensure the page is within the same zone. 510dc908600SMel Gorman * See the comment in isolate_freepages about overlapping 511dc908600SMel Gorman * nodes. It is deliberate that the new zone lock is not taken 512dc908600SMel Gorman * as memory compaction should not move pages between nodes. 513dc908600SMel Gorman */ 514748446bbSMel Gorman page = pfn_to_page(low_pfn); 515dc908600SMel Gorman if (page_zone(page) != zone) 516dc908600SMel Gorman continue; 517dc908600SMel Gorman 518bb13ffebSMel Gorman if (!valid_page) 519bb13ffebSMel Gorman valid_page = page; 520bb13ffebSMel Gorman 521bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 522bb13ffebSMel Gorman pageblock_nr = low_pfn >> pageblock_order; 523bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 524bb13ffebSMel Gorman goto next_pageblock; 525bb13ffebSMel Gorman 526dc908600SMel Gorman /* Skip if free */ 527748446bbSMel Gorman if (PageBuddy(page)) 528748446bbSMel Gorman continue; 529748446bbSMel Gorman 5309927af74SMel Gorman /* 5319927af74SMel Gorman * For async migration, also only scan in MOVABLE blocks. Async 5329927af74SMel Gorman * migration is optimistic to see if the minimum amount of work 5339927af74SMel Gorman * satisfies the allocation 5349927af74SMel Gorman */ 53568e3e926SLinus Torvalds if (!cc->sync && last_pageblock_nr != pageblock_nr && 53647118af0SMichal Nazarewicz !migrate_async_suitable(get_pageblock_migratetype(page))) { 537c89511abSMel Gorman cc->finished_update_migrate = true; 53850b5b094SVlastimil Babka skipped_async_unsuitable = true; 5392a1402aaSMel Gorman goto next_pageblock; 5409927af74SMel Gorman } 5419927af74SMel Gorman 542bf6bddf1SRafael Aquini /* 543bf6bddf1SRafael Aquini * Check may be lockless but that's ok as we recheck later. 544bf6bddf1SRafael Aquini * It's possible to migrate LRU pages and balloon pages 545bf6bddf1SRafael Aquini * Skip any other type of page 546bf6bddf1SRafael Aquini */ 547bf6bddf1SRafael Aquini if (!PageLRU(page)) { 548bf6bddf1SRafael Aquini if (unlikely(balloon_page_movable(page))) { 549bf6bddf1SRafael Aquini if (locked && balloon_page_isolate(page)) { 550bf6bddf1SRafael Aquini /* Successfully isolated */ 551bf6bddf1SRafael Aquini cc->finished_update_migrate = true; 552bf6bddf1SRafael Aquini list_add(&page->lru, migratelist); 553bf6bddf1SRafael Aquini cc->nr_migratepages++; 554bf6bddf1SRafael Aquini nr_isolated++; 555bf6bddf1SRafael Aquini goto check_compact_cluster; 556bf6bddf1SRafael Aquini } 557bf6bddf1SRafael Aquini } 558bc835011SAndrea Arcangeli continue; 559bf6bddf1SRafael Aquini } 560bc835011SAndrea Arcangeli 561bc835011SAndrea Arcangeli /* 5622a1402aaSMel Gorman * PageLRU is set. 
lru_lock normally excludes isolation 5632a1402aaSMel Gorman * splitting and collapsing (collapsing has already happened 5642a1402aaSMel Gorman * if PageLRU is set) but the lock is not necessarily taken 5652a1402aaSMel Gorman * here and it is wasteful to take it just to check transhuge. 5662a1402aaSMel Gorman * Check TransHuge without lock and skip the whole pageblock if 5672a1402aaSMel Gorman * it's either a transhuge or hugetlbfs page, as calling 5682a1402aaSMel Gorman * compound_order() without preventing THP from splitting the 5692a1402aaSMel Gorman * page underneath us may return surprising results. 570bc835011SAndrea Arcangeli */ 571bc835011SAndrea Arcangeli if (PageTransHuge(page)) { 5722a1402aaSMel Gorman if (!locked) 5732a1402aaSMel Gorman goto next_pageblock; 5742a1402aaSMel Gorman low_pfn += (1 << compound_order(page)) - 1; 5752a1402aaSMel Gorman continue; 5762a1402aaSMel Gorman } 5772a1402aaSMel Gorman 5782a1402aaSMel Gorman /* Check if it is ok to still hold the lock */ 5792a1402aaSMel Gorman locked = compact_checklock_irqsave(&zone->lru_lock, &flags, 5802a1402aaSMel Gorman locked, cc); 5812a1402aaSMel Gorman if (!locked || fatal_signal_pending(current)) 5822a1402aaSMel Gorman break; 5832a1402aaSMel Gorman 5842a1402aaSMel Gorman /* Recheck PageLRU and PageTransHuge under lock */ 5852a1402aaSMel Gorman if (!PageLRU(page)) 5862a1402aaSMel Gorman continue; 5872a1402aaSMel Gorman if (PageTransHuge(page)) { 588bc835011SAndrea Arcangeli low_pfn += (1 << compound_order(page)) - 1; 589bc835011SAndrea Arcangeli continue; 590bc835011SAndrea Arcangeli } 591bc835011SAndrea Arcangeli 59268e3e926SLinus Torvalds if (!cc->sync) 593c8244935SMel Gorman mode |= ISOLATE_ASYNC_MIGRATE; 594c8244935SMel Gorman 595e46a2879SMinchan Kim if (unevictable) 596e46a2879SMinchan Kim mode |= ISOLATE_UNEVICTABLE; 597e46a2879SMinchan Kim 598fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 599fa9add64SHugh Dickins 600748446bbSMel Gorman /* Try isolate the page */ 601f3fd4a61SKonstantin Khlebnikov if (__isolate_lru_page(page, mode) != 0) 602748446bbSMel Gorman continue; 603748446bbSMel Gorman 604*309381feSSasha Levin VM_BUG_ON_PAGE(PageTransCompound(page), page); 605bc835011SAndrea Arcangeli 606748446bbSMel Gorman /* Successfully isolated */ 607c89511abSMel Gorman cc->finished_update_migrate = true; 608fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 609748446bbSMel Gorman list_add(&page->lru, migratelist); 610748446bbSMel Gorman cc->nr_migratepages++; 611b7aba698SMel Gorman nr_isolated++; 612748446bbSMel Gorman 613bf6bddf1SRafael Aquini check_compact_cluster: 614748446bbSMel Gorman /* Avoid isolating too much */ 61531b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 61631b8384aSHillf Danton ++low_pfn; 617748446bbSMel Gorman break; 618748446bbSMel Gorman } 6192a1402aaSMel Gorman 6202a1402aaSMel Gorman continue; 6212a1402aaSMel Gorman 6222a1402aaSMel Gorman next_pageblock: 623a9aacbccSMel Gorman low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1; 6242a1402aaSMel Gorman last_pageblock_nr = pageblock_nr; 62531b8384aSHillf Danton } 626748446bbSMel Gorman 627c67fe375SMel Gorman acct_isolated(zone, locked, cc); 628748446bbSMel Gorman 629c67fe375SMel Gorman if (locked) 630c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags); 631748446bbSMel Gorman 63250b5b094SVlastimil Babka /* 63350b5b094SVlastimil Babka * Update the pageblock-skip information and cached scanner pfn, 63450b5b094SVlastimil Babka * if the whole pageblock was scanned without 
isolating any page. 63550b5b094SVlastimil Babka * This is not done when pageblock was skipped due to being unsuitable 63650b5b094SVlastimil Babka * for async compaction, so that eventual sync compaction can try. 63750b5b094SVlastimil Babka */ 63850b5b094SVlastimil Babka if (low_pfn == end_pfn && !skipped_async_unsuitable) 639c89511abSMel Gorman update_pageblock_skip(cc, valid_page, nr_isolated, true); 640bb13ffebSMel Gorman 641b7aba698SMel Gorman trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 642b7aba698SMel Gorman 643010fc29aSMinchan Kim count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned); 644397487dbSMel Gorman if (nr_isolated) 645010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 646397487dbSMel Gorman 6472fe86e00SMichal Nazarewicz return low_pfn; 6482fe86e00SMichal Nazarewicz } 6492fe86e00SMichal Nazarewicz 650ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 651ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 652ff9543fdSMichal Nazarewicz /* 653ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 654ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 655ff9543fdSMichal Nazarewicz */ 656ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone, 657ff9543fdSMichal Nazarewicz struct compact_control *cc) 658ff9543fdSMichal Nazarewicz { 659ff9543fdSMichal Nazarewicz struct page *page; 660108bcc96SCody P Schafer unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn; 661ff9543fdSMichal Nazarewicz int nr_freepages = cc->nr_freepages; 662ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 6632fe86e00SMichal Nazarewicz 664ff9543fdSMichal Nazarewicz /* 665ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 666ff9543fdSMichal Nazarewicz * scanned from (or the end of the zone if starting). The low point 667ff9543fdSMichal Nazarewicz * is the end of the pageblock the migration scanner is using. 668ff9543fdSMichal Nazarewicz */ 669ff9543fdSMichal Nazarewicz pfn = cc->free_pfn; 6707ed695e0SVlastimil Babka low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 6712fe86e00SMichal Nazarewicz 672ff9543fdSMichal Nazarewicz /* 673ff9543fdSMichal Nazarewicz * Take care that if the migration scanner is at the end of the zone 674ff9543fdSMichal Nazarewicz * that the free scanner does not accidentally move to the next zone 675ff9543fdSMichal Nazarewicz * in the next isolation cycle. 676ff9543fdSMichal Nazarewicz */ 677ff9543fdSMichal Nazarewicz high_pfn = min(low_pfn, pfn); 678ff9543fdSMichal Nazarewicz 679108bcc96SCody P Schafer z_end_pfn = zone_end_pfn(zone); 680ff9543fdSMichal Nazarewicz 681ff9543fdSMichal Nazarewicz /* 682ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 683ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 684ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 
685ff9543fdSMichal Nazarewicz */ 6867ed695e0SVlastimil Babka for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages; 687ff9543fdSMichal Nazarewicz pfn -= pageblock_nr_pages) { 688ff9543fdSMichal Nazarewicz unsigned long isolated; 689ff9543fdSMichal Nazarewicz 690f6ea3adbSDavid Rientjes /* 691f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 692f6ea3adbSDavid Rientjes * suitable migration targets, so periodically check if we need 693f6ea3adbSDavid Rientjes * to schedule. 694f6ea3adbSDavid Rientjes */ 695f6ea3adbSDavid Rientjes cond_resched(); 696f6ea3adbSDavid Rientjes 697ff9543fdSMichal Nazarewicz if (!pfn_valid(pfn)) 698ff9543fdSMichal Nazarewicz continue; 699ff9543fdSMichal Nazarewicz 700ff9543fdSMichal Nazarewicz /* 701ff9543fdSMichal Nazarewicz * Check for overlapping nodes/zones. It's possible on some 702ff9543fdSMichal Nazarewicz * configurations to have a setup like 703ff9543fdSMichal Nazarewicz * node0 node1 node0 704ff9543fdSMichal Nazarewicz * i.e. it's possible that all pages within a zones range of 705ff9543fdSMichal Nazarewicz * pages do not belong to a single zone. 706ff9543fdSMichal Nazarewicz */ 707ff9543fdSMichal Nazarewicz page = pfn_to_page(pfn); 708ff9543fdSMichal Nazarewicz if (page_zone(page) != zone) 709ff9543fdSMichal Nazarewicz continue; 710ff9543fdSMichal Nazarewicz 711ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 71268e3e926SLinus Torvalds if (!suitable_migration_target(page)) 713ff9543fdSMichal Nazarewicz continue; 71468e3e926SLinus Torvalds 715bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 716bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 717bb13ffebSMel Gorman continue; 718bb13ffebSMel Gorman 719f40d1e42SMel Gorman /* Found a block suitable for isolating free pages from */ 720ff9543fdSMichal Nazarewicz isolated = 0; 72160177d31SMel Gorman 72260177d31SMel Gorman /* 72360177d31SMel Gorman * As pfn may not start aligned, pfn+pageblock_nr_page 72460177d31SMel Gorman * may cross a MAX_ORDER_NR_PAGES boundary and miss 72560177d31SMel Gorman * a pfn_valid check. Ensure isolate_freepages_block() 72660177d31SMel Gorman * only scans within a pageblock 72760177d31SMel Gorman */ 72860177d31SMel Gorman end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 729108bcc96SCody P Schafer end_pfn = min(end_pfn, z_end_pfn); 730f40d1e42SMel Gorman isolated = isolate_freepages_block(cc, pfn, end_pfn, 731ff9543fdSMichal Nazarewicz freelist, false); 732ff9543fdSMichal Nazarewicz nr_freepages += isolated; 733ff9543fdSMichal Nazarewicz 734ff9543fdSMichal Nazarewicz /* 735ff9543fdSMichal Nazarewicz * Record the highest PFN we isolated pages from. 
When next 736ff9543fdSMichal Nazarewicz * looking for free pages, the search will restart here as 737ff9543fdSMichal Nazarewicz * page migration may have returned some pages to the allocator 738ff9543fdSMichal Nazarewicz */ 739c89511abSMel Gorman if (isolated) { 740c89511abSMel Gorman cc->finished_update_free = true; 741ff9543fdSMichal Nazarewicz high_pfn = max(high_pfn, pfn); 742ff9543fdSMichal Nazarewicz } 743c89511abSMel Gorman } 744ff9543fdSMichal Nazarewicz 745ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 746ff9543fdSMichal Nazarewicz map_pages(freelist); 747ff9543fdSMichal Nazarewicz 7487ed695e0SVlastimil Babka /* 7497ed695e0SVlastimil Babka * If we crossed the migrate scanner, we want to keep it that way 7507ed695e0SVlastimil Babka * so that compact_finished() may detect this 7517ed695e0SVlastimil Babka */ 7527ed695e0SVlastimil Babka if (pfn < low_pfn) 7537ed695e0SVlastimil Babka cc->free_pfn = max(pfn, zone->zone_start_pfn); 7547ed695e0SVlastimil Babka else 755ff9543fdSMichal Nazarewicz cc->free_pfn = high_pfn; 756ff9543fdSMichal Nazarewicz cc->nr_freepages = nr_freepages; 757748446bbSMel Gorman } 758748446bbSMel Gorman 759748446bbSMel Gorman /* 760748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 761748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 762748446bbSMel Gorman */ 763748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 764748446bbSMel Gorman unsigned long data, 765748446bbSMel Gorman int **result) 766748446bbSMel Gorman { 767748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 768748446bbSMel Gorman struct page *freepage; 769748446bbSMel Gorman 770748446bbSMel Gorman /* Isolate free pages if necessary */ 771748446bbSMel Gorman if (list_empty(&cc->freepages)) { 772748446bbSMel Gorman isolate_freepages(cc->zone, cc); 773748446bbSMel Gorman 774748446bbSMel Gorman if (list_empty(&cc->freepages)) 775748446bbSMel Gorman return NULL; 776748446bbSMel Gorman } 777748446bbSMel Gorman 778748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 779748446bbSMel Gorman list_del(&freepage->lru); 780748446bbSMel Gorman cc->nr_freepages--; 781748446bbSMel Gorman 782748446bbSMel Gorman return freepage; 783748446bbSMel Gorman } 784748446bbSMel Gorman 785748446bbSMel Gorman /* 786748446bbSMel Gorman * We cannot control nr_migratepages and nr_freepages fully when migration is 787748446bbSMel Gorman * running as migrate_pages() has no knowledge of compact_control. When 788748446bbSMel Gorman * migration is complete, we count the number of pages on the lists by hand. 
789748446bbSMel Gorman */ 790748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc) 791748446bbSMel Gorman { 792748446bbSMel Gorman int nr_migratepages = 0; 793748446bbSMel Gorman int nr_freepages = 0; 794748446bbSMel Gorman struct page *page; 795748446bbSMel Gorman 796748446bbSMel Gorman list_for_each_entry(page, &cc->migratepages, lru) 797748446bbSMel Gorman nr_migratepages++; 798748446bbSMel Gorman list_for_each_entry(page, &cc->freepages, lru) 799748446bbSMel Gorman nr_freepages++; 800748446bbSMel Gorman 801748446bbSMel Gorman cc->nr_migratepages = nr_migratepages; 802748446bbSMel Gorman cc->nr_freepages = nr_freepages; 803748446bbSMel Gorman } 804748446bbSMel Gorman 805ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 806ff9543fdSMichal Nazarewicz typedef enum { 807ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 808ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 809ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 810ff9543fdSMichal Nazarewicz } isolate_migrate_t; 811ff9543fdSMichal Nazarewicz 812ff9543fdSMichal Nazarewicz /* 813ff9543fdSMichal Nazarewicz * Isolate all pages that can be migrated from the block pointed to by 814ff9543fdSMichal Nazarewicz * the migrate scanner within compact_control. 815ff9543fdSMichal Nazarewicz */ 816ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 817ff9543fdSMichal Nazarewicz struct compact_control *cc) 818ff9543fdSMichal Nazarewicz { 819ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 820ff9543fdSMichal Nazarewicz 821ff9543fdSMichal Nazarewicz /* Do not scan outside zone boundaries */ 822ff9543fdSMichal Nazarewicz low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 823ff9543fdSMichal Nazarewicz 824ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 825a9aacbccSMel Gorman end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 826ff9543fdSMichal Nazarewicz 827ff9543fdSMichal Nazarewicz /* Do not cross the free scanner or scan within a memory hole */ 828ff9543fdSMichal Nazarewicz if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 829ff9543fdSMichal Nazarewicz cc->migrate_pfn = end_pfn; 830ff9543fdSMichal Nazarewicz return ISOLATE_NONE; 831ff9543fdSMichal Nazarewicz } 832ff9543fdSMichal Nazarewicz 833ff9543fdSMichal Nazarewicz /* Perform the isolation */ 834e46a2879SMinchan Kim low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false); 835e64c5237SShaohua Li if (!low_pfn || cc->contended) 836ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 837ff9543fdSMichal Nazarewicz 838ff9543fdSMichal Nazarewicz cc->migrate_pfn = low_pfn; 839ff9543fdSMichal Nazarewicz 840ff9543fdSMichal Nazarewicz return ISOLATE_SUCCESS; 841ff9543fdSMichal Nazarewicz } 842ff9543fdSMichal Nazarewicz 843748446bbSMel Gorman static int compact_finished(struct zone *zone, 844748446bbSMel Gorman struct compact_control *cc) 845748446bbSMel Gorman { 8468fb74b9fSMel Gorman unsigned int order; 8475a03b051SAndrea Arcangeli unsigned long watermark; 84856de7263SMel Gorman 849748446bbSMel Gorman if (fatal_signal_pending(current)) 850748446bbSMel Gorman return COMPACT_PARTIAL; 851748446bbSMel Gorman 852753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 853bb13ffebSMel Gorman if (cc->free_pfn <= cc->migrate_pfn) { 85455b7c4c9SVlastimil Babka /* Let the next compaction start anew. 
*/ 85555b7c4c9SVlastimil Babka zone->compact_cached_migrate_pfn = zone->zone_start_pfn; 85655b7c4c9SVlastimil Babka zone->compact_cached_free_pfn = zone_end_pfn(zone); 85755b7c4c9SVlastimil Babka 85862997027SMel Gorman /* 85962997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 86062997027SMel Gorman * by kswapd when it goes to sleep. kswapd does not set the 86162997027SMel Gorman * flag itself as the decision to be clear should be directly 86262997027SMel Gorman * based on an allocation request. 86362997027SMel Gorman */ 86462997027SMel Gorman if (!current_is_kswapd()) 86562997027SMel Gorman zone->compact_blockskip_flush = true; 86662997027SMel Gorman 867748446bbSMel Gorman return COMPACT_COMPLETE; 868bb13ffebSMel Gorman } 869748446bbSMel Gorman 87082478fb7SJohannes Weiner /* 87182478fb7SJohannes Weiner * order == -1 is expected when compacting via 87282478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 87382478fb7SJohannes Weiner */ 87456de7263SMel Gorman if (cc->order == -1) 87556de7263SMel Gorman return COMPACT_CONTINUE; 87656de7263SMel Gorman 8773957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 8783957c776SMichal Hocko watermark = low_wmark_pages(zone); 8793957c776SMichal Hocko watermark += (1 << cc->order); 8803957c776SMichal Hocko 8813957c776SMichal Hocko if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 8823957c776SMichal Hocko return COMPACT_CONTINUE; 8833957c776SMichal Hocko 88456de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 88556de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 8868fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 8878fb74b9fSMel Gorman 88856de7263SMel Gorman /* Job done if page is free of the right migratetype */ 8891fb3f8caSMel Gorman if (!list_empty(&area->free_list[cc->migratetype])) 89056de7263SMel Gorman return COMPACT_PARTIAL; 89156de7263SMel Gorman 89256de7263SMel Gorman /* Job done if allocation would set block type */ 8931fb3f8caSMel Gorman if (cc->order >= pageblock_order && area->nr_free) 89456de7263SMel Gorman return COMPACT_PARTIAL; 89556de7263SMel Gorman } 89656de7263SMel Gorman 897748446bbSMel Gorman return COMPACT_CONTINUE; 898748446bbSMel Gorman } 899748446bbSMel Gorman 9003e7d3449SMel Gorman /* 9013e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 9023e7d3449SMel Gorman * Returns 9033e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 9043e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 9053e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 9063e7d3449SMel Gorman */ 9073e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order) 9083e7d3449SMel Gorman { 9093e7d3449SMel Gorman int fragindex; 9103e7d3449SMel Gorman unsigned long watermark; 9113e7d3449SMel Gorman 9123e7d3449SMel Gorman /* 9133957c776SMichal Hocko * order == -1 is expected when compacting via 9143957c776SMichal Hocko * /proc/sys/vm/compact_memory 9153957c776SMichal Hocko */ 9163957c776SMichal Hocko if (order == -1) 9173957c776SMichal Hocko return COMPACT_CONTINUE; 9183957c776SMichal Hocko 9193957c776SMichal Hocko /* 9203e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 
9213e7d3449SMel Gorman * This is because during migration, copies of pages need to be 9223e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 9233e7d3449SMel Gorman */ 9243e7d3449SMel Gorman watermark = low_wmark_pages(zone) + (2UL << order); 9253e7d3449SMel Gorman if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 9263e7d3449SMel Gorman return COMPACT_SKIPPED; 9273e7d3449SMel Gorman 9283e7d3449SMel Gorman /* 9293e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 9303e7d3449SMel Gorman * low memory or external fragmentation 9313e7d3449SMel Gorman * 932a582a738SShaohua Li * index of -1000 implies allocations might succeed depending on 933a582a738SShaohua Li * watermarks 9343e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 9353e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 9363e7d3449SMel Gorman * 9373e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 9383e7d3449SMel Gorman */ 9393e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 9403e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 9413e7d3449SMel Gorman return COMPACT_SKIPPED; 9423e7d3449SMel Gorman 943a582a738SShaohua Li if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 944a582a738SShaohua Li 0, 0)) 9453e7d3449SMel Gorman return COMPACT_PARTIAL; 9463e7d3449SMel Gorman 9473e7d3449SMel Gorman return COMPACT_CONTINUE; 9483e7d3449SMel Gorman } 9493e7d3449SMel Gorman 950748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 951748446bbSMel Gorman { 952748446bbSMel Gorman int ret; 953c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 954108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 955748446bbSMel Gorman 9563e7d3449SMel Gorman ret = compaction_suitable(zone, cc->order); 9573e7d3449SMel Gorman switch (ret) { 9583e7d3449SMel Gorman case COMPACT_PARTIAL: 9593e7d3449SMel Gorman case COMPACT_SKIPPED: 9603e7d3449SMel Gorman /* Compaction is likely to fail */ 9613e7d3449SMel Gorman return ret; 9623e7d3449SMel Gorman case COMPACT_CONTINUE: 9633e7d3449SMel Gorman /* Fall through to compaction */ 9643e7d3449SMel Gorman ; 9653e7d3449SMel Gorman } 9663e7d3449SMel Gorman 967c89511abSMel Gorman /* 968d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 969d3132e4bSVlastimil Babka * is about to be retried after being deferred. kswapd does not do 970d3132e4bSVlastimil Babka * this reset as it'll reset the cached information when going to sleep. 971d3132e4bSVlastimil Babka */ 972d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) 973d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 974d3132e4bSVlastimil Babka 975d3132e4bSVlastimil Babka /* 976c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 977c89511abSMel Gorman * information on where the scanners should start but check that it 978c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 
979c89511abSMel Gorman */ 980c89511abSMel Gorman cc->migrate_pfn = zone->compact_cached_migrate_pfn; 981c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 982c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { 983c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); 984c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 985c89511abSMel Gorman } 986c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { 987c89511abSMel Gorman cc->migrate_pfn = start_pfn; 988c89511abSMel Gorman zone->compact_cached_migrate_pfn = cc->migrate_pfn; 989c89511abSMel Gorman } 990748446bbSMel Gorman 9910eb927c0SMel Gorman trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); 9920eb927c0SMel Gorman 993748446bbSMel Gorman migrate_prep_local(); 994748446bbSMel Gorman 995748446bbSMel Gorman while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 996748446bbSMel Gorman unsigned long nr_migrate, nr_remaining; 9979d502c1cSMinchan Kim int err; 998748446bbSMel Gorman 999f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1000f9e35b3bSMel Gorman case ISOLATE_ABORT: 1001f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 10025733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1003e64c5237SShaohua Li cc->nr_migratepages = 0; 1004f9e35b3bSMel Gorman goto out; 1005f9e35b3bSMel Gorman case ISOLATE_NONE: 1006748446bbSMel Gorman continue; 1007f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1008f9e35b3bSMel Gorman ; 1009f9e35b3bSMel Gorman } 1010748446bbSMel Gorman 1011748446bbSMel Gorman nr_migrate = cc->nr_migratepages; 10129d502c1cSMinchan Kim err = migrate_pages(&cc->migratepages, compaction_alloc, 10139c620e2bSHugh Dickins (unsigned long)cc, 10147b2a2d4aSMel Gorman cc->sync ? 
MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC, 10157b2a2d4aSMel Gorman MR_COMPACTION); 1016748446bbSMel Gorman update_nr_listpages(cc); 1017748446bbSMel Gorman nr_remaining = cc->nr_migratepages; 1018748446bbSMel Gorman 1019b7aba698SMel Gorman trace_mm_compaction_migratepages(nr_migrate - nr_remaining, 1020b7aba698SMel Gorman nr_remaining); 1021748446bbSMel Gorman 10225733c7d1SRafael Aquini /* Release isolated pages not migrated */ 10239d502c1cSMinchan Kim if (err) { 10245733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1025748446bbSMel Gorman cc->nr_migratepages = 0; 10267ed695e0SVlastimil Babka /* 10277ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 10287ed695e0SVlastimil Babka * and we want compact_finished() to detect it 10297ed695e0SVlastimil Babka */ 10307ed695e0SVlastimil Babka if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { 10314bf2bba3SDavid Rientjes ret = COMPACT_PARTIAL; 10324bf2bba3SDavid Rientjes goto out; 1033748446bbSMel Gorman } 10344bf2bba3SDavid Rientjes } 1035748446bbSMel Gorman } 1036748446bbSMel Gorman 1037f9e35b3bSMel Gorman out: 1038748446bbSMel Gorman /* Release free pages and check accounting */ 1039748446bbSMel Gorman cc->nr_freepages -= release_freepages(&cc->freepages); 1040748446bbSMel Gorman VM_BUG_ON(cc->nr_freepages != 0); 1041748446bbSMel Gorman 10420eb927c0SMel Gorman trace_mm_compaction_end(ret); 10430eb927c0SMel Gorman 1044748446bbSMel Gorman return ret; 1045748446bbSMel Gorman } 104676ab0f53SMel Gorman 1047d43a87e6SKyungmin Park static unsigned long compact_zone_order(struct zone *zone, 104877f1fe6bSMel Gorman int order, gfp_t gfp_mask, 10498fb74b9fSMel Gorman bool sync, bool *contended) 105056de7263SMel Gorman { 1051e64c5237SShaohua Li unsigned long ret; 105256de7263SMel Gorman struct compact_control cc = { 105356de7263SMel Gorman .nr_freepages = 0, 105456de7263SMel Gorman .nr_migratepages = 0, 105556de7263SMel Gorman .order = order, 105656de7263SMel Gorman .migratetype = allocflags_to_migratetype(gfp_mask), 105756de7263SMel Gorman .zone = zone, 105868e3e926SLinus Torvalds .sync = sync, 105956de7263SMel Gorman }; 106056de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 106156de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 106256de7263SMel Gorman 1063e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1064e64c5237SShaohua Li 1065e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1066e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1067e64c5237SShaohua Li 1068e64c5237SShaohua Li *contended = cc.contended; 1069e64c5237SShaohua Li return ret; 107056de7263SMel Gorman } 107156de7263SMel Gorman 10725e771905SMel Gorman int sysctl_extfrag_threshold = 500; 10735e771905SMel Gorman 107456de7263SMel Gorman /** 107556de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 107656de7263SMel Gorman * @zonelist: The zonelist used for the current allocation 107756de7263SMel Gorman * @order: The order of the current allocation 107856de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 107956de7263SMel Gorman * @nodemask: The allowed nodes to allocate from 108077f1fe6bSMel Gorman * @sync: Whether migration is synchronous or not 1081661c4cb9SMel Gorman * @contended: Return value that is true if compaction was aborted due to lock contention 1082661c4cb9SMel Gorman * @page: Optionally capture a free page of the requested order during compaction 108356de7263SMel Gorman * 108456de7263SMel Gorman * This is the main entry point for direct page compaction. 
108556de7263SMel Gorman */ 108656de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist, 108777f1fe6bSMel Gorman int order, gfp_t gfp_mask, nodemask_t *nodemask, 10888fb74b9fSMel Gorman bool sync, bool *contended) 108956de7263SMel Gorman { 109056de7263SMel Gorman enum zone_type high_zoneidx = gfp_zone(gfp_mask); 109156de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 109256de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 109356de7263SMel Gorman struct zoneref *z; 109456de7263SMel Gorman struct zone *zone; 109556de7263SMel Gorman int rc = COMPACT_SKIPPED; 1096d95ea5d1SBartlomiej Zolnierkiewicz int alloc_flags = 0; 109756de7263SMel Gorman 10984ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1099c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 110056de7263SMel Gorman return rc; 110156de7263SMel Gorman 1102010fc29aSMinchan Kim count_compact_event(COMPACTSTALL); 110356de7263SMel Gorman 1104d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA 1105d95ea5d1SBartlomiej Zolnierkiewicz if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 1106d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags |= ALLOC_CMA; 1107d95ea5d1SBartlomiej Zolnierkiewicz #endif 110856de7263SMel Gorman /* Compact each zone in the list */ 110956de7263SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 111056de7263SMel Gorman nodemask) { 111156de7263SMel Gorman int status; 111256de7263SMel Gorman 1113c67fe375SMel Gorman status = compact_zone_order(zone, order, gfp_mask, sync, 11148fb74b9fSMel Gorman contended); 111556de7263SMel Gorman rc = max(status, rc); 111656de7263SMel Gorman 11173e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1118d95ea5d1SBartlomiej Zolnierkiewicz if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 1119d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags)) 112056de7263SMel Gorman break; 112156de7263SMel Gorman } 112256de7263SMel Gorman 112356de7263SMel Gorman return rc; 112456de7263SMel Gorman } 112556de7263SMel Gorman 112656de7263SMel Gorman 112776ab0f53SMel Gorman /* Compact all zones within a node */ 11287103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 112976ab0f53SMel Gorman { 113076ab0f53SMel Gorman int zoneid; 113176ab0f53SMel Gorman struct zone *zone; 113276ab0f53SMel Gorman 113376ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 113476ab0f53SMel Gorman 113576ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 113676ab0f53SMel Gorman if (!populated_zone(zone)) 113776ab0f53SMel Gorman continue; 113876ab0f53SMel Gorman 11397be62de9SRik van Riel cc->nr_freepages = 0; 11407be62de9SRik van Riel cc->nr_migratepages = 0; 11417be62de9SRik van Riel cc->zone = zone; 11427be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 11437be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 114476ab0f53SMel Gorman 1145aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 11467be62de9SRik van Riel compact_zone(zone, cc); 114776ab0f53SMel Gorman 1148aff62249SRik van Riel if (cc->order > 0) { 1149de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1150de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1151de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1152aff62249SRik van Riel /* Currently async compaction is never deferred. 
*/ 1153de6c60a6SVlastimil Babka else if (cc->sync) 1154aff62249SRik van Riel defer_compaction(zone, cc->order); 1155aff62249SRik van Riel } 1156aff62249SRik van Riel 11577be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages)); 11587be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages)); 115976ab0f53SMel Gorman } 116076ab0f53SMel Gorman } 116176ab0f53SMel Gorman 11627103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 11637be62de9SRik van Riel { 11647be62de9SRik van Riel struct compact_control cc = { 11657be62de9SRik van Riel .order = order, 116668e3e926SLinus Torvalds .sync = false, 11677be62de9SRik van Riel }; 11687be62de9SRik van Riel 11693a7200afSMel Gorman if (!order) 11703a7200afSMel Gorman return; 11713a7200afSMel Gorman 11727103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 11737be62de9SRik van Riel } 11747be62de9SRik van Riel 11757103f16dSAndrew Morton static void compact_node(int nid) 11767be62de9SRik van Riel { 11777be62de9SRik van Riel struct compact_control cc = { 11787be62de9SRik van Riel .order = -1, 117968e3e926SLinus Torvalds .sync = true, 11807be62de9SRik van Riel }; 11817be62de9SRik van Riel 11827103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 11837be62de9SRik van Riel } 11847be62de9SRik van Riel 118576ab0f53SMel Gorman /* Compact all nodes in the system */ 11867964c06dSJason Liu static void compact_nodes(void) 118776ab0f53SMel Gorman { 118876ab0f53SMel Gorman int nid; 118976ab0f53SMel Gorman 11908575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 11918575ec29SHugh Dickins lru_add_drain_all(); 11928575ec29SHugh Dickins 119376ab0f53SMel Gorman for_each_online_node(nid) 119476ab0f53SMel Gorman compact_node(nid); 119576ab0f53SMel Gorman } 119676ab0f53SMel Gorman 119776ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 119876ab0f53SMel Gorman int sysctl_compact_memory; 119976ab0f53SMel Gorman 120076ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */ 120176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 120276ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 120376ab0f53SMel Gorman { 120476ab0f53SMel Gorman if (write) 12057964c06dSJason Liu compact_nodes(); 120676ab0f53SMel Gorman 120776ab0f53SMel Gorman return 0; 120876ab0f53SMel Gorman } 1209ed4a6d7fSMel Gorman 12105e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 12115e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 12125e771905SMel Gorman { 12135e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 12145e771905SMel Gorman 12155e771905SMel Gorman return 0; 12165e771905SMel Gorman } 12175e771905SMel Gorman 1218ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 121910fbcf4cSKay Sievers ssize_t sysfs_compact_node(struct device *dev, 122010fbcf4cSKay Sievers struct device_attribute *attr, 1221ed4a6d7fSMel Gorman const char *buf, size_t count) 1222ed4a6d7fSMel Gorman { 12238575ec29SHugh Dickins int nid = dev->id; 12248575ec29SHugh Dickins 12258575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 12268575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 12278575ec29SHugh Dickins lru_add_drain_all(); 12288575ec29SHugh Dickins 12298575ec29SHugh Dickins compact_node(nid); 12308575ec29SHugh Dickins } 1231ed4a6d7fSMel Gorman 1232ed4a6d7fSMel Gorman return count; 1233ed4a6d7fSMel Gorman } 123410fbcf4cSKay Sievers static 
DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1235ed4a6d7fSMel Gorman 1236ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1237ed4a6d7fSMel Gorman { 123810fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1239ed4a6d7fSMel Gorman } 1240ed4a6d7fSMel Gorman 1241ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1242ed4a6d7fSMel Gorman { 124310fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1244ed4a6d7fSMel Gorman } 1245ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1246ff9543fdSMichal Nazarewicz 1247ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 1248