// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
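
/*
 * For example, with 4K pages and pageblock_order == 9 (a common x86_64
 * configuration, so pageblock_nr_pages == 512), block_start_pfn(1000, 9)
 * evaluates to 512 and block_end_pfn(1000, 9) to 1024, i.e. the bounds of
 * the 2MB pageblock containing pfn 1000.
 */
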
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value but keep the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can recognise a page
	 * that the driver has released after isolation and migration does
	 * not try to put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
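
/*
 * Worked example of the deferral counters above: after three consecutive
 * compaction failures for an order, compact_defer_shift is 3, so
 * compaction_deferred() keeps returning true until compact_considered
 * reaches 1 << 3 == 8, i.e. roughly eight allocation attempts skip
 * compaction before it is retried. The shift saturates at
 * COMPACT_MAX_DEFER_SHIFT, so at most 1 << 6 == 64 attempts are skipped
 * between tries.
 */
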
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}
/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (zone != page_zone(page))
			continue;
		if (pageblock_skip_persistent(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
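
/*
 * The walk in __reset_isolation_suitable() visits one pfn per pageblock;
 * e.g. a 1GB zone with 2MB pageblocks (pageblock_order == 9 on a typical
 * x86_64 configuration) needs 512 iterations, which is cheap enough that
 * the cond_resched() on each step keeps latency bounded even for much
 * larger zones.
 */
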
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = true;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}
/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
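
/*
 * Note that isolate_freepages_block() records each isolated page's buddy
 * order in page_private(); the pages stay as high-order chunks on the
 * freelist until map_pages() later calls post_alloc_hook() and splits them
 * into the order-0 pages that migration consumes.
 */
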
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
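
/*
 * For example, on a node with 600,000 inactive and 200,000 active LRU pages,
 * too_many_isolated() starts throttling the migration scanner once more than
 * 400,000 pages are concurrently isolated by compaction and reclaim combined.
 */
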
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:        Compaction control structure.
 * @low_pfn:   The first PFN to isolate
 * @end_pfn:   The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
				page_count(page) > page_mapcount(page))
			goto isolate_fail;
		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(zone_lru_lock(zone),
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;
		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}
	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
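
/*
 * Callers batch migration in chunks of COMPACT_CLUSTER_MAX pages (equal to
 * SWAP_CLUSTER_MAX, typically 32), which is why the scan above stops as soon
 * as cc->nr_migratepages reaches that limit rather than filling the whole
 * pageblock in one pass.
 */
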
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a valid
		 * range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}
/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
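
/*
 * The comparison above is at pageblock granularity: e.g. with
 * pageblock_order == 9, a free_pfn of 262300 and a migrate_pfn of 262200
 * both shift down to pageblock 512, so the scanners are considered to have
 * met even though the raw pfns differ.
 */
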
1138f5f61a32SVlastimil Babka */ 1139a46cbf3bSDavid Rientjes break; 1140f5f61a32SVlastimil Babka } 1141c89511abSMel Gorman } 1142ff9543fdSMichal Nazarewicz 114366c64223SJoonsoo Kim /* __isolate_free_page() does not map the pages */ 1144ff9543fdSMichal Nazarewicz map_pages(freelist); 1145ff9543fdSMichal Nazarewicz 11467ed695e0SVlastimil Babka /* 1147f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. Either we 1148f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1149f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1150f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 11517ed695e0SVlastimil Babka */ 1152f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 1153748446bbSMel Gorman } 1154748446bbSMel Gorman 1155748446bbSMel Gorman /* 1156748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1157748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1158748446bbSMel Gorman */ 1159748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1160666feb21SMichal Hocko unsigned long data) 1161748446bbSMel Gorman { 1162748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1163748446bbSMel Gorman struct page *freepage; 1164748446bbSMel Gorman 1165be976572SVlastimil Babka /* 1166be976572SVlastimil Babka * Isolate free pages if necessary, and if we are not aborting due to 1167be976572SVlastimil Babka * contention. 1168be976572SVlastimil Babka */ 1169748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1170be976572SVlastimil Babka if (!cc->contended) 1171edc2ca61SVlastimil Babka isolate_freepages(cc); 1172748446bbSMel Gorman 1173748446bbSMel Gorman if (list_empty(&cc->freepages)) 1174748446bbSMel Gorman return NULL; 1175748446bbSMel Gorman } 1176748446bbSMel Gorman 1177748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1178748446bbSMel Gorman list_del(&freepage->lru); 1179748446bbSMel Gorman cc->nr_freepages--; 1180748446bbSMel Gorman 1181748446bbSMel Gorman return freepage; 1182748446bbSMel Gorman } 1183748446bbSMel Gorman 1184748446bbSMel Gorman /* 1185d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1186d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1187d53aea3dSDavid Rientjes * special handling needed for NUMA. 
1188d53aea3dSDavid Rientjes */ 1189d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1190d53aea3dSDavid Rientjes { 1191d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1192d53aea3dSDavid Rientjes 1193d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1194d53aea3dSDavid Rientjes cc->nr_freepages++; 1195d53aea3dSDavid Rientjes } 1196d53aea3dSDavid Rientjes 1197ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1198ff9543fdSMichal Nazarewicz typedef enum { 1199ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1200ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1201ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1202ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1203ff9543fdSMichal Nazarewicz 1204ff9543fdSMichal Nazarewicz /* 12055bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 12065bbe3547SEric B Munson * compactable pages. 12075bbe3547SEric B Munson */ 12085bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 12095bbe3547SEric B Munson 12105bbe3547SEric B Munson /* 1211edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1212edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1213edc2ca61SVlastimil Babka * compact_control. 1214ff9543fdSMichal Nazarewicz */ 1215ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1216ff9543fdSMichal Nazarewicz struct compact_control *cc) 1217ff9543fdSMichal Nazarewicz { 1218e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1219e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1220e1409c32SJoonsoo Kim unsigned long low_pfn; 1221edc2ca61SVlastimil Babka struct page *page; 1222edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 12235bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 12241d2047feSHugh Dickins (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1225ff9543fdSMichal Nazarewicz 1226edc2ca61SVlastimil Babka /* 1227edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 1228edc2ca61SVlastimil Babka * initialized by compact_zone() 1229edc2ca61SVlastimil Babka */ 1230edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 123106b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 1232e1409c32SJoonsoo Kim if (block_start_pfn < zone->zone_start_pfn) 1233e1409c32SJoonsoo Kim block_start_pfn = zone->zone_start_pfn; 1234ff9543fdSMichal Nazarewicz 1235ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 123606b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1237ff9543fdSMichal Nazarewicz 1238edc2ca61SVlastimil Babka /* 1239edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1240edc2ca61SVlastimil Babka * Do not cross the free scanner. 
1241edc2ca61SVlastimil Babka */ 1242e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 1243e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1244e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1245e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1246edc2ca61SVlastimil Babka 1247edc2ca61SVlastimil Babka /* 1248edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1249edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1250edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 1251edc2ca61SVlastimil Babka */ 1252edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1253edc2ca61SVlastimil Babka && compact_should_abort(cc)) 1254edc2ca61SVlastimil Babka break; 1255edc2ca61SVlastimil Babka 1256e1409c32SJoonsoo Kim page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 1257e1409c32SJoonsoo Kim zone); 12587d49d886SVlastimil Babka if (!page) 1259edc2ca61SVlastimil Babka continue; 1260edc2ca61SVlastimil Babka 1261edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 1262edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 1263edc2ca61SVlastimil Babka continue; 1264edc2ca61SVlastimil Babka 1265edc2ca61SVlastimil Babka /* 1266edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 1267edc2ca61SVlastimil Babka * Async compaction is optimistic to see if the minimum amount 1268edc2ca61SVlastimil Babka * of work satisfies the allocation. 1269edc2ca61SVlastimil Babka */ 1270b682debdSVlastimil Babka if (!suitable_migration_source(cc, page)) 1271edc2ca61SVlastimil Babka continue; 1272ff9543fdSMichal Nazarewicz 1273ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1274e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1275e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1276edc2ca61SVlastimil Babka 12776afcf8efSMing Ling if (!low_pfn || cc->contended) 1278ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1279ff9543fdSMichal Nazarewicz 1280edc2ca61SVlastimil Babka /* 1281edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1282edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1283edc2ca61SVlastimil Babka * continue or not. 1284edc2ca61SVlastimil Babka */ 1285edc2ca61SVlastimil Babka break; 1286edc2ca61SVlastimil Babka } 1287edc2ca61SVlastimil Babka 1288f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1289f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1290ff9543fdSMichal Nazarewicz 1291edc2ca61SVlastimil Babka return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; 1292ff9543fdSMichal Nazarewicz } 1293ff9543fdSMichal Nazarewicz 129421c527a3SYaowei Bai /* 129521c527a3SYaowei Bai * order == -1 is expected when compacting via 129621c527a3SYaowei Bai * /proc/sys/vm/compact_memory 129721c527a3SYaowei Bai */ 129821c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 129921c527a3SYaowei Bai { 130021c527a3SYaowei Bai return order == -1; 130121c527a3SYaowei Bai } 130221c527a3SYaowei Bai 1303d39773a0SVlastimil Babka static enum compact_result __compact_finished(struct zone *zone, 1304d39773a0SVlastimil Babka struct compact_control *cc) 1305748446bbSMel Gorman { 13068fb74b9fSMel Gorman unsigned int order; 1307d39773a0SVlastimil Babka const int migratetype = cc->migratetype; 130856de7263SMel Gorman 1309be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 13102d1e1041SVlastimil Babka return COMPACT_CONTENDED; 1311748446bbSMel Gorman 1312753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1313f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 131455b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 131502333641SVlastimil Babka reset_cached_positions(zone); 131655b7c4c9SVlastimil Babka 131762997027SMel Gorman /* 131862997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1319accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 132062997027SMel Gorman * flag itself as the decision to be clear should be directly 132162997027SMel Gorman * based on an allocation request. 132262997027SMel Gorman */ 1323accf6242SVlastimil Babka if (cc->direct_compaction) 132462997027SMel Gorman zone->compact_blockskip_flush = true; 132562997027SMel Gorman 1326c8f7de0bSMichal Hocko if (cc->whole_zone) 1327748446bbSMel Gorman return COMPACT_COMPLETE; 1328c8f7de0bSMichal Hocko else 1329c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1330bb13ffebSMel Gorman } 1331748446bbSMel Gorman 133221c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 133356de7263SMel Gorman return COMPACT_CONTINUE; 133456de7263SMel Gorman 1335baf6a9a1SVlastimil Babka if (cc->finishing_block) { 1336baf6a9a1SVlastimil Babka /* 1337baf6a9a1SVlastimil Babka * We have finished the pageblock, but better check again that 1338baf6a9a1SVlastimil Babka * we really succeeded. 1339baf6a9a1SVlastimil Babka */ 1340baf6a9a1SVlastimil Babka if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 1341baf6a9a1SVlastimil Babka cc->finishing_block = false; 1342baf6a9a1SVlastimil Babka else 1343baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 1344baf6a9a1SVlastimil Babka } 1345baf6a9a1SVlastimil Babka 134656de7263SMel Gorman /* Direct compactor: Is a suitable page free? 
*/ 134756de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 13488fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 13492149cdaeSJoonsoo Kim bool can_steal; 13508fb74b9fSMel Gorman 135156de7263SMel Gorman /* Job done if page is free of the right migratetype */ 13526d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 1353cf378319SVlastimil Babka return COMPACT_SUCCESS; 135456de7263SMel Gorman 13552149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 13562149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 13572149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 13582149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 1359cf378319SVlastimil Babka return COMPACT_SUCCESS; 13602149cdaeSJoonsoo Kim #endif 13612149cdaeSJoonsoo Kim /* 13622149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 13632149cdaeSJoonsoo Kim * other migratetype buddy lists. 13642149cdaeSJoonsoo Kim */ 13652149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 1366baf6a9a1SVlastimil Babka true, &can_steal) != -1) { 1367baf6a9a1SVlastimil Babka 1368baf6a9a1SVlastimil Babka /* movable pages are OK in any pageblock */ 1369baf6a9a1SVlastimil Babka if (migratetype == MIGRATE_MOVABLE) 1370cf378319SVlastimil Babka return COMPACT_SUCCESS; 1371baf6a9a1SVlastimil Babka 1372baf6a9a1SVlastimil Babka /* 1373baf6a9a1SVlastimil Babka * We are stealing for a non-movable allocation. Make 1374baf6a9a1SVlastimil Babka * sure we finish compacting the current pageblock 1375baf6a9a1SVlastimil Babka * first so it is as free as possible and we won't 1376baf6a9a1SVlastimil Babka * have to steal another one soon. This only applies 1377baf6a9a1SVlastimil Babka * to sync compaction, as async compaction operates 1378baf6a9a1SVlastimil Babka * on pageblocks of the same migratetype. 1379baf6a9a1SVlastimil Babka */ 1380baf6a9a1SVlastimil Babka if (cc->mode == MIGRATE_ASYNC || 1381baf6a9a1SVlastimil Babka IS_ALIGNED(cc->migrate_pfn, 1382baf6a9a1SVlastimil Babka pageblock_nr_pages)) { 1383baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 1384baf6a9a1SVlastimil Babka } 1385baf6a9a1SVlastimil Babka 1386baf6a9a1SVlastimil Babka cc->finishing_block = true; 1387baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 1388baf6a9a1SVlastimil Babka } 138956de7263SMel Gorman } 139056de7263SMel Gorman 1391837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE; 1392837d026dSJoonsoo Kim } 1393837d026dSJoonsoo Kim 1394ea7ab982SMichal Hocko static enum compact_result compact_finished(struct zone *zone, 1395d39773a0SVlastimil Babka struct compact_control *cc) 1396837d026dSJoonsoo Kim { 1397837d026dSJoonsoo Kim int ret; 1398837d026dSJoonsoo Kim 1399d39773a0SVlastimil Babka ret = __compact_finished(zone, cc); 1400837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret); 1401837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1402837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1403837d026dSJoonsoo Kim 1404837d026dSJoonsoo Kim return ret; 1405748446bbSMel Gorman } 1406748446bbSMel Gorman 14073e7d3449SMel Gorman /* 14083e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 
14093e7d3449SMel Gorman * Returns 14103e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 1411cf378319SVlastimil Babka * COMPACT_SUCCESS - If the allocation would succeed without compaction 14123e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 14133e7d3449SMel Gorman */ 1414ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 1415c603844bSMel Gorman unsigned int alloc_flags, 141686a294a8SMichal Hocko int classzone_idx, 141786a294a8SMichal Hocko unsigned long wmark_target) 14183e7d3449SMel Gorman { 14193e7d3449SMel Gorman unsigned long watermark; 14203e7d3449SMel Gorman 142121c527a3SYaowei Bai if (is_via_compact_memory(order)) 14223957c776SMichal Hocko return COMPACT_CONTINUE; 14233957c776SMichal Hocko 1424a9214443SMel Gorman watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 1425ebff3980SVlastimil Babka /* 1426ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1427ebff3980SVlastimil Babka * should be no need for compaction at all. 1428ebff3980SVlastimil Babka */ 1429ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1430ebff3980SVlastimil Babka alloc_flags)) 1431cf378319SVlastimil Babka return COMPACT_SUCCESS; 1432ebff3980SVlastimil Babka 14333957c776SMichal Hocko /* 14349861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 1435984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 1436984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 1437984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 1438984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 1439984fdba6SVlastimil Babka * isolation. We however do use the direct compactor's classzone_idx to 1440984fdba6SVlastimil Babka * skip over zones where lowmem reserves would prevent allocation even 1441984fdba6SVlastimil Babka * if compaction succeeds. 14428348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 14438348faf9SVlastimil Babka * compaction to proceed to increase its chances. 1444d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 1445d883c6cfSJoonsoo Kim * suitable migration targets 14463e7d3449SMel Gorman */ 14478348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
14488348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 14498348faf9SVlastimil Babka watermark += compact_gap(order); 145086a294a8SMichal Hocko if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 1451d883c6cfSJoonsoo Kim ALLOC_CMA, wmark_target)) 14523e7d3449SMel Gorman return COMPACT_SKIPPED; 14533e7d3449SMel Gorman 1454cc5c9f09SVlastimil Babka return COMPACT_CONTINUE; 1455cc5c9f09SVlastimil Babka } 1456cc5c9f09SVlastimil Babka 1457cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order, 1458cc5c9f09SVlastimil Babka unsigned int alloc_flags, 1459cc5c9f09SVlastimil Babka int classzone_idx) 1460cc5c9f09SVlastimil Babka { 1461cc5c9f09SVlastimil Babka enum compact_result ret; 1462cc5c9f09SVlastimil Babka int fragindex; 1463cc5c9f09SVlastimil Babka 1464cc5c9f09SVlastimil Babka ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, 1465cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 14663e7d3449SMel Gorman /* 14673e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 14683e7d3449SMel Gorman * low memory or external fragmentation 14693e7d3449SMel Gorman * 1470ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1471ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 14723e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 14733e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 14743e7d3449SMel Gorman * 147520311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 147620311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 147720311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 147820311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 147920311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 148020311420SVlastimil Babka * expense of system stability. 14813e7d3449SMel Gorman */ 148220311420SVlastimil Babka if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 14833e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 14843e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1485cc5c9f09SVlastimil Babka ret = COMPACT_NOT_SUITABLE_ZONE; 14863e7d3449SMel Gorman } 14873e7d3449SMel Gorman 1488837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1489837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1490837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1491837d026dSJoonsoo Kim 1492837d026dSJoonsoo Kim return ret; 1493837d026dSJoonsoo Kim } 1494837d026dSJoonsoo Kim 149586a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 149686a294a8SMichal Hocko int alloc_flags) 149786a294a8SMichal Hocko { 149886a294a8SMichal Hocko struct zone *zone; 149986a294a8SMichal Hocko struct zoneref *z; 150086a294a8SMichal Hocko 150186a294a8SMichal Hocko /* 150286a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 150386a294a8SMichal Hocko * retrying the reclaim. 
150486a294a8SMichal Hocko */ 150586a294a8SMichal Hocko for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 150686a294a8SMichal Hocko ac->nodemask) { 150786a294a8SMichal Hocko unsigned long available; 150886a294a8SMichal Hocko enum compact_result compact_result; 150986a294a8SMichal Hocko 151086a294a8SMichal Hocko /* 151186a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 151286a294a8SMichal Hocko * want to trash just for a single high order allocation which 151386a294a8SMichal Hocko * is even not guaranteed to appear even if __compaction_suitable 151486a294a8SMichal Hocko * is happy about the watermark check. 151586a294a8SMichal Hocko */ 15165a1c84b4SMel Gorman available = zone_reclaimable_pages(zone) / order; 151786a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 151886a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 151986a294a8SMichal Hocko ac_classzone_idx(ac), available); 1520cc5c9f09SVlastimil Babka if (compact_result != COMPACT_SKIPPED) 152186a294a8SMichal Hocko return true; 152286a294a8SMichal Hocko } 152386a294a8SMichal Hocko 152486a294a8SMichal Hocko return false; 152586a294a8SMichal Hocko } 152686a294a8SMichal Hocko 1527ea7ab982SMichal Hocko static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) 1528748446bbSMel Gorman { 1529ea7ab982SMichal Hocko enum compact_result ret; 1530c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1531108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 1532*566e54e1SMel Gorman unsigned long last_migrated_pfn; 1533e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1534748446bbSMel Gorman 1535d39773a0SVlastimil Babka cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); 1536ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1537ebff3980SVlastimil Babka cc->classzone_idx); 15383e7d3449SMel Gorman /* Compaction is likely to fail */ 1539cf378319SVlastimil Babka if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 15403e7d3449SMel Gorman return ret; 1541c46649deSMichal Hocko 1542c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 1543c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 15443e7d3449SMel Gorman 1545c89511abSMel Gorman /* 1546d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1547accf6242SVlastimil Babka * is about to be retried after being deferred. 1548d3132e4bSVlastimil Babka */ 1549accf6242SVlastimil Babka if (compaction_restarting(zone, cc->order)) 1550d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1551d3132e4bSVlastimil Babka 1552d3132e4bSVlastimil Babka /* 1553c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 155406ed2998SVlastimil Babka * information on where the scanners should start (unless we explicitly 155506ed2998SVlastimil Babka * want to compact the whole zone), but check that it is initialised 155606ed2998SVlastimil Babka * by ensuring the values are within zone boundaries. 
1557c89511abSMel Gorman */ 155806ed2998SVlastimil Babka if (cc->whole_zone) { 155906ed2998SVlastimil Babka cc->migrate_pfn = start_pfn; 156006ed2998SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 156106ed2998SVlastimil Babka } else { 1562e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1563c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1564623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 156506b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 1566c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1567c89511abSMel Gorman } 1568623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 1569c89511abSMel Gorman cc->migrate_pfn = start_pfn; 157035979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 157135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1572c89511abSMel Gorman } 1573c8f7de0bSMichal Hocko 1574c8f7de0bSMichal Hocko if (cc->migrate_pfn == start_pfn) 1575c8f7de0bSMichal Hocko cc->whole_zone = true; 157606ed2998SVlastimil Babka } 1577c8f7de0bSMichal Hocko 1578*566e54e1SMel Gorman last_migrated_pfn = 0; 1579748446bbSMel Gorman 158016c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 158116c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 15820eb927c0SMel Gorman 1583748446bbSMel Gorman migrate_prep_local(); 1584748446bbSMel Gorman 1585d39773a0SVlastimil Babka while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 15869d502c1cSMinchan Kim int err; 1587*566e54e1SMel Gorman unsigned long start_pfn = cc->migrate_pfn; 1588748446bbSMel Gorman 1589f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1590f9e35b3bSMel Gorman case ISOLATE_ABORT: 15912d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 15925733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1593e64c5237SShaohua Li cc->nr_migratepages = 0; 1594*566e54e1SMel Gorman last_migrated_pfn = 0; 1595f9e35b3bSMel Gorman goto out; 1596f9e35b3bSMel Gorman case ISOLATE_NONE: 1597fdaf7f5cSVlastimil Babka /* 1598fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 1599fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 1600fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 
1601fdaf7f5cSVlastimil Babka */ 1602fdaf7f5cSVlastimil Babka goto check_drain; 1603f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1604*566e54e1SMel Gorman last_migrated_pfn = start_pfn; 1605f9e35b3bSMel Gorman ; 1606f9e35b3bSMel Gorman } 1607748446bbSMel Gorman 1608d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1609e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 16107b2a2d4aSMel Gorman MR_COMPACTION); 1611748446bbSMel Gorman 1612f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1613f8c9301fSVlastimil Babka &cc->migratepages); 1614748446bbSMel Gorman 1615f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1616f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 16179d502c1cSMinchan Kim if (err) { 16185733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 16197ed695e0SVlastimil Babka /* 16207ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 16217ed695e0SVlastimil Babka * and we want compact_finished() to detect it 16227ed695e0SVlastimil Babka */ 1623f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 16242d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 16254bf2bba3SDavid Rientjes goto out; 1626748446bbSMel Gorman } 1627fdd048e1SVlastimil Babka /* 1628fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 1629fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 1630fdd048e1SVlastimil Babka */ 1631fdd048e1SVlastimil Babka if (cc->direct_compaction && 1632fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 1633fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 1634fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 1635fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 1636*566e54e1SMel Gorman last_migrated_pfn = 0; 1637fdd048e1SVlastimil Babka } 16384bf2bba3SDavid Rientjes } 1639fdaf7f5cSVlastimil Babka 1640fdaf7f5cSVlastimil Babka check_drain: 1641fdaf7f5cSVlastimil Babka /* 1642fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 1643fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 1644fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 1645fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 1646fdaf7f5cSVlastimil Babka * would succeed. 1647fdaf7f5cSVlastimil Babka */ 1648*566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 1649fdaf7f5cSVlastimil Babka int cpu; 1650fdaf7f5cSVlastimil Babka unsigned long current_block_start = 165106b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 1652fdaf7f5cSVlastimil Babka 1653*566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 1654fdaf7f5cSVlastimil Babka cpu = get_cpu(); 1655fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 1656fdaf7f5cSVlastimil Babka drain_local_pages(zone); 1657fdaf7f5cSVlastimil Babka put_cpu(); 1658fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 1659*566e54e1SMel Gorman last_migrated_pfn = 0; 1660fdaf7f5cSVlastimil Babka } 1661fdaf7f5cSVlastimil Babka } 1662fdaf7f5cSVlastimil Babka 1663748446bbSMel Gorman } 1664748446bbSMel Gorman 1665f9e35b3bSMel Gorman out: 16666bace090SVlastimil Babka /* 16676bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 16686bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 
16696bace090SVlastimil Babka */ 16706bace090SVlastimil Babka if (cc->nr_freepages > 0) { 16716bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 16726bace090SVlastimil Babka 16736bace090SVlastimil Babka cc->nr_freepages = 0; 16746bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 16756bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 167606b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 16776bace090SVlastimil Babka /* 16786bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 16796bace090SVlastimil Babka * already reset to zone end in compact_finished() 16806bace090SVlastimil Babka */ 16816bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn) 16826bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn; 16836bace090SVlastimil Babka } 1684748446bbSMel Gorman 16857f354a54SDavid Rientjes count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 16867f354a54SDavid Rientjes count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 16877f354a54SDavid Rientjes 168816c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 168916c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 16900eb927c0SMel Gorman 1691748446bbSMel Gorman return ret; 1692748446bbSMel Gorman } 169376ab0f53SMel Gorman 1694ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 1695c3486f53SVlastimil Babka gfp_t gfp_mask, enum compact_priority prio, 1696c603844bSMel Gorman unsigned int alloc_flags, int classzone_idx) 169756de7263SMel Gorman { 1698ea7ab982SMichal Hocko enum compact_result ret; 169956de7263SMel Gorman struct compact_control cc = { 170056de7263SMel Gorman .nr_freepages = 0, 170156de7263SMel Gorman .nr_migratepages = 0, 17027f354a54SDavid Rientjes .total_migrate_scanned = 0, 17037f354a54SDavid Rientjes .total_free_scanned = 0, 170456de7263SMel Gorman .order = order, 17056d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 170656de7263SMel Gorman .zone = zone, 1707a5508cd8SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ? 
1708a5508cd8SVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 1709ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 1710ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 1711accf6242SVlastimil Babka .direct_compaction = true, 1712a8e025e5SVlastimil Babka .whole_zone = (prio == MIN_COMPACT_PRIORITY), 17139f7e3387SVlastimil Babka .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 17149f7e3387SVlastimil Babka .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 171556de7263SMel Gorman }; 171656de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 171756de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 171856de7263SMel Gorman 1719e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1720e64c5237SShaohua Li 1721e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1722e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1723e64c5237SShaohua Li 1724e64c5237SShaohua Li return ret; 172556de7263SMel Gorman } 172656de7263SMel Gorman 17275e771905SMel Gorman int sysctl_extfrag_threshold = 500; 17285e771905SMel Gorman 172956de7263SMel Gorman /** 173056de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 173156de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 17321a6d53a1SVlastimil Babka * @order: The order of the current allocation 17331a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 17341a6d53a1SVlastimil Babka * @ac: The context of current allocation 1735112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 173656de7263SMel Gorman * 173756de7263SMel Gorman * This is the main entry point for direct page compaction. 173856de7263SMel Gorman */ 1739ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 1740c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 1741c3486f53SVlastimil Babka enum compact_priority prio) 174256de7263SMel Gorman { 174356de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 174456de7263SMel Gorman struct zoneref *z; 174556de7263SMel Gorman struct zone *zone; 17461d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 174756de7263SMel Gorman 174873e64c51SMichal Hocko /* 174973e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 175073e64c51SMichal Hocko * tricky context because the migration might require IO 175173e64c51SMichal Hocko */ 175273e64c51SMichal Hocko if (!may_perform_io) 175353853e2dSVlastimil Babka return COMPACT_SKIPPED; 175456de7263SMel Gorman 1755a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 1756837d026dSJoonsoo Kim 175756de7263SMel Gorman /* Compact each zone in the list */ 17581a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 17591a6d53a1SVlastimil Babka ac->nodemask) { 1760ea7ab982SMichal Hocko enum compact_result status; 176156de7263SMel Gorman 1762a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 1763a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 17641d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 176553853e2dSVlastimil Babka continue; 17661d4746d3SMichal Hocko } 176753853e2dSVlastimil Babka 1768a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 1769c3486f53SVlastimil Babka alloc_flags, ac_classzone_idx(ac)); 177056de7263SMel Gorman rc = max(status, rc); 177156de7263SMel Gorman 17727ceb009aSVlastimil Babka /* The 
allocation should succeed, stop compacting */ 17737ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 177453853e2dSVlastimil Babka /* 177553853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 177653853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 177753853e2dSVlastimil Babka * will repeat this with true if allocation indeed 177853853e2dSVlastimil Babka * succeeds in this zone. 177953853e2dSVlastimil Babka */ 178053853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 17811f9efdefSVlastimil Babka 1782c3486f53SVlastimil Babka break; 17831f9efdefSVlastimil Babka } 17841f9efdefSVlastimil Babka 1785a5508cd8SVlastimil Babka if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 1786c3486f53SVlastimil Babka status == COMPACT_PARTIAL_SKIPPED)) 178753853e2dSVlastimil Babka /* 178853853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 178953853e2dSVlastimil Babka * so we defer compaction there. If it ends up 179053853e2dSVlastimil Babka * succeeding after all, it will be reset. 179153853e2dSVlastimil Babka */ 179253853e2dSVlastimil Babka defer_compaction(zone, order); 17931f9efdefSVlastimil Babka 17941f9efdefSVlastimil Babka /* 17951f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 17961f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 1797c3486f53SVlastimil Babka * case do not try further zones 17981f9efdefSVlastimil Babka */ 1799c3486f53SVlastimil Babka if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 1800c3486f53SVlastimil Babka || fatal_signal_pending(current)) 18011f9efdefSVlastimil Babka break; 18021f9efdefSVlastimil Babka } 18031f9efdefSVlastimil Babka 180456de7263SMel Gorman return rc; 180556de7263SMel Gorman } 180656de7263SMel Gorman 180756de7263SMel Gorman 180876ab0f53SMel Gorman /* Compact all zones within a node */ 18097103f16dSAndrew Morton static void compact_node(int nid) 18107be62de9SRik van Riel { 1811791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 1812791cae96SVlastimil Babka int zoneid; 1813791cae96SVlastimil Babka struct zone *zone; 18147be62de9SRik van Riel struct compact_control cc = { 18157be62de9SRik van Riel .order = -1, 18167f354a54SDavid Rientjes .total_migrate_scanned = 0, 18177f354a54SDavid Rientjes .total_free_scanned = 0, 1818e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 181991ca9186SDavid Rientjes .ignore_skip_hint = true, 182006ed2998SVlastimil Babka .whole_zone = true, 182173e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 18227be62de9SRik van Riel }; 18237be62de9SRik van Riel 1824791cae96SVlastimil Babka 1825791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 1826791cae96SVlastimil Babka 1827791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1828791cae96SVlastimil Babka if (!populated_zone(zone)) 1829791cae96SVlastimil Babka continue; 1830791cae96SVlastimil Babka 1831791cae96SVlastimil Babka cc.nr_freepages = 0; 1832791cae96SVlastimil Babka cc.nr_migratepages = 0; 1833791cae96SVlastimil Babka cc.zone = zone; 1834791cae96SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 1835791cae96SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 1836791cae96SVlastimil Babka 1837791cae96SVlastimil Babka compact_zone(zone, &cc); 1838791cae96SVlastimil Babka 1839791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 1840791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 1841791cae96SVlastimil Babka } 18427be62de9SRik van Riel } 18437be62de9SRik 
van Riel 184476ab0f53SMel Gorman /* Compact all nodes in the system */ 18457964c06dSJason Liu static void compact_nodes(void) 184676ab0f53SMel Gorman { 184776ab0f53SMel Gorman int nid; 184876ab0f53SMel Gorman 18498575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 18508575ec29SHugh Dickins lru_add_drain_all(); 18518575ec29SHugh Dickins 185276ab0f53SMel Gorman for_each_online_node(nid) 185376ab0f53SMel Gorman compact_node(nid); 185476ab0f53SMel Gorman } 185576ab0f53SMel Gorman 185676ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 185776ab0f53SMel Gorman int sysctl_compact_memory; 185876ab0f53SMel Gorman 1859fec4eb2cSYaowei Bai /* 1860fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 1861fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 1862fec4eb2cSYaowei Bai */ 186376ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 186476ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 186576ab0f53SMel Gorman { 186676ab0f53SMel Gorman if (write) 18677964c06dSJason Liu compact_nodes(); 186876ab0f53SMel Gorman 186976ab0f53SMel Gorman return 0; 187076ab0f53SMel Gorman } 1871ed4a6d7fSMel Gorman 1872ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 187374e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 187410fbcf4cSKay Sievers struct device_attribute *attr, 1875ed4a6d7fSMel Gorman const char *buf, size_t count) 1876ed4a6d7fSMel Gorman { 18778575ec29SHugh Dickins int nid = dev->id; 18788575ec29SHugh Dickins 18798575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 18808575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 18818575ec29SHugh Dickins lru_add_drain_all(); 18828575ec29SHugh Dickins 18838575ec29SHugh Dickins compact_node(nid); 18848575ec29SHugh Dickins } 1885ed4a6d7fSMel Gorman 1886ed4a6d7fSMel Gorman return count; 1887ed4a6d7fSMel Gorman } 18880825a6f9SJoe Perches static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); 1889ed4a6d7fSMel Gorman 1890ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1891ed4a6d7fSMel Gorman { 189210fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1893ed4a6d7fSMel Gorman } 1894ed4a6d7fSMel Gorman 1895ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1896ed4a6d7fSMel Gorman { 189710fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1898ed4a6d7fSMel Gorman } 1899ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1900ff9543fdSMichal Nazarewicz 1901698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 1902698b1b30SVlastimil Babka { 1903172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 1904698b1b30SVlastimil Babka } 1905698b1b30SVlastimil Babka 1906698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 1907698b1b30SVlastimil Babka { 1908698b1b30SVlastimil Babka int zoneid; 1909698b1b30SVlastimil Babka struct zone *zone; 1910698b1b30SVlastimil Babka enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 1911698b1b30SVlastimil Babka 19126cd9dc3eSChen Feng for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { 1913698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1914698b1b30SVlastimil Babka 1915698b1b30SVlastimil Babka if (!populated_zone(zone)) 1916698b1b30SVlastimil Babka continue; 1917698b1b30SVlastimil Babka 1918698b1b30SVlastimil Babka if 
(compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 1919698b1b30SVlastimil Babka classzone_idx) == COMPACT_CONTINUE) 1920698b1b30SVlastimil Babka return true; 1921698b1b30SVlastimil Babka } 1922698b1b30SVlastimil Babka 1923698b1b30SVlastimil Babka return false; 1924698b1b30SVlastimil Babka } 1925698b1b30SVlastimil Babka 1926698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 1927698b1b30SVlastimil Babka { 1928698b1b30SVlastimil Babka /* 1929698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 1930698b1b30SVlastimil Babka * order is allocatable. 1931698b1b30SVlastimil Babka */ 1932698b1b30SVlastimil Babka int zoneid; 1933698b1b30SVlastimil Babka struct zone *zone; 1934698b1b30SVlastimil Babka struct compact_control cc = { 1935698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 19367f354a54SDavid Rientjes .total_migrate_scanned = 0, 19377f354a54SDavid Rientjes .total_free_scanned = 0, 1938698b1b30SVlastimil Babka .classzone_idx = pgdat->kcompactd_classzone_idx, 1939698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 1940a0647dc9SDavid Rientjes .ignore_skip_hint = false, 194173e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 1942698b1b30SVlastimil Babka }; 1943698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 1944698b1b30SVlastimil Babka cc.classzone_idx); 19457f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 1946698b1b30SVlastimil Babka 19476cd9dc3eSChen Feng for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { 1948698b1b30SVlastimil Babka int status; 1949698b1b30SVlastimil Babka 1950698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1951698b1b30SVlastimil Babka if (!populated_zone(zone)) 1952698b1b30SVlastimil Babka continue; 1953698b1b30SVlastimil Babka 1954698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 1955698b1b30SVlastimil Babka continue; 1956698b1b30SVlastimil Babka 1957698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 1958698b1b30SVlastimil Babka COMPACT_CONTINUE) 1959698b1b30SVlastimil Babka continue; 1960698b1b30SVlastimil Babka 1961698b1b30SVlastimil Babka cc.nr_freepages = 0; 1962698b1b30SVlastimil Babka cc.nr_migratepages = 0; 19637f354a54SDavid Rientjes cc.total_migrate_scanned = 0; 19647f354a54SDavid Rientjes cc.total_free_scanned = 0; 1965698b1b30SVlastimil Babka cc.zone = zone; 1966698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 1967698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 1968698b1b30SVlastimil Babka 1969172400c6SVlastimil Babka if (kthread_should_stop()) 1970172400c6SVlastimil Babka return; 1971698b1b30SVlastimil Babka status = compact_zone(zone, &cc); 1972698b1b30SVlastimil Babka 19737ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 1974698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 1975c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 1976698b1b30SVlastimil Babka /* 1977bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 1978bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 1979bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 1980bc3106b2SDavid Rientjes * upcoming deferral. 
1981bc3106b2SDavid Rientjes */ 1982bc3106b2SDavid Rientjes drain_all_pages(zone); 1983bc3106b2SDavid Rientjes 1984bc3106b2SDavid Rientjes /* 1985698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 1986698b1b30SVlastimil Babka * sync direct compaction does. 1987698b1b30SVlastimil Babka */ 1988698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 1989698b1b30SVlastimil Babka } 1990698b1b30SVlastimil Babka 19917f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 19927f354a54SDavid Rientjes cc.total_migrate_scanned); 19937f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 19947f354a54SDavid Rientjes cc.total_free_scanned); 19957f354a54SDavid Rientjes 1996698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 1997698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 1998698b1b30SVlastimil Babka } 1999698b1b30SVlastimil Babka 2000698b1b30SVlastimil Babka /* 2001698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 2002698b1b30SVlastimil Babka * the requested order/classzone_idx in case it was higher/tighter than 2003698b1b30SVlastimil Babka * our current ones 2004698b1b30SVlastimil Babka */ 2005698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2006698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2007698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 2008698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2009698b1b30SVlastimil Babka } 2010698b1b30SVlastimil Babka 2011698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 2012698b1b30SVlastimil Babka { 2013698b1b30SVlastimil Babka if (!order) 2014698b1b30SVlastimil Babka return; 2015698b1b30SVlastimil Babka 2016698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2017698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2018698b1b30SVlastimil Babka 2019698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx > classzone_idx) 2020698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = classzone_idx; 2021698b1b30SVlastimil Babka 20226818600fSDavidlohr Bueso /* 20236818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 20246818600fSDavidlohr Bueso * such that wakeups are not missed. 20256818600fSDavidlohr Bueso */ 20266818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2027698b1b30SVlastimil Babka return; 2028698b1b30SVlastimil Babka 2029698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2030698b1b30SVlastimil Babka return; 2031698b1b30SVlastimil Babka 2032698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2033698b1b30SVlastimil Babka classzone_idx); 2034698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2035698b1b30SVlastimil Babka } 2036698b1b30SVlastimil Babka 2037698b1b30SVlastimil Babka /* 2038698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2039698b1b30SVlastimil Babka * from the init process. 
2040698b1b30SVlastimil Babka */ 2041698b1b30SVlastimil Babka static int kcompactd(void *p) 2042698b1b30SVlastimil Babka { 2043698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p; 2044698b1b30SVlastimil Babka struct task_struct *tsk = current; 2045698b1b30SVlastimil Babka 2046698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2047698b1b30SVlastimil Babka 2048698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 2049698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 2050698b1b30SVlastimil Babka 2051698b1b30SVlastimil Babka set_freezable(); 2052698b1b30SVlastimil Babka 2053698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2054698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2055698b1b30SVlastimil Babka 2056698b1b30SVlastimil Babka while (!kthread_should_stop()) { 2057eb414681SJohannes Weiner unsigned long pflags; 2058eb414681SJohannes Weiner 2059698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2060698b1b30SVlastimil Babka wait_event_freezable(pgdat->kcompactd_wait, 2061698b1b30SVlastimil Babka kcompactd_work_requested(pgdat)); 2062698b1b30SVlastimil Babka 2063eb414681SJohannes Weiner psi_memstall_enter(&pflags); 2064698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 2065eb414681SJohannes Weiner psi_memstall_leave(&pflags); 2066698b1b30SVlastimil Babka } 2067698b1b30SVlastimil Babka 2068698b1b30SVlastimil Babka return 0; 2069698b1b30SVlastimil Babka } 2070698b1b30SVlastimil Babka 2071698b1b30SVlastimil Babka /* 2072698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 2073698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added. 2074698b1b30SVlastimil Babka */ 2075698b1b30SVlastimil Babka int kcompactd_run(int nid) 2076698b1b30SVlastimil Babka { 2077698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2078698b1b30SVlastimil Babka int ret = 0; 2079698b1b30SVlastimil Babka 2080698b1b30SVlastimil Babka if (pgdat->kcompactd) 2081698b1b30SVlastimil Babka return 0; 2082698b1b30SVlastimil Babka 2083698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 2084698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 2085698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 2086698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd); 2087698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 2088698b1b30SVlastimil Babka } 2089698b1b30SVlastimil Babka return ret; 2090698b1b30SVlastimil Babka } 2091698b1b30SVlastimil Babka 2092698b1b30SVlastimil Babka /* 2093698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 2094698b1b30SVlastimil Babka * hold mem_hotplug_begin/end(). 2095698b1b30SVlastimil Babka */ 2096698b1b30SVlastimil Babka void kcompactd_stop(int nid) 2097698b1b30SVlastimil Babka { 2098698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 2099698b1b30SVlastimil Babka 2100698b1b30SVlastimil Babka if (kcompactd) { 2101698b1b30SVlastimil Babka kthread_stop(kcompactd); 2102698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 2103698b1b30SVlastimil Babka } 2104698b1b30SVlastimil Babka } 2105698b1b30SVlastimil Babka 2106698b1b30SVlastimil Babka /* 2107698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but 2108698b1b30SVlastimil Babka * not required for correctness.
So if the last cpu in a node goes 2109698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2110698b1b30SVlastimil Babka * restore their cpu bindings. 2111698b1b30SVlastimil Babka */ 2112e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 2113698b1b30SVlastimil Babka { 2114698b1b30SVlastimil Babka int nid; 2115698b1b30SVlastimil Babka 2116698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2117698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2118698b1b30SVlastimil Babka const struct cpumask *mask; 2119698b1b30SVlastimil Babka 2120698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2121698b1b30SVlastimil Babka 2122698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2123698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2124698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2125698b1b30SVlastimil Babka } 2126e46b1db2SAnna-Maria Gleixner return 0; 2127698b1b30SVlastimil Babka } 2128698b1b30SVlastimil Babka 2129698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2130698b1b30SVlastimil Babka { 2131698b1b30SVlastimil Babka int nid; 2132e46b1db2SAnna-Maria Gleixner int ret; 2133e46b1db2SAnna-Maria Gleixner 2134e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2135e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 2136e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 2137e46b1db2SAnna-Maria Gleixner if (ret < 0) { 2138e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 2139e46b1db2SAnna-Maria Gleixner return ret; 2140e46b1db2SAnna-Maria Gleixner } 2141698b1b30SVlastimil Babka 2142698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2143698b1b30SVlastimil Babka kcompactd_run(nid); 2144698b1b30SVlastimil Babka return 0; 2145698b1b30SVlastimil Babka } 2146698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2147698b1b30SVlastimil Babka 2148ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 2149