// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
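
/*
 * For example, with 4K pages and pageblock_order == 9 (the common 2MB
 * pageblock on x86-64), pageblock_start_pfn(1000) == 512 and
 * pageblock_end_pfn(1000) == 1024.
 */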

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}
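
/*
 * Split the isolated high-order free pages back into order-0 pages. As an
 * example, one isolated order-2 free page is prepared by a single
 * post_alloc_hook() call and then split into four order-0 pages on the list.
 */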
static void map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page the
	 * driver released after isolation and migration doesn't try to put
	 * it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
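
/*
 * Worked example of the deferral counters: after a failure that bumps
 * compact_defer_shift to 3, compaction_deferred() returns true for the next
 * seven attempts and lets the eighth proceed (compact_considered reaches
 * 1 << 3). Once the shift is pinned at COMPACT_MAX_DEFER_SHIFT and 64
 * attempts have been counted, compaction_restarting() reports that the
 * cached skip state should be reset before trying again.
 */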

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they
 * are migratable), and the pageblocks they occupy cannot contain any free
 * pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (zone != page_zone(page))
			continue;
		if (pageblock_skip_persistent(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
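
/*
 * The flush is intentionally lazy: a finished full compaction only sets
 * compact_blockskip_flush, and a later caller of reset_isolation_suitable()
 * (e.g. kswapd before it sleeps) drops the stale skip hints in one pass.
 */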

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = true;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}
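
/*
 * Sketch of the usage pattern for the two helpers above, as in the scanners
 * below:
 *
 *	if (!(pfn % SWAP_CLUSTER_MAX)
 *	    && compact_unlock_should_abort(lock, flags, &locked, cc))
 *		break;
 *	...
 *	if (!locked) {
 *		locked = compact_trylock_irqsave(lock, &flags, cc);
 *		if (!locked)
 *			break;
 *	}
 */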

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read a bogus compound_order(),
	 * so be careful not to go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
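
/*
 * Note on @strict: isolate_freepages_range() below passes strict == true so
 * that any hole in the requested range zeroes the return value and lets CMA
 * fail cleanly, while the free scanner in isolate_freepages() passes
 * strict == false and keeps whatever it managed to isolate.
 */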

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if an isolated freepage
		 * is more than pageblock order. In this case, we adjust the
		 * scanning range to the right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
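
/*
 * Example: on a node with 6000 active and 2000 inactive LRU pages,
 * too_many_isolated() starts throttling once the NR_ISOLATED_* counters
 * exceed 4000, i.e. once isolated pages outnumber half the resident LRU.
 */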

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read the page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-LRU movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}
837119d6d59SDavid Rientjes */ 838119d6d59SDavid Rientjes if (!page_mapping(page) && 839119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 840fdd048e1SVlastimil Babka goto isolate_fail; 841119d6d59SDavid Rientjes 84273e64c51SMichal Hocko /* 84373e64c51SMichal Hocko * Only allow to migrate anonymous pages in GFP_NOFS context 84473e64c51SMichal Hocko * because those do not depend on fs locks. 84573e64c51SMichal Hocko */ 84673e64c51SMichal Hocko if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) 84773e64c51SMichal Hocko goto isolate_fail; 84873e64c51SMichal Hocko 84969b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 85069b7189fSVlastimil Babka if (!locked) { 851a52633d8SMel Gorman locked = compact_trylock_irqsave(zone_lru_lock(zone), 8528b44d279SVlastimil Babka &flags, cc); 8538b44d279SVlastimil Babka if (!locked) 8542a1402aaSMel Gorman break; 8552a1402aaSMel Gorman 85629c0dde8SVlastimil Babka /* Recheck PageLRU and PageCompound under lock */ 8572a1402aaSMel Gorman if (!PageLRU(page)) 858fdd048e1SVlastimil Babka goto isolate_fail; 85929c0dde8SVlastimil Babka 86029c0dde8SVlastimil Babka /* 86129c0dde8SVlastimil Babka * Page become compound since the non-locked check, 86229c0dde8SVlastimil Babka * and it's on LRU. It can only be a THP so the order 86329c0dde8SVlastimil Babka * is safe to read and it's 0 for tail pages. 86429c0dde8SVlastimil Babka */ 86529c0dde8SVlastimil Babka if (unlikely(PageCompound(page))) { 866d3c85badSVlastimil Babka low_pfn += (1UL << compound_order(page)) - 1; 867fdd048e1SVlastimil Babka goto isolate_fail; 868bc835011SAndrea Arcangeli } 86969b7189fSVlastimil Babka } 870bc835011SAndrea Arcangeli 871599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 872fa9add64SHugh Dickins 873748446bbSMel Gorman /* Try isolate the page */ 874edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 875fdd048e1SVlastimil Babka goto isolate_fail; 876748446bbSMel Gorman 87729c0dde8SVlastimil Babka VM_BUG_ON_PAGE(PageCompound(page), page); 878bc835011SAndrea Arcangeli 879748446bbSMel Gorman /* Successfully isolated */ 880fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 8816afcf8efSMing Ling inc_node_page_state(page, 8826afcf8efSMing Ling NR_ISOLATED_ANON + page_is_file_cache(page)); 883b6c75016SJoonsoo Kim 884b6c75016SJoonsoo Kim isolate_success: 885fdd048e1SVlastimil Babka list_add(&page->lru, &cc->migratepages); 886748446bbSMel Gorman cc->nr_migratepages++; 887b7aba698SMel Gorman nr_isolated++; 888748446bbSMel Gorman 889a34753d2SVlastimil Babka /* 890a34753d2SVlastimil Babka * Record where we could have freed pages by migration and not 891a34753d2SVlastimil Babka * yet flushed them to buddy allocator. 892a34753d2SVlastimil Babka * - this is the lowest page that was isolated and likely be 893a34753d2SVlastimil Babka * then freed by migration. 

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to the buddy allocator.
		 * - this is the lowest page that was isolated and likely to
		 *   be then freed by migration.
		 */
		if (!cc->last_migrated_pfn)
			cc->last_migrated_pfn = low_pfn;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			cc->last_migrated_pfn = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. a pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
						struct page *page)
{
	int block_mt;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}
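
/*
 * Example: async direct compaction on behalf of a MIGRATE_MOVABLE allocation
 * only takes migration candidates from MOVABLE (or CMA) pageblocks; all
 * other compaction modes consider any pageblock a valid source.
 */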

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for the
		 * valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
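
/*
 * Example: with pageblock_order == 9, free_pfn == 0x40100 and
 * migrate_pfn == 0x40000 both fall in pageblock 512, so
 * compact_scanners_met() returns true and compaction can finish.
 */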

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For the ending point, take care when isolating in the last
	 * pageblock of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
					freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to lock
		 * contention, terminate.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}
	}
1148f5f61a32SVlastimil Babka			 */
1149a46cbf3bSDavid Rientjes			break;
1150f5f61a32SVlastimil Babka		}
1151c89511abSMel Gorman	}
1152ff9543fdSMichal Nazarewicz
115366c64223SJoonsoo Kim	/* __isolate_free_page() does not map the pages */
1154ff9543fdSMichal Nazarewicz	map_pages(freelist);
1155ff9543fdSMichal Nazarewicz
11567ed695e0SVlastimil Babka	/*
1157f5f61a32SVlastimil Babka	 * Record where the free scanner will restart next time. Either we
1158f5f61a32SVlastimil Babka	 * broke from the loop and set isolate_start_pfn based on the last
1159f5f61a32SVlastimil Babka	 * call to isolate_freepages_block(), or we met the migration scanner
1160f5f61a32SVlastimil Babka	 * and the loop terminated due to isolate_start_pfn < low_pfn.
11617ed695e0SVlastimil Babka	 */
1162f5f61a32SVlastimil Babka	cc->free_pfn = isolate_start_pfn;
1163748446bbSMel Gorman}
1164748446bbSMel Gorman
1165748446bbSMel Gorman/*
1166748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages
1167748446bbSMel Gorman * from the isolated freelists in the block we are migrating to.
1168748446bbSMel Gorman */
1169748446bbSMel Gormanstatic struct page *compaction_alloc(struct page *migratepage,
1170666feb21SMichal Hocko					unsigned long data)
1171748446bbSMel Gorman{
1172748446bbSMel Gorman	struct compact_control *cc = (struct compact_control *)data;
1173748446bbSMel Gorman	struct page *freepage;
1174748446bbSMel Gorman
1175be976572SVlastimil Babka	/*
1176be976572SVlastimil Babka	 * Isolate free pages if necessary, and if we are not aborting due to
1177be976572SVlastimil Babka	 * contention.
1178be976572SVlastimil Babka	 */
1179748446bbSMel Gorman	if (list_empty(&cc->freepages)) {
1180be976572SVlastimil Babka		if (!cc->contended)
1181edc2ca61SVlastimil Babka			isolate_freepages(cc);
1182748446bbSMel Gorman
1183748446bbSMel Gorman		if (list_empty(&cc->freepages))
1184748446bbSMel Gorman			return NULL;
1185748446bbSMel Gorman	}
1186748446bbSMel Gorman
1187748446bbSMel Gorman	freepage = list_entry(cc->freepages.next, struct page, lru);
1188748446bbSMel Gorman	list_del(&freepage->lru);
1189748446bbSMel Gorman	cc->nr_freepages--;
1190748446bbSMel Gorman
1191748446bbSMel Gorman	return freepage;
1192748446bbSMel Gorman}
1193748446bbSMel Gorman
1194748446bbSMel Gorman/*
1195d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated
1196d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no
1197d53aea3dSDavid Rientjes * special handling needed for NUMA.
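 *
 * compaction_alloc() and compaction_free() are handed to migrate_pages()
 * as a pair later in compact_zone(); a sketch of that call:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);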
1198d53aea3dSDavid Rientjes */
1199d53aea3dSDavid Rientjesstatic void compaction_free(struct page *page, unsigned long data)
1200d53aea3dSDavid Rientjes{
1201d53aea3dSDavid Rientjes	struct compact_control *cc = (struct compact_control *)data;
1202d53aea3dSDavid Rientjes
1203d53aea3dSDavid Rientjes	list_add(&page->lru, &cc->freepages);
1204d53aea3dSDavid Rientjes	cc->nr_freepages++;
1205d53aea3dSDavid Rientjes}
1206d53aea3dSDavid Rientjes
1207ff9543fdSMichal Nazarewicz/* possible outcome of isolate_migratepages */
1208ff9543fdSMichal Nazarewicztypedef enum {
1209ff9543fdSMichal Nazarewicz	ISOLATE_ABORT,		/* Abort compaction now */
1210ff9543fdSMichal Nazarewicz	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1211ff9543fdSMichal Nazarewicz	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1212ff9543fdSMichal Nazarewicz} isolate_migrate_t;
1213ff9543fdSMichal Nazarewicz
1214ff9543fdSMichal Nazarewicz/*
12155bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for
12165bbe3547SEric B Munson * compactable pages.
12175bbe3547SEric B Munson */
12185bbe3547SEric B Munsonint sysctl_compact_unevictable_allowed __read_mostly = 1;
12195bbe3547SEric B Munson
12205bbe3547SEric B Munson/*
1221edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block,
1222edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within
1223edc2ca61SVlastimil Babka * compact_control.
1224ff9543fdSMichal Nazarewicz */
1225ff9543fdSMichal Nazarewiczstatic isolate_migrate_t isolate_migratepages(struct zone *zone,
1226ff9543fdSMichal Nazarewicz					struct compact_control *cc)
1227ff9543fdSMichal Nazarewicz{
1228e1409c32SJoonsoo Kim	unsigned long block_start_pfn;
1229e1409c32SJoonsoo Kim	unsigned long block_end_pfn;
1230e1409c32SJoonsoo Kim	unsigned long low_pfn;
1231edc2ca61SVlastimil Babka	struct page *page;
1232edc2ca61SVlastimil Babka	const isolate_mode_t isolate_mode =
12335bbe3547SEric B Munson		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
12341d2047feSHugh Dickins		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1235ff9543fdSMichal Nazarewicz
1236edc2ca61SVlastimil Babka	/*
1237edc2ca61SVlastimil Babka	 * Start at where we last stopped, or beginning of the zone as
1238edc2ca61SVlastimil Babka	 * initialized by compact_zone()
1239edc2ca61SVlastimil Babka	 */
1240edc2ca61SVlastimil Babka	low_pfn = cc->migrate_pfn;
124106b6640aSVlastimil Babka	block_start_pfn = pageblock_start_pfn(low_pfn);
1242e1409c32SJoonsoo Kim	if (block_start_pfn < zone->zone_start_pfn)
1243e1409c32SJoonsoo Kim		block_start_pfn = zone->zone_start_pfn;
1244ff9543fdSMichal Nazarewicz
1245ff9543fdSMichal Nazarewicz	/* Only scan within a pageblock boundary */
124606b6640aSVlastimil Babka	block_end_pfn = pageblock_end_pfn(low_pfn);
1247ff9543fdSMichal Nazarewicz
1248edc2ca61SVlastimil Babka	/*
1249edc2ca61SVlastimil Babka	 * Iterate over whole pageblocks until we find the first suitable.
1250edc2ca61SVlastimil Babka	 * Do not cross the free scanner.
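	 *
	 * For illustration (assumed values, pageblock_nr_pages == 512):
	 * starting from low_pfn == 0x12280, the first pass covers the rest
	 * of its pageblock, [0x12280, 0x12400), the next one [0x12400,
	 * 0x12600), and so on while block_end_pfn <= cc->free_pfn.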
1251edc2ca61SVlastimil Babka	 */
1252e1409c32SJoonsoo Kim	for (; block_end_pfn <= cc->free_pfn;
1253e1409c32SJoonsoo Kim			low_pfn = block_end_pfn,
1254e1409c32SJoonsoo Kim			block_start_pfn = block_end_pfn,
1255e1409c32SJoonsoo Kim			block_end_pfn += pageblock_nr_pages) {
1256edc2ca61SVlastimil Babka
1257edc2ca61SVlastimil Babka		/*
1258edc2ca61SVlastimil Babka		 * This can potentially iterate a massively long zone with
1259edc2ca61SVlastimil Babka		 * many pageblocks unsuitable, so periodically check if we
1260edc2ca61SVlastimil Babka		 * need to schedule, or even abort async compaction.
1261edc2ca61SVlastimil Babka		 */
1262edc2ca61SVlastimil Babka		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1263edc2ca61SVlastimil Babka						&& compact_should_abort(cc))
1264edc2ca61SVlastimil Babka			break;
1265edc2ca61SVlastimil Babka
1266e1409c32SJoonsoo Kim		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1267e1409c32SJoonsoo Kim									zone);
12687d49d886SVlastimil Babka		if (!page)
1269edc2ca61SVlastimil Babka			continue;
1270edc2ca61SVlastimil Babka
1271edc2ca61SVlastimil Babka		/* If isolation recently failed, do not retry */
1272edc2ca61SVlastimil Babka		if (!isolation_suitable(cc, page))
1273edc2ca61SVlastimil Babka			continue;
1274edc2ca61SVlastimil Babka
1275edc2ca61SVlastimil Babka		/*
1276edc2ca61SVlastimil Babka		 * For async compaction, also only scan in MOVABLE blocks.
1277edc2ca61SVlastimil Babka		 * Async compaction is optimistic to see if the minimum amount
1278edc2ca61SVlastimil Babka		 * of work satisfies the allocation.
1279edc2ca61SVlastimil Babka		 */
1280b682debdSVlastimil Babka		if (!suitable_migration_source(cc, page))
1281edc2ca61SVlastimil Babka			continue;
1282ff9543fdSMichal Nazarewicz
1283ff9543fdSMichal Nazarewicz		/* Perform the isolation */
1284e1409c32SJoonsoo Kim		low_pfn = isolate_migratepages_block(cc, low_pfn,
1285e1409c32SJoonsoo Kim						block_end_pfn, isolate_mode);
1286edc2ca61SVlastimil Babka
12876afcf8efSMing Ling		if (!low_pfn || cc->contended)
1288ff9543fdSMichal Nazarewicz			return ISOLATE_ABORT;
1289ff9543fdSMichal Nazarewicz
1290edc2ca61SVlastimil Babka		/*
1291edc2ca61SVlastimil Babka		 * Either we isolated something and can proceed with migration,
1292edc2ca61SVlastimil Babka		 * or we failed and compact_zone should decide if we should
1293edc2ca61SVlastimil Babka		 * continue or not.
1294edc2ca61SVlastimil Babka		 */
1295edc2ca61SVlastimil Babka		break;
1296edc2ca61SVlastimil Babka	}
1297edc2ca61SVlastimil Babka
1298f2849aa0SVlastimil Babka	/* Record where the migration scanner will be restarted. */
1299f2849aa0SVlastimil Babka	cc->migrate_pfn = low_pfn;
1300ff9543fdSMichal Nazarewicz
1301edc2ca61SVlastimil Babka	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1302ff9543fdSMichal Nazarewicz}
1303ff9543fdSMichal Nazarewicz
130421c527a3SYaowei Bai/*
130521c527a3SYaowei Bai * order == -1 is expected when compacting via
130621c527a3SYaowei Bai * /proc/sys/vm/compact_memory
130721c527a3SYaowei Bai */
130821c527a3SYaowei Baistatic inline bool is_via_compact_memory(int order)
130921c527a3SYaowei Bai{
131021c527a3SYaowei Bai	return order == -1;
131121c527a3SYaowei Bai}
131221c527a3SYaowei Bai
1313d39773a0SVlastimil Babka static enum compact_result __compact_finished(struct zone *zone,
1314d39773a0SVlastimil Babka						struct compact_control *cc)
1315748446bbSMel Gorman{
13168fb74b9fSMel Gorman	unsigned int order;
1317d39773a0SVlastimil Babka	const int migratetype = cc->migratetype;
131856de7263SMel Gorman
1319be976572SVlastimil Babka	if (cc->contended || fatal_signal_pending(current))
13202d1e1041SVlastimil Babka		return COMPACT_CONTENDED;
1321748446bbSMel Gorman
1322753341a4SMel Gorman	/* Compaction run completes if the migrate and free scanners meet */
1323f2849aa0SVlastimil Babka	if (compact_scanners_met(cc)) {
132455b7c4c9SVlastimil Babka		/* Let the next compaction start anew. */
132502333641SVlastimil Babka		reset_cached_positions(zone);
132655b7c4c9SVlastimil Babka
132762997027SMel Gorman		/*
132862997027SMel Gorman		 * Mark that the PG_migrate_skip information should be cleared
1329accf6242SVlastimil Babka		 * by kswapd when it goes to sleep. kcompactd does not set the
133062997027SMel Gorman		 * flag itself as the decision to clear it should be directly
133162997027SMel Gorman		 * based on an allocation request.
133262997027SMel Gorman		 */
1333accf6242SVlastimil Babka		if (cc->direct_compaction)
133462997027SMel Gorman			zone->compact_blockskip_flush = true;
133562997027SMel Gorman
1336c8f7de0bSMichal Hocko		if (cc->whole_zone)
1337748446bbSMel Gorman			return COMPACT_COMPLETE;
1338c8f7de0bSMichal Hocko		else
1339c8f7de0bSMichal Hocko			return COMPACT_PARTIAL_SKIPPED;
1340bb13ffebSMel Gorman	}
1341748446bbSMel Gorman
134221c527a3SYaowei Bai	if (is_via_compact_memory(cc->order))
134356de7263SMel Gorman		return COMPACT_CONTINUE;
134456de7263SMel Gorman
1345baf6a9a1SVlastimil Babka	if (cc->finishing_block) {
1346baf6a9a1SVlastimil Babka		/*
1347baf6a9a1SVlastimil Babka		 * We have finished the pageblock, but better check again that
1348baf6a9a1SVlastimil Babka		 * we really succeeded.
1349baf6a9a1SVlastimil Babka		 */
1350baf6a9a1SVlastimil Babka		if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1351baf6a9a1SVlastimil Babka			cc->finishing_block = false;
1352baf6a9a1SVlastimil Babka		else
1353baf6a9a1SVlastimil Babka			return COMPACT_CONTINUE;
1354baf6a9a1SVlastimil Babka	}
1355baf6a9a1SVlastimil Babka
135656de7263SMel Gorman	/* Direct compactor: Is a suitable page free?
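	 * That is: for some order >= cc->order, is there a page on the free
	 * list of the right migratetype, or on one we may fall back to or
	 * steal from?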
	 */
135756de7263SMel Gorman	for (order = cc->order; order < MAX_ORDER; order++) {
13588fb74b9fSMel Gorman		struct free_area *area = &zone->free_area[order];
13592149cdaeSJoonsoo Kim		bool can_steal;
13608fb74b9fSMel Gorman
136156de7263SMel Gorman		/* Job done if page is free of the right migratetype */
13626d7ce559SDavid Rientjes		if (!list_empty(&area->free_list[migratetype]))
1363cf378319SVlastimil Babka			return COMPACT_SUCCESS;
136456de7263SMel Gorman
13652149cdaeSJoonsoo Kim#ifdef CONFIG_CMA
13662149cdaeSJoonsoo Kim		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
13672149cdaeSJoonsoo Kim		if (migratetype == MIGRATE_MOVABLE &&
13682149cdaeSJoonsoo Kim			!list_empty(&area->free_list[MIGRATE_CMA]))
1369cf378319SVlastimil Babka			return COMPACT_SUCCESS;
13702149cdaeSJoonsoo Kim#endif
13712149cdaeSJoonsoo Kim		/*
13722149cdaeSJoonsoo Kim		 * Job done if allocation would steal freepages from
13732149cdaeSJoonsoo Kim		 * other migratetype buddy lists.
13742149cdaeSJoonsoo Kim		 */
13752149cdaeSJoonsoo Kim		if (find_suitable_fallback(area, order, migratetype,
1376baf6a9a1SVlastimil Babka						true, &can_steal) != -1) {
1377baf6a9a1SVlastimil Babka
1378baf6a9a1SVlastimil Babka			/* movable pages are OK in any pageblock */
1379baf6a9a1SVlastimil Babka			if (migratetype == MIGRATE_MOVABLE)
1380cf378319SVlastimil Babka				return COMPACT_SUCCESS;
1381baf6a9a1SVlastimil Babka
1382baf6a9a1SVlastimil Babka			/*
1383baf6a9a1SVlastimil Babka			 * We are stealing for a non-movable allocation. Make
1384baf6a9a1SVlastimil Babka			 * sure we finish compacting the current pageblock
1385baf6a9a1SVlastimil Babka			 * first so it is as free as possible and we won't
1386baf6a9a1SVlastimil Babka			 * have to steal another one soon. This only applies
1387baf6a9a1SVlastimil Babka			 * to sync compaction, as async compaction operates
1388baf6a9a1SVlastimil Babka			 * on pageblocks of the same migratetype.
1389baf6a9a1SVlastimil Babka			 */
1390baf6a9a1SVlastimil Babka			if (cc->mode == MIGRATE_ASYNC ||
1391baf6a9a1SVlastimil Babka					IS_ALIGNED(cc->migrate_pfn,
1392baf6a9a1SVlastimil Babka							pageblock_nr_pages)) {
1393baf6a9a1SVlastimil Babka				return COMPACT_SUCCESS;
1394baf6a9a1SVlastimil Babka			}
1395baf6a9a1SVlastimil Babka
1396baf6a9a1SVlastimil Babka			cc->finishing_block = true;
1397baf6a9a1SVlastimil Babka			return COMPACT_CONTINUE;
1398baf6a9a1SVlastimil Babka		}
139956de7263SMel Gorman	}
140056de7263SMel Gorman
1401837d026dSJoonsoo Kim	return COMPACT_NO_SUITABLE_PAGE;
1402837d026dSJoonsoo Kim}
1403837d026dSJoonsoo Kim
1404ea7ab982SMichal Hocko static enum compact_result compact_finished(struct zone *zone,
1405d39773a0SVlastimil Babka			struct compact_control *cc)
1406837d026dSJoonsoo Kim{
1407837d026dSJoonsoo Kim	int ret;
1408837d026dSJoonsoo Kim
1409d39773a0SVlastimil Babka	ret = __compact_finished(zone, cc);
1410837d026dSJoonsoo Kim	trace_mm_compaction_finished(zone, cc->order, ret);
1411837d026dSJoonsoo Kim	if (ret == COMPACT_NO_SUITABLE_PAGE)
1412837d026dSJoonsoo Kim		ret = COMPACT_CONTINUE;
1413837d026dSJoonsoo Kim
1414837d026dSJoonsoo Kim	return ret;
1415748446bbSMel Gorman}
1416748446bbSMel Gorman
14173e7d3449SMel Gorman/*
14183e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now?
14193e7d3449SMel Gorman * Returns
14203e7d3449SMel Gorman *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1421cf378319SVlastimil Babka *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
14223e7d3449SMel Gorman *   COMPACT_CONTINUE - If compaction should run now
14233e7d3449SMel Gorman */
1424ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order,
1425c603844bSMel Gorman					unsigned int alloc_flags,
142686a294a8SMichal Hocko					int classzone_idx,
142786a294a8SMichal Hocko					unsigned long wmark_target)
14283e7d3449SMel Gorman{
14293e7d3449SMel Gorman	unsigned long watermark;
14303e7d3449SMel Gorman
143121c527a3SYaowei Bai	if (is_via_compact_memory(order))
14323957c776SMichal Hocko		return COMPACT_CONTINUE;
14333957c776SMichal Hocko
1434f2b8228cSVlastimil Babka	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1435ebff3980SVlastimil Babka	/*
1436ebff3980SVlastimil Babka	 * If watermarks for high-order allocation are already met, there
1437ebff3980SVlastimil Babka	 * should be no need for compaction at all.
1438ebff3980SVlastimil Babka	 */
1439ebff3980SVlastimil Babka	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1440ebff3980SVlastimil Babka								alloc_flags))
1441cf378319SVlastimil Babka		return COMPACT_SUCCESS;
1442ebff3980SVlastimil Babka
14433957c776SMichal Hocko	/*
14449861a62cSVlastimil Babka	 * Watermarks for order-0 must be met for compaction to be able to
1445984fdba6SVlastimil Babka	 * isolate free pages for migration targets. This means that the
1446984fdba6SVlastimil Babka	 * watermark and alloc_flags have to match, or be more pessimistic than
1447984fdba6SVlastimil Babka	 * the check in __isolate_free_page(). We don't use the direct
1448984fdba6SVlastimil Babka	 * compactor's alloc_flags, as they are not relevant for freepage
1449984fdba6SVlastimil Babka	 * isolation. We however do use the direct compactor's classzone_idx to
1450984fdba6SVlastimil Babka	 * skip over zones where lowmem reserves would prevent allocation even
1451984fdba6SVlastimil Babka	 * if compaction succeeds.
14528348faf9SVlastimil Babka	 * For costly orders, we require low watermark instead of min for
14538348faf9SVlastimil Babka	 * compaction to proceed to increase its chances.
1454d883c6cfSJoonsoo Kim	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1455d883c6cfSJoonsoo Kim	 * suitable migration targets
14563e7d3449SMel Gorman	 */
14578348faf9SVlastimil Babka	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
14588348faf9SVlastimil Babka			low_wmark_pages(zone) : min_wmark_pages(zone);
14598348faf9SVlastimil Babka	watermark += compact_gap(order);
146086a294a8SMichal Hocko	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1461d883c6cfSJoonsoo Kim				 ALLOC_CMA, wmark_target))
14623e7d3449SMel Gorman		return COMPACT_SKIPPED;
14633e7d3449SMel Gorman
1464cc5c9f09SVlastimil Babka	return COMPACT_CONTINUE;
1465cc5c9f09SVlastimil Babka}
1466cc5c9f09SVlastimil Babka
1467cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order,
1468cc5c9f09SVlastimil Babka					unsigned int alloc_flags,
1469cc5c9f09SVlastimil Babka					int classzone_idx)
1470cc5c9f09SVlastimil Babka{
1471cc5c9f09SVlastimil Babka	enum compact_result ret;
1472cc5c9f09SVlastimil Babka	int fragindex;
1473cc5c9f09SVlastimil Babka
1474cc5c9f09SVlastimil Babka	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1475cc5c9f09SVlastimil Babka				    zone_page_state(zone, NR_FREE_PAGES));
14763e7d3449SMel Gorman	/*
14773e7d3449SMel Gorman	 * fragmentation index determines if allocation failures are due to
14783e7d3449SMel Gorman	 * low memory or external fragmentation
14793e7d3449SMel Gorman	 *
1480ebff3980SVlastimil Babka	 * index of -1000 would imply allocations might succeed depending on
1481ebff3980SVlastimil Babka	 * watermarks, but we already failed the high-order watermark check
14823e7d3449SMel Gorman	 * index towards 0 implies failure is due to lack of memory
14833e7d3449SMel Gorman	 * index towards 1000 implies failure is due to fragmentation
14843e7d3449SMel Gorman	 *
148520311420SVlastimil Babka	 * Only compact if a failure would be due to fragmentation. Also
148620311420SVlastimil Babka	 * ignore fragindex for non-costly orders where the alternative to
148720311420SVlastimil Babka	 * a successful reclaim/compaction is OOM. Fragindex and the
148820311420SVlastimil Babka	 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
148920311420SVlastimil Babka	 * excessive compaction for costly orders, but it should not be at the
149020311420SVlastimil Babka	 * expense of system stability.
14913e7d3449SMel Gorman	 */
149220311420SVlastimil Babka	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
14933e7d3449SMel Gorman		fragindex = fragmentation_index(zone, order);
14943e7d3449SMel Gorman		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1495cc5c9f09SVlastimil Babka			ret = COMPACT_NOT_SUITABLE_ZONE;
14963e7d3449SMel Gorman	}
14973e7d3449SMel Gorman
1498837d026dSJoonsoo Kim	trace_mm_compaction_suitable(zone, order, ret);
1499837d026dSJoonsoo Kim	if (ret == COMPACT_NOT_SUITABLE_ZONE)
1500837d026dSJoonsoo Kim		ret = COMPACT_SKIPPED;
1501837d026dSJoonsoo Kim
1502837d026dSJoonsoo Kim	return ret;
1503837d026dSJoonsoo Kim}
1504837d026dSJoonsoo Kim
150586a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
150686a294a8SMichal Hocko		int alloc_flags)
150786a294a8SMichal Hocko{
150886a294a8SMichal Hocko	struct zone *zone;
150986a294a8SMichal Hocko	struct zoneref *z;
151086a294a8SMichal Hocko
151186a294a8SMichal Hocko	/*
151286a294a8SMichal Hocko	 * Make sure at least one zone would pass __compaction_suitable if we
151386a294a8SMichal Hocko	 * continue retrying the reclaim.
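	 *
	 * The per-zone estimate below is deliberately rough; e.g. (assumed
	 * numbers) for order == 3 a zone with 4096 reclaimable pages only
	 * counts 4096 / 3 of them towards wmark_target, on top of a snapshot
	 * of NR_FREE_PAGES.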
151486a294a8SMichal Hocko	 */
151586a294a8SMichal Hocko	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
151686a294a8SMichal Hocko					ac->nodemask) {
151786a294a8SMichal Hocko		unsigned long available;
151886a294a8SMichal Hocko		enum compact_result compact_result;
151986a294a8SMichal Hocko
152086a294a8SMichal Hocko		/*
152186a294a8SMichal Hocko		 * Do not consider all the reclaimable memory because we do not
152286a294a8SMichal Hocko		 * want to thrash just for a single high order allocation which
152386a294a8SMichal Hocko		 * is not even guaranteed to appear even if __compaction_suitable
152486a294a8SMichal Hocko		 * is happy about the watermark check.
152586a294a8SMichal Hocko		 */
15265a1c84b4SMel Gorman		available = zone_reclaimable_pages(zone) / order;
152786a294a8SMichal Hocko		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
152886a294a8SMichal Hocko		compact_result = __compaction_suitable(zone, order, alloc_flags,
152986a294a8SMichal Hocko				ac_classzone_idx(ac), available);
1530cc5c9f09SVlastimil Babka		if (compact_result != COMPACT_SKIPPED)
153186a294a8SMichal Hocko			return true;
153286a294a8SMichal Hocko	}
153386a294a8SMichal Hocko
153486a294a8SMichal Hocko	return false;
153586a294a8SMichal Hocko}
153686a294a8SMichal Hocko
1537ea7ab982SMichal Hocko static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1538748446bbSMel Gorman{
1539ea7ab982SMichal Hocko	enum compact_result ret;
1540c89511abSMel Gorman	unsigned long start_pfn = zone->zone_start_pfn;
1541108bcc96SCody P Schafer	unsigned long end_pfn = zone_end_pfn(zone);
1542e0b9daebSDavid Rientjes	const bool sync = cc->mode != MIGRATE_ASYNC;
1543748446bbSMel Gorman
1544d39773a0SVlastimil Babka	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1545ebff3980SVlastimil Babka	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1546ebff3980SVlastimil Babka							cc->classzone_idx);
15473e7d3449SMel Gorman	/* Compaction is likely to fail */
1548cf378319SVlastimil Babka	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
15493e7d3449SMel Gorman		return ret;
1550c46649deSMichal Hocko
1551c46649deSMichal Hocko	/* huh, compaction_suitable is returning something unexpected */
1552c46649deSMichal Hocko	VM_BUG_ON(ret != COMPACT_CONTINUE);
15533e7d3449SMel Gorman
1554c89511abSMel Gorman	/*
1555d3132e4bSVlastimil Babka	 * Clear pageblock skip if there were failures recently and compaction
1556accf6242SVlastimil Babka	 * is about to be retried after being deferred.
1557d3132e4bSVlastimil Babka	 */
1558accf6242SVlastimil Babka	if (compaction_restarting(zone, cc->order))
1559d3132e4bSVlastimil Babka		__reset_isolation_suitable(zone);
1560d3132e4bSVlastimil Babka
1561d3132e4bSVlastimil Babka	/*
1562c89511abSMel Gorman	 * Setup to move all movable pages to the end of the zone. Use cached
156306ed2998SVlastimil Babka	 * information on where the scanners should start (unless we explicitly
156406ed2998SVlastimil Babka	 * want to compact the whole zone), but check that it is initialised
156506ed2998SVlastimil Babka	 * by ensuring the values are within zone boundaries.
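	 *
	 * E.g. if the cached free pfn lies outside [start_pfn, end_pfn) --
	 * say it is still 0 on a zone that was never compacted (an
	 * illustrative scenario) -- it is re-initialised below to
	 * pageblock_start_pfn(end_pfn - 1).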
1566c89511abSMel Gorman	 */
156706ed2998SVlastimil Babka	if (cc->whole_zone) {
156806ed2998SVlastimil Babka		cc->migrate_pfn = start_pfn;
156906ed2998SVlastimil Babka		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
157006ed2998SVlastimil Babka	} else {
1571e0b9daebSDavid Rientjes		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1572c89511abSMel Gorman		cc->free_pfn = zone->compact_cached_free_pfn;
1573623446e4SJoonsoo Kim		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
157406b6640aSVlastimil Babka			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1575c89511abSMel Gorman			zone->compact_cached_free_pfn = cc->free_pfn;
1576c89511abSMel Gorman		}
1577623446e4SJoonsoo Kim		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1578c89511abSMel Gorman			cc->migrate_pfn = start_pfn;
157935979ef3SDavid Rientjes			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
158035979ef3SDavid Rientjes			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1581c89511abSMel Gorman		}
1582c8f7de0bSMichal Hocko
1583c8f7de0bSMichal Hocko		if (cc->migrate_pfn == start_pfn)
1584c8f7de0bSMichal Hocko			cc->whole_zone = true;
158506ed2998SVlastimil Babka	}
1586c8f7de0bSMichal Hocko
15871a16718cSJoonsoo Kim	cc->last_migrated_pfn = 0;
1588748446bbSMel Gorman
158916c4a097SJoonsoo Kim	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
159016c4a097SJoonsoo Kim				cc->free_pfn, end_pfn, sync);
15910eb927c0SMel Gorman
1592748446bbSMel Gorman	migrate_prep_local();
1593748446bbSMel Gorman
1594d39773a0SVlastimil Babka	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
15959d502c1cSMinchan Kim		int err;
1596748446bbSMel Gorman
1597f9e35b3bSMel Gorman		switch (isolate_migratepages(zone, cc)) {
1598f9e35b3bSMel Gorman		case ISOLATE_ABORT:
15992d1e1041SVlastimil Babka			ret = COMPACT_CONTENDED;
16005733c7d1SRafael Aquini			putback_movable_pages(&cc->migratepages);
1601e64c5237SShaohua Li			cc->nr_migratepages = 0;
1602f9e35b3bSMel Gorman			goto out;
1603f9e35b3bSMel Gorman		case ISOLATE_NONE:
1604fdaf7f5cSVlastimil Babka			/*
1605fdaf7f5cSVlastimil Babka			 * We haven't isolated and migrated anything, but
1606fdaf7f5cSVlastimil Babka			 * there might still be unflushed migrations from
1607fdaf7f5cSVlastimil Babka			 * previous cc->order aligned block.
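			 * Fall through to the check_drain logic below
			 * rather than looping again immediately.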
1608fdaf7f5cSVlastimil Babka			 */
1609fdaf7f5cSVlastimil Babka			goto check_drain;
1610f9e35b3bSMel Gorman		case ISOLATE_SUCCESS:
1611f9e35b3bSMel Gorman			;
1612f9e35b3bSMel Gorman		}
1613748446bbSMel Gorman
1614d53aea3dSDavid Rientjes		err = migrate_pages(&cc->migratepages, compaction_alloc,
1615e0b9daebSDavid Rientjes				compaction_free, (unsigned long)cc, cc->mode,
16167b2a2d4aSMel Gorman				MR_COMPACTION);
1617748446bbSMel Gorman
1618f8c9301fSVlastimil Babka		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1619f8c9301fSVlastimil Babka							&cc->migratepages);
1620748446bbSMel Gorman
1621f8c9301fSVlastimil Babka		/* All pages were either migrated or will be released */
1622f8c9301fSVlastimil Babka		cc->nr_migratepages = 0;
16239d502c1cSMinchan Kim		if (err) {
16245733c7d1SRafael Aquini			putback_movable_pages(&cc->migratepages);
16257ed695e0SVlastimil Babka			/*
16267ed695e0SVlastimil Babka			 * migrate_pages() may return -ENOMEM when scanners meet
16277ed695e0SVlastimil Babka			 * and we want compact_finished() to detect it
16287ed695e0SVlastimil Babka			 */
1629f2849aa0SVlastimil Babka			if (err == -ENOMEM && !compact_scanners_met(cc)) {
16302d1e1041SVlastimil Babka				ret = COMPACT_CONTENDED;
16314bf2bba3SDavid Rientjes				goto out;
1632748446bbSMel Gorman			}
1633fdd048e1SVlastimil Babka			/*
1634fdd048e1SVlastimil Babka			 * We failed to migrate at least one page in the current
1635fdd048e1SVlastimil Babka			 * order-aligned block, so skip the rest of it.
1636fdd048e1SVlastimil Babka			 */
1637fdd048e1SVlastimil Babka			if (cc->direct_compaction &&
1638fdd048e1SVlastimil Babka						(cc->mode == MIGRATE_ASYNC)) {
1639fdd048e1SVlastimil Babka				cc->migrate_pfn = block_end_pfn(
1640fdd048e1SVlastimil Babka						cc->migrate_pfn - 1, cc->order);
1641fdd048e1SVlastimil Babka				/* Draining pcplists is useless in this case */
1642fdd048e1SVlastimil Babka				cc->last_migrated_pfn = 0;
1643fdd048e1SVlastimil Babka
1644fdd048e1SVlastimil Babka			}
16454bf2bba3SDavid Rientjes		}
1646fdaf7f5cSVlastimil Babka
1647fdaf7f5cSVlastimil Babka check_drain:
1648fdaf7f5cSVlastimil Babka		/*
1649fdaf7f5cSVlastimil Babka		 * Has the migration scanner moved away from the previous
1650fdaf7f5cSVlastimil Babka		 * cc->order aligned block where we migrated from? If yes,
1651fdaf7f5cSVlastimil Babka		 * flush the pages that were freed, so that they can merge and
1652fdaf7f5cSVlastimil Babka		 * compact_finished() can detect immediately if allocation
1653fdaf7f5cSVlastimil Babka		 * would succeed.
1654fdaf7f5cSVlastimil Babka		 */
16551a16718cSJoonsoo Kim		if (cc->order > 0 && cc->last_migrated_pfn) {
1656fdaf7f5cSVlastimil Babka			int cpu;
1657fdaf7f5cSVlastimil Babka			unsigned long current_block_start =
165806b6640aSVlastimil Babka				block_start_pfn(cc->migrate_pfn, cc->order);
1659fdaf7f5cSVlastimil Babka
16601a16718cSJoonsoo Kim			if (cc->last_migrated_pfn < current_block_start) {
1661fdaf7f5cSVlastimil Babka				cpu = get_cpu();
1662fdaf7f5cSVlastimil Babka				lru_add_drain_cpu(cpu);
1663fdaf7f5cSVlastimil Babka				drain_local_pages(zone);
1664fdaf7f5cSVlastimil Babka				put_cpu();
1665fdaf7f5cSVlastimil Babka				/* No more flushing until we migrate again */
16661a16718cSJoonsoo Kim				cc->last_migrated_pfn = 0;
1667fdaf7f5cSVlastimil Babka			}
1668fdaf7f5cSVlastimil Babka		}
1669fdaf7f5cSVlastimil Babka
1670748446bbSMel Gorman	}
1671748446bbSMel Gorman
1672f9e35b3bSMel Gormanout:
16736bace090SVlastimil Babka	/*
16746bace090SVlastimil Babka	 * Release free pages and update where the free scanner should restart,
16756bace090SVlastimil Babka	 * so we don't leave any returned pages behind in the next attempt.
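	 *
	 * For illustration (assumed value): if the highest returned page has
	 * pfn 0x12345, the restart hint becomes pageblock_start_pfn(0x12345)
	 * == 0x12200 with 512-page pageblocks, and the zone cache is only
	 * updated if that is above the value already cached.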
16766bace090SVlastimil Babka	 */
16776bace090SVlastimil Babka	if (cc->nr_freepages > 0) {
16786bace090SVlastimil Babka		unsigned long free_pfn = release_freepages(&cc->freepages);
16796bace090SVlastimil Babka
16806bace090SVlastimil Babka		cc->nr_freepages = 0;
16816bace090SVlastimil Babka		VM_BUG_ON(free_pfn == 0);
16826bace090SVlastimil Babka		/* The cached pfn is always the first in a pageblock */
168306b6640aSVlastimil Babka		free_pfn = pageblock_start_pfn(free_pfn);
16846bace090SVlastimil Babka		/*
16856bace090SVlastimil Babka		 * Only go back, not forward. The cached pfn might have been
16866bace090SVlastimil Babka		 * already reset to zone end in compact_finished()
16876bace090SVlastimil Babka		 */
16886bace090SVlastimil Babka		if (free_pfn > zone->compact_cached_free_pfn)
16896bace090SVlastimil Babka			zone->compact_cached_free_pfn = free_pfn;
16906bace090SVlastimil Babka	}
1691748446bbSMel Gorman
16927f354a54SDavid Rientjes	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
16937f354a54SDavid Rientjes	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
16947f354a54SDavid Rientjes
169516c4a097SJoonsoo Kim	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
169616c4a097SJoonsoo Kim				cc->free_pfn, end_pfn, sync, ret);
16970eb927c0SMel Gorman
1698748446bbSMel Gorman	return ret;
1699748446bbSMel Gorman}
170076ab0f53SMel Gorman
1701ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order,
1702c3486f53SVlastimil Babka		gfp_t gfp_mask, enum compact_priority prio,
1703c603844bSMel Gorman		unsigned int alloc_flags, int classzone_idx)
170456de7263SMel Gorman{
1705ea7ab982SMichal Hocko	enum compact_result ret;
170656de7263SMel Gorman	struct compact_control cc = {
170756de7263SMel Gorman		.nr_freepages = 0,
170856de7263SMel Gorman		.nr_migratepages = 0,
17097f354a54SDavid Rientjes		.total_migrate_scanned = 0,
17107f354a54SDavid Rientjes		.total_free_scanned = 0,
171156de7263SMel Gorman		.order = order,
17126d7ce559SDavid Rientjes		.gfp_mask = gfp_mask,
171356de7263SMel Gorman		.zone = zone,
1714a5508cd8SVlastimil Babka		.mode = (prio == COMPACT_PRIO_ASYNC) ?
1715a5508cd8SVlastimil Babka					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
1716ebff3980SVlastimil Babka		.alloc_flags = alloc_flags,
1717ebff3980SVlastimil Babka		.classzone_idx = classzone_idx,
1718accf6242SVlastimil Babka		.direct_compaction = true,
1719a8e025e5SVlastimil Babka		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
17209f7e3387SVlastimil Babka		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
17219f7e3387SVlastimil Babka		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
172256de7263SMel Gorman	};
172356de7263SMel Gorman	INIT_LIST_HEAD(&cc.freepages);
172456de7263SMel Gorman	INIT_LIST_HEAD(&cc.migratepages);
172556de7263SMel Gorman
1726e64c5237SShaohua Li	ret = compact_zone(zone, &cc);
1727e64c5237SShaohua Li
1728e64c5237SShaohua Li	VM_BUG_ON(!list_empty(&cc.freepages));
1729e64c5237SShaohua Li	VM_BUG_ON(!list_empty(&cc.migratepages));
1730e64c5237SShaohua Li
1731e64c5237SShaohua Li	return ret;
173256de7263SMel Gorman}
173356de7263SMel Gorman
17345e771905SMel Gormanint sysctl_extfrag_threshold = 500;
17355e771905SMel Gorman
173656de7263SMel Gorman/**
173756de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation
173856de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation
17391a6d53a1SVlastimil Babka * @order: The order of the current allocation
17401a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation
17411a6d53a1SVlastimil Babka * @ac: The context of current allocation
1742112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed
174356de7263SMel Gorman *
174456de7263SMel Gorman * This is the main entry point for direct page compaction.
174556de7263SMel Gorman */
1746ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1747c603844bSMel Gorman		unsigned int alloc_flags, const struct alloc_context *ac,
1748c3486f53SVlastimil Babka		enum compact_priority prio)
174956de7263SMel Gorman{
175056de7263SMel Gorman	int may_perform_io = gfp_mask & __GFP_IO;
175156de7263SMel Gorman	struct zoneref *z;
175256de7263SMel Gorman	struct zone *zone;
17531d4746d3SMichal Hocko	enum compact_result rc = COMPACT_SKIPPED;
175456de7263SMel Gorman
175573e64c51SMichal Hocko	/*
175673e64c51SMichal Hocko	 * Check if the GFP flags allow compaction - GFP_NOIO is really
175773e64c51SMichal Hocko	 * tricky context because the migration might require IO
175873e64c51SMichal Hocko	 */
175973e64c51SMichal Hocko	if (!may_perform_io)
176053853e2dSVlastimil Babka		return COMPACT_SKIPPED;
176156de7263SMel Gorman
1762a5508cd8SVlastimil Babka	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
1763837d026dSJoonsoo Kim
176456de7263SMel Gorman	/* Compact each zone in the list */
17651a6d53a1SVlastimil Babka	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
17661a6d53a1SVlastimil Babka								ac->nodemask) {
1767ea7ab982SMichal Hocko		enum compact_result status;
176856de7263SMel Gorman
1769a8e025e5SVlastimil Babka		if (prio > MIN_COMPACT_PRIORITY
1770a8e025e5SVlastimil Babka					&& compaction_deferred(zone, order)) {
17711d4746d3SMichal Hocko			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
177253853e2dSVlastimil Babka			continue;
17731d4746d3SMichal Hocko		}
177453853e2dSVlastimil Babka
1775a5508cd8SVlastimil Babka		status = compact_zone_order(zone, order, gfp_mask, prio,
1776c3486f53SVlastimil Babka				alloc_flags, ac_classzone_idx(ac));
177756de7263SMel Gorman		rc = max(status, rc);
177856de7263SMel Gorman
17797ceb009aSVlastimil Babka		/* The allocation should succeed, stop compacting */
17807ceb009aSVlastimil Babka		if (status == COMPACT_SUCCESS) {
178153853e2dSVlastimil Babka			/*
178253853e2dSVlastimil Babka			 * We think the allocation will succeed in this zone,
178353853e2dSVlastimil Babka			 * but it is not certain, hence the false. The caller
178453853e2dSVlastimil Babka			 * will repeat this with true if allocation indeed
178553853e2dSVlastimil Babka			 * succeeds in this zone.
178653853e2dSVlastimil Babka			 */
178753853e2dSVlastimil Babka			compaction_defer_reset(zone, order, false);
17881f9efdefSVlastimil Babka
1789c3486f53SVlastimil Babka			break;
17901f9efdefSVlastimil Babka		}
17911f9efdefSVlastimil Babka
1792a5508cd8SVlastimil Babka		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
1793c3486f53SVlastimil Babka					status == COMPACT_PARTIAL_SKIPPED))
179453853e2dSVlastimil Babka			/*
179553853e2dSVlastimil Babka			 * We think that allocation won't succeed in this zone
179653853e2dSVlastimil Babka			 * so we defer compaction there. If it ends up
179753853e2dSVlastimil Babka			 * succeeding after all, it will be reset.
179853853e2dSVlastimil Babka			 */
179953853e2dSVlastimil Babka			defer_compaction(zone, order);
18001f9efdefSVlastimil Babka
18011f9efdefSVlastimil Babka		/*
18021f9efdefSVlastimil Babka		 * We might have stopped compacting due to need_resched() in
18031f9efdefSVlastimil Babka		 * async compaction, or due to a fatal signal detected. In that
1804c3486f53SVlastimil Babka		 * case do not try further zones
18051f9efdefSVlastimil Babka		 */
1806c3486f53SVlastimil Babka		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1807c3486f53SVlastimil Babka					|| fatal_signal_pending(current))
18081f9efdefSVlastimil Babka			break;
18091f9efdefSVlastimil Babka	}
18101f9efdefSVlastimil Babka
181156de7263SMel Gorman	return rc;
181256de7263SMel Gorman}
181356de7263SMel Gorman
181456de7263SMel Gorman
181576ab0f53SMel Gorman/* Compact all zones within a node */
18167103f16dSAndrew Morton static void compact_node(int nid)
18177be62de9SRik van Riel{
1818791cae96SVlastimil Babka	pg_data_t *pgdat = NODE_DATA(nid);
1819791cae96SVlastimil Babka	int zoneid;
1820791cae96SVlastimil Babka	struct zone *zone;
18217be62de9SRik van Riel	struct compact_control cc = {
18227be62de9SRik van Riel		.order = -1,
18237f354a54SDavid Rientjes		.total_migrate_scanned = 0,
18247f354a54SDavid Rientjes		.total_free_scanned = 0,
1825e0b9daebSDavid Rientjes		.mode = MIGRATE_SYNC,
182691ca9186SDavid Rientjes		.ignore_skip_hint = true,
182706ed2998SVlastimil Babka		.whole_zone = true,
182873e64c51SMichal Hocko		.gfp_mask = GFP_KERNEL,
18297be62de9SRik van Riel	};
18307be62de9SRik van Riel
1831791cae96SVlastimil Babka
1832791cae96SVlastimil Babka	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1833791cae96SVlastimil Babka
1834791cae96SVlastimil Babka		zone = &pgdat->node_zones[zoneid];
1835791cae96SVlastimil Babka		if (!populated_zone(zone))
1836791cae96SVlastimil Babka			continue;
1837791cae96SVlastimil Babka
1838791cae96SVlastimil Babka		cc.nr_freepages = 0;
1839791cae96SVlastimil Babka		cc.nr_migratepages = 0;
1840791cae96SVlastimil Babka		cc.zone = zone;
1841791cae96SVlastimil Babka		INIT_LIST_HEAD(&cc.freepages);
1842791cae96SVlastimil Babka		INIT_LIST_HEAD(&cc.migratepages);
1843791cae96SVlastimil Babka
1844791cae96SVlastimil Babka		compact_zone(zone, &cc);
1845791cae96SVlastimil Babka
1846791cae96SVlastimil Babka		VM_BUG_ON(!list_empty(&cc.freepages));
1847791cae96SVlastimil Babka		VM_BUG_ON(!list_empty(&cc.migratepages));
1848791cae96SVlastimil Babka	}
18497be62de9SRik van Riel}
18507be62de9SRik van Riel
185176ab0f53SMel Gorman/* Compact all nodes in the system */
18527964c06dSJason Liu static void compact_nodes(void)
185376ab0f53SMel Gorman{
185476ab0f53SMel Gorman	int nid;
185576ab0f53SMel Gorman
18568575ec29SHugh Dickins	/* Flush pending updates to the LRU lists */
18578575ec29SHugh Dickins	lru_add_drain_all();
18588575ec29SHugh Dickins
185976ab0f53SMel Gorman	for_each_online_node(nid)
186076ab0f53SMel Gorman		compact_node(nid);
186176ab0f53SMel Gorman}
186276ab0f53SMel Gorman
186376ab0f53SMel Gorman/* The written value is actually unused, all memory is compacted */
186476ab0f53SMel Gormanint sysctl_compact_memory;
186576ab0f53SMel Gorman
1866fec4eb2cSYaowei Bai/*
1867fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via
1868fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory
1869fec4eb2cSYaowei Bai */
187076ab0f53SMel Gormanint sysctl_compaction_handler(struct ctl_table *table, int write,
187176ab0f53SMel Gorman			void __user *buffer, size_t *length, loff_t *ppos)
187276ab0f53SMel Gorman{
187376ab0f53SMel Gorman	if (write)
18747964c06dSJason Liu		compact_nodes();
187576ab0f53SMel Gorman
187676ab0f53SMel Gorman	return 0;
187776ab0f53SMel Gorman}
1878ed4a6d7fSMel Gorman
18795e771905SMel Gormanint sysctl_extfrag_handler(struct ctl_table *table, int write,
18805e771905SMel Gorman			void __user *buffer, size_t *length, loff_t *ppos)
18815e771905SMel Gorman{
18825e771905SMel Gorman	proc_dointvec_minmax(table, write, buffer, length, ppos);
18835e771905SMel Gorman
18845e771905SMel Gorman	return 0;
18855e771905SMel Gorman}
18865e771905SMel Gorman
1887ed4a6d7fSMel Gorman#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
188874e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
188910fbcf4cSKay Sievers			struct device_attribute *attr,
1890ed4a6d7fSMel Gorman			const char *buf, size_t count)
1891ed4a6d7fSMel Gorman{
18928575ec29SHugh Dickins	int nid = dev->id;
18938575ec29SHugh Dickins
18948575ec29SHugh Dickins	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
18958575ec29SHugh Dickins		/* Flush pending updates to the LRU lists */
18968575ec29SHugh Dickins		lru_add_drain_all();
18978575ec29SHugh Dickins
18988575ec29SHugh Dickins		compact_node(nid);
18998575ec29SHugh Dickins	}
1900ed4a6d7fSMel Gorman
1901ed4a6d7fSMel Gorman	return count;
1902ed4a6d7fSMel Gorman}
19030825a6f9SJoe Perches static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
1904ed4a6d7fSMel Gorman
1905ed4a6d7fSMel Gormanint compaction_register_node(struct node *node)
1906ed4a6d7fSMel Gorman{
190710fbcf4cSKay Sievers	return device_create_file(&node->dev, &dev_attr_compact);
1908ed4a6d7fSMel Gorman}
1909ed4a6d7fSMel Gorman
1910ed4a6d7fSMel Gormanvoid compaction_unregister_node(struct node *node)
1911ed4a6d7fSMel Gorman{
191210fbcf4cSKay Sievers	return device_remove_file(&node->dev, &dev_attr_compact);
1913ed4a6d7fSMel Gorman}
1914ed4a6d7fSMel Gorman#endif /* CONFIG_SYSFS && CONFIG_NUMA */
1915ff9543fdSMichal Nazarewicz
1916698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1917698b1b30SVlastimil Babka{
1918172400c6SVlastimil Babka	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1919698b1b30SVlastimil Babka}
1920698b1b30SVlastimil Babka
1921698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat)
1922698b1b30SVlastimil Babka{
1923698b1b30SVlastimil Babka	int zoneid;
1924698b1b30SVlastimil Babka	struct zone *zone;
1925698b1b30SVlastimil Babka	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1926698b1b30SVlastimil Babka
19276cd9dc3eSChen Feng	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1928698b1b30SVlastimil Babka		zone = &pgdat->node_zones[zoneid];
1929698b1b30SVlastimil Babka
1930698b1b30SVlastimil Babka		if (!populated_zone(zone))
1931698b1b30SVlastimil Babka			continue;
1932698b1b30SVlastimil Babka
1933698b1b30SVlastimil Babka		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1934698b1b30SVlastimil Babka					classzone_idx) == COMPACT_CONTINUE)
1935698b1b30SVlastimil Babka			return true;
1936698b1b30SVlastimil Babka	}
1937698b1b30SVlastimil Babka
1938698b1b30SVlastimil Babka	return false;
1939698b1b30SVlastimil Babka}
1940698b1b30SVlastimil Babka
1941698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat)
1942698b1b30SVlastimil Babka{
1943698b1b30SVlastimil Babka	/*
1944698b1b30SVlastimil Babka	 * With no special task, compact all zones so that a page of requested
1945698b1b30SVlastimil Babka	 * order is allocatable.
1946698b1b30SVlastimil Babka	 */
1947698b1b30SVlastimil Babka	int zoneid;
1948698b1b30SVlastimil Babka	struct zone *zone;
1949698b1b30SVlastimil Babka	struct compact_control cc = {
1950698b1b30SVlastimil Babka		.order = pgdat->kcompactd_max_order,
19517f354a54SDavid Rientjes		.total_migrate_scanned = 0,
19527f354a54SDavid Rientjes		.total_free_scanned = 0,
1953698b1b30SVlastimil Babka		.classzone_idx = pgdat->kcompactd_classzone_idx,
1954698b1b30SVlastimil Babka		.mode = MIGRATE_SYNC_LIGHT,
1955a0647dc9SDavid Rientjes		.ignore_skip_hint = false,
195673e64c51SMichal Hocko		.gfp_mask = GFP_KERNEL,
1957698b1b30SVlastimil Babka	};
1958698b1b30SVlastimil Babka	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1959698b1b30SVlastimil Babka							cc.classzone_idx);
19607f354a54SDavid Rientjes	count_compact_event(KCOMPACTD_WAKE);
1961698b1b30SVlastimil Babka
19626cd9dc3eSChen Feng	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1963698b1b30SVlastimil Babka		int status;
1964698b1b30SVlastimil Babka
1965698b1b30SVlastimil Babka		zone = &pgdat->node_zones[zoneid];
1966698b1b30SVlastimil Babka		if (!populated_zone(zone))
1967698b1b30SVlastimil Babka			continue;
1968698b1b30SVlastimil Babka
1969698b1b30SVlastimil Babka		if (compaction_deferred(zone, cc.order))
1970698b1b30SVlastimil Babka			continue;
1971698b1b30SVlastimil Babka
1972698b1b30SVlastimil Babka		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1973698b1b30SVlastimil Babka						COMPACT_CONTINUE)
1974698b1b30SVlastimil Babka			continue;
1975698b1b30SVlastimil Babka
1976698b1b30SVlastimil Babka		cc.nr_freepages = 0;
1977698b1b30SVlastimil Babka		cc.nr_migratepages = 0;
19787f354a54SDavid Rientjes		cc.total_migrate_scanned = 0;
19797f354a54SDavid Rientjes		cc.total_free_scanned = 0;
1980698b1b30SVlastimil Babka		cc.zone = zone;
1981698b1b30SVlastimil Babka		INIT_LIST_HEAD(&cc.freepages);
1982698b1b30SVlastimil Babka		INIT_LIST_HEAD(&cc.migratepages);
1983698b1b30SVlastimil Babka
1984172400c6SVlastimil Babka		if (kthread_should_stop())
1985172400c6SVlastimil Babka			return;
1986698b1b30SVlastimil Babka		status = compact_zone(zone, &cc);
1987698b1b30SVlastimil Babka
19887ceb009aSVlastimil Babka		if (status == COMPACT_SUCCESS) {
1989698b1b30SVlastimil Babka			compaction_defer_reset(zone, cc.order, false);
1990c8f7de0bSMichal Hocko		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1991698b1b30SVlastimil Babka			/*
1992bc3106b2SDavid Rientjes			 * Buddy pages may become stranded on pcps that could
1993bc3106b2SDavid Rientjes			 * otherwise coalesce on the zone's free area for
1994bc3106b2SDavid Rientjes			 * order >= cc.order. This is ratelimited by the
1995bc3106b2SDavid Rientjes			 * upcoming deferral.
1996bc3106b2SDavid Rientjes			 */
1997bc3106b2SDavid Rientjes			drain_all_pages(zone);
1998bc3106b2SDavid Rientjes
1999bc3106b2SDavid Rientjes			/*
2000698b1b30SVlastimil Babka			 * We use sync migration mode here, so we defer like
2001698b1b30SVlastimil Babka			 * sync direct compaction does.
2002698b1b30SVlastimil Babka			 */
2003698b1b30SVlastimil Babka			defer_compaction(zone, cc.order);
2004698b1b30SVlastimil Babka		}
2005698b1b30SVlastimil Babka
20067f354a54SDavid Rientjes		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
20077f354a54SDavid Rientjes				     cc.total_migrate_scanned);
20087f354a54SDavid Rientjes		count_compact_events(KCOMPACTD_FREE_SCANNED,
20097f354a54SDavid Rientjes				     cc.total_free_scanned);
20107f354a54SDavid Rientjes
2011698b1b30SVlastimil Babka		VM_BUG_ON(!list_empty(&cc.freepages));
2012698b1b30SVlastimil Babka		VM_BUG_ON(!list_empty(&cc.migratepages));
2013698b1b30SVlastimil Babka	}
2014698b1b30SVlastimil Babka
2015698b1b30SVlastimil Babka	/*
2016698b1b30SVlastimil Babka	 * Regardless of success, we are done until woken up next. But remember
2017698b1b30SVlastimil Babka	 * the requested order/classzone_idx in case it was higher/tighter than
2018698b1b30SVlastimil Babka	 * our current ones
2019698b1b30SVlastimil Babka	 */
2020698b1b30SVlastimil Babka	if (pgdat->kcompactd_max_order <= cc.order)
2021698b1b30SVlastimil Babka		pgdat->kcompactd_max_order = 0;
2022698b1b30SVlastimil Babka	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2023698b1b30SVlastimil Babka		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2024698b1b30SVlastimil Babka}
2025698b1b30SVlastimil Babka
2026698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2027698b1b30SVlastimil Babka{
2028698b1b30SVlastimil Babka	if (!order)
2029698b1b30SVlastimil Babka		return;
2030698b1b30SVlastimil Babka
2031698b1b30SVlastimil Babka	if (pgdat->kcompactd_max_order < order)
2032698b1b30SVlastimil Babka		pgdat->kcompactd_max_order = order;
2033698b1b30SVlastimil Babka
2034698b1b30SVlastimil Babka	if (pgdat->kcompactd_classzone_idx > classzone_idx)
2035698b1b30SVlastimil Babka		pgdat->kcompactd_classzone_idx = classzone_idx;
2036698b1b30SVlastimil Babka
20376818600fSDavidlohr Bueso	/*
20386818600fSDavidlohr Bueso	 * Pairs with implicit barrier in wait_event_freezable()
20396818600fSDavidlohr Bueso	 * such that wakeups are not missed.
20406818600fSDavidlohr Bueso	 */
20416818600fSDavidlohr Bueso	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2042698b1b30SVlastimil Babka		return;
2043698b1b30SVlastimil Babka
2044698b1b30SVlastimil Babka	if (!kcompactd_node_suitable(pgdat))
2045698b1b30SVlastimil Babka		return;
2046698b1b30SVlastimil Babka
2047698b1b30SVlastimil Babka	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2048698b1b30SVlastimil Babka							classzone_idx);
2049698b1b30SVlastimil Babka	wake_up_interruptible(&pgdat->kcompactd_wait);
2050698b1b30SVlastimil Babka}
2051698b1b30SVlastimil Babka
2052698b1b30SVlastimil Babka/*
2053698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread
2054698b1b30SVlastimil Babka * from the init process.
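 *
 * Sketch of the loop below: sleep on kcompactd_wait until
 * wakeup_kcompactd() posts work, run kcompactd_do_work(), and account
 * the stall to PSI via psi_memstall_enter()/psi_memstall_leave().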
2055698b1b30SVlastimil Babka */
2056698b1b30SVlastimil Babka static int kcompactd(void *p)
2057698b1b30SVlastimil Babka{
2058698b1b30SVlastimil Babka	pg_data_t *pgdat = (pg_data_t*)p;
2059698b1b30SVlastimil Babka	struct task_struct *tsk = current;
2060698b1b30SVlastimil Babka
2061698b1b30SVlastimil Babka	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2062698b1b30SVlastimil Babka
2063698b1b30SVlastimil Babka	if (!cpumask_empty(cpumask))
2064698b1b30SVlastimil Babka		set_cpus_allowed_ptr(tsk, cpumask);
2065698b1b30SVlastimil Babka
2066698b1b30SVlastimil Babka	set_freezable();
2067698b1b30SVlastimil Babka
2068698b1b30SVlastimil Babka	pgdat->kcompactd_max_order = 0;
2069698b1b30SVlastimil Babka	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2070698b1b30SVlastimil Babka
2071698b1b30SVlastimil Babka	while (!kthread_should_stop()) {
2072*eb414681SJohannes Weiner		unsigned long pflags;
2073*eb414681SJohannes Weiner
2074698b1b30SVlastimil Babka		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2075698b1b30SVlastimil Babka		wait_event_freezable(pgdat->kcompactd_wait,
2076698b1b30SVlastimil Babka				kcompactd_work_requested(pgdat));
2077698b1b30SVlastimil Babka
2078*eb414681SJohannes Weiner		psi_memstall_enter(&pflags);
2079698b1b30SVlastimil Babka		kcompactd_do_work(pgdat);
2080*eb414681SJohannes Weiner		psi_memstall_leave(&pflags);
2081698b1b30SVlastimil Babka	}
2082698b1b30SVlastimil Babka
2083698b1b30SVlastimil Babka	return 0;
2084698b1b30SVlastimil Babka}
2085698b1b30SVlastimil Babka
2086698b1b30SVlastimil Babka/*
2087698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add.
2088698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2089698b1b30SVlastimil Babka */
2090698b1b30SVlastimil Babka int kcompactd_run(int nid)
2091698b1b30SVlastimil Babka{
2092698b1b30SVlastimil Babka	pg_data_t *pgdat = NODE_DATA(nid);
2093698b1b30SVlastimil Babka	int ret = 0;
2094698b1b30SVlastimil Babka
2095698b1b30SVlastimil Babka	if (pgdat->kcompactd)
2096698b1b30SVlastimil Babka		return 0;
2097698b1b30SVlastimil Babka
2098698b1b30SVlastimil Babka	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2099698b1b30SVlastimil Babka	if (IS_ERR(pgdat->kcompactd)) {
2100698b1b30SVlastimil Babka		pr_err("Failed to start kcompactd on node %d\n", nid);
2101698b1b30SVlastimil Babka		ret = PTR_ERR(pgdat->kcompactd);
2102698b1b30SVlastimil Babka		pgdat->kcompactd = NULL;
2103698b1b30SVlastimil Babka	}
2104698b1b30SVlastimil Babka	return ret;
2105698b1b30SVlastimil Babka}
2106698b1b30SVlastimil Babka
2107698b1b30SVlastimil Babka/*
2108698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must
2109698b1b30SVlastimil Babka * hold mem_hotplug_begin/end().
2110698b1b30SVlastimil Babka */
2111698b1b30SVlastimil Babka void kcompactd_stop(int nid)
2112698b1b30SVlastimil Babka{
2113698b1b30SVlastimil Babka	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2114698b1b30SVlastimil Babka
2115698b1b30SVlastimil Babka	if (kcompactd) {
2116698b1b30SVlastimil Babka		kthread_stop(kcompactd);
2117698b1b30SVlastimil Babka		NODE_DATA(nid)->kcompactd = NULL;
2118698b1b30SVlastimil Babka	}
2119698b1b30SVlastimil Babka}
2120698b1b30SVlastimil Babka
2121698b1b30SVlastimil Babka/*
2122698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but
2123698b1b30SVlastimil Babka * not required for correctness. So if the last cpu in a node goes
2124698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back,
2125698b1b30SVlastimil Babka * restore their cpu bindings.
2126698b1b30SVlastimil Babka */
2127e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu)
2128698b1b30SVlastimil Babka{
2129698b1b30SVlastimil Babka	int nid;
2130698b1b30SVlastimil Babka
2131698b1b30SVlastimil Babka	for_each_node_state(nid, N_MEMORY) {
2132698b1b30SVlastimil Babka		pg_data_t *pgdat = NODE_DATA(nid);
2133698b1b30SVlastimil Babka		const struct cpumask *mask;
2134698b1b30SVlastimil Babka
2135698b1b30SVlastimil Babka		mask = cpumask_of_node(pgdat->node_id);
2136698b1b30SVlastimil Babka
2137698b1b30SVlastimil Babka		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2138698b1b30SVlastimil Babka			/* One of our CPUs online: restore mask */
2139698b1b30SVlastimil Babka			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2140698b1b30SVlastimil Babka	}
2141e46b1db2SAnna-Maria Gleixner	return 0;
2142698b1b30SVlastimil Babka}
2143698b1b30SVlastimil Babka
2144698b1b30SVlastimil Babka static int __init kcompactd_init(void)
2145698b1b30SVlastimil Babka{
2146698b1b30SVlastimil Babka	int nid;
2147e46b1db2SAnna-Maria Gleixner	int ret;
2148e46b1db2SAnna-Maria Gleixner
2149e46b1db2SAnna-Maria Gleixner	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2150e46b1db2SAnna-Maria Gleixner					"mm/compaction:online",
2151e46b1db2SAnna-Maria Gleixner					kcompactd_cpu_online, NULL);
2152e46b1db2SAnna-Maria Gleixner	if (ret < 0) {
2153e46b1db2SAnna-Maria Gleixner		pr_err("kcompactd: failed to register hotplug callbacks.\n");
2154e46b1db2SAnna-Maria Gleixner		return ret;
2155e46b1db2SAnna-Maria Gleixner	}
2156698b1b30SVlastimil Babka
2157698b1b30SVlastimil Babka	for_each_node_state(nid, N_MEMORY)
2158698b1b30SVlastimil Babka		kcompactd_run(nid);
2159698b1b30SVlastimil Babka	return 0;
2160698b1b30SVlastimil Babka}
2161698b1b30SVlastimil Babka subsys_initcall(kcompactd_init)
2162698b1b30SVlastimil Babka
2163ff9543fdSMichal Nazarewicz#endif /* CONFIG_COMPACTION */
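
/*
 * Usage note (illustration, not part of the original source): the
 * whole-system compaction paths above can be exercised from userspace
 * via the interfaces this file registers, e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * for all nodes, or a write to /sys/devices/system/node/nodeN/compact
 * for a single node N.
 */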