1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10698b1b30SVlastimil Babka #include <linux/cpu.h> 11748446bbSMel Gorman #include <linux/swap.h> 12748446bbSMel Gorman #include <linux/migrate.h> 13748446bbSMel Gorman #include <linux/compaction.h> 14748446bbSMel Gorman #include <linux/mm_inline.h> 15748446bbSMel Gorman #include <linux/backing-dev.h> 1676ab0f53SMel Gorman #include <linux/sysctl.h> 17ed4a6d7fSMel Gorman #include <linux/sysfs.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19b8c73fc2SAndrey Ryabinin #include <linux/kasan.h> 20698b1b30SVlastimil Babka #include <linux/kthread.h> 21698b1b30SVlastimil Babka #include <linux/freezer.h> 22748446bbSMel Gorman #include "internal.h" 23748446bbSMel Gorman 24010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 25010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 26010fc29aSMinchan Kim { 27010fc29aSMinchan Kim count_vm_event(item); 28010fc29aSMinchan Kim } 29010fc29aSMinchan Kim 30010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 31010fc29aSMinchan Kim { 32010fc29aSMinchan Kim count_vm_events(item, delta); 33010fc29aSMinchan Kim } 34010fc29aSMinchan Kim #else 35010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 36010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 37010fc29aSMinchan Kim #endif 38010fc29aSMinchan Kim 39ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 40ff9543fdSMichal Nazarewicz 41b7aba698SMel Gorman #define CREATE_TRACE_POINTS 42b7aba698SMel Gorman #include <trace/events/compaction.h> 43b7aba698SMel Gorman 4406b6640aSVlastimil Babka #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) 4506b6640aSVlastimil Babka #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) 4606b6640aSVlastimil Babka #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) 4706b6640aSVlastimil Babka #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) 4806b6640aSVlastimil Babka 49748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 50748446bbSMel Gorman { 51748446bbSMel Gorman struct page *page, *next; 526bace090SVlastimil Babka unsigned long high_pfn = 0; 53748446bbSMel Gorman 54748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 556bace090SVlastimil Babka unsigned long pfn = page_to_pfn(page); 56748446bbSMel Gorman list_del(&page->lru); 57748446bbSMel Gorman __free_page(page); 586bace090SVlastimil Babka if (pfn > high_pfn) 596bace090SVlastimil Babka high_pfn = pfn; 60748446bbSMel Gorman } 61748446bbSMel Gorman 626bace090SVlastimil Babka return high_pfn; 63748446bbSMel Gorman } 64748446bbSMel Gorman 65ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 66ff9543fdSMichal Nazarewicz { 67*66c64223SJoonsoo Kim unsigned int i, order, nr_pages; 68*66c64223SJoonsoo Kim struct page *page, *next; 69*66c64223SJoonsoo Kim LIST_HEAD(tmp_list); 70ff9543fdSMichal Nazarewicz 71*66c64223SJoonsoo Kim list_for_each_entry_safe(page, next, list, lru) { 72*66c64223SJoonsoo Kim list_del(&page->lru); 
73*66c64223SJoonsoo Kim 74*66c64223SJoonsoo Kim order = page_private(page); 75*66c64223SJoonsoo Kim nr_pages = 1 << order; 76*66c64223SJoonsoo Kim set_page_private(page, 0); 77*66c64223SJoonsoo Kim set_page_refcounted(page); 78*66c64223SJoonsoo Kim 79*66c64223SJoonsoo Kim arch_alloc_page(page, order); 80*66c64223SJoonsoo Kim kernel_map_pages(page, nr_pages, 1); 81*66c64223SJoonsoo Kim kasan_alloc_pages(page, order); 82*66c64223SJoonsoo Kim if (order) 83*66c64223SJoonsoo Kim split_page(page, order); 84*66c64223SJoonsoo Kim 85*66c64223SJoonsoo Kim for (i = 0; i < nr_pages; i++) { 86*66c64223SJoonsoo Kim list_add(&page->lru, &tmp_list); 87*66c64223SJoonsoo Kim page++; 88ff9543fdSMichal Nazarewicz } 89ff9543fdSMichal Nazarewicz } 90ff9543fdSMichal Nazarewicz 91*66c64223SJoonsoo Kim list_splice(&tmp_list, list); 92*66c64223SJoonsoo Kim } 93*66c64223SJoonsoo Kim 9447118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 9547118af0SMichal Nazarewicz { 9647118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 9747118af0SMichal Nazarewicz } 9847118af0SMichal Nazarewicz 99bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 10024e2716fSJoonsoo Kim 101bda807d4SMinchan Kim int PageMovable(struct page *page) 102bda807d4SMinchan Kim { 103bda807d4SMinchan Kim struct address_space *mapping; 104bda807d4SMinchan Kim 105bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 106bda807d4SMinchan Kim if (!__PageMovable(page)) 107bda807d4SMinchan Kim return 0; 108bda807d4SMinchan Kim 109bda807d4SMinchan Kim mapping = page_mapping(page); 110bda807d4SMinchan Kim if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) 111bda807d4SMinchan Kim return 1; 112bda807d4SMinchan Kim 113bda807d4SMinchan Kim return 0; 114bda807d4SMinchan Kim } 115bda807d4SMinchan Kim EXPORT_SYMBOL(PageMovable); 116bda807d4SMinchan Kim 117bda807d4SMinchan Kim void __SetPageMovable(struct page *page, struct address_space *mapping) 118bda807d4SMinchan Kim { 119bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 120bda807d4SMinchan Kim VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page); 121bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); 122bda807d4SMinchan Kim } 123bda807d4SMinchan Kim EXPORT_SYMBOL(__SetPageMovable); 124bda807d4SMinchan Kim 125bda807d4SMinchan Kim void __ClearPageMovable(struct page *page) 126bda807d4SMinchan Kim { 127bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 128bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageMovable(page), page); 129bda807d4SMinchan Kim /* 130bda807d4SMinchan Kim * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE 131bda807d4SMinchan Kim * flag so that VM can catch up released page by driver after isolation. 132bda807d4SMinchan Kim * With it, VM migration doesn't try to put it back. 133bda807d4SMinchan Kim */ 134bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)page->mapping & 135bda807d4SMinchan Kim PAGE_MAPPING_MOVABLE); 136bda807d4SMinchan Kim } 137bda807d4SMinchan Kim EXPORT_SYMBOL(__ClearPageMovable); 138bda807d4SMinchan Kim 13924e2716fSJoonsoo Kim /* Do not skip compaction more than 64 times */ 14024e2716fSJoonsoo Kim #define COMPACT_MAX_DEFER_SHIFT 6 14124e2716fSJoonsoo Kim 14224e2716fSJoonsoo Kim /* 14324e2716fSJoonsoo Kim * Compaction is deferred when compaction fails to result in a page 14424e2716fSJoonsoo Kim * allocation success. 
1 << compact_defer_shift compactions are skipped up 14524e2716fSJoonsoo Kim * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT 14624e2716fSJoonsoo Kim */ 14724e2716fSJoonsoo Kim void defer_compaction(struct zone *zone, int order) 14824e2716fSJoonsoo Kim { 14924e2716fSJoonsoo Kim zone->compact_considered = 0; 15024e2716fSJoonsoo Kim zone->compact_defer_shift++; 15124e2716fSJoonsoo Kim 15224e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 15324e2716fSJoonsoo Kim zone->compact_order_failed = order; 15424e2716fSJoonsoo Kim 15524e2716fSJoonsoo Kim if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) 15624e2716fSJoonsoo Kim zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; 15724e2716fSJoonsoo Kim 15824e2716fSJoonsoo Kim trace_mm_compaction_defer_compaction(zone, order); 15924e2716fSJoonsoo Kim } 16024e2716fSJoonsoo Kim 16124e2716fSJoonsoo Kim /* Returns true if compaction should be skipped this time */ 16224e2716fSJoonsoo Kim bool compaction_deferred(struct zone *zone, int order) 16324e2716fSJoonsoo Kim { 16424e2716fSJoonsoo Kim unsigned long defer_limit = 1UL << zone->compact_defer_shift; 16524e2716fSJoonsoo Kim 16624e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 16724e2716fSJoonsoo Kim return false; 16824e2716fSJoonsoo Kim 16924e2716fSJoonsoo Kim /* Avoid possible overflow */ 17024e2716fSJoonsoo Kim if (++zone->compact_considered > defer_limit) 17124e2716fSJoonsoo Kim zone->compact_considered = defer_limit; 17224e2716fSJoonsoo Kim 17324e2716fSJoonsoo Kim if (zone->compact_considered >= defer_limit) 17424e2716fSJoonsoo Kim return false; 17524e2716fSJoonsoo Kim 17624e2716fSJoonsoo Kim trace_mm_compaction_deferred(zone, order); 17724e2716fSJoonsoo Kim 17824e2716fSJoonsoo Kim return true; 17924e2716fSJoonsoo Kim } 18024e2716fSJoonsoo Kim 18124e2716fSJoonsoo Kim /* 18224e2716fSJoonsoo Kim * Update defer tracking counters after successful compaction of given order, 18324e2716fSJoonsoo Kim * which means an allocation either succeeded (alloc_success == true) or is 18424e2716fSJoonsoo Kim * expected to succeed. 18524e2716fSJoonsoo Kim */ 18624e2716fSJoonsoo Kim void compaction_defer_reset(struct zone *zone, int order, 18724e2716fSJoonsoo Kim bool alloc_success) 18824e2716fSJoonsoo Kim { 18924e2716fSJoonsoo Kim if (alloc_success) { 19024e2716fSJoonsoo Kim zone->compact_considered = 0; 19124e2716fSJoonsoo Kim zone->compact_defer_shift = 0; 19224e2716fSJoonsoo Kim } 19324e2716fSJoonsoo Kim if (order >= zone->compact_order_failed) 19424e2716fSJoonsoo Kim zone->compact_order_failed = order + 1; 19524e2716fSJoonsoo Kim 19624e2716fSJoonsoo Kim trace_mm_compaction_defer_reset(zone, order); 19724e2716fSJoonsoo Kim } 19824e2716fSJoonsoo Kim 19924e2716fSJoonsoo Kim /* Returns true if restarting compaction after many failures */ 20024e2716fSJoonsoo Kim bool compaction_restarting(struct zone *zone, int order) 20124e2716fSJoonsoo Kim { 20224e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 20324e2716fSJoonsoo Kim return false; 20424e2716fSJoonsoo Kim 20524e2716fSJoonsoo Kim return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && 20624e2716fSJoonsoo Kim zone->compact_considered >= 1UL << zone->compact_defer_shift; 20724e2716fSJoonsoo Kim } 20824e2716fSJoonsoo Kim 209bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate.
*/ 210bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 211bb13ffebSMel Gorman struct page *page) 212bb13ffebSMel Gorman { 213bb13ffebSMel Gorman if (cc->ignore_skip_hint) 214bb13ffebSMel Gorman return true; 215bb13ffebSMel Gorman 216bb13ffebSMel Gorman return !get_pageblock_skip(page); 217bb13ffebSMel Gorman } 218bb13ffebSMel Gorman 21902333641SVlastimil Babka static void reset_cached_positions(struct zone *zone) 22002333641SVlastimil Babka { 22102333641SVlastimil Babka zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 22202333641SVlastimil Babka zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 223623446e4SJoonsoo Kim zone->compact_cached_free_pfn = 22406b6640aSVlastimil Babka pageblock_start_pfn(zone_end_pfn(zone) - 1); 22502333641SVlastimil Babka } 22602333641SVlastimil Babka 227bb13ffebSMel Gorman /* 228bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 229bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 230bb13ffebSMel Gorman * meet. 231bb13ffebSMel Gorman */ 23262997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 233bb13ffebSMel Gorman { 234bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 235108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 236bb13ffebSMel Gorman unsigned long pfn; 237bb13ffebSMel Gorman 23862997027SMel Gorman zone->compact_blockskip_flush = false; 239bb13ffebSMel Gorman 240bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 241bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 242bb13ffebSMel Gorman struct page *page; 243bb13ffebSMel Gorman 244bb13ffebSMel Gorman cond_resched(); 245bb13ffebSMel Gorman 246bb13ffebSMel Gorman if (!pfn_valid(pfn)) 247bb13ffebSMel Gorman continue; 248bb13ffebSMel Gorman 249bb13ffebSMel Gorman page = pfn_to_page(pfn); 250bb13ffebSMel Gorman if (zone != page_zone(page)) 251bb13ffebSMel Gorman continue; 252bb13ffebSMel Gorman 253bb13ffebSMel Gorman clear_pageblock_skip(page); 254bb13ffebSMel Gorman } 25502333641SVlastimil Babka 25602333641SVlastimil Babka reset_cached_positions(zone); 257bb13ffebSMel Gorman } 258bb13ffebSMel Gorman 25962997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 26062997027SMel Gorman { 26162997027SMel Gorman int zoneid; 26262997027SMel Gorman 26362997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 26462997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 26562997027SMel Gorman if (!populated_zone(zone)) 26662997027SMel Gorman continue; 26762997027SMel Gorman 26862997027SMel Gorman /* Only flush if a full compaction finished recently */ 26962997027SMel Gorman if (zone->compact_blockskip_flush) 27062997027SMel Gorman __reset_isolation_suitable(zone); 27162997027SMel Gorman } 27262997027SMel Gorman } 27362997027SMel Gorman 274bb13ffebSMel Gorman /* 275bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 27662997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
277bb13ffebSMel Gorman */ 278c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 279c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 280edc2ca61SVlastimil Babka bool migrate_scanner) 281bb13ffebSMel Gorman { 282c89511abSMel Gorman struct zone *zone = cc->zone; 28335979ef3SDavid Rientjes unsigned long pfn; 2846815bf3fSJoonsoo Kim 2856815bf3fSJoonsoo Kim if (cc->ignore_skip_hint) 2866815bf3fSJoonsoo Kim return; 2876815bf3fSJoonsoo Kim 288bb13ffebSMel Gorman if (!page) 289bb13ffebSMel Gorman return; 290bb13ffebSMel Gorman 29135979ef3SDavid Rientjes if (nr_isolated) 29235979ef3SDavid Rientjes return; 29335979ef3SDavid Rientjes 294bb13ffebSMel Gorman set_pageblock_skip(page); 295c89511abSMel Gorman 29635979ef3SDavid Rientjes pfn = page_to_pfn(page); 29735979ef3SDavid Rientjes 29835979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 299c89511abSMel Gorman if (migrate_scanner) { 30035979ef3SDavid Rientjes if (pfn > zone->compact_cached_migrate_pfn[0]) 30135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = pfn; 302e0b9daebSDavid Rientjes if (cc->mode != MIGRATE_ASYNC && 303e0b9daebSDavid Rientjes pfn > zone->compact_cached_migrate_pfn[1]) 30435979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = pfn; 305c89511abSMel Gorman } else { 30635979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 307c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 308c89511abSMel Gorman } 309c89511abSMel Gorman } 310bb13ffebSMel Gorman #else 311bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 312bb13ffebSMel Gorman struct page *page) 313bb13ffebSMel Gorman { 314bb13ffebSMel Gorman return true; 315bb13ffebSMel Gorman } 316bb13ffebSMel Gorman 317c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 318c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 319edc2ca61SVlastimil Babka bool migrate_scanner) 320bb13ffebSMel Gorman { 321bb13ffebSMel Gorman } 322bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 323bb13ffebSMel Gorman 3241f9efdefSVlastimil Babka /* 3258b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 3268b44d279SVlastimil Babka * very heavily contended. For async compaction, back out if the lock cannot 3278b44d279SVlastimil Babka * be taken immediately. For sync compaction, spin on the lock if needed. 
3288b44d279SVlastimil Babka * 3298b44d279SVlastimil Babka * Returns true if the lock is held 3308b44d279SVlastimil Babka * Returns false if the lock is not held and compaction should abort 3311f9efdefSVlastimil Babka */ 3328b44d279SVlastimil Babka static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, 3338b44d279SVlastimil Babka struct compact_control *cc) 3348b44d279SVlastimil Babka { 3358b44d279SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3368b44d279SVlastimil Babka if (!spin_trylock_irqsave(lock, *flags)) { 3378b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_LOCK; 3388b44d279SVlastimil Babka return false; 3398b44d279SVlastimil Babka } 3408b44d279SVlastimil Babka } else { 3418b44d279SVlastimil Babka spin_lock_irqsave(lock, *flags); 3428b44d279SVlastimil Babka } 3431f9efdefSVlastimil Babka 3448b44d279SVlastimil Babka return true; 3452a1402aaSMel Gorman } 3462a1402aaSMel Gorman 34785aa125fSMichal Nazarewicz /* 348c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 3498b44d279SVlastimil Babka * very heavily contended. The lock should be periodically unlocked to avoid 3508b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 3518b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 3528b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 3538b44d279SVlastimil Babka * aborts. Sync compaction schedules. 3548b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 3558b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 356c67fe375SMel Gorman * 3578b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 3588b44d279SVlastimil Babka * async compaction due to need_resched() 3598b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 3608b44d279SVlastimil Babka * scheduled) 361c67fe375SMel Gorman */ 3628b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 3638b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 364c67fe375SMel Gorman { 3658b44d279SVlastimil Babka if (*locked) { 3668b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 3678b44d279SVlastimil Babka *locked = false; 368c67fe375SMel Gorman } 369c67fe375SMel Gorman 3708b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 3718b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3728b44d279SVlastimil Babka return true; 3738b44d279SVlastimil Babka } 3748b44d279SVlastimil Babka 3758b44d279SVlastimil Babka if (need_resched()) { 376e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) { 3778b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3788b44d279SVlastimil Babka return true; 379c67fe375SMel Gorman } 380c67fe375SMel Gorman cond_resched(); 381c67fe375SMel Gorman } 382c67fe375SMel Gorman 3838b44d279SVlastimil Babka return false; 384c67fe375SMel Gorman } 385c67fe375SMel Gorman 386be976572SVlastimil Babka /* 387be976572SVlastimil Babka * Aside from avoiding lock contention, compaction also periodically checks 388be976572SVlastimil Babka * need_resched() and either schedules in sync compaction or aborts async 3898b44d279SVlastimil Babka * compaction. 
This is similar to what compact_unlock_should_abort() does, but 390be976572SVlastimil Babka * is used where no lock is concerned. 391be976572SVlastimil Babka * 392be976572SVlastimil Babka * Returns false when no scheduling was needed, or sync compaction scheduled. 393be976572SVlastimil Babka * Returns true when async compaction should abort. 394be976572SVlastimil Babka */ 395be976572SVlastimil Babka static inline bool compact_should_abort(struct compact_control *cc) 396be976572SVlastimil Babka { 397be976572SVlastimil Babka /* async compaction aborts if contended */ 398be976572SVlastimil Babka if (need_resched()) { 399be976572SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 4001f9efdefSVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 401be976572SVlastimil Babka return true; 402be976572SVlastimil Babka } 403be976572SVlastimil Babka 404be976572SVlastimil Babka cond_resched(); 405be976572SVlastimil Babka } 406be976572SVlastimil Babka 407be976572SVlastimil Babka return false; 408be976572SVlastimil Babka } 409be976572SVlastimil Babka 410c67fe375SMel Gorman /* 4119e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 4129e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 4139e4be470SJerome Marchand * (even though it may still end up isolating some pages). 41485aa125fSMichal Nazarewicz */ 415f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 416e14c720eSVlastimil Babka unsigned long *start_pfn, 41785aa125fSMichal Nazarewicz unsigned long end_pfn, 41885aa125fSMichal Nazarewicz struct list_head *freelist, 41985aa125fSMichal Nazarewicz bool strict) 420748446bbSMel Gorman { 421b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 422bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 423b8b2d825SXiubo Li unsigned long flags = 0; 424f40d1e42SMel Gorman bool locked = false; 425e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 426*66c64223SJoonsoo Kim unsigned int order; 427748446bbSMel Gorman 428748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 429748446bbSMel Gorman 430f40d1e42SMel Gorman /* Isolate free pages. */ 431748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 432*66c64223SJoonsoo Kim int isolated; 433748446bbSMel Gorman struct page *page = cursor; 434748446bbSMel Gorman 4358b44d279SVlastimil Babka /* 4368b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 4378b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort if fatal signal 4388b44d279SVlastimil Babka * pending or async compaction detects need_resched() 4398b44d279SVlastimil Babka */ 4408b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 4418b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 4428b44d279SVlastimil Babka &locked, cc)) 4438b44d279SVlastimil Babka break; 4448b44d279SVlastimil Babka 445b7aba698SMel Gorman nr_scanned++; 446f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 4472af120bcSLaura Abbott goto isolate_fail; 4482af120bcSLaura Abbott 449bb13ffebSMel Gorman if (!valid_page) 450bb13ffebSMel Gorman valid_page = page; 4519fcd6d2eSVlastimil Babka 4529fcd6d2eSVlastimil Babka /* 4539fcd6d2eSVlastimil Babka * For compound pages such as THP and hugetlbfs, we can save 4549fcd6d2eSVlastimil Babka * potentially a lot of iterations if we skip them at once. 
4559fcd6d2eSVlastimil Babka * The check is racy, but we can consider only valid values 4569fcd6d2eSVlastimil Babka * and the only danger is skipping too much. 4579fcd6d2eSVlastimil Babka */ 4589fcd6d2eSVlastimil Babka if (PageCompound(page)) { 4599fcd6d2eSVlastimil Babka unsigned int comp_order = compound_order(page); 4609fcd6d2eSVlastimil Babka 4619fcd6d2eSVlastimil Babka if (likely(comp_order < MAX_ORDER)) { 4629fcd6d2eSVlastimil Babka blockpfn += (1UL << comp_order) - 1; 4639fcd6d2eSVlastimil Babka cursor += (1UL << comp_order) - 1; 4649fcd6d2eSVlastimil Babka } 4659fcd6d2eSVlastimil Babka 4669fcd6d2eSVlastimil Babka goto isolate_fail; 4679fcd6d2eSVlastimil Babka } 4689fcd6d2eSVlastimil Babka 469f40d1e42SMel Gorman if (!PageBuddy(page)) 4702af120bcSLaura Abbott goto isolate_fail; 471f40d1e42SMel Gorman 472f40d1e42SMel Gorman /* 47369b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 47469b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 47569b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 47669b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 47769b7189fSVlastimil Babka * recheck as well. 47869b7189fSVlastimil Babka */ 47969b7189fSVlastimil Babka if (!locked) { 48069b7189fSVlastimil Babka /* 481f40d1e42SMel Gorman * The zone lock must be held to isolate freepages. 482f40d1e42SMel Gorman * Unfortunately this is a very coarse lock and can be 483f40d1e42SMel Gorman * heavily contended if there are parallel allocations 484f40d1e42SMel Gorman * or parallel compactions. For async compaction do not 485f40d1e42SMel Gorman * spin on the lock and we acquire the lock as late as 486f40d1e42SMel Gorman * possible. 487f40d1e42SMel Gorman */ 4888b44d279SVlastimil Babka locked = compact_trylock_irqsave(&cc->zone->lock, 4898b44d279SVlastimil Babka &flags, cc); 490f40d1e42SMel Gorman if (!locked) 491f40d1e42SMel Gorman break; 492f40d1e42SMel Gorman 493f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 494f40d1e42SMel Gorman if (!PageBuddy(page)) 4952af120bcSLaura Abbott goto isolate_fail; 49669b7189fSVlastimil Babka } 497748446bbSMel Gorman 498*66c64223SJoonsoo Kim /* Found a free page, will break it into order-0 pages */ 499*66c64223SJoonsoo Kim order = page_order(page); 500*66c64223SJoonsoo Kim isolated = __isolate_free_page(page, order); 501a4f04f2cSDavid Rientjes if (!isolated) 502a4f04f2cSDavid Rientjes break; 503*66c64223SJoonsoo Kim set_page_private(page, order); 504a4f04f2cSDavid Rientjes 505748446bbSMel Gorman total_isolated += isolated; 506a4f04f2cSDavid Rientjes cc->nr_freepages += isolated; 507*66c64223SJoonsoo Kim list_add_tail(&page->lru, freelist); 508*66c64223SJoonsoo Kim 509a4f04f2cSDavid Rientjes if (!strict && cc->nr_migratepages <= cc->nr_freepages) { 510932ff6bbSJoonsoo Kim blockpfn += isolated; 511932ff6bbSJoonsoo Kim break; 512932ff6bbSJoonsoo Kim } 513a4f04f2cSDavid Rientjes /* Advance to the end of split page */ 514748446bbSMel Gorman blockpfn += isolated - 1; 515748446bbSMel Gorman cursor += isolated - 1; 5162af120bcSLaura Abbott continue; 5172af120bcSLaura Abbott 5182af120bcSLaura Abbott isolate_fail: 5192af120bcSLaura Abbott if (strict) 5202af120bcSLaura Abbott break; 5212af120bcSLaura Abbott else 5222af120bcSLaura Abbott continue; 5232af120bcSLaura Abbott 524748446bbSMel Gorman } 525748446bbSMel Gorman 526a4f04f2cSDavid Rientjes if (locked) 527a4f04f2cSDavid Rientjes spin_unlock_irqrestore(&cc->zone->lock, flags); 528a4f04f2cSDavid 
Rientjes 5299fcd6d2eSVlastimil Babka /* 5309fcd6d2eSVlastimil Babka * There is a tiny chance that we have read bogus compound_order(), 5319fcd6d2eSVlastimil Babka * so be careful to not go outside of the pageblock. 5329fcd6d2eSVlastimil Babka */ 5339fcd6d2eSVlastimil Babka if (unlikely(blockpfn > end_pfn)) 5349fcd6d2eSVlastimil Babka blockpfn = end_pfn; 5359fcd6d2eSVlastimil Babka 536e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 537e34d85f0SJoonsoo Kim nr_scanned, total_isolated); 538e34d85f0SJoonsoo Kim 539e14c720eSVlastimil Babka /* Record how far we have got within the block */ 540e14c720eSVlastimil Babka *start_pfn = blockpfn; 541e14c720eSVlastimil Babka 542f40d1e42SMel Gorman /* 543f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 544f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 545f40d1e42SMel Gorman * returned and CMA will fail. 546f40d1e42SMel Gorman */ 5472af120bcSLaura Abbott if (strict && blockpfn < end_pfn) 548f40d1e42SMel Gorman total_isolated = 0; 549f40d1e42SMel Gorman 550bb13ffebSMel Gorman /* Update the pageblock-skip if the whole pageblock was scanned */ 551bb13ffebSMel Gorman if (blockpfn == end_pfn) 552edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, total_isolated, false); 553bb13ffebSMel Gorman 554010fc29aSMinchan Kim count_compact_events(COMPACTFREE_SCANNED, nr_scanned); 555397487dbSMel Gorman if (total_isolated) 556010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated); 557748446bbSMel Gorman return total_isolated; 558748446bbSMel Gorman } 559748446bbSMel Gorman 56085aa125fSMichal Nazarewicz /** 56185aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 56285aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 56385aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 56485aa125fSMichal Nazarewicz * 56585aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the 56685aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors and cause the function to 56785aa125fSMichal Nazarewicz * undo its actions and return zero. 56885aa125fSMichal Nazarewicz * 56985aa125fSMichal Nazarewicz * Otherwise, function returns one-past-the-last PFN of isolated page 57085aa125fSMichal Nazarewicz * (which may be greater than end_pfn if end fell in the middle of 57185aa125fSMichal Nazarewicz * a free page).
57285aa125fSMichal Nazarewicz */ 573ff9543fdSMichal Nazarewicz unsigned long 574bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc, 575bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn) 57685aa125fSMichal Nazarewicz { 577e1409c32SJoonsoo Kim unsigned long isolated, pfn, block_start_pfn, block_end_pfn; 57885aa125fSMichal Nazarewicz LIST_HEAD(freelist); 57985aa125fSMichal Nazarewicz 5807d49d886SVlastimil Babka pfn = start_pfn; 58106b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 582e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 583e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 58406b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 5857d49d886SVlastimil Babka 5867d49d886SVlastimil Babka for (; pfn < end_pfn; pfn += isolated, 587e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 5887d49d886SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 589e14c720eSVlastimil Babka /* Protect pfn from changing by isolate_freepages_block */ 590e14c720eSVlastimil Babka unsigned long isolate_start_pfn = pfn; 5917d49d886SVlastimil Babka 59285aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn); 59385aa125fSMichal Nazarewicz 59458420016SJoonsoo Kim /* 59558420016SJoonsoo Kim * pfn could pass the block_end_pfn if isolated freepage 59658420016SJoonsoo Kim * is more than pageblock order. In this case, we adjust 59758420016SJoonsoo Kim * scanning range to right one. 59858420016SJoonsoo Kim */ 59958420016SJoonsoo Kim if (pfn >= block_end_pfn) { 60006b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 60106b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 60258420016SJoonsoo Kim block_end_pfn = min(block_end_pfn, end_pfn); 60358420016SJoonsoo Kim } 60458420016SJoonsoo Kim 605e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 606e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 6077d49d886SVlastimil Babka break; 6087d49d886SVlastimil Babka 609e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 610e14c720eSVlastimil Babka block_end_pfn, &freelist, true); 61185aa125fSMichal Nazarewicz 61285aa125fSMichal Nazarewicz /* 61385aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 61485aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or 61585aa125fSMichal Nazarewicz * non-free pages). 61685aa125fSMichal Nazarewicz */ 61785aa125fSMichal Nazarewicz if (!isolated) 61885aa125fSMichal Nazarewicz break; 61985aa125fSMichal Nazarewicz 62085aa125fSMichal Nazarewicz /* 62185aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 62285aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order 62385aa125fSMichal Nazarewicz * page may span two pageblocks). 62485aa125fSMichal Nazarewicz */ 62585aa125fSMichal Nazarewicz } 62685aa125fSMichal Nazarewicz 627*66c64223SJoonsoo Kim /* __isolate_free_page() does not map the pages */ 62885aa125fSMichal Nazarewicz map_pages(&freelist); 62985aa125fSMichal Nazarewicz 63085aa125fSMichal Nazarewicz if (pfn < end_pfn) { 63185aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 63285aa125fSMichal Nazarewicz release_freepages(&freelist); 63385aa125fSMichal Nazarewicz return 0; 63485aa125fSMichal Nazarewicz } 63585aa125fSMichal Nazarewicz 63685aa125fSMichal Nazarewicz /* We don't use freelists for anything. 
*/ 63785aa125fSMichal Nazarewicz return pfn; 63885aa125fSMichal Nazarewicz } 63985aa125fSMichal Nazarewicz 640748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */ 641edc2ca61SVlastimil Babka static void acct_isolated(struct zone *zone, struct compact_control *cc) 642748446bbSMel Gorman { 643748446bbSMel Gorman struct page *page; 644b9e84ac1SMinchan Kim unsigned int count[2] = { 0, }; 645748446bbSMel Gorman 646edc2ca61SVlastimil Babka if (list_empty(&cc->migratepages)) 647edc2ca61SVlastimil Babka return; 648edc2ca61SVlastimil Babka 649b9e84ac1SMinchan Kim list_for_each_entry(page, &cc->migratepages, lru) 650b9e84ac1SMinchan Kim count[!!page_is_file_cache(page)]++; 651748446bbSMel Gorman 652c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); 653c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); 654c67fe375SMel Gorman } 655748446bbSMel Gorman 656748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 657748446bbSMel Gorman static bool too_many_isolated(struct zone *zone) 658748446bbSMel Gorman { 659bc693045SMinchan Kim unsigned long active, inactive, isolated; 660748446bbSMel Gorman 661748446bbSMel Gorman inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 662748446bbSMel Gorman zone_page_state(zone, NR_INACTIVE_ANON); 663bc693045SMinchan Kim active = zone_page_state(zone, NR_ACTIVE_FILE) + 664bc693045SMinchan Kim zone_page_state(zone, NR_ACTIVE_ANON); 665748446bbSMel Gorman isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 666748446bbSMel Gorman zone_page_state(zone, NR_ISOLATED_ANON); 667748446bbSMel Gorman 668bc693045SMinchan Kim return isolated > (inactive + active) / 2; 669748446bbSMel Gorman } 670748446bbSMel Gorman 6712fe86e00SMichal Nazarewicz /** 672edc2ca61SVlastimil Babka * isolate_migratepages_block() - isolate all migrate-able pages within 673edc2ca61SVlastimil Babka * a single pageblock 6742fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 675edc2ca61SVlastimil Babka * @low_pfn: The first PFN to isolate 676edc2ca61SVlastimil Babka * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 677edc2ca61SVlastimil Babka * @isolate_mode: Isolation mode to be used. 6782fe86e00SMichal Nazarewicz * 6792fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 680edc2ca61SVlastimil Babka * [low_pfn, end_pfn). The range is expected to be within same pageblock. 681edc2ca61SVlastimil Babka * Returns zero if there is a fatal signal pending, otherwise PFN of the 682edc2ca61SVlastimil Babka * first page that was not scanned (which may be both less, equal to or more 683edc2ca61SVlastimil Babka * than end_pfn). 6842fe86e00SMichal Nazarewicz * 685edc2ca61SVlastimil Babka * The pages are isolated on cc->migratepages list (not required to be empty), 686edc2ca61SVlastimil Babka * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field 687edc2ca61SVlastimil Babka * is neither read nor updated. 
688748446bbSMel Gorman */ 689edc2ca61SVlastimil Babka static unsigned long 690edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 691edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 692748446bbSMel Gorman { 693edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 694b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 695fa9add64SHugh Dickins struct lruvec *lruvec; 696b8b2d825SXiubo Li unsigned long flags = 0; 6972a1402aaSMel Gorman bool locked = false; 698bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 699e34d85f0SJoonsoo Kim unsigned long start_pfn = low_pfn; 700fdd048e1SVlastimil Babka bool skip_on_failure = false; 701fdd048e1SVlastimil Babka unsigned long next_skip_pfn = 0; 702748446bbSMel Gorman 703748446bbSMel Gorman /* 704748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 705748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 706748446bbSMel Gorman * delay for some time until fewer pages are isolated 707748446bbSMel Gorman */ 708748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 709f9e35b3bSMel Gorman /* async migration should just abort */ 710e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 7112fe86e00SMichal Nazarewicz return 0; 712f9e35b3bSMel Gorman 713748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 714748446bbSMel Gorman 715748446bbSMel Gorman if (fatal_signal_pending(current)) 7162fe86e00SMichal Nazarewicz return 0; 717748446bbSMel Gorman } 718748446bbSMel Gorman 719be976572SVlastimil Babka if (compact_should_abort(cc)) 720aeef4b83SDavid Rientjes return 0; 721aeef4b83SDavid Rientjes 722fdd048e1SVlastimil Babka if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { 723fdd048e1SVlastimil Babka skip_on_failure = true; 724fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 725fdd048e1SVlastimil Babka } 726fdd048e1SVlastimil Babka 727748446bbSMel Gorman /* Time to isolate some pages for migration */ 728748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 72929c0dde8SVlastimil Babka 730fdd048e1SVlastimil Babka if (skip_on_failure && low_pfn >= next_skip_pfn) { 731fdd048e1SVlastimil Babka /* 732fdd048e1SVlastimil Babka * We have isolated all migration candidates in the 733fdd048e1SVlastimil Babka * previous order-aligned block, and did not skip it due 734fdd048e1SVlastimil Babka * to failure. We should migrate the pages now and 735fdd048e1SVlastimil Babka * hopefully succeed compaction. 736fdd048e1SVlastimil Babka */ 737fdd048e1SVlastimil Babka if (nr_isolated) 738fdd048e1SVlastimil Babka break; 739fdd048e1SVlastimil Babka 740fdd048e1SVlastimil Babka /* 741fdd048e1SVlastimil Babka * We failed to isolate in the previous order-aligned 742fdd048e1SVlastimil Babka * block. Set the new boundary to the end of the 743fdd048e1SVlastimil Babka * current block. Note we can't simply increase 744fdd048e1SVlastimil Babka * next_skip_pfn by 1 << order, as low_pfn might have 745fdd048e1SVlastimil Babka * been incremented by a higher number due to skipping 746fdd048e1SVlastimil Babka * a compound or a high-order buddy page in the 747fdd048e1SVlastimil Babka * previous loop iteration. 
748fdd048e1SVlastimil Babka */ 749fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 750fdd048e1SVlastimil Babka } 751fdd048e1SVlastimil Babka 7528b44d279SVlastimil Babka /* 7538b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 7548b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort async compaction 7558b44d279SVlastimil Babka * if contended. 7568b44d279SVlastimil Babka */ 7578b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 7588b44d279SVlastimil Babka && compact_unlock_should_abort(&zone->lru_lock, flags, 7598b44d279SVlastimil Babka &locked, cc)) 7608b44d279SVlastimil Babka break; 761b2eef8c0SAndrea Arcangeli 762748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 763fdd048e1SVlastimil Babka goto isolate_fail; 764b7aba698SMel Gorman nr_scanned++; 765748446bbSMel Gorman 766748446bbSMel Gorman page = pfn_to_page(low_pfn); 767dc908600SMel Gorman 768bb13ffebSMel Gorman if (!valid_page) 769bb13ffebSMel Gorman valid_page = page; 770bb13ffebSMel Gorman 771c122b208SJoonsoo Kim /* 77299c0fd5eSVlastimil Babka * Skip if free. We read page order here without zone lock 77399c0fd5eSVlastimil Babka * which is generally unsafe, but the race window is small and 77499c0fd5eSVlastimil Babka * the worst thing that can happen is that we skip some 77599c0fd5eSVlastimil Babka * potential isolation targets. 7766c14466cSMel Gorman */ 77799c0fd5eSVlastimil Babka if (PageBuddy(page)) { 77899c0fd5eSVlastimil Babka unsigned long freepage_order = page_order_unsafe(page); 77999c0fd5eSVlastimil Babka 78099c0fd5eSVlastimil Babka /* 78199c0fd5eSVlastimil Babka * Without lock, we cannot be sure that what we got is 78299c0fd5eSVlastimil Babka * a valid page order. Consider only values in the 78399c0fd5eSVlastimil Babka * valid order range to prevent low_pfn overflow. 78499c0fd5eSVlastimil Babka */ 78599c0fd5eSVlastimil Babka if (freepage_order > 0 && freepage_order < MAX_ORDER) 78699c0fd5eSVlastimil Babka low_pfn += (1UL << freepage_order) - 1; 787748446bbSMel Gorman continue; 78899c0fd5eSVlastimil Babka } 789748446bbSMel Gorman 7909927af74SMel Gorman /* 79129c0dde8SVlastimil Babka * Regardless of being on LRU, compound pages such as THP and 79229c0dde8SVlastimil Babka * hugetlbfs are not to be compacted. We can potentially save 79329c0dde8SVlastimil Babka * a lot of iterations if we skip them at once. The check is 79429c0dde8SVlastimil Babka * racy, but we can consider only valid values and the only 79529c0dde8SVlastimil Babka * danger is skipping too much. 796bc835011SAndrea Arcangeli */ 79729c0dde8SVlastimil Babka if (PageCompound(page)) { 79829c0dde8SVlastimil Babka unsigned int comp_order = compound_order(page); 79929c0dde8SVlastimil Babka 80029c0dde8SVlastimil Babka if (likely(comp_order < MAX_ORDER)) 80129c0dde8SVlastimil Babka low_pfn += (1UL << comp_order) - 1; 802edc2ca61SVlastimil Babka 803fdd048e1SVlastimil Babka goto isolate_fail; 8042a1402aaSMel Gorman } 8052a1402aaSMel Gorman 806bda807d4SMinchan Kim /* 807bda807d4SMinchan Kim * Check may be lockless but that's ok as we recheck later. 808bda807d4SMinchan Kim * It's possible to migrate LRU and non-lru movable pages. 809bda807d4SMinchan Kim * Skip any other type of page 810bda807d4SMinchan Kim */ 811bda807d4SMinchan Kim if (!PageLRU(page)) { 812bda807d4SMinchan Kim /* 813bda807d4SMinchan Kim * __PageMovable can return false positive so we need 814bda807d4SMinchan Kim * to verify it under page_lock. 
815bda807d4SMinchan Kim */ 816bda807d4SMinchan Kim if (unlikely(__PageMovable(page)) && 817bda807d4SMinchan Kim !PageIsolated(page)) { 818bda807d4SMinchan Kim if (locked) { 819bda807d4SMinchan Kim spin_unlock_irqrestore(&zone->lru_lock, 820bda807d4SMinchan Kim flags); 821bda807d4SMinchan Kim locked = false; 822bda807d4SMinchan Kim } 823bda807d4SMinchan Kim 824bda807d4SMinchan Kim if (isolate_movable_page(page, isolate_mode)) 825bda807d4SMinchan Kim goto isolate_success; 826bda807d4SMinchan Kim } 827bda807d4SMinchan Kim 828fdd048e1SVlastimil Babka goto isolate_fail; 829bda807d4SMinchan Kim } 83029c0dde8SVlastimil Babka 831119d6d59SDavid Rientjes /* 832119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 833119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 834119d6d59SDavid Rientjes * admittedly racy check. 835119d6d59SDavid Rientjes */ 836119d6d59SDavid Rientjes if (!page_mapping(page) && 837119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 838fdd048e1SVlastimil Babka goto isolate_fail; 839119d6d59SDavid Rientjes 84069b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 84169b7189fSVlastimil Babka if (!locked) { 8428b44d279SVlastimil Babka locked = compact_trylock_irqsave(&zone->lru_lock, 8438b44d279SVlastimil Babka &flags, cc); 8448b44d279SVlastimil Babka if (!locked) 8452a1402aaSMel Gorman break; 8462a1402aaSMel Gorman 84729c0dde8SVlastimil Babka /* Recheck PageLRU and PageCompound under lock */ 8482a1402aaSMel Gorman if (!PageLRU(page)) 849fdd048e1SVlastimil Babka goto isolate_fail; 85029c0dde8SVlastimil Babka 85129c0dde8SVlastimil Babka /* 85229c0dde8SVlastimil Babka * Page became compound since the non-locked check, 85329c0dde8SVlastimil Babka * and it's on LRU. It can only be a THP so the order 85429c0dde8SVlastimil Babka * is safe to read and it's 0 for tail pages. 85529c0dde8SVlastimil Babka */ 85629c0dde8SVlastimil Babka if (unlikely(PageCompound(page))) { 85729c0dde8SVlastimil Babka low_pfn += (1UL << compound_order(page)) - 1; 858fdd048e1SVlastimil Babka goto isolate_fail; 859bc835011SAndrea Arcangeli } 86069b7189fSVlastimil Babka } 861bc835011SAndrea Arcangeli 862fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 863fa9add64SHugh Dickins 864748446bbSMel Gorman /* Try isolate the page */ 865edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 866fdd048e1SVlastimil Babka goto isolate_fail; 867748446bbSMel Gorman 86829c0dde8SVlastimil Babka VM_BUG_ON_PAGE(PageCompound(page), page); 869bc835011SAndrea Arcangeli 870748446bbSMel Gorman /* Successfully isolated */ 871fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 872b6c75016SJoonsoo Kim 873b6c75016SJoonsoo Kim isolate_success: 874fdd048e1SVlastimil Babka list_add(&page->lru, &cc->migratepages); 875748446bbSMel Gorman cc->nr_migratepages++; 876b7aba698SMel Gorman nr_isolated++; 877748446bbSMel Gorman 878a34753d2SVlastimil Babka /* 879a34753d2SVlastimil Babka * Record where we could have freed pages by migration and not 880a34753d2SVlastimil Babka * yet flushed them to buddy allocator. 881a34753d2SVlastimil Babka * - this is the lowest page that was isolated and likely to be 882a34753d2SVlastimil Babka * then freed by migration.
883a34753d2SVlastimil Babka */ 884a34753d2SVlastimil Babka if (!cc->last_migrated_pfn) 885a34753d2SVlastimil Babka cc->last_migrated_pfn = low_pfn; 886a34753d2SVlastimil Babka 887748446bbSMel Gorman /* Avoid isolating too much */ 88831b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 88931b8384aSHillf Danton ++low_pfn; 890748446bbSMel Gorman break; 891748446bbSMel Gorman } 892fdd048e1SVlastimil Babka 893fdd048e1SVlastimil Babka continue; 894fdd048e1SVlastimil Babka isolate_fail: 895fdd048e1SVlastimil Babka if (!skip_on_failure) 896fdd048e1SVlastimil Babka continue; 897fdd048e1SVlastimil Babka 898fdd048e1SVlastimil Babka /* 899fdd048e1SVlastimil Babka * We have isolated some pages, but then failed. Release them 900fdd048e1SVlastimil Babka * instead of migrating, as we cannot form the cc->order buddy 901fdd048e1SVlastimil Babka * page anyway. 902fdd048e1SVlastimil Babka */ 903fdd048e1SVlastimil Babka if (nr_isolated) { 904fdd048e1SVlastimil Babka if (locked) { 905fdd048e1SVlastimil Babka spin_unlock_irqrestore(&zone->lru_lock, flags); 906fdd048e1SVlastimil Babka locked = false; 907fdd048e1SVlastimil Babka } 908fdd048e1SVlastimil Babka acct_isolated(zone, cc); 909fdd048e1SVlastimil Babka putback_movable_pages(&cc->migratepages); 910fdd048e1SVlastimil Babka cc->nr_migratepages = 0; 911fdd048e1SVlastimil Babka cc->last_migrated_pfn = 0; 912fdd048e1SVlastimil Babka nr_isolated = 0; 913fdd048e1SVlastimil Babka } 914fdd048e1SVlastimil Babka 915fdd048e1SVlastimil Babka if (low_pfn < next_skip_pfn) { 916fdd048e1SVlastimil Babka low_pfn = next_skip_pfn - 1; 917fdd048e1SVlastimil Babka /* 918fdd048e1SVlastimil Babka * The check near the loop beginning would have updated 919fdd048e1SVlastimil Babka * next_skip_pfn too, but this is a bit simpler. 920fdd048e1SVlastimil Babka */ 921fdd048e1SVlastimil Babka next_skip_pfn += 1UL << cc->order; 922fdd048e1SVlastimil Babka } 92331b8384aSHillf Danton } 924748446bbSMel Gorman 92599c0fd5eSVlastimil Babka /* 92699c0fd5eSVlastimil Babka * The PageBuddy() check could have potentially brought us outside 92799c0fd5eSVlastimil Babka * the range to be scanned. 92899c0fd5eSVlastimil Babka */ 92999c0fd5eSVlastimil Babka if (unlikely(low_pfn > end_pfn)) 93099c0fd5eSVlastimil Babka low_pfn = end_pfn; 93199c0fd5eSVlastimil Babka 932c67fe375SMel Gorman if (locked) 933c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags); 934748446bbSMel Gorman 93550b5b094SVlastimil Babka /* 93650b5b094SVlastimil Babka * Update the pageblock-skip information and cached scanner pfn, 93750b5b094SVlastimil Babka * if the whole pageblock was scanned without isolating any page. 93850b5b094SVlastimil Babka */ 93935979ef3SDavid Rientjes if (low_pfn == end_pfn) 940edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, nr_isolated, true); 941bb13ffebSMel Gorman 942e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 943e34d85f0SJoonsoo Kim nr_scanned, nr_isolated); 944b7aba698SMel Gorman 945010fc29aSMinchan Kim count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned); 946397487dbSMel Gorman if (nr_isolated) 947010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 948397487dbSMel Gorman 9492fe86e00SMichal Nazarewicz return low_pfn; 9502fe86e00SMichal Nazarewicz } 9512fe86e00SMichal Nazarewicz 952edc2ca61SVlastimil Babka /** 953edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 954edc2ca61SVlastimil Babka * @cc: Compaction control structure. 
955edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating. 956edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN. 957edc2ca61SVlastimil Babka * 958edc2ca61SVlastimil Babka * Returns zero if isolation fails fatally due to e.g. pending signal. 959edc2ca61SVlastimil Babka * Otherwise, function returns one-past-the-last PFN of isolated page 960edc2ca61SVlastimil Babka * (which may be greater than end_pfn if end fell in a middle of a THP page). 961edc2ca61SVlastimil Babka */ 962edc2ca61SVlastimil Babka unsigned long 963edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 964edc2ca61SVlastimil Babka unsigned long end_pfn) 965edc2ca61SVlastimil Babka { 966e1409c32SJoonsoo Kim unsigned long pfn, block_start_pfn, block_end_pfn; 967edc2ca61SVlastimil Babka 968edc2ca61SVlastimil Babka /* Scan block by block. First and last block may be incomplete */ 969edc2ca61SVlastimil Babka pfn = start_pfn; 97006b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 971e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 972e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 97306b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 974edc2ca61SVlastimil Babka 975edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 976e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 977edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 978edc2ca61SVlastimil Babka 979edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 980edc2ca61SVlastimil Babka 981e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 982e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 983edc2ca61SVlastimil Babka continue; 984edc2ca61SVlastimil Babka 985edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 986edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 987edc2ca61SVlastimil Babka 98814af4a5eSHugh Dickins if (!pfn) 989edc2ca61SVlastimil Babka break; 9906ea41c0cSJoonsoo Kim 9916ea41c0cSJoonsoo Kim if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) 9926ea41c0cSJoonsoo Kim break; 993edc2ca61SVlastimil Babka } 994edc2ca61SVlastimil Babka acct_isolated(cc->zone, cc); 995edc2ca61SVlastimil Babka 996edc2ca61SVlastimil Babka return pfn; 997edc2ca61SVlastimil Babka } 998edc2ca61SVlastimil Babka 999ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1000ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 1001018e9a49SAndrew Morton 1002018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */ 1003018e9a49SAndrew Morton static bool suitable_migration_target(struct page *page) 1004018e9a49SAndrew Morton { 1005018e9a49SAndrew Morton /* If the page is a large free page, then disallow migration */ 1006018e9a49SAndrew Morton if (PageBuddy(page)) { 1007018e9a49SAndrew Morton /* 1008018e9a49SAndrew Morton * We are checking page_order without zone->lock taken. But 1009018e9a49SAndrew Morton * the only small danger is that we skip a potentially suitable 1010018e9a49SAndrew Morton * pageblock, so it's not worth to check order for valid range. 
1011018e9a49SAndrew Morton */ 1012018e9a49SAndrew Morton if (page_order_unsafe(page) >= pageblock_order) 1013018e9a49SAndrew Morton return false; 1014018e9a49SAndrew Morton } 1015018e9a49SAndrew Morton 1016018e9a49SAndrew Morton /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1017018e9a49SAndrew Morton if (migrate_async_suitable(get_pageblock_migratetype(page))) 1018018e9a49SAndrew Morton return true; 1019018e9a49SAndrew Morton 1020018e9a49SAndrew Morton /* Otherwise skip the block */ 1021018e9a49SAndrew Morton return false; 1022018e9a49SAndrew Morton } 1023018e9a49SAndrew Morton 1024ff9543fdSMichal Nazarewicz /* 1025f2849aa0SVlastimil Babka * Test whether the free scanner has reached the same or lower pageblock than 1026f2849aa0SVlastimil Babka * the migration scanner, and compaction should thus terminate. 1027f2849aa0SVlastimil Babka */ 1028f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc) 1029f2849aa0SVlastimil Babka { 1030f2849aa0SVlastimil Babka return (cc->free_pfn >> pageblock_order) 1031f2849aa0SVlastimil Babka <= (cc->migrate_pfn >> pageblock_order); 1032f2849aa0SVlastimil Babka } 1033f2849aa0SVlastimil Babka 1034f2849aa0SVlastimil Babka /* 1035ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 1036ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 1037ff9543fdSMichal Nazarewicz */ 1038edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 1039ff9543fdSMichal Nazarewicz { 1040edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 1041ff9543fdSMichal Nazarewicz struct page *page; 1042c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 1043e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 1044c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 1045c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1046ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 10472fe86e00SMichal Nazarewicz 1048ff9543fdSMichal Nazarewicz /* 1049ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 105049e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 1051e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 1052e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 1053c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 1054c96b9e50SVlastimil Babka * For ending point, take care when isolating in the last pageblock of 1055c96b9e50SVlastimil Babka * a zone which ends in the middle of a pageblock. 105649e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 105749e068f0SVlastimil Babka * is using.
1058ff9543fdSMichal Nazarewicz */ 1059e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 106006b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(cc->free_pfn); 1061c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1062c96b9e50SVlastimil Babka zone_end_pfn(zone)); 106306b6640aSVlastimil Babka low_pfn = pageblock_end_pfn(cc->migrate_pfn); 10642fe86e00SMichal Nazarewicz 1065ff9543fdSMichal Nazarewicz /* 1066ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 1067ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 1068ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 1069ff9543fdSMichal Nazarewicz */ 1070f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 1071c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 1072e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 1073e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 1074f6ea3adbSDavid Rientjes /* 1075f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 1076f6ea3adbSDavid Rientjes * suitable migration targets, so periodically check if we need 1077be976572SVlastimil Babka * to schedule, or even abort async compaction. 1078f6ea3adbSDavid Rientjes */ 1079be976572SVlastimil Babka if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1080be976572SVlastimil Babka && compact_should_abort(cc)) 1081be976572SVlastimil Babka break; 1082f6ea3adbSDavid Rientjes 10837d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 10847d49d886SVlastimil Babka zone); 10857d49d886SVlastimil Babka if (!page) 1086ff9543fdSMichal Nazarewicz continue; 1087ff9543fdSMichal Nazarewicz 1088ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 108968e3e926SLinus Torvalds if (!suitable_migration_target(page)) 1090ff9543fdSMichal Nazarewicz continue; 109168e3e926SLinus Torvalds 1092bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 1093bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 1094bb13ffebSMel Gorman continue; 1095bb13ffebSMel Gorman 1096e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 1097a46cbf3bSDavid Rientjes isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, 1098a46cbf3bSDavid Rientjes freelist, false); 1099ff9543fdSMichal Nazarewicz 1100ff9543fdSMichal Nazarewicz /* 1101a46cbf3bSDavid Rientjes * If we isolated enough freepages, or aborted due to lock 1102a46cbf3bSDavid Rientjes * contention, terminate. 1103e14c720eSVlastimil Babka */ 1104f5f61a32SVlastimil Babka if ((cc->nr_freepages >= cc->nr_migratepages) 1105f5f61a32SVlastimil Babka || cc->contended) { 1106a46cbf3bSDavid Rientjes if (isolate_start_pfn >= block_end_pfn) { 1107a46cbf3bSDavid Rientjes /* 1108a46cbf3bSDavid Rientjes * Restart at previous pageblock if more 1109a46cbf3bSDavid Rientjes * freepages can be isolated next time. 1110a46cbf3bSDavid Rientjes */ 1111f5f61a32SVlastimil Babka isolate_start_pfn = 1112e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1113a46cbf3bSDavid Rientjes } 1114be976572SVlastimil Babka break; 1115a46cbf3bSDavid Rientjes } else if (isolate_start_pfn < block_end_pfn) { 1116f5f61a32SVlastimil Babka /* 1117a46cbf3bSDavid Rientjes * If isolation failed early, do not continue 1118a46cbf3bSDavid Rientjes * needlessly. 
1119f5f61a32SVlastimil Babka */ 1120a46cbf3bSDavid Rientjes break; 1121f5f61a32SVlastimil Babka } 1122c89511abSMel Gorman } 1123ff9543fdSMichal Nazarewicz 1124*66c64223SJoonsoo Kim /* __isolate_free_page() does not map the pages */ 1125ff9543fdSMichal Nazarewicz map_pages(freelist); 1126ff9543fdSMichal Nazarewicz 11277ed695e0SVlastimil Babka /* 1128f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. Either we 1129f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1130f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1131f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 11327ed695e0SVlastimil Babka */ 1133f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 1134748446bbSMel Gorman } 1135748446bbSMel Gorman 1136748446bbSMel Gorman /* 1137748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1138748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1139748446bbSMel Gorman */ 1140748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1141748446bbSMel Gorman unsigned long data, 1142748446bbSMel Gorman int **result) 1143748446bbSMel Gorman { 1144748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1145748446bbSMel Gorman struct page *freepage; 1146748446bbSMel Gorman 1147be976572SVlastimil Babka /* 1148be976572SVlastimil Babka * Isolate free pages if necessary, and if we are not aborting due to 1149be976572SVlastimil Babka * contention. 1150be976572SVlastimil Babka */ 1151748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1152be976572SVlastimil Babka if (!cc->contended) 1153edc2ca61SVlastimil Babka isolate_freepages(cc); 1154748446bbSMel Gorman 1155748446bbSMel Gorman if (list_empty(&cc->freepages)) 1156748446bbSMel Gorman return NULL; 1157748446bbSMel Gorman } 1158748446bbSMel Gorman 1159748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1160748446bbSMel Gorman list_del(&freepage->lru); 1161748446bbSMel Gorman cc->nr_freepages--; 1162748446bbSMel Gorman 1163748446bbSMel Gorman return freepage; 1164748446bbSMel Gorman } 1165748446bbSMel Gorman 1166748446bbSMel Gorman /* 1167d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1168d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1169d53aea3dSDavid Rientjes * special handling needed for NUMA. 
1170d53aea3dSDavid Rientjes */ 1171d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1172d53aea3dSDavid Rientjes { 1173d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1174d53aea3dSDavid Rientjes 1175d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1176d53aea3dSDavid Rientjes cc->nr_freepages++; 1177d53aea3dSDavid Rientjes } 1178d53aea3dSDavid Rientjes 1179ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1180ff9543fdSMichal Nazarewicz typedef enum { 1181ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1182ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1183ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1184ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1185ff9543fdSMichal Nazarewicz 1186ff9543fdSMichal Nazarewicz /* 11875bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 11885bbe3547SEric B Munson * compactable pages. 11895bbe3547SEric B Munson */ 11905bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 11915bbe3547SEric B Munson 11925bbe3547SEric B Munson /* 1193edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1194edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1195edc2ca61SVlastimil Babka * compact_control. 1196ff9543fdSMichal Nazarewicz */ 1197ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1198ff9543fdSMichal Nazarewicz struct compact_control *cc) 1199ff9543fdSMichal Nazarewicz { 1200e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1201e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1202e1409c32SJoonsoo Kim unsigned long low_pfn; 1203edc2ca61SVlastimil Babka struct page *page; 1204edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 12055bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 1206edc2ca61SVlastimil Babka (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1207ff9543fdSMichal Nazarewicz 1208edc2ca61SVlastimil Babka /* 1209edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 1210edc2ca61SVlastimil Babka * initialized by compact_zone() 1211edc2ca61SVlastimil Babka */ 1212edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 121306b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 1214e1409c32SJoonsoo Kim if (block_start_pfn < zone->zone_start_pfn) 1215e1409c32SJoonsoo Kim block_start_pfn = zone->zone_start_pfn; 1216ff9543fdSMichal Nazarewicz 1217ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 121806b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1219ff9543fdSMichal Nazarewicz 1220edc2ca61SVlastimil Babka /* 1221edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1222edc2ca61SVlastimil Babka * Do not cross the free scanner. 
1223edc2ca61SVlastimil Babka */ 1224e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 1225e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1226e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1227e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1228edc2ca61SVlastimil Babka 1229edc2ca61SVlastimil Babka /* 1230edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1231edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1232edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 1233edc2ca61SVlastimil Babka */ 1234edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1235edc2ca61SVlastimil Babka && compact_should_abort(cc)) 1236edc2ca61SVlastimil Babka break; 1237edc2ca61SVlastimil Babka 1238e1409c32SJoonsoo Kim page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 1239e1409c32SJoonsoo Kim zone); 12407d49d886SVlastimil Babka if (!page) 1241edc2ca61SVlastimil Babka continue; 1242edc2ca61SVlastimil Babka 1243edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 1244edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 1245edc2ca61SVlastimil Babka continue; 1246edc2ca61SVlastimil Babka 1247edc2ca61SVlastimil Babka /* 1248edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 1249edc2ca61SVlastimil Babka * Async compaction is optimistic to see if the minimum amount 1250edc2ca61SVlastimil Babka * of work satisfies the allocation. 1251edc2ca61SVlastimil Babka */ 1252edc2ca61SVlastimil Babka if (cc->mode == MIGRATE_ASYNC && 1253edc2ca61SVlastimil Babka !migrate_async_suitable(get_pageblock_migratetype(page))) 1254edc2ca61SVlastimil Babka continue; 1255ff9543fdSMichal Nazarewicz 1256ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1257e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1258e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1259edc2ca61SVlastimil Babka 1260ff59909aSHugh Dickins if (!low_pfn || cc->contended) { 1261ff59909aSHugh Dickins acct_isolated(zone, cc); 1262ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1263ff59909aSHugh Dickins } 1264ff9543fdSMichal Nazarewicz 1265edc2ca61SVlastimil Babka /* 1266edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1267edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1268edc2ca61SVlastimil Babka * continue or not. 1269edc2ca61SVlastimil Babka */ 1270edc2ca61SVlastimil Babka break; 1271edc2ca61SVlastimil Babka } 1272edc2ca61SVlastimil Babka 1273edc2ca61SVlastimil Babka acct_isolated(zone, cc); 1274f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1275f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1276ff9543fdSMichal Nazarewicz 1277edc2ca61SVlastimil Babka return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; 1278ff9543fdSMichal Nazarewicz } 1279ff9543fdSMichal Nazarewicz 128021c527a3SYaowei Bai /* 128121c527a3SYaowei Bai * order == -1 is expected when compacting via 128221c527a3SYaowei Bai * /proc/sys/vm/compact_memory 128321c527a3SYaowei Bai */ 128421c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 128521c527a3SYaowei Bai { 128621c527a3SYaowei Bai return order == -1; 128721c527a3SYaowei Bai } 128821c527a3SYaowei Bai 1289ea7ab982SMichal Hocko static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc, 12906d7ce559SDavid Rientjes const int migratetype) 1291748446bbSMel Gorman { 12928fb74b9fSMel Gorman unsigned int order; 12935a03b051SAndrea Arcangeli unsigned long watermark; 129456de7263SMel Gorman 1295be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 12962d1e1041SVlastimil Babka return COMPACT_CONTENDED; 1297748446bbSMel Gorman 1298753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1299f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 130055b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 130102333641SVlastimil Babka reset_cached_positions(zone); 130255b7c4c9SVlastimil Babka 130362997027SMel Gorman /* 130462997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1305accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 130662997027SMel Gorman * flag itself as the decision to be clear should be directly 130762997027SMel Gorman * based on an allocation request. 130862997027SMel Gorman */ 1309accf6242SVlastimil Babka if (cc->direct_compaction) 131062997027SMel Gorman zone->compact_blockskip_flush = true; 131162997027SMel Gorman 1312c8f7de0bSMichal Hocko if (cc->whole_zone) 1313748446bbSMel Gorman return COMPACT_COMPLETE; 1314c8f7de0bSMichal Hocko else 1315c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1316bb13ffebSMel Gorman } 1317748446bbSMel Gorman 131821c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 131956de7263SMel Gorman return COMPACT_CONTINUE; 132056de7263SMel Gorman 13213957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 13223957c776SMichal Hocko watermark = low_wmark_pages(zone); 13233957c776SMichal Hocko 1324ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, 1325ebff3980SVlastimil Babka cc->alloc_flags)) 13263957c776SMichal Hocko return COMPACT_CONTINUE; 13273957c776SMichal Hocko 132856de7263SMel Gorman /* Direct compactor: Is a suitable page free? 
*/ 132956de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 13308fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 13312149cdaeSJoonsoo Kim bool can_steal; 13328fb74b9fSMel Gorman 133356de7263SMel Gorman /* Job done if page is free of the right migratetype */ 13346d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 133556de7263SMel Gorman return COMPACT_PARTIAL; 133656de7263SMel Gorman 13372149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 13382149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 13392149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 13402149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 13412149cdaeSJoonsoo Kim return COMPACT_PARTIAL; 13422149cdaeSJoonsoo Kim #endif 13432149cdaeSJoonsoo Kim /* 13442149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 13452149cdaeSJoonsoo Kim * other migratetype buddy lists. 13462149cdaeSJoonsoo Kim */ 13472149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 13482149cdaeSJoonsoo Kim true, &can_steal) != -1) 134956de7263SMel Gorman return COMPACT_PARTIAL; 135056de7263SMel Gorman } 135156de7263SMel Gorman 1352837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE; 1353837d026dSJoonsoo Kim } 1354837d026dSJoonsoo Kim 1355ea7ab982SMichal Hocko static enum compact_result compact_finished(struct zone *zone, 1356ea7ab982SMichal Hocko struct compact_control *cc, 1357837d026dSJoonsoo Kim const int migratetype) 1358837d026dSJoonsoo Kim { 1359837d026dSJoonsoo Kim int ret; 1360837d026dSJoonsoo Kim 1361837d026dSJoonsoo Kim ret = __compact_finished(zone, cc, migratetype); 1362837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret); 1363837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1364837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1365837d026dSJoonsoo Kim 1366837d026dSJoonsoo Kim return ret; 1367748446bbSMel Gorman } 1368748446bbSMel Gorman 13693e7d3449SMel Gorman /* 13703e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 13713e7d3449SMel Gorman * Returns 13723e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 13733e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 13743e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 13753e7d3449SMel Gorman */ 1376ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 1377c603844bSMel Gorman unsigned int alloc_flags, 137886a294a8SMichal Hocko int classzone_idx, 137986a294a8SMichal Hocko unsigned long wmark_target) 13803e7d3449SMel Gorman { 13813e7d3449SMel Gorman int fragindex; 13823e7d3449SMel Gorman unsigned long watermark; 13833e7d3449SMel Gorman 138421c527a3SYaowei Bai if (is_via_compact_memory(order)) 13853957c776SMichal Hocko return COMPACT_CONTINUE; 13863957c776SMichal Hocko 1387ebff3980SVlastimil Babka watermark = low_wmark_pages(zone); 1388ebff3980SVlastimil Babka /* 1389ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1390ebff3980SVlastimil Babka * should be no need for compaction at all. 1391ebff3980SVlastimil Babka */ 1392ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1393ebff3980SVlastimil Babka alloc_flags)) 1394ebff3980SVlastimil Babka return COMPACT_PARTIAL; 1395ebff3980SVlastimil Babka 13963957c776SMichal Hocko /* 13973e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. 
Note the 2UL. 13983e7d3449SMel Gorman * This is because during migration, copies of pages need to be 13993e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 14003e7d3449SMel Gorman */ 1401ebff3980SVlastimil Babka watermark += (2UL << order); 140286a294a8SMichal Hocko if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 140386a294a8SMichal Hocko alloc_flags, wmark_target)) 14043e7d3449SMel Gorman return COMPACT_SKIPPED; 14053e7d3449SMel Gorman 14063e7d3449SMel Gorman /* 14073e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 14083e7d3449SMel Gorman * low memory or external fragmentation 14093e7d3449SMel Gorman * 1410ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1411ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 14123e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 14133e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 14143e7d3449SMel Gorman * 14153e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 14163e7d3449SMel Gorman */ 14173e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 14183e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1419837d026dSJoonsoo Kim return COMPACT_NOT_SUITABLE_ZONE; 14203e7d3449SMel Gorman 14213e7d3449SMel Gorman return COMPACT_CONTINUE; 14223e7d3449SMel Gorman } 14233e7d3449SMel Gorman 1424ea7ab982SMichal Hocko enum compact_result compaction_suitable(struct zone *zone, int order, 1425c603844bSMel Gorman unsigned int alloc_flags, 1426c603844bSMel Gorman int classzone_idx) 1427837d026dSJoonsoo Kim { 1428ea7ab982SMichal Hocko enum compact_result ret; 1429837d026dSJoonsoo Kim 143086a294a8SMichal Hocko ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, 143186a294a8SMichal Hocko zone_page_state(zone, NR_FREE_PAGES)); 1432837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1433837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1434837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1435837d026dSJoonsoo Kim 1436837d026dSJoonsoo Kim return ret; 1437837d026dSJoonsoo Kim } 1438837d026dSJoonsoo Kim 143986a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 144086a294a8SMichal Hocko int alloc_flags) 144186a294a8SMichal Hocko { 144286a294a8SMichal Hocko struct zone *zone; 144386a294a8SMichal Hocko struct zoneref *z; 144486a294a8SMichal Hocko 144586a294a8SMichal Hocko /* 144686a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 144786a294a8SMichal Hocko * retrying the reclaim. 144886a294a8SMichal Hocko */ 144986a294a8SMichal Hocko for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 145086a294a8SMichal Hocko ac->nodemask) { 145186a294a8SMichal Hocko unsigned long available; 145286a294a8SMichal Hocko enum compact_result compact_result; 145386a294a8SMichal Hocko 145486a294a8SMichal Hocko /* 145586a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 145686a294a8SMichal Hocko * want to trash just for a single high order allocation which 145786a294a8SMichal Hocko * is even not guaranteed to appear even if __compaction_suitable 145886a294a8SMichal Hocko * is happy about the watermark check. 
145986a294a8SMichal Hocko */ 146086a294a8SMichal Hocko available = zone_reclaimable_pages(zone) / order; 146186a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 146286a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 146386a294a8SMichal Hocko ac_classzone_idx(ac), available); 146486a294a8SMichal Hocko if (compact_result != COMPACT_SKIPPED && 146586a294a8SMichal Hocko compact_result != COMPACT_NOT_SUITABLE_ZONE) 146686a294a8SMichal Hocko return true; 146786a294a8SMichal Hocko } 146886a294a8SMichal Hocko 146986a294a8SMichal Hocko return false; 147086a294a8SMichal Hocko } 147186a294a8SMichal Hocko 1472ea7ab982SMichal Hocko static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) 1473748446bbSMel Gorman { 1474ea7ab982SMichal Hocko enum compact_result ret; 1475c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1476108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 14776d7ce559SDavid Rientjes const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); 1478e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1479748446bbSMel Gorman 1480ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1481ebff3980SVlastimil Babka cc->classzone_idx); 14823e7d3449SMel Gorman /* Compaction is likely to fail */ 1483c46649deSMichal Hocko if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED) 14843e7d3449SMel Gorman return ret; 1485c46649deSMichal Hocko 1486c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 1487c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 14883e7d3449SMel Gorman 1489c89511abSMel Gorman /* 1490d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1491accf6242SVlastimil Babka * is about to be retried after being deferred. 1492d3132e4bSVlastimil Babka */ 1493accf6242SVlastimil Babka if (compaction_restarting(zone, cc->order)) 1494d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1495d3132e4bSVlastimil Babka 1496d3132e4bSVlastimil Babka /* 1497c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 1498c89511abSMel Gorman * information on where the scanners should start but check that it 1499c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 
1500c89511abSMel Gorman */ 1501e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1502c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1503623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 150406b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 1505c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1506c89511abSMel Gorman } 1507623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 1508c89511abSMel Gorman cc->migrate_pfn = start_pfn; 150935979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 151035979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1511c89511abSMel Gorman } 1512c8f7de0bSMichal Hocko 1513c8f7de0bSMichal Hocko if (cc->migrate_pfn == start_pfn) 1514c8f7de0bSMichal Hocko cc->whole_zone = true; 1515c8f7de0bSMichal Hocko 15161a16718cSJoonsoo Kim cc->last_migrated_pfn = 0; 1517748446bbSMel Gorman 151816c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 151916c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 15200eb927c0SMel Gorman 1521748446bbSMel Gorman migrate_prep_local(); 1522748446bbSMel Gorman 15236d7ce559SDavid Rientjes while ((ret = compact_finished(zone, cc, migratetype)) == 15246d7ce559SDavid Rientjes COMPACT_CONTINUE) { 15259d502c1cSMinchan Kim int err; 1526748446bbSMel Gorman 1527f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1528f9e35b3bSMel Gorman case ISOLATE_ABORT: 15292d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 15305733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1531e64c5237SShaohua Li cc->nr_migratepages = 0; 1532f9e35b3bSMel Gorman goto out; 1533f9e35b3bSMel Gorman case ISOLATE_NONE: 1534fdaf7f5cSVlastimil Babka /* 1535fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 1536fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 1537fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 1538fdaf7f5cSVlastimil Babka */ 1539fdaf7f5cSVlastimil Babka goto check_drain; 1540f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1541f9e35b3bSMel Gorman ; 1542f9e35b3bSMel Gorman } 1543748446bbSMel Gorman 1544d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1545e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 15467b2a2d4aSMel Gorman MR_COMPACTION); 1547748446bbSMel Gorman 1548f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1549f8c9301fSVlastimil Babka &cc->migratepages); 1550748446bbSMel Gorman 1551f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1552f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 15539d502c1cSMinchan Kim if (err) { 15545733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 15557ed695e0SVlastimil Babka /* 15567ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 15577ed695e0SVlastimil Babka * and we want compact_finished() to detect it 15587ed695e0SVlastimil Babka */ 1559f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 15602d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 15614bf2bba3SDavid Rientjes goto out; 1562748446bbSMel Gorman } 1563fdd048e1SVlastimil Babka /* 1564fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 1565fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 
1566fdd048e1SVlastimil Babka */ 1567fdd048e1SVlastimil Babka if (cc->direct_compaction && 1568fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 1569fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 1570fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 1571fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 1572fdd048e1SVlastimil Babka cc->last_migrated_pfn = 0; 1573fdd048e1SVlastimil Babka 1574fdd048e1SVlastimil Babka } 15754bf2bba3SDavid Rientjes } 1576fdaf7f5cSVlastimil Babka 1577fdaf7f5cSVlastimil Babka check_drain: 1578fdaf7f5cSVlastimil Babka /* 1579fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 1580fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 1581fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 1582fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 1583fdaf7f5cSVlastimil Babka * would succeed. 1584fdaf7f5cSVlastimil Babka */ 15851a16718cSJoonsoo Kim if (cc->order > 0 && cc->last_migrated_pfn) { 1586fdaf7f5cSVlastimil Babka int cpu; 1587fdaf7f5cSVlastimil Babka unsigned long current_block_start = 158806b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 1589fdaf7f5cSVlastimil Babka 15901a16718cSJoonsoo Kim if (cc->last_migrated_pfn < current_block_start) { 1591fdaf7f5cSVlastimil Babka cpu = get_cpu(); 1592fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 1593fdaf7f5cSVlastimil Babka drain_local_pages(zone); 1594fdaf7f5cSVlastimil Babka put_cpu(); 1595fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 15961a16718cSJoonsoo Kim cc->last_migrated_pfn = 0; 1597fdaf7f5cSVlastimil Babka } 1598fdaf7f5cSVlastimil Babka } 1599fdaf7f5cSVlastimil Babka 1600748446bbSMel Gorman } 1601748446bbSMel Gorman 1602f9e35b3bSMel Gorman out: 16036bace090SVlastimil Babka /* 16046bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 16056bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 16066bace090SVlastimil Babka */ 16076bace090SVlastimil Babka if (cc->nr_freepages > 0) { 16086bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 16096bace090SVlastimil Babka 16106bace090SVlastimil Babka cc->nr_freepages = 0; 16116bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 16126bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 161306b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 16146bace090SVlastimil Babka /* 16156bace090SVlastimil Babka * Only go back, not forward. 
The cached pfn might have been 16166bace090SVlastimil Babka * already reset to zone end in compact_finished() 16176bace090SVlastimil Babka */ 16186bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn) 16196bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn; 16206bace090SVlastimil Babka } 1621748446bbSMel Gorman 162216c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 162316c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 16240eb927c0SMel Gorman 16252d1e1041SVlastimil Babka if (ret == COMPACT_CONTENDED) 16262d1e1041SVlastimil Babka ret = COMPACT_PARTIAL; 16272d1e1041SVlastimil Babka 1628748446bbSMel Gorman return ret; 1629748446bbSMel Gorman } 163076ab0f53SMel Gorman 1631ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 1632ebff3980SVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1633c603844bSMel Gorman unsigned int alloc_flags, int classzone_idx) 163456de7263SMel Gorman { 1635ea7ab982SMichal Hocko enum compact_result ret; 163656de7263SMel Gorman struct compact_control cc = { 163756de7263SMel Gorman .nr_freepages = 0, 163856de7263SMel Gorman .nr_migratepages = 0, 163956de7263SMel Gorman .order = order, 16406d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 164156de7263SMel Gorman .zone = zone, 1642e0b9daebSDavid Rientjes .mode = mode, 1643ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 1644ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 1645accf6242SVlastimil Babka .direct_compaction = true, 164656de7263SMel Gorman }; 164756de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 164856de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 164956de7263SMel Gorman 1650e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1651e64c5237SShaohua Li 1652e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1653e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1654e64c5237SShaohua Li 1655e64c5237SShaohua Li *contended = cc.contended; 1656e64c5237SShaohua Li return ret; 165756de7263SMel Gorman } 165856de7263SMel Gorman 16595e771905SMel Gorman int sysctl_extfrag_threshold = 500; 16605e771905SMel Gorman 166156de7263SMel Gorman /** 166256de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 166356de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 16641a6d53a1SVlastimil Babka * @order: The order of the current allocation 16651a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 16661a6d53a1SVlastimil Babka * @ac: The context of current allocation 1667e0b9daebSDavid Rientjes * @mode: The migration mode for async, sync light, or sync migration 16681f9efdefSVlastimil Babka * @contended: Return value that determines if compaction was aborted due to 16691f9efdefSVlastimil Babka * need_resched() or lock contention 167056de7263SMel Gorman * 167156de7263SMel Gorman * This is the main entry point for direct page compaction. 
167256de7263SMel Gorman */ 1673ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 1674c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 16751a6d53a1SVlastimil Babka enum migrate_mode mode, int *contended) 167656de7263SMel Gorman { 167756de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 167856de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 167956de7263SMel Gorman struct zoneref *z; 168056de7263SMel Gorman struct zone *zone; 16811d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 16821f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ 16831f9efdefSVlastimil Babka 16841f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE; 168556de7263SMel Gorman 16864ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1687c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 168853853e2dSVlastimil Babka return COMPACT_SKIPPED; 168956de7263SMel Gorman 1690837d026dSJoonsoo Kim trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1691837d026dSJoonsoo Kim 169256de7263SMel Gorman /* Compact each zone in the list */ 16931a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 16941a6d53a1SVlastimil Babka ac->nodemask) { 1695ea7ab982SMichal Hocko enum compact_result status; 16961f9efdefSVlastimil Babka int zone_contended; 169756de7263SMel Gorman 16981d4746d3SMichal Hocko if (compaction_deferred(zone, order)) { 16991d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 170053853e2dSVlastimil Babka continue; 17011d4746d3SMichal Hocko } 170253853e2dSVlastimil Babka 1703e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode, 17041a6d53a1SVlastimil Babka &zone_contended, alloc_flags, 170593ea9964SMel Gorman ac_classzone_idx(ac)); 170656de7263SMel Gorman rc = max(status, rc); 17071f9efdefSVlastimil Babka /* 17081f9efdefSVlastimil Babka * It takes at least one zone that wasn't lock contended 17091f9efdefSVlastimil Babka * to clear all_zones_contended. 17101f9efdefSVlastimil Babka */ 17111f9efdefSVlastimil Babka all_zones_contended &= zone_contended; 171256de7263SMel Gorman 17133e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1714ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 171593ea9964SMel Gorman ac_classzone_idx(ac), alloc_flags)) { 171653853e2dSVlastimil Babka /* 171753853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 171853853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 171953853e2dSVlastimil Babka * will repeat this with true if allocation indeed 172053853e2dSVlastimil Babka * succeeds in this zone. 172153853e2dSVlastimil Babka */ 172253853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 17231f9efdefSVlastimil Babka /* 17241f9efdefSVlastimil Babka * It is possible that async compaction aborted due to 17251f9efdefSVlastimil Babka * need_resched() and the watermarks were ok thanks to 17261f9efdefSVlastimil Babka * somebody else freeing memory. The allocation can 17271f9efdefSVlastimil Babka * however still fail so we better signal the 17281f9efdefSVlastimil Babka * need_resched() contention anyway (this will not 17291f9efdefSVlastimil Babka * prevent the allocation attempt). 
17301f9efdefSVlastimil Babka */ 17311f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED) 17321f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 17331f9efdefSVlastimil Babka 17341f9efdefSVlastimil Babka goto break_loop; 17351f9efdefSVlastimil Babka } 17361f9efdefSVlastimil Babka 1737c8f7de0bSMichal Hocko if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE || 1738c8f7de0bSMichal Hocko status == COMPACT_PARTIAL_SKIPPED)) { 173953853e2dSVlastimil Babka /* 174053853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 174153853e2dSVlastimil Babka * so we defer compaction there. If it ends up 174253853e2dSVlastimil Babka * succeeding after all, it will be reset. 174353853e2dSVlastimil Babka */ 174453853e2dSVlastimil Babka defer_compaction(zone, order); 174553853e2dSVlastimil Babka } 17461f9efdefSVlastimil Babka 17471f9efdefSVlastimil Babka /* 17481f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 17491f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 17501f9efdefSVlastimil Babka * case do not try further zones and signal need_resched() 17511f9efdefSVlastimil Babka * contention. 17521f9efdefSVlastimil Babka */ 17531f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED) 17541f9efdefSVlastimil Babka || fatal_signal_pending(current)) { 17551f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 17561f9efdefSVlastimil Babka goto break_loop; 175756de7263SMel Gorman } 175856de7263SMel Gorman 17591f9efdefSVlastimil Babka continue; 17601f9efdefSVlastimil Babka break_loop: 17611f9efdefSVlastimil Babka /* 17621f9efdefSVlastimil Babka * We might not have tried all the zones, so be conservative 17631f9efdefSVlastimil Babka * and assume they are not all lock contended. 17641f9efdefSVlastimil Babka */ 17651f9efdefSVlastimil Babka all_zones_contended = 0; 17661f9efdefSVlastimil Babka break; 17671f9efdefSVlastimil Babka } 17681f9efdefSVlastimil Babka 17691f9efdefSVlastimil Babka /* 17701f9efdefSVlastimil Babka * If at least one zone wasn't deferred or skipped, we report if all 17711f9efdefSVlastimil Babka * zones that were tried were lock contended. 
17721f9efdefSVlastimil Babka */ 17731d4746d3SMichal Hocko if (rc > COMPACT_INACTIVE && all_zones_contended) 17741f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK; 17751f9efdefSVlastimil Babka 177656de7263SMel Gorman return rc; 177756de7263SMel Gorman } 177856de7263SMel Gorman 177956de7263SMel Gorman 178076ab0f53SMel Gorman /* Compact all zones within a node */ 17817103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 178276ab0f53SMel Gorman { 178376ab0f53SMel Gorman int zoneid; 178476ab0f53SMel Gorman struct zone *zone; 178576ab0f53SMel Gorman 178676ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 178776ab0f53SMel Gorman 178876ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 178976ab0f53SMel Gorman if (!populated_zone(zone)) 179076ab0f53SMel Gorman continue; 179176ab0f53SMel Gorman 17927be62de9SRik van Riel cc->nr_freepages = 0; 17937be62de9SRik van Riel cc->nr_migratepages = 0; 17947be62de9SRik van Riel cc->zone = zone; 17957be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 17967be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 179776ab0f53SMel Gorman 1798195b0c60SGioh Kim /* 1799195b0c60SGioh Kim * When called via /proc/sys/vm/compact_memory 1800195b0c60SGioh Kim * this makes sure we compact the whole zone regardless of 1801195b0c60SGioh Kim * cached scanner positions. 1802195b0c60SGioh Kim */ 180321c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 1804195b0c60SGioh Kim __reset_isolation_suitable(zone); 1805195b0c60SGioh Kim 180621c527a3SYaowei Bai if (is_via_compact_memory(cc->order) || 180721c527a3SYaowei Bai !compaction_deferred(zone, cc->order)) 18087be62de9SRik van Riel compact_zone(zone, cc); 180976ab0f53SMel Gorman 181075469345SJoonsoo Kim VM_BUG_ON(!list_empty(&cc->freepages)); 181175469345SJoonsoo Kim VM_BUG_ON(!list_empty(&cc->migratepages)); 181275469345SJoonsoo Kim 181375469345SJoonsoo Kim if (is_via_compact_memory(cc->order)) 181475469345SJoonsoo Kim continue; 181575469345SJoonsoo Kim 1816de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1817de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1818de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1819aff62249SRik van Riel } 182076ab0f53SMel Gorman } 182176ab0f53SMel Gorman 18227103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 18237be62de9SRik van Riel { 18247be62de9SRik van Riel struct compact_control cc = { 18257be62de9SRik van Riel .order = order, 1826e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC, 18277be62de9SRik van Riel }; 18287be62de9SRik van Riel 18293a7200afSMel Gorman if (!order) 18303a7200afSMel Gorman return; 18313a7200afSMel Gorman 18327103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 18337be62de9SRik van Riel } 18347be62de9SRik van Riel 18357103f16dSAndrew Morton static void compact_node(int nid) 18367be62de9SRik van Riel { 18377be62de9SRik van Riel struct compact_control cc = { 18387be62de9SRik van Riel .order = -1, 1839e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 184091ca9186SDavid Rientjes .ignore_skip_hint = true, 18417be62de9SRik van Riel }; 18427be62de9SRik van Riel 18437103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 18447be62de9SRik van Riel } 18457be62de9SRik van Riel 184676ab0f53SMel Gorman /* Compact all nodes in the system */ 18477964c06dSJason Liu static void compact_nodes(void) 184876ab0f53SMel Gorman { 184976ab0f53SMel Gorman int nid; 185076ab0f53SMel Gorman 18518575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 
18528575ec29SHugh Dickins lru_add_drain_all(); 18538575ec29SHugh Dickins 185476ab0f53SMel Gorman for_each_online_node(nid) 185576ab0f53SMel Gorman compact_node(nid); 185676ab0f53SMel Gorman } 185776ab0f53SMel Gorman 185876ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 185976ab0f53SMel Gorman int sysctl_compact_memory; 186076ab0f53SMel Gorman 1861fec4eb2cSYaowei Bai /* 1862fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 1863fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 1864fec4eb2cSYaowei Bai */ 186576ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 186676ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 186776ab0f53SMel Gorman { 186876ab0f53SMel Gorman if (write) 18697964c06dSJason Liu compact_nodes(); 187076ab0f53SMel Gorman 187176ab0f53SMel Gorman return 0; 187276ab0f53SMel Gorman } 1873ed4a6d7fSMel Gorman 18745e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 18755e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 18765e771905SMel Gorman { 18775e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 18785e771905SMel Gorman 18795e771905SMel Gorman return 0; 18805e771905SMel Gorman } 18815e771905SMel Gorman 1882ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 188374e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 188410fbcf4cSKay Sievers struct device_attribute *attr, 1885ed4a6d7fSMel Gorman const char *buf, size_t count) 1886ed4a6d7fSMel Gorman { 18878575ec29SHugh Dickins int nid = dev->id; 18888575ec29SHugh Dickins 18898575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 18908575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 18918575ec29SHugh Dickins lru_add_drain_all(); 18928575ec29SHugh Dickins 18938575ec29SHugh Dickins compact_node(nid); 18948575ec29SHugh Dickins } 1895ed4a6d7fSMel Gorman 1896ed4a6d7fSMel Gorman return count; 1897ed4a6d7fSMel Gorman } 189810fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1899ed4a6d7fSMel Gorman 1900ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1901ed4a6d7fSMel Gorman { 190210fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1903ed4a6d7fSMel Gorman } 1904ed4a6d7fSMel Gorman 1905ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1906ed4a6d7fSMel Gorman { 190710fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1908ed4a6d7fSMel Gorman } 1909ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1910ff9543fdSMichal Nazarewicz 1911698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 1912698b1b30SVlastimil Babka { 1913172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 1914698b1b30SVlastimil Babka } 1915698b1b30SVlastimil Babka 1916698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 1917698b1b30SVlastimil Babka { 1918698b1b30SVlastimil Babka int zoneid; 1919698b1b30SVlastimil Babka struct zone *zone; 1920698b1b30SVlastimil Babka enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 1921698b1b30SVlastimil Babka 19226cd9dc3eSChen Feng for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { 1923698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1924698b1b30SVlastimil Babka 1925698b1b30SVlastimil Babka if (!populated_zone(zone)) 
1926698b1b30SVlastimil Babka continue; 1927698b1b30SVlastimil Babka 1928698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 1929698b1b30SVlastimil Babka classzone_idx) == COMPACT_CONTINUE) 1930698b1b30SVlastimil Babka return true; 1931698b1b30SVlastimil Babka } 1932698b1b30SVlastimil Babka 1933698b1b30SVlastimil Babka return false; 1934698b1b30SVlastimil Babka } 1935698b1b30SVlastimil Babka 1936698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 1937698b1b30SVlastimil Babka { 1938698b1b30SVlastimil Babka /* 1939698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 1940698b1b30SVlastimil Babka * order is allocatable. 1941698b1b30SVlastimil Babka */ 1942698b1b30SVlastimil Babka int zoneid; 1943698b1b30SVlastimil Babka struct zone *zone; 1944698b1b30SVlastimil Babka struct compact_control cc = { 1945698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 1946698b1b30SVlastimil Babka .classzone_idx = pgdat->kcompactd_classzone_idx, 1947698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 1948698b1b30SVlastimil Babka .ignore_skip_hint = true, 1949698b1b30SVlastimil Babka 1950698b1b30SVlastimil Babka }; 1951698b1b30SVlastimil Babka bool success = false; 1952698b1b30SVlastimil Babka 1953698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 1954698b1b30SVlastimil Babka cc.classzone_idx); 1955698b1b30SVlastimil Babka count_vm_event(KCOMPACTD_WAKE); 1956698b1b30SVlastimil Babka 19576cd9dc3eSChen Feng for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { 1958698b1b30SVlastimil Babka int status; 1959698b1b30SVlastimil Babka 1960698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1961698b1b30SVlastimil Babka if (!populated_zone(zone)) 1962698b1b30SVlastimil Babka continue; 1963698b1b30SVlastimil Babka 1964698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 1965698b1b30SVlastimil Babka continue; 1966698b1b30SVlastimil Babka 1967698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 1968698b1b30SVlastimil Babka COMPACT_CONTINUE) 1969698b1b30SVlastimil Babka continue; 1970698b1b30SVlastimil Babka 1971698b1b30SVlastimil Babka cc.nr_freepages = 0; 1972698b1b30SVlastimil Babka cc.nr_migratepages = 0; 1973698b1b30SVlastimil Babka cc.zone = zone; 1974698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 1975698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 1976698b1b30SVlastimil Babka 1977172400c6SVlastimil Babka if (kthread_should_stop()) 1978172400c6SVlastimil Babka return; 1979698b1b30SVlastimil Babka status = compact_zone(zone, &cc); 1980698b1b30SVlastimil Babka 1981698b1b30SVlastimil Babka if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone), 1982698b1b30SVlastimil Babka cc.classzone_idx, 0)) { 1983698b1b30SVlastimil Babka success = true; 1984698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 1985c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 1986698b1b30SVlastimil Babka /* 1987698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 1988698b1b30SVlastimil Babka * sync direct compaction does. 
1989698b1b30SVlastimil Babka */ 1990698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 1991698b1b30SVlastimil Babka } 1992698b1b30SVlastimil Babka 1993698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 1994698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 1995698b1b30SVlastimil Babka } 1996698b1b30SVlastimil Babka 1997698b1b30SVlastimil Babka /* 1998698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 1999698b1b30SVlastimil Babka * the requested order/classzone_idx in case it was higher/tighter than 2000698b1b30SVlastimil Babka * our current ones 2001698b1b30SVlastimil Babka */ 2002698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2003698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2004698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 2005698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2006698b1b30SVlastimil Babka } 2007698b1b30SVlastimil Babka 2008698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 2009698b1b30SVlastimil Babka { 2010698b1b30SVlastimil Babka if (!order) 2011698b1b30SVlastimil Babka return; 2012698b1b30SVlastimil Babka 2013698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2014698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2015698b1b30SVlastimil Babka 2016698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx > classzone_idx) 2017698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = classzone_idx; 2018698b1b30SVlastimil Babka 2019698b1b30SVlastimil Babka if (!waitqueue_active(&pgdat->kcompactd_wait)) 2020698b1b30SVlastimil Babka return; 2021698b1b30SVlastimil Babka 2022698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2023698b1b30SVlastimil Babka return; 2024698b1b30SVlastimil Babka 2025698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2026698b1b30SVlastimil Babka classzone_idx); 2027698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2028698b1b30SVlastimil Babka } 2029698b1b30SVlastimil Babka 2030698b1b30SVlastimil Babka /* 2031698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2032698b1b30SVlastimil Babka * from the init process. 
2033698b1b30SVlastimil Babka */ 2034698b1b30SVlastimil Babka static int kcompactd(void *p) 2035698b1b30SVlastimil Babka { 2036698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p; 2037698b1b30SVlastimil Babka struct task_struct *tsk = current; 2038698b1b30SVlastimil Babka 2039698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2040698b1b30SVlastimil Babka 2041698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 2042698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 2043698b1b30SVlastimil Babka 2044698b1b30SVlastimil Babka set_freezable(); 2045698b1b30SVlastimil Babka 2046698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2047698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2048698b1b30SVlastimil Babka 2049698b1b30SVlastimil Babka while (!kthread_should_stop()) { 2050698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2051698b1b30SVlastimil Babka wait_event_freezable(pgdat->kcompactd_wait, 2052698b1b30SVlastimil Babka kcompactd_work_requested(pgdat)); 2053698b1b30SVlastimil Babka 2054698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 2055698b1b30SVlastimil Babka } 2056698b1b30SVlastimil Babka 2057698b1b30SVlastimil Babka return 0; 2058698b1b30SVlastimil Babka } 2059698b1b30SVlastimil Babka 2060698b1b30SVlastimil Babka /* 2061698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 2062698b1b30SVlastimil Babka * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added. 2063698b1b30SVlastimil Babka */ 2064698b1b30SVlastimil Babka int kcompactd_run(int nid) 2065698b1b30SVlastimil Babka { 2066698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2067698b1b30SVlastimil Babka int ret = 0; 2068698b1b30SVlastimil Babka 2069698b1b30SVlastimil Babka if (pgdat->kcompactd) 2070698b1b30SVlastimil Babka return 0; 2071698b1b30SVlastimil Babka 2072698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 2073698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 2074698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 2075698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd); 2076698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 2077698b1b30SVlastimil Babka } 2078698b1b30SVlastimil Babka return ret; 2079698b1b30SVlastimil Babka } 2080698b1b30SVlastimil Babka 2081698b1b30SVlastimil Babka /* 2082698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 2083698b1b30SVlastimil Babka * hold mem_hotplug_begin/end(). 2084698b1b30SVlastimil Babka */ 2085698b1b30SVlastimil Babka void kcompactd_stop(int nid) 2086698b1b30SVlastimil Babka { 2087698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 2088698b1b30SVlastimil Babka 2089698b1b30SVlastimil Babka if (kcompactd) { 2090698b1b30SVlastimil Babka kthread_stop(kcompactd); 2091698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 2092698b1b30SVlastimil Babka } 2093698b1b30SVlastimil Babka } 2094698b1b30SVlastimil Babka 2095698b1b30SVlastimil Babka /* 2096698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but 2097698b1b30SVlastimil Babka * not required for correctness. So if the last cpu in a node goes 2098698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2099698b1b30SVlastimil Babka * restore their cpu bindings. 
2100698b1b30SVlastimil Babka */ 2101698b1b30SVlastimil Babka static int cpu_callback(struct notifier_block *nfb, unsigned long action, 2102698b1b30SVlastimil Babka void *hcpu) 2103698b1b30SVlastimil Babka { 2104698b1b30SVlastimil Babka int nid; 2105698b1b30SVlastimil Babka 2106698b1b30SVlastimil Babka if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 2107698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2108698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2109698b1b30SVlastimil Babka const struct cpumask *mask; 2110698b1b30SVlastimil Babka 2111698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2112698b1b30SVlastimil Babka 2113698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2114698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2115698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2116698b1b30SVlastimil Babka } 2117698b1b30SVlastimil Babka } 2118698b1b30SVlastimil Babka return NOTIFY_OK; 2119698b1b30SVlastimil Babka } 2120698b1b30SVlastimil Babka 2121698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2122698b1b30SVlastimil Babka { 2123698b1b30SVlastimil Babka int nid; 2124698b1b30SVlastimil Babka 2125698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2126698b1b30SVlastimil Babka kcompactd_run(nid); 2127698b1b30SVlastimil Babka hotcpu_notifier(cpu_callback, 0); 2128698b1b30SVlastimil Babka return 0; 2129698b1b30SVlastimil Babka } 2130698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2131698b1b30SVlastimil Babka 2132ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 2133
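The sysctl and sysfs handlers above give userspace two manual compaction triggers: writing any value to /proc/sys/vm/compact_memory (handled by sysctl_compaction_handler(), which compacts every online node), and writing to the per-node compact attribute created by compaction_register_node(), which appears as /sys/devices/system/node/node<N>/compact and compacts only that node. The written value is ignored in both cases, and the files are root-writable. A minimal userspace sketch of poking these interfaces follows; the program structure and error handling are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Write "1" to a compaction trigger file; the written value is ignored. */
static int poke(const char *path)
{
	FILE *f = fopen(path, "w");
	int ret = 0;

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs("1", f) == EOF)
		ret = -1;
	if (fclose(f) == EOF)
		ret = -1;
	if (ret)
		perror(path);
	return ret;
}

int main(int argc, char **argv)
{
	if (argc > 1) {
		/* e.g. "./compact_node 0" compacts NUMA node 0 only */
		char path[128];

		snprintf(path, sizeof(path),
			 "/sys/devices/system/node/node%s/compact", argv[1]);
		return poke(path) ? EXIT_FAILURE : EXIT_SUCCESS;
	}

	/* No argument: compact all online nodes */
	return poke("/proc/sys/vm/compact_memory") ? EXIT_FAILURE : EXIT_SUCCESS;
}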
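One detail worth calling out from __compaction_suitable() earlier in this file: the order-0 check uses the low watermark raised by (2UL << order) pages, because page migration briefly needs free target pages for the copies. For an order-9 request (a 2 MiB allocation with 4 KiB pages) that is 1024 extra base pages, i.e. 4 MiB of slack. A standalone sketch of that arithmetic is below; compaction_headroom_pages() is a name made up for the example, not a kernel symbol.

#include <stdio.h>

/*
 * Mirrors the "watermark += (2UL << order)" adjustment made by
 * __compaction_suitable(): extra order-0 pages required beyond the
 * zone's low watermark before compaction is considered worthwhile.
 */
static unsigned long compaction_headroom_pages(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	unsigned int order;

	for (order = 0; order <= 10; order++)
		printf("order %2u: +%4lu pages (%5lu KiB with 4 KiB pages)\n",
		       order, compaction_headroom_pages(order),
		       compaction_headroom_pages(order) * 4);
	return 0;
}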