1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19b8c73fc2SAndrey Ryabinin #include <linux/kasan.h> 20748446bbSMel Gorman #include "internal.h" 21748446bbSMel Gorman 22010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 23010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 24010fc29aSMinchan Kim { 25010fc29aSMinchan Kim count_vm_event(item); 26010fc29aSMinchan Kim } 27010fc29aSMinchan Kim 28010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 29010fc29aSMinchan Kim { 30010fc29aSMinchan Kim count_vm_events(item, delta); 31010fc29aSMinchan Kim } 32010fc29aSMinchan Kim #else 33010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 34010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 35010fc29aSMinchan Kim #endif 36010fc29aSMinchan Kim 37ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 3816c4a097SJoonsoo Kim #ifdef CONFIG_TRACEPOINTS 3916c4a097SJoonsoo Kim static const char *const compaction_status_string[] = { 4016c4a097SJoonsoo Kim "deferred", 4116c4a097SJoonsoo Kim "skipped", 4216c4a097SJoonsoo Kim "continue", 4316c4a097SJoonsoo Kim "partial", 4416c4a097SJoonsoo Kim "complete", 45837d026dSJoonsoo Kim "no_suitable_page", 46837d026dSJoonsoo Kim "not_suitable_zone", 4716c4a097SJoonsoo Kim }; 4816c4a097SJoonsoo Kim #endif 49ff9543fdSMichal Nazarewicz 50b7aba698SMel Gorman #define CREATE_TRACE_POINTS 51b7aba698SMel Gorman #include <trace/events/compaction.h> 52b7aba698SMel Gorman 53748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 54748446bbSMel Gorman { 55748446bbSMel Gorman struct page *page, *next; 566bace090SVlastimil Babka unsigned long high_pfn = 0; 57748446bbSMel Gorman 58748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 596bace090SVlastimil Babka unsigned long pfn = page_to_pfn(page); 60748446bbSMel Gorman list_del(&page->lru); 61748446bbSMel Gorman __free_page(page); 626bace090SVlastimil Babka if (pfn > high_pfn) 636bace090SVlastimil Babka high_pfn = pfn; 64748446bbSMel Gorman } 65748446bbSMel Gorman 666bace090SVlastimil Babka return high_pfn; 67748446bbSMel Gorman } 68748446bbSMel Gorman 69ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 70ff9543fdSMichal Nazarewicz { 71ff9543fdSMichal Nazarewicz struct page *page; 72ff9543fdSMichal Nazarewicz 73ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 74ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 75ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 76b8c73fc2SAndrey Ryabinin kasan_alloc_pages(page, 0); 77ff9543fdSMichal Nazarewicz } 78ff9543fdSMichal Nazarewicz } 79ff9543fdSMichal Nazarewicz 8047118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 8147118af0SMichal Nazarewicz { 8247118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 8347118af0SMichal Nazarewicz } 8447118af0SMichal Nazarewicz 857d49d886SVlastimil Babka /* 867d49d886SVlastimil Babka * Check that the whole (or subset of) a pageblock given by the interval of 877d49d886SVlastimil Babka * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 887d49d886SVlastimil Babka * with the migration of free compaction scanner. The scanners then need to 897d49d886SVlastimil Babka * use only pfn_valid_within() check for arches that allow holes within 907d49d886SVlastimil Babka * pageblocks. 917d49d886SVlastimil Babka * 927d49d886SVlastimil Babka * Return struct page pointer of start_pfn, or NULL if checks were not passed. 937d49d886SVlastimil Babka * 947d49d886SVlastimil Babka * It's possible on some configurations to have a setup like node0 node1 node0 957d49d886SVlastimil Babka * i.e. it's possible that all pages within a zones range of pages do not 967d49d886SVlastimil Babka * belong to a single zone. We assume that a border between node0 and node1 977d49d886SVlastimil Babka * can occur within a single pageblock, but not a node0 node1 node0 987d49d886SVlastimil Babka * interleaving within a single pageblock. It is therefore sufficient to check 997d49d886SVlastimil Babka * the first and last page of a pageblock and avoid checking each individual 1007d49d886SVlastimil Babka * page in a pageblock. 1017d49d886SVlastimil Babka */ 1027d49d886SVlastimil Babka static struct page *pageblock_pfn_to_page(unsigned long start_pfn, 1037d49d886SVlastimil Babka unsigned long end_pfn, struct zone *zone) 1047d49d886SVlastimil Babka { 1057d49d886SVlastimil Babka struct page *start_page; 1067d49d886SVlastimil Babka struct page *end_page; 1077d49d886SVlastimil Babka 1087d49d886SVlastimil Babka /* end_pfn is one past the range we are checking */ 1097d49d886SVlastimil Babka end_pfn--; 1107d49d886SVlastimil Babka 1117d49d886SVlastimil Babka if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 1127d49d886SVlastimil Babka return NULL; 1137d49d886SVlastimil Babka 1147d49d886SVlastimil Babka start_page = pfn_to_page(start_pfn); 1157d49d886SVlastimil Babka 1167d49d886SVlastimil Babka if (page_zone(start_page) != zone) 1177d49d886SVlastimil Babka return NULL; 1187d49d886SVlastimil Babka 1197d49d886SVlastimil Babka end_page = pfn_to_page(end_pfn); 1207d49d886SVlastimil Babka 1217d49d886SVlastimil Babka /* This gives a shorter code than deriving page_zone(end_page) */ 1227d49d886SVlastimil Babka if (page_zone_id(start_page) != page_zone_id(end_page)) 1237d49d886SVlastimil Babka return NULL; 1247d49d886SVlastimil Babka 1257d49d886SVlastimil Babka return start_page; 1267d49d886SVlastimil Babka } 1277d49d886SVlastimil Babka 128bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 12924e2716fSJoonsoo Kim 13024e2716fSJoonsoo Kim /* Do not skip compaction more than 64 times */ 13124e2716fSJoonsoo Kim #define COMPACT_MAX_DEFER_SHIFT 6 13224e2716fSJoonsoo Kim 13324e2716fSJoonsoo Kim /* 13424e2716fSJoonsoo Kim * Compaction is deferred when compaction fails to result in a page 13524e2716fSJoonsoo Kim * allocation success. 1 << compact_defer_limit compactions are skipped up 13624e2716fSJoonsoo Kim * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT 13724e2716fSJoonsoo Kim */ 13824e2716fSJoonsoo Kim void defer_compaction(struct zone *zone, int order) 13924e2716fSJoonsoo Kim { 14024e2716fSJoonsoo Kim zone->compact_considered = 0; 14124e2716fSJoonsoo Kim zone->compact_defer_shift++; 14224e2716fSJoonsoo Kim 14324e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 14424e2716fSJoonsoo Kim zone->compact_order_failed = order; 14524e2716fSJoonsoo Kim 14624e2716fSJoonsoo Kim if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) 14724e2716fSJoonsoo Kim zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; 14824e2716fSJoonsoo Kim 14924e2716fSJoonsoo Kim trace_mm_compaction_defer_compaction(zone, order); 15024e2716fSJoonsoo Kim } 15124e2716fSJoonsoo Kim 15224e2716fSJoonsoo Kim /* Returns true if compaction should be skipped this time */ 15324e2716fSJoonsoo Kim bool compaction_deferred(struct zone *zone, int order) 15424e2716fSJoonsoo Kim { 15524e2716fSJoonsoo Kim unsigned long defer_limit = 1UL << zone->compact_defer_shift; 15624e2716fSJoonsoo Kim 15724e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 15824e2716fSJoonsoo Kim return false; 15924e2716fSJoonsoo Kim 16024e2716fSJoonsoo Kim /* Avoid possible overflow */ 16124e2716fSJoonsoo Kim if (++zone->compact_considered > defer_limit) 16224e2716fSJoonsoo Kim zone->compact_considered = defer_limit; 16324e2716fSJoonsoo Kim 16424e2716fSJoonsoo Kim if (zone->compact_considered >= defer_limit) 16524e2716fSJoonsoo Kim return false; 16624e2716fSJoonsoo Kim 16724e2716fSJoonsoo Kim trace_mm_compaction_deferred(zone, order); 16824e2716fSJoonsoo Kim 16924e2716fSJoonsoo Kim return true; 17024e2716fSJoonsoo Kim } 17124e2716fSJoonsoo Kim 17224e2716fSJoonsoo Kim /* 17324e2716fSJoonsoo Kim * Update defer tracking counters after successful compaction of given order, 17424e2716fSJoonsoo Kim * which means an allocation either succeeded (alloc_success == true) or is 17524e2716fSJoonsoo Kim * expected to succeed. 17624e2716fSJoonsoo Kim */ 17724e2716fSJoonsoo Kim void compaction_defer_reset(struct zone *zone, int order, 17824e2716fSJoonsoo Kim bool alloc_success) 17924e2716fSJoonsoo Kim { 18024e2716fSJoonsoo Kim if (alloc_success) { 18124e2716fSJoonsoo Kim zone->compact_considered = 0; 18224e2716fSJoonsoo Kim zone->compact_defer_shift = 0; 18324e2716fSJoonsoo Kim } 18424e2716fSJoonsoo Kim if (order >= zone->compact_order_failed) 18524e2716fSJoonsoo Kim zone->compact_order_failed = order + 1; 18624e2716fSJoonsoo Kim 18724e2716fSJoonsoo Kim trace_mm_compaction_defer_reset(zone, order); 18824e2716fSJoonsoo Kim } 18924e2716fSJoonsoo Kim 19024e2716fSJoonsoo Kim /* Returns true if restarting compaction after many failures */ 19124e2716fSJoonsoo Kim bool compaction_restarting(struct zone *zone, int order) 19224e2716fSJoonsoo Kim { 19324e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 19424e2716fSJoonsoo Kim return false; 19524e2716fSJoonsoo Kim 19624e2716fSJoonsoo Kim return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && 19724e2716fSJoonsoo Kim zone->compact_considered >= 1UL << zone->compact_defer_shift; 19824e2716fSJoonsoo Kim } 19924e2716fSJoonsoo Kim 200bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. */ 201bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 202bb13ffebSMel Gorman struct page *page) 203bb13ffebSMel Gorman { 204bb13ffebSMel Gorman if (cc->ignore_skip_hint) 205bb13ffebSMel Gorman return true; 206bb13ffebSMel Gorman 207bb13ffebSMel Gorman return !get_pageblock_skip(page); 208bb13ffebSMel Gorman } 209bb13ffebSMel Gorman 210bb13ffebSMel Gorman /* 211bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 212bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 213bb13ffebSMel Gorman * meet. 214bb13ffebSMel Gorman */ 21562997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 216bb13ffebSMel Gorman { 217bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 218108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 219bb13ffebSMel Gorman unsigned long pfn; 220bb13ffebSMel Gorman 22135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = start_pfn; 22235979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = start_pfn; 223c89511abSMel Gorman zone->compact_cached_free_pfn = end_pfn; 22462997027SMel Gorman zone->compact_blockskip_flush = false; 225bb13ffebSMel Gorman 226bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 227bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 228bb13ffebSMel Gorman struct page *page; 229bb13ffebSMel Gorman 230bb13ffebSMel Gorman cond_resched(); 231bb13ffebSMel Gorman 232bb13ffebSMel Gorman if (!pfn_valid(pfn)) 233bb13ffebSMel Gorman continue; 234bb13ffebSMel Gorman 235bb13ffebSMel Gorman page = pfn_to_page(pfn); 236bb13ffebSMel Gorman if (zone != page_zone(page)) 237bb13ffebSMel Gorman continue; 238bb13ffebSMel Gorman 239bb13ffebSMel Gorman clear_pageblock_skip(page); 240bb13ffebSMel Gorman } 241bb13ffebSMel Gorman } 242bb13ffebSMel Gorman 24362997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 24462997027SMel Gorman { 24562997027SMel Gorman int zoneid; 24662997027SMel Gorman 24762997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 24862997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 24962997027SMel Gorman if (!populated_zone(zone)) 25062997027SMel Gorman continue; 25162997027SMel Gorman 25262997027SMel Gorman /* Only flush if a full compaction finished recently */ 25362997027SMel Gorman if (zone->compact_blockskip_flush) 25462997027SMel Gorman __reset_isolation_suitable(zone); 25562997027SMel Gorman } 25662997027SMel Gorman } 25762997027SMel Gorman 258bb13ffebSMel Gorman /* 259bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 26062997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 261bb13ffebSMel Gorman */ 262c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 263c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 264edc2ca61SVlastimil Babka bool migrate_scanner) 265bb13ffebSMel Gorman { 266c89511abSMel Gorman struct zone *zone = cc->zone; 26735979ef3SDavid Rientjes unsigned long pfn; 2686815bf3fSJoonsoo Kim 2696815bf3fSJoonsoo Kim if (cc->ignore_skip_hint) 2706815bf3fSJoonsoo Kim return; 2716815bf3fSJoonsoo Kim 272bb13ffebSMel Gorman if (!page) 273bb13ffebSMel Gorman return; 274bb13ffebSMel Gorman 27535979ef3SDavid Rientjes if (nr_isolated) 27635979ef3SDavid Rientjes return; 27735979ef3SDavid Rientjes 278bb13ffebSMel Gorman set_pageblock_skip(page); 279c89511abSMel Gorman 28035979ef3SDavid Rientjes pfn = page_to_pfn(page); 28135979ef3SDavid Rientjes 28235979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 283c89511abSMel Gorman if (migrate_scanner) { 28435979ef3SDavid Rientjes if (pfn > zone->compact_cached_migrate_pfn[0]) 28535979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = pfn; 286e0b9daebSDavid Rientjes if (cc->mode != MIGRATE_ASYNC && 287e0b9daebSDavid Rientjes pfn > zone->compact_cached_migrate_pfn[1]) 28835979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = pfn; 289c89511abSMel Gorman } else { 29035979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 291c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 292c89511abSMel Gorman } 293c89511abSMel Gorman } 294bb13ffebSMel Gorman #else 295bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 296bb13ffebSMel Gorman struct page *page) 297bb13ffebSMel Gorman { 298bb13ffebSMel Gorman return true; 299bb13ffebSMel Gorman } 300bb13ffebSMel Gorman 301c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 302c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 303edc2ca61SVlastimil Babka bool migrate_scanner) 304bb13ffebSMel Gorman { 305bb13ffebSMel Gorman } 306bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 307bb13ffebSMel Gorman 3081f9efdefSVlastimil Babka /* 3098b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 3108b44d279SVlastimil Babka * very heavily contended. For async compaction, back out if the lock cannot 3118b44d279SVlastimil Babka * be taken immediately. For sync compaction, spin on the lock if needed. 3128b44d279SVlastimil Babka * 3138b44d279SVlastimil Babka * Returns true if the lock is held 3148b44d279SVlastimil Babka * Returns false if the lock is not held and compaction should abort 3151f9efdefSVlastimil Babka */ 3168b44d279SVlastimil Babka static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, 3178b44d279SVlastimil Babka struct compact_control *cc) 3188b44d279SVlastimil Babka { 3198b44d279SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3208b44d279SVlastimil Babka if (!spin_trylock_irqsave(lock, *flags)) { 3218b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_LOCK; 3228b44d279SVlastimil Babka return false; 3238b44d279SVlastimil Babka } 3248b44d279SVlastimil Babka } else { 3258b44d279SVlastimil Babka spin_lock_irqsave(lock, *flags); 3268b44d279SVlastimil Babka } 3271f9efdefSVlastimil Babka 3288b44d279SVlastimil Babka return true; 3292a1402aaSMel Gorman } 3302a1402aaSMel Gorman 33185aa125fSMichal Nazarewicz /* 332c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 3338b44d279SVlastimil Babka * very heavily contended. The lock should be periodically unlocked to avoid 3348b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 3358b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 3368b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 3378b44d279SVlastimil Babka * aborts. Sync compaction schedules. 3388b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 3398b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 340c67fe375SMel Gorman * 3418b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 3428b44d279SVlastimil Babka * async compaction due to need_resched() 3438b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 3448b44d279SVlastimil Babka * scheduled) 345c67fe375SMel Gorman */ 3468b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 3478b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 348c67fe375SMel Gorman { 3498b44d279SVlastimil Babka if (*locked) { 3508b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 3518b44d279SVlastimil Babka *locked = false; 352c67fe375SMel Gorman } 353c67fe375SMel Gorman 3548b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 3558b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3568b44d279SVlastimil Babka return true; 3578b44d279SVlastimil Babka } 3588b44d279SVlastimil Babka 3598b44d279SVlastimil Babka if (need_resched()) { 360e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) { 3618b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3628b44d279SVlastimil Babka return true; 363c67fe375SMel Gorman } 364c67fe375SMel Gorman cond_resched(); 365c67fe375SMel Gorman } 366c67fe375SMel Gorman 3678b44d279SVlastimil Babka return false; 368c67fe375SMel Gorman } 369c67fe375SMel Gorman 370be976572SVlastimil Babka /* 371be976572SVlastimil Babka * Aside from avoiding lock contention, compaction also periodically checks 372be976572SVlastimil Babka * need_resched() and either schedules in sync compaction or aborts async 3738b44d279SVlastimil Babka * compaction. This is similar to what compact_unlock_should_abort() does, but 374be976572SVlastimil Babka * is used where no lock is concerned. 375be976572SVlastimil Babka * 376be976572SVlastimil Babka * Returns false when no scheduling was needed, or sync compaction scheduled. 377be976572SVlastimil Babka * Returns true when async compaction should abort. 378be976572SVlastimil Babka */ 379be976572SVlastimil Babka static inline bool compact_should_abort(struct compact_control *cc) 380be976572SVlastimil Babka { 381be976572SVlastimil Babka /* async compaction aborts if contended */ 382be976572SVlastimil Babka if (need_resched()) { 383be976572SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3841f9efdefSVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 385be976572SVlastimil Babka return true; 386be976572SVlastimil Babka } 387be976572SVlastimil Babka 388be976572SVlastimil Babka cond_resched(); 389be976572SVlastimil Babka } 390be976572SVlastimil Babka 391be976572SVlastimil Babka return false; 392be976572SVlastimil Babka } 393be976572SVlastimil Babka 394f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */ 395f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page) 396f40d1e42SMel Gorman { 3977d348b9eSJoonsoo Kim /* If the page is a large free page, then disallow migration */ 39899c0fd5eSVlastimil Babka if (PageBuddy(page)) { 39999c0fd5eSVlastimil Babka /* 40099c0fd5eSVlastimil Babka * We are checking page_order without zone->lock taken. But 40199c0fd5eSVlastimil Babka * the only small danger is that we skip a potentially suitable 40299c0fd5eSVlastimil Babka * pageblock, so it's not worth to check order for valid range. 40399c0fd5eSVlastimil Babka */ 40499c0fd5eSVlastimil Babka if (page_order_unsafe(page) >= pageblock_order) 4057d348b9eSJoonsoo Kim return false; 40699c0fd5eSVlastimil Babka } 407f40d1e42SMel Gorman 408f40d1e42SMel Gorman /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 4097d348b9eSJoonsoo Kim if (migrate_async_suitable(get_pageblock_migratetype(page))) 410f40d1e42SMel Gorman return true; 411f40d1e42SMel Gorman 412f40d1e42SMel Gorman /* Otherwise skip the block */ 413f40d1e42SMel Gorman return false; 414f40d1e42SMel Gorman } 415f40d1e42SMel Gorman 416c67fe375SMel Gorman /* 4179e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 4189e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 4199e4be470SJerome Marchand * (even though it may still end up isolating some pages). 42085aa125fSMichal Nazarewicz */ 421f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 422e14c720eSVlastimil Babka unsigned long *start_pfn, 42385aa125fSMichal Nazarewicz unsigned long end_pfn, 42485aa125fSMichal Nazarewicz struct list_head *freelist, 42585aa125fSMichal Nazarewicz bool strict) 426748446bbSMel Gorman { 427b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 428bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 429b8b2d825SXiubo Li unsigned long flags = 0; 430f40d1e42SMel Gorman bool locked = false; 431e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 432748446bbSMel Gorman 433748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 434748446bbSMel Gorman 435f40d1e42SMel Gorman /* Isolate free pages. */ 436748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 437748446bbSMel Gorman int isolated, i; 438748446bbSMel Gorman struct page *page = cursor; 439748446bbSMel Gorman 4408b44d279SVlastimil Babka /* 4418b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 4428b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort if fatal signal 4438b44d279SVlastimil Babka * pending or async compaction detects need_resched() 4448b44d279SVlastimil Babka */ 4458b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 4468b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 4478b44d279SVlastimil Babka &locked, cc)) 4488b44d279SVlastimil Babka break; 4498b44d279SVlastimil Babka 450b7aba698SMel Gorman nr_scanned++; 451f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 4522af120bcSLaura Abbott goto isolate_fail; 4532af120bcSLaura Abbott 454bb13ffebSMel Gorman if (!valid_page) 455bb13ffebSMel Gorman valid_page = page; 456f40d1e42SMel Gorman if (!PageBuddy(page)) 4572af120bcSLaura Abbott goto isolate_fail; 458f40d1e42SMel Gorman 459f40d1e42SMel Gorman /* 46069b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 46169b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 46269b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 46369b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 46469b7189fSVlastimil Babka * recheck as well. 46569b7189fSVlastimil Babka */ 46669b7189fSVlastimil Babka if (!locked) { 46769b7189fSVlastimil Babka /* 468f40d1e42SMel Gorman * The zone lock must be held to isolate freepages. 469f40d1e42SMel Gorman * Unfortunately this is a very coarse lock and can be 470f40d1e42SMel Gorman * heavily contended if there are parallel allocations 471f40d1e42SMel Gorman * or parallel compactions. For async compaction do not 472f40d1e42SMel Gorman * spin on the lock and we acquire the lock as late as 473f40d1e42SMel Gorman * possible. 474f40d1e42SMel Gorman */ 4758b44d279SVlastimil Babka locked = compact_trylock_irqsave(&cc->zone->lock, 4768b44d279SVlastimil Babka &flags, cc); 477f40d1e42SMel Gorman if (!locked) 478f40d1e42SMel Gorman break; 479f40d1e42SMel Gorman 480f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 481f40d1e42SMel Gorman if (!PageBuddy(page)) 4822af120bcSLaura Abbott goto isolate_fail; 48369b7189fSVlastimil Babka } 484748446bbSMel Gorman 485748446bbSMel Gorman /* Found a free page, break it into order-0 pages */ 486748446bbSMel Gorman isolated = split_free_page(page); 487748446bbSMel Gorman total_isolated += isolated; 488748446bbSMel Gorman for (i = 0; i < isolated; i++) { 489748446bbSMel Gorman list_add(&page->lru, freelist); 490748446bbSMel Gorman page++; 491748446bbSMel Gorman } 492748446bbSMel Gorman 493748446bbSMel Gorman /* If a page was split, advance to the end of it */ 494748446bbSMel Gorman if (isolated) { 495932ff6bbSJoonsoo Kim cc->nr_freepages += isolated; 496932ff6bbSJoonsoo Kim if (!strict && 497932ff6bbSJoonsoo Kim cc->nr_migratepages <= cc->nr_freepages) { 498932ff6bbSJoonsoo Kim blockpfn += isolated; 499932ff6bbSJoonsoo Kim break; 500932ff6bbSJoonsoo Kim } 501932ff6bbSJoonsoo Kim 502748446bbSMel Gorman blockpfn += isolated - 1; 503748446bbSMel Gorman cursor += isolated - 1; 5042af120bcSLaura Abbott continue; 505748446bbSMel Gorman } 5062af120bcSLaura Abbott 5072af120bcSLaura Abbott isolate_fail: 5082af120bcSLaura Abbott if (strict) 5092af120bcSLaura Abbott break; 5102af120bcSLaura Abbott else 5112af120bcSLaura Abbott continue; 5122af120bcSLaura Abbott 513748446bbSMel Gorman } 514748446bbSMel Gorman 515e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 516e34d85f0SJoonsoo Kim nr_scanned, total_isolated); 517e34d85f0SJoonsoo Kim 518e14c720eSVlastimil Babka /* Record how far we have got within the block */ 519e14c720eSVlastimil Babka *start_pfn = blockpfn; 520e14c720eSVlastimil Babka 521f40d1e42SMel Gorman /* 522f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 523f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 524f40d1e42SMel Gorman * returned and CMA will fail. 525f40d1e42SMel Gorman */ 5262af120bcSLaura Abbott if (strict && blockpfn < end_pfn) 527f40d1e42SMel Gorman total_isolated = 0; 528f40d1e42SMel Gorman 529f40d1e42SMel Gorman if (locked) 530f40d1e42SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 531f40d1e42SMel Gorman 532bb13ffebSMel Gorman /* Update the pageblock-skip if the whole pageblock was scanned */ 533bb13ffebSMel Gorman if (blockpfn == end_pfn) 534edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, total_isolated, false); 535bb13ffebSMel Gorman 536010fc29aSMinchan Kim count_compact_events(COMPACTFREE_SCANNED, nr_scanned); 537397487dbSMel Gorman if (total_isolated) 538010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated); 539748446bbSMel Gorman return total_isolated; 540748446bbSMel Gorman } 541748446bbSMel Gorman 54285aa125fSMichal Nazarewicz /** 54385aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 54485aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 54585aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 54685aa125fSMichal Nazarewicz * 54785aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the 54885aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors, cause function to 54985aa125fSMichal Nazarewicz * undo its actions and return zero. 55085aa125fSMichal Nazarewicz * 55185aa125fSMichal Nazarewicz * Otherwise, function returns one-past-the-last PFN of isolated page 55285aa125fSMichal Nazarewicz * (which may be greater then end_pfn if end fell in a middle of 55385aa125fSMichal Nazarewicz * a free page). 55485aa125fSMichal Nazarewicz */ 555ff9543fdSMichal Nazarewicz unsigned long 556bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc, 557bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn) 55885aa125fSMichal Nazarewicz { 559f40d1e42SMel Gorman unsigned long isolated, pfn, block_end_pfn; 56085aa125fSMichal Nazarewicz LIST_HEAD(freelist); 56185aa125fSMichal Nazarewicz 5627d49d886SVlastimil Babka pfn = start_pfn; 56385aa125fSMichal Nazarewicz block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 5647d49d886SVlastimil Babka 5657d49d886SVlastimil Babka for (; pfn < end_pfn; pfn += isolated, 5667d49d886SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 567e14c720eSVlastimil Babka /* Protect pfn from changing by isolate_freepages_block */ 568e14c720eSVlastimil Babka unsigned long isolate_start_pfn = pfn; 5697d49d886SVlastimil Babka 57085aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn); 57185aa125fSMichal Nazarewicz 57258420016SJoonsoo Kim /* 57358420016SJoonsoo Kim * pfn could pass the block_end_pfn if isolated freepage 57458420016SJoonsoo Kim * is more than pageblock order. In this case, we adjust 57558420016SJoonsoo Kim * scanning range to right one. 57658420016SJoonsoo Kim */ 57758420016SJoonsoo Kim if (pfn >= block_end_pfn) { 57858420016SJoonsoo Kim block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 57958420016SJoonsoo Kim block_end_pfn = min(block_end_pfn, end_pfn); 58058420016SJoonsoo Kim } 58158420016SJoonsoo Kim 5827d49d886SVlastimil Babka if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 5837d49d886SVlastimil Babka break; 5847d49d886SVlastimil Babka 585e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 586e14c720eSVlastimil Babka block_end_pfn, &freelist, true); 58785aa125fSMichal Nazarewicz 58885aa125fSMichal Nazarewicz /* 58985aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 59085aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or 59185aa125fSMichal Nazarewicz * non-free pages). 59285aa125fSMichal Nazarewicz */ 59385aa125fSMichal Nazarewicz if (!isolated) 59485aa125fSMichal Nazarewicz break; 59585aa125fSMichal Nazarewicz 59685aa125fSMichal Nazarewicz /* 59785aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 59885aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order 59985aa125fSMichal Nazarewicz * page may span two pageblocks). 60085aa125fSMichal Nazarewicz */ 60185aa125fSMichal Nazarewicz } 60285aa125fSMichal Nazarewicz 60385aa125fSMichal Nazarewicz /* split_free_page does not map the pages */ 60485aa125fSMichal Nazarewicz map_pages(&freelist); 60585aa125fSMichal Nazarewicz 60685aa125fSMichal Nazarewicz if (pfn < end_pfn) { 60785aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 60885aa125fSMichal Nazarewicz release_freepages(&freelist); 60985aa125fSMichal Nazarewicz return 0; 61085aa125fSMichal Nazarewicz } 61185aa125fSMichal Nazarewicz 61285aa125fSMichal Nazarewicz /* We don't use freelists for anything. */ 61385aa125fSMichal Nazarewicz return pfn; 61485aa125fSMichal Nazarewicz } 61585aa125fSMichal Nazarewicz 616748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */ 617edc2ca61SVlastimil Babka static void acct_isolated(struct zone *zone, struct compact_control *cc) 618748446bbSMel Gorman { 619748446bbSMel Gorman struct page *page; 620b9e84ac1SMinchan Kim unsigned int count[2] = { 0, }; 621748446bbSMel Gorman 622edc2ca61SVlastimil Babka if (list_empty(&cc->migratepages)) 623edc2ca61SVlastimil Babka return; 624edc2ca61SVlastimil Babka 625b9e84ac1SMinchan Kim list_for_each_entry(page, &cc->migratepages, lru) 626b9e84ac1SMinchan Kim count[!!page_is_file_cache(page)]++; 627748446bbSMel Gorman 628c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); 629c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); 630c67fe375SMel Gorman } 631748446bbSMel Gorman 632748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 633748446bbSMel Gorman static bool too_many_isolated(struct zone *zone) 634748446bbSMel Gorman { 635bc693045SMinchan Kim unsigned long active, inactive, isolated; 636748446bbSMel Gorman 637748446bbSMel Gorman inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 638748446bbSMel Gorman zone_page_state(zone, NR_INACTIVE_ANON); 639bc693045SMinchan Kim active = zone_page_state(zone, NR_ACTIVE_FILE) + 640bc693045SMinchan Kim zone_page_state(zone, NR_ACTIVE_ANON); 641748446bbSMel Gorman isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 642748446bbSMel Gorman zone_page_state(zone, NR_ISOLATED_ANON); 643748446bbSMel Gorman 644bc693045SMinchan Kim return isolated > (inactive + active) / 2; 645748446bbSMel Gorman } 646748446bbSMel Gorman 6472fe86e00SMichal Nazarewicz /** 648edc2ca61SVlastimil Babka * isolate_migratepages_block() - isolate all migrate-able pages within 649edc2ca61SVlastimil Babka * a single pageblock 6502fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 651edc2ca61SVlastimil Babka * @low_pfn: The first PFN to isolate 652edc2ca61SVlastimil Babka * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 653edc2ca61SVlastimil Babka * @isolate_mode: Isolation mode to be used. 6542fe86e00SMichal Nazarewicz * 6552fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 656edc2ca61SVlastimil Babka * [low_pfn, end_pfn). The range is expected to be within same pageblock. 657edc2ca61SVlastimil Babka * Returns zero if there is a fatal signal pending, otherwise PFN of the 658edc2ca61SVlastimil Babka * first page that was not scanned (which may be both less, equal to or more 659edc2ca61SVlastimil Babka * than end_pfn). 6602fe86e00SMichal Nazarewicz * 661edc2ca61SVlastimil Babka * The pages are isolated on cc->migratepages list (not required to be empty), 662edc2ca61SVlastimil Babka * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field 663edc2ca61SVlastimil Babka * is neither read nor updated. 664748446bbSMel Gorman */ 665edc2ca61SVlastimil Babka static unsigned long 666edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 667edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 668748446bbSMel Gorman { 669edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 670b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 671748446bbSMel Gorman struct list_head *migratelist = &cc->migratepages; 672fa9add64SHugh Dickins struct lruvec *lruvec; 673b8b2d825SXiubo Li unsigned long flags = 0; 6742a1402aaSMel Gorman bool locked = false; 675bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 676e34d85f0SJoonsoo Kim unsigned long start_pfn = low_pfn; 677748446bbSMel Gorman 678748446bbSMel Gorman /* 679748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 680748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 681748446bbSMel Gorman * delay for some time until fewer pages are isolated 682748446bbSMel Gorman */ 683748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 684f9e35b3bSMel Gorman /* async migration should just abort */ 685e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 6862fe86e00SMichal Nazarewicz return 0; 687f9e35b3bSMel Gorman 688748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 689748446bbSMel Gorman 690748446bbSMel Gorman if (fatal_signal_pending(current)) 6912fe86e00SMichal Nazarewicz return 0; 692748446bbSMel Gorman } 693748446bbSMel Gorman 694be976572SVlastimil Babka if (compact_should_abort(cc)) 695aeef4b83SDavid Rientjes return 0; 696aeef4b83SDavid Rientjes 697748446bbSMel Gorman /* Time to isolate some pages for migration */ 698748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 6998b44d279SVlastimil Babka /* 7008b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 7018b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort async compaction 7028b44d279SVlastimil Babka * if contended. 7038b44d279SVlastimil Babka */ 7048b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 7058b44d279SVlastimil Babka && compact_unlock_should_abort(&zone->lru_lock, flags, 7068b44d279SVlastimil Babka &locked, cc)) 7078b44d279SVlastimil Babka break; 708b2eef8c0SAndrea Arcangeli 709748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 710748446bbSMel Gorman continue; 711b7aba698SMel Gorman nr_scanned++; 712748446bbSMel Gorman 713748446bbSMel Gorman page = pfn_to_page(low_pfn); 714dc908600SMel Gorman 715bb13ffebSMel Gorman if (!valid_page) 716bb13ffebSMel Gorman valid_page = page; 717bb13ffebSMel Gorman 718c122b208SJoonsoo Kim /* 71999c0fd5eSVlastimil Babka * Skip if free. We read page order here without zone lock 72099c0fd5eSVlastimil Babka * which is generally unsafe, but the race window is small and 72199c0fd5eSVlastimil Babka * the worst thing that can happen is that we skip some 72299c0fd5eSVlastimil Babka * potential isolation targets. 7236c14466cSMel Gorman */ 72499c0fd5eSVlastimil Babka if (PageBuddy(page)) { 72599c0fd5eSVlastimil Babka unsigned long freepage_order = page_order_unsafe(page); 72699c0fd5eSVlastimil Babka 72799c0fd5eSVlastimil Babka /* 72899c0fd5eSVlastimil Babka * Without lock, we cannot be sure that what we got is 72999c0fd5eSVlastimil Babka * a valid page order. Consider only values in the 73099c0fd5eSVlastimil Babka * valid order range to prevent low_pfn overflow. 73199c0fd5eSVlastimil Babka */ 73299c0fd5eSVlastimil Babka if (freepage_order > 0 && freepage_order < MAX_ORDER) 73399c0fd5eSVlastimil Babka low_pfn += (1UL << freepage_order) - 1; 734748446bbSMel Gorman continue; 73599c0fd5eSVlastimil Babka } 736748446bbSMel Gorman 7379927af74SMel Gorman /* 738bf6bddf1SRafael Aquini * Check may be lockless but that's ok as we recheck later. 739bf6bddf1SRafael Aquini * It's possible to migrate LRU pages and balloon pages 740bf6bddf1SRafael Aquini * Skip any other type of page 741bf6bddf1SRafael Aquini */ 742bf6bddf1SRafael Aquini if (!PageLRU(page)) { 743bf6bddf1SRafael Aquini if (unlikely(balloon_page_movable(page))) { 744d6d86c0aSKonstantin Khlebnikov if (balloon_page_isolate(page)) { 745bf6bddf1SRafael Aquini /* Successfully isolated */ 746b6c75016SJoonsoo Kim goto isolate_success; 747bf6bddf1SRafael Aquini } 748bf6bddf1SRafael Aquini } 749bc835011SAndrea Arcangeli continue; 750bf6bddf1SRafael Aquini } 751bc835011SAndrea Arcangeli 752bc835011SAndrea Arcangeli /* 7532a1402aaSMel Gorman * PageLRU is set. lru_lock normally excludes isolation 7542a1402aaSMel Gorman * splitting and collapsing (collapsing has already happened 7552a1402aaSMel Gorman * if PageLRU is set) but the lock is not necessarily taken 7562a1402aaSMel Gorman * here and it is wasteful to take it just to check transhuge. 7572a1402aaSMel Gorman * Check TransHuge without lock and skip the whole pageblock if 7582a1402aaSMel Gorman * it's either a transhuge or hugetlbfs page, as calling 7592a1402aaSMel Gorman * compound_order() without preventing THP from splitting the 7602a1402aaSMel Gorman * page underneath us may return surprising results. 761bc835011SAndrea Arcangeli */ 762bc835011SAndrea Arcangeli if (PageTransHuge(page)) { 7632a1402aaSMel Gorman if (!locked) 764edc2ca61SVlastimil Babka low_pfn = ALIGN(low_pfn + 1, 765edc2ca61SVlastimil Babka pageblock_nr_pages) - 1; 766edc2ca61SVlastimil Babka else 7672a1402aaSMel Gorman low_pfn += (1 << compound_order(page)) - 1; 768edc2ca61SVlastimil Babka 7692a1402aaSMel Gorman continue; 7702a1402aaSMel Gorman } 7712a1402aaSMel Gorman 772119d6d59SDavid Rientjes /* 773119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 774119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 775119d6d59SDavid Rientjes * admittedly racy check. 776119d6d59SDavid Rientjes */ 777119d6d59SDavid Rientjes if (!page_mapping(page) && 778119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 779119d6d59SDavid Rientjes continue; 780119d6d59SDavid Rientjes 78169b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 78269b7189fSVlastimil Babka if (!locked) { 7838b44d279SVlastimil Babka locked = compact_trylock_irqsave(&zone->lru_lock, 7848b44d279SVlastimil Babka &flags, cc); 7858b44d279SVlastimil Babka if (!locked) 7862a1402aaSMel Gorman break; 7872a1402aaSMel Gorman 7882a1402aaSMel Gorman /* Recheck PageLRU and PageTransHuge under lock */ 7892a1402aaSMel Gorman if (!PageLRU(page)) 7902a1402aaSMel Gorman continue; 7912a1402aaSMel Gorman if (PageTransHuge(page)) { 792bc835011SAndrea Arcangeli low_pfn += (1 << compound_order(page)) - 1; 793bc835011SAndrea Arcangeli continue; 794bc835011SAndrea Arcangeli } 79569b7189fSVlastimil Babka } 796bc835011SAndrea Arcangeli 797fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 798fa9add64SHugh Dickins 799748446bbSMel Gorman /* Try isolate the page */ 800edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 801748446bbSMel Gorman continue; 802748446bbSMel Gorman 803309381feSSasha Levin VM_BUG_ON_PAGE(PageTransCompound(page), page); 804bc835011SAndrea Arcangeli 805748446bbSMel Gorman /* Successfully isolated */ 806fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 807b6c75016SJoonsoo Kim 808b6c75016SJoonsoo Kim isolate_success: 809748446bbSMel Gorman list_add(&page->lru, migratelist); 810748446bbSMel Gorman cc->nr_migratepages++; 811b7aba698SMel Gorman nr_isolated++; 812748446bbSMel Gorman 813748446bbSMel Gorman /* Avoid isolating too much */ 81431b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 81531b8384aSHillf Danton ++low_pfn; 816748446bbSMel Gorman break; 817748446bbSMel Gorman } 81831b8384aSHillf Danton } 819748446bbSMel Gorman 82099c0fd5eSVlastimil Babka /* 82199c0fd5eSVlastimil Babka * The PageBuddy() check could have potentially brought us outside 82299c0fd5eSVlastimil Babka * the range to be scanned. 82399c0fd5eSVlastimil Babka */ 82499c0fd5eSVlastimil Babka if (unlikely(low_pfn > end_pfn)) 82599c0fd5eSVlastimil Babka low_pfn = end_pfn; 82699c0fd5eSVlastimil Babka 827c67fe375SMel Gorman if (locked) 828c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags); 829748446bbSMel Gorman 83050b5b094SVlastimil Babka /* 83150b5b094SVlastimil Babka * Update the pageblock-skip information and cached scanner pfn, 83250b5b094SVlastimil Babka * if the whole pageblock was scanned without isolating any page. 83350b5b094SVlastimil Babka */ 83435979ef3SDavid Rientjes if (low_pfn == end_pfn) 835edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, nr_isolated, true); 836bb13ffebSMel Gorman 837e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 838e34d85f0SJoonsoo Kim nr_scanned, nr_isolated); 839b7aba698SMel Gorman 840010fc29aSMinchan Kim count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned); 841397487dbSMel Gorman if (nr_isolated) 842010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 843397487dbSMel Gorman 8442fe86e00SMichal Nazarewicz return low_pfn; 8452fe86e00SMichal Nazarewicz } 8462fe86e00SMichal Nazarewicz 847edc2ca61SVlastimil Babka /** 848edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 849edc2ca61SVlastimil Babka * @cc: Compaction control structure. 850edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating. 851edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN. 852edc2ca61SVlastimil Babka * 853edc2ca61SVlastimil Babka * Returns zero if isolation fails fatally due to e.g. pending signal. 854edc2ca61SVlastimil Babka * Otherwise, function returns one-past-the-last PFN of isolated page 855edc2ca61SVlastimil Babka * (which may be greater than end_pfn if end fell in a middle of a THP page). 856edc2ca61SVlastimil Babka */ 857edc2ca61SVlastimil Babka unsigned long 858edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 859edc2ca61SVlastimil Babka unsigned long end_pfn) 860edc2ca61SVlastimil Babka { 861edc2ca61SVlastimil Babka unsigned long pfn, block_end_pfn; 862edc2ca61SVlastimil Babka 863edc2ca61SVlastimil Babka /* Scan block by block. First and last block may be incomplete */ 864edc2ca61SVlastimil Babka pfn = start_pfn; 865edc2ca61SVlastimil Babka block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 866edc2ca61SVlastimil Babka 867edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 868edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 869edc2ca61SVlastimil Babka 870edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 871edc2ca61SVlastimil Babka 8727d49d886SVlastimil Babka if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 873edc2ca61SVlastimil Babka continue; 874edc2ca61SVlastimil Babka 875edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 876edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 877edc2ca61SVlastimil Babka 878edc2ca61SVlastimil Babka /* 879edc2ca61SVlastimil Babka * In case of fatal failure, release everything that might 880edc2ca61SVlastimil Babka * have been isolated in the previous iteration, and signal 881edc2ca61SVlastimil Babka * the failure back to caller. 882edc2ca61SVlastimil Babka */ 883edc2ca61SVlastimil Babka if (!pfn) { 884edc2ca61SVlastimil Babka putback_movable_pages(&cc->migratepages); 885edc2ca61SVlastimil Babka cc->nr_migratepages = 0; 886edc2ca61SVlastimil Babka break; 887edc2ca61SVlastimil Babka } 8886ea41c0cSJoonsoo Kim 8896ea41c0cSJoonsoo Kim if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) 8906ea41c0cSJoonsoo Kim break; 891edc2ca61SVlastimil Babka } 892edc2ca61SVlastimil Babka acct_isolated(cc->zone, cc); 893edc2ca61SVlastimil Babka 894edc2ca61SVlastimil Babka return pfn; 895edc2ca61SVlastimil Babka } 896edc2ca61SVlastimil Babka 897ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 898ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 899ff9543fdSMichal Nazarewicz /* 900ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 901ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 902ff9543fdSMichal Nazarewicz */ 903edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 904ff9543fdSMichal Nazarewicz { 905edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 906ff9543fdSMichal Nazarewicz struct page *page; 907c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 908e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 909c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 910c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 911ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 9122fe86e00SMichal Nazarewicz 913ff9543fdSMichal Nazarewicz /* 914ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 91549e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 916e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 917e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 918c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 919c96b9e50SVlastimil Babka * For ending point, take care when isolating in last pageblock of a 920c96b9e50SVlastimil Babka * a zone which ends in the middle of a pageblock. 92149e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 92249e068f0SVlastimil Babka * is using. 923ff9543fdSMichal Nazarewicz */ 924e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 925c96b9e50SVlastimil Babka block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 926c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 927c96b9e50SVlastimil Babka zone_end_pfn(zone)); 9287ed695e0SVlastimil Babka low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 9292fe86e00SMichal Nazarewicz 930ff9543fdSMichal Nazarewicz /* 931ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 932ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 933ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 934ff9543fdSMichal Nazarewicz */ 935932ff6bbSJoonsoo Kim for (; block_start_pfn >= low_pfn && 936932ff6bbSJoonsoo Kim cc->nr_migratepages > cc->nr_freepages; 937c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 938e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 939e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 940ff9543fdSMichal Nazarewicz 941f6ea3adbSDavid Rientjes /* 942f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 943f6ea3adbSDavid Rientjes * suitable migration targets, so periodically check if we need 944be976572SVlastimil Babka * to schedule, or even abort async compaction. 945f6ea3adbSDavid Rientjes */ 946be976572SVlastimil Babka if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 947be976572SVlastimil Babka && compact_should_abort(cc)) 948be976572SVlastimil Babka break; 949f6ea3adbSDavid Rientjes 9507d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 9517d49d886SVlastimil Babka zone); 9527d49d886SVlastimil Babka if (!page) 953ff9543fdSMichal Nazarewicz continue; 954ff9543fdSMichal Nazarewicz 955ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 95668e3e926SLinus Torvalds if (!suitable_migration_target(page)) 957ff9543fdSMichal Nazarewicz continue; 95868e3e926SLinus Torvalds 959bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 960bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 961bb13ffebSMel Gorman continue; 962bb13ffebSMel Gorman 963e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 964932ff6bbSJoonsoo Kim isolate_freepages_block(cc, &isolate_start_pfn, 965c96b9e50SVlastimil Babka block_end_pfn, freelist, false); 966ff9543fdSMichal Nazarewicz 967ff9543fdSMichal Nazarewicz /* 968e14c720eSVlastimil Babka * Remember where the free scanner should restart next time, 969e14c720eSVlastimil Babka * which is where isolate_freepages_block() left off. 970e14c720eSVlastimil Babka * But if it scanned the whole pageblock, isolate_start_pfn 971e14c720eSVlastimil Babka * now points at block_end_pfn, which is the start of the next 972e14c720eSVlastimil Babka * pageblock. 973e14c720eSVlastimil Babka * In that case we will however want to restart at the start 974e14c720eSVlastimil Babka * of the previous pageblock. 975e14c720eSVlastimil Babka */ 976e14c720eSVlastimil Babka cc->free_pfn = (isolate_start_pfn < block_end_pfn) ? 977e14c720eSVlastimil Babka isolate_start_pfn : 978e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 979e14c720eSVlastimil Babka 980e14c720eSVlastimil Babka /* 981be976572SVlastimil Babka * isolate_freepages_block() might have aborted due to async 982be976572SVlastimil Babka * compaction being contended 983be976572SVlastimil Babka */ 984be976572SVlastimil Babka if (cc->contended) 985be976572SVlastimil Babka break; 986c89511abSMel Gorman } 987ff9543fdSMichal Nazarewicz 988ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 989ff9543fdSMichal Nazarewicz map_pages(freelist); 990ff9543fdSMichal Nazarewicz 9917ed695e0SVlastimil Babka /* 9927ed695e0SVlastimil Babka * If we crossed the migrate scanner, we want to keep it that way 9937ed695e0SVlastimil Babka * so that compact_finished() may detect this 9947ed695e0SVlastimil Babka */ 995c96b9e50SVlastimil Babka if (block_start_pfn < low_pfn) 996e9ade569SVlastimil Babka cc->free_pfn = cc->migrate_pfn; 997748446bbSMel Gorman } 998748446bbSMel Gorman 999748446bbSMel Gorman /* 1000748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1001748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1002748446bbSMel Gorman */ 1003748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1004748446bbSMel Gorman unsigned long data, 1005748446bbSMel Gorman int **result) 1006748446bbSMel Gorman { 1007748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1008748446bbSMel Gorman struct page *freepage; 1009748446bbSMel Gorman 1010be976572SVlastimil Babka /* 1011be976572SVlastimil Babka * Isolate free pages if necessary, and if we are not aborting due to 1012be976572SVlastimil Babka * contention. 1013be976572SVlastimil Babka */ 1014748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1015be976572SVlastimil Babka if (!cc->contended) 1016edc2ca61SVlastimil Babka isolate_freepages(cc); 1017748446bbSMel Gorman 1018748446bbSMel Gorman if (list_empty(&cc->freepages)) 1019748446bbSMel Gorman return NULL; 1020748446bbSMel Gorman } 1021748446bbSMel Gorman 1022748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1023748446bbSMel Gorman list_del(&freepage->lru); 1024748446bbSMel Gorman cc->nr_freepages--; 1025748446bbSMel Gorman 1026748446bbSMel Gorman return freepage; 1027748446bbSMel Gorman } 1028748446bbSMel Gorman 1029748446bbSMel Gorman /* 1030d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1031d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1032d53aea3dSDavid Rientjes * special handling needed for NUMA. 1033d53aea3dSDavid Rientjes */ 1034d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1035d53aea3dSDavid Rientjes { 1036d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1037d53aea3dSDavid Rientjes 1038d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1039d53aea3dSDavid Rientjes cc->nr_freepages++; 1040d53aea3dSDavid Rientjes } 1041d53aea3dSDavid Rientjes 1042ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1043ff9543fdSMichal Nazarewicz typedef enum { 1044ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1045ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1046ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1047ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1048ff9543fdSMichal Nazarewicz 1049ff9543fdSMichal Nazarewicz /* 1050edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1051edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1052edc2ca61SVlastimil Babka * compact_control. 1053ff9543fdSMichal Nazarewicz */ 1054ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1055ff9543fdSMichal Nazarewicz struct compact_control *cc) 1056ff9543fdSMichal Nazarewicz { 1057ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 1058edc2ca61SVlastimil Babka struct page *page; 1059edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 1060edc2ca61SVlastimil Babka (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1061ff9543fdSMichal Nazarewicz 1062edc2ca61SVlastimil Babka /* 1063edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 1064edc2ca61SVlastimil Babka * initialized by compact_zone() 1065edc2ca61SVlastimil Babka */ 1066edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 1067ff9543fdSMichal Nazarewicz 1068ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 1069a9aacbccSMel Gorman end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 1070ff9543fdSMichal Nazarewicz 1071edc2ca61SVlastimil Babka /* 1072edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1073edc2ca61SVlastimil Babka * Do not cross the free scanner. 1074edc2ca61SVlastimil Babka */ 1075edc2ca61SVlastimil Babka for (; end_pfn <= cc->free_pfn; 1076edc2ca61SVlastimil Babka low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { 1077edc2ca61SVlastimil Babka 1078edc2ca61SVlastimil Babka /* 1079edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1080edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1081edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 1082edc2ca61SVlastimil Babka */ 1083edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1084edc2ca61SVlastimil Babka && compact_should_abort(cc)) 1085edc2ca61SVlastimil Babka break; 1086edc2ca61SVlastimil Babka 10877d49d886SVlastimil Babka page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); 10887d49d886SVlastimil Babka if (!page) 1089edc2ca61SVlastimil Babka continue; 1090edc2ca61SVlastimil Babka 1091edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 1092edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 1093edc2ca61SVlastimil Babka continue; 1094edc2ca61SVlastimil Babka 1095edc2ca61SVlastimil Babka /* 1096edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 1097edc2ca61SVlastimil Babka * Async compaction is optimistic to see if the minimum amount 1098edc2ca61SVlastimil Babka * of work satisfies the allocation. 1099edc2ca61SVlastimil Babka */ 1100edc2ca61SVlastimil Babka if (cc->mode == MIGRATE_ASYNC && 1101edc2ca61SVlastimil Babka !migrate_async_suitable(get_pageblock_migratetype(page))) 1102edc2ca61SVlastimil Babka continue; 1103ff9543fdSMichal Nazarewicz 1104ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1105edc2ca61SVlastimil Babka low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, 1106edc2ca61SVlastimil Babka isolate_mode); 1107edc2ca61SVlastimil Babka 1108ff59909aSHugh Dickins if (!low_pfn || cc->contended) { 1109ff59909aSHugh Dickins acct_isolated(zone, cc); 1110ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1111ff59909aSHugh Dickins } 1112ff9543fdSMichal Nazarewicz 1113edc2ca61SVlastimil Babka /* 1114edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1115edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1116edc2ca61SVlastimil Babka * continue or not. 1117edc2ca61SVlastimil Babka */ 1118edc2ca61SVlastimil Babka break; 1119edc2ca61SVlastimil Babka } 1120edc2ca61SVlastimil Babka 1121edc2ca61SVlastimil Babka acct_isolated(zone, cc); 11221d5bfe1fSVlastimil Babka /* 11231d5bfe1fSVlastimil Babka * Record where migration scanner will be restarted. If we end up in 11241d5bfe1fSVlastimil Babka * the same pageblock as the free scanner, make the scanners fully 11251d5bfe1fSVlastimil Babka * meet so that compact_finished() terminates compaction. 11261d5bfe1fSVlastimil Babka */ 11271d5bfe1fSVlastimil Babka cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn; 1128ff9543fdSMichal Nazarewicz 1129edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1130ff9543fdSMichal Nazarewicz } 1131ff9543fdSMichal Nazarewicz 1132837d026dSJoonsoo Kim static int __compact_finished(struct zone *zone, struct compact_control *cc, 11336d7ce559SDavid Rientjes const int migratetype) 1134748446bbSMel Gorman { 11358fb74b9fSMel Gorman unsigned int order; 11365a03b051SAndrea Arcangeli unsigned long watermark; 113756de7263SMel Gorman 1138be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 1139748446bbSMel Gorman return COMPACT_PARTIAL; 1140748446bbSMel Gorman 1141753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1142bb13ffebSMel Gorman if (cc->free_pfn <= cc->migrate_pfn) { 114355b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 114435979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 114535979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 114655b7c4c9SVlastimil Babka zone->compact_cached_free_pfn = zone_end_pfn(zone); 114755b7c4c9SVlastimil Babka 114862997027SMel Gorman /* 114962997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 115062997027SMel Gorman * by kswapd when it goes to sleep. kswapd does not set the 115162997027SMel Gorman * flag itself as the decision to be clear should be directly 115262997027SMel Gorman * based on an allocation request. 115362997027SMel Gorman */ 115462997027SMel Gorman if (!current_is_kswapd()) 115562997027SMel Gorman zone->compact_blockskip_flush = true; 115662997027SMel Gorman 1157748446bbSMel Gorman return COMPACT_COMPLETE; 1158bb13ffebSMel Gorman } 1159748446bbSMel Gorman 116082478fb7SJohannes Weiner /* 116182478fb7SJohannes Weiner * order == -1 is expected when compacting via 116282478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 116382478fb7SJohannes Weiner */ 116456de7263SMel Gorman if (cc->order == -1) 116556de7263SMel Gorman return COMPACT_CONTINUE; 116656de7263SMel Gorman 11673957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 11683957c776SMichal Hocko watermark = low_wmark_pages(zone); 11693957c776SMichal Hocko 1170ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, 1171ebff3980SVlastimil Babka cc->alloc_flags)) 11723957c776SMichal Hocko return COMPACT_CONTINUE; 11733957c776SMichal Hocko 117456de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 117556de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 11768fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 1177*2149cdaeSJoonsoo Kim bool can_steal; 11788fb74b9fSMel Gorman 117956de7263SMel Gorman /* Job done if page is free of the right migratetype */ 11806d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 118156de7263SMel Gorman return COMPACT_PARTIAL; 118256de7263SMel Gorman 1183*2149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 1184*2149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 1185*2149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 1186*2149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 1187*2149cdaeSJoonsoo Kim return COMPACT_PARTIAL; 1188*2149cdaeSJoonsoo Kim #endif 1189*2149cdaeSJoonsoo Kim /* 1190*2149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 1191*2149cdaeSJoonsoo Kim * other migratetype buddy lists. 1192*2149cdaeSJoonsoo Kim */ 1193*2149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 1194*2149cdaeSJoonsoo Kim true, &can_steal) != -1) 119556de7263SMel Gorman return COMPACT_PARTIAL; 119656de7263SMel Gorman } 119756de7263SMel Gorman 1198837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE; 1199837d026dSJoonsoo Kim } 1200837d026dSJoonsoo Kim 1201837d026dSJoonsoo Kim static int compact_finished(struct zone *zone, struct compact_control *cc, 1202837d026dSJoonsoo Kim const int migratetype) 1203837d026dSJoonsoo Kim { 1204837d026dSJoonsoo Kim int ret; 1205837d026dSJoonsoo Kim 1206837d026dSJoonsoo Kim ret = __compact_finished(zone, cc, migratetype); 1207837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret); 1208837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1209837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1210837d026dSJoonsoo Kim 1211837d026dSJoonsoo Kim return ret; 1212748446bbSMel Gorman } 1213748446bbSMel Gorman 12143e7d3449SMel Gorman /* 12153e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 12163e7d3449SMel Gorman * Returns 12173e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 12183e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 12193e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 12203e7d3449SMel Gorman */ 1221837d026dSJoonsoo Kim static unsigned long __compaction_suitable(struct zone *zone, int order, 1222ebff3980SVlastimil Babka int alloc_flags, int classzone_idx) 12233e7d3449SMel Gorman { 12243e7d3449SMel Gorman int fragindex; 12253e7d3449SMel Gorman unsigned long watermark; 12263e7d3449SMel Gorman 12273e7d3449SMel Gorman /* 12283957c776SMichal Hocko * order == -1 is expected when compacting via 12293957c776SMichal Hocko * /proc/sys/vm/compact_memory 12303957c776SMichal Hocko */ 12313957c776SMichal Hocko if (order == -1) 12323957c776SMichal Hocko return COMPACT_CONTINUE; 12333957c776SMichal Hocko 1234ebff3980SVlastimil Babka watermark = low_wmark_pages(zone); 1235ebff3980SVlastimil Babka /* 1236ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1237ebff3980SVlastimil Babka * should be no need for compaction at all. 1238ebff3980SVlastimil Babka */ 1239ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1240ebff3980SVlastimil Babka alloc_flags)) 1241ebff3980SVlastimil Babka return COMPACT_PARTIAL; 1242ebff3980SVlastimil Babka 12433957c776SMichal Hocko /* 12443e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 12453e7d3449SMel Gorman * This is because during migration, copies of pages need to be 12463e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 12473e7d3449SMel Gorman */ 1248ebff3980SVlastimil Babka watermark += (2UL << order); 1249ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) 12503e7d3449SMel Gorman return COMPACT_SKIPPED; 12513e7d3449SMel Gorman 12523e7d3449SMel Gorman /* 12533e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 12543e7d3449SMel Gorman * low memory or external fragmentation 12553e7d3449SMel Gorman * 1256ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1257ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 12583e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 12593e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 12603e7d3449SMel Gorman * 12613e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 12623e7d3449SMel Gorman */ 12633e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 12643e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1265837d026dSJoonsoo Kim return COMPACT_NOT_SUITABLE_ZONE; 12663e7d3449SMel Gorman 12673e7d3449SMel Gorman return COMPACT_CONTINUE; 12683e7d3449SMel Gorman } 12693e7d3449SMel Gorman 1270837d026dSJoonsoo Kim unsigned long compaction_suitable(struct zone *zone, int order, 1271837d026dSJoonsoo Kim int alloc_flags, int classzone_idx) 1272837d026dSJoonsoo Kim { 1273837d026dSJoonsoo Kim unsigned long ret; 1274837d026dSJoonsoo Kim 1275837d026dSJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx); 1276837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1277837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1278837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1279837d026dSJoonsoo Kim 1280837d026dSJoonsoo Kim return ret; 1281837d026dSJoonsoo Kim } 1282837d026dSJoonsoo Kim 1283748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 1284748446bbSMel Gorman { 1285748446bbSMel Gorman int ret; 1286c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1287108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 12886d7ce559SDavid Rientjes const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); 1289e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1290fdaf7f5cSVlastimil Babka unsigned long last_migrated_pfn = 0; 1291748446bbSMel Gorman 1292ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1293ebff3980SVlastimil Babka cc->classzone_idx); 12943e7d3449SMel Gorman switch (ret) { 12953e7d3449SMel Gorman case COMPACT_PARTIAL: 12963e7d3449SMel Gorman case COMPACT_SKIPPED: 12973e7d3449SMel Gorman /* Compaction is likely to fail */ 12983e7d3449SMel Gorman return ret; 12993e7d3449SMel Gorman case COMPACT_CONTINUE: 13003e7d3449SMel Gorman /* Fall through to compaction */ 13013e7d3449SMel Gorman ; 13023e7d3449SMel Gorman } 13033e7d3449SMel Gorman 1304c89511abSMel Gorman /* 1305d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1306d3132e4bSVlastimil Babka * is about to be retried after being deferred. kswapd does not do 1307d3132e4bSVlastimil Babka * this reset as it'll reset the cached information when going to sleep. 1308d3132e4bSVlastimil Babka */ 1309d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) 1310d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1311d3132e4bSVlastimil Babka 1312d3132e4bSVlastimil Babka /* 1313c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 1314c89511abSMel Gorman * information on where the scanners should start but check that it 1315c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 1316c89511abSMel Gorman */ 1317e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1318c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1319c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { 1320c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); 1321c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1322c89511abSMel Gorman } 1323c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { 1324c89511abSMel Gorman cc->migrate_pfn = start_pfn; 132535979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 132635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1327c89511abSMel Gorman } 1328748446bbSMel Gorman 132916c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 133016c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 13310eb927c0SMel Gorman 1332748446bbSMel Gorman migrate_prep_local(); 1333748446bbSMel Gorman 13346d7ce559SDavid Rientjes while ((ret = compact_finished(zone, cc, migratetype)) == 13356d7ce559SDavid Rientjes COMPACT_CONTINUE) { 13369d502c1cSMinchan Kim int err; 1337fdaf7f5cSVlastimil Babka unsigned long isolate_start_pfn = cc->migrate_pfn; 1338748446bbSMel Gorman 1339f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1340f9e35b3bSMel Gorman case ISOLATE_ABORT: 1341f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 13425733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1343e64c5237SShaohua Li cc->nr_migratepages = 0; 1344f9e35b3bSMel Gorman goto out; 1345f9e35b3bSMel Gorman case ISOLATE_NONE: 1346fdaf7f5cSVlastimil Babka /* 1347fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 1348fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 1349fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 1350fdaf7f5cSVlastimil Babka */ 1351fdaf7f5cSVlastimil Babka goto check_drain; 1352f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1353f9e35b3bSMel Gorman ; 1354f9e35b3bSMel Gorman } 1355748446bbSMel Gorman 1356d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1357e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 13587b2a2d4aSMel Gorman MR_COMPACTION); 1359748446bbSMel Gorman 1360f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1361f8c9301fSVlastimil Babka &cc->migratepages); 1362748446bbSMel Gorman 1363f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1364f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 13659d502c1cSMinchan Kim if (err) { 13665733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 13677ed695e0SVlastimil Babka /* 13687ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 13697ed695e0SVlastimil Babka * and we want compact_finished() to detect it 13707ed695e0SVlastimil Babka */ 13717ed695e0SVlastimil Babka if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { 13724bf2bba3SDavid Rientjes ret = COMPACT_PARTIAL; 13734bf2bba3SDavid Rientjes goto out; 1374748446bbSMel Gorman } 13754bf2bba3SDavid Rientjes } 1376fdaf7f5cSVlastimil Babka 1377fdaf7f5cSVlastimil Babka /* 1378fdaf7f5cSVlastimil Babka * Record where we could have freed pages by migration and not 1379fdaf7f5cSVlastimil Babka * yet flushed them to buddy allocator. We use the pfn that 1380fdaf7f5cSVlastimil Babka * isolate_migratepages() started from in this loop iteration 1381fdaf7f5cSVlastimil Babka * - this is the lowest page that could have been isolated and 1382fdaf7f5cSVlastimil Babka * then freed by migration. 1383fdaf7f5cSVlastimil Babka */ 1384fdaf7f5cSVlastimil Babka if (!last_migrated_pfn) 1385fdaf7f5cSVlastimil Babka last_migrated_pfn = isolate_start_pfn; 1386fdaf7f5cSVlastimil Babka 1387fdaf7f5cSVlastimil Babka check_drain: 1388fdaf7f5cSVlastimil Babka /* 1389fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 1390fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 1391fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 1392fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 1393fdaf7f5cSVlastimil Babka * would succeed. 1394fdaf7f5cSVlastimil Babka */ 1395fdaf7f5cSVlastimil Babka if (cc->order > 0 && last_migrated_pfn) { 1396fdaf7f5cSVlastimil Babka int cpu; 1397fdaf7f5cSVlastimil Babka unsigned long current_block_start = 1398fdaf7f5cSVlastimil Babka cc->migrate_pfn & ~((1UL << cc->order) - 1); 1399fdaf7f5cSVlastimil Babka 1400fdaf7f5cSVlastimil Babka if (last_migrated_pfn < current_block_start) { 1401fdaf7f5cSVlastimil Babka cpu = get_cpu(); 1402fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 1403fdaf7f5cSVlastimil Babka drain_local_pages(zone); 1404fdaf7f5cSVlastimil Babka put_cpu(); 1405fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 1406fdaf7f5cSVlastimil Babka last_migrated_pfn = 0; 1407fdaf7f5cSVlastimil Babka } 1408fdaf7f5cSVlastimil Babka } 1409fdaf7f5cSVlastimil Babka 1410748446bbSMel Gorman } 1411748446bbSMel Gorman 1412f9e35b3bSMel Gorman out: 14136bace090SVlastimil Babka /* 14146bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 14156bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 14166bace090SVlastimil Babka */ 14176bace090SVlastimil Babka if (cc->nr_freepages > 0) { 14186bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 14196bace090SVlastimil Babka 14206bace090SVlastimil Babka cc->nr_freepages = 0; 14216bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 14226bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 14236bace090SVlastimil Babka free_pfn &= ~(pageblock_nr_pages-1); 14246bace090SVlastimil Babka /* 14256bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 14266bace090SVlastimil Babka * already reset to zone end in compact_finished() 14276bace090SVlastimil Babka */ 14286bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn) 14296bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn; 14306bace090SVlastimil Babka } 1431748446bbSMel Gorman 143216c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 143316c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 14340eb927c0SMel Gorman 1435748446bbSMel Gorman return ret; 1436748446bbSMel Gorman } 143776ab0f53SMel Gorman 1438e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order, 1439ebff3980SVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1440ebff3980SVlastimil Babka int alloc_flags, int classzone_idx) 144156de7263SMel Gorman { 1442e64c5237SShaohua Li unsigned long ret; 144356de7263SMel Gorman struct compact_control cc = { 144456de7263SMel Gorman .nr_freepages = 0, 144556de7263SMel Gorman .nr_migratepages = 0, 144656de7263SMel Gorman .order = order, 14476d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 144856de7263SMel Gorman .zone = zone, 1449e0b9daebSDavid Rientjes .mode = mode, 1450ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 1451ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 145256de7263SMel Gorman }; 145356de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 145456de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 145556de7263SMel Gorman 1456e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1457e64c5237SShaohua Li 1458e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1459e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1460e64c5237SShaohua Li 1461e64c5237SShaohua Li *contended = cc.contended; 1462e64c5237SShaohua Li return ret; 146356de7263SMel Gorman } 146456de7263SMel Gorman 14655e771905SMel Gorman int sysctl_extfrag_threshold = 500; 14665e771905SMel Gorman 146756de7263SMel Gorman /** 146856de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 146956de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 14701a6d53a1SVlastimil Babka * @order: The order of the current allocation 14711a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 14721a6d53a1SVlastimil Babka * @ac: The context of current allocation 1473e0b9daebSDavid Rientjes * @mode: The migration mode for async, sync light, or sync migration 14741f9efdefSVlastimil Babka * @contended: Return value that determines if compaction was aborted due to 14751f9efdefSVlastimil Babka * need_resched() or lock contention 147656de7263SMel Gorman * 147756de7263SMel Gorman * This is the main entry point for direct page compaction. 147856de7263SMel Gorman */ 14791a6d53a1SVlastimil Babka unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 14801a6d53a1SVlastimil Babka int alloc_flags, const struct alloc_context *ac, 14811a6d53a1SVlastimil Babka enum migrate_mode mode, int *contended) 148256de7263SMel Gorman { 148356de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 148456de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 148556de7263SMel Gorman struct zoneref *z; 148656de7263SMel Gorman struct zone *zone; 148753853e2dSVlastimil Babka int rc = COMPACT_DEFERRED; 14881f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ 14891f9efdefSVlastimil Babka 14901f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE; 149156de7263SMel Gorman 14924ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1493c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 149453853e2dSVlastimil Babka return COMPACT_SKIPPED; 149556de7263SMel Gorman 1496837d026dSJoonsoo Kim trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1497837d026dSJoonsoo Kim 149856de7263SMel Gorman /* Compact each zone in the list */ 14991a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 15001a6d53a1SVlastimil Babka ac->nodemask) { 150156de7263SMel Gorman int status; 15021f9efdefSVlastimil Babka int zone_contended; 150356de7263SMel Gorman 150453853e2dSVlastimil Babka if (compaction_deferred(zone, order)) 150553853e2dSVlastimil Babka continue; 150653853e2dSVlastimil Babka 1507e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode, 15081a6d53a1SVlastimil Babka &zone_contended, alloc_flags, 15091a6d53a1SVlastimil Babka ac->classzone_idx); 151056de7263SMel Gorman rc = max(status, rc); 15111f9efdefSVlastimil Babka /* 15121f9efdefSVlastimil Babka * It takes at least one zone that wasn't lock contended 15131f9efdefSVlastimil Babka * to clear all_zones_contended. 15141f9efdefSVlastimil Babka */ 15151f9efdefSVlastimil Babka all_zones_contended &= zone_contended; 151656de7263SMel Gorman 15173e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1518ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 15191a6d53a1SVlastimil Babka ac->classzone_idx, alloc_flags)) { 152053853e2dSVlastimil Babka /* 152153853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 152253853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 152353853e2dSVlastimil Babka * will repeat this with true if allocation indeed 152453853e2dSVlastimil Babka * succeeds in this zone. 152553853e2dSVlastimil Babka */ 152653853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 15271f9efdefSVlastimil Babka /* 15281f9efdefSVlastimil Babka * It is possible that async compaction aborted due to 15291f9efdefSVlastimil Babka * need_resched() and the watermarks were ok thanks to 15301f9efdefSVlastimil Babka * somebody else freeing memory. The allocation can 15311f9efdefSVlastimil Babka * however still fail so we better signal the 15321f9efdefSVlastimil Babka * need_resched() contention anyway (this will not 15331f9efdefSVlastimil Babka * prevent the allocation attempt). 15341f9efdefSVlastimil Babka */ 15351f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED) 15361f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 15371f9efdefSVlastimil Babka 15381f9efdefSVlastimil Babka goto break_loop; 15391f9efdefSVlastimil Babka } 15401f9efdefSVlastimil Babka 1541f8669795SVlastimil Babka if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) { 154253853e2dSVlastimil Babka /* 154353853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 154453853e2dSVlastimil Babka * so we defer compaction there. If it ends up 154553853e2dSVlastimil Babka * succeeding after all, it will be reset. 154653853e2dSVlastimil Babka */ 154753853e2dSVlastimil Babka defer_compaction(zone, order); 154853853e2dSVlastimil Babka } 15491f9efdefSVlastimil Babka 15501f9efdefSVlastimil Babka /* 15511f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 15521f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 15531f9efdefSVlastimil Babka * case do not try further zones and signal need_resched() 15541f9efdefSVlastimil Babka * contention. 15551f9efdefSVlastimil Babka */ 15561f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED) 15571f9efdefSVlastimil Babka || fatal_signal_pending(current)) { 15581f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 15591f9efdefSVlastimil Babka goto break_loop; 156056de7263SMel Gorman } 156156de7263SMel Gorman 15621f9efdefSVlastimil Babka continue; 15631f9efdefSVlastimil Babka break_loop: 15641f9efdefSVlastimil Babka /* 15651f9efdefSVlastimil Babka * We might not have tried all the zones, so be conservative 15661f9efdefSVlastimil Babka * and assume they are not all lock contended. 15671f9efdefSVlastimil Babka */ 15681f9efdefSVlastimil Babka all_zones_contended = 0; 15691f9efdefSVlastimil Babka break; 15701f9efdefSVlastimil Babka } 15711f9efdefSVlastimil Babka 15721f9efdefSVlastimil Babka /* 15731f9efdefSVlastimil Babka * If at least one zone wasn't deferred or skipped, we report if all 15741f9efdefSVlastimil Babka * zones that were tried were lock contended. 15751f9efdefSVlastimil Babka */ 15761f9efdefSVlastimil Babka if (rc > COMPACT_SKIPPED && all_zones_contended) 15771f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK; 15781f9efdefSVlastimil Babka 157956de7263SMel Gorman return rc; 158056de7263SMel Gorman } 158156de7263SMel Gorman 158256de7263SMel Gorman 158376ab0f53SMel Gorman /* Compact all zones within a node */ 15847103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 158576ab0f53SMel Gorman { 158676ab0f53SMel Gorman int zoneid; 158776ab0f53SMel Gorman struct zone *zone; 158876ab0f53SMel Gorman 158976ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 159076ab0f53SMel Gorman 159176ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 159276ab0f53SMel Gorman if (!populated_zone(zone)) 159376ab0f53SMel Gorman continue; 159476ab0f53SMel Gorman 15957be62de9SRik van Riel cc->nr_freepages = 0; 15967be62de9SRik van Riel cc->nr_migratepages = 0; 15977be62de9SRik van Riel cc->zone = zone; 15987be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 15997be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 160076ab0f53SMel Gorman 1601aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 16027be62de9SRik van Riel compact_zone(zone, cc); 160376ab0f53SMel Gorman 1604aff62249SRik van Riel if (cc->order > 0) { 1605de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1606de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1607de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1608aff62249SRik van Riel } 1609aff62249SRik van Riel 16107be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages)); 16117be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages)); 161276ab0f53SMel Gorman } 161376ab0f53SMel Gorman } 161476ab0f53SMel Gorman 16157103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 16167be62de9SRik van Riel { 16177be62de9SRik van Riel struct compact_control cc = { 16187be62de9SRik van Riel .order = order, 1619e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC, 16207be62de9SRik van Riel }; 16217be62de9SRik van Riel 16223a7200afSMel Gorman if (!order) 16233a7200afSMel Gorman return; 16243a7200afSMel Gorman 16257103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 16267be62de9SRik van Riel } 16277be62de9SRik van Riel 16287103f16dSAndrew Morton static void compact_node(int nid) 16297be62de9SRik van Riel { 16307be62de9SRik van Riel struct compact_control cc = { 16317be62de9SRik van Riel .order = -1, 1632e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 163391ca9186SDavid Rientjes .ignore_skip_hint = true, 16347be62de9SRik van Riel }; 16357be62de9SRik van Riel 16367103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 16377be62de9SRik van Riel } 16387be62de9SRik van Riel 163976ab0f53SMel Gorman /* Compact all nodes in the system */ 16407964c06dSJason Liu static void compact_nodes(void) 164176ab0f53SMel Gorman { 164276ab0f53SMel Gorman int nid; 164376ab0f53SMel Gorman 16448575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 16458575ec29SHugh Dickins lru_add_drain_all(); 16468575ec29SHugh Dickins 164776ab0f53SMel Gorman for_each_online_node(nid) 164876ab0f53SMel Gorman compact_node(nid); 164976ab0f53SMel Gorman } 165076ab0f53SMel Gorman 165176ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 165276ab0f53SMel Gorman int sysctl_compact_memory; 165376ab0f53SMel Gorman 165476ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */ 165576ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 165676ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 165776ab0f53SMel Gorman { 165876ab0f53SMel Gorman if (write) 16597964c06dSJason Liu compact_nodes(); 166076ab0f53SMel Gorman 166176ab0f53SMel Gorman return 0; 166276ab0f53SMel Gorman } 1663ed4a6d7fSMel Gorman 16645e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 16655e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 16665e771905SMel Gorman { 16675e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 16685e771905SMel Gorman 16695e771905SMel Gorman return 0; 16705e771905SMel Gorman } 16715e771905SMel Gorman 1672ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 167374e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 167410fbcf4cSKay Sievers struct device_attribute *attr, 1675ed4a6d7fSMel Gorman const char *buf, size_t count) 1676ed4a6d7fSMel Gorman { 16778575ec29SHugh Dickins int nid = dev->id; 16788575ec29SHugh Dickins 16798575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 16808575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 16818575ec29SHugh Dickins lru_add_drain_all(); 16828575ec29SHugh Dickins 16838575ec29SHugh Dickins compact_node(nid); 16848575ec29SHugh Dickins } 1685ed4a6d7fSMel Gorman 1686ed4a6d7fSMel Gorman return count; 1687ed4a6d7fSMel Gorman } 168810fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1689ed4a6d7fSMel Gorman 1690ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1691ed4a6d7fSMel Gorman { 169210fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1693ed4a6d7fSMel Gorman } 1694ed4a6d7fSMel Gorman 1695ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1696ed4a6d7fSMel Gorman { 169710fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1698ed4a6d7fSMel Gorman } 1699ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1700ff9543fdSMichal Nazarewicz 1701ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 1702