1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19748446bbSMel Gorman #include "internal.h" 20748446bbSMel Gorman 21010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 22010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 23010fc29aSMinchan Kim { 24010fc29aSMinchan Kim count_vm_event(item); 25010fc29aSMinchan Kim } 26010fc29aSMinchan Kim 27010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 28010fc29aSMinchan Kim { 29010fc29aSMinchan Kim count_vm_events(item, delta); 30010fc29aSMinchan Kim } 31010fc29aSMinchan Kim #else 32010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 33010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 34010fc29aSMinchan Kim #endif 35010fc29aSMinchan Kim 36ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 37ff9543fdSMichal Nazarewicz 38b7aba698SMel Gorman #define CREATE_TRACE_POINTS 39b7aba698SMel Gorman #include <trace/events/compaction.h> 40b7aba698SMel Gorman 41748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 42748446bbSMel Gorman { 43748446bbSMel Gorman struct page *page, *next; 44748446bbSMel Gorman unsigned long count = 0; 45748446bbSMel Gorman 46748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 47748446bbSMel Gorman list_del(&page->lru); 48748446bbSMel Gorman __free_page(page); 49748446bbSMel Gorman count++; 50748446bbSMel Gorman } 51748446bbSMel Gorman 52748446bbSMel Gorman return count; 53748446bbSMel Gorman } 54748446bbSMel Gorman 55ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 56ff9543fdSMichal Nazarewicz { 57ff9543fdSMichal Nazarewicz struct page *page; 58ff9543fdSMichal Nazarewicz 59ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 60ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 61ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 62ff9543fdSMichal Nazarewicz } 63ff9543fdSMichal Nazarewicz } 64ff9543fdSMichal Nazarewicz 6547118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 6647118af0SMichal Nazarewicz { 6747118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 6847118af0SMichal Nazarewicz } 6947118af0SMichal Nazarewicz 707d49d886SVlastimil Babka /* 717d49d886SVlastimil Babka * Check that the whole (or subset of) a pageblock given by the interval of 727d49d886SVlastimil Babka * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 737d49d886SVlastimil Babka * with the migration of free compaction scanner. The scanners then need to 747d49d886SVlastimil Babka * use only pfn_valid_within() check for arches that allow holes within 757d49d886SVlastimil Babka * pageblocks. 767d49d886SVlastimil Babka * 777d49d886SVlastimil Babka * Return struct page pointer of start_pfn, or NULL if checks were not passed. 787d49d886SVlastimil Babka * 797d49d886SVlastimil Babka * It's possible on some configurations to have a setup like node0 node1 node0 807d49d886SVlastimil Babka * i.e. it's possible that all pages within a zones range of pages do not 817d49d886SVlastimil Babka * belong to a single zone. We assume that a border between node0 and node1 827d49d886SVlastimil Babka * can occur within a single pageblock, but not a node0 node1 node0 837d49d886SVlastimil Babka * interleaving within a single pageblock. It is therefore sufficient to check 847d49d886SVlastimil Babka * the first and last page of a pageblock and avoid checking each individual 857d49d886SVlastimil Babka * page in a pageblock. 867d49d886SVlastimil Babka */ 877d49d886SVlastimil Babka static struct page *pageblock_pfn_to_page(unsigned long start_pfn, 887d49d886SVlastimil Babka unsigned long end_pfn, struct zone *zone) 897d49d886SVlastimil Babka { 907d49d886SVlastimil Babka struct page *start_page; 917d49d886SVlastimil Babka struct page *end_page; 927d49d886SVlastimil Babka 937d49d886SVlastimil Babka /* end_pfn is one past the range we are checking */ 947d49d886SVlastimil Babka end_pfn--; 957d49d886SVlastimil Babka 967d49d886SVlastimil Babka if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 977d49d886SVlastimil Babka return NULL; 987d49d886SVlastimil Babka 997d49d886SVlastimil Babka start_page = pfn_to_page(start_pfn); 1007d49d886SVlastimil Babka 1017d49d886SVlastimil Babka if (page_zone(start_page) != zone) 1027d49d886SVlastimil Babka return NULL; 1037d49d886SVlastimil Babka 1047d49d886SVlastimil Babka end_page = pfn_to_page(end_pfn); 1057d49d886SVlastimil Babka 1067d49d886SVlastimil Babka /* This gives a shorter code than deriving page_zone(end_page) */ 1077d49d886SVlastimil Babka if (page_zone_id(start_page) != page_zone_id(end_page)) 1087d49d886SVlastimil Babka return NULL; 1097d49d886SVlastimil Babka 1107d49d886SVlastimil Babka return start_page; 1117d49d886SVlastimil Babka } 1127d49d886SVlastimil Babka 113bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 114bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. */ 115bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 116bb13ffebSMel Gorman struct page *page) 117bb13ffebSMel Gorman { 118bb13ffebSMel Gorman if (cc->ignore_skip_hint) 119bb13ffebSMel Gorman return true; 120bb13ffebSMel Gorman 121bb13ffebSMel Gorman return !get_pageblock_skip(page); 122bb13ffebSMel Gorman } 123bb13ffebSMel Gorman 124bb13ffebSMel Gorman /* 125bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 126bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 127bb13ffebSMel Gorman * meet. 128bb13ffebSMel Gorman */ 12962997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 130bb13ffebSMel Gorman { 131bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 132108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 133bb13ffebSMel Gorman unsigned long pfn; 134bb13ffebSMel Gorman 13535979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = start_pfn; 13635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = start_pfn; 137c89511abSMel Gorman zone->compact_cached_free_pfn = end_pfn; 13862997027SMel Gorman zone->compact_blockskip_flush = false; 139bb13ffebSMel Gorman 140bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 141bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 142bb13ffebSMel Gorman struct page *page; 143bb13ffebSMel Gorman 144bb13ffebSMel Gorman cond_resched(); 145bb13ffebSMel Gorman 146bb13ffebSMel Gorman if (!pfn_valid(pfn)) 147bb13ffebSMel Gorman continue; 148bb13ffebSMel Gorman 149bb13ffebSMel Gorman page = pfn_to_page(pfn); 150bb13ffebSMel Gorman if (zone != page_zone(page)) 151bb13ffebSMel Gorman continue; 152bb13ffebSMel Gorman 153bb13ffebSMel Gorman clear_pageblock_skip(page); 154bb13ffebSMel Gorman } 155bb13ffebSMel Gorman } 156bb13ffebSMel Gorman 15762997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 15862997027SMel Gorman { 15962997027SMel Gorman int zoneid; 16062997027SMel Gorman 16162997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 16262997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 16362997027SMel Gorman if (!populated_zone(zone)) 16462997027SMel Gorman continue; 16562997027SMel Gorman 16662997027SMel Gorman /* Only flush if a full compaction finished recently */ 16762997027SMel Gorman if (zone->compact_blockskip_flush) 16862997027SMel Gorman __reset_isolation_suitable(zone); 16962997027SMel Gorman } 17062997027SMel Gorman } 17162997027SMel Gorman 172bb13ffebSMel Gorman /* 173bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 17462997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 175bb13ffebSMel Gorman */ 176c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 177c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 178edc2ca61SVlastimil Babka bool migrate_scanner) 179bb13ffebSMel Gorman { 180c89511abSMel Gorman struct zone *zone = cc->zone; 18135979ef3SDavid Rientjes unsigned long pfn; 1826815bf3fSJoonsoo Kim 1836815bf3fSJoonsoo Kim if (cc->ignore_skip_hint) 1846815bf3fSJoonsoo Kim return; 1856815bf3fSJoonsoo Kim 186bb13ffebSMel Gorman if (!page) 187bb13ffebSMel Gorman return; 188bb13ffebSMel Gorman 18935979ef3SDavid Rientjes if (nr_isolated) 19035979ef3SDavid Rientjes return; 19135979ef3SDavid Rientjes 192bb13ffebSMel Gorman set_pageblock_skip(page); 193c89511abSMel Gorman 19435979ef3SDavid Rientjes pfn = page_to_pfn(page); 19535979ef3SDavid Rientjes 19635979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 197c89511abSMel Gorman if (migrate_scanner) { 19835979ef3SDavid Rientjes if (cc->finished_update_migrate) 19935979ef3SDavid Rientjes return; 20035979ef3SDavid Rientjes if (pfn > zone->compact_cached_migrate_pfn[0]) 20135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = pfn; 202e0b9daebSDavid Rientjes if (cc->mode != MIGRATE_ASYNC && 203e0b9daebSDavid Rientjes pfn > zone->compact_cached_migrate_pfn[1]) 20435979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = pfn; 205c89511abSMel Gorman } else { 20635979ef3SDavid Rientjes if (cc->finished_update_free) 20735979ef3SDavid Rientjes return; 20835979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 209c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 210c89511abSMel Gorman } 211c89511abSMel Gorman } 212bb13ffebSMel Gorman #else 213bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 214bb13ffebSMel Gorman struct page *page) 215bb13ffebSMel Gorman { 216bb13ffebSMel Gorman return true; 217bb13ffebSMel Gorman } 218bb13ffebSMel Gorman 219c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 220c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 221edc2ca61SVlastimil Babka bool migrate_scanner) 222bb13ffebSMel Gorman { 223bb13ffebSMel Gorman } 224bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 225bb13ffebSMel Gorman 2261f9efdefSVlastimil Babka /* 2278b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 2288b44d279SVlastimil Babka * very heavily contended. For async compaction, back out if the lock cannot 2298b44d279SVlastimil Babka * be taken immediately. For sync compaction, spin on the lock if needed. 2308b44d279SVlastimil Babka * 2318b44d279SVlastimil Babka * Returns true if the lock is held 2328b44d279SVlastimil Babka * Returns false if the lock is not held and compaction should abort 2331f9efdefSVlastimil Babka */ 2348b44d279SVlastimil Babka static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, 2358b44d279SVlastimil Babka struct compact_control *cc) 2368b44d279SVlastimil Babka { 2378b44d279SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 2388b44d279SVlastimil Babka if (!spin_trylock_irqsave(lock, *flags)) { 2398b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_LOCK; 2408b44d279SVlastimil Babka return false; 2418b44d279SVlastimil Babka } 2428b44d279SVlastimil Babka } else { 2438b44d279SVlastimil Babka spin_lock_irqsave(lock, *flags); 2448b44d279SVlastimil Babka } 2451f9efdefSVlastimil Babka 2468b44d279SVlastimil Babka return true; 2472a1402aaSMel Gorman } 2482a1402aaSMel Gorman 24985aa125fSMichal Nazarewicz /* 250c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 2518b44d279SVlastimil Babka * very heavily contended. The lock should be periodically unlocked to avoid 2528b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 2538b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 2548b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 2558b44d279SVlastimil Babka * aborts. Sync compaction schedules. 2568b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 2578b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 258c67fe375SMel Gorman * 2598b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 2608b44d279SVlastimil Babka * async compaction due to need_resched() 2618b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 2628b44d279SVlastimil Babka * scheduled) 263c67fe375SMel Gorman */ 2648b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 2658b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 266c67fe375SMel Gorman { 2678b44d279SVlastimil Babka if (*locked) { 2688b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 2698b44d279SVlastimil Babka *locked = false; 270c67fe375SMel Gorman } 271c67fe375SMel Gorman 2728b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 2738b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 2748b44d279SVlastimil Babka return true; 2758b44d279SVlastimil Babka } 2768b44d279SVlastimil Babka 2778b44d279SVlastimil Babka if (need_resched()) { 278e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) { 2798b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 2808b44d279SVlastimil Babka return true; 281c67fe375SMel Gorman } 282c67fe375SMel Gorman cond_resched(); 283c67fe375SMel Gorman } 284c67fe375SMel Gorman 2858b44d279SVlastimil Babka return false; 286c67fe375SMel Gorman } 287c67fe375SMel Gorman 288be976572SVlastimil Babka /* 289be976572SVlastimil Babka * Aside from avoiding lock contention, compaction also periodically checks 290be976572SVlastimil Babka * need_resched() and either schedules in sync compaction or aborts async 2918b44d279SVlastimil Babka * compaction. This is similar to what compact_unlock_should_abort() does, but 292be976572SVlastimil Babka * is used where no lock is concerned. 293be976572SVlastimil Babka * 294be976572SVlastimil Babka * Returns false when no scheduling was needed, or sync compaction scheduled. 295be976572SVlastimil Babka * Returns true when async compaction should abort. 296be976572SVlastimil Babka */ 297be976572SVlastimil Babka static inline bool compact_should_abort(struct compact_control *cc) 298be976572SVlastimil Babka { 299be976572SVlastimil Babka /* async compaction aborts if contended */ 300be976572SVlastimil Babka if (need_resched()) { 301be976572SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3021f9efdefSVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 303be976572SVlastimil Babka return true; 304be976572SVlastimil Babka } 305be976572SVlastimil Babka 306be976572SVlastimil Babka cond_resched(); 307be976572SVlastimil Babka } 308be976572SVlastimil Babka 309be976572SVlastimil Babka return false; 310be976572SVlastimil Babka } 311be976572SVlastimil Babka 312f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */ 313f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page) 314f40d1e42SMel Gorman { 3157d348b9eSJoonsoo Kim /* If the page is a large free page, then disallow migration */ 316f40d1e42SMel Gorman if (PageBuddy(page) && page_order(page) >= pageblock_order) 3177d348b9eSJoonsoo Kim return false; 318f40d1e42SMel Gorman 319f40d1e42SMel Gorman /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 3207d348b9eSJoonsoo Kim if (migrate_async_suitable(get_pageblock_migratetype(page))) 321f40d1e42SMel Gorman return true; 322f40d1e42SMel Gorman 323f40d1e42SMel Gorman /* Otherwise skip the block */ 324f40d1e42SMel Gorman return false; 325f40d1e42SMel Gorman } 326f40d1e42SMel Gorman 327c67fe375SMel Gorman /* 3289e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 3299e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 3309e4be470SJerome Marchand * (even though it may still end up isolating some pages). 33185aa125fSMichal Nazarewicz */ 332f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 333*e14c720eSVlastimil Babka unsigned long *start_pfn, 33485aa125fSMichal Nazarewicz unsigned long end_pfn, 33585aa125fSMichal Nazarewicz struct list_head *freelist, 33685aa125fSMichal Nazarewicz bool strict) 337748446bbSMel Gorman { 338b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 339bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 340f40d1e42SMel Gorman unsigned long flags; 341f40d1e42SMel Gorman bool locked = false; 342*e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 343748446bbSMel Gorman 344748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 345748446bbSMel Gorman 346f40d1e42SMel Gorman /* Isolate free pages. */ 347748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 348748446bbSMel Gorman int isolated, i; 349748446bbSMel Gorman struct page *page = cursor; 350748446bbSMel Gorman 3518b44d279SVlastimil Babka /* 3528b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 3538b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort if fatal signal 3548b44d279SVlastimil Babka * pending or async compaction detects need_resched() 3558b44d279SVlastimil Babka */ 3568b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 3578b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 3588b44d279SVlastimil Babka &locked, cc)) 3598b44d279SVlastimil Babka break; 3608b44d279SVlastimil Babka 361b7aba698SMel Gorman nr_scanned++; 362f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 3632af120bcSLaura Abbott goto isolate_fail; 3642af120bcSLaura Abbott 365bb13ffebSMel Gorman if (!valid_page) 366bb13ffebSMel Gorman valid_page = page; 367f40d1e42SMel Gorman if (!PageBuddy(page)) 3682af120bcSLaura Abbott goto isolate_fail; 369f40d1e42SMel Gorman 370f40d1e42SMel Gorman /* 37169b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 37269b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 37369b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 37469b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 37569b7189fSVlastimil Babka * recheck as well. 37669b7189fSVlastimil Babka */ 37769b7189fSVlastimil Babka if (!locked) { 37869b7189fSVlastimil Babka /* 379f40d1e42SMel Gorman * The zone lock must be held to isolate freepages. 380f40d1e42SMel Gorman * Unfortunately this is a very coarse lock and can be 381f40d1e42SMel Gorman * heavily contended if there are parallel allocations 382f40d1e42SMel Gorman * or parallel compactions. For async compaction do not 383f40d1e42SMel Gorman * spin on the lock and we acquire the lock as late as 384f40d1e42SMel Gorman * possible. 385f40d1e42SMel Gorman */ 3868b44d279SVlastimil Babka locked = compact_trylock_irqsave(&cc->zone->lock, 3878b44d279SVlastimil Babka &flags, cc); 388f40d1e42SMel Gorman if (!locked) 389f40d1e42SMel Gorman break; 390f40d1e42SMel Gorman 391f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 392f40d1e42SMel Gorman if (!PageBuddy(page)) 3932af120bcSLaura Abbott goto isolate_fail; 39469b7189fSVlastimil Babka } 395748446bbSMel Gorman 396748446bbSMel Gorman /* Found a free page, break it into order-0 pages */ 397748446bbSMel Gorman isolated = split_free_page(page); 398748446bbSMel Gorman total_isolated += isolated; 399748446bbSMel Gorman for (i = 0; i < isolated; i++) { 400748446bbSMel Gorman list_add(&page->lru, freelist); 401748446bbSMel Gorman page++; 402748446bbSMel Gorman } 403748446bbSMel Gorman 404748446bbSMel Gorman /* If a page was split, advance to the end of it */ 405748446bbSMel Gorman if (isolated) { 406748446bbSMel Gorman blockpfn += isolated - 1; 407748446bbSMel Gorman cursor += isolated - 1; 4082af120bcSLaura Abbott continue; 409748446bbSMel Gorman } 4102af120bcSLaura Abbott 4112af120bcSLaura Abbott isolate_fail: 4122af120bcSLaura Abbott if (strict) 4132af120bcSLaura Abbott break; 4142af120bcSLaura Abbott else 4152af120bcSLaura Abbott continue; 4162af120bcSLaura Abbott 417748446bbSMel Gorman } 418748446bbSMel Gorman 419*e14c720eSVlastimil Babka /* Record how far we have got within the block */ 420*e14c720eSVlastimil Babka *start_pfn = blockpfn; 421*e14c720eSVlastimil Babka 422b7aba698SMel Gorman trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); 423f40d1e42SMel Gorman 424f40d1e42SMel Gorman /* 425f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 426f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 427f40d1e42SMel Gorman * returned and CMA will fail. 428f40d1e42SMel Gorman */ 4292af120bcSLaura Abbott if (strict && blockpfn < end_pfn) 430f40d1e42SMel Gorman total_isolated = 0; 431f40d1e42SMel Gorman 432f40d1e42SMel Gorman if (locked) 433f40d1e42SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 434f40d1e42SMel Gorman 435bb13ffebSMel Gorman /* Update the pageblock-skip if the whole pageblock was scanned */ 436bb13ffebSMel Gorman if (blockpfn == end_pfn) 437edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, total_isolated, false); 438bb13ffebSMel Gorman 439010fc29aSMinchan Kim count_compact_events(COMPACTFREE_SCANNED, nr_scanned); 440397487dbSMel Gorman if (total_isolated) 441010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated); 442748446bbSMel Gorman return total_isolated; 443748446bbSMel Gorman } 444748446bbSMel Gorman 44585aa125fSMichal Nazarewicz /** 44685aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 44785aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 44885aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 44985aa125fSMichal Nazarewicz * 45085aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the 45185aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors, cause function to 45285aa125fSMichal Nazarewicz * undo its actions and return zero. 45385aa125fSMichal Nazarewicz * 45485aa125fSMichal Nazarewicz * Otherwise, function returns one-past-the-last PFN of isolated page 45585aa125fSMichal Nazarewicz * (which may be greater then end_pfn if end fell in a middle of 45685aa125fSMichal Nazarewicz * a free page). 45785aa125fSMichal Nazarewicz */ 458ff9543fdSMichal Nazarewicz unsigned long 459bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc, 460bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn) 46185aa125fSMichal Nazarewicz { 462f40d1e42SMel Gorman unsigned long isolated, pfn, block_end_pfn; 46385aa125fSMichal Nazarewicz LIST_HEAD(freelist); 46485aa125fSMichal Nazarewicz 4657d49d886SVlastimil Babka pfn = start_pfn; 46685aa125fSMichal Nazarewicz block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 4677d49d886SVlastimil Babka 4687d49d886SVlastimil Babka for (; pfn < end_pfn; pfn += isolated, 4697d49d886SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 470*e14c720eSVlastimil Babka /* Protect pfn from changing by isolate_freepages_block */ 471*e14c720eSVlastimil Babka unsigned long isolate_start_pfn = pfn; 4727d49d886SVlastimil Babka 47385aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn); 47485aa125fSMichal Nazarewicz 4757d49d886SVlastimil Babka if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 4767d49d886SVlastimil Babka break; 4777d49d886SVlastimil Babka 478*e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 479*e14c720eSVlastimil Babka block_end_pfn, &freelist, true); 48085aa125fSMichal Nazarewicz 48185aa125fSMichal Nazarewicz /* 48285aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 48385aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or 48485aa125fSMichal Nazarewicz * non-free pages). 48585aa125fSMichal Nazarewicz */ 48685aa125fSMichal Nazarewicz if (!isolated) 48785aa125fSMichal Nazarewicz break; 48885aa125fSMichal Nazarewicz 48985aa125fSMichal Nazarewicz /* 49085aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 49185aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order 49285aa125fSMichal Nazarewicz * page may span two pageblocks). 49385aa125fSMichal Nazarewicz */ 49485aa125fSMichal Nazarewicz } 49585aa125fSMichal Nazarewicz 49685aa125fSMichal Nazarewicz /* split_free_page does not map the pages */ 49785aa125fSMichal Nazarewicz map_pages(&freelist); 49885aa125fSMichal Nazarewicz 49985aa125fSMichal Nazarewicz if (pfn < end_pfn) { 50085aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 50185aa125fSMichal Nazarewicz release_freepages(&freelist); 50285aa125fSMichal Nazarewicz return 0; 50385aa125fSMichal Nazarewicz } 50485aa125fSMichal Nazarewicz 50585aa125fSMichal Nazarewicz /* We don't use freelists for anything. */ 50685aa125fSMichal Nazarewicz return pfn; 50785aa125fSMichal Nazarewicz } 50885aa125fSMichal Nazarewicz 509748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */ 510edc2ca61SVlastimil Babka static void acct_isolated(struct zone *zone, struct compact_control *cc) 511748446bbSMel Gorman { 512748446bbSMel Gorman struct page *page; 513b9e84ac1SMinchan Kim unsigned int count[2] = { 0, }; 514748446bbSMel Gorman 515edc2ca61SVlastimil Babka if (list_empty(&cc->migratepages)) 516edc2ca61SVlastimil Babka return; 517edc2ca61SVlastimil Babka 518b9e84ac1SMinchan Kim list_for_each_entry(page, &cc->migratepages, lru) 519b9e84ac1SMinchan Kim count[!!page_is_file_cache(page)]++; 520748446bbSMel Gorman 521c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); 522c67fe375SMel Gorman mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); 523c67fe375SMel Gorman } 524748446bbSMel Gorman 525748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 526748446bbSMel Gorman static bool too_many_isolated(struct zone *zone) 527748446bbSMel Gorman { 528bc693045SMinchan Kim unsigned long active, inactive, isolated; 529748446bbSMel Gorman 530748446bbSMel Gorman inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 531748446bbSMel Gorman zone_page_state(zone, NR_INACTIVE_ANON); 532bc693045SMinchan Kim active = zone_page_state(zone, NR_ACTIVE_FILE) + 533bc693045SMinchan Kim zone_page_state(zone, NR_ACTIVE_ANON); 534748446bbSMel Gorman isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 535748446bbSMel Gorman zone_page_state(zone, NR_ISOLATED_ANON); 536748446bbSMel Gorman 537bc693045SMinchan Kim return isolated > (inactive + active) / 2; 538748446bbSMel Gorman } 539748446bbSMel Gorman 5402fe86e00SMichal Nazarewicz /** 541edc2ca61SVlastimil Babka * isolate_migratepages_block() - isolate all migrate-able pages within 542edc2ca61SVlastimil Babka * a single pageblock 5432fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 544edc2ca61SVlastimil Babka * @low_pfn: The first PFN to isolate 545edc2ca61SVlastimil Babka * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 546edc2ca61SVlastimil Babka * @isolate_mode: Isolation mode to be used. 5472fe86e00SMichal Nazarewicz * 5482fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 549edc2ca61SVlastimil Babka * [low_pfn, end_pfn). The range is expected to be within same pageblock. 550edc2ca61SVlastimil Babka * Returns zero if there is a fatal signal pending, otherwise PFN of the 551edc2ca61SVlastimil Babka * first page that was not scanned (which may be both less, equal to or more 552edc2ca61SVlastimil Babka * than end_pfn). 5532fe86e00SMichal Nazarewicz * 554edc2ca61SVlastimil Babka * The pages are isolated on cc->migratepages list (not required to be empty), 555edc2ca61SVlastimil Babka * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field 556edc2ca61SVlastimil Babka * is neither read nor updated. 557748446bbSMel Gorman */ 558edc2ca61SVlastimil Babka static unsigned long 559edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 560edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 561748446bbSMel Gorman { 562edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 563b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 564748446bbSMel Gorman struct list_head *migratelist = &cc->migratepages; 565fa9add64SHugh Dickins struct lruvec *lruvec; 566c67fe375SMel Gorman unsigned long flags; 5672a1402aaSMel Gorman bool locked = false; 568bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 569748446bbSMel Gorman 570748446bbSMel Gorman /* 571748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 572748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 573748446bbSMel Gorman * delay for some time until fewer pages are isolated 574748446bbSMel Gorman */ 575748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 576f9e35b3bSMel Gorman /* async migration should just abort */ 577e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 5782fe86e00SMichal Nazarewicz return 0; 579f9e35b3bSMel Gorman 580748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 581748446bbSMel Gorman 582748446bbSMel Gorman if (fatal_signal_pending(current)) 5832fe86e00SMichal Nazarewicz return 0; 584748446bbSMel Gorman } 585748446bbSMel Gorman 586be976572SVlastimil Babka if (compact_should_abort(cc)) 587aeef4b83SDavid Rientjes return 0; 588aeef4b83SDavid Rientjes 589748446bbSMel Gorman /* Time to isolate some pages for migration */ 590748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 5918b44d279SVlastimil Babka /* 5928b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 5938b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort async compaction 5948b44d279SVlastimil Babka * if contended. 5958b44d279SVlastimil Babka */ 5968b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 5978b44d279SVlastimil Babka && compact_unlock_should_abort(&zone->lru_lock, flags, 5988b44d279SVlastimil Babka &locked, cc)) 5998b44d279SVlastimil Babka break; 600b2eef8c0SAndrea Arcangeli 601748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 602748446bbSMel Gorman continue; 603b7aba698SMel Gorman nr_scanned++; 604748446bbSMel Gorman 605748446bbSMel Gorman page = pfn_to_page(low_pfn); 606dc908600SMel Gorman 607bb13ffebSMel Gorman if (!valid_page) 608bb13ffebSMel Gorman valid_page = page; 609bb13ffebSMel Gorman 610c122b208SJoonsoo Kim /* 6116c14466cSMel Gorman * Skip if free. page_order cannot be used without zone->lock 6126c14466cSMel Gorman * as nothing prevents parallel allocations or buddy merging. 6136c14466cSMel Gorman */ 614748446bbSMel Gorman if (PageBuddy(page)) 615748446bbSMel Gorman continue; 616748446bbSMel Gorman 6179927af74SMel Gorman /* 618bf6bddf1SRafael Aquini * Check may be lockless but that's ok as we recheck later. 619bf6bddf1SRafael Aquini * It's possible to migrate LRU pages and balloon pages 620bf6bddf1SRafael Aquini * Skip any other type of page 621bf6bddf1SRafael Aquini */ 622bf6bddf1SRafael Aquini if (!PageLRU(page)) { 623bf6bddf1SRafael Aquini if (unlikely(balloon_page_movable(page))) { 624bf6bddf1SRafael Aquini if (locked && balloon_page_isolate(page)) { 625bf6bddf1SRafael Aquini /* Successfully isolated */ 626b6c75016SJoonsoo Kim goto isolate_success; 627bf6bddf1SRafael Aquini } 628bf6bddf1SRafael Aquini } 629bc835011SAndrea Arcangeli continue; 630bf6bddf1SRafael Aquini } 631bc835011SAndrea Arcangeli 632bc835011SAndrea Arcangeli /* 6332a1402aaSMel Gorman * PageLRU is set. lru_lock normally excludes isolation 6342a1402aaSMel Gorman * splitting and collapsing (collapsing has already happened 6352a1402aaSMel Gorman * if PageLRU is set) but the lock is not necessarily taken 6362a1402aaSMel Gorman * here and it is wasteful to take it just to check transhuge. 6372a1402aaSMel Gorman * Check TransHuge without lock and skip the whole pageblock if 6382a1402aaSMel Gorman * it's either a transhuge or hugetlbfs page, as calling 6392a1402aaSMel Gorman * compound_order() without preventing THP from splitting the 6402a1402aaSMel Gorman * page underneath us may return surprising results. 641bc835011SAndrea Arcangeli */ 642bc835011SAndrea Arcangeli if (PageTransHuge(page)) { 6432a1402aaSMel Gorman if (!locked) 644edc2ca61SVlastimil Babka low_pfn = ALIGN(low_pfn + 1, 645edc2ca61SVlastimil Babka pageblock_nr_pages) - 1; 646edc2ca61SVlastimil Babka else 6472a1402aaSMel Gorman low_pfn += (1 << compound_order(page)) - 1; 648edc2ca61SVlastimil Babka 6492a1402aaSMel Gorman continue; 6502a1402aaSMel Gorman } 6512a1402aaSMel Gorman 652119d6d59SDavid Rientjes /* 653119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 654119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 655119d6d59SDavid Rientjes * admittedly racy check. 656119d6d59SDavid Rientjes */ 657119d6d59SDavid Rientjes if (!page_mapping(page) && 658119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 659119d6d59SDavid Rientjes continue; 660119d6d59SDavid Rientjes 66169b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 66269b7189fSVlastimil Babka if (!locked) { 6638b44d279SVlastimil Babka locked = compact_trylock_irqsave(&zone->lru_lock, 6648b44d279SVlastimil Babka &flags, cc); 6658b44d279SVlastimil Babka if (!locked) 6662a1402aaSMel Gorman break; 6672a1402aaSMel Gorman 6682a1402aaSMel Gorman /* Recheck PageLRU and PageTransHuge under lock */ 6692a1402aaSMel Gorman if (!PageLRU(page)) 6702a1402aaSMel Gorman continue; 6712a1402aaSMel Gorman if (PageTransHuge(page)) { 672bc835011SAndrea Arcangeli low_pfn += (1 << compound_order(page)) - 1; 673bc835011SAndrea Arcangeli continue; 674bc835011SAndrea Arcangeli } 67569b7189fSVlastimil Babka } 676bc835011SAndrea Arcangeli 677fa9add64SHugh Dickins lruvec = mem_cgroup_page_lruvec(page, zone); 678fa9add64SHugh Dickins 679748446bbSMel Gorman /* Try isolate the page */ 680edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 681748446bbSMel Gorman continue; 682748446bbSMel Gorman 683309381feSSasha Levin VM_BUG_ON_PAGE(PageTransCompound(page), page); 684bc835011SAndrea Arcangeli 685748446bbSMel Gorman /* Successfully isolated */ 686fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 687b6c75016SJoonsoo Kim 688b6c75016SJoonsoo Kim isolate_success: 689b6c75016SJoonsoo Kim cc->finished_update_migrate = true; 690748446bbSMel Gorman list_add(&page->lru, migratelist); 691748446bbSMel Gorman cc->nr_migratepages++; 692b7aba698SMel Gorman nr_isolated++; 693748446bbSMel Gorman 694748446bbSMel Gorman /* Avoid isolating too much */ 69531b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 69631b8384aSHillf Danton ++low_pfn; 697748446bbSMel Gorman break; 698748446bbSMel Gorman } 69931b8384aSHillf Danton } 700748446bbSMel Gorman 701c67fe375SMel Gorman if (locked) 702c67fe375SMel Gorman spin_unlock_irqrestore(&zone->lru_lock, flags); 703748446bbSMel Gorman 70450b5b094SVlastimil Babka /* 70550b5b094SVlastimil Babka * Update the pageblock-skip information and cached scanner pfn, 70650b5b094SVlastimil Babka * if the whole pageblock was scanned without isolating any page. 70750b5b094SVlastimil Babka */ 70835979ef3SDavid Rientjes if (low_pfn == end_pfn) 709edc2ca61SVlastimil Babka update_pageblock_skip(cc, valid_page, nr_isolated, true); 710bb13ffebSMel Gorman 711b7aba698SMel Gorman trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 712b7aba698SMel Gorman 713010fc29aSMinchan Kim count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned); 714397487dbSMel Gorman if (nr_isolated) 715010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 716397487dbSMel Gorman 7172fe86e00SMichal Nazarewicz return low_pfn; 7182fe86e00SMichal Nazarewicz } 7192fe86e00SMichal Nazarewicz 720edc2ca61SVlastimil Babka /** 721edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 722edc2ca61SVlastimil Babka * @cc: Compaction control structure. 723edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating. 724edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN. 725edc2ca61SVlastimil Babka * 726edc2ca61SVlastimil Babka * Returns zero if isolation fails fatally due to e.g. pending signal. 727edc2ca61SVlastimil Babka * Otherwise, function returns one-past-the-last PFN of isolated page 728edc2ca61SVlastimil Babka * (which may be greater than end_pfn if end fell in a middle of a THP page). 729edc2ca61SVlastimil Babka */ 730edc2ca61SVlastimil Babka unsigned long 731edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 732edc2ca61SVlastimil Babka unsigned long end_pfn) 733edc2ca61SVlastimil Babka { 734edc2ca61SVlastimil Babka unsigned long pfn, block_end_pfn; 735edc2ca61SVlastimil Babka 736edc2ca61SVlastimil Babka /* Scan block by block. First and last block may be incomplete */ 737edc2ca61SVlastimil Babka pfn = start_pfn; 738edc2ca61SVlastimil Babka block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 739edc2ca61SVlastimil Babka 740edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 741edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 742edc2ca61SVlastimil Babka 743edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 744edc2ca61SVlastimil Babka 7457d49d886SVlastimil Babka if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 746edc2ca61SVlastimil Babka continue; 747edc2ca61SVlastimil Babka 748edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 749edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 750edc2ca61SVlastimil Babka 751edc2ca61SVlastimil Babka /* 752edc2ca61SVlastimil Babka * In case of fatal failure, release everything that might 753edc2ca61SVlastimil Babka * have been isolated in the previous iteration, and signal 754edc2ca61SVlastimil Babka * the failure back to caller. 755edc2ca61SVlastimil Babka */ 756edc2ca61SVlastimil Babka if (!pfn) { 757edc2ca61SVlastimil Babka putback_movable_pages(&cc->migratepages); 758edc2ca61SVlastimil Babka cc->nr_migratepages = 0; 759edc2ca61SVlastimil Babka break; 760edc2ca61SVlastimil Babka } 761edc2ca61SVlastimil Babka } 762edc2ca61SVlastimil Babka acct_isolated(cc->zone, cc); 763edc2ca61SVlastimil Babka 764edc2ca61SVlastimil Babka return pfn; 765edc2ca61SVlastimil Babka } 766edc2ca61SVlastimil Babka 767ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 768ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 769ff9543fdSMichal Nazarewicz /* 770ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 771ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 772ff9543fdSMichal Nazarewicz */ 773edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 774ff9543fdSMichal Nazarewicz { 775edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 776ff9543fdSMichal Nazarewicz struct page *page; 777c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 778*e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 779c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 780c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 781ff9543fdSMichal Nazarewicz int nr_freepages = cc->nr_freepages; 782ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 7832fe86e00SMichal Nazarewicz 784ff9543fdSMichal Nazarewicz /* 785ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 78649e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 787*e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 788*e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 789c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 790c96b9e50SVlastimil Babka * For ending point, take care when isolating in last pageblock of a 791c96b9e50SVlastimil Babka * a zone which ends in the middle of a pageblock. 79249e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 79349e068f0SVlastimil Babka * is using. 794ff9543fdSMichal Nazarewicz */ 795*e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 796c96b9e50SVlastimil Babka block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 797c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 798c96b9e50SVlastimil Babka zone_end_pfn(zone)); 7997ed695e0SVlastimil Babka low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 8002fe86e00SMichal Nazarewicz 801ff9543fdSMichal Nazarewicz /* 802ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 803ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 804ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 805ff9543fdSMichal Nazarewicz */ 806c96b9e50SVlastimil Babka for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages; 807c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 808*e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 809*e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 810ff9543fdSMichal Nazarewicz unsigned long isolated; 811ff9543fdSMichal Nazarewicz 812f6ea3adbSDavid Rientjes /* 813f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 814f6ea3adbSDavid Rientjes * suitable migration targets, so periodically check if we need 815be976572SVlastimil Babka * to schedule, or even abort async compaction. 816f6ea3adbSDavid Rientjes */ 817be976572SVlastimil Babka if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 818be976572SVlastimil Babka && compact_should_abort(cc)) 819be976572SVlastimil Babka break; 820f6ea3adbSDavid Rientjes 8217d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 8227d49d886SVlastimil Babka zone); 8237d49d886SVlastimil Babka if (!page) 824ff9543fdSMichal Nazarewicz continue; 825ff9543fdSMichal Nazarewicz 826ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 82768e3e926SLinus Torvalds if (!suitable_migration_target(page)) 828ff9543fdSMichal Nazarewicz continue; 82968e3e926SLinus Torvalds 830bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 831bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 832bb13ffebSMel Gorman continue; 833bb13ffebSMel Gorman 834*e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 835*e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 836c96b9e50SVlastimil Babka block_end_pfn, freelist, false); 837ff9543fdSMichal Nazarewicz nr_freepages += isolated; 838ff9543fdSMichal Nazarewicz 839ff9543fdSMichal Nazarewicz /* 840*e14c720eSVlastimil Babka * Remember where the free scanner should restart next time, 841*e14c720eSVlastimil Babka * which is where isolate_freepages_block() left off. 842*e14c720eSVlastimil Babka * But if it scanned the whole pageblock, isolate_start_pfn 843*e14c720eSVlastimil Babka * now points at block_end_pfn, which is the start of the next 844*e14c720eSVlastimil Babka * pageblock. 845*e14c720eSVlastimil Babka * In that case we will however want to restart at the start 846*e14c720eSVlastimil Babka * of the previous pageblock. 847*e14c720eSVlastimil Babka */ 848*e14c720eSVlastimil Babka cc->free_pfn = (isolate_start_pfn < block_end_pfn) ? 849*e14c720eSVlastimil Babka isolate_start_pfn : 850*e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 851*e14c720eSVlastimil Babka 852*e14c720eSVlastimil Babka /* 853e9ade569SVlastimil Babka * Set a flag that we successfully isolated in this pageblock. 854e9ade569SVlastimil Babka * In the next loop iteration, zone->compact_cached_free_pfn 855e9ade569SVlastimil Babka * will not be updated and thus it will effectively contain the 856e9ade569SVlastimil Babka * highest pageblock we isolated pages from. 857ff9543fdSMichal Nazarewicz */ 858e9ade569SVlastimil Babka if (isolated) 859c89511abSMel Gorman cc->finished_update_free = true; 860be976572SVlastimil Babka 861be976572SVlastimil Babka /* 862be976572SVlastimil Babka * isolate_freepages_block() might have aborted due to async 863be976572SVlastimil Babka * compaction being contended 864be976572SVlastimil Babka */ 865be976572SVlastimil Babka if (cc->contended) 866be976572SVlastimil Babka break; 867c89511abSMel Gorman } 868ff9543fdSMichal Nazarewicz 869ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 870ff9543fdSMichal Nazarewicz map_pages(freelist); 871ff9543fdSMichal Nazarewicz 8727ed695e0SVlastimil Babka /* 8737ed695e0SVlastimil Babka * If we crossed the migrate scanner, we want to keep it that way 8747ed695e0SVlastimil Babka * so that compact_finished() may detect this 8757ed695e0SVlastimil Babka */ 876c96b9e50SVlastimil Babka if (block_start_pfn < low_pfn) 877e9ade569SVlastimil Babka cc->free_pfn = cc->migrate_pfn; 878c96b9e50SVlastimil Babka 879ff9543fdSMichal Nazarewicz cc->nr_freepages = nr_freepages; 880748446bbSMel Gorman } 881748446bbSMel Gorman 882748446bbSMel Gorman /* 883748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 884748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 885748446bbSMel Gorman */ 886748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 887748446bbSMel Gorman unsigned long data, 888748446bbSMel Gorman int **result) 889748446bbSMel Gorman { 890748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 891748446bbSMel Gorman struct page *freepage; 892748446bbSMel Gorman 893be976572SVlastimil Babka /* 894be976572SVlastimil Babka * Isolate free pages if necessary, and if we are not aborting due to 895be976572SVlastimil Babka * contention. 896be976572SVlastimil Babka */ 897748446bbSMel Gorman if (list_empty(&cc->freepages)) { 898be976572SVlastimil Babka if (!cc->contended) 899edc2ca61SVlastimil Babka isolate_freepages(cc); 900748446bbSMel Gorman 901748446bbSMel Gorman if (list_empty(&cc->freepages)) 902748446bbSMel Gorman return NULL; 903748446bbSMel Gorman } 904748446bbSMel Gorman 905748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 906748446bbSMel Gorman list_del(&freepage->lru); 907748446bbSMel Gorman cc->nr_freepages--; 908748446bbSMel Gorman 909748446bbSMel Gorman return freepage; 910748446bbSMel Gorman } 911748446bbSMel Gorman 912748446bbSMel Gorman /* 913d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 914d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 915d53aea3dSDavid Rientjes * special handling needed for NUMA. 916d53aea3dSDavid Rientjes */ 917d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 918d53aea3dSDavid Rientjes { 919d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 920d53aea3dSDavid Rientjes 921d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 922d53aea3dSDavid Rientjes cc->nr_freepages++; 923d53aea3dSDavid Rientjes } 924d53aea3dSDavid Rientjes 925ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 926ff9543fdSMichal Nazarewicz typedef enum { 927ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 928ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 929ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 930ff9543fdSMichal Nazarewicz } isolate_migrate_t; 931ff9543fdSMichal Nazarewicz 932ff9543fdSMichal Nazarewicz /* 933edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 934edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 935edc2ca61SVlastimil Babka * compact_control. 936ff9543fdSMichal Nazarewicz */ 937ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 938ff9543fdSMichal Nazarewicz struct compact_control *cc) 939ff9543fdSMichal Nazarewicz { 940ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 941edc2ca61SVlastimil Babka struct page *page; 942edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 943edc2ca61SVlastimil Babka (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); 944ff9543fdSMichal Nazarewicz 945edc2ca61SVlastimil Babka /* 946edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 947edc2ca61SVlastimil Babka * initialized by compact_zone() 948edc2ca61SVlastimil Babka */ 949edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 950ff9543fdSMichal Nazarewicz 951ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 952a9aacbccSMel Gorman end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 953ff9543fdSMichal Nazarewicz 954edc2ca61SVlastimil Babka /* 955edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 956edc2ca61SVlastimil Babka * Do not cross the free scanner. 957edc2ca61SVlastimil Babka */ 958edc2ca61SVlastimil Babka for (; end_pfn <= cc->free_pfn; 959edc2ca61SVlastimil Babka low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { 960edc2ca61SVlastimil Babka 961edc2ca61SVlastimil Babka /* 962edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 963edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 964edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 965edc2ca61SVlastimil Babka */ 966edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 967edc2ca61SVlastimil Babka && compact_should_abort(cc)) 968edc2ca61SVlastimil Babka break; 969edc2ca61SVlastimil Babka 9707d49d886SVlastimil Babka page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); 9717d49d886SVlastimil Babka if (!page) 972edc2ca61SVlastimil Babka continue; 973edc2ca61SVlastimil Babka 974edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 975edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 976edc2ca61SVlastimil Babka continue; 977edc2ca61SVlastimil Babka 978edc2ca61SVlastimil Babka /* 979edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 980edc2ca61SVlastimil Babka * Async compaction is optimistic to see if the minimum amount 981edc2ca61SVlastimil Babka * of work satisfies the allocation. 982edc2ca61SVlastimil Babka */ 983edc2ca61SVlastimil Babka if (cc->mode == MIGRATE_ASYNC && 984edc2ca61SVlastimil Babka !migrate_async_suitable(get_pageblock_migratetype(page))) 985edc2ca61SVlastimil Babka continue; 986ff9543fdSMichal Nazarewicz 987ff9543fdSMichal Nazarewicz /* Perform the isolation */ 988edc2ca61SVlastimil Babka low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, 989edc2ca61SVlastimil Babka isolate_mode); 990edc2ca61SVlastimil Babka 991e64c5237SShaohua Li if (!low_pfn || cc->contended) 992ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 993ff9543fdSMichal Nazarewicz 994edc2ca61SVlastimil Babka /* 995edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 996edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 997edc2ca61SVlastimil Babka * continue or not. 998edc2ca61SVlastimil Babka */ 999edc2ca61SVlastimil Babka break; 1000edc2ca61SVlastimil Babka } 1001edc2ca61SVlastimil Babka 1002edc2ca61SVlastimil Babka acct_isolated(zone, cc); 1003edc2ca61SVlastimil Babka /* Record where migration scanner will be restarted */ 1004ff9543fdSMichal Nazarewicz cc->migrate_pfn = low_pfn; 1005ff9543fdSMichal Nazarewicz 1006edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1007ff9543fdSMichal Nazarewicz } 1008ff9543fdSMichal Nazarewicz 1009748446bbSMel Gorman static int compact_finished(struct zone *zone, 1010748446bbSMel Gorman struct compact_control *cc) 1011748446bbSMel Gorman { 10128fb74b9fSMel Gorman unsigned int order; 10135a03b051SAndrea Arcangeli unsigned long watermark; 101456de7263SMel Gorman 1015be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 1016748446bbSMel Gorman return COMPACT_PARTIAL; 1017748446bbSMel Gorman 1018753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1019bb13ffebSMel Gorman if (cc->free_pfn <= cc->migrate_pfn) { 102055b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 102135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 102235979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 102355b7c4c9SVlastimil Babka zone->compact_cached_free_pfn = zone_end_pfn(zone); 102455b7c4c9SVlastimil Babka 102562997027SMel Gorman /* 102662997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 102762997027SMel Gorman * by kswapd when it goes to sleep. kswapd does not set the 102862997027SMel Gorman * flag itself as the decision to be clear should be directly 102962997027SMel Gorman * based on an allocation request. 103062997027SMel Gorman */ 103162997027SMel Gorman if (!current_is_kswapd()) 103262997027SMel Gorman zone->compact_blockskip_flush = true; 103362997027SMel Gorman 1034748446bbSMel Gorman return COMPACT_COMPLETE; 1035bb13ffebSMel Gorman } 1036748446bbSMel Gorman 103782478fb7SJohannes Weiner /* 103882478fb7SJohannes Weiner * order == -1 is expected when compacting via 103982478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 104082478fb7SJohannes Weiner */ 104156de7263SMel Gorman if (cc->order == -1) 104256de7263SMel Gorman return COMPACT_CONTINUE; 104356de7263SMel Gorman 10443957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 10453957c776SMichal Hocko watermark = low_wmark_pages(zone); 10463957c776SMichal Hocko watermark += (1 << cc->order); 10473957c776SMichal Hocko 10483957c776SMichal Hocko if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 10493957c776SMichal Hocko return COMPACT_CONTINUE; 10503957c776SMichal Hocko 105156de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 105256de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 10538fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 10548fb74b9fSMel Gorman 105556de7263SMel Gorman /* Job done if page is free of the right migratetype */ 10561fb3f8caSMel Gorman if (!list_empty(&area->free_list[cc->migratetype])) 105756de7263SMel Gorman return COMPACT_PARTIAL; 105856de7263SMel Gorman 105956de7263SMel Gorman /* Job done if allocation would set block type */ 10601fb3f8caSMel Gorman if (cc->order >= pageblock_order && area->nr_free) 106156de7263SMel Gorman return COMPACT_PARTIAL; 106256de7263SMel Gorman } 106356de7263SMel Gorman 1064748446bbSMel Gorman return COMPACT_CONTINUE; 1065748446bbSMel Gorman } 1066748446bbSMel Gorman 10673e7d3449SMel Gorman /* 10683e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 10693e7d3449SMel Gorman * Returns 10703e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 10713e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 10723e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 10733e7d3449SMel Gorman */ 10743e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order) 10753e7d3449SMel Gorman { 10763e7d3449SMel Gorman int fragindex; 10773e7d3449SMel Gorman unsigned long watermark; 10783e7d3449SMel Gorman 10793e7d3449SMel Gorman /* 10803957c776SMichal Hocko * order == -1 is expected when compacting via 10813957c776SMichal Hocko * /proc/sys/vm/compact_memory 10823957c776SMichal Hocko */ 10833957c776SMichal Hocko if (order == -1) 10843957c776SMichal Hocko return COMPACT_CONTINUE; 10853957c776SMichal Hocko 10863957c776SMichal Hocko /* 10873e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 10883e7d3449SMel Gorman * This is because during migration, copies of pages need to be 10893e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 10903e7d3449SMel Gorman */ 10913e7d3449SMel Gorman watermark = low_wmark_pages(zone) + (2UL << order); 10923e7d3449SMel Gorman if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 10933e7d3449SMel Gorman return COMPACT_SKIPPED; 10943e7d3449SMel Gorman 10953e7d3449SMel Gorman /* 10963e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 10973e7d3449SMel Gorman * low memory or external fragmentation 10983e7d3449SMel Gorman * 1099a582a738SShaohua Li * index of -1000 implies allocations might succeed depending on 1100a582a738SShaohua Li * watermarks 11013e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 11023e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 11033e7d3449SMel Gorman * 11043e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 11053e7d3449SMel Gorman */ 11063e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 11073e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 11083e7d3449SMel Gorman return COMPACT_SKIPPED; 11093e7d3449SMel Gorman 1110a582a738SShaohua Li if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 1111a582a738SShaohua Li 0, 0)) 11123e7d3449SMel Gorman return COMPACT_PARTIAL; 11133e7d3449SMel Gorman 11143e7d3449SMel Gorman return COMPACT_CONTINUE; 11153e7d3449SMel Gorman } 11163e7d3449SMel Gorman 1117748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 1118748446bbSMel Gorman { 1119748446bbSMel Gorman int ret; 1120c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1121108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 1122e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1123748446bbSMel Gorman 11243e7d3449SMel Gorman ret = compaction_suitable(zone, cc->order); 11253e7d3449SMel Gorman switch (ret) { 11263e7d3449SMel Gorman case COMPACT_PARTIAL: 11273e7d3449SMel Gorman case COMPACT_SKIPPED: 11283e7d3449SMel Gorman /* Compaction is likely to fail */ 11293e7d3449SMel Gorman return ret; 11303e7d3449SMel Gorman case COMPACT_CONTINUE: 11313e7d3449SMel Gorman /* Fall through to compaction */ 11323e7d3449SMel Gorman ; 11333e7d3449SMel Gorman } 11343e7d3449SMel Gorman 1135c89511abSMel Gorman /* 1136d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1137d3132e4bSVlastimil Babka * is about to be retried after being deferred. kswapd does not do 1138d3132e4bSVlastimil Babka * this reset as it'll reset the cached information when going to sleep. 1139d3132e4bSVlastimil Babka */ 1140d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) 1141d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1142d3132e4bSVlastimil Babka 1143d3132e4bSVlastimil Babka /* 1144c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 1145c89511abSMel Gorman * information on where the scanners should start but check that it 1146c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 1147c89511abSMel Gorman */ 1148e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1149c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1150c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { 1151c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); 1152c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1153c89511abSMel Gorman } 1154c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { 1155c89511abSMel Gorman cc->migrate_pfn = start_pfn; 115635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 115735979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1158c89511abSMel Gorman } 1159748446bbSMel Gorman 11600eb927c0SMel Gorman trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); 11610eb927c0SMel Gorman 1162748446bbSMel Gorman migrate_prep_local(); 1163748446bbSMel Gorman 1164748446bbSMel Gorman while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 11659d502c1cSMinchan Kim int err; 1166748446bbSMel Gorman 1167f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1168f9e35b3bSMel Gorman case ISOLATE_ABORT: 1169f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 11705733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1171e64c5237SShaohua Li cc->nr_migratepages = 0; 1172f9e35b3bSMel Gorman goto out; 1173f9e35b3bSMel Gorman case ISOLATE_NONE: 1174748446bbSMel Gorman continue; 1175f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1176f9e35b3bSMel Gorman ; 1177f9e35b3bSMel Gorman } 1178748446bbSMel Gorman 1179d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1180e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 11817b2a2d4aSMel Gorman MR_COMPACTION); 1182748446bbSMel Gorman 1183f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1184f8c9301fSVlastimil Babka &cc->migratepages); 1185748446bbSMel Gorman 1186f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1187f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 11889d502c1cSMinchan Kim if (err) { 11895733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 11907ed695e0SVlastimil Babka /* 11917ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 11927ed695e0SVlastimil Babka * and we want compact_finished() to detect it 11937ed695e0SVlastimil Babka */ 11947ed695e0SVlastimil Babka if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { 11954bf2bba3SDavid Rientjes ret = COMPACT_PARTIAL; 11964bf2bba3SDavid Rientjes goto out; 1197748446bbSMel Gorman } 11984bf2bba3SDavid Rientjes } 1199748446bbSMel Gorman } 1200748446bbSMel Gorman 1201f9e35b3bSMel Gorman out: 1202748446bbSMel Gorman /* Release free pages and check accounting */ 1203748446bbSMel Gorman cc->nr_freepages -= release_freepages(&cc->freepages); 1204748446bbSMel Gorman VM_BUG_ON(cc->nr_freepages != 0); 1205748446bbSMel Gorman 12060eb927c0SMel Gorman trace_mm_compaction_end(ret); 12070eb927c0SMel Gorman 1208748446bbSMel Gorman return ret; 1209748446bbSMel Gorman } 121076ab0f53SMel Gorman 1211e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order, 12121f9efdefSVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended) 121356de7263SMel Gorman { 1214e64c5237SShaohua Li unsigned long ret; 121556de7263SMel Gorman struct compact_control cc = { 121656de7263SMel Gorman .nr_freepages = 0, 121756de7263SMel Gorman .nr_migratepages = 0, 121856de7263SMel Gorman .order = order, 121956de7263SMel Gorman .migratetype = allocflags_to_migratetype(gfp_mask), 122056de7263SMel Gorman .zone = zone, 1221e0b9daebSDavid Rientjes .mode = mode, 122256de7263SMel Gorman }; 122356de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 122456de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 122556de7263SMel Gorman 1226e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1227e64c5237SShaohua Li 1228e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1229e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1230e64c5237SShaohua Li 1231e64c5237SShaohua Li *contended = cc.contended; 1232e64c5237SShaohua Li return ret; 123356de7263SMel Gorman } 123456de7263SMel Gorman 12355e771905SMel Gorman int sysctl_extfrag_threshold = 500; 12365e771905SMel Gorman 123756de7263SMel Gorman /** 123856de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 123956de7263SMel Gorman * @zonelist: The zonelist used for the current allocation 124056de7263SMel Gorman * @order: The order of the current allocation 124156de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 124256de7263SMel Gorman * @nodemask: The allowed nodes to allocate from 1243e0b9daebSDavid Rientjes * @mode: The migration mode for async, sync light, or sync migration 12441f9efdefSVlastimil Babka * @contended: Return value that determines if compaction was aborted due to 12451f9efdefSVlastimil Babka * need_resched() or lock contention 124653853e2dSVlastimil Babka * @candidate_zone: Return the zone where we think allocation should succeed 124756de7263SMel Gorman * 124856de7263SMel Gorman * This is the main entry point for direct page compaction. 124956de7263SMel Gorman */ 125056de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist, 125177f1fe6bSMel Gorman int order, gfp_t gfp_mask, nodemask_t *nodemask, 12521f9efdefSVlastimil Babka enum migrate_mode mode, int *contended, 125353853e2dSVlastimil Babka struct zone **candidate_zone) 125456de7263SMel Gorman { 125556de7263SMel Gorman enum zone_type high_zoneidx = gfp_zone(gfp_mask); 125656de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 125756de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 125856de7263SMel Gorman struct zoneref *z; 125956de7263SMel Gorman struct zone *zone; 126053853e2dSVlastimil Babka int rc = COMPACT_DEFERRED; 1261d95ea5d1SBartlomiej Zolnierkiewicz int alloc_flags = 0; 12621f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ 12631f9efdefSVlastimil Babka 12641f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE; 126556de7263SMel Gorman 12664ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1267c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 126853853e2dSVlastimil Babka return COMPACT_SKIPPED; 126956de7263SMel Gorman 1270d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA 1271d95ea5d1SBartlomiej Zolnierkiewicz if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 1272d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags |= ALLOC_CMA; 1273d95ea5d1SBartlomiej Zolnierkiewicz #endif 127456de7263SMel Gorman /* Compact each zone in the list */ 127556de7263SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 127656de7263SMel Gorman nodemask) { 127756de7263SMel Gorman int status; 12781f9efdefSVlastimil Babka int zone_contended; 127956de7263SMel Gorman 128053853e2dSVlastimil Babka if (compaction_deferred(zone, order)) 128153853e2dSVlastimil Babka continue; 128253853e2dSVlastimil Babka 1283e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode, 12841f9efdefSVlastimil Babka &zone_contended); 128556de7263SMel Gorman rc = max(status, rc); 12861f9efdefSVlastimil Babka /* 12871f9efdefSVlastimil Babka * It takes at least one zone that wasn't lock contended 12881f9efdefSVlastimil Babka * to clear all_zones_contended. 12891f9efdefSVlastimil Babka */ 12901f9efdefSVlastimil Babka all_zones_contended &= zone_contended; 129156de7263SMel Gorman 12923e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1293d95ea5d1SBartlomiej Zolnierkiewicz if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 129453853e2dSVlastimil Babka alloc_flags)) { 129553853e2dSVlastimil Babka *candidate_zone = zone; 129653853e2dSVlastimil Babka /* 129753853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 129853853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 129953853e2dSVlastimil Babka * will repeat this with true if allocation indeed 130053853e2dSVlastimil Babka * succeeds in this zone. 130153853e2dSVlastimil Babka */ 130253853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 13031f9efdefSVlastimil Babka /* 13041f9efdefSVlastimil Babka * It is possible that async compaction aborted due to 13051f9efdefSVlastimil Babka * need_resched() and the watermarks were ok thanks to 13061f9efdefSVlastimil Babka * somebody else freeing memory. The allocation can 13071f9efdefSVlastimil Babka * however still fail so we better signal the 13081f9efdefSVlastimil Babka * need_resched() contention anyway (this will not 13091f9efdefSVlastimil Babka * prevent the allocation attempt). 13101f9efdefSVlastimil Babka */ 13111f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED) 13121f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 13131f9efdefSVlastimil Babka 13141f9efdefSVlastimil Babka goto break_loop; 13151f9efdefSVlastimil Babka } 13161f9efdefSVlastimil Babka 13171f9efdefSVlastimil Babka if (mode != MIGRATE_ASYNC) { 131853853e2dSVlastimil Babka /* 131953853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 132053853e2dSVlastimil Babka * so we defer compaction there. If it ends up 132153853e2dSVlastimil Babka * succeeding after all, it will be reset. 132253853e2dSVlastimil Babka */ 132353853e2dSVlastimil Babka defer_compaction(zone, order); 132453853e2dSVlastimil Babka } 13251f9efdefSVlastimil Babka 13261f9efdefSVlastimil Babka /* 13271f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 13281f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 13291f9efdefSVlastimil Babka * case do not try further zones and signal need_resched() 13301f9efdefSVlastimil Babka * contention. 13311f9efdefSVlastimil Babka */ 13321f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED) 13331f9efdefSVlastimil Babka || fatal_signal_pending(current)) { 13341f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 13351f9efdefSVlastimil Babka goto break_loop; 133656de7263SMel Gorman } 133756de7263SMel Gorman 13381f9efdefSVlastimil Babka continue; 13391f9efdefSVlastimil Babka break_loop: 13401f9efdefSVlastimil Babka /* 13411f9efdefSVlastimil Babka * We might not have tried all the zones, so be conservative 13421f9efdefSVlastimil Babka * and assume they are not all lock contended. 13431f9efdefSVlastimil Babka */ 13441f9efdefSVlastimil Babka all_zones_contended = 0; 13451f9efdefSVlastimil Babka break; 13461f9efdefSVlastimil Babka } 13471f9efdefSVlastimil Babka 13481f9efdefSVlastimil Babka /* 13491f9efdefSVlastimil Babka * If at least one zone wasn't deferred or skipped, we report if all 13501f9efdefSVlastimil Babka * zones that were tried were lock contended. 13511f9efdefSVlastimil Babka */ 13521f9efdefSVlastimil Babka if (rc > COMPACT_SKIPPED && all_zones_contended) 13531f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK; 13541f9efdefSVlastimil Babka 135556de7263SMel Gorman return rc; 135656de7263SMel Gorman } 135756de7263SMel Gorman 135856de7263SMel Gorman 135976ab0f53SMel Gorman /* Compact all zones within a node */ 13607103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 136176ab0f53SMel Gorman { 136276ab0f53SMel Gorman int zoneid; 136376ab0f53SMel Gorman struct zone *zone; 136476ab0f53SMel Gorman 136576ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 136676ab0f53SMel Gorman 136776ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 136876ab0f53SMel Gorman if (!populated_zone(zone)) 136976ab0f53SMel Gorman continue; 137076ab0f53SMel Gorman 13717be62de9SRik van Riel cc->nr_freepages = 0; 13727be62de9SRik van Riel cc->nr_migratepages = 0; 13737be62de9SRik van Riel cc->zone = zone; 13747be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 13757be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 137676ab0f53SMel Gorman 1377aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 13787be62de9SRik van Riel compact_zone(zone, cc); 137976ab0f53SMel Gorman 1380aff62249SRik van Riel if (cc->order > 0) { 1381de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1382de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1383de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1384aff62249SRik van Riel } 1385aff62249SRik van Riel 13867be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages)); 13877be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages)); 138876ab0f53SMel Gorman } 138976ab0f53SMel Gorman } 139076ab0f53SMel Gorman 13917103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 13927be62de9SRik van Riel { 13937be62de9SRik van Riel struct compact_control cc = { 13947be62de9SRik van Riel .order = order, 1395e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC, 13967be62de9SRik van Riel }; 13977be62de9SRik van Riel 13983a7200afSMel Gorman if (!order) 13993a7200afSMel Gorman return; 14003a7200afSMel Gorman 14017103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 14027be62de9SRik van Riel } 14037be62de9SRik van Riel 14047103f16dSAndrew Morton static void compact_node(int nid) 14057be62de9SRik van Riel { 14067be62de9SRik van Riel struct compact_control cc = { 14077be62de9SRik van Riel .order = -1, 1408e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 140991ca9186SDavid Rientjes .ignore_skip_hint = true, 14107be62de9SRik van Riel }; 14117be62de9SRik van Riel 14127103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 14137be62de9SRik van Riel } 14147be62de9SRik van Riel 141576ab0f53SMel Gorman /* Compact all nodes in the system */ 14167964c06dSJason Liu static void compact_nodes(void) 141776ab0f53SMel Gorman { 141876ab0f53SMel Gorman int nid; 141976ab0f53SMel Gorman 14208575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 14218575ec29SHugh Dickins lru_add_drain_all(); 14228575ec29SHugh Dickins 142376ab0f53SMel Gorman for_each_online_node(nid) 142476ab0f53SMel Gorman compact_node(nid); 142576ab0f53SMel Gorman } 142676ab0f53SMel Gorman 142776ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 142876ab0f53SMel Gorman int sysctl_compact_memory; 142976ab0f53SMel Gorman 143076ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */ 143176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 143276ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 143376ab0f53SMel Gorman { 143476ab0f53SMel Gorman if (write) 14357964c06dSJason Liu compact_nodes(); 143676ab0f53SMel Gorman 143776ab0f53SMel Gorman return 0; 143876ab0f53SMel Gorman } 1439ed4a6d7fSMel Gorman 14405e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 14415e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 14425e771905SMel Gorman { 14435e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 14445e771905SMel Gorman 14455e771905SMel Gorman return 0; 14465e771905SMel Gorman } 14475e771905SMel Gorman 1448ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 144974e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 145010fbcf4cSKay Sievers struct device_attribute *attr, 1451ed4a6d7fSMel Gorman const char *buf, size_t count) 1452ed4a6d7fSMel Gorman { 14538575ec29SHugh Dickins int nid = dev->id; 14548575ec29SHugh Dickins 14558575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 14568575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 14578575ec29SHugh Dickins lru_add_drain_all(); 14588575ec29SHugh Dickins 14598575ec29SHugh Dickins compact_node(nid); 14608575ec29SHugh Dickins } 1461ed4a6d7fSMel Gorman 1462ed4a6d7fSMel Gorman return count; 1463ed4a6d7fSMel Gorman } 146410fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1465ed4a6d7fSMel Gorman 1466ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1467ed4a6d7fSMel Gorman { 146810fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1469ed4a6d7fSMel Gorman } 1470ed4a6d7fSMel Gorman 1471ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1472ed4a6d7fSMel Gorman { 147310fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1474ed4a6d7fSMel Gorman } 1475ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1476ff9543fdSMichal Nazarewicz 1477ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 1478