/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migrate or free compaction scanner. The scanners then need to use
 * only the pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
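/*
 * Editorial example (not in the original source): with pageblock_order 9
 * (512 pages per pageblock), a node border at an unaligned pfn such as 1000
 * leaves the pageblock [512, 1024) spanning two zones. Its first page (512)
 * and last page (1023) then have different page_zone_id() values, so the
 * check above returns NULL and the block is skipped, while fully node-local
 * pageblocks pass after only two page lookups instead of 512.
 */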
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}
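/*
 * Editorial sketch (not in the original source): both scanners below call
 * compact_unlock_should_abort() once every SWAP_CLUSTER_MAX pfns, in the
 * form
 *
 *	if (!(pfn % SWAP_CLUSTER_MAX)
 *	    && compact_unlock_should_abort(&zone->lock, flags, &locked, cc))
 *		break;
 *
 * so the IRQ-disabled section stays bounded regardless of pageblock size.
 */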
/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially
		 * suitable pageblock, so it's not worth checking the order
		 * against a valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}
/*
 * Isolate free pages onto a private freelist. If @strict is true, abort and
 * return 0 on any invalid PFN or non-free page inside the pageblock (even
 * though some pages may still end up isolated).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do
			 * not spin on the lock and we acquire the lock as
			 * late as possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
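/*
 * Editorial note (not in the original source): split_free_page() detaches a
 * buddy page of arbitrary order and returns the number of order-0 pages it
 * was split into, which is why the loop above can advance blockpfn and
 * cursor by 'isolated - 1'. E.g. isolating an order-3 buddy at pfn 512
 * yields 8 freelist entries and moves the cursor to pfn 519 before the loop
 * increment.
 */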
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if the isolated freepage
		 * is more than pageblock order. In this case, we adjust the
		 * scanning range to the right one.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
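/*
 * Editorial example (not in the original source): with 600MB of pages on the
 * active plus inactive LRU lists, the threshold above is 300MB; while only
 * 150MB is isolated the scanners may keep going, but once isolated pages
 * exceed half of the resident LRU pages, the migrate scanner backs off and
 * waits for parallel reclaimers or compactors to put pages back.
 */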
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}
		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;
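		/*
		 * Editorial note (not in the original source): an anonymous
		 * page that is only referenced through page table mappings
		 * has page_count() == page_mapcount(); any additional
		 * reference, e.g. a get_user_pages() pin held for O_DIRECT
		 * I/O, makes page_count() larger. Migration of such a pinned
		 * page would fail anyway, so it is cheaper to skip it here
		 * than to isolate it and find out later.
		 */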
		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock
	 * of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;
		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
				isolate_start_pfn :
				block_start_pfn - pageblock_nr_pages;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}
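/*
 * Editorial sketch (not in the original source) of how the two scanners
 * traverse a zone:
 *
 *	zone_start_pfn                                      zone_end_pfn
 *	|  migrate scanner -->              <-- free scanner  |
 *	|  (isolate_migratepages)        (isolate_freepages)  |
 *
 * The migrate scanner walks upward collecting pages to move; the free
 * scanner walks downward collecting destination pages. A compaction run
 * on the zone completes when the two meet.
 */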
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
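/*
 * Editorial sketch (not in the original source): the two callbacks above are
 * handed to the migration core by compact_zone(), with a call roughly of the
 * form
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 *
 * so each migrated page draws its destination from cc->freepages, and any
 * destination page that ends up unused is returned to that list.
 */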
/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/*
	 * Record where the migration scanner will be restarted. If we end up
	 * in the same pageblock as the free scanner, make the scanners fully
	 * meet so that compact_finished() terminates compaction.
	 */
	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear the information should
		 * be based directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}
10983e7d3449SMel Gorman /*
10993e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
11003e7d3449SMel Gorman  * Returns
11013e7d3449SMel Gorman  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
11023e7d3449SMel Gorman  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
11033e7d3449SMel Gorman  *   COMPACT_CONTINUE - If compaction should run now
11043e7d3449SMel Gorman  */
1105ebff3980SVlastimil Babka unsigned long compaction_suitable(struct zone *zone, int order,
1106ebff3980SVlastimil Babka 					int alloc_flags, int classzone_idx)
11073e7d3449SMel Gorman {
11083e7d3449SMel Gorman 	int fragindex;
11093e7d3449SMel Gorman 	unsigned long watermark;
11103e7d3449SMel Gorman 
11113e7d3449SMel Gorman 	/*
11123957c776SMichal Hocko 	 * order == -1 is expected when compacting via
11133957c776SMichal Hocko 	 * /proc/sys/vm/compact_memory
11143957c776SMichal Hocko 	 */
11153957c776SMichal Hocko 	if (order == -1)
11163957c776SMichal Hocko 		return COMPACT_CONTINUE;
11173957c776SMichal Hocko 
1118ebff3980SVlastimil Babka 	watermark = low_wmark_pages(zone);
1119ebff3980SVlastimil Babka 	/*
1120ebff3980SVlastimil Babka 	 * If watermarks for high-order allocation are already met, there
1121ebff3980SVlastimil Babka 	 * should be no need for compaction at all.
1122ebff3980SVlastimil Babka 	 */
1123ebff3980SVlastimil Babka 	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1124ebff3980SVlastimil Babka 				alloc_flags))
1125ebff3980SVlastimil Babka 		return COMPACT_PARTIAL;
1126ebff3980SVlastimil Babka 
11273957c776SMichal Hocko 	/*
11283e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL:
11293e7d3449SMel Gorman 	 * this is because during migration, copies of pages need to be
11303e7d3449SMel Gorman 	 * allocated and, for a short time, the memory footprint is higher.
11313e7d3449SMel Gorman 	 */
1132ebff3980SVlastimil Babka 	watermark += (2UL << order);
1133ebff3980SVlastimil Babka 	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
11343e7d3449SMel Gorman 		return COMPACT_SKIPPED;
11353e7d3449SMel Gorman 
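	/*
	 * Worked example (editor's note): for an order-9 request (a 2 MiB THP
	 * with 4 KiB pages), the check above demands 2UL << 9 = 1024 extra
	 * pages (~4 MiB) free at order-0 on top of the low watermark before
	 * compaction is allowed to proceed.
	 */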
11363e7d3449SMel Gorman 	/*
11373e7d3449SMel Gorman 	 * fragmentation index determines if allocation failures are due to
11383e7d3449SMel Gorman 	 * low memory or external fragmentation
11393e7d3449SMel Gorman 	 *
1140ebff3980SVlastimil Babka 	 * index of -1000 would imply allocations might succeed depending on
1141ebff3980SVlastimil Babka 	 * watermarks, but we already failed the high-order watermark check
11423e7d3449SMel Gorman 	 * index towards 0 implies failure is due to lack of memory
11433e7d3449SMel Gorman 	 * index towards 1000 implies failure is due to fragmentation
11443e7d3449SMel Gorman 	 *
11453e7d3449SMel Gorman 	 * Only compact if a failure would be due to fragmentation.
11463e7d3449SMel Gorman 	 */
11473e7d3449SMel Gorman 	fragindex = fragmentation_index(zone, order);
11483e7d3449SMel Gorman 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
11493e7d3449SMel Gorman 		return COMPACT_SKIPPED;
11503e7d3449SMel Gorman 
11513e7d3449SMel Gorman 	return COMPACT_CONTINUE;
11523e7d3449SMel Gorman }
11533e7d3449SMel Gorman 
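/*
 * Worked example (editor's note): with the default sysctl_extfrag_threshold
 * of 500 (set further down in this file), a fragmentation index of 300 makes
 * compaction_suitable() return COMPACT_SKIPPED, treating the failure as a
 * plain memory shortage best handled by reclaim, while an index of 800
 * indicates external fragmentation, which compaction can actually address.
 */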
1154748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
1155748446bbSMel Gorman {
1156748446bbSMel Gorman 	int ret;
1157c89511abSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
1158108bcc96SCody P Schafer 	unsigned long end_pfn = zone_end_pfn(zone);
11596d7ce559SDavid Rientjes 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1160e0b9daebSDavid Rientjes 	const bool sync = cc->mode != MIGRATE_ASYNC;
1161*fdaf7f5cSVlastimil Babka 	unsigned long last_migrated_pfn = 0;
1162748446bbSMel Gorman 
1163ebff3980SVlastimil Babka 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1164ebff3980SVlastimil Babka 							cc->classzone_idx);
11653e7d3449SMel Gorman 	switch (ret) {
11663e7d3449SMel Gorman 	case COMPACT_PARTIAL:
11673e7d3449SMel Gorman 	case COMPACT_SKIPPED:
11683e7d3449SMel Gorman 		/* Compaction is likely to fail */
11693e7d3449SMel Gorman 		return ret;
11703e7d3449SMel Gorman 	case COMPACT_CONTINUE:
11713e7d3449SMel Gorman 		/* Fall through to compaction */
11723e7d3449SMel Gorman 		;
11733e7d3449SMel Gorman 	}
11743e7d3449SMel Gorman 
1175c89511abSMel Gorman 	/*
1176d3132e4bSVlastimil Babka 	 * Clear pageblock skip if there were failures recently and compaction
1177d3132e4bSVlastimil Babka 	 * is about to be retried after being deferred. kswapd does not do
1178d3132e4bSVlastimil Babka 	 * this reset as it'll reset the cached information when going to sleep.
1179d3132e4bSVlastimil Babka 	 */
1180d3132e4bSVlastimil Babka 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1181d3132e4bSVlastimil Babka 		__reset_isolation_suitable(zone);
1182d3132e4bSVlastimil Babka 
1183d3132e4bSVlastimil Babka 	/*
1184c89511abSMel Gorman 	 * Setup to move all movable pages to the end of the zone. Use cached
1185c89511abSMel Gorman 	 * information on where the scanners should start, but check that it
1186c89511abSMel Gorman 	 * is initialised by ensuring the values are within zone boundaries.
1187c89511abSMel Gorman 	 */
1188e0b9daebSDavid Rientjes 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1189c89511abSMel Gorman 	cc->free_pfn = zone->compact_cached_free_pfn;
1190c89511abSMel Gorman 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1191c89511abSMel Gorman 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1192c89511abSMel Gorman 		zone->compact_cached_free_pfn = cc->free_pfn;
1193c89511abSMel Gorman 	}
1194c89511abSMel Gorman 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1195c89511abSMel Gorman 		cc->migrate_pfn = start_pfn;
119635979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
119735979ef3SDavid Rientjes 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1198c89511abSMel Gorman 	}
1199748446bbSMel Gorman 
12000eb927c0SMel Gorman 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
12010eb927c0SMel Gorman 
1202748446bbSMel Gorman 	migrate_prep_local();
1203748446bbSMel Gorman 
12046d7ce559SDavid Rientjes 	while ((ret = compact_finished(zone, cc, migratetype)) ==
12056d7ce559SDavid Rientjes 						COMPACT_CONTINUE) {
12069d502c1cSMinchan Kim 		int err;
1207*fdaf7f5cSVlastimil Babka 		unsigned long isolate_start_pfn = cc->migrate_pfn;
1208748446bbSMel Gorman 
1209f9e35b3bSMel Gorman 		switch (isolate_migratepages(zone, cc)) {
1210f9e35b3bSMel Gorman 		case ISOLATE_ABORT:
1211f9e35b3bSMel Gorman 			ret = COMPACT_PARTIAL;
12125733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1213e64c5237SShaohua Li 			cc->nr_migratepages = 0;
1214f9e35b3bSMel Gorman 			goto out;
1215f9e35b3bSMel Gorman 		case ISOLATE_NONE:
1216*fdaf7f5cSVlastimil Babka 			/*
1217*fdaf7f5cSVlastimil Babka 			 * We haven't isolated and migrated anything, but
1218*fdaf7f5cSVlastimil Babka 			 * there might still be unflushed migrations from
1219*fdaf7f5cSVlastimil Babka 			 * previous cc->order aligned block.
1220*fdaf7f5cSVlastimil Babka 			 */
1221*fdaf7f5cSVlastimil Babka 			goto check_drain;
1222f9e35b3bSMel Gorman 		case ISOLATE_SUCCESS:
1223f9e35b3bSMel Gorman 			;
1224f9e35b3bSMel Gorman 		}
1225748446bbSMel Gorman 
1226d53aea3dSDavid Rientjes 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1227e0b9daebSDavid Rientjes 				compaction_free, (unsigned long)cc, cc->mode,
12287b2a2d4aSMel Gorman 				MR_COMPACTION);
1229748446bbSMel Gorman 
1230f8c9301fSVlastimil Babka 		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1231f8c9301fSVlastimil Babka 							&cc->migratepages);
1232748446bbSMel Gorman 
1233f8c9301fSVlastimil Babka 		/* All pages were either migrated or will be released */
1234f8c9301fSVlastimil Babka 		cc->nr_migratepages = 0;
12359d502c1cSMinchan Kim 		if (err) {
12365733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
12377ed695e0SVlastimil Babka 			/*
12387ed695e0SVlastimil Babka 			 * migrate_pages() may return -ENOMEM when scanners meet
12397ed695e0SVlastimil Babka 			 * and we want compact_finished() to detect it
12407ed695e0SVlastimil Babka 			 */
12417ed695e0SVlastimil Babka 			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
12424bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
12434bf2bba3SDavid Rientjes 				goto out;
1244748446bbSMel Gorman 			}
12454bf2bba3SDavid Rientjes 		}
1246*fdaf7f5cSVlastimil Babka 
1247*fdaf7f5cSVlastimil Babka 		/*
1248*fdaf7f5cSVlastimil Babka 		 * Record where we could have freed pages by migration and not
1249*fdaf7f5cSVlastimil Babka 		 * yet flushed them to the buddy allocator. We use the pfn that
1250*fdaf7f5cSVlastimil Babka 		 * isolate_migratepages() started from in this loop iteration
1251*fdaf7f5cSVlastimil Babka 		 * - this is the lowest page that could have been isolated and
1252*fdaf7f5cSVlastimil Babka 		 * then freed by migration.
1253*fdaf7f5cSVlastimil Babka 		 */
1254*fdaf7f5cSVlastimil Babka 		if (!last_migrated_pfn)
1255*fdaf7f5cSVlastimil Babka 			last_migrated_pfn = isolate_start_pfn;
1256*fdaf7f5cSVlastimil Babka 
1257*fdaf7f5cSVlastimil Babka check_drain:
1258*fdaf7f5cSVlastimil Babka 		/*
1259*fdaf7f5cSVlastimil Babka 		 * Has the migration scanner moved away from the previous
1260*fdaf7f5cSVlastimil Babka 		 * cc->order aligned block where we migrated from? If yes,
1261*fdaf7f5cSVlastimil Babka 		 * flush the pages that were freed, so that they can merge and
1262*fdaf7f5cSVlastimil Babka 		 * compact_finished() can detect immediately if allocation
1263*fdaf7f5cSVlastimil Babka 		 * would succeed.
1264*fdaf7f5cSVlastimil Babka 		 */
1265*fdaf7f5cSVlastimil Babka 		if (cc->order > 0 && last_migrated_pfn) {
1266*fdaf7f5cSVlastimil Babka 			int cpu;
1267*fdaf7f5cSVlastimil Babka 			unsigned long current_block_start =
1268*fdaf7f5cSVlastimil Babka 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
1269*fdaf7f5cSVlastimil Babka 
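			/*
			 * Worked example (editor's note): for cc->order = 9
			 * the mask above rounds migrate_pfn down to a
			 * 512-page boundary, e.g. pfn 0x12345 gives a block
			 * start of 0x12200.
			 */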
1270*fdaf7f5cSVlastimil Babka 			if (last_migrated_pfn < current_block_start) {
1271*fdaf7f5cSVlastimil Babka 				cpu = get_cpu();
1272*fdaf7f5cSVlastimil Babka 				lru_add_drain_cpu(cpu);
1273*fdaf7f5cSVlastimil Babka 				drain_local_pages(zone);
1274*fdaf7f5cSVlastimil Babka 				put_cpu();
1275*fdaf7f5cSVlastimil Babka 				/* No more flushing until we migrate again */
1276*fdaf7f5cSVlastimil Babka 				last_migrated_pfn = 0;
1277*fdaf7f5cSVlastimil Babka 			}
1278*fdaf7f5cSVlastimil Babka 		}
1279*fdaf7f5cSVlastimil Babka 
1280748446bbSMel Gorman 	}
1281748446bbSMel Gorman 
1282f9e35b3bSMel Gorman out:
12836bace090SVlastimil Babka 	/*
12846bace090SVlastimil Babka 	 * Release free pages and update where the free scanner should restart,
12856bace090SVlastimil Babka 	 * so we don't leave any returned pages behind in the next attempt.
12866bace090SVlastimil Babka 	 */
12876bace090SVlastimil Babka 	if (cc->nr_freepages > 0) {
12886bace090SVlastimil Babka 		unsigned long free_pfn = release_freepages(&cc->freepages);
12896bace090SVlastimil Babka 
12906bace090SVlastimil Babka 		cc->nr_freepages = 0;
12916bace090SVlastimil Babka 		VM_BUG_ON(free_pfn == 0);
12926bace090SVlastimil Babka 		/* The cached pfn is always the first in a pageblock */
12936bace090SVlastimil Babka 		free_pfn &= ~(pageblock_nr_pages-1);
12946bace090SVlastimil Babka 		/*
12956bace090SVlastimil Babka 		 * Only go back, not forward. The cached pfn might already
12966bace090SVlastimil Babka 		 * have been reset to the zone end in compact_finished().
12976bace090SVlastimil Babka 		 */
12986bace090SVlastimil Babka 		if (free_pfn > zone->compact_cached_free_pfn)
12996bace090SVlastimil Babka 			zone->compact_cached_free_pfn = free_pfn;
13006bace090SVlastimil Babka 	}
1301748446bbSMel Gorman 
13020eb927c0SMel Gorman 	trace_mm_compaction_end(ret);
13030eb927c0SMel Gorman 
1304748446bbSMel Gorman 	return ret;
1305748446bbSMel Gorman }
130676ab0f53SMel Gorman 
1307e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1308ebff3980SVlastimil Babka 		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1309ebff3980SVlastimil Babka 		int alloc_flags, int classzone_idx)
131056de7263SMel Gorman {
1311e64c5237SShaohua Li 	unsigned long ret;
131256de7263SMel Gorman 	struct compact_control cc = {
131356de7263SMel Gorman 		.nr_freepages = 0,
131456de7263SMel Gorman 		.nr_migratepages = 0,
131556de7263SMel Gorman 		.order = order,
13166d7ce559SDavid Rientjes 		.gfp_mask = gfp_mask,
131756de7263SMel Gorman 		.zone = zone,
1318e0b9daebSDavid Rientjes 		.mode = mode,
1319ebff3980SVlastimil Babka 		.alloc_flags = alloc_flags,
1320ebff3980SVlastimil Babka 		.classzone_idx = classzone_idx,
132156de7263SMel Gorman 	};
132256de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
132356de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
132456de7263SMel Gorman 
1325e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1326e64c5237SShaohua Li 
1327e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1328e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1329e64c5237SShaohua Li 
1330e64c5237SShaohua Li 	*contended = cc.contended;
1331e64c5237SShaohua Li 	return ret;
133256de7263SMel Gorman }
133356de7263SMel Gorman 
13345e771905SMel Gorman int sysctl_extfrag_threshold = 500;
13355e771905SMel Gorman 
133656de7263SMel Gorman /**
133756de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
133856de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
133956de7263SMel Gorman  * @order: The order of the current allocation
134056de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
134156de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1342e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
13431f9efdefSVlastimil Babka  * @contended: Return value that determines if compaction was aborted due to
13441f9efdefSVlastimil Babka  *	       need_resched() or lock contention
134556de7263SMel Gorman  *
134656de7263SMel Gorman  * This is the main entry point for direct page compaction.
134756de7263SMel Gorman  */
134856de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
134977f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
13501f9efdefSVlastimil Babka 			enum migrate_mode mode, int *contended,
135197d47a65SVlastimil Babka 			int alloc_flags, int classzone_idx)
135256de7263SMel Gorman {
135356de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
135456de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
135556de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
135656de7263SMel Gorman 	struct zoneref *z;
135756de7263SMel Gorman 	struct zone *zone;
135853853e2dSVlastimil Babka 	int rc = COMPACT_DEFERRED;
13591f9efdefSVlastimil Babka 	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
13601f9efdefSVlastimil Babka 
13611f9efdefSVlastimil Babka 	*contended = COMPACT_CONTENDED_NONE;
136256de7263SMel Gorman 
13634ffb6335SMel Gorman 	/* Check if the GFP flags allow compaction */
1364c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
136553853e2dSVlastimil Babka 		return COMPACT_SKIPPED;
136656de7263SMel Gorman 
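	/*
	 * Illustrative (editor's note): GFP_KERNEL sets both __GFP_IO and
	 * __GFP_FS and passes the check above, whereas GFP_NOFS and GFP_NOIO
	 * allocations bail out with COMPACT_SKIPPED, as do order-0 requests,
	 * which never benefit from compaction.
	 */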
136756de7263SMel Gorman 	/* Compact each zone in the list */
136856de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
136956de7263SMel Gorman 								nodemask) {
137056de7263SMel Gorman 		int status;
13711f9efdefSVlastimil Babka 		int zone_contended;
137256de7263SMel Gorman 
137353853e2dSVlastimil Babka 		if (compaction_deferred(zone, order))
137453853e2dSVlastimil Babka 			continue;
137553853e2dSVlastimil Babka 
1376e0b9daebSDavid Rientjes 		status = compact_zone_order(zone, order, gfp_mask, mode,
1377ebff3980SVlastimil Babka 				&zone_contended, alloc_flags, classzone_idx);
137856de7263SMel Gorman 		rc = max(status, rc);
13791f9efdefSVlastimil Babka 		/*
13801f9efdefSVlastimil Babka 		 * It takes at least one zone that wasn't lock contended
13811f9efdefSVlastimil Babka 		 * to clear all_zones_contended.
13821f9efdefSVlastimil Babka 		 */
13831f9efdefSVlastimil Babka 		all_zones_contended &= zone_contended;
138456de7263SMel Gorman 
13853e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1386ebff3980SVlastimil Babka 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1387ebff3980SVlastimil Babka 					classzone_idx, alloc_flags)) {
138853853e2dSVlastimil Babka 			/*
138953853e2dSVlastimil Babka 			 * We think the allocation will succeed in this zone,
139053853e2dSVlastimil Babka 			 * but it is not certain, hence the false. The caller
139153853e2dSVlastimil Babka 			 * will repeat this with true if allocation indeed
139253853e2dSVlastimil Babka 			 * succeeds in this zone.
139353853e2dSVlastimil Babka 			 */
139453853e2dSVlastimil Babka 			compaction_defer_reset(zone, order, false);
13951f9efdefSVlastimil Babka 			/*
13961f9efdefSVlastimil Babka 			 * It is possible that async compaction aborted due to
13971f9efdefSVlastimil Babka 			 * need_resched() and the watermarks were ok thanks to
13981f9efdefSVlastimil Babka 			 * somebody else freeing memory. The allocation can,
13991f9efdefSVlastimil Babka 			 * however, still fail, so we had better signal the
14001f9efdefSVlastimil Babka 			 * need_resched() contention anyway (this will not
14011f9efdefSVlastimil Babka 			 * prevent the allocation attempt).
14021f9efdefSVlastimil Babka 			 */
14031f9efdefSVlastimil Babka 			if (zone_contended == COMPACT_CONTENDED_SCHED)
14041f9efdefSVlastimil Babka 				*contended = COMPACT_CONTENDED_SCHED;
14051f9efdefSVlastimil Babka 
14061f9efdefSVlastimil Babka 			goto break_loop;
14071f9efdefSVlastimil Babka 		}
14081f9efdefSVlastimil Babka 
1409f8669795SVlastimil Babka 		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
141053853e2dSVlastimil Babka 			/*
141153853e2dSVlastimil Babka 			 * We think that allocation won't succeed in this zone
141253853e2dSVlastimil Babka 			 * so we defer compaction there. If it ends up
141353853e2dSVlastimil Babka 			 * succeeding after all, it will be reset.
141453853e2dSVlastimil Babka 			 */
141553853e2dSVlastimil Babka 			defer_compaction(zone, order);
141653853e2dSVlastimil Babka 		}
14171f9efdefSVlastimil Babka 
14181f9efdefSVlastimil Babka 		/*
14191f9efdefSVlastimil Babka 		 * We might have stopped compacting due to need_resched() in
14201f9efdefSVlastimil Babka 		 * async compaction, or due to a fatal signal detected. In that
14211f9efdefSVlastimil Babka 		 * case do not try further zones and signal need_resched()
14221f9efdefSVlastimil Babka 		 * contention.
14231f9efdefSVlastimil Babka 		 */
14241f9efdefSVlastimil Babka 		if ((zone_contended == COMPACT_CONTENDED_SCHED)
14251f9efdefSVlastimil Babka 					|| fatal_signal_pending(current)) {
14261f9efdefSVlastimil Babka 			*contended = COMPACT_CONTENDED_SCHED;
14271f9efdefSVlastimil Babka 			goto break_loop;
142856de7263SMel Gorman 		}
142956de7263SMel Gorman 
14301f9efdefSVlastimil Babka 		continue;
14311f9efdefSVlastimil Babka break_loop:
14321f9efdefSVlastimil Babka 		/*
14331f9efdefSVlastimil Babka 		 * We might not have tried all the zones, so be conservative
14341f9efdefSVlastimil Babka 		 * and assume they are not all lock contended.
14351f9efdefSVlastimil Babka 		 */
14361f9efdefSVlastimil Babka 		all_zones_contended = 0;
14371f9efdefSVlastimil Babka 		break;
14381f9efdefSVlastimil Babka 	}
14391f9efdefSVlastimil Babka 
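	/*
	 * Summary of the *contended protocol (editor's note):
	 * COMPACT_CONTENDED_SCHED is reported when any zone aborted on
	 * need_resched() or a fatal signal; COMPACT_CONTENDED_LOCK is only
	 * reported, just below, when every zone that was actually tried hit
	 * lock contention; otherwise *contended keeps COMPACT_CONTENDED_NONE.
	 */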
14401f9efdefSVlastimil Babka 	/*
14411f9efdefSVlastimil Babka 	 * If at least one zone wasn't deferred or skipped, we report if all
14421f9efdefSVlastimil Babka 	 * zones that were tried were lock contended.
14431f9efdefSVlastimil Babka 	 */
14441f9efdefSVlastimil Babka 	if (rc > COMPACT_SKIPPED && all_zones_contended)
14451f9efdefSVlastimil Babka 		*contended = COMPACT_CONTENDED_LOCK;
14461f9efdefSVlastimil Babka 
144756de7263SMel Gorman 	return rc;
144856de7263SMel Gorman }
144956de7263SMel Gorman 
145056de7263SMel Gorman 
145176ab0f53SMel Gorman /* Compact all zones within a node */
14527103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
145376ab0f53SMel Gorman {
145476ab0f53SMel Gorman 	int zoneid;
145576ab0f53SMel Gorman 	struct zone *zone;
145676ab0f53SMel Gorman 
145776ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
145876ab0f53SMel Gorman 
145976ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
146076ab0f53SMel Gorman 		if (!populated_zone(zone))
146176ab0f53SMel Gorman 			continue;
146276ab0f53SMel Gorman 
14637be62de9SRik van Riel 		cc->nr_freepages = 0;
14647be62de9SRik van Riel 		cc->nr_migratepages = 0;
14657be62de9SRik van Riel 		cc->zone = zone;
14667be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
14677be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
146876ab0f53SMel Gorman 
1469aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
14707be62de9SRik van Riel 			compact_zone(zone, cc);
147176ab0f53SMel Gorman 
1472aff62249SRik van Riel 		if (cc->order > 0) {
1473de6c60a6SVlastimil Babka 			if (zone_watermark_ok(zone, cc->order,
1474de6c60a6SVlastimil Babka 						low_wmark_pages(zone), 0, 0))
1475de6c60a6SVlastimil Babka 				compaction_defer_reset(zone, cc->order, false);
1476aff62249SRik van Riel 		}
1477aff62249SRik van Riel 
14787be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
14797be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
148076ab0f53SMel Gorman 	}
148176ab0f53SMel Gorman }
148276ab0f53SMel Gorman 
14837103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
14847be62de9SRik van Riel {
14857be62de9SRik van Riel 	struct compact_control cc = {
14867be62de9SRik van Riel 		.order = order,
1487e0b9daebSDavid Rientjes 		.mode = MIGRATE_ASYNC,
14887be62de9SRik van Riel 	};
14897be62de9SRik van Riel 
14903a7200afSMel Gorman 	if (!order)
14913a7200afSMel Gorman 		return;
14923a7200afSMel Gorman 
14937103f16dSAndrew Morton 	__compact_pgdat(pgdat, &cc);
14947be62de9SRik van Riel }
14957be62de9SRik van Riel 
14967103f16dSAndrew Morton static void compact_node(int nid)
14977be62de9SRik van Riel {
14987be62de9SRik van Riel 	struct compact_control cc = {
14997be62de9SRik van Riel 		.order = -1,
1500e0b9daebSDavid Rientjes 		.mode = MIGRATE_SYNC,
150191ca9186SDavid Rientjes 		.ignore_skip_hint = true,
15027be62de9SRik van Riel 	};
15037be62de9SRik van Riel 
15047103f16dSAndrew Morton 	__compact_pgdat(NODE_DATA(nid), &cc);
15057be62de9SRik van Riel }
15067be62de9SRik van Riel 
150776ab0f53SMel Gorman /* Compact all nodes in the system */
15087964c06dSJason Liu static void compact_nodes(void)
150976ab0f53SMel Gorman {
151076ab0f53SMel Gorman 	int nid;
151176ab0f53SMel Gorman 
15128575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
15138575ec29SHugh Dickins 	lru_add_drain_all();
15148575ec29SHugh Dickins 
151576ab0f53SMel Gorman 	for_each_online_node(nid)
151676ab0f53SMel Gorman 		compact_node(nid);
151776ab0f53SMel Gorman }
151876ab0f53SMel Gorman 
151976ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */
152076ab0f53SMel Gorman int sysctl_compact_memory;
152176ab0f53SMel Gorman 
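/*
 * Usage example (editor's note): full-system compaction can be triggered
 * from userspace via the handler below with
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * (any written value works, as noted above), or per NUMA node through the
 * sysfs attribute registered at the bottom of this file:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */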
152276ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
152376ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
152476ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
152576ab0f53SMel Gorman {
152676ab0f53SMel Gorman 	if (write)
15277964c06dSJason Liu 		compact_nodes();
152876ab0f53SMel Gorman 
152976ab0f53SMel Gorman 	return 0;
153076ab0f53SMel Gorman }
1531ed4a6d7fSMel Gorman 
15325e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
15335e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
15345e771905SMel Gorman {
15355e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
15365e771905SMel Gorman 
15375e771905SMel Gorman 	return 0;
15385e771905SMel Gorman }
15395e771905SMel Gorman 
1540ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
154174e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
154210fbcf4cSKay Sievers 			struct device_attribute *attr,
1543ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1544ed4a6d7fSMel Gorman {
15458575ec29SHugh Dickins 	int nid = dev->id;
15468575ec29SHugh Dickins 
15478575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
15488575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
15498575ec29SHugh Dickins 		lru_add_drain_all();
15508575ec29SHugh Dickins 
15518575ec29SHugh Dickins 		compact_node(nid);
15528575ec29SHugh Dickins 	}
1553ed4a6d7fSMel Gorman 
1554ed4a6d7fSMel Gorman 	return count;
1555ed4a6d7fSMel Gorman }
155610fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1557ed4a6d7fSMel Gorman 
1558ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1559ed4a6d7fSMel Gorman {
156010fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1561ed4a6d7fSMel Gorman }
1562ed4a6d7fSMel Gorman 
1563ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1564ed4a6d7fSMel Gorman {
156510fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1566ed4a6d7fSMel Gorman }
1567ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1568ff9543fdSMichal Nazarewicz 
1569ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1570