/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
        count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
        count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

static void map_pages(struct list_head *list)
{
        struct page *page;

        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }
}

static inline bool migrate_async_suitable(int migratetype)
{
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner.
 * The scanners then
 * need only the pfn_valid_within() check for arches that allow holes
 * within pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone)
{
        struct page *start_page;
        struct page *end_page;

        /* end_pfn is one past the range we are checking */
        end_pfn--;

        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
                return NULL;

        start_page = pfn_to_page(start_pfn);

        if (page_zone(start_page) != zone)
                return NULL;

        end_page = pfn_to_page(end_pfn);

        /* This gives shorter code than deriving page_zone(end_page) */
        if (page_zone_id(start_page) != page_zone_id(end_page))
                return NULL;

        return start_page;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        if (cc->ignore_skip_hint)
                return true;

        return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
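 * All cached scanner positions are reset as well: the next compaction pass
 * restarts the migrate scanner from the start of the zone and the free
 * scanner from the end.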
 */
static void __reset_isolation_suitable(struct zone *zone)
{
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;

        zone->compact_cached_migrate_pfn[0] = start_pfn;
        zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;

        /* Walk the zone and mark every pageblock as suitable for isolation */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                cond_resched();

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (zone != page_zone(page))
                        continue;

                clear_pageblock_skip(page);
        }
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
        int zoneid;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct zone *zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                /* Only flush if a full compaction finished recently */
                if (zone->compact_blockskip_flush)
                        __reset_isolation_suitable(zone);
        }
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
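 * Besides setting the skip bit, the cached scanner positions are pulled
 * forward to this pageblock (upwards for the migrate scanner, downwards for
 * the free scanner), unless the respective finished_update flag shows the
 * scanner has already recorded where to restart.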
 */
static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
        struct zone *zone = cc->zone;
        unsigned long pfn;

        if (cc->ignore_skip_hint)
                return;

        if (!page)
                return;

        if (nr_isolated)
                return;

        set_pageblock_skip(page);

        pfn = page_to_pfn(page);

        /* Update where async and sync compaction should restart */
        if (migrate_scanner) {
                if (cc->finished_update_migrate)
                        return;
                if (pfn > zone->compact_cached_migrate_pfn[0])
                        zone->compact_cached_migrate_pfn[0] = pfn;
                if (cc->mode != MIGRATE_ASYNC &&
                    pfn > zone->compact_cached_migrate_pfn[1])
                        zone->compact_cached_migrate_pfn[1] = pfn;
        } else {
                if (cc->finished_update_free)
                        return;
                if (pfn < zone->compact_cached_free_pfn)
                        zone->compact_cached_free_pfn = pfn;
        }
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        return true;
}

static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
        return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                                      bool locked, struct compact_control *cc)
{
        if (should_release_lock(lock)) {
                if (locked) {
                        spin_unlock_irqrestore(lock, *flags);
                        locked = false;
                }

                /* async aborts if taking too long or contended */
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return false;
                }

                cond_resched();
        }

        if (!locked)
                spin_lock_irqsave(lock, *flags);
        return true;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_checklock_irqsave() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
        /* async compaction aborts if contended */
        if (need_resched()) {
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return true;
                }

                cond_resched();
        }

        return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        /* If the page is a large free page, then disallow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return false;

        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
        if (migrate_async_suitable(get_pageblock_migratetype(page)))
                return true;

        /* Otherwise skip the block */
        return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
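 *
 * On success, the return value is the number of order-0 pages placed on
 * @freelist; higher-order free pages are split by split_free_page() first.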
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
                                unsigned long blockpfn,
                                unsigned long end_pfn,
                                struct list_head *freelist,
                                bool strict)
{
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
        unsigned long flags;
        bool locked = false;

        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
                        goto isolate_fail;

                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
                        goto isolate_fail;

                /*
                 * The zone lock must be held to isolate freepages.
                 * Unfortunately this is a very coarse lock and can be
                 * heavily contended if there are parallel allocations
                 * or parallel compactions. For async compaction do not
                 * spin on the lock and we acquire the lock as late as
                 * possible.
                 */
                locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
                                                                locked, cc);
                if (!locked)
                        break;

                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
                        goto isolate_fail;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                        continue;
                }

isolate_fail:
                if (strict)
                        break;
                else
                        continue;

        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

        /*
         * If strict isolation is requested by CMA then check that all the
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
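         * blockpfn only reaches end_pfn when every pfn in the block was
         * scanned without hitting isolate_fail, so in strict mode any
         * early break out of the loop is caught by the check below.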
         */
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;

        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);

        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);

        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
        if (total_isolated)
                count_compact_events(COMPACTISOLATED, total_isolated);
        return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long isolated, pfn, block_end_pfn;
        LIST_HEAD(freelist);

        pfn = start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

        for (; pfn < end_pfn; pfn += isolated,
                                block_end_pfn += pageblock_nr_pages) {

                block_end_pfn = min(block_end_pfn, end_pfn);

                if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
                        break;

                isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
                                                   &freelist, true);

                /*
                 * In strict mode, isolate_freepages_block() returns 0 if
                 * there are any holes in the block (i.e. invalid PFNs or
                 * non-free pages).
                 */
                if (!isolated)
                        break;

                /*
                 * If we managed to isolate pages, it is always (1 << n) *
                 * pageblock_nr_pages for some non-negative n. (Max order
                 * page may span two pageblocks).
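                 * For example, with 4K pages and pageblock_order == 9 (a
                 * common configuration), a fully isolated pageblock advances
                 * pfn by pageblock_nr_pages == 512 per iteration.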
                 */
        }

        /* split_free_page does not map the pages */
        map_pages(&freelist);

        if (pfn < end_pfn) {
                /* Loop terminated early, cleanup. */
                release_freepages(&freelist);
                return 0;
        }

        /* We don't use freelists for anything. */
        return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        if (list_empty(&cc->migratepages))
                return;

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
        mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                        zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                        zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                        zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *                                a single pageblock
 * @cc:           Compaction control structure.
 * @low_pfn:      The first PFN to isolate
 * @end_pfn:      The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to or more
 * than end_pfn).
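 * (The return value can exceed end_pfn when the scanner skips a compound
 * page that extends past the end of the pageblock.)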
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        unsigned long end_pfn, isolate_mode_t isolate_mode)
{
        struct zone *zone = cc->zone;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated.
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (cc->mode == MIGRATE_ASYNC)
                        return 0;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        if (compact_should_abort(cc))
                return 0;

        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
                if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                page = pfn_to_page(low_pfn);

                if (!valid_page)
                        valid_page = page;

                /*
                 * Skip if free. page_order cannot be used without zone->lock
                 * as nothing prevents parallel allocations or buddy merging.
                 */
                if (PageBuddy(page))
                        continue;

                /*
                 * Check may be lockless but that's ok as we recheck later.
                 * It's possible to migrate LRU pages and balloon pages;
                 * skip any other type of page.
                 */
                if (!PageLRU(page)) {
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
                                        goto isolate_success;
                                }
                        }
                        continue;
                }

                /*
                 * PageLRU is set. lru_lock normally excludes isolation
                 * splitting and collapsing (collapsing has already happened
                 * if PageLRU is set) but the lock is not necessarily taken
                 * here and it is wasteful to take it just to check transhuge.
                 * Check TransHuge without lock and skip the whole pageblock if
                 * it's either a transhuge or hugetlbfs page, as calling
                 * compound_order() without preventing THP from splitting the
                 * page underneath us may return surprising results.
                 */
                if (PageTransHuge(page)) {
                        if (!locked)
                                low_pfn = ALIGN(low_pfn + 1,
                                                pageblock_nr_pages) - 1;
                        else
                                low_pfn += (1 << compound_order(page)) - 1;

                        continue;
                }

                /*
                 * Migration will fail if an anonymous page is pinned in memory,
                 * so avoid taking lru_lock and isolating it unnecessarily in an
                 * admittedly racy check.
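                 * A page_count above page_mapcount implies extra references,
                 * e.g. from get_user_pages(), that would make the migration
                 * fail anyway.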
                 */
                if (!page_mapping(page) &&
                    page_count(page) > page_mapcount(page))
                        continue;

                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                                locked, cc);
                if (!locked || fatal_signal_pending(current))
                        break;

                /* Recheck PageLRU and PageTransHuge under lock */
                if (!PageLRU(page))
                        continue;
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                lruvec = mem_cgroup_page_lruvec(page, zone);

                /* Try to isolate the page */
                if (__isolate_lru_page(page, isolate_mode) != 0)
                        continue;

                VM_BUG_ON_PAGE(PageTransCompound(page), page);

                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
                cc->finished_update_migrate = true;
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }
        }

        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        /*
         * Update the pageblock-skip information and cached scanner pfn,
         * if the whole pageblock was scanned without isolating any page.
         */
        if (low_pfn == end_pfn)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
        if (nr_isolated)
                count_compact_events(COMPACTISOLATED, nr_isolated);

        return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        unsigned long pfn, block_end_pfn;

        /* Scan block by block. First and last block may be incomplete */
        pfn = start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

        for (; pfn < end_pfn; pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {

                block_end_pfn = min(block_end_pfn, end_pfn);

                if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
                        continue;

                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);

                /*
                 * In case of fatal failure, release everything that might
                 * have been isolated in the previous iteration, and signal
                 * the failure back to caller.
                 */
                if (!pfn) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        break;
                }
        }
        acct_isolated(cc->zone, cc);

        return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
        struct zone *zone = cc->zone;
        struct page *page;
        unsigned long block_start_pfn;  /* start of current pageblock */
        unsigned long block_end_pfn;    /* end of current pageblock */
        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * successfully isolated from, the zone-cached value, or the end of
         * the zone when isolating for the first time.
         * We need this aligned to the pageblock boundary, because we do
         * block_start_pfn -= pageblock_nr_pages in the for loop.
         * For the ending point, take care when isolating in the last
         * pageblock of a zone which ends in the middle of a pageblock.
         * The low boundary is the end of the pageblock the migration scanner
         * is using.
         */
        block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
        block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
                                                zone_end_pfn(zone));
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
                 * to schedule, or even abort async compaction.
                 */
                if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
                                                && compact_should_abort(cc))
                        break;

                page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
                                                                        zone);
                if (!page)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /* Found a block suitable for isolating free pages from */
                cc->free_pfn = block_start_pfn;
                isolated = isolate_freepages_block(cc, block_start_pfn,
                                        block_end_pfn, freelist, false);
                nr_freepages += isolated;

                /*
                 * Set a flag that we successfully isolated in this pageblock.
                 * In the next loop iteration, zone->compact_cached_free_pfn
                 * will not be updated and thus it will effectively contain the
                 * highest pageblock we isolated pages from.
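                 * (update_pageblock_skip() returns early once
                 * finished_update_free is set.)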
                 */
                if (isolated)
                        cc->finished_update_free = true;

                /*
                 * isolate_freepages_block() might have aborted due to async
                 * compaction being contended
                 */
                if (cc->contended)
                        break;
        }

        /* split_free_page does not map the pages */
        map_pages(freelist);

        /*
         * If we crossed the migrate scanner, we want to keep it that way
         * so that compact_finished() may detect this
         */
        if (block_start_pfn < low_pfn)
                cc->free_pfn = cc->migrate_pfn;

        cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /*
         * Isolate free pages if necessary, and if we are not aborting due to
         * contention.
         */
        if (list_empty(&cc->freepages)) {
                if (!cc->contended)
                        isolate_freepages(cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
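 *
 * Both callbacks are handed to migrate_pages() by compact_zone():
 * compaction_alloc() supplies a target page for each migration and
 * compaction_free() takes pages back when a migration fails.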
 */
static void compaction_free(struct page *page, unsigned long data)
{
        struct compact_control *cc = (struct compact_control *)data;

        list_add(&page->lru, &cc->freepages);
        cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
                (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

        /*
         * Start at where we last stopped, or beginning of the zone as
         * initialized by compact_zone()
         */
        low_pfn = cc->migrate_pfn;

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

        /*
         * Iterate over whole pageblocks until we find the first suitable.
         * Do not cross the free scanner.
         */
        for (; end_pfn <= cc->free_pfn;
                        low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

                /*
                 * This can potentially iterate a massively long zone with
                 * many pageblocks unsuitable, so periodically check if we
                 * need to schedule, or even abort async compaction.
                 */
                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
                                                && compact_should_abort(cc))
                        break;

                page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
                if (!page)
                        continue;

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /*
                 * For async compaction, also only scan in MOVABLE blocks.
                 * Async compaction is optimistic to see if the minimum amount
                 * of work satisfies the allocation.
                 */
                if (cc->mode == MIGRATE_ASYNC &&
                    !migrate_async_suitable(get_pageblock_migratetype(page)))
                        continue;

                /* Perform the isolation */
                low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
                                                                isolate_mode);

                if (!low_pfn || cc->contended)
                        return ISOLATE_ABORT;

                /*
                 * Either we isolated something and proceed with migration. Or
                 * we failed and compact_zone should decide if we should
                 * continue or not.
                 */
                break;
        }

        acct_isolated(zone, cc);
        /* Record where migration scanner will be restarted */
        cc->migrate_pfn = low_pfn;

        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark;

        if (cc->contended || fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction start anew. */
                zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
                zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);

                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
                 * flag itself as the decision to clear it should be based
                 * directly on an allocation request.
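                 * (reset_isolation_suitable() checks this flag and does the
                 * actual flush.)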
                 */
                if (!current_is_kswapd())
                        zone->compact_blockskip_flush = true;

                return COMPACT_COMPLETE;
        }

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &zone->free_area[order];

                /* Job done if page is free of the right migratetype */
                if (!list_empty(&area->free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated and for a short time, the footprint is higher.
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
            0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        const bool sync = cc->mode != MIGRATE_ASYNC;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /*
         * Clear pageblock skip if there were failures recently and compaction
         * is about to be retried after being deferred. kswapd does not do
         * this reset as it'll reset the cached information when going to sleep.
         */
        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
                __reset_isolation_suitable(zone);

        /*
         * Setup to move all movable pages to the end of the zone. Use cached
         * information on where the scanners should start, but check that it
         * is initialised by ensuring the values are within zone boundaries.
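         * An out-of-range value usually means the cache was never
         * initialised; it is replaced below with the matching zone boundary,
         * which is also written back to the zone's cache.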
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }

        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);

                trace_mm_compaction_migratepages(cc->nr_migratepages, err,
                                                        &cc->migratepages);

                /* All pages were either migrated or will be released */
                cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        /*
                         * migrate_pages() may return -ENOMEM when scanners meet
                         * and we want compact_finished() to detect it
                         */
                        if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }
        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        trace_mm_compaction_end(ret);

        return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
                gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
{
        unsigned long ret;
        struct compact_control cc = {
                .nr_freepages = 0,
1142e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1143e0b9daebSDavid Rientjes gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
114456de7263SMel Gorman {
1145e64c5237SShaohua Li unsigned long ret;
114656de7263SMel Gorman struct compact_control cc = {
114756de7263SMel Gorman .nr_freepages = 0,
114856de7263SMel Gorman .nr_migratepages = 0,
114956de7263SMel Gorman .order = order,
115056de7263SMel Gorman .migratetype = allocflags_to_migratetype(gfp_mask),
115156de7263SMel Gorman .zone = zone,
1152e0b9daebSDavid Rientjes .mode = mode,
115356de7263SMel Gorman };
115456de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages);
115556de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages);
115656de7263SMel Gorman 
1157e64c5237SShaohua Li ret = compact_zone(zone, &cc);
1158e64c5237SShaohua Li 
1159e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages));
1160e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages));
1161e64c5237SShaohua Li 
1162e64c5237SShaohua Li *contended = cc.contended;
1163e64c5237SShaohua Li return ret;
116456de7263SMel Gorman }
116556de7263SMel Gorman 
11665e771905SMel Gorman int sysctl_extfrag_threshold = 500;
11675e771905SMel Gorman 
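/*
 * [Editor's note -- not in the kernel source.] sysctl_extfrag_threshold
 * above is exposed as /proc/sys/vm/extfrag_threshold (handled by
 * sysctl_extfrag_handler() below and registered in kernel/sysctl.c).
 * With the default of 500, compaction_suitable() only lets compaction
 * run for fragmentation indices in (500, 1000]; lowering it, e.g.
 *
 *   sysctl -w vm.extfrag_threshold=400
 *
 * widens that to (400, 1000], biasing the kernel toward compacting
 * rather than reclaiming when high-order allocations fail.
 */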
116856de7263SMel Gorman /**
116956de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
117056de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
117156de7263SMel Gorman  * @order: The order of the current allocation
117256de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
117356de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1174e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
1175661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
117653853e2dSVlastimil Babka  * @candidate_zone: Return the zone where we think allocation should succeed
117756de7263SMel Gorman  *
117856de7263SMel Gorman  * This is the main entry point for direct page compaction.
117956de7263SMel Gorman  */
118056de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
118177f1fe6bSMel Gorman int order, gfp_t gfp_mask, nodemask_t *nodemask,
118253853e2dSVlastimil Babka enum migrate_mode mode, bool *contended,
118353853e2dSVlastimil Babka struct zone **candidate_zone)
118456de7263SMel Gorman {
118556de7263SMel Gorman enum zone_type high_zoneidx = gfp_zone(gfp_mask);
118656de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS;
118756de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO;
118856de7263SMel Gorman struct zoneref *z;
118956de7263SMel Gorman struct zone *zone;
119053853e2dSVlastimil Babka int rc = COMPACT_DEFERRED;
1191d95ea5d1SBartlomiej Zolnierkiewicz int alloc_flags = 0;
119256de7263SMel Gorman 
11934ffb6335SMel Gorman /* Check if the GFP flags allow compaction */
1194c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io)
119553853e2dSVlastimil Babka return COMPACT_SKIPPED;
119656de7263SMel Gorman 
1197d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1198d95ea5d1SBartlomiej Zolnierkiewicz if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1199d95ea5d1SBartlomiej Zolnierkiewicz alloc_flags |= ALLOC_CMA;
1200d95ea5d1SBartlomiej Zolnierkiewicz #endif
120156de7263SMel Gorman /* Compact each zone in the list */
120256de7263SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
120356de7263SMel Gorman nodemask) {
120456de7263SMel Gorman int status;
120556de7263SMel Gorman 
120653853e2dSVlastimil Babka if (compaction_deferred(zone, order))
120753853e2dSVlastimil Babka continue;
120853853e2dSVlastimil Babka 
1209e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode,
12108fb74b9fSMel Gorman contended);
121156de7263SMel Gorman rc = max(status, rc);
121256de7263SMel Gorman 
12133e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */
1214d95ea5d1SBartlomiej Zolnierkiewicz if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
121553853e2dSVlastimil Babka alloc_flags)) {
121653853e2dSVlastimil Babka *candidate_zone = zone;
121753853e2dSVlastimil Babka /*
121853853e2dSVlastimil Babka * We think the allocation will succeed in this zone,
121953853e2dSVlastimil Babka * but it is not certain, hence the false argument. The
122053853e2dSVlastimil Babka * caller will repeat this with true if allocation
122153853e2dSVlastimil Babka * indeed succeeds in this zone.
122253853e2dSVlastimil Babka */
122353853e2dSVlastimil Babka compaction_defer_reset(zone, order, false);
122456de7263SMel Gorman break;
122553853e2dSVlastimil Babka } else if (mode != MIGRATE_ASYNC) {
122653853e2dSVlastimil Babka /*
122753853e2dSVlastimil Babka * We think that allocation won't succeed in this zone,
122853853e2dSVlastimil Babka * so we defer compaction there. If it ends up
122953853e2dSVlastimil Babka * succeeding after all, it will be reset.
123053853e2dSVlastimil Babka */
123153853e2dSVlastimil Babka defer_compaction(zone, order);
123253853e2dSVlastimil Babka }
123356de7263SMel Gorman }
123456de7263SMel Gorman 
123556de7263SMel Gorman return rc;
123656de7263SMel Gorman }
123756de7263SMel Gorman 
123856de7263SMel Gorman 
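/*
 * [Editor's example -- not in the kernel source; a hypothetical call.]
 * How the per-zone deferral above plays out: suppose
 * try_to_compact_pages() runs in MIGRATE_SYNC_LIGHT mode over two
 * zones. A zone that still fails the watermark check after being
 * compacted gets defer_compaction(), so the compaction_deferred() test
 * at the top of the loop skips it on the next several attempts. The
 * first zone that passes is reported through *candidate_zone, has its
 * deferral counters reset with "false" (success not yet proven), and
 * ends the loop. MIGRATE_ASYNC callers never defer, because an async
 * failure is too weak a signal.
 */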
123976ab0f53SMel Gorman /* Compact all zones within a node */
12407103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
124176ab0f53SMel Gorman {
124276ab0f53SMel Gorman int zoneid;
124376ab0f53SMel Gorman struct zone *zone;
124476ab0f53SMel Gorman 
124576ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
124676ab0f53SMel Gorman 
124776ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid];
124876ab0f53SMel Gorman if (!populated_zone(zone))
124976ab0f53SMel Gorman continue;
125076ab0f53SMel Gorman 
12517be62de9SRik van Riel cc->nr_freepages = 0;
12527be62de9SRik van Riel cc->nr_migratepages = 0;
12537be62de9SRik van Riel cc->zone = zone;
12547be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages);
12557be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages);
125676ab0f53SMel Gorman 
1257aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order))
12587be62de9SRik van Riel compact_zone(zone, cc);
125976ab0f53SMel Gorman 
1260aff62249SRik van Riel if (cc->order > 0) {
1261de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order,
1262de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0))
1263de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false);
1264aff62249SRik van Riel }
1265aff62249SRik van Riel 
12667be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages));
12677be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages));
126876ab0f53SMel Gorman }
126976ab0f53SMel Gorman }
127076ab0f53SMel Gorman 
12717103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
12727be62de9SRik van Riel {
12737be62de9SRik van Riel struct compact_control cc = {
12747be62de9SRik van Riel .order = order,
1275e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC,
12767be62de9SRik van Riel };
12777be62de9SRik van Riel 
12783a7200afSMel Gorman if (!order)
12793a7200afSMel Gorman return;
12803a7200afSMel Gorman 
12817103f16dSAndrew Morton __compact_pgdat(pgdat, &cc);
12827be62de9SRik van Riel }
12837be62de9SRik van Riel 
12847103f16dSAndrew Morton static void compact_node(int nid)
12857be62de9SRik van Riel {
12867be62de9SRik van Riel struct compact_control cc = {
12877be62de9SRik van Riel .order = -1,
1288e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC,
128991ca9186SDavid Rientjes .ignore_skip_hint = true,
12907be62de9SRik van Riel };
12917be62de9SRik van Riel 
12927103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc);
12937be62de9SRik van Riel }
12947be62de9SRik van Riel 
129576ab0f53SMel Gorman /* Compact all nodes in the system */
12967964c06dSJason Liu static void compact_nodes(void)
129776ab0f53SMel Gorman {
129876ab0f53SMel Gorman int nid;
129976ab0f53SMel Gorman 
13008575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
13018575ec29SHugh Dickins lru_add_drain_all();
13028575ec29SHugh Dickins 
130376ab0f53SMel Gorman for_each_online_node(nid)
130476ab0f53SMel Gorman compact_node(nid);
130576ab0f53SMel Gorman }
130676ab0f53SMel Gorman 
130776ab0f53SMel Gorman /* The written value is actually unused; all memory is compacted */
130876ab0f53SMel Gorman int sysctl_compact_memory;
130976ab0f53SMel Gorman 
131076ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
131176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
131276ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
131376ab0f53SMel Gorman {
131476ab0f53SMel Gorman if (write)
13157964c06dSJason Liu compact_nodes();
131676ab0f53SMel Gorman 
131776ab0f53SMel Gorman return 0;
131876ab0f53SMel Gorman }
1319ed4a6d7fSMel Gorman 
13205e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
13215e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
13225e771905SMel Gorman {
13235e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos);
13245e771905SMel Gorman 
13255e771905SMel Gorman return 0;
13265e771905SMel Gorman }
13275e771905SMel Gorman 
1328ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
132974e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
133010fbcf4cSKay Sievers struct device_attribute *attr,
1331ed4a6d7fSMel Gorman const char *buf, size_t count)
1332ed4a6d7fSMel Gorman {
13338575ec29SHugh Dickins int nid = dev->id;
13348575ec29SHugh Dickins 
13358575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
13368575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
13378575ec29SHugh Dickins lru_add_drain_all();
13388575ec29SHugh Dickins 
13398575ec29SHugh Dickins compact_node(nid);
13408575ec29SHugh Dickins }
1341ed4a6d7fSMel Gorman 
1342ed4a6d7fSMel Gorman return count;
1343ed4a6d7fSMel Gorman }
134410fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1345ed4a6d7fSMel Gorman 
1346ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1347ed4a6d7fSMel Gorman {
134810fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact);
1349ed4a6d7fSMel Gorman }
1350ed4a6d7fSMel Gorman 
1351ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1352ed4a6d7fSMel Gorman {
135310fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact);
1354ed4a6d7fSMel Gorman }
1355ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1356ff9543fdSMichal Nazarewicz 
1357ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1358
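/*
 * [Editor's usage note -- not in the kernel source.] The two triggers
 * defined above are driven from userspace; the written value is ignored
 * in both cases:
 *
 *   echo 1 > /proc/sys/vm/compact_memory            # all online nodes
 *   echo 1 > /sys/devices/system/node/node0/compact # node 0 only
 *
 * Either path flushes the per-CPU LRU caches (lru_add_drain_all()) and
 * then runs compact_node(): a full, synchronous compaction with
 * order == -1, MIGRATE_SYNC, and pageblock skip hints ignored.
 */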