/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
        count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
        count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

static void map_pages(struct list_head *list)
{
        struct page *page;

        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }
}

static inline bool migrate_async_suitable(int migratetype)
{
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
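/*
 * Pageblock skip hints: each pageblock carries a skip bit (PB_migrate_skip,
 * see include/linux/pageblock-flags.h) that the scanners below consult and
 * maintain. A sketch of the lifecycle as implemented in this file:
 *
 *   isolation_suitable()         - honour the hint, unless the caller set
 *                                  cc->ignore_skip_hint
 *   update_pageblock_skip()      - set the hint when a whole pageblock was
 *                                  scanned without isolating anything
 *   __reset_isolation_suitable() - clear all hints, e.g. when the two
 *                                  scanners have met and compaction is
 *                                  restarted after being deferred
 */
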
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        if (cc->ignore_skip_hint)
                return true;

        return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;

        zone->compact_cached_migrate_pfn[0] = start_pfn;
        zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;

        /* Walk the zone and mark every pageblock as suitable for isolation */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                cond_resched();

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (zone != page_zone(page))
                        continue;

                clear_pageblock_skip(page);
        }
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
        int zoneid;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct zone *zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                /* Only flush if a full compaction finished recently */
                if (zone->compact_blockskip_flush)
                        __reset_isolation_suitable(zone);
        }
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
        struct zone *zone = cc->zone;
        unsigned long pfn;

        if (cc->ignore_skip_hint)
                return;

        if (!page)
                return;

        if (nr_isolated)
                return;

        set_pageblock_skip(page);

        pfn = page_to_pfn(page);

        /* Update where async and sync compaction should restart */
        if (migrate_scanner) {
                if (cc->finished_update_migrate)
                        return;
                if (pfn > zone->compact_cached_migrate_pfn[0])
                        zone->compact_cached_migrate_pfn[0] = pfn;
                if (cc->mode != MIGRATE_ASYNC &&
                    pfn > zone->compact_cached_migrate_pfn[1])
                        zone->compact_cached_migrate_pfn[1] = pfn;
        } else {
                if (cc->finished_update_free)
                        return;
                if (pfn < zone->compact_cached_free_pfn)
                        zone->compact_cached_free_pfn = pfn;
        }
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        return true;
}

static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
        return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                                      bool locked, struct compact_control *cc)
{
        if (should_release_lock(lock)) {
                if (locked) {
                        spin_unlock_irqrestore(lock, *flags);
                        locked = false;
                }

                /* async aborts if taking too long or contended */
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return false;
                }

                cond_resched();
        }

        if (!locked)
                spin_lock_irqsave(lock, *flags);
        return true;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_checklock_irqsave() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
        /* async compaction aborts if contended */
        if (need_resched()) {
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return true;
                }

                cond_resched();
        }

        return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        /* If the page is a large free page, then disallow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return false;

        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
        if (migrate_async_suitable(get_pageblock_migratetype(page)))
                return true;

        /* Otherwise skip the block */
        return false;
}
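
/*
 * Both scanners below work in pageblock_nr_pages units. The exact size is
 * architecture dependent; with 4K pages and THP enabled on x86_64,
 * pageblock_order is typically 9, so a pageblock is 512 pages (2MB).
 */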

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
                                unsigned long blockpfn,
                                unsigned long end_pfn,
                                struct list_head *freelist,
                                bool strict)
{
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
        unsigned long flags;
        bool locked = false;

        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
                        goto isolate_fail;

                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
                        goto isolate_fail;

                /*
                 * The zone lock must be held to isolate freepages.
                 * Unfortunately this is a very coarse lock and can be
                 * heavily contended if there are parallel allocations
                 * or parallel compactions. For async compaction we do
                 * not spin on the lock, and in all cases we acquire it
                 * as late as possible.
                 */
                locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
                                                                locked, cc);
                if (!locked)
                        break;

                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
                        goto isolate_fail;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                        continue;
                }

isolate_fail:
                if (strict)
                        break;
                else
                        continue;

        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

        /*
         * If strict isolation is requested by CMA then check that all the
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;

        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);

        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);

        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
        if (total_isolated)
                count_compact_events(COMPACTISOLATED, total_isolated);
        return total_isolated;
}

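/*
 * Note that split_free_page() in the loop above returns the number of
 * order-0 pages the buddy page was broken into: isolating e.g. a free
 * order-3 page adds 8 base pages to the freelist and advances both
 * blockpfn and the cursor by 8.
 */
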
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, which cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long isolated, pfn, block_end_pfn;
        LIST_HEAD(freelist);

        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
                if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
                        break;

                /*
                 * On subsequent iterations ALIGN() is actually not needed,
                 * but we keep it so as not to complicate the code.
                 */
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
                                                   &freelist, true);

                /*
                 * In strict mode, isolate_freepages_block() returns 0 if
                 * there are any holes in the block (ie. invalid PFNs or
                 * non-free pages).
                 */
                if (!isolated)
                        break;

                /*
                 * If we managed to isolate pages, it is always (1 << n) *
                 * pageblock_nr_pages for some non-negative n. (Max order
                 * page may span two pageblocks).
                 */
        }

        /* split_free_page does not map the pages */
        map_pages(&freelist);

        if (pfn < end_pfn) {
                /* Loop terminated early, cleanup. */
                release_freepages(&freelist);
                return 0;
        }

        /* We don't use freelists for anything. */
        return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        if (list_empty(&cc->migratepages))
                return;

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
        mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                                        zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}

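/*
 * The threshold above makes compaction back off once more pages are
 * isolated than half of what remains on the active + inactive LRU lists.
 * It mirrors the throttling that direct reclaim applies, so that parallel
 * reclaimers and compactors cannot pin an excessive share of memory off
 * the LRU lists.
 */
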
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *                                a single pageblock
 * @cc:         Compaction control structure.
 * @low_pfn:    The first PFN to isolate.
 * @end_pfn:    The one-past-the-last PFN to isolate, within the same pageblock.
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated onto the cc->migratepages list (which is not
 * required to be empty), and cc->nr_migratepages is updated accordingly.
 * The cc->migrate_pfn field is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                           unsigned long end_pfn, isolate_mode_t isolate_mode)
{
        struct zone *zone = cc->zone;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated.
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (cc->mode == MIGRATE_ASYNC)
                        return 0;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        if (compact_should_abort(cc))
                return 0;

        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
                if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /*
                 * Get the page and ensure the page is within the same zone.
                 * See the comment in isolate_freepages about overlapping
                 * nodes. It is deliberate that the new zone lock is not taken
                 * as memory compaction should not move pages between nodes.
                 */
                page = pfn_to_page(low_pfn);
                if (page_zone(page) != zone)
                        continue;

                if (!valid_page)
                        valid_page = page;

                /*
                 * Skip if free. page_order cannot be used without zone->lock
                 * as nothing prevents parallel allocations or buddy merging.
                 */
                if (PageBuddy(page))
                        continue;

                /*
                 * Check may be lockless but that's ok as we recheck later.
                 * It's possible to migrate LRU pages and balloon pages;
                 * skip any other type of page.
                 */
                if (!PageLRU(page)) {
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
                                        goto isolate_success;
                                }
                        }
                        continue;
                }

                /*
                 * PageLRU is set. lru_lock normally excludes isolation
                 * splitting and collapsing (collapsing has already happened
                 * if PageLRU is set) but the lock is not necessarily taken
                 * here and it is wasteful to take it just to check transhuge.
                 * Check TransHuge without lock and skip the whole pageblock if
                 * it's either a transhuge or hugetlbfs page, as calling
                 * compound_order() without preventing THP from splitting the
                 * page underneath us may return surprising results.
                 */
                if (PageTransHuge(page)) {
                        if (!locked)
                                low_pfn = ALIGN(low_pfn + 1,
                                                pageblock_nr_pages) - 1;
                        else
                                low_pfn += (1 << compound_order(page)) - 1;

                        continue;
                }

                /*
                 * Migration will fail if an anonymous page is pinned in memory,
                 * so avoid taking lru_lock and isolating it unnecessarily in an
                 * admittedly racy check.
                 */
                if (!page_mapping(page) &&
                    page_count(page) > page_mapcount(page))
                        continue;

                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                                locked, cc);
                if (!locked || fatal_signal_pending(current))
                        break;

                /* Recheck PageLRU and PageTransHuge under lock */
                if (!PageLRU(page))
                        continue;
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                lruvec = mem_cgroup_page_lruvec(page, zone);

                /* Try to isolate the page */
                if (__isolate_lru_page(page, isolate_mode) != 0)
                        continue;

                VM_BUG_ON_PAGE(PageTransCompound(page), page);

                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
                cc->finished_update_migrate = true;
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }
        }

        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        /*
         * Update the pageblock-skip information and cached scanner pfn,
         * if the whole pageblock was scanned without isolating any page.
         */
        if (low_pfn == end_pfn)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
        if (nr_isolated)
                count_compact_events(COMPACTISOLATED, nr_isolated);

        return low_pfn;
}

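/*
 * isolate_migratepages_block() stops once cc->nr_migratepages reaches
 * COMPACT_CLUSTER_MAX (defined as SWAP_CLUSTER_MAX in mm/internal.h,
 * typically 32), so migration proceeds in small batches and the LRU lock
 * is never held across long stretches of the pageblock.
 */
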
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to, e.g., a pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long pfn, block_end_pfn;

        /* Scan block by block. First and last block may be incomplete */
        pfn = start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

        for (; pfn < end_pfn; pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {

                block_end_pfn = min(block_end_pfn, end_pfn);

                /* Skip whole pageblock in case of a memory hole */
                if (!pfn_valid(pfn))
                        continue;

                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                 ISOLATE_UNEVICTABLE);

                /*
                 * In case of fatal failure, release everything that might
                 * have been isolated in the previous iteration, and signal
                 * the failure back to caller.
                 */
                if (!pfn) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        break;
                }
        }
        acct_isolated(cc->zone, cc);

        return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
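/*
 * Overview of the two-scanner design driven by compact_zone(): the migrate
 * scanner (isolate_migratepages) walks upwards from the start of the zone
 * collecting pages to move, while the free scanner (isolate_freepages)
 * walks downwards from the end of the zone collecting free pages to move
 * them into. A compaction run is complete when the two scanners meet.
 */
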
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
        struct zone *zone = cc->zone;
        struct page *page;
        unsigned long block_start_pfn;  /* start of current pageblock */
        unsigned long block_end_pfn;    /* end of current pageblock */
        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * successfully isolated from, the zone-cached value, or the end of
         * the zone when isolating for the first time. We need this aligned
         * to the pageblock boundary, because we do
         * block_start_pfn -= pageblock_nr_pages in the for loop.
         * For the ending point, take care when isolating in the last
         * pageblock of a zone which ends in the middle of a pageblock.
         * The low boundary is the end of the pageblock the migration scanner
         * is using.
         */
        block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
        block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
                                                zone_end_pfn(zone));
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
                 * to schedule, or even abort async compaction.
                 */
                if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
                                                && compact_should_abort(cc))
                        break;

                if (!pfn_valid(block_start_pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range of
                 * pages do not belong to a single zone.
                 */
                page = pfn_to_page(block_start_pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /* Found a block suitable for isolating free pages from */
                cc->free_pfn = block_start_pfn;
                isolated = isolate_freepages_block(cc, block_start_pfn,
                                        block_end_pfn, freelist, false);
                nr_freepages += isolated;

                /*
                 * Set a flag that we successfully isolated in this pageblock.
                 * In the next loop iteration, zone->compact_cached_free_pfn
                 * will not be updated and thus it will effectively contain the
                 * highest pageblock we isolated pages from.
                 */
                if (isolated)
                        cc->finished_update_free = true;

                /*
                 * isolate_freepages_block() might have aborted due to async
                 * compaction being contended
                 */
                if (cc->contended)
                        break;
        }

        /* split_free_page does not map the pages */
        map_pages(freelist);

        /*
         * If we crossed the migrate scanner, we want to keep it that way
         * so that compact_finished() may detect this
         */
        if (block_start_pfn < low_pfn)
                cc->free_pfn = cc->migrate_pfn;

        cc->nr_freepages = nr_freepages;
}

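/*
 * The two callbacks below plug into migrate_pages(): compaction_alloc()
 * hands out previously isolated free pages as migration targets, and
 * compaction_free() takes back any page that migration did not consume.
 */
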
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /*
         * Isolate free pages if necessary, and if we are not aborting due to
         * contention.
         */
        if (list_empty(&cc->freepages)) {
                if (!cc->contended)
                        isolate_freepages(cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
        struct compact_control *cc = (struct compact_control *)data;

        list_add(&page->lru, &cc->freepages);
        cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
                (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

        /*
         * Start at where we last stopped, or beginning of the zone as
         * initialized by compact_zone()
         */
        low_pfn = cc->migrate_pfn;

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

        /*
         * Iterate over whole pageblocks until we find the first suitable.
         * Do not cross the free scanner.
         */
        for (; end_pfn <= cc->free_pfn;
                        low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

                /*
                 * This can potentially iterate a massively long zone with
                 * many pageblocks unsuitable, so periodically check if we
                 * need to schedule, or even abort async compaction.
                 */
                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
                                                && compact_should_abort(cc))
                        break;

                /* Skip whole pageblock in case of a memory hole */
                if (!pfn_valid(low_pfn))
                        continue;

                page = pfn_to_page(low_pfn);

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /*
                 * For async compaction, also only scan in blocks suitable
                 * for async migration (MOVABLE or CMA). Async compaction is
                 * optimistic and checks whether the minimum amount of work
                 * satisfies the allocation.
                 */
                if (cc->mode == MIGRATE_ASYNC &&
                    !migrate_async_suitable(get_pageblock_migratetype(page)))
                        continue;

                /* Perform the isolation */
                low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
                                                                isolate_mode);

                if (!low_pfn || cc->contended)
                        return ISOLATE_ABORT;

                /*
                 * Either we isolated something and can proceed with
                 * migration, or we failed and compact_zone() should decide
                 * whether to continue or not.
                 */
                break;
        }

        acct_isolated(zone, cc);
        /* Record where migration scanner will be restarted */
        cc->migrate_pfn = low_pfn;

        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

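/*
 * compact_finished() decides after each migration cycle whether to keep
 * going: COMPACT_COMPLETE once the two scanners have met, COMPACT_PARTIAL
 * as soon as a free page of the requested order (and migratetype) is
 * available, and COMPACT_CONTINUE otherwise.
 */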
static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark;

        if (cc->contended || fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction start anew. */
                zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
                zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);

                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
                 * flag itself as the decision to clear it should be based
                 * directly on an allocation request.
                 */
                if (!current_is_kswapd())
                        zone->compact_blockskip_flush = true;

                return COMPACT_COMPLETE;
        }

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &zone->free_area[order];

                /* Job done if page is free of the right migratetype */
                if (!list_empty(&area->free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}

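/*
 * As a rough worked example of the watermark check in compaction_suitable()
 * below: for an order-9 request, 2UL << 9 is 1024 extra pages (4MB with 4K
 * pages) demanded above the low watermark, covering the temporary double
 * footprint while pages are being copied during migration.
 */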
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated, and for a short time the footprint is higher.
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * The fragmentation index determines if allocation failures are due
         * to low memory or external fragmentation:
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
            0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

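/*
 * fragmentation_index() is implemented in mm/vmstat.c. As the comment above
 * sketches, it condenses the buddy free lists into a single number: -1000
 * when a suitably sized free page already exists, values near 0 when
 * failures stem from a genuine shortage of free pages, and values near 1000
 * when enough memory is free but too fragmented. With the default
 * sysctl_extfrag_threshold of 500 (set further below), compaction is skipped
 * unless fragmentation is the dominant cause of failure.
 */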
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        const bool sync = cc->mode != MIGRATE_ASYNC;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /*
         * Clear pageblock skip if there were failures recently and compaction
         * is about to be retried after being deferred. kswapd does not do
         * this reset as it'll reset the cached information when going to sleep.
         */
        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
                __reset_isolation_suitable(zone);

        /*
         * Setup to move all movable pages to the end of the zone. Use cached
         * information on where the scanners should start, but check that it
         * is initialised by ensuring the values are within zone boundaries.
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }

        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);

                trace_mm_compaction_migratepages(cc->nr_migratepages, err,
                                                        &cc->migratepages);

                /* All pages were either migrated or will be released */
                cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        /*
                         * migrate_pages() may return -ENOMEM when scanners meet
                         * and we want compact_finished() to detect it
                         */
                        if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }
        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        trace_mm_compaction_end(ret);

        return ret;
}

1119e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1120e0b9daebSDavid Rientjes 		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
112156de7263SMel Gorman {
1122e64c5237SShaohua Li 	unsigned long ret;
112356de7263SMel Gorman 	struct compact_control cc = {
112456de7263SMel Gorman 		.nr_freepages = 0,
112556de7263SMel Gorman 		.nr_migratepages = 0,
112656de7263SMel Gorman 		.order = order,
112756de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
112856de7263SMel Gorman 		.zone = zone,
1129e0b9daebSDavid Rientjes 		.mode = mode,
113056de7263SMel Gorman 	};
113156de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
113256de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
113356de7263SMel Gorman 
1134e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1135e64c5237SShaohua Li 
1136e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1137e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1138e64c5237SShaohua Li 
1139e64c5237SShaohua Li 	*contended = cc.contended;
1140e64c5237SShaohua Li 	return ret;
114156de7263SMel Gorman }
114256de7263SMel Gorman 
11435e771905SMel Gorman int sysctl_extfrag_threshold = 500;
11445e771905SMel Gorman 
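/*
 * The threshold above is exported as /proc/sys/vm/extfrag_threshold and
 * shares the 0-1000 scale of the fragmentation index; writes go through
 * proc_dointvec_minmax() in sysctl_extfrag_handler() below, with the min
 * and max supplied by the ctl_table entry. Raising it makes
 * compaction_suitable() attribute more failures to plain lack of memory,
 * so compaction is skipped more often. For example, after
 *
 *	echo 750 > /proc/sys/vm/extfrag_threshold
 *
 * any zone whose (non-negative) fragindex is at most 750 returns
 * COMPACT_SKIPPED.
 */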
114556de7263SMel Gorman /**
114656de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
114756de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
114856de7263SMel Gorman  * @order: The order of the current allocation
114956de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
115056de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
1151e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
1152661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
115353853e2dSVlastimil Babka  * @candidate_zone: Return the zone where we think allocation should succeed
115456de7263SMel Gorman  *
115556de7263SMel Gorman  * This is the main entry point for direct page compaction.
115656de7263SMel Gorman  */
115756de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
115877f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
115953853e2dSVlastimil Babka 			enum migrate_mode mode, bool *contended,
116053853e2dSVlastimil Babka 			struct zone **candidate_zone)
116156de7263SMel Gorman {
116256de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
116356de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
116456de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
116556de7263SMel Gorman 	struct zoneref *z;
116656de7263SMel Gorman 	struct zone *zone;
116753853e2dSVlastimil Babka 	int rc = COMPACT_DEFERRED;
1168d95ea5d1SBartlomiej Zolnierkiewicz 	int alloc_flags = 0;
116956de7263SMel Gorman 
11704ffb6335SMel Gorman 	/* Check if the GFP flags allow compaction */
1171c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
117253853e2dSVlastimil Babka 		return COMPACT_SKIPPED;
117356de7263SMel Gorman 
1174d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1175d95ea5d1SBartlomiej Zolnierkiewicz 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1176d95ea5d1SBartlomiej Zolnierkiewicz 		alloc_flags |= ALLOC_CMA;
1177d95ea5d1SBartlomiej Zolnierkiewicz #endif
117856de7263SMel Gorman 	/* Compact each zone in the list */
117956de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
118056de7263SMel Gorman 								nodemask) {
118156de7263SMel Gorman 		int status;
118256de7263SMel Gorman 
118353853e2dSVlastimil Babka 		if (compaction_deferred(zone, order))
118453853e2dSVlastimil Babka 			continue;
118553853e2dSVlastimil Babka 
1186e0b9daebSDavid Rientjes 		status = compact_zone_order(zone, order, gfp_mask, mode,
11878fb74b9fSMel Gorman 						contended);
118856de7263SMel Gorman 		rc = max(status, rc);
118956de7263SMel Gorman 
11903e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1191d95ea5d1SBartlomiej Zolnierkiewicz 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
119253853e2dSVlastimil Babka 				      alloc_flags)) {
119353853e2dSVlastimil Babka 			*candidate_zone = zone;
119453853e2dSVlastimil Babka 			/*
119553853e2dSVlastimil Babka 			 * We think the allocation will succeed in this zone,
119653853e2dSVlastimil Babka 			 * but it is not certain, hence the false. The caller
119753853e2dSVlastimil Babka 			 * will repeat this with true if allocation indeed
119853853e2dSVlastimil Babka 			 * succeeds in this zone.
119953853e2dSVlastimil Babka 			 */
120053853e2dSVlastimil Babka 			compaction_defer_reset(zone, order, false);
120156de7263SMel Gorman 			break;
120253853e2dSVlastimil Babka 		} else if (mode != MIGRATE_ASYNC) {
120353853e2dSVlastimil Babka 			/*
120453853e2dSVlastimil Babka 			 * We think that allocation won't succeed in this zone
120553853e2dSVlastimil Babka 			 * so we defer compaction there. If it ends up
120653853e2dSVlastimil Babka 			 * succeeding after all, it will be reset.
120753853e2dSVlastimil Babka 			 */
120853853e2dSVlastimil Babka 			defer_compaction(zone, order);
120953853e2dSVlastimil Babka 		}
121056de7263SMel Gorman 	}
121156de7263SMel Gorman 
121256de7263SMel Gorman 	return rc;
121356de7263SMel Gorman }
121456de7263SMel Gorman 
121556de7263SMel Gorman 
121676ab0f53SMel Gorman /* Compact all zones within a node */
12177103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
121876ab0f53SMel Gorman {
121976ab0f53SMel Gorman 	int zoneid;
122076ab0f53SMel Gorman 	struct zone *zone;
122176ab0f53SMel Gorman 
122276ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
122376ab0f53SMel Gorman 
122476ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
122576ab0f53SMel Gorman 		if (!populated_zone(zone))
122676ab0f53SMel Gorman 			continue;
122776ab0f53SMel Gorman 
12287be62de9SRik van Riel 		cc->nr_freepages = 0;
12297be62de9SRik van Riel 		cc->nr_migratepages = 0;
12307be62de9SRik van Riel 		cc->zone = zone;
12317be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
12327be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
123376ab0f53SMel Gorman 
1234aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
12357be62de9SRik van Riel 			compact_zone(zone, cc);
123676ab0f53SMel Gorman 
1237aff62249SRik van Riel 		if (cc->order > 0) {
1238de6c60a6SVlastimil Babka 			if (zone_watermark_ok(zone, cc->order,
1239de6c60a6SVlastimil Babka 						low_wmark_pages(zone), 0, 0))
1240de6c60a6SVlastimil Babka 				compaction_defer_reset(zone, cc->order, false);
1241aff62249SRik van Riel 		}
1242aff62249SRik van Riel 
12437be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
12447be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
124576ab0f53SMel Gorman 	}
124676ab0f53SMel Gorman }
124776ab0f53SMel Gorman 
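/*
 * Two callers drive __compact_pgdat(): compact_pgdat() below is the
 * background path, invoked from kswapd, and uses MIGRATE_ASYNC with a real
 * allocation order so per-zone deferral is honoured; compact_node() is the
 * forced path used by the proc and sysfs triggers further down, with
 * order == -1, MIGRATE_SYNC, and skip hints ignored.
 */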
12487103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
12497be62de9SRik van Riel {
12507be62de9SRik van Riel 	struct compact_control cc = {
12517be62de9SRik van Riel 		.order = order,
1252e0b9daebSDavid Rientjes 		.mode = MIGRATE_ASYNC,
12537be62de9SRik van Riel 	};
12547be62de9SRik van Riel 
12553a7200afSMel Gorman 	if (!order)
12563a7200afSMel Gorman 		return;
12573a7200afSMel Gorman 
12587103f16dSAndrew Morton 	__compact_pgdat(pgdat, &cc);
12597be62de9SRik van Riel }
12607be62de9SRik van Riel 
12617103f16dSAndrew Morton static void compact_node(int nid)
12627be62de9SRik van Riel {
12637be62de9SRik van Riel 	struct compact_control cc = {
12647be62de9SRik van Riel 		.order = -1,
1265e0b9daebSDavid Rientjes 		.mode = MIGRATE_SYNC,
126691ca9186SDavid Rientjes 		.ignore_skip_hint = true,
12677be62de9SRik van Riel 	};
12687be62de9SRik van Riel 
12697103f16dSAndrew Morton 	__compact_pgdat(NODE_DATA(nid), &cc);
12707be62de9SRik van Riel }
12717be62de9SRik van Riel 
127276ab0f53SMel Gorman /* Compact all nodes in the system */
12737964c06dSJason Liu static void compact_nodes(void)
127476ab0f53SMel Gorman {
127576ab0f53SMel Gorman 	int nid;
127676ab0f53SMel Gorman 
12778575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
12788575ec29SHugh Dickins 	lru_add_drain_all();
12798575ec29SHugh Dickins 
128076ab0f53SMel Gorman 	for_each_online_node(nid)
128176ab0f53SMel Gorman 		compact_node(nid);
128276ab0f53SMel Gorman }
128376ab0f53SMel Gorman 
128476ab0f53SMel Gorman /* The written value is actually unused; all memory is compacted */
128576ab0f53SMel Gorman int sysctl_compact_memory;
128676ab0f53SMel Gorman 
128776ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
128876ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
128976ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
129076ab0f53SMel Gorman {
129176ab0f53SMel Gorman 	if (write)
12927964c06dSJason Liu 		compact_nodes();
129376ab0f53SMel Gorman 
129476ab0f53SMel Gorman 	return 0;
129576ab0f53SMel Gorman }
1296ed4a6d7fSMel Gorman 
12975e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12985e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
12995e771905SMel Gorman {
13005e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
13015e771905SMel Gorman 
13025e771905SMel Gorman 	return 0;
13035e771905SMel Gorman }
13045e771905SMel Gorman 
1305ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
130674e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
130710fbcf4cSKay Sievers 			struct device_attribute *attr,
1308ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1309ed4a6d7fSMel Gorman {
13108575ec29SHugh Dickins 	int nid = dev->id;
13118575ec29SHugh Dickins 
13128575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
13138575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
13148575ec29SHugh Dickins 		lru_add_drain_all();
13158575ec29SHugh Dickins 
13168575ec29SHugh Dickins 		compact_node(nid);
13178575ec29SHugh Dickins 	}
1318ed4a6d7fSMel Gorman 
1319ed4a6d7fSMel Gorman 	return count;
1320ed4a6d7fSMel Gorman }
132110fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1322ed4a6d7fSMel Gorman 
1323ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1324ed4a6d7fSMel Gorman {
132510fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1326ed4a6d7fSMel Gorman }
1327ed4a6d7fSMel Gorman 
1328ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1329ed4a6d7fSMel Gorman {
133010fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1331ed4a6d7fSMel Gorman }
1332ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1333ff9543fdSMichal Nazarewicz 
1334ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1335
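/*
 * Usage sketch for the userspace triggers defined above (node0 is just an
 * example node; the value written is ignored on both paths):
 *
 *	echo 1 > /proc/sys/vm/compact_memory             # compact all nodes
 *	echo 1 > /sys/devices/system/node/node0/compact  # compact one node
 *
 * Both end up in compact_node(), i.e. full synchronous compaction with
 * order == -1 and skip hints ignored.
 */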