/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
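 * All pageblock skip bits are cleared and the cached scanner positions
 * are reset to the zone boundaries.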
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
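 * The cached scanner positions are also updated so that the next
 * compaction pass restarts close to where this one stopped.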
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired.
	 * Other allocation types must capture pages from their own
	 * migratetype freelist because otherwise they could pollute other
	 * pageblocks like MIGRATE_MOVABLE with difficult-to-move pages,
	 * making fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;
			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
									flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}

/*
 * Isolate free pages onto a private freelist. The zone lock is acquired
 * on demand, as late as possible. If @strict is true, will abort returning
 * 0 on any invalid PFNs or non-free pages inside of the pageblock (even
 * though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long nr_strict_required = end_pfn - blockpfn;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages.
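	 * Each free page found is split to order-0 pages by
	 * split_free_page() and added to @freelist.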
	 */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			continue;
		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			continue;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction, do not
		 * spin on the lock; in all cases the lock is acquired as
		 * late as possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			break;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && nr_strict_required > total_isolated)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc: Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn: The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell in
 * the middle of a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything.
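	 * The caller only needs to know how far isolation got, so the
	 * PFN is returned rather than the list.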
	 */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone: Zone pages are in.
 * @cc: Compaction control structure.
 * @low_pfn: The first PFN of the range.
 * @end_pfn: The one-past-the-last PFN of the range.
 * @unevictable: true if unevictable pages may be isolated
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
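 * The caller is responsible for advancing cc->migrate_pfn from the
 * returned value, as isolate_migratepages() does.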
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			cc->finished_update_migrate = true;
			goto next_pageblock;
		}

		/* Check may be lockless but that's ok as we recheck later */
		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
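		 * If the lock is held, the page cannot be split underneath
		 * us and reading compound_order() below is safe.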
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		if (unevictable)
			mode |= ISOLATE_UNEVICTABLE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		cc->finished_update_migrate = true;
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * As pfn may not start aligned, pfn + pageblock_nr_pages
		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
		 * a pfn_valid check. Ensure isolate_freepages_block()
		 * only scans within a pageblock
		 */
		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		end_pfn = min(end_pfn, zone_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
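 * migrate_pages() invokes this callback each time it needs a
 * destination page for one of the isolated pages.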
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
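 * A single pageblock is scanned per call; compact_zone() keeps calling
 * until compact_finished() signals that the run is complete.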
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear the flag should be
		 * based directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (cc->order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
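	 * For example, with the default sysctl_extfrag_threshold of 500,
	 * an index of 400 (mostly a lack of memory) skips compaction while
	 * an index of 700 (mostly fragmentation) lets it continue.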
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
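	 * (That reset is done by reset_isolation_suitable(), which only
	 * flushes if a full compaction finished recently.)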
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}


/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	/* Return 0 so the write is not reported as an error to userspace */
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */