/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

static void map_pages(struct list_head *list)
{
        struct page *page;

        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }
}

static inline bool migrate_async_suitable(int migratetype)
{
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        if (cc->ignore_skip_hint)
                return true;

        return !get_pageblock_skip(page);
}

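/*
 * Each pageblock carries a PG_migrate_skip hint. update_pageblock_skip()
 * below sets it when a scan of a pageblock isolates nothing, so later scans
 * can pass over the block cheaply; __reset_isolation_suitable() clears every
 * hint in the zone once a full compaction pass has finished and the zone is
 * flagged for a flush.
 */
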
/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        unsigned long pfn;

        zone->compact_cached_migrate_pfn = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;

        /* Walk the zone and mark every pageblock as suitable for isolation */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                cond_resched();

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (zone != page_zone(page))
                        continue;

                clear_pageblock_skip(page);
        }
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
        int zoneid;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct zone *zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                /* Only flush if a full compaction finished recently */
                if (zone->compact_blockskip_flush)
                        __reset_isolation_suitable(zone);
        }
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
        struct zone *zone = cc->zone;
        if (!page)
                return;

        if (!nr_isolated) {
                unsigned long pfn = page_to_pfn(page);
                set_pageblock_skip(page);

                /* Update where compaction should restart */
                if (migrate_scanner) {
                        if (!cc->finished_update_migrate &&
                            pfn > zone->compact_cached_migrate_pfn)
                                zone->compact_cached_migrate_pfn = pfn;
                } else {
                        if (!cc->finished_update_free &&
                            pfn < zone->compact_cached_free_pfn)
                                zone->compact_cached_free_pfn = pfn;
                }
        }
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        return true;
}

static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
        return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                                      bool locked, struct compact_control *cc)
{
        if (should_release_lock(lock)) {
                if (locked) {
                        spin_unlock_irqrestore(lock, *flags);
                        locked = false;
                }

                /* async aborts if taking too long or contended */
                if (!cc->sync) {
                        cc->contended = true;
                        return false;
                }

                cond_resched();
        }

        if (!locked)
                spin_lock_irqsave(lock, *flags);
        return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
                        unsigned long *flags, struct compact_control *cc)
{
        return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        int migratetype = get_pageblock_migratetype(page);

        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;

        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
        if (migrate_async_suitable(migratetype))
                return true;

        /* Otherwise skip the block */
        return false;
}

/*
 * Isolate free pages onto a private freelist. The zone->lock is taken
 * inside, as late as possible. If @strict is true, will abort returning
 * 0 on any invalid PFNs or non-free pages inside of the pageblock (even
 * though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
                                unsigned long blockpfn,
                                unsigned long end_pfn,
                                struct list_head *freelist,
                                bool strict)
{
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
        unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;

        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
                        continue;
                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
                        continue;

                /*
                 * The zone lock must be held to isolate freepages.
                 * Unfortunately this is a very coarse lock and can be
                 * heavily contended if there are parallel allocations
                 * or parallel compactions. For async compaction, do not
                 * spin on the lock; in all cases acquire the lock as
                 * late as possible.
                 */
                locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
                                                        locked, cc);
                if (!locked)
                        break;

                /* Recheck this is a suitable migration target under lock */
                if (!strict && !suitable_migration_target(page))
                        break;

                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
                        continue;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                if (!isolated && strict)
                        break;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                }
        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

        /*
         * If strict isolation is requested by CMA then check that all the
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
        if (strict && nr_strict_required > total_isolated)
                total_isolated = 0;

        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);

        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);

        return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long isolated, pfn, block_end_pfn;
        LIST_HEAD(freelist);

        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
                if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
                        break;

                /*
                 * On subsequent iterations ALIGN() is actually not needed,
                 * but we keep it so as not to complicate the code.
                 */
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
                                                   &freelist, true);

                /*
                 * In strict mode, isolate_freepages_block() returns 0 if
                 * there are any holes in the block (ie. invalid PFNs or
                 * non-free pages).
                 */
                if (!isolated)
                        break;

                /*
                 * If we managed to isolate pages, it is always (1 << n) *
                 * pageblock_nr_pages for some non-negative n. (Max order
                 * page may span two pageblocks).
                 */
        }

        /* split_free_page does not map the pages */
        map_pages(&freelist);

        if (pfn < end_pfn) {
                /* Loop terminated early, cleanup. */
                release_freepages(&freelist);
                return 0;
        }

        /* We don't use freelists for anything. */
        return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        /* If locked we can use the interrupt unsafe versions */
        if (locked) {
                __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        } else {
                mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        }
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                        zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                        zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                        zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}

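/*
 * A worked example of the check above: with 12000 inactive and 4000 active
 * pages on the zone's LRU lists, too_many_isolated() reports true once more
 * than (12000 + 4000) / 2 = 8000 pages sit isolated off the LRU.
 */
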
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:        Zone pages are in.
 * @cc:          Compaction control structure.
 * @low_pfn:     The first PFN of the range.
 * @end_pfn:     The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        isolate_mode_t mode = 0;
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (!cc->sync)
                        return 0;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        /* Time to isolate some pages for migration */
        cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
                if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
                        }
                }

                /*
                 * migrate_pfn does not necessarily start aligned to a
                 * pageblock. Ensure that pfn_valid is called when moving
                 * into a new MAX_ORDER_NR_PAGES range in case of large
                 * memory holes within the zone
                 */
                if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
                        if (!pfn_valid(low_pfn)) {
                                low_pfn += MAX_ORDER_NR_PAGES - 1;
                                continue;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /*
                 * Get the page and ensure the page is within the same zone.
                 * See the comment in isolate_freepages about overlapping
                 * nodes. It is deliberate that the new zone lock is not taken
                 * as memory compaction should not move pages between nodes.
                 */
                page = pfn_to_page(low_pfn);
                if (page_zone(page) != zone)
                        continue;

                if (!valid_page)
                        valid_page = page;

                /* If isolation recently failed, do not retry */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!isolation_suitable(cc, page))
                        goto next_pageblock;

                /* Skip if free */
                if (PageBuddy(page))
                        continue;

                /*
                 * For async migration, also only scan in MOVABLE blocks. Async
                 * migration is optimistic to see if the minimum amount of work
                 * satisfies the allocation
                 */
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        cc->finished_update_migrate = true;
                        goto next_pageblock;
                }

                /*
                 * Check may be lockless but that's ok as we recheck later.
                 * It's possible to migrate LRU pages and balloon pages;
                 * skip any other type of page.
                 */
                if (!PageLRU(page)) {
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
                                        cc->finished_update_migrate = true;
                                        list_add(&page->lru, migratelist);
                                        cc->nr_migratepages++;
                                        nr_isolated++;
                                        goto check_compact_cluster;
                                }
                        }
                        continue;
                }

                /*
                 * PageLRU is set. lru_lock normally excludes isolation
                 * splitting and collapsing (collapsing has already happened
                 * if PageLRU is set) but the lock is not necessarily taken
                 * here and it is wasteful to take it just to check transhuge.
                 * Check TransHuge without lock and skip the whole pageblock if
                 * it's either a transhuge or hugetlbfs page, as calling
                 * compound_order() without preventing THP from splitting the
                 * page underneath us may return surprising results.
                 */
                if (PageTransHuge(page)) {
                        if (!locked)
                                goto next_pageblock;
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                        locked, cc);
                if (!locked || fatal_signal_pending(current))
                        break;

                /* Recheck PageLRU and PageTransHuge under lock */
                if (!PageLRU(page))
                        continue;
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;

                if (unevictable)
                        mode |= ISOLATE_UNEVICTABLE;

                lruvec = mem_cgroup_page_lruvec(page, zone);

                /* Try isolate the page */
                if (__isolate_lru_page(page, mode) != 0)
                        continue;

                VM_BUG_ON(PageTransCompound(page));

                /* Successfully isolated */
                cc->finished_update_migrate = true;
                del_page_from_lru_list(page, lruvec, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }

                continue;

next_pageblock:
                low_pfn += pageblock_nr_pages;
                low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                last_pageblock_nr = pageblock_nr;
        }

        acct_isolated(zone, locked, cc);

        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (low_pfn == end_pfn)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
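/*
 * Compaction runs two scanners towards each other: the migrate scanner walks
 * up from the start of the zone isolating in-use pages, while the free
 * scanner below walks down from the end of the zone isolating free pages to
 * migrate into. compact_finished() declares the run complete when the two
 * meet.
 */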
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * scanned from (or the end of the zone if starting). The low point
         * is the end of the pageblock the migration scanner is using.
         */
        pfn = cc->free_pfn;
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;

        /*
         * Take care that if the migration scanner is at the end of the zone
         * that the free scanner does not accidentally move to the next zone
         * in the next isolation cycle.
         */
        high_pfn = min(low_pfn, pfn);

        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                if (!pfn_valid(pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range of
                 * pages do not belong to a single zone.
                 */
                page = pfn_to_page(pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /* Found a block suitable for isolating free pages from */
                isolated = 0;

                /*
                 * As pfn may not start aligned, pfn + pageblock_nr_pages
                 * may cross a MAX_ORDER_NR_PAGES boundary and miss
                 * a pfn_valid check. Ensure isolate_freepages_block()
                 * only scans within a pageblock
                 */
                end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                end_pfn = min(end_pfn, zone_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;

                /*
                 * Record the highest PFN we isolated pages from. When next
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
                if (isolated) {
                        cc->finished_update_free = true;
                        high_pfn = max(high_pfn, pfn);
                }
        }

        /* split_free_page does not map the pages */
        map_pages(freelist);

        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
}

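/*
 * compaction_alloc() below is the new-page callback that compact_zone()
 * passes to migrate_pages(); the opaque "data" argument carries the
 * compact_control, so each migrated page is paired with a free page
 * isolated above by isolate_freepages().
 */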
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
        int nr_migratepages = 0;
        int nr_freepages = 0;
        struct page *page;

        list_for_each_entry(page, &cc->migratepages, lru)
                nr_migratepages++;
        list_for_each_entry(page, &cc->freepages, lru)
                nr_freepages++;

        cc->nr_migratepages = nr_migratepages;
        cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return ISOLATE_NONE;
        }

        /* Perform the isolation */
        low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
        if (!low_pfn || cc->contended)
                return ISOLATE_ABORT;

        cc->migrate_pfn = low_pfn;

        return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned long watermark;

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
                 * flag itself as the decision to clear the flag should be
                 * based directly on an allocation request.
                 */
                if (!current_is_kswapd())
                        zone->compact_blockskip_flush = true;

                return COMPACT_COMPLETE;
        }

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

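        /*
         * Example: for an order-3 request the check above keeps compaction
         * running until the zone has low_wmark_pages(zone) + 8 free pages,
         * i.e. the low watermark plus 1 << 3 extra pages.
         */
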
        /* Direct compactor: Is a suitable page free? */
        if (cc->page) {
                /* Was a suitable page captured? */
                if (*cc->page)
                        return COMPACT_PARTIAL;
        } else {
                unsigned int order;
                for (order = cc->order; order < MAX_ORDER; order++) {
                        struct free_area *area = &zone->free_area[order];
                        /* Job done if page is free of the right migratetype */
                        if (!list_empty(&area->free_list[cc->migratetype]))
                                return COMPACT_PARTIAL;

                        /* Job done if allocation would set block type */
                        if (cc->order >= pageblock_order && area->nr_free)
                                return COMPACT_PARTIAL;
                }
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated and for a short time, the footprint is higher
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

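        /*
         * Example: an order-9 request (a 2MB huge page with 4K base pages)
         * must see low_wmark_pages(zone) + 1024 free pages here, double the
         * 512 pages requested, to leave room for the temporary migration
         * copies.
         */
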
        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
            0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

static void compact_capture_page(struct compact_control *cc)
{
        unsigned long flags;
        int mtype, mtype_low, mtype_high;

        if (!cc->page || *cc->page)
                return;

        /*
         * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
         * regardless of the migratetype of the freelist it is captured from.
         * This is fine because the order for a high-order MIGRATE_MOVABLE
         * allocation is typically at least a pageblock size and overall
         * fragmentation is not impaired. Other allocation types must
         * capture pages from their own migratelist because otherwise they
         * could pollute other pageblocks like MIGRATE_MOVABLE with
         * difficult-to-move pages, making fragmentation worse overall.
         */
        if (cc->migratetype == MIGRATE_MOVABLE) {
                mtype_low = 0;
                mtype_high = MIGRATE_PCPTYPES;
        } else {
                mtype_low = cc->migratetype;
                mtype_high = cc->migratetype + 1;
        }

        /* Speculatively examine the free lists without zone lock */
        for (mtype = mtype_low; mtype < mtype_high; mtype++) {
                int order;
                for (order = cc->order; order < MAX_ORDER; order++) {
                        struct page *page;
                        struct free_area *area;
                        area = &(cc->zone->free_area[order]);
                        if (list_empty(&area->free_list[mtype]))
                                continue;

                        /* Take the lock and attempt capture of the page */
                        if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
                                return;
                        if (!list_empty(&area->free_list[mtype])) {
                                page = list_entry(area->free_list[mtype].next,
                                                        struct page, lru);
                                if (capture_free_page(page, cc->order, mtype)) {
                                        spin_unlock_irqrestore(&cc->zone->lock,
                                                                flags);
                                        *cc->page = page;
                                        return;
                                }
                        }
                        spin_unlock_irqrestore(&cc->zone->lock, flags);
                }
        }
}

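/*
 * Compact a single zone: position the migrate and free scanners from the
 * cached PFNs, then repeatedly isolate a pageblock's worth of in-use pages
 * and migrate them towards the free pages isolated near the end of the
 * zone, until compact_finished() reports completion.
 */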
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /*
         * Setup to move all movable pages to the end of the zone. Use cached
         * information on where the scanners should start but check that it
         * is initialised by ensuring the values are within zone boundaries.
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn;
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
        }

        /*
         * Clear pageblock skip if there were failures recently and compaction
         * is about to be retried after being deferred. kswapd does not do
         * this reset as it'll reset the cached information when going to sleep.
         */
        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
                __reset_isolation_suitable(zone);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                unsigned long nr_migrate, nr_remaining;
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

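                /*
                 * Migrate the isolated pages. Roughly, MIGRATE_SYNC_LIGHT
                 * may block on most operations but avoids the most expensive
                 * ones such as waiting on page writeback, while MIGRATE_ASYNC
                 * backs off rather than block.
                 */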
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;

                count_vm_event(COMPACTBLOCKS);
                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
                if (nr_remaining)
                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);

                /* Release isolated pages not migrated */
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        if (err == -ENOMEM) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }

                /* Capture a page now if it is a suitable size */
                compact_capture_page(cc);
        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
                                 bool sync, bool *contended,
                                 struct page **page)
{
        unsigned long ret;
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
                .page = page,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        ret = compact_zone(zone, &cc);

        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));

        *contended = cc.contended;
        return ret;
}

int sysctl_extfrag_threshold = 500;

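/*
 * With the default threshold of 500, compaction_suitable() above only lets
 * compaction proceed when the fragmentation index suggests an allocation
 * failure would owe more to external fragmentation than to a plain shortage
 * of free memory.
 */
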
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
                        bool sync, bool *contended, struct page **page)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
        int alloc_flags = 0;

        /* Check if the GFP flags allow compaction */
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;

        count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
#endif
        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int status;

                status = compact_zone_order(zone, order, gfp_mask, sync,
                                                contended, page);
                rc = max(status, rc);

                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
                                      alloc_flags))
                        break;
        }

        return rc;
}


/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
        int zoneid;
        struct zone *zone;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc->nr_freepages = 0;
                cc->nr_migratepages = 0;
                cc->zone = zone;
                INIT_LIST_HEAD(&cc->freepages);
                INIT_LIST_HEAD(&cc->migratepages);

                if (cc->order == -1 || !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);

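                /*
                 * Record the result: if the watermark is now met, remember
                 * that this order succeeds by raising compact_order_failed
                 * past it; if sync compaction still failed, defer future
                 * attempts on this zone.
                 */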
1159aff62249SRik van Riel /* Currently async compaction is never deferred. */
116068e3e926SLinus Torvalds else if (!ok && cc->sync)
1161aff62249SRik van Riel defer_compaction(zone, cc->order);
1162aff62249SRik van Riel }
1163aff62249SRik van Riel
11647be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages));
11657be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages));
116676ab0f53SMel Gorman }
116776ab0f53SMel Gorman
116876ab0f53SMel Gorman return 0;
116976ab0f53SMel Gorman }
117076ab0f53SMel Gorman
11717be62de9SRik van Riel int compact_pgdat(pg_data_t *pgdat, int order)
11727be62de9SRik van Riel {
11737be62de9SRik van Riel struct compact_control cc = {
11747be62de9SRik van Riel .order = order,
117568e3e926SLinus Torvalds .sync = false,
11761fb3f8caSMel Gorman .page = NULL,
11777be62de9SRik van Riel };
11787be62de9SRik van Riel
11797be62de9SRik van Riel return __compact_pgdat(pgdat, &cc);
11807be62de9SRik van Riel }
11817be62de9SRik van Riel
11827be62de9SRik van Riel static int compact_node(int nid)
11837be62de9SRik van Riel {
11847be62de9SRik van Riel struct compact_control cc = {
11857be62de9SRik van Riel .order = -1,
118668e3e926SLinus Torvalds .sync = true,
11871fb3f8caSMel Gorman .page = NULL,
11887be62de9SRik van Riel };
11897be62de9SRik van Riel
11908575ec29SHugh Dickins return __compact_pgdat(NODE_DATA(nid), &cc);
11917be62de9SRik van Riel }
11927be62de9SRik van Riel
119376ab0f53SMel Gorman /* Compact all nodes in the system */
119476ab0f53SMel Gorman static int compact_nodes(void)
119576ab0f53SMel Gorman {
119676ab0f53SMel Gorman int nid;
119776ab0f53SMel Gorman
11988575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
11998575ec29SHugh Dickins lru_add_drain_all();
12008575ec29SHugh Dickins
120176ab0f53SMel Gorman for_each_online_node(nid)
120276ab0f53SMel Gorman compact_node(nid);
120376ab0f53SMel Gorman
120476ab0f53SMel Gorman return COMPACT_COMPLETE;
120576ab0f53SMel Gorman }
120676ab0f53SMel Gorman
120776ab0f53SMel Gorman /* The written value is actually unused; all memory is compacted */
120876ab0f53SMel Gorman int sysctl_compact_memory;
120976ab0f53SMel Gorman
121076ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
121176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
121276ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
121376ab0f53SMel Gorman {
121476ab0f53SMel Gorman if (write)
121576ab0f53SMel Gorman return compact_nodes();
121676ab0f53SMel Gorman
121776ab0f53SMel Gorman return 0;
121876ab0f53SMel Gorman }
1219ed4a6d7fSMel Gorman
12205e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12215e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
12225e771905SMel Gorman {
12235e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos);
12245e771905SMel Gorman
12255e771905SMel Gorman return 0;
12265e771905SMel Gorman }
12275e771905SMel Gorman
1228ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
122910fbcf4cSKay Sievers ssize_t sysfs_compact_node(struct device *dev,
123010fbcf4cSKay Sievers struct device_attribute *attr,
1231ed4a6d7fSMel Gorman const char *buf, size_t count)
1232ed4a6d7fSMel Gorman {
12338575ec29SHugh Dickins int nid = dev->id;
12348575ec29SHugh Dickins
12358575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
12368575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
12378575ec29SHugh Dickins lru_add_drain_all();
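/*
 * Descriptive note on the drain above (not in the original source):
 * pages sitting in the per-CPU LRU-add pagevecs are not yet on the
 * zone LRU lists and would be invisible to compaction's migrate
 * scanner, so they are flushed onto the LRU lists first.
 */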
12388575ec29SHugh Dickins
12398575ec29SHugh Dickins compact_node(nid);
12408575ec29SHugh Dickins }
1241ed4a6d7fSMel Gorman
1242ed4a6d7fSMel Gorman return count;
1243ed4a6d7fSMel Gorman }
124410fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1245ed4a6d7fSMel Gorman
1246ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1247ed4a6d7fSMel Gorman {
124810fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact);
1249ed4a6d7fSMel Gorman }
1250ed4a6d7fSMel Gorman
1251ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1252ed4a6d7fSMel Gorman {
125310fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact);
1254ed4a6d7fSMel Gorman }
1255ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1256ff9543fdSMichal Nazarewicz
1257ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1258
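/*
 * Usage sketch (not in the original source), assuming a kernel built
 * with CONFIG_COMPACTION, plus CONFIG_SYSFS and CONFIG_NUMA for the
 * per-node knob. The handlers above are reachable from userspace as:
 *
 *   # compact every zone on every node (sysctl_compaction_handler)
 *   echo 1 > /proc/sys/vm/compact_memory
 *
 *   # compact a single node's zones (sysfs_compact_node)
 *   echo 1 > /sys/devices/system/node/node0/compact
 *
 * In both cases the written value is ignored; the write itself
 * triggers compact_nodes() or compact_node().
 */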