/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
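
/*
 * Worked example of the helpers above (illustrative only; assumes the
 * common pageblock_order of 9, i.e. 512 pages per pageblock):
 *
 *	block_start_pfn(1000, 9) == round_down(1000, 512) == 512
 *	block_end_pfn(1000, 9)   == ALIGN(1001, 512)      == 1024
 *
 * block_end_pfn() is one-past-the-last, so a pfn that is already
 * block-aligned still maps to the end of its own block, not to itself.
 */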

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}
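
/*
 * Deferral arithmetic, by example (illustrative): each call to
 * defer_compaction() resets compact_considered and bumps
 * compact_defer_shift, so successive failure cycles skip 1, 3, 7, ..., 63
 * attempts — compact_considered must reach defer_limit
 * (1 << compact_defer_shift) before compaction_deferred() returns false
 * again. Once compact_defer_shift hits COMPACT_MAX_DEFER_SHIFT (6), every
 * further cycle skips 63 attempts, until compaction_defer_reset() below
 * clears both counters.
 */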

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
			pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case, if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to a pending fatal signal,
 * or if async compaction should abort due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}
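
/*
 * A minimal sketch of how the two helpers above are paired in the scanner
 * loops below (illustrative only; "lock" stands for zone->lock or
 * zone->lru_lock, depending on the scanner):
 *
 *	for (; pfn < end_pfn; pfn++) {
 *		if (!(pfn % SWAP_CLUSTER_MAX)
 *		    && compact_unlock_should_abort(lock, flags, &locked, cc))
 *			break;
 *		...
 *		if (!locked) {
 *			locked = compact_trylock_irqsave(lock, &flags, cc);
 *			if (!locked)
 *				break;
 *		}
 *		...
 *	}
 */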

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if a fatal
		 * signal is pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction, do
			 * not spin on the lock; acquire it as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful not to go outside the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
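
/*
 * Example of the accounting above (illustrative): if the scanner hits a
 * free order-3 buddy page, split_free_page() hands back 8 order-0 pages,
 * all 8 are added to the private freelist, and blockpfn/cursor jump by 7
 * so that, with the loop increment, the next iteration starts just past
 * the split page.
 */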

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns one-past-the-last PFN of the isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if an isolated freepage
		 * is more than pageblock order. In this case, we adjust the
		 * scanning range to the right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
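
/*
 * Example of the threshold above (illustrative): with 600 inactive and 400
 * active LRU pages in the zone, compaction starts throttling once more
 * than 500 pages sit isolated off the LRU lists, waiting for parallel
 * reclaimers or compactions to put some of them back.
 */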

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within the same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated on the cc->migratepages list (not required to be
 * empty), and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn
 * field is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_lru;

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read the page order here without the zone
		 * lock, which is generally unsafe, but the race window is
		 * small and the worst thing that can happen is that we skip
		 * some potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		is_lru = PageLRU(page);
		if (!is_lru) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			goto isolate_fail;
		}

		if (!is_lru)
			goto isolate_fail;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to the buddy allocator.
		 * - this is the lowest page that was isolated and is likely
		 *   to be freed by migration next.
		 */
		if (!cc->last_migrated_pfn)
			cc->last_migrated_pfn = low_pfn;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
			acct_isolated(zone, cc);
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			cc->last_migrated_pfn = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. a pending signal.
 * Otherwise, the function returns one-past-the-last PFN of the isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for the
		 * valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
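
/*
 * Example (illustrative, pageblock_order == 9): with free_pfn == 33000 and
 * migrate_pfn == 32900, both shift down to pageblock 64, so the scanners
 * are considered to have met even though the raw PFNs still differ.
 */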

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, the zone-cached value, or the end of
	 * the zone when isolating for the first time. For looping we also
	 * need this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For the ending point, take care when isolating in the last
	 * pageblock of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to async
		 * compaction being contended, terminate the loop.
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn)
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			break;
		} else {
			/*
			 * isolate_freepages_block() should not terminate
			 * prematurely unless contended, or isolated enough
			 */
			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
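
/*
 * compaction_alloc() above and compaction_free() below form the
 * allocate/free callback pair handed to migrate_pages(). A sketch of the
 * expected call site in compact_zone(), later in this file (illustrative):
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 */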
1113d53aea3dSDavid Rientjes */ 1114d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1115d53aea3dSDavid Rientjes { 1116d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1117d53aea3dSDavid Rientjes 1118d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1119d53aea3dSDavid Rientjes cc->nr_freepages++; 1120d53aea3dSDavid Rientjes } 1121d53aea3dSDavid Rientjes 1122ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1123ff9543fdSMichal Nazarewicz typedef enum { 1124ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1125ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1126ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1127ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1128ff9543fdSMichal Nazarewicz 1129ff9543fdSMichal Nazarewicz /* 11305bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 11315bbe3547SEric B Munson * compactable pages. 11325bbe3547SEric B Munson */ 11335bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 11345bbe3547SEric B Munson 11355bbe3547SEric B Munson /* 1136edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1137edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1138edc2ca61SVlastimil Babka * compact_control. 1139ff9543fdSMichal Nazarewicz */ 1140ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1141ff9543fdSMichal Nazarewicz struct compact_control *cc) 1142ff9543fdSMichal Nazarewicz { 1143e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1144e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1145e1409c32SJoonsoo Kim unsigned long low_pfn; 1146edc2ca61SVlastimil Babka struct page *page; 1147edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 11485bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 1149edc2ca61SVlastimil Babka (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1150ff9543fdSMichal Nazarewicz 1151edc2ca61SVlastimil Babka /* 1152edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 1153edc2ca61SVlastimil Babka * initialized by compact_zone() 1154edc2ca61SVlastimil Babka */ 1155edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 115606b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 1157e1409c32SJoonsoo Kim if (block_start_pfn < zone->zone_start_pfn) 1158e1409c32SJoonsoo Kim block_start_pfn = zone->zone_start_pfn; 1159ff9543fdSMichal Nazarewicz 1160ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 116106b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1162ff9543fdSMichal Nazarewicz 1163edc2ca61SVlastimil Babka /* 1164edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1165edc2ca61SVlastimil Babka * Do not cross the free scanner. 
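 * The latter is enforced by the loop condition below: the migration
 * scanner stops as soon as block_end_pfn would move past cc->free_pfn.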
1166edc2ca61SVlastimil Babka */ 1167e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 1168e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1169e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1170e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1171edc2ca61SVlastimil Babka 1172edc2ca61SVlastimil Babka /* 1173edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1174edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1175edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 1176edc2ca61SVlastimil Babka */ 1177edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1178edc2ca61SVlastimil Babka && compact_should_abort(cc)) 1179edc2ca61SVlastimil Babka break; 1180edc2ca61SVlastimil Babka 1181e1409c32SJoonsoo Kim page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 1182e1409c32SJoonsoo Kim zone); 11837d49d886SVlastimil Babka if (!page) 1184edc2ca61SVlastimil Babka continue; 1185edc2ca61SVlastimil Babka 1186edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 1187edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 1188edc2ca61SVlastimil Babka continue; 1189edc2ca61SVlastimil Babka 1190edc2ca61SVlastimil Babka /* 1191edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 1192edc2ca61SVlastimil Babka * Async compaction is optimistic to see if the minimum amount 1193edc2ca61SVlastimil Babka * of work satisfies the allocation. 1194edc2ca61SVlastimil Babka */ 1195edc2ca61SVlastimil Babka if (cc->mode == MIGRATE_ASYNC && 1196edc2ca61SVlastimil Babka !migrate_async_suitable(get_pageblock_migratetype(page))) 1197edc2ca61SVlastimil Babka continue; 1198ff9543fdSMichal Nazarewicz 1199ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1200e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1201e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1202edc2ca61SVlastimil Babka 1203ff59909aSHugh Dickins if (!low_pfn || cc->contended) { 1204ff59909aSHugh Dickins acct_isolated(zone, cc); 1205ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1206ff59909aSHugh Dickins } 1207ff9543fdSMichal Nazarewicz 1208edc2ca61SVlastimil Babka /* 1209edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1210edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1211edc2ca61SVlastimil Babka * continue or not. 1212edc2ca61SVlastimil Babka */ 1213edc2ca61SVlastimil Babka break; 1214edc2ca61SVlastimil Babka } 1215edc2ca61SVlastimil Babka 1216edc2ca61SVlastimil Babka acct_isolated(zone, cc); 1217f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1218f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1219ff9543fdSMichal Nazarewicz 1220edc2ca61SVlastimil Babka return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; 1221ff9543fdSMichal Nazarewicz } 1222ff9543fdSMichal Nazarewicz 122321c527a3SYaowei Bai /* 122421c527a3SYaowei Bai * order == -1 is expected when compacting via 122521c527a3SYaowei Bai * /proc/sys/vm/compact_memory 122621c527a3SYaowei Bai */ 122721c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 122821c527a3SYaowei Bai { 122921c527a3SYaowei Bai return order == -1; 123021c527a3SYaowei Bai } 123121c527a3SYaowei Bai 1232ea7ab982SMichal Hocko static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc, 12336d7ce559SDavid Rientjes const int migratetype) 1234748446bbSMel Gorman { 12358fb74b9fSMel Gorman unsigned int order; 12365a03b051SAndrea Arcangeli unsigned long watermark; 123756de7263SMel Gorman 1238be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 12392d1e1041SVlastimil Babka return COMPACT_CONTENDED; 1240748446bbSMel Gorman 1241753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1242f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 124355b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 124402333641SVlastimil Babka reset_cached_positions(zone); 124555b7c4c9SVlastimil Babka 124662997027SMel Gorman /* 124762997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1248accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 124962997027SMel Gorman * flag itself as the decision to be clear should be directly 125062997027SMel Gorman * based on an allocation request. 125162997027SMel Gorman */ 1252accf6242SVlastimil Babka if (cc->direct_compaction) 125362997027SMel Gorman zone->compact_blockskip_flush = true; 125462997027SMel Gorman 1255*c8f7de0bSMichal Hocko if (cc->whole_zone) 1256748446bbSMel Gorman return COMPACT_COMPLETE; 1257*c8f7de0bSMichal Hocko else 1258*c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1259bb13ffebSMel Gorman } 1260748446bbSMel Gorman 126121c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 126256de7263SMel Gorman return COMPACT_CONTINUE; 126356de7263SMel Gorman 12643957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 12653957c776SMichal Hocko watermark = low_wmark_pages(zone); 12663957c776SMichal Hocko 1267ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, 1268ebff3980SVlastimil Babka cc->alloc_flags)) 12693957c776SMichal Hocko return COMPACT_CONTINUE; 12703957c776SMichal Hocko 127156de7263SMel Gorman /* Direct compactor: Is a suitable page free? 
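 * A free page of the requested migratetype, a MIGRATE_CMA page usable
 * by a movable allocation, or a fallback page that could be stolen
 * from another migratetype all count as success (COMPACT_PARTIAL).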
*/ 127256de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 12738fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 12742149cdaeSJoonsoo Kim bool can_steal; 12758fb74b9fSMel Gorman 127656de7263SMel Gorman /* Job done if page is free of the right migratetype */ 12776d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 127856de7263SMel Gorman return COMPACT_PARTIAL; 127956de7263SMel Gorman 12802149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 12812149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 12822149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 12832149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 12842149cdaeSJoonsoo Kim return COMPACT_PARTIAL; 12852149cdaeSJoonsoo Kim #endif 12862149cdaeSJoonsoo Kim /* 12872149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 12882149cdaeSJoonsoo Kim * other migratetype buddy lists. 12892149cdaeSJoonsoo Kim */ 12902149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 12912149cdaeSJoonsoo Kim true, &can_steal) != -1) 129256de7263SMel Gorman return COMPACT_PARTIAL; 129356de7263SMel Gorman } 129456de7263SMel Gorman 1295837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE; 1296837d026dSJoonsoo Kim } 1297837d026dSJoonsoo Kim 1298ea7ab982SMichal Hocko static enum compact_result compact_finished(struct zone *zone, 1299ea7ab982SMichal Hocko struct compact_control *cc, 1300837d026dSJoonsoo Kim const int migratetype) 1301837d026dSJoonsoo Kim { 1302837d026dSJoonsoo Kim int ret; 1303837d026dSJoonsoo Kim 1304837d026dSJoonsoo Kim ret = __compact_finished(zone, cc, migratetype); 1305837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret); 1306837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1307837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1308837d026dSJoonsoo Kim 1309837d026dSJoonsoo Kim return ret; 1310748446bbSMel Gorman } 1311748446bbSMel Gorman 13123e7d3449SMel Gorman /* 13133e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 13143e7d3449SMel Gorman * Returns 13153e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 13163e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 13173e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 13183e7d3449SMel Gorman */ 1319ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 1320c603844bSMel Gorman unsigned int alloc_flags, 1321c603844bSMel Gorman int classzone_idx) 13223e7d3449SMel Gorman { 13233e7d3449SMel Gorman int fragindex; 13243e7d3449SMel Gorman unsigned long watermark; 13253e7d3449SMel Gorman 132621c527a3SYaowei Bai if (is_via_compact_memory(order)) 13273957c776SMichal Hocko return COMPACT_CONTINUE; 13283957c776SMichal Hocko 1329ebff3980SVlastimil Babka watermark = low_wmark_pages(zone); 1330ebff3980SVlastimil Babka /* 1331ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1332ebff3980SVlastimil Babka * should be no need for compaction at all. 1333ebff3980SVlastimil Babka */ 1334ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1335ebff3980SVlastimil Babka alloc_flags)) 1336ebff3980SVlastimil Babka return COMPACT_PARTIAL; 1337ebff3980SVlastimil Babka 13383957c776SMichal Hocko /* 13393e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 
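 * (For example, an order-9 request demands an extra 2UL << 9 == 1024
 * base pages on top of the low watermark.)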
13403e7d3449SMel Gorman * This is because during migration, copies of pages need to be 13413e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 13423e7d3449SMel Gorman */ 1343ebff3980SVlastimil Babka watermark += (2UL << order); 1344ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) 13453e7d3449SMel Gorman return COMPACT_SKIPPED; 13463e7d3449SMel Gorman 13473e7d3449SMel Gorman /* 13483e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 13493e7d3449SMel Gorman * low memory or external fragmentation 13503e7d3449SMel Gorman * 1351ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1352ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 13533e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 13543e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 13553e7d3449SMel Gorman * 13563e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 13573e7d3449SMel Gorman */ 13583e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 13593e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1360837d026dSJoonsoo Kim return COMPACT_NOT_SUITABLE_ZONE; 13613e7d3449SMel Gorman 13623e7d3449SMel Gorman return COMPACT_CONTINUE; 13633e7d3449SMel Gorman } 13643e7d3449SMel Gorman 1365ea7ab982SMichal Hocko enum compact_result compaction_suitable(struct zone *zone, int order, 1366c603844bSMel Gorman unsigned int alloc_flags, 1367c603844bSMel Gorman int classzone_idx) 1368837d026dSJoonsoo Kim { 1369ea7ab982SMichal Hocko enum compact_result ret; 1370837d026dSJoonsoo Kim 1371837d026dSJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx); 1372837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1373837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1374837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1375837d026dSJoonsoo Kim 1376837d026dSJoonsoo Kim return ret; 1377837d026dSJoonsoo Kim } 1378837d026dSJoonsoo Kim 1379ea7ab982SMichal Hocko static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) 1380748446bbSMel Gorman { 1381ea7ab982SMichal Hocko enum compact_result ret; 1382c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1383108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 13846d7ce559SDavid Rientjes const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); 1385e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1386748446bbSMel Gorman 1387ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1388ebff3980SVlastimil Babka cc->classzone_idx); 13893e7d3449SMel Gorman /* Compaction is likely to fail */ 1390c46649deSMichal Hocko if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED) 13913e7d3449SMel Gorman return ret; 1392c46649deSMichal Hocko 1393c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 1394c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 13953e7d3449SMel Gorman 1396c89511abSMel Gorman /* 1397d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1398accf6242SVlastimil Babka * is about to be retried after being deferred. 
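 * (compaction_restarting() is expected to report true only after
 * deferral has saturated at COMPACT_MAX_DEFER_SHIFT, i.e. after the
 * full ~64-attempt backoff at this order has been exhausted.)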
1399d3132e4bSVlastimil Babka */ 1400accf6242SVlastimil Babka if (compaction_restarting(zone, cc->order)) 1401d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1402d3132e4bSVlastimil Babka 1403d3132e4bSVlastimil Babka /* 1404c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 1405c89511abSMel Gorman * information on where the scanners should start but check that it 1406c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries. 1407c89511abSMel Gorman */ 1408e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1409c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1410623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 141106b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 1412c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1413c89511abSMel Gorman } 1414623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 1415c89511abSMel Gorman cc->migrate_pfn = start_pfn; 141635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 141735979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1418c89511abSMel Gorman } 1419*c8f7de0bSMichal Hocko 1420*c8f7de0bSMichal Hocko if (cc->migrate_pfn == start_pfn) 1421*c8f7de0bSMichal Hocko cc->whole_zone = true; 1422*c8f7de0bSMichal Hocko 14231a16718cSJoonsoo Kim cc->last_migrated_pfn = 0; 1424748446bbSMel Gorman 142516c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 142616c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 14270eb927c0SMel Gorman 1428748446bbSMel Gorman migrate_prep_local(); 1429748446bbSMel Gorman 14306d7ce559SDavid Rientjes while ((ret = compact_finished(zone, cc, migratetype)) == 14316d7ce559SDavid Rientjes COMPACT_CONTINUE) { 14329d502c1cSMinchan Kim int err; 1433748446bbSMel Gorman 1434f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1435f9e35b3bSMel Gorman case ISOLATE_ABORT: 14362d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 14375733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1438e64c5237SShaohua Li cc->nr_migratepages = 0; 1439f9e35b3bSMel Gorman goto out; 1440f9e35b3bSMel Gorman case ISOLATE_NONE: 1441fdaf7f5cSVlastimil Babka /* 1442fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 1443fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 1444fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 
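 * Falling through to check_drain below gives those freed pages a
 * chance to be drained from the per-cpu lists and merged back into
 * the buddy allocator.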
1445fdaf7f5cSVlastimil Babka */ 1446fdaf7f5cSVlastimil Babka goto check_drain; 1447f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1448f9e35b3bSMel Gorman ; 1449f9e35b3bSMel Gorman } 1450748446bbSMel Gorman 1451d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1452e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 14537b2a2d4aSMel Gorman MR_COMPACTION); 1454748446bbSMel Gorman 1455f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1456f8c9301fSVlastimil Babka &cc->migratepages); 1457748446bbSMel Gorman 1458f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1459f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 14609d502c1cSMinchan Kim if (err) { 14615733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 14627ed695e0SVlastimil Babka /* 14637ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 14647ed695e0SVlastimil Babka * and we want compact_finished() to detect it 14657ed695e0SVlastimil Babka */ 1466f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 14672d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 14684bf2bba3SDavid Rientjes goto out; 1469748446bbSMel Gorman } 1470fdd048e1SVlastimil Babka /* 1471fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 1472fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 1473fdd048e1SVlastimil Babka */ 1474fdd048e1SVlastimil Babka if (cc->direct_compaction && 1475fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 1476fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 1477fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 1478fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 1479fdd048e1SVlastimil Babka cc->last_migrated_pfn = 0; 1480fdd048e1SVlastimil Babka 1481fdd048e1SVlastimil Babka } 14824bf2bba3SDavid Rientjes } 1483fdaf7f5cSVlastimil Babka 1484fdaf7f5cSVlastimil Babka check_drain: 1485fdaf7f5cSVlastimil Babka /* 1486fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 1487fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 1488fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 1489fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 1490fdaf7f5cSVlastimil Babka * would succeed. 1491fdaf7f5cSVlastimil Babka */ 14921a16718cSJoonsoo Kim if (cc->order > 0 && cc->last_migrated_pfn) { 1493fdaf7f5cSVlastimil Babka int cpu; 1494fdaf7f5cSVlastimil Babka unsigned long current_block_start = 149506b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 1496fdaf7f5cSVlastimil Babka 14971a16718cSJoonsoo Kim if (cc->last_migrated_pfn < current_block_start) { 1498fdaf7f5cSVlastimil Babka cpu = get_cpu(); 1499fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 1500fdaf7f5cSVlastimil Babka drain_local_pages(zone); 1501fdaf7f5cSVlastimil Babka put_cpu(); 1502fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 15031a16718cSJoonsoo Kim cc->last_migrated_pfn = 0; 1504fdaf7f5cSVlastimil Babka } 1505fdaf7f5cSVlastimil Babka } 1506fdaf7f5cSVlastimil Babka 1507748446bbSMel Gorman } 1508748446bbSMel Gorman 1509f9e35b3bSMel Gorman out: 15106bace090SVlastimil Babka /* 15116bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 15126bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 
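 * The pfn is rounded down to its pageblock start, and the cached value
 * is only ever raised: the free scanner works from high to low pfns,
 * so a higher restart point means rescanning, never skipping.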
15136bace090SVlastimil Babka */ 15146bace090SVlastimil Babka if (cc->nr_freepages > 0) { 15156bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 15166bace090SVlastimil Babka 15176bace090SVlastimil Babka cc->nr_freepages = 0; 15186bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 15196bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 152006b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 15216bace090SVlastimil Babka /* 15226bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 15236bace090SVlastimil Babka * already reset to zone end in compact_finished() 15246bace090SVlastimil Babka */ 15256bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn) 15266bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn; 15276bace090SVlastimil Babka } 1528748446bbSMel Gorman 152916c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 153016c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 15310eb927c0SMel Gorman 15322d1e1041SVlastimil Babka if (ret == COMPACT_CONTENDED) 15332d1e1041SVlastimil Babka ret = COMPACT_PARTIAL; 15342d1e1041SVlastimil Babka 1535748446bbSMel Gorman return ret; 1536748446bbSMel Gorman } 153776ab0f53SMel Gorman 1538ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 1539ebff3980SVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1540c603844bSMel Gorman unsigned int alloc_flags, int classzone_idx) 154156de7263SMel Gorman { 1542ea7ab982SMichal Hocko enum compact_result ret; 154356de7263SMel Gorman struct compact_control cc = { 154456de7263SMel Gorman .nr_freepages = 0, 154556de7263SMel Gorman .nr_migratepages = 0, 154656de7263SMel Gorman .order = order, 15476d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 154856de7263SMel Gorman .zone = zone, 1549e0b9daebSDavid Rientjes .mode = mode, 1550ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 1551ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 1552accf6242SVlastimil Babka .direct_compaction = true, 155356de7263SMel Gorman }; 155456de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 155556de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 155656de7263SMel Gorman 1557e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1558e64c5237SShaohua Li 1559e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1560e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1561e64c5237SShaohua Li 1562e64c5237SShaohua Li *contended = cc.contended; 1563e64c5237SShaohua Li return ret; 156456de7263SMel Gorman } 156556de7263SMel Gorman 15665e771905SMel Gorman int sysctl_extfrag_threshold = 500; 15675e771905SMel Gorman 156856de7263SMel Gorman /** 156956de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 157056de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 15711a6d53a1SVlastimil Babka * @order: The order of the current allocation 15721a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 15731a6d53a1SVlastimil Babka * @ac: The context of current allocation 1574e0b9daebSDavid Rientjes * @mode: The migration mode for async, sync light, or sync migration 15751f9efdefSVlastimil Babka * @contended: Return value that determines if compaction was aborted due to 15761f9efdefSVlastimil Babka * need_resched() or lock contention 157756de7263SMel Gorman * 157856de7263SMel Gorman * This is the main entry point for direct page compaction. 
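 *
 * Returns the highest compact_result reached across the zones tried, or
 * COMPACT_SKIPPED when the GFP flags do not allow compaction at all.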
157956de7263SMel Gorman */ 1580ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 1581c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 15821a6d53a1SVlastimil Babka enum migrate_mode mode, int *contended) 158356de7263SMel Gorman { 158456de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 158556de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 158656de7263SMel Gorman struct zoneref *z; 158756de7263SMel Gorman struct zone *zone; 15881d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 15891f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ 15901f9efdefSVlastimil Babka 15911f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE; 159256de7263SMel Gorman 15934ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1594c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 159553853e2dSVlastimil Babka return COMPACT_SKIPPED; 159656de7263SMel Gorman 1597837d026dSJoonsoo Kim trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1598837d026dSJoonsoo Kim 159956de7263SMel Gorman /* Compact each zone in the list */ 16001a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 16011a6d53a1SVlastimil Babka ac->nodemask) { 1602ea7ab982SMichal Hocko enum compact_result status; 16031f9efdefSVlastimil Babka int zone_contended; 160456de7263SMel Gorman 16051d4746d3SMichal Hocko if (compaction_deferred(zone, order)) { 16061d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 160753853e2dSVlastimil Babka continue; 16081d4746d3SMichal Hocko } 160953853e2dSVlastimil Babka 1610e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode, 16111a6d53a1SVlastimil Babka &zone_contended, alloc_flags, 161293ea9964SMel Gorman ac_classzone_idx(ac)); 161356de7263SMel Gorman rc = max(status, rc); 16141f9efdefSVlastimil Babka /* 16151f9efdefSVlastimil Babka * It takes at least one zone that wasn't lock contended 16161f9efdefSVlastimil Babka * to clear all_zones_contended. 16171f9efdefSVlastimil Babka */ 16181f9efdefSVlastimil Babka all_zones_contended &= zone_contended; 161956de7263SMel Gorman 16203e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1621ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 162293ea9964SMel Gorman ac_classzone_idx(ac), alloc_flags)) { 162353853e2dSVlastimil Babka /* 162453853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 162553853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 162653853e2dSVlastimil Babka * will repeat this with true if allocation indeed 162753853e2dSVlastimil Babka * succeeds in this zone. 162853853e2dSVlastimil Babka */ 162953853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 16301f9efdefSVlastimil Babka /* 16311f9efdefSVlastimil Babka * It is possible that async compaction aborted due to 16321f9efdefSVlastimil Babka * need_resched() and the watermarks were ok thanks to 16331f9efdefSVlastimil Babka * somebody else freeing memory. The allocation can 16341f9efdefSVlastimil Babka * however still fail so we better signal the 16351f9efdefSVlastimil Babka * need_resched() contention anyway (this will not 16361f9efdefSVlastimil Babka * prevent the allocation attempt). 
16371f9efdefSVlastimil Babka */ 16381f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED) 16391f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 16401f9efdefSVlastimil Babka 16411f9efdefSVlastimil Babka goto break_loop; 16421f9efdefSVlastimil Babka } 16431f9efdefSVlastimil Babka 1644*c8f7de0bSMichal Hocko if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE || 1645*c8f7de0bSMichal Hocko status == COMPACT_PARTIAL_SKIPPED)) { 164653853e2dSVlastimil Babka /* 164753853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 164853853e2dSVlastimil Babka * so we defer compaction there. If it ends up 164953853e2dSVlastimil Babka * succeeding after all, it will be reset. 165053853e2dSVlastimil Babka */ 165153853e2dSVlastimil Babka defer_compaction(zone, order); 165253853e2dSVlastimil Babka } 16531f9efdefSVlastimil Babka 16541f9efdefSVlastimil Babka /* 16551f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 16561f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 16571f9efdefSVlastimil Babka * case do not try further zones and signal need_resched() 16581f9efdefSVlastimil Babka * contention. 16591f9efdefSVlastimil Babka */ 16601f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED) 16611f9efdefSVlastimil Babka || fatal_signal_pending(current)) { 16621f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 16631f9efdefSVlastimil Babka goto break_loop; 166456de7263SMel Gorman } 166556de7263SMel Gorman 16661f9efdefSVlastimil Babka continue; 16671f9efdefSVlastimil Babka break_loop: 16681f9efdefSVlastimil Babka /* 16691f9efdefSVlastimil Babka * We might not have tried all the zones, so be conservative 16701f9efdefSVlastimil Babka * and assume they are not all lock contended. 16711f9efdefSVlastimil Babka */ 16721f9efdefSVlastimil Babka all_zones_contended = 0; 16731f9efdefSVlastimil Babka break; 16741f9efdefSVlastimil Babka } 16751f9efdefSVlastimil Babka 16761f9efdefSVlastimil Babka /* 16771f9efdefSVlastimil Babka * If at least one zone wasn't deferred or skipped, we report if all 16781f9efdefSVlastimil Babka * zones that were tried were lock contended. 
16791f9efdefSVlastimil Babka */ 16801d4746d3SMichal Hocko if (rc > COMPACT_INACTIVE && all_zones_contended) 16811f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK; 16821f9efdefSVlastimil Babka 168356de7263SMel Gorman return rc; 168456de7263SMel Gorman } 168556de7263SMel Gorman 168656de7263SMel Gorman 168776ab0f53SMel Gorman /* Compact all zones within a node */ 16887103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 168976ab0f53SMel Gorman { 169076ab0f53SMel Gorman int zoneid; 169176ab0f53SMel Gorman struct zone *zone; 169276ab0f53SMel Gorman 169376ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 169476ab0f53SMel Gorman 169576ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 169676ab0f53SMel Gorman if (!populated_zone(zone)) 169776ab0f53SMel Gorman continue; 169876ab0f53SMel Gorman 16997be62de9SRik van Riel cc->nr_freepages = 0; 17007be62de9SRik van Riel cc->nr_migratepages = 0; 17017be62de9SRik van Riel cc->zone = zone; 17027be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 17037be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 170476ab0f53SMel Gorman 1705195b0c60SGioh Kim /* 1706195b0c60SGioh Kim * When called via /proc/sys/vm/compact_memory 1707195b0c60SGioh Kim * this makes sure we compact the whole zone regardless of 1708195b0c60SGioh Kim * cached scanner positions. 1709195b0c60SGioh Kim */ 171021c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 1711195b0c60SGioh Kim __reset_isolation_suitable(zone); 1712195b0c60SGioh Kim 171321c527a3SYaowei Bai if (is_via_compact_memory(cc->order) || 171421c527a3SYaowei Bai !compaction_deferred(zone, cc->order)) 17157be62de9SRik van Riel compact_zone(zone, cc); 171676ab0f53SMel Gorman 171775469345SJoonsoo Kim VM_BUG_ON(!list_empty(&cc->freepages)); 171875469345SJoonsoo Kim VM_BUG_ON(!list_empty(&cc->migratepages)); 171975469345SJoonsoo Kim 172075469345SJoonsoo Kim if (is_via_compact_memory(cc->order)) 172175469345SJoonsoo Kim continue; 172275469345SJoonsoo Kim 1723de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1724de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1725de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1726aff62249SRik van Riel } 172776ab0f53SMel Gorman } 172876ab0f53SMel Gorman 17297103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 17307be62de9SRik van Riel { 17317be62de9SRik van Riel struct compact_control cc = { 17327be62de9SRik van Riel .order = order, 1733e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC, 17347be62de9SRik van Riel }; 17357be62de9SRik van Riel 17363a7200afSMel Gorman if (!order) 17373a7200afSMel Gorman return; 17383a7200afSMel Gorman 17397103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 17407be62de9SRik van Riel } 17417be62de9SRik van Riel 17427103f16dSAndrew Morton static void compact_node(int nid) 17437be62de9SRik van Riel { 17447be62de9SRik van Riel struct compact_control cc = { 17457be62de9SRik van Riel .order = -1, 1746e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 174791ca9186SDavid Rientjes .ignore_skip_hint = true, 17487be62de9SRik van Riel }; 17497be62de9SRik van Riel 17507103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 17517be62de9SRik van Riel } 17527be62de9SRik van Riel 175376ab0f53SMel Gorman /* Compact all nodes in the system */ 17547964c06dSJason Liu static void compact_nodes(void) 175576ab0f53SMel Gorman { 175676ab0f53SMel Gorman int nid; 175776ab0f53SMel Gorman 17588575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 
17598575ec29SHugh Dickins lru_add_drain_all(); 17608575ec29SHugh Dickins 176176ab0f53SMel Gorman for_each_online_node(nid) 176276ab0f53SMel Gorman compact_node(nid); 176376ab0f53SMel Gorman } 176476ab0f53SMel Gorman 176576ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 176676ab0f53SMel Gorman int sysctl_compact_memory; 176776ab0f53SMel Gorman 1768fec4eb2cSYaowei Bai /* 1769fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 1770fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 1771fec4eb2cSYaowei Bai */ 177276ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 177376ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 177476ab0f53SMel Gorman { 177576ab0f53SMel Gorman if (write) 17767964c06dSJason Liu compact_nodes(); 177776ab0f53SMel Gorman 177876ab0f53SMel Gorman return 0; 177976ab0f53SMel Gorman } 1780ed4a6d7fSMel Gorman 17815e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 17825e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 17835e771905SMel Gorman { 17845e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 17855e771905SMel Gorman 17865e771905SMel Gorman return 0; 17875e771905SMel Gorman } 17885e771905SMel Gorman 1789ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 179074e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 179110fbcf4cSKay Sievers struct device_attribute *attr, 1792ed4a6d7fSMel Gorman const char *buf, size_t count) 1793ed4a6d7fSMel Gorman { 17948575ec29SHugh Dickins int nid = dev->id; 17958575ec29SHugh Dickins 17968575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 17978575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 17988575ec29SHugh Dickins lru_add_drain_all(); 17998575ec29SHugh Dickins 18008575ec29SHugh Dickins compact_node(nid); 18018575ec29SHugh Dickins } 1802ed4a6d7fSMel Gorman 1803ed4a6d7fSMel Gorman return count; 1804ed4a6d7fSMel Gorman } 180510fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1806ed4a6d7fSMel Gorman 1807ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1808ed4a6d7fSMel Gorman { 180910fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1810ed4a6d7fSMel Gorman } 1811ed4a6d7fSMel Gorman 1812ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1813ed4a6d7fSMel Gorman { 181410fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1815ed4a6d7fSMel Gorman } 1816ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1817ff9543fdSMichal Nazarewicz 1818698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 1819698b1b30SVlastimil Babka { 1820172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 1821698b1b30SVlastimil Babka } 1822698b1b30SVlastimil Babka 1823698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 1824698b1b30SVlastimil Babka { 1825698b1b30SVlastimil Babka int zoneid; 1826698b1b30SVlastimil Babka struct zone *zone; 1827698b1b30SVlastimil Babka enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 1828698b1b30SVlastimil Babka 1829698b1b30SVlastimil Babka for (zoneid = 0; zoneid < classzone_idx; zoneid++) { 1830698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1831698b1b30SVlastimil Babka 1832698b1b30SVlastimil Babka if (!populated_zone(zone)) 
1833698b1b30SVlastimil Babka continue; 1834698b1b30SVlastimil Babka 1835698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 1836698b1b30SVlastimil Babka classzone_idx) == COMPACT_CONTINUE) 1837698b1b30SVlastimil Babka return true; 1838698b1b30SVlastimil Babka } 1839698b1b30SVlastimil Babka 1840698b1b30SVlastimil Babka return false; 1841698b1b30SVlastimil Babka } 1842698b1b30SVlastimil Babka 1843698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 1844698b1b30SVlastimil Babka { 1845698b1b30SVlastimil Babka /* 1846698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 1847698b1b30SVlastimil Babka * order is allocatable. 1848698b1b30SVlastimil Babka */ 1849698b1b30SVlastimil Babka int zoneid; 1850698b1b30SVlastimil Babka struct zone *zone; 1851698b1b30SVlastimil Babka struct compact_control cc = { 1852698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 1853698b1b30SVlastimil Babka .classzone_idx = pgdat->kcompactd_classzone_idx, 1854698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 1855698b1b30SVlastimil Babka .ignore_skip_hint = true, 1856698b1b30SVlastimil Babka 1857698b1b30SVlastimil Babka }; 1858698b1b30SVlastimil Babka bool success = false; 1859698b1b30SVlastimil Babka 1860698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 1861698b1b30SVlastimil Babka cc.classzone_idx); 1862698b1b30SVlastimil Babka count_vm_event(KCOMPACTD_WAKE); 1863698b1b30SVlastimil Babka 1864698b1b30SVlastimil Babka for (zoneid = 0; zoneid < cc.classzone_idx; zoneid++) { 1865698b1b30SVlastimil Babka int status; 1866698b1b30SVlastimil Babka 1867698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 1868698b1b30SVlastimil Babka if (!populated_zone(zone)) 1869698b1b30SVlastimil Babka continue; 1870698b1b30SVlastimil Babka 1871698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 1872698b1b30SVlastimil Babka continue; 1873698b1b30SVlastimil Babka 1874698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 1875698b1b30SVlastimil Babka COMPACT_CONTINUE) 1876698b1b30SVlastimil Babka continue; 1877698b1b30SVlastimil Babka 1878698b1b30SVlastimil Babka cc.nr_freepages = 0; 1879698b1b30SVlastimil Babka cc.nr_migratepages = 0; 1880698b1b30SVlastimil Babka cc.zone = zone; 1881698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 1882698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 1883698b1b30SVlastimil Babka 1884172400c6SVlastimil Babka if (kthread_should_stop()) 1885172400c6SVlastimil Babka return; 1886698b1b30SVlastimil Babka status = compact_zone(zone, &cc); 1887698b1b30SVlastimil Babka 1888698b1b30SVlastimil Babka if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone), 1889698b1b30SVlastimil Babka cc.classzone_idx, 0)) { 1890698b1b30SVlastimil Babka success = true; 1891698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 1892*c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 1893698b1b30SVlastimil Babka /* 1894698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 1895698b1b30SVlastimil Babka * sync direct compaction does. 
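 * (cc.mode above is MIGRATE_SYNC_LIGHT, so a completed pass that still
 * fails the watermark check is deferred, mirroring what
 * try_to_compact_pages() does for the synchronous modes.)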
1896698b1b30SVlastimil Babka */ 1897698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 1898698b1b30SVlastimil Babka } 1899698b1b30SVlastimil Babka 1900698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 1901698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 1902698b1b30SVlastimil Babka } 1903698b1b30SVlastimil Babka 1904698b1b30SVlastimil Babka /* 1905698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 1906698b1b30SVlastimil Babka * the requested order/classzone_idx in case it was higher/tighter than 1907698b1b30SVlastimil Babka * our current ones 1908698b1b30SVlastimil Babka */ 1909698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 1910698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 1911698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 1912698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 1913698b1b30SVlastimil Babka } 1914698b1b30SVlastimil Babka 1915698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 1916698b1b30SVlastimil Babka { 1917698b1b30SVlastimil Babka if (!order) 1918698b1b30SVlastimil Babka return; 1919698b1b30SVlastimil Babka 1920698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 1921698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 1922698b1b30SVlastimil Babka 1923698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx > classzone_idx) 1924698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = classzone_idx; 1925698b1b30SVlastimil Babka 1926698b1b30SVlastimil Babka if (!waitqueue_active(&pgdat->kcompactd_wait)) 1927698b1b30SVlastimil Babka return; 1928698b1b30SVlastimil Babka 1929698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 1930698b1b30SVlastimil Babka return; 1931698b1b30SVlastimil Babka 1932698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 1933698b1b30SVlastimil Babka classzone_idx); 1934698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 1935698b1b30SVlastimil Babka } 1936698b1b30SVlastimil Babka 1937698b1b30SVlastimil Babka /* 1938698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 1939698b1b30SVlastimil Babka * from the init process. 
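 * One instance runs per NUMA node. It sleeps freezably on
 * kcompactd_wait and is woken by wakeup_kcompactd() once a high-order
 * allocation has recorded work to do in kcompactd_max_order.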
1940698b1b30SVlastimil Babka */ 1941698b1b30SVlastimil Babka static int kcompactd(void *p) 1942698b1b30SVlastimil Babka { 1943698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p; 1944698b1b30SVlastimil Babka struct task_struct *tsk = current; 1945698b1b30SVlastimil Babka 1946698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1947698b1b30SVlastimil Babka 1948698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 1949698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 1950698b1b30SVlastimil Babka 1951698b1b30SVlastimil Babka set_freezable(); 1952698b1b30SVlastimil Babka 1953698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 1954698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 1955698b1b30SVlastimil Babka 1956698b1b30SVlastimil Babka while (!kthread_should_stop()) { 1957698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 1958698b1b30SVlastimil Babka wait_event_freezable(pgdat->kcompactd_wait, 1959698b1b30SVlastimil Babka kcompactd_work_requested(pgdat)); 1960698b1b30SVlastimil Babka 1961698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 1962698b1b30SVlastimil Babka } 1963698b1b30SVlastimil Babka 1964698b1b30SVlastimil Babka return 0; 1965698b1b30SVlastimil Babka } 1966698b1b30SVlastimil Babka 1967698b1b30SVlastimil Babka /* 1968698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 1969698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added. 1970698b1b30SVlastimil Babka */ 1971698b1b30SVlastimil Babka int kcompactd_run(int nid) 1972698b1b30SVlastimil Babka { 1973698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 1974698b1b30SVlastimil Babka int ret = 0; 1975698b1b30SVlastimil Babka 1976698b1b30SVlastimil Babka if (pgdat->kcompactd) 1977698b1b30SVlastimil Babka return 0; 1978698b1b30SVlastimil Babka 1979698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 1980698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 1981698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 1982698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd); 1983698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 1984698b1b30SVlastimil Babka } 1985698b1b30SVlastimil Babka return ret; 1986698b1b30SVlastimil Babka } 1987698b1b30SVlastimil Babka 1988698b1b30SVlastimil Babka /* 1989698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 1990698b1b30SVlastimil Babka * hold mem_hotplug_begin/end(). 1991698b1b30SVlastimil Babka */ 1992698b1b30SVlastimil Babka void kcompactd_stop(int nid) 1993698b1b30SVlastimil Babka { 1994698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 1995698b1b30SVlastimil Babka 1996698b1b30SVlastimil Babka if (kcompactd) { 1997698b1b30SVlastimil Babka kthread_stop(kcompactd); 1998698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 1999698b1b30SVlastimil Babka } 2000698b1b30SVlastimil Babka } 2001698b1b30SVlastimil Babka 2002698b1b30SVlastimil Babka /* 2003698b1b30SVlastimil Babka * It's optimal to keep kcompactd threads on the same CPUs as their memory, but 2004698b1b30SVlastimil Babka * not required for correctness. So if the last cpu in a node goes 2005698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2006698b1b30SVlastimil Babka * restore their cpu bindings.
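 * (Handled by the CPU_ONLINE notifier below, which re-applies the
 * node's cpumask to its kcompactd task.)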
2007698b1b30SVlastimil Babka */ 2008698b1b30SVlastimil Babka static int cpu_callback(struct notifier_block *nfb, unsigned long action, 2009698b1b30SVlastimil Babka void *hcpu) 2010698b1b30SVlastimil Babka { 2011698b1b30SVlastimil Babka int nid; 2012698b1b30SVlastimil Babka 2013698b1b30SVlastimil Babka if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 2014698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2015698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2016698b1b30SVlastimil Babka const struct cpumask *mask; 2017698b1b30SVlastimil Babka 2018698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2019698b1b30SVlastimil Babka 2020698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2021698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2022698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2023698b1b30SVlastimil Babka } 2024698b1b30SVlastimil Babka } 2025698b1b30SVlastimil Babka return NOTIFY_OK; 2026698b1b30SVlastimil Babka } 2027698b1b30SVlastimil Babka 2028698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2029698b1b30SVlastimil Babka { 2030698b1b30SVlastimil Babka int nid; 2031698b1b30SVlastimil Babka 2032698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2033698b1b30SVlastimil Babka kcompactd_run(nid); 2034698b1b30SVlastimil Babka hotcpu_notifier(cpu_callback, 0); 2035698b1b30SVlastimil Babka return 0; 2036698b1b30SVlastimil Babka } 2037698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2038698b1b30SVlastimil Babka 2039ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */