// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
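
/*
 * For example, with pageblock_order == 9 (512 pages, the typical size on
 * x86-64 with THP enabled), block_start_pfn(1000, 9) == 512 and
 * block_end_pfn(1000, 9) == 1024: a PFN is rounded down/up to the
 * boundaries of the order-9 block enclosing it.
 */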

/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;

/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}
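
/*
 * For example, a single order-2 entry on the list is prepared by
 * post_alloc_hook(), split into four order-0 pages, and all four are
 * linked back onto the list, so callers only ever see base pages.
 */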

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can detect a page
	 * released by its driver after isolation; migration then doesn't
	 * try to put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compaction attempts
 * are skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}
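
/*
 * The back-off is exponential: each failure doubles the deferral window
 * (2, 4, 8, ... considered attempts per allowed retry), capped at
 * 1 << COMPACT_MAX_DEFER_SHIFT == 64 by defer_compaction() above.
 */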

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}
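
/*
 * After a reset, the migrate scanner restarts from the bottom of the
 * zone and the free scanner from the last pageblock, so the two
 * scanners once again converge from opposite ends of the zone.
 */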

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (pfn_valid_within(pfn)) {
			if (check_source && PageLRU(page)) {
				clear_pageblock_skip(page);
				return true;
			}

			if (check_target && PageBuddy(page)) {
				clear_pageblock_skip(page);
				return true;
			}
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}
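
/*
 * Note the sampling stride above: 1 << PAGE_ALLOC_COSTLY_ORDER is 8
 * pages, so in a typical 512-page pageblock at most 64 pages are probed
 * rather than every page in the block.
 */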

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
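
/*
 * Note that __reset_isolation_suitable() walks inward from both ends of
 * the zone at once, one pageblock per iteration from each side, so a
 * full skip-hint reset visits each pageblock at most once per flush.
 */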

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if the skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}
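
/*
 * compact_cached_migrate_pfn[0] caches the async scanner position and
 * [1] the sync position. An async pass only advances [0], so sync
 * compaction still revisits blocks that an async pass gave up on.
 */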

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */
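
/*
 * When CONFIG_COMPACTION is disabled (e.g. a CMA-only build), the stubs
 * above make every pageblock look suitable and turn all skip-hint
 * bookkeeping into no-ops.
 */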

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will abort if a fatal signal is pending.
 * If the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue (it might have scheduled).
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
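
/*
 * The scanners below pair these helpers as follows: take the lock
 * lazily with compact_lock_irqsave() only once a page must actually be
 * isolated, and every SWAP_CLUSTER_MAX pfns call
 * compact_unlock_should_abort() to drop the lock, give IRQs a chance
 * and check for abort conditions.
 */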

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if a fatal
		 * signal is pending.
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
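
/*
 * For example, on a node with 600 active and 1000 inactive LRU pages,
 * isolation is considered excessive once more than 800 pages sit on the
 * isolated counters, and the caller below backs off and waits.
 */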

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within the same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return 0;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&pgdat->lru_lock,
					flags, &locked, cc)) {
			low_pfn = 0;
			goto fatal_pending;
		}

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(&pgdat->lru_lock,
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
			goto isolate_fail_put;

		/* Try to isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_lock_irqsave(&pgdat->lru_lock,
								&flags, cc);

			/* Try to get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			locked = false;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future, or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
First and last block may be incomplete */ 1141edc2ca61SVlastimil Babka pfn = start_pfn; 114206b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 1143e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 1144e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 114506b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 1146edc2ca61SVlastimil Babka 1147edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 1148e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1149edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 1150edc2ca61SVlastimil Babka 1151edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 1152edc2ca61SVlastimil Babka 1153e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 1154e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 1155edc2ca61SVlastimil Babka continue; 1156edc2ca61SVlastimil Babka 1157edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 1158edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 1159edc2ca61SVlastimil Babka 116014af4a5eSHugh Dickins if (!pfn) 1161edc2ca61SVlastimil Babka break; 11626ea41c0cSJoonsoo Kim 116338935861SZi Yan if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) 11646ea41c0cSJoonsoo Kim break; 1165edc2ca61SVlastimil Babka } 1166edc2ca61SVlastimil Babka 1167edc2ca61SVlastimil Babka return pfn; 1168edc2ca61SVlastimil Babka } 1169edc2ca61SVlastimil Babka 1170ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1171ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 1172018e9a49SAndrew Morton 1173b682debdSVlastimil Babka static bool suitable_migration_source(struct compact_control *cc, 1174b682debdSVlastimil Babka struct page *page) 1175b682debdSVlastimil Babka { 1176282722b0SVlastimil Babka int block_mt; 1177282722b0SVlastimil Babka 11789bebefd5SMel Gorman if (pageblock_skip_persistent(page)) 11799bebefd5SMel Gorman return false; 11809bebefd5SMel Gorman 1181282722b0SVlastimil Babka if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) 1182b682debdSVlastimil Babka return true; 1183b682debdSVlastimil Babka 1184282722b0SVlastimil Babka block_mt = get_pageblock_migratetype(page); 1185282722b0SVlastimil Babka 1186282722b0SVlastimil Babka if (cc->migratetype == MIGRATE_MOVABLE) 1187282722b0SVlastimil Babka return is_migrate_movable(block_mt); 1188282722b0SVlastimil Babka else 1189282722b0SVlastimil Babka return block_mt == cc->migratetype; 1190b682debdSVlastimil Babka } 1191b682debdSVlastimil Babka 1192018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */ 11939f7e3387SVlastimil Babka static bool suitable_migration_target(struct compact_control *cc, 11949f7e3387SVlastimil Babka struct page *page) 1195018e9a49SAndrew Morton { 1196018e9a49SAndrew Morton /* If the page is a large free page, then disallow migration */ 1197018e9a49SAndrew Morton if (PageBuddy(page)) { 1198018e9a49SAndrew Morton /* 1199018e9a49SAndrew Morton * We are checking page_order without zone->lock taken. But 1200018e9a49SAndrew Morton * the only small danger is that we skip a potentially suitable 1201018e9a49SAndrew Morton * pageblock, so it's not worth checking the order against a valid range.
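 * Note that buddy_order_unsafe() is a READ_ONCE() of the buddy order, so the racy read cannot be torn; at worst a stale value makes us skip, or later rescan, one pageblock.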
1202018e9a49SAndrew Morton */ 1203ab130f91SMatthew Wilcox (Oracle) if (buddy_order_unsafe(page) >= pageblock_order) 1204018e9a49SAndrew Morton return false; 1205018e9a49SAndrew Morton } 1206018e9a49SAndrew Morton 12071ef36db2SYisheng Xie if (cc->ignore_block_suitable) 12081ef36db2SYisheng Xie return true; 12091ef36db2SYisheng Xie 1210018e9a49SAndrew Morton /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1211b682debdSVlastimil Babka if (is_migrate_movable(get_pageblock_migratetype(page))) 1212018e9a49SAndrew Morton return true; 1213018e9a49SAndrew Morton 1214018e9a49SAndrew Morton /* Otherwise skip the block */ 1215018e9a49SAndrew Morton return false; 1216018e9a49SAndrew Morton } 1217018e9a49SAndrew Morton 121870b44595SMel Gorman static inline unsigned int 121970b44595SMel Gorman freelist_scan_limit(struct compact_control *cc) 122070b44595SMel Gorman { 1221dd7ef7bdSQian Cai unsigned short shift = BITS_PER_LONG - 1; 1222dd7ef7bdSQian Cai 1223dd7ef7bdSQian Cai return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; 122470b44595SMel Gorman } 122570b44595SMel Gorman 1226ff9543fdSMichal Nazarewicz /* 1227f2849aa0SVlastimil Babka * Test whether the free scanner has reached the same or lower pageblock than 1228f2849aa0SVlastimil Babka * the migration scanner, and compaction should thus terminate. 1229f2849aa0SVlastimil Babka */ 1230f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc) 1231f2849aa0SVlastimil Babka { 1232f2849aa0SVlastimil Babka return (cc->free_pfn >> pageblock_order) 1233f2849aa0SVlastimil Babka <= (cc->migrate_pfn >> pageblock_order); 1234f2849aa0SVlastimil Babka } 1235f2849aa0SVlastimil Babka 12365a811889SMel Gorman /* 12375a811889SMel Gorman * Used when scanning for a suitable migration target which scans freelists 12385a811889SMel Gorman * in reverse. Reorders the list such that the unscanned pages are scanned 12395a811889SMel Gorman * first on the next iteration of the free scanner. 12405a811889SMel Gorman */ 12415a811889SMel Gorman static void 12425a811889SMel Gorman move_freelist_head(struct list_head *freelist, struct page *freepage) 12435a811889SMel Gorman { 12445a811889SMel Gorman LIST_HEAD(sublist); 12455a811889SMel Gorman 12465a811889SMel Gorman if (!list_is_last(freelist, &freepage->lru)) { 12475a811889SMel Gorman list_cut_before(&sublist, freelist, &freepage->lru); 12485a811889SMel Gorman if (!list_empty(&sublist)) 12495a811889SMel Gorman list_splice_tail(&sublist, freelist); 12505a811889SMel Gorman } 12515a811889SMel Gorman } 12525a811889SMel Gorman 12535a811889SMel Gorman /* 12545a811889SMel Gorman * Similar to move_freelist_head except used by the migration scanner 12555a811889SMel Gorman * when scanning forward. It's possible for these list operations to 12565a811889SMel Gorman * move against each other if they search the free list exactly in 12575a811889SMel Gorman * lockstep.
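 * As a worked example for move_freelist_tail() below: with freepage == C, a freelist A-B-C-D becomes D-A-B-C, so the next forward walk starts at the entry that has not been visited recently.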
12585a811889SMel Gorman */ 125970b44595SMel Gorman static void 126070b44595SMel Gorman move_freelist_tail(struct list_head *freelist, struct page *freepage) 126170b44595SMel Gorman { 126270b44595SMel Gorman LIST_HEAD(sublist); 126370b44595SMel Gorman 126470b44595SMel Gorman if (!list_is_first(freelist, &freepage->lru)) { 126570b44595SMel Gorman list_cut_position(&sublist, freelist, &freepage->lru); 126670b44595SMel Gorman if (!list_empty(&sublist)) 126770b44595SMel Gorman list_splice_tail(&sublist, freelist); 126870b44595SMel Gorman } 126970b44595SMel Gorman } 127070b44595SMel Gorman 12715a811889SMel Gorman static void 12725a811889SMel Gorman fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) 12735a811889SMel Gorman { 12745a811889SMel Gorman unsigned long start_pfn, end_pfn; 12755a811889SMel Gorman struct page *page = pfn_to_page(pfn); 12765a811889SMel Gorman 12775a811889SMel Gorman /* Do not search around if there are enough pages already */ 12785a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 12795a811889SMel Gorman return; 12805a811889SMel Gorman 12815a811889SMel Gorman /* Minimise scanning during async compaction */ 12825a811889SMel Gorman if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) 12835a811889SMel Gorman return; 12845a811889SMel Gorman 12855a811889SMel Gorman /* Pageblock boundaries */ 12865a811889SMel Gorman start_pfn = pageblock_start_pfn(pfn); 128760fce36aSMel Gorman end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1; 12885a811889SMel Gorman 12895a811889SMel Gorman /* Scan before */ 12905a811889SMel Gorman if (start_pfn != pfn) { 12914fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); 12925a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 12935a811889SMel Gorman return; 12945a811889SMel Gorman } 12955a811889SMel Gorman 12965a811889SMel Gorman /* Scan after */ 12975a811889SMel Gorman start_pfn = pfn + nr_isolated; 129860fce36aSMel Gorman if (start_pfn < end_pfn) 12994fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); 13005a811889SMel Gorman 13015a811889SMel Gorman /* Skip this pageblock in the future as it's full or nearly full */ 13025a811889SMel Gorman if (cc->nr_freepages < cc->nr_migratepages) 13035a811889SMel Gorman set_pageblock_skip(page); 13045a811889SMel Gorman } 13055a811889SMel Gorman 1306dbe2d4e4SMel Gorman /* Search orders in round-robin fashion */ 1307dbe2d4e4SMel Gorman static int next_search_order(struct compact_control *cc, int order) 1308dbe2d4e4SMel Gorman { 1309dbe2d4e4SMel Gorman order--; 1310dbe2d4e4SMel Gorman if (order < 0) 1311dbe2d4e4SMel Gorman order = cc->order - 1; 1312dbe2d4e4SMel Gorman 1313dbe2d4e4SMel Gorman /* Search wrapped around? 
*/ 1314dbe2d4e4SMel Gorman if (order == cc->search_order) { 1315dbe2d4e4SMel Gorman cc->search_order--; 1316dbe2d4e4SMel Gorman if (cc->search_order < 0) 1317dbe2d4e4SMel Gorman cc->search_order = cc->order - 1; 1318dbe2d4e4SMel Gorman return -1; 1319dbe2d4e4SMel Gorman } 1320dbe2d4e4SMel Gorman 1321dbe2d4e4SMel Gorman return order; 1322dbe2d4e4SMel Gorman } 1323dbe2d4e4SMel Gorman 13245a811889SMel Gorman static unsigned long 13255a811889SMel Gorman fast_isolate_freepages(struct compact_control *cc) 13265a811889SMel Gorman { 13275a811889SMel Gorman unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); 13285a811889SMel Gorman unsigned int nr_scanned = 0; 13295a811889SMel Gorman unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; 13305a811889SMel Gorman unsigned long nr_isolated = 0; 13315a811889SMel Gorman unsigned long distance; 13325a811889SMel Gorman struct page *page = NULL; 13335a811889SMel Gorman bool scan_start = false; 13345a811889SMel Gorman int order; 13355a811889SMel Gorman 13365a811889SMel Gorman /* Full compaction passes in a negative order */ 13375a811889SMel Gorman if (cc->order <= 0) 13385a811889SMel Gorman return cc->free_pfn; 13395a811889SMel Gorman 13405a811889SMel Gorman /* 13415a811889SMel Gorman * If starting the scan, use a deeper search and use the highest 13425a811889SMel Gorman * PFN found if a suitable one is not found. 13435a811889SMel Gorman */ 1344e332f741SMel Gorman if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { 13455a811889SMel Gorman limit = pageblock_nr_pages >> 1; 13465a811889SMel Gorman scan_start = true; 13475a811889SMel Gorman } 13485a811889SMel Gorman 13495a811889SMel Gorman /* 13505a811889SMel Gorman * Preferred point is in the top quarter of the scan space but take 13515a811889SMel Gorman * a pfn from the top half if the search is problematic. 
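 * For example, with cc->free_pfn == 0x8000 and cc->migrate_pfn == 0x4000, distance is 0x4000 pfns, so low_pfn is the start of the pageblock containing pfn 0x7000 and min_pfn the start of the one containing pfn 0x6000.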
13525a811889SMel Gorman */ 13535a811889SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn); 13545a811889SMel Gorman low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); 13555a811889SMel Gorman min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); 13565a811889SMel Gorman 13575a811889SMel Gorman if (WARN_ON_ONCE(min_pfn > low_pfn)) 13585a811889SMel Gorman low_pfn = min_pfn; 13595a811889SMel Gorman 1360dbe2d4e4SMel Gorman /* 1361dbe2d4e4SMel Gorman * Search starts from the last successful isolation order or the next 1362dbe2d4e4SMel Gorman * order to search after a previous failure 1363dbe2d4e4SMel Gorman */ 1364dbe2d4e4SMel Gorman cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); 1365dbe2d4e4SMel Gorman 1366dbe2d4e4SMel Gorman for (order = cc->search_order; 1367dbe2d4e4SMel Gorman !page && order >= 0; 1368dbe2d4e4SMel Gorman order = next_search_order(cc, order)) { 13695a811889SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 13705a811889SMel Gorman struct list_head *freelist; 13715a811889SMel Gorman struct page *freepage; 13725a811889SMel Gorman unsigned long flags; 13735a811889SMel Gorman unsigned int order_scanned = 0; 13745a811889SMel Gorman 13755a811889SMel Gorman if (!area->nr_free) 13765a811889SMel Gorman continue; 13775a811889SMel Gorman 13785a811889SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 13795a811889SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 13805a811889SMel Gorman list_for_each_entry_reverse(freepage, freelist, lru) { 13815a811889SMel Gorman unsigned long pfn; 13825a811889SMel Gorman 13835a811889SMel Gorman order_scanned++; 13845a811889SMel Gorman nr_scanned++; 13855a811889SMel Gorman pfn = page_to_pfn(freepage); 13865a811889SMel Gorman 13875a811889SMel Gorman if (pfn >= highest) 13885a811889SMel Gorman highest = pageblock_start_pfn(pfn); 13895a811889SMel Gorman 13905a811889SMel Gorman if (pfn >= low_pfn) { 13915a811889SMel Gorman cc->fast_search_fail = 0; 1392dbe2d4e4SMel Gorman cc->search_order = order; 13935a811889SMel Gorman page = freepage; 13945a811889SMel Gorman break; 13955a811889SMel Gorman } 13965a811889SMel Gorman 13975a811889SMel Gorman if (pfn >= min_pfn && pfn > high_pfn) { 13985a811889SMel Gorman high_pfn = pfn; 13995a811889SMel Gorman 14005a811889SMel Gorman /* Shorten the scan if a candidate is found */ 14015a811889SMel Gorman limit >>= 1; 14025a811889SMel Gorman } 14035a811889SMel Gorman 14045a811889SMel Gorman if (order_scanned >= limit) 14055a811889SMel Gorman break; 14065a811889SMel Gorman } 14075a811889SMel Gorman 14085a811889SMel Gorman /* Use a minimum pfn if a preferred one was not found */ 14095a811889SMel Gorman if (!page && high_pfn) { 14105a811889SMel Gorman page = pfn_to_page(high_pfn); 14115a811889SMel Gorman 14125a811889SMel Gorman /* Update freepage for the list reorder below */ 14135a811889SMel Gorman freepage = page; 14145a811889SMel Gorman } 14155a811889SMel Gorman 14165a811889SMel Gorman /* Reorder so a future search skips recent pages */ 14175a811889SMel Gorman move_freelist_head(freelist, freepage); 14185a811889SMel Gorman 14195a811889SMel Gorman /* Isolate the page if available */ 14205a811889SMel Gorman if (page) { 14215a811889SMel Gorman if (__isolate_free_page(page, order)) { 14225a811889SMel Gorman set_page_private(page, order); 14235a811889SMel Gorman nr_isolated = 1 << order; 14245a811889SMel Gorman cc->nr_freepages += nr_isolated; 14255a811889SMel Gorman list_add_tail(&page->lru, &cc->freepages); 14265a811889SMel Gorman
count_compact_events(COMPACTISOLATED, nr_isolated); 14275a811889SMel Gorman } else { 14285a811889SMel Gorman /* If isolation fails, abort the search */ 14295b56d996SQian Cai order = cc->search_order + 1; 14305a811889SMel Gorman page = NULL; 14315a811889SMel Gorman } 14325a811889SMel Gorman } 14335a811889SMel Gorman 14345a811889SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 14355a811889SMel Gorman 14365a811889SMel Gorman /* 14375a811889SMel Gorman * Smaller scan on next order so the total scan is related 14385a811889SMel Gorman * to freelist_scan_limit. 14395a811889SMel Gorman */ 14405a811889SMel Gorman if (order_scanned >= limit) 14415a811889SMel Gorman limit = min(1U, limit >> 1); 14425a811889SMel Gorman } 14435a811889SMel Gorman 14445a811889SMel Gorman if (!page) { 14455a811889SMel Gorman cc->fast_search_fail++; 14465a811889SMel Gorman if (scan_start) { 14475a811889SMel Gorman /* 14485a811889SMel Gorman * Use the highest PFN found above min. If one was 1449f3867755SEthon Paul * not found, be pessimistic for direct compaction 14505a811889SMel Gorman * and use the min mark. 14515a811889SMel Gorman */ 14525a811889SMel Gorman if (highest) { 14535a811889SMel Gorman page = pfn_to_page(highest); 14545a811889SMel Gorman cc->free_pfn = highest; 14555a811889SMel Gorman } else { 1456e577c8b6SSuzuki K Poulose if (cc->direct_compaction && pfn_valid(min_pfn)) { 145773a6e474SBaoquan He page = pageblock_pfn_to_page(min_pfn, 145873a6e474SBaoquan He pageblock_end_pfn(min_pfn), 145973a6e474SBaoquan He cc->zone); 14605a811889SMel Gorman cc->free_pfn = min_pfn; 14615a811889SMel Gorman } 14625a811889SMel Gorman } 14635a811889SMel Gorman } 14645a811889SMel Gorman } 14655a811889SMel Gorman 1466d097a6f6SMel Gorman if (highest && highest >= cc->zone->compact_cached_free_pfn) { 1467d097a6f6SMel Gorman highest -= pageblock_nr_pages; 14685a811889SMel Gorman cc->zone->compact_cached_free_pfn = highest; 1469d097a6f6SMel Gorman } 14705a811889SMel Gorman 14715a811889SMel Gorman cc->total_free_scanned += nr_scanned; 14725a811889SMel Gorman if (!page) 14735a811889SMel Gorman return cc->free_pfn; 14745a811889SMel Gorman 14755a811889SMel Gorman low_pfn = page_to_pfn(page); 14765a811889SMel Gorman fast_isolate_around(cc, low_pfn, nr_isolated); 14775a811889SMel Gorman return low_pfn; 14785a811889SMel Gorman } 14795a811889SMel Gorman 1480f2849aa0SVlastimil Babka /* 1481ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 1482ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them.
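 * The freelist fast path (fast_isolate_freepages() below) is tried first; the linear pageblock scan only runs when the fast path comes back empty-handed.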
1483ff9543fdSMichal Nazarewicz */ 1484edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 1485ff9543fdSMichal Nazarewicz { 1486edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 1487ff9543fdSMichal Nazarewicz struct page *page; 1488c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 1489e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 1490c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 1491c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1492ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 14934fca9730SMel Gorman unsigned int stride; 14942fe86e00SMichal Nazarewicz 14955a811889SMel Gorman /* Try a small search of the free lists for a candidate */ 14965a811889SMel Gorman isolate_start_pfn = fast_isolate_freepages(cc); 14975a811889SMel Gorman if (cc->nr_freepages) 14985a811889SMel Gorman goto splitmap; 14995a811889SMel Gorman 1500ff9543fdSMichal Nazarewicz /* 1501ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 150249e068f0SVlastimil Babka * successfully isolated from, the zone-cached value, or the end of the 1503e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 1504e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 1505c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 1506c96b9e50SVlastimil Babka * For the ending point, take care when isolating in the last pageblock of a 1507a1c1dbebSRandy Dunlap * zone which ends in the middle of a pageblock. 150849e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 150949e068f0SVlastimil Babka * is using. 1510ff9543fdSMichal Nazarewicz */ 1511e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 15125a811889SMel Gorman block_start_pfn = pageblock_start_pfn(isolate_start_pfn); 1513c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1514c96b9e50SVlastimil Babka zone_end_pfn(zone)); 151506b6640aSVlastimil Babka low_pfn = pageblock_end_pfn(cc->migrate_pfn); 15164fca9730SMel Gorman stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; 15172fe86e00SMichal Nazarewicz 1518ff9543fdSMichal Nazarewicz /* 1519ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 1520ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 1521ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 1522ff9543fdSMichal Nazarewicz */ 1523f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 1524c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 1525e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 1526e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 15274fca9730SMel Gorman unsigned long nr_isolated; 15284fca9730SMel Gorman 1529f6ea3adbSDavid Rientjes /* 1530f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 1531cb810ad2SMel Gorman * suitable migration targets, so periodically check whether we need to reschedule.
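 * The check below fires roughly once every SWAP_CLUSTER_MAX pageblocks, which bounds the scheduling latency a long fruitless scan can add.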
1532f6ea3adbSDavid Rientjes */ 1533cb810ad2SMel Gorman if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1534cf66f070SMel Gorman cond_resched(); 1535f6ea3adbSDavid Rientjes 15367d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 15377d49d886SVlastimil Babka zone); 15387d49d886SVlastimil Babka if (!page) 1539ff9543fdSMichal Nazarewicz continue; 1540ff9543fdSMichal Nazarewicz 1541ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 15429f7e3387SVlastimil Babka if (!suitable_migration_target(cc, page)) 1543ff9543fdSMichal Nazarewicz continue; 154468e3e926SLinus Torvalds 1545bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 1546bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 1547bb13ffebSMel Gorman continue; 1548bb13ffebSMel Gorman 1549e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 15504fca9730SMel Gorman nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, 15514fca9730SMel Gorman block_end_pfn, freelist, stride, false); 1552ff9543fdSMichal Nazarewicz 1553d097a6f6SMel Gorman /* Update the skip hint if the full pageblock was scanned */ 1554d097a6f6SMel Gorman if (isolate_start_pfn == block_end_pfn) 1555d097a6f6SMel Gorman update_pageblock_skip(cc, page, block_start_pfn); 1556d097a6f6SMel Gorman 1557cb2dcaf0SMel Gorman /* Are enough freepages isolated? */ 1558cb2dcaf0SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) { 1559a46cbf3bSDavid Rientjes if (isolate_start_pfn >= block_end_pfn) { 1560a46cbf3bSDavid Rientjes /* 1561a46cbf3bSDavid Rientjes * Restart at previous pageblock if more 1562a46cbf3bSDavid Rientjes * freepages can be isolated next time. 1563a46cbf3bSDavid Rientjes */ 1564f5f61a32SVlastimil Babka isolate_start_pfn = 1565e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1566a46cbf3bSDavid Rientjes } 1567be976572SVlastimil Babka break; 1568a46cbf3bSDavid Rientjes } else if (isolate_start_pfn < block_end_pfn) { 1569f5f61a32SVlastimil Babka /* 1570a46cbf3bSDavid Rientjes * If isolation failed early, do not continue 1571a46cbf3bSDavid Rientjes * needlessly. 1572f5f61a32SVlastimil Babka */ 1573a46cbf3bSDavid Rientjes break; 1574f5f61a32SVlastimil Babka } 15754fca9730SMel Gorman 15764fca9730SMel Gorman /* Adjust stride depending on isolation */ 15774fca9730SMel Gorman if (nr_isolated) { 15784fca9730SMel Gorman stride = 1; 15794fca9730SMel Gorman continue; 15804fca9730SMel Gorman } 15814fca9730SMel Gorman stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); 1582c89511abSMel Gorman } 1583ff9543fdSMichal Nazarewicz 15847ed695e0SVlastimil Babka /* 1585f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. 
Either we 1586f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1587f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1588f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 15897ed695e0SVlastimil Babka */ 1590f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 15915a811889SMel Gorman 15925a811889SMel Gorman splitmap: 15935a811889SMel Gorman /* __isolate_free_page() does not map the pages */ 15945a811889SMel Gorman split_map_pages(freelist); 1595748446bbSMel Gorman } 1596748446bbSMel Gorman 1597748446bbSMel Gorman /* 1598748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1599748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1600748446bbSMel Gorman */ 1601748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1602666feb21SMichal Hocko unsigned long data) 1603748446bbSMel Gorman { 1604748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1605748446bbSMel Gorman struct page *freepage; 1606748446bbSMel Gorman 1607748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1608edc2ca61SVlastimil Babka isolate_freepages(cc); 1609748446bbSMel Gorman 1610748446bbSMel Gorman if (list_empty(&cc->freepages)) 1611748446bbSMel Gorman return NULL; 1612748446bbSMel Gorman } 1613748446bbSMel Gorman 1614748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1615748446bbSMel Gorman list_del(&freepage->lru); 1616748446bbSMel Gorman cc->nr_freepages--; 1617748446bbSMel Gorman 1618748446bbSMel Gorman return freepage; 1619748446bbSMel Gorman } 1620748446bbSMel Gorman 1621748446bbSMel Gorman /* 1622d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1623d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1624d53aea3dSDavid Rientjes * special handling needed for NUMA. 1625d53aea3dSDavid Rientjes */ 1626d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1627d53aea3dSDavid Rientjes { 1628d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1629d53aea3dSDavid Rientjes 1630d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1631d53aea3dSDavid Rientjes cc->nr_freepages++; 1632d53aea3dSDavid Rientjes } 1633d53aea3dSDavid Rientjes 1634ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1635ff9543fdSMichal Nazarewicz typedef enum { 1636ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1637ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1638ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1639ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1640ff9543fdSMichal Nazarewicz 1641ff9543fdSMichal Nazarewicz /* 16425bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 16435bbe3547SEric B Munson * compactable pages. 
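 * For example, "sysctl -w vm.compact_unevictable_allowed=0" keeps the scanners away from mlocked pages. As set up below, the default is 0 only on PREEMPT_RT, where the minor faults caused by migrating such pages would add unwanted latency.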
16445bbe3547SEric B Munson */ 16456923aa0dSSebastian Andrzej Siewior #ifdef CONFIG_PREEMPT_RT 16466923aa0dSSebastian Andrzej Siewior int sysctl_compact_unevictable_allowed __read_mostly = 0; 16476923aa0dSSebastian Andrzej Siewior #else 16485bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 16496923aa0dSSebastian Andrzej Siewior #endif 16505bbe3547SEric B Munson 165170b44595SMel Gorman static inline void 165270b44595SMel Gorman update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) 165370b44595SMel Gorman { 165470b44595SMel Gorman if (cc->fast_start_pfn == ULONG_MAX) 165570b44595SMel Gorman return; 165670b44595SMel Gorman 165770b44595SMel Gorman if (!cc->fast_start_pfn) 165870b44595SMel Gorman cc->fast_start_pfn = pfn; 165970b44595SMel Gorman 166070b44595SMel Gorman cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); 166170b44595SMel Gorman } 166270b44595SMel Gorman 166370b44595SMel Gorman static inline unsigned long 166470b44595SMel Gorman reinit_migrate_pfn(struct compact_control *cc) 166570b44595SMel Gorman { 166670b44595SMel Gorman if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) 166770b44595SMel Gorman return cc->migrate_pfn; 166870b44595SMel Gorman 166970b44595SMel Gorman cc->migrate_pfn = cc->fast_start_pfn; 167070b44595SMel Gorman cc->fast_start_pfn = ULONG_MAX; 167170b44595SMel Gorman 167270b44595SMel Gorman return cc->migrate_pfn; 167370b44595SMel Gorman } 167470b44595SMel Gorman 167570b44595SMel Gorman /* 167670b44595SMel Gorman * Briefly search the free lists for a migration source that already has 167770b44595SMel Gorman * some free pages to reduce the number of pages that need migration 167870b44595SMel Gorman * before a pageblock is free. 167970b44595SMel Gorman */ 168070b44595SMel Gorman static unsigned long fast_find_migrateblock(struct compact_control *cc) 168170b44595SMel Gorman { 168270b44595SMel Gorman unsigned int limit = freelist_scan_limit(cc); 168370b44595SMel Gorman unsigned int nr_scanned = 0; 168470b44595SMel Gorman unsigned long distance; 168570b44595SMel Gorman unsigned long pfn = cc->migrate_pfn; 168670b44595SMel Gorman unsigned long high_pfn; 168770b44595SMel Gorman int order; 168870b44595SMel Gorman 168970b44595SMel Gorman /* Skip hints are relied on to avoid repeats on the fast search */ 169070b44595SMel Gorman if (cc->ignore_skip_hint) 169170b44595SMel Gorman return pfn; 169270b44595SMel Gorman 169370b44595SMel Gorman /* 169470b44595SMel Gorman * If the migrate_pfn is not at the start of a zone or the start 169570b44595SMel Gorman * of a pageblock then assume this is a continuation of a previous 169670b44595SMel Gorman * scan restarted due to COMPACT_CLUSTER_MAX. 169770b44595SMel Gorman */ 169870b44595SMel Gorman if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) 169970b44595SMel Gorman return pfn; 170070b44595SMel Gorman 170170b44595SMel Gorman /* 170270b44595SMel Gorman * For smaller orders, just linearly scan as the number of pages 170370b44595SMel Gorman * to migrate should be relatively small and does not necessarily 170470b44595SMel Gorman * justify freeing up a large block for a small allocation. 170570b44595SMel Gorman */ 170670b44595SMel Gorman if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) 170770b44595SMel Gorman return pfn; 170870b44595SMel Gorman 170970b44595SMel Gorman /* 171070b44595SMel Gorman * Only allow kcompactd and direct requests for movable pages to 171170b44595SMel Gorman * quickly clear out a MOVABLE pageblock for allocation. 
This 171270b44595SMel Gorman * reduces the risk that a large movable pageblock is freed for 171370b44595SMel Gorman * an unmovable/reclaimable small allocation. 171470b44595SMel Gorman */ 171570b44595SMel Gorman if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) 171670b44595SMel Gorman return pfn; 171770b44595SMel Gorman 171870b44595SMel Gorman /* 171970b44595SMel Gorman * When starting the migration scanner, pick any pageblock within the 172070b44595SMel Gorman * first half of the search space. Otherwise try and pick a pageblock 172170b44595SMel Gorman * within the first eighth to reduce the chances that a migration 172270b44595SMel Gorman * target later becomes a source. 172370b44595SMel Gorman */ 172470b44595SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn) >> 1; 172570b44595SMel Gorman if (cc->migrate_pfn != cc->zone->zone_start_pfn) 172670b44595SMel Gorman distance >>= 2; 172770b44595SMel Gorman high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); 172870b44595SMel Gorman 172970b44595SMel Gorman for (order = cc->order - 1; 173070b44595SMel Gorman order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit; 173170b44595SMel Gorman order--) { 173270b44595SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 173370b44595SMel Gorman struct list_head *freelist; 173470b44595SMel Gorman unsigned long flags; 173570b44595SMel Gorman struct page *freepage; 173670b44595SMel Gorman 173770b44595SMel Gorman if (!area->nr_free) 173870b44595SMel Gorman continue; 173970b44595SMel Gorman 174070b44595SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 174170b44595SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 174270b44595SMel Gorman list_for_each_entry(freepage, freelist, lru) { 174370b44595SMel Gorman unsigned long free_pfn; 174470b44595SMel Gorman 174570b44595SMel Gorman nr_scanned++; 174670b44595SMel Gorman free_pfn = page_to_pfn(freepage); 174770b44595SMel Gorman if (free_pfn < high_pfn) { 174870b44595SMel Gorman /* 174970b44595SMel Gorman * Avoid if skipped recently. Ideally it would 175070b44595SMel Gorman * move to the tail but even safe iteration of 175170b44595SMel Gorman * the list assumes an entry is deleted, not 175270b44595SMel Gorman * reordered. 
175370b44595SMel Gorman */ 175470b44595SMel Gorman if (get_pageblock_skip(freepage)) { 175570b44595SMel Gorman if (list_is_last(freelist, &freepage->lru)) 175670b44595SMel Gorman break; 175770b44595SMel Gorman 175870b44595SMel Gorman continue; 175970b44595SMel Gorman } 176070b44595SMel Gorman 176170b44595SMel Gorman /* Reorder so a future search skips recent pages */ 176270b44595SMel Gorman move_freelist_tail(freelist, freepage); 176370b44595SMel Gorman 1764e380bebeSMel Gorman update_fast_start_pfn(cc, free_pfn); 176570b44595SMel Gorman pfn = pageblock_start_pfn(free_pfn); 176670b44595SMel Gorman cc->fast_search_fail = 0; 176770b44595SMel Gorman set_pageblock_skip(freepage); 176870b44595SMel Gorman break; 176970b44595SMel Gorman } 177070b44595SMel Gorman 177170b44595SMel Gorman if (nr_scanned >= limit) { 177270b44595SMel Gorman cc->fast_search_fail++; 177370b44595SMel Gorman move_freelist_tail(freelist, freepage); 177470b44595SMel Gorman break; 177570b44595SMel Gorman } 177670b44595SMel Gorman } 177770b44595SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 177870b44595SMel Gorman } 177970b44595SMel Gorman 178070b44595SMel Gorman cc->total_migrate_scanned += nr_scanned; 178170b44595SMel Gorman 178270b44595SMel Gorman /* 178370b44595SMel Gorman * If fast scanning failed then use a cached entry for a page block 178470b44595SMel Gorman * that had free pages as the basis for starting a linear scan. 178570b44595SMel Gorman */ 178670b44595SMel Gorman if (pfn == cc->migrate_pfn) 178770b44595SMel Gorman pfn = reinit_migrate_pfn(cc); 178870b44595SMel Gorman 178970b44595SMel Gorman return pfn; 179070b44595SMel Gorman } 179170b44595SMel Gorman 17925bbe3547SEric B Munson /* 1793edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1794edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1795edc2ca61SVlastimil Babka * compact_control. 1796ff9543fdSMichal Nazarewicz */ 179732aaf055SPengfei Li static isolate_migrate_t isolate_migratepages(struct compact_control *cc) 1798ff9543fdSMichal Nazarewicz { 1799e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1800e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1801e1409c32SJoonsoo Kim unsigned long low_pfn; 1802edc2ca61SVlastimil Babka struct page *page; 1803edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 18045bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 18051d2047feSHugh Dickins (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 180670b44595SMel Gorman bool fast_find_block; 1807ff9543fdSMichal Nazarewicz 1808edc2ca61SVlastimil Babka /* 1809edc2ca61SVlastimil Babka * Start at where we last stopped, or the beginning of the zone as 181070b44595SMel Gorman * initialized by compact_zone(). The first failure will use 181170b44595SMel Gorman * the lowest PFN as the starting point for linear scanning. 1812edc2ca61SVlastimil Babka */ 181370b44595SMel Gorman low_pfn = fast_find_migrateblock(cc); 181406b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 181532aaf055SPengfei Li if (block_start_pfn < cc->zone->zone_start_pfn) 181632aaf055SPengfei Li block_start_pfn = cc->zone->zone_start_pfn; 1817ff9543fdSMichal Nazarewicz 181870b44595SMel Gorman /* 181970b44595SMel Gorman * fast_find_migrateblock() marks a pageblock skipped, so to avoid 182070b44595SMel Gorman * the isolation_suitable check below, check whether the fast 182170b44595SMel Gorman * search was successful.
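 * A successful fast search is precisely the case where the pageblock was marked skip on purpose a moment ago, so honouring the skip bit for it here would defeat the fast path.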
182270b44595SMel Gorman */ 182370b44595SMel Gorman fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; 182470b44595SMel Gorman 1825ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 182606b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1827ff9543fdSMichal Nazarewicz 1828edc2ca61SVlastimil Babka /* 1829edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1830edc2ca61SVlastimil Babka * Do not cross the free scanner. 1831edc2ca61SVlastimil Babka */ 1832e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 183370b44595SMel Gorman fast_find_block = false, 1834e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1835e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1836e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1837edc2ca61SVlastimil Babka 1838edc2ca61SVlastimil Babka /* 1839edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1840edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1841cb810ad2SMel Gorman * need to schedule. 1842edc2ca61SVlastimil Babka */ 1843cb810ad2SMel Gorman if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1844cf66f070SMel Gorman cond_resched(); 1845edc2ca61SVlastimil Babka 184632aaf055SPengfei Li page = pageblock_pfn_to_page(block_start_pfn, 184732aaf055SPengfei Li block_end_pfn, cc->zone); 18487d49d886SVlastimil Babka if (!page) 1849edc2ca61SVlastimil Babka continue; 1850edc2ca61SVlastimil Babka 1851e380bebeSMel Gorman /* 1852e380bebeSMel Gorman * If isolation recently failed, do not retry. Only check the 1853e380bebeSMel Gorman * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1854e380bebeSMel Gorman * to be visited multiple times. Assume skip was checked 1855e380bebeSMel Gorman * before making it "skip" so other compaction instances do 1856e380bebeSMel Gorman * not scan the same block. 1857e380bebeSMel Gorman */ 1858e380bebeSMel Gorman if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && 1859e380bebeSMel Gorman !fast_find_block && !isolation_suitable(cc, page)) 1860edc2ca61SVlastimil Babka continue; 1861edc2ca61SVlastimil Babka 1862edc2ca61SVlastimil Babka /* 18639bebefd5SMel Gorman * For async compaction, also only scan in MOVABLE blocks 18649bebefd5SMel Gorman * without huge pages. Async compaction is optimistic to see 18659bebefd5SMel Gorman * if the minimum amount of work satisfies the allocation. 18669bebefd5SMel Gorman * The cached PFN is updated as it's possible that all 18679bebefd5SMel Gorman * remaining blocks between source and target are unsuitable 18689bebefd5SMel Gorman * and the compaction scanners fail to meet. 1869edc2ca61SVlastimil Babka */ 18709bebefd5SMel Gorman if (!suitable_migration_source(cc, page)) { 18719bebefd5SMel Gorman update_cached_migrate(cc, block_end_pfn); 1872edc2ca61SVlastimil Babka continue; 18739bebefd5SMel Gorman } 1874ff9543fdSMichal Nazarewicz 1875ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1876e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1877e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1878edc2ca61SVlastimil Babka 1879cb2dcaf0SMel Gorman if (!low_pfn) 1880ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1881ff9543fdSMichal Nazarewicz 1882edc2ca61SVlastimil Babka /* 1883edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1884edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1885edc2ca61SVlastimil Babka * continue or not. 
1886edc2ca61SVlastimil Babka */ 1887edc2ca61SVlastimil Babka break; 1888edc2ca61SVlastimil Babka } 1889edc2ca61SVlastimil Babka 1890f2849aa0SVlastimil Babka /* Record where the migration scanner will be restarted. */ 1891f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1892ff9543fdSMichal Nazarewicz 1893edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1894ff9543fdSMichal Nazarewicz } 1895ff9543fdSMichal Nazarewicz 189621c527a3SYaowei Bai /* 189721c527a3SYaowei Bai * order == -1 is expected when compacting via 189821c527a3SYaowei Bai * /proc/sys/vm/compact_memory 189921c527a3SYaowei Bai */ 190021c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 190121c527a3SYaowei Bai { 190221c527a3SYaowei Bai return order == -1; 190321c527a3SYaowei Bai } 190421c527a3SYaowei Bai 1905facdaa91SNitin Gupta static bool kswapd_is_running(pg_data_t *pgdat) 1906facdaa91SNitin Gupta { 1907facdaa91SNitin Gupta return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); 1908facdaa91SNitin Gupta } 1909facdaa91SNitin Gupta 1910facdaa91SNitin Gupta /* 1911facdaa91SNitin Gupta * A zone's fragmentation score is the external fragmentation with respect to 1912facdaa91SNitin Gupta * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value 1913facdaa91SNitin Gupta * in the range [0, 100]. 1914facdaa91SNitin Gupta * 1915facdaa91SNitin Gupta * The scaling factor ensures that proactive compaction focuses on larger 1916facdaa91SNitin Gupta * zones like ZONE_NORMAL, rather than smaller, specialized zones like 1917facdaa91SNitin Gupta * ZONE_DMA32. For smaller zones, the score value remains close to zero, 1918facdaa91SNitin Gupta * and thus never exceeds the high threshold for proactive compaction. 1919facdaa91SNitin Gupta */ 1920d34c0a75SNitin Gupta static unsigned int fragmentation_score_zone(struct zone *zone) 1921facdaa91SNitin Gupta { 1922facdaa91SNitin Gupta unsigned long score; 1923facdaa91SNitin Gupta 1924facdaa91SNitin Gupta score = zone->present_pages * 1925facdaa91SNitin Gupta extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); 1926facdaa91SNitin Gupta return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); 1927facdaa91SNitin Gupta } 1928facdaa91SNitin Gupta 1929facdaa91SNitin Gupta /* 1930facdaa91SNitin Gupta * The per-node proactive (background) compaction process is started by its 1931facdaa91SNitin Gupta * corresponding kcompactd thread when the node's fragmentation score 1932facdaa91SNitin Gupta * exceeds the high threshold. The compaction process remains active until 1933facdaa91SNitin Gupta * the node's score falls below the low threshold, or one of the back-off 1934facdaa91SNitin Gupta * conditions is met.
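 * As a worked example of the scaling above: a zone holding half of the node's present pages with 40% external fragmentation at COMPACTION_HPAGE_ORDER contributes about 20 points to the node's score.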
1935facdaa91SNitin Gupta */ 1936d34c0a75SNitin Gupta static unsigned int fragmentation_score_node(pg_data_t *pgdat) 1937facdaa91SNitin Gupta { 1938d34c0a75SNitin Gupta unsigned int score = 0; 1939facdaa91SNitin Gupta int zoneid; 1940facdaa91SNitin Gupta 1941facdaa91SNitin Gupta for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 1942facdaa91SNitin Gupta struct zone *zone; 1943facdaa91SNitin Gupta 1944facdaa91SNitin Gupta zone = &pgdat->node_zones[zoneid]; 1945facdaa91SNitin Gupta score += fragmentation_score_zone(zone); 1946facdaa91SNitin Gupta } 1947facdaa91SNitin Gupta 1948facdaa91SNitin Gupta return score; 1949facdaa91SNitin Gupta } 1950facdaa91SNitin Gupta 1951d34c0a75SNitin Gupta static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) 1952facdaa91SNitin Gupta { 1953d34c0a75SNitin Gupta unsigned int wmark_low; 1954facdaa91SNitin Gupta 1955facdaa91SNitin Gupta /* 1956facdaa91SNitin Gupta * Cap the low watermark to avoid excessive compaction 1957facdaa91SNitin Gupta * activity in case a user sets the proactiveness tunable 1958facdaa91SNitin Gupta * close to 100 (maximum). 1959facdaa91SNitin Gupta */ 1960d34c0a75SNitin Gupta wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); 1961d34c0a75SNitin Gupta return low ? wmark_low : min(wmark_low + 10, 100U); 1962facdaa91SNitin Gupta } 1963facdaa91SNitin Gupta 1964facdaa91SNitin Gupta static bool should_proactive_compact_node(pg_data_t *pgdat) 1965facdaa91SNitin Gupta { 1966facdaa91SNitin Gupta int wmark_high; 1967facdaa91SNitin Gupta 1968facdaa91SNitin Gupta if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) 1969facdaa91SNitin Gupta return false; 1970facdaa91SNitin Gupta 1971facdaa91SNitin Gupta wmark_high = fragmentation_score_wmark(pgdat, false); 1972facdaa91SNitin Gupta return fragmentation_score_node(pgdat) > wmark_high; 1973facdaa91SNitin Gupta } 1974facdaa91SNitin Gupta 197540cacbcbSMel Gorman static enum compact_result __compact_finished(struct compact_control *cc) 1976748446bbSMel Gorman { 19778fb74b9fSMel Gorman unsigned int order; 1978d39773a0SVlastimil Babka const int migratetype = cc->migratetype; 1979cb2dcaf0SMel Gorman int ret; 1980748446bbSMel Gorman 1981753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1982f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 198355b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 198440cacbcbSMel Gorman reset_cached_positions(cc->zone); 198555b7c4c9SVlastimil Babka 198662997027SMel Gorman /* 198762997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1988accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 198962997027SMel Gorman * flag itself as the decision to clear it should be based directly 199062997027SMel Gorman * on an allocation request.
199162997027SMel Gorman */ 1992accf6242SVlastimil Babka if (cc->direct_compaction) 199340cacbcbSMel Gorman cc->zone->compact_blockskip_flush = true; 199462997027SMel Gorman 1995c8f7de0bSMichal Hocko if (cc->whole_zone) 1996748446bbSMel Gorman return COMPACT_COMPLETE; 1997c8f7de0bSMichal Hocko else 1998c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1999bb13ffebSMel Gorman } 2000748446bbSMel Gorman 2001facdaa91SNitin Gupta if (cc->proactive_compaction) { 2002facdaa91SNitin Gupta int score, wmark_low; 2003facdaa91SNitin Gupta pg_data_t *pgdat; 2004facdaa91SNitin Gupta 2005facdaa91SNitin Gupta pgdat = cc->zone->zone_pgdat; 2006facdaa91SNitin Gupta if (kswapd_is_running(pgdat)) 2007facdaa91SNitin Gupta return COMPACT_PARTIAL_SKIPPED; 2008facdaa91SNitin Gupta 2009facdaa91SNitin Gupta score = fragmentation_score_zone(cc->zone); 2010facdaa91SNitin Gupta wmark_low = fragmentation_score_wmark(pgdat, true); 2011facdaa91SNitin Gupta 2012facdaa91SNitin Gupta if (score > wmark_low) 2013facdaa91SNitin Gupta ret = COMPACT_CONTINUE; 2014facdaa91SNitin Gupta else 2015facdaa91SNitin Gupta ret = COMPACT_SUCCESS; 2016facdaa91SNitin Gupta 2017facdaa91SNitin Gupta goto out; 2018facdaa91SNitin Gupta } 2019facdaa91SNitin Gupta 202021c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 202156de7263SMel Gorman return COMPACT_CONTINUE; 202256de7263SMel Gorman 2023baf6a9a1SVlastimil Babka /* 2024efe771c7SMel Gorman * Always finish scanning a pageblock to reduce the possibility of 2025efe771c7SMel Gorman * fallbacks in the future. This is particularly important when 2026efe771c7SMel Gorman * migration source is unmovable/reclaimable but it's not worth 2027efe771c7SMel Gorman * special casing. 2028baf6a9a1SVlastimil Babka */ 2029efe771c7SMel Gorman if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 2030baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 2031baf6a9a1SVlastimil Babka 203256de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 2033cb2dcaf0SMel Gorman ret = COMPACT_NO_SUITABLE_PAGE; 203456de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 203540cacbcbSMel Gorman struct free_area *area = &cc->zone->free_area[order]; 20362149cdaeSJoonsoo Kim bool can_steal; 20378fb74b9fSMel Gorman 203856de7263SMel Gorman /* Job done if page is free of the right migratetype */ 2039b03641afSDan Williams if (!free_area_empty(area, migratetype)) 2040cf378319SVlastimil Babka return COMPACT_SUCCESS; 204156de7263SMel Gorman 20422149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 20432149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 20442149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 2045b03641afSDan Williams !free_area_empty(area, MIGRATE_CMA)) 2046cf378319SVlastimil Babka return COMPACT_SUCCESS; 20472149cdaeSJoonsoo Kim #endif 20482149cdaeSJoonsoo Kim /* 20492149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 20502149cdaeSJoonsoo Kim * other migratetype buddy lists. 20512149cdaeSJoonsoo Kim */ 20522149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 2053baf6a9a1SVlastimil Babka true, &can_steal) != -1) { 2054baf6a9a1SVlastimil Babka 2055baf6a9a1SVlastimil Babka /* movable pages are OK in any pageblock */ 2056baf6a9a1SVlastimil Babka if (migratetype == MIGRATE_MOVABLE) 2057cf378319SVlastimil Babka return COMPACT_SUCCESS; 2058baf6a9a1SVlastimil Babka 2059baf6a9a1SVlastimil Babka /* 2060baf6a9a1SVlastimil Babka * We are stealing for a non-movable allocation. 
Make 2061baf6a9a1SVlastimil Babka * sure we finish compacting the current pageblock 2062baf6a9a1SVlastimil Babka * first so it is as free as possible and we won't 2063baf6a9a1SVlastimil Babka * have to steal another one soon. This only applies 2064baf6a9a1SVlastimil Babka * to sync compaction, as async compaction operates 2065baf6a9a1SVlastimil Babka * on pageblocks of the same migratetype. 2066baf6a9a1SVlastimil Babka */ 2067baf6a9a1SVlastimil Babka if (cc->mode == MIGRATE_ASYNC || 2068baf6a9a1SVlastimil Babka IS_ALIGNED(cc->migrate_pfn, 2069baf6a9a1SVlastimil Babka pageblock_nr_pages)) { 2070baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 2071baf6a9a1SVlastimil Babka } 2072baf6a9a1SVlastimil Babka 2073cb2dcaf0SMel Gorman ret = COMPACT_CONTINUE; 2074cb2dcaf0SMel Gorman break; 2075baf6a9a1SVlastimil Babka } 207656de7263SMel Gorman } 207756de7263SMel Gorman 2078facdaa91SNitin Gupta out: 2079cb2dcaf0SMel Gorman if (cc->contended || fatal_signal_pending(current)) 2080cb2dcaf0SMel Gorman ret = COMPACT_CONTENDED; 2081cb2dcaf0SMel Gorman 2082cb2dcaf0SMel Gorman return ret; 2083837d026dSJoonsoo Kim } 2084837d026dSJoonsoo Kim 208540cacbcbSMel Gorman static enum compact_result compact_finished(struct compact_control *cc) 2086837d026dSJoonsoo Kim { 2087837d026dSJoonsoo Kim int ret; 2088837d026dSJoonsoo Kim 208940cacbcbSMel Gorman ret = __compact_finished(cc); 209040cacbcbSMel Gorman trace_mm_compaction_finished(cc->zone, cc->order, ret); 2091837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 2092837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 2093837d026dSJoonsoo Kim 2094837d026dSJoonsoo Kim return ret; 2095748446bbSMel Gorman } 2096748446bbSMel Gorman 2097ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 2098c603844bSMel Gorman unsigned int alloc_flags, 209997a225e6SJoonsoo Kim int highest_zoneidx, 210086a294a8SMichal Hocko unsigned long wmark_target) 21013e7d3449SMel Gorman { 21023e7d3449SMel Gorman unsigned long watermark; 21033e7d3449SMel Gorman 210421c527a3SYaowei Bai if (is_via_compact_memory(order)) 21053957c776SMichal Hocko return COMPACT_CONTINUE; 21063957c776SMichal Hocko 2107a9214443SMel Gorman watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 2108ebff3980SVlastimil Babka /* 2109ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 2110ebff3980SVlastimil Babka * should be no need for compaction at all. 2111ebff3980SVlastimil Babka */ 211297a225e6SJoonsoo Kim if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 2113ebff3980SVlastimil Babka alloc_flags)) 2114cf378319SVlastimil Babka return COMPACT_SUCCESS; 2115ebff3980SVlastimil Babka 21163957c776SMichal Hocko /* 21179861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 2118984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 2119984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 2120984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 2121984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 212297a225e6SJoonsoo Kim * isolation. We however do use the direct compactor's highest_zoneidx 212397a225e6SJoonsoo Kim * to skip over zones where lowmem reserves would prevent allocation 212497a225e6SJoonsoo Kim * even if compaction succeeds. 
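 * compact_gap() below adds twice the allocation size on top of the chosen watermark, so that both the isolated-but-not-yet-used migration targets and the allocation itself can be satisfied.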
21258348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 21268348faf9SVlastimil Babka * compaction to proceed to increase its chances. 2127d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2128d883c6cfSJoonsoo Kim * suitable migration targets 21293e7d3449SMel Gorman */ 21308348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 21318348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 21328348faf9SVlastimil Babka watermark += compact_gap(order); 213397a225e6SJoonsoo Kim if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2134d883c6cfSJoonsoo Kim ALLOC_CMA, wmark_target)) 21353e7d3449SMel Gorman return COMPACT_SKIPPED; 21363e7d3449SMel Gorman 2137cc5c9f09SVlastimil Babka return COMPACT_CONTINUE; 2138cc5c9f09SVlastimil Babka } 2139cc5c9f09SVlastimil Babka 21402b1a20c3SHui Su /* 21412b1a20c3SHui Su * compaction_suitable: Is this suitable to run compaction on this zone now? 21422b1a20c3SHui Su * Returns 21432b1a20c3SHui Su * COMPACT_SKIPPED - If there are too few free pages for compaction 21442b1a20c3SHui Su * COMPACT_SUCCESS - If the allocation would succeed without compaction 21452b1a20c3SHui Su * COMPACT_CONTINUE - If compaction should run now 21462b1a20c3SHui Su */ 2147cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order, 2148cc5c9f09SVlastimil Babka unsigned int alloc_flags, 214997a225e6SJoonsoo Kim int highest_zoneidx) 2150cc5c9f09SVlastimil Babka { 2151cc5c9f09SVlastimil Babka enum compact_result ret; 2152cc5c9f09SVlastimil Babka int fragindex; 2153cc5c9f09SVlastimil Babka 215497a225e6SJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, 2155cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 21563e7d3449SMel Gorman /* 21573e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 21583e7d3449SMel Gorman * low memory or external fragmentation 21593e7d3449SMel Gorman * 2160ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 2161ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 21623e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 21633e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 21643e7d3449SMel Gorman * 216520311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 216620311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 216720311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 216820311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 216920311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 217020311420SVlastimil Babka * expense of system stability. 
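 * With the default vm.extfrag_threshold of 500, a costly order therefore proceeds only when fragindex is -1000 or above 500, i.e. when the failure looks more like fragmentation than a plain lack of memory.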
21713e7d3449SMel Gorman */ 217220311420SVlastimil Babka if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 21733e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 21743e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 2175cc5c9f09SVlastimil Babka ret = COMPACT_NOT_SUITABLE_ZONE; 21763e7d3449SMel Gorman } 21773e7d3449SMel Gorman 2178837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 2179837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 2180837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 2181837d026dSJoonsoo Kim 2182837d026dSJoonsoo Kim return ret; 2183837d026dSJoonsoo Kim } 2184837d026dSJoonsoo Kim 218586a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 218686a294a8SMichal Hocko int alloc_flags) 218786a294a8SMichal Hocko { 218886a294a8SMichal Hocko struct zone *zone; 218986a294a8SMichal Hocko struct zoneref *z; 219086a294a8SMichal Hocko 219186a294a8SMichal Hocko /* 219286a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 219386a294a8SMichal Hocko * retrying the reclaim. 219486a294a8SMichal Hocko */ 219597a225e6SJoonsoo Kim for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 219697a225e6SJoonsoo Kim ac->highest_zoneidx, ac->nodemask) { 219786a294a8SMichal Hocko unsigned long available; 219886a294a8SMichal Hocko enum compact_result compact_result; 219986a294a8SMichal Hocko 220086a294a8SMichal Hocko /* 220186a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 220286a294a8SMichal Hocko * want to thrash just for a single high order allocation which 220386a294a8SMichal Hocko * is not guaranteed to appear even if __compaction_suitable 220486a294a8SMichal Hocko * is happy with the watermark check. 220586a294a8SMichal Hocko */ 22065a1c84b4SMel Gorman available = zone_reclaimable_pages(zone) / order; 220786a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 220886a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 220997a225e6SJoonsoo Kim ac->highest_zoneidx, available); 2210cc5c9f09SVlastimil Babka if (compact_result != COMPACT_SKIPPED) 221186a294a8SMichal Hocko return true; 221286a294a8SMichal Hocko } 221386a294a8SMichal Hocko 221486a294a8SMichal Hocko return false; 221586a294a8SMichal Hocko } 221686a294a8SMichal Hocko 22175e1f0f09SMel Gorman static enum compact_result 22185e1f0f09SMel Gorman compact_zone(struct compact_control *cc, struct capture_control *capc) 2219748446bbSMel Gorman { 2220ea7ab982SMichal Hocko enum compact_result ret; 222140cacbcbSMel Gorman unsigned long start_pfn = cc->zone->zone_start_pfn; 222240cacbcbSMel Gorman unsigned long end_pfn = zone_end_pfn(cc->zone); 2223566e54e1SMel Gorman unsigned long last_migrated_pfn; 2224e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 22258854c55fSMel Gorman bool update_cached; 2226748446bbSMel Gorman 2227a94b5252SYafang Shao /* 2228a94b5252SYafang Shao * These counters track activities during zone compaction. Initialize 2229a94b5252SYafang Shao * them before compacting a new zone.
2230a94b5252SYafang Shao */ 2231a94b5252SYafang Shao cc->total_migrate_scanned = 0; 2232a94b5252SYafang Shao cc->total_free_scanned = 0; 2233a94b5252SYafang Shao cc->nr_migratepages = 0; 2234a94b5252SYafang Shao cc->nr_freepages = 0; 2235a94b5252SYafang Shao INIT_LIST_HEAD(&cc->freepages); 2236a94b5252SYafang Shao INIT_LIST_HEAD(&cc->migratepages); 2237a94b5252SYafang Shao 223801c0bfe0SWei Yang cc->migratetype = gfp_migratetype(cc->gfp_mask); 223940cacbcbSMel Gorman ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 224097a225e6SJoonsoo Kim cc->highest_zoneidx); 22413e7d3449SMel Gorman /* Compaction is likely to fail */ 2242cf378319SVlastimil Babka if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 22433e7d3449SMel Gorman return ret; 2244c46649deSMichal Hocko 2245c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 2246c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 22473e7d3449SMel Gorman 2248c89511abSMel Gorman /* 2249d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 2250accf6242SVlastimil Babka * is about to be retried after being deferred. 2251d3132e4bSVlastimil Babka */ 225240cacbcbSMel Gorman if (compaction_restarting(cc->zone, cc->order)) 225340cacbcbSMel Gorman __reset_isolation_suitable(cc->zone); 2254d3132e4bSVlastimil Babka 2255d3132e4bSVlastimil Babka /* 2256c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 225706ed2998SVlastimil Babka * information on where the scanners should start (unless we explicitly 225806ed2998SVlastimil Babka * want to compact the whole zone), but check that it is initialised 225906ed2998SVlastimil Babka * by ensuring the values are within zone boundaries. 2260c89511abSMel Gorman */ 226170b44595SMel Gorman cc->fast_start_pfn = 0; 226206ed2998SVlastimil Babka if (cc->whole_zone) { 226306ed2998SVlastimil Babka cc->migrate_pfn = start_pfn; 226406ed2998SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 226506ed2998SVlastimil Babka } else { 226640cacbcbSMel Gorman cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 226740cacbcbSMel Gorman cc->free_pfn = cc->zone->compact_cached_free_pfn; 2268623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 226906b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 227040cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = cc->free_pfn; 2271c89511abSMel Gorman } 2272623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2273c89511abSMel Gorman cc->migrate_pfn = start_pfn; 227440cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 227540cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2276c89511abSMel Gorman } 2277c8f7de0bSMichal Hocko 2278e332f741SMel Gorman if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2279c8f7de0bSMichal Hocko cc->whole_zone = true; 228006ed2998SVlastimil Babka } 2281c8f7de0bSMichal Hocko 2282566e54e1SMel Gorman last_migrated_pfn = 0; 2283748446bbSMel Gorman 22848854c55fSMel Gorman /* 22858854c55fSMel Gorman * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 22868854c55fSMel Gorman * the basis that some migrations will fail in ASYNC mode. However, 22878854c55fSMel Gorman * if the cached PFNs match and pageblocks are skipped due to having 22888854c55fSMel Gorman * no isolation candidates, then the sync state does not matter. 
22898854c55fSMel Gorman * Until a pageblock with isolation candidates is found, keep the 22908854c55fSMel Gorman * cached PFNs in sync to avoid revisiting the same blocks. 22918854c55fSMel Gorman */ 22928854c55fSMel Gorman update_cached = !sync && 22938854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 22948854c55fSMel Gorman 229516c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 229616c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 22970eb927c0SMel Gorman 2298748446bbSMel Gorman migrate_prep_local(); 2299748446bbSMel Gorman 230040cacbcbSMel Gorman while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 23019d502c1cSMinchan Kim int err; 230219d3cf9dSYanfei Xu unsigned long iteration_start_pfn = cc->migrate_pfn; 2303748446bbSMel Gorman 2304804d3121SMel Gorman /* 2305804d3121SMel Gorman * Avoid multiple rescans which can happen if a page cannot be 2306804d3121SMel Gorman * isolated (dirty/writeback in async mode) or if the migrated 2307804d3121SMel Gorman * pages are being allocated before the pageblock is cleared. 2308804d3121SMel Gorman * The first rescan will capture the entire pageblock for 2309804d3121SMel Gorman * migration. If it fails, it'll be marked skip and scanning 2310804d3121SMel Gorman * will proceed as normal. 2311804d3121SMel Gorman */ 2312804d3121SMel Gorman cc->rescan = false; 2313804d3121SMel Gorman if (pageblock_start_pfn(last_migrated_pfn) == 231419d3cf9dSYanfei Xu pageblock_start_pfn(iteration_start_pfn)) { 2315804d3121SMel Gorman cc->rescan = true; 2316804d3121SMel Gorman } 2317804d3121SMel Gorman 231832aaf055SPengfei Li switch (isolate_migratepages(cc)) { 2319f9e35b3bSMel Gorman case ISOLATE_ABORT: 23202d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 23215733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 2322e64c5237SShaohua Li cc->nr_migratepages = 0; 2323f9e35b3bSMel Gorman goto out; 2324f9e35b3bSMel Gorman case ISOLATE_NONE: 23258854c55fSMel Gorman if (update_cached) { 23268854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = 23278854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0]; 23288854c55fSMel Gorman } 23298854c55fSMel Gorman 2330fdaf7f5cSVlastimil Babka /* 2331fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 2332fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 2333fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 
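 *
 * (Worked case, numbers hypothetical: with cc->order == 9, pages
 * migrated out of the block starting at pfn 0x40000 in an earlier
 * iteration may still sit on per-CPU lists; even though nothing was
 * isolated this time, falling through to check_drain below can flush
 * them so the order-9 buddy page has a chance to form.)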
2334fdaf7f5cSVlastimil Babka */ 2335fdaf7f5cSVlastimil Babka goto check_drain; 2336f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 23378854c55fSMel Gorman update_cached = false; 233819d3cf9dSYanfei Xu last_migrated_pfn = iteration_start_pfn; 2339f9e35b3bSMel Gorman } 2340748446bbSMel Gorman 2341d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 2342e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 23437b2a2d4aSMel Gorman MR_COMPACTION); 2344748446bbSMel Gorman 2345f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 2346f8c9301fSVlastimil Babka &cc->migratepages); 2347748446bbSMel Gorman 2348f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 2349f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 23509d502c1cSMinchan Kim if (err) { 23515733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 23527ed695e0SVlastimil Babka /* 23537ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 23547ed695e0SVlastimil Babka * and we want compact_finished() to detect it 23557ed695e0SVlastimil Babka */ 2356f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 23572d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 23584bf2bba3SDavid Rientjes goto out; 2359748446bbSMel Gorman } 2360fdd048e1SVlastimil Babka /* 2361fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 2362fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 2363fdd048e1SVlastimil Babka */ 2364fdd048e1SVlastimil Babka if (cc->direct_compaction && 2365fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 2366fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 2367fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 2368fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 2369566e54e1SMel Gorman last_migrated_pfn = 0; 2370fdd048e1SVlastimil Babka } 23714bf2bba3SDavid Rientjes } 2372fdaf7f5cSVlastimil Babka 2373fdaf7f5cSVlastimil Babka check_drain: 2374fdaf7f5cSVlastimil Babka /* 2375fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 2376fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 2377fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 2378fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 2379fdaf7f5cSVlastimil Babka * would succeed. 
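 *
 * A minimal sketch of the condition evaluated below (hypothetical
 * helper; block_start_pfn() is the rounding macro defined near the top
 * of this file):
 *
 *   static bool migrated_block_left_behind(struct compact_control *cc,
 *                                          unsigned long last_migrated_pfn)
 *   {
 *           return cc->order > 0 && last_migrated_pfn &&
 *                  last_migrated_pfn <
 *                          block_start_pfn(cc->migrate_pfn, cc->order);
 *   }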
2380fdaf7f5cSVlastimil Babka */ 2381566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 2382fdaf7f5cSVlastimil Babka unsigned long current_block_start = 238306b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 2384fdaf7f5cSVlastimil Babka 2385566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 2386b01b2141SIngo Molnar lru_add_drain_cpu_zone(cc->zone); 2387fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 2388566e54e1SMel Gorman last_migrated_pfn = 0; 2389fdaf7f5cSVlastimil Babka } 2390fdaf7f5cSVlastimil Babka } 2391fdaf7f5cSVlastimil Babka 23925e1f0f09SMel Gorman /* Stop if a page has been captured */ 23935e1f0f09SMel Gorman if (capc && capc->page) { 23945e1f0f09SMel Gorman ret = COMPACT_SUCCESS; 23955e1f0f09SMel Gorman break; 23965e1f0f09SMel Gorman } 2397748446bbSMel Gorman } 2398748446bbSMel Gorman 2399f9e35b3bSMel Gorman out: 24006bace090SVlastimil Babka /* 24016bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 24026bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 24036bace090SVlastimil Babka */ 24046bace090SVlastimil Babka if (cc->nr_freepages > 0) { 24056bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 24066bace090SVlastimil Babka 24076bace090SVlastimil Babka cc->nr_freepages = 0; 24086bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 24096bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 241006b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 24116bace090SVlastimil Babka /* 24126bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 24136bace090SVlastimil Babka * already reset to zone end in compact_finished() 24146bace090SVlastimil Babka */ 241540cacbcbSMel Gorman if (free_pfn > cc->zone->compact_cached_free_pfn) 241640cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = free_pfn; 24176bace090SVlastimil Babka } 2418748446bbSMel Gorman 24197f354a54SDavid Rientjes count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 24207f354a54SDavid Rientjes count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 24217f354a54SDavid Rientjes 242216c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 242316c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 24240eb927c0SMel Gorman 2425748446bbSMel Gorman return ret; 2426748446bbSMel Gorman } 242776ab0f53SMel Gorman 2428ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 2429c3486f53SVlastimil Babka gfp_t gfp_mask, enum compact_priority prio, 243097a225e6SJoonsoo Kim unsigned int alloc_flags, int highest_zoneidx, 24315e1f0f09SMel Gorman struct page **capture) 243256de7263SMel Gorman { 2433ea7ab982SMichal Hocko enum compact_result ret; 243456de7263SMel Gorman struct compact_control cc = { 243556de7263SMel Gorman .order = order, 2436dbe2d4e4SMel Gorman .search_order = order, 24376d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 243856de7263SMel Gorman .zone = zone, 2439a5508cd8SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ? 
2440a5508cd8SVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2441ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 244297a225e6SJoonsoo Kim .highest_zoneidx = highest_zoneidx, 2443accf6242SVlastimil Babka .direct_compaction = true, 2444a8e025e5SVlastimil Babka .whole_zone = (prio == MIN_COMPACT_PRIORITY), 24459f7e3387SVlastimil Babka .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 24469f7e3387SVlastimil Babka .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 244756de7263SMel Gorman }; 24485e1f0f09SMel Gorman struct capture_control capc = { 24495e1f0f09SMel Gorman .cc = &cc, 24505e1f0f09SMel Gorman .page = NULL, 24515e1f0f09SMel Gorman }; 24525e1f0f09SMel Gorman 2453b9e20f0dSVlastimil Babka /* 2454b9e20f0dSVlastimil Babka * Make sure the structs are really initialized before we expose the 2455b9e20f0dSVlastimil Babka * capture control, in case we are interrupted and the interrupt handler 2456b9e20f0dSVlastimil Babka * frees a page. 2457b9e20f0dSVlastimil Babka */ 2458b9e20f0dSVlastimil Babka barrier(); 2459b9e20f0dSVlastimil Babka WRITE_ONCE(current->capture_control, &capc); 246056de7263SMel Gorman 24615e1f0f09SMel Gorman ret = compact_zone(&cc, &capc); 2462e64c5237SShaohua Li 2463e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 2464e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 2465e64c5237SShaohua Li 2466b9e20f0dSVlastimil Babka /* 2467b9e20f0dSVlastimil Babka * Make sure we hide capture control before we read the captured 2468b9e20f0dSVlastimil Babka * page pointer, otherwise an interrupt could free and capture a page 2469b9e20f0dSVlastimil Babka * and we would leak it. 2470b9e20f0dSVlastimil Babka */ 2471b9e20f0dSVlastimil Babka WRITE_ONCE(current->capture_control, NULL); 2472b9e20f0dSVlastimil Babka *capture = READ_ONCE(capc.page); 24735e1f0f09SMel Gorman 2474e64c5237SShaohua Li return ret; 247556de7263SMel Gorman } 247656de7263SMel Gorman 24775e771905SMel Gorman int sysctl_extfrag_threshold = 500; 24785e771905SMel Gorman 247956de7263SMel Gorman /** 248056de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 248156de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 24821a6d53a1SVlastimil Babka * @order: The order of the current allocation 24831a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 24841a6d53a1SVlastimil Babka * @ac: The context of the current allocation 2485112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 24866467552cSVlastimil Babka * @capture: Where a free page captured by compaction, if any, will be stored 248756de7263SMel Gorman * 248856de7263SMel Gorman * This is the main entry point for direct page compaction.
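 *
 * A hedged sketch of a call site (the surrounding allocator helpers
 * named here are assumptions for illustration, not the page
 * allocator's actual code):
 *
 *   struct page *page = NULL;
 *   enum compact_result rc;
 *
 *   rc = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *                             COMPACT_PRIO_SYNC_LIGHT, &page);
 *   if (page)
 *           return prep_captured_page(page); // hypothetical helper
 *   if (rc == COMPACT_SUCCESS)
 *           retry_allocation();              // hypothetical: watermarks now pass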
248956de7263SMel Gorman */ 2490ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2491c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 24925e1f0f09SMel Gorman enum compact_priority prio, struct page **capture) 249356de7263SMel Gorman { 249456de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 249556de7263SMel Gorman struct zoneref *z; 249656de7263SMel Gorman struct zone *zone; 24971d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 249856de7263SMel Gorman 249973e64c51SMichal Hocko /* 250073e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 250173e64c51SMichal Hocko * tricky context because the migration might require IO 250273e64c51SMichal Hocko */ 250373e64c51SMichal Hocko if (!may_perform_io) 250453853e2dSVlastimil Babka return COMPACT_SKIPPED; 250556de7263SMel Gorman 2506a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2507837d026dSJoonsoo Kim 250856de7263SMel Gorman /* Compact each zone in the list */ 250997a225e6SJoonsoo Kim for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 251097a225e6SJoonsoo Kim ac->highest_zoneidx, ac->nodemask) { 2511ea7ab982SMichal Hocko enum compact_result status; 251256de7263SMel Gorman 2513a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 2514a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 25151d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 251653853e2dSVlastimil Babka continue; 25171d4746d3SMichal Hocko } 251853853e2dSVlastimil Babka 2519a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 252097a225e6SJoonsoo Kim alloc_flags, ac->highest_zoneidx, capture); 252156de7263SMel Gorman rc = max(status, rc); 252256de7263SMel Gorman 25237ceb009aSVlastimil Babka /* The allocation should succeed, stop compacting */ 25247ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 252553853e2dSVlastimil Babka /* 252653853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 252753853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 252853853e2dSVlastimil Babka * will repeat this with true if allocation indeed 252953853e2dSVlastimil Babka * succeeds in this zone. 253053853e2dSVlastimil Babka */ 253153853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 25321f9efdefSVlastimil Babka 2533c3486f53SVlastimil Babka break; 25341f9efdefSVlastimil Babka } 25351f9efdefSVlastimil Babka 2536a5508cd8SVlastimil Babka if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2537c3486f53SVlastimil Babka status == COMPACT_PARTIAL_SKIPPED)) 253853853e2dSVlastimil Babka /* 253953853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 254053853e2dSVlastimil Babka * so we defer compaction there. If it ends up 254153853e2dSVlastimil Babka * succeeding after all, it will be reset. 254253853e2dSVlastimil Babka */ 254353853e2dSVlastimil Babka defer_compaction(zone, order); 25441f9efdefSVlastimil Babka 25451f9efdefSVlastimil Babka /* 25461f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 25471f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. 
In that 2548c3486f53SVlastimil Babka * case do not try further zones 25491f9efdefSVlastimil Babka */ 2550c3486f53SVlastimil Babka if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2551c3486f53SVlastimil Babka || fatal_signal_pending(current)) 25521f9efdefSVlastimil Babka break; 25531f9efdefSVlastimil Babka } 25541f9efdefSVlastimil Babka 255556de7263SMel Gorman return rc; 255656de7263SMel Gorman } 255756de7263SMel Gorman 2558facdaa91SNitin Gupta /* 2559facdaa91SNitin Gupta * Compact all zones within a node until each zone's fragmentation score 2560facdaa91SNitin Gupta * falls within the proactive compaction thresholds (as determined by the 2561facdaa91SNitin Gupta * proactiveness tunable). 2562facdaa91SNitin Gupta * 2563facdaa91SNitin Gupta * It is possible that the function returns before reaching score targets 2564facdaa91SNitin Gupta * due to various back-off conditions, such as contention on per-node or 2565facdaa91SNitin Gupta * per-zone locks. 2566facdaa91SNitin Gupta */ 2567facdaa91SNitin Gupta static void proactive_compact_node(pg_data_t *pgdat) 2568facdaa91SNitin Gupta { 2569facdaa91SNitin Gupta int zoneid; 2570facdaa91SNitin Gupta struct zone *zone; 2571facdaa91SNitin Gupta struct compact_control cc = { 2572facdaa91SNitin Gupta .order = -1, 2573facdaa91SNitin Gupta .mode = MIGRATE_SYNC_LIGHT, 2574facdaa91SNitin Gupta .ignore_skip_hint = true, 2575facdaa91SNitin Gupta .whole_zone = true, 2576facdaa91SNitin Gupta .gfp_mask = GFP_KERNEL, 2577facdaa91SNitin Gupta .proactive_compaction = true, 2578facdaa91SNitin Gupta }; 2579facdaa91SNitin Gupta 2580facdaa91SNitin Gupta for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2581facdaa91SNitin Gupta zone = &pgdat->node_zones[zoneid]; 2582facdaa91SNitin Gupta if (!populated_zone(zone)) 2583facdaa91SNitin Gupta continue; 2584facdaa91SNitin Gupta 2585facdaa91SNitin Gupta cc.zone = zone; 2586facdaa91SNitin Gupta 2587facdaa91SNitin Gupta compact_zone(&cc, NULL); 2588facdaa91SNitin Gupta 2589facdaa91SNitin Gupta VM_BUG_ON(!list_empty(&cc.freepages)); 2590facdaa91SNitin Gupta VM_BUG_ON(!list_empty(&cc.migratepages)); 2591facdaa91SNitin Gupta } 2592facdaa91SNitin Gupta } 259356de7263SMel Gorman 259476ab0f53SMel Gorman /* Compact all zones within a node */ 25957103f16dSAndrew Morton static void compact_node(int nid) 25967be62de9SRik van Riel { 2597791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2598791cae96SVlastimil Babka int zoneid; 2599791cae96SVlastimil Babka struct zone *zone; 26007be62de9SRik van Riel struct compact_control cc = { 26017be62de9SRik van Riel .order = -1, 2602e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 260391ca9186SDavid Rientjes .ignore_skip_hint = true, 260406ed2998SVlastimil Babka .whole_zone = true, 260573e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 26067be62de9SRik van Riel }; 26077be62de9SRik van Riel 2608791cae96SVlastimil Babka 2609791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2610791cae96SVlastimil Babka 2611791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2612791cae96SVlastimil Babka if (!populated_zone(zone)) 2613791cae96SVlastimil Babka continue; 2614791cae96SVlastimil Babka 2615791cae96SVlastimil Babka cc.zone = zone; 2616791cae96SVlastimil Babka 26175e1f0f09SMel Gorman compact_zone(&cc, NULL); 2618791cae96SVlastimil Babka 2619791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2620791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2621791cae96SVlastimil Babka } 26227be62de9SRik van Riel } 26237be62de9SRik van Riel 262476ab0f53SMel 
Gorman /* Compact all nodes in the system */ 26257964c06dSJason Liu static void compact_nodes(void) 262676ab0f53SMel Gorman { 262776ab0f53SMel Gorman int nid; 262876ab0f53SMel Gorman 26298575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 26308575ec29SHugh Dickins lru_add_drain_all(); 26318575ec29SHugh Dickins 263276ab0f53SMel Gorman for_each_online_node(nid) 263376ab0f53SMel Gorman compact_node(nid); 263476ab0f53SMel Gorman } 263576ab0f53SMel Gorman 263676ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 263776ab0f53SMel Gorman int sysctl_compact_memory; 263876ab0f53SMel Gorman 2639fec4eb2cSYaowei Bai /* 2640facdaa91SNitin Gupta * Tunable for proactive compaction. It determines how 2641facdaa91SNitin Gupta * aggressively the kernel should compact memory in the 2642facdaa91SNitin Gupta * background. It takes values in the range [0, 100]. 2643facdaa91SNitin Gupta */ 2644d34c0a75SNitin Gupta unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 2645facdaa91SNitin Gupta 2646facdaa91SNitin Gupta /* 2647fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 2648fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 2649fec4eb2cSYaowei Bai */ 265076ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 265132927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos) 265276ab0f53SMel Gorman { 265376ab0f53SMel Gorman if (write) 26547964c06dSJason Liu compact_nodes(); 265576ab0f53SMel Gorman 265676ab0f53SMel Gorman return 0; 265776ab0f53SMel Gorman } 2658ed4a6d7fSMel Gorman 2659ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 266074e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 266110fbcf4cSKay Sievers struct device_attribute *attr, 2662ed4a6d7fSMel Gorman const char *buf, size_t count) 2663ed4a6d7fSMel Gorman { 26648575ec29SHugh Dickins int nid = dev->id; 26658575ec29SHugh Dickins 26668575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 26678575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 26688575ec29SHugh Dickins lru_add_drain_all(); 26698575ec29SHugh Dickins 26708575ec29SHugh Dickins compact_node(nid); 26718575ec29SHugh Dickins } 2672ed4a6d7fSMel Gorman 2673ed4a6d7fSMel Gorman return count; 2674ed4a6d7fSMel Gorman } 26750825a6f9SJoe Perches static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); 2676ed4a6d7fSMel Gorman 2677ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 2678ed4a6d7fSMel Gorman { 267910fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 2680ed4a6d7fSMel Gorman } 2681ed4a6d7fSMel Gorman 2682ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 2683ed4a6d7fSMel Gorman { 268410fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 2685ed4a6d7fSMel Gorman } 2686ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2687ff9543fdSMichal Nazarewicz 2688698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2689698b1b30SVlastimil Babka { 2690172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 2691698b1b30SVlastimil Babka } 2692698b1b30SVlastimil Babka 2693698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 2694698b1b30SVlastimil Babka { 2695698b1b30SVlastimil Babka int zoneid; 2696698b1b30SVlastimil Babka struct zone *zone; 269797a225e6SJoonsoo Kim enum zone_type highest_zoneidx = 
pgdat->kcompactd_highest_zoneidx; 2698698b1b30SVlastimil Babka 269997a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2700698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2701698b1b30SVlastimil Babka 2702698b1b30SVlastimil Babka if (!populated_zone(zone)) 2703698b1b30SVlastimil Babka continue; 2704698b1b30SVlastimil Babka 2705698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 270697a225e6SJoonsoo Kim highest_zoneidx) == COMPACT_CONTINUE) 2707698b1b30SVlastimil Babka return true; 2708698b1b30SVlastimil Babka } 2709698b1b30SVlastimil Babka 2710698b1b30SVlastimil Babka return false; 2711698b1b30SVlastimil Babka } 2712698b1b30SVlastimil Babka 2713698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 2714698b1b30SVlastimil Babka { 2715698b1b30SVlastimil Babka /* 2716698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 2717698b1b30SVlastimil Babka * order is allocatable. 2718698b1b30SVlastimil Babka */ 2719698b1b30SVlastimil Babka int zoneid; 2720698b1b30SVlastimil Babka struct zone *zone; 2721698b1b30SVlastimil Babka struct compact_control cc = { 2722698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 2723dbe2d4e4SMel Gorman .search_order = pgdat->kcompactd_max_order, 272497a225e6SJoonsoo Kim .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2725698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 2726a0647dc9SDavid Rientjes .ignore_skip_hint = false, 272773e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 2728698b1b30SVlastimil Babka }; 2729698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 273097a225e6SJoonsoo Kim cc.highest_zoneidx); 27317f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 2732698b1b30SVlastimil Babka 273397a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2734698b1b30SVlastimil Babka int status; 2735698b1b30SVlastimil Babka 2736698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2737698b1b30SVlastimil Babka if (!populated_zone(zone)) 2738698b1b30SVlastimil Babka continue; 2739698b1b30SVlastimil Babka 2740698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 2741698b1b30SVlastimil Babka continue; 2742698b1b30SVlastimil Babka 2743698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 2744698b1b30SVlastimil Babka COMPACT_CONTINUE) 2745698b1b30SVlastimil Babka continue; 2746698b1b30SVlastimil Babka 2747172400c6SVlastimil Babka if (kthread_should_stop()) 2748172400c6SVlastimil Babka return; 2749a94b5252SYafang Shao 2750a94b5252SYafang Shao cc.zone = zone; 27515e1f0f09SMel Gorman status = compact_zone(&cc, NULL); 2752698b1b30SVlastimil Babka 27537ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 2754698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 2755c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2756698b1b30SVlastimil Babka /* 2757bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 2758bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 2759bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 2760bc3106b2SDavid Rientjes * upcoming deferral. 
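 *
 * (Hedged illustration: an order-0 page freed during compaction can be
 * parked on a per-CPU list where the buddy allocator never sees it, so
 * an otherwise-complete high-order block stays split until
 * drain_all_pages() below returns the page to the zone's free area.)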
2761bc3106b2SDavid Rientjes */ 2762bc3106b2SDavid Rientjes drain_all_pages(zone); 2763bc3106b2SDavid Rientjes 2764bc3106b2SDavid Rientjes /* 2765698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 2766698b1b30SVlastimil Babka * sync direct compaction does. 2767698b1b30SVlastimil Babka */ 2768698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 2769698b1b30SVlastimil Babka } 2770698b1b30SVlastimil Babka 27717f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 27727f354a54SDavid Rientjes cc.total_migrate_scanned); 27737f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 27747f354a54SDavid Rientjes cc.total_free_scanned); 27757f354a54SDavid Rientjes 2776698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2777698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2778698b1b30SVlastimil Babka } 2779698b1b30SVlastimil Babka 2780698b1b30SVlastimil Babka /* 2781698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 278297a225e6SJoonsoo Kim * the requested order/highest_zoneidx in case it was higher/tighter 278397a225e6SJoonsoo Kim * than our current ones 2784698b1b30SVlastimil Babka */ 2785698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2786698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 278797a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 278897a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2789698b1b30SVlastimil Babka } 2790698b1b30SVlastimil Babka 279197a225e6SJoonsoo Kim void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2792698b1b30SVlastimil Babka { 2793698b1b30SVlastimil Babka if (!order) 2794698b1b30SVlastimil Babka return; 2795698b1b30SVlastimil Babka 2796698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2797698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2798698b1b30SVlastimil Babka 279997a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 280097a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2801698b1b30SVlastimil Babka 28026818600fSDavidlohr Bueso /* 28036818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 28046818600fSDavidlohr Bueso * such that wakeups are not missed. 28056818600fSDavidlohr Bueso */ 28066818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2807698b1b30SVlastimil Babka return; 2808698b1b30SVlastimil Babka 2809698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2810698b1b30SVlastimil Babka return; 2811698b1b30SVlastimil Babka 2812698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 281397a225e6SJoonsoo Kim highest_zoneidx); 2814698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2815698b1b30SVlastimil Babka } 2816698b1b30SVlastimil Babka 2817698b1b30SVlastimil Babka /* 2818698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2819698b1b30SVlastimil Babka * from the init process. 
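 *
 * (Back-off arithmetic as a worked example: when a proactive pass makes
 * no progress, the loop below sets proactive_defer to
 * 1 << COMPACT_MAX_DEFER_SHIFT. Assuming the shift's current value of 6
 * and the 500 msec check interval defined earlier, that skips 64
 * timeouts, i.e. roughly 32 seconds between proactive attempts.)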
2820698b1b30SVlastimil Babka */ 2821698b1b30SVlastimil Babka static int kcompactd(void *p) 2822698b1b30SVlastimil Babka { 2823698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p; 2824698b1b30SVlastimil Babka struct task_struct *tsk = current; 2825facdaa91SNitin Gupta unsigned int proactive_defer = 0; 2826698b1b30SVlastimil Babka 2827698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2828698b1b30SVlastimil Babka 2829698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 2830698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 2831698b1b30SVlastimil Babka 2832698b1b30SVlastimil Babka set_freezable(); 2833698b1b30SVlastimil Babka 2834698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 283597a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2836698b1b30SVlastimil Babka 2837698b1b30SVlastimil Babka while (!kthread_should_stop()) { 2838eb414681SJohannes Weiner unsigned long pflags; 2839eb414681SJohannes Weiner 2840698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2841facdaa91SNitin Gupta if (wait_event_freezable_timeout(pgdat->kcompactd_wait, 2842facdaa91SNitin Gupta kcompactd_work_requested(pgdat), 2843facdaa91SNitin Gupta msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) { 2844698b1b30SVlastimil Babka 2845eb414681SJohannes Weiner psi_memstall_enter(&pflags); 2846698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 2847eb414681SJohannes Weiner psi_memstall_leave(&pflags); 2848facdaa91SNitin Gupta continue; 2849facdaa91SNitin Gupta } 2850facdaa91SNitin Gupta 2851facdaa91SNitin Gupta /* kcompactd wait timeout */ 2852facdaa91SNitin Gupta if (should_proactive_compact_node(pgdat)) { 2853facdaa91SNitin Gupta unsigned int prev_score, score; 2854facdaa91SNitin Gupta 2855facdaa91SNitin Gupta if (proactive_defer) { 2856facdaa91SNitin Gupta proactive_defer--; 2857facdaa91SNitin Gupta continue; 2858facdaa91SNitin Gupta } 2859facdaa91SNitin Gupta prev_score = fragmentation_score_node(pgdat); 2860facdaa91SNitin Gupta proactive_compact_node(pgdat); 2861facdaa91SNitin Gupta score = fragmentation_score_node(pgdat); 2862facdaa91SNitin Gupta /* 2863facdaa91SNitin Gupta * Defer proactive compaction if the fragmentation 2864facdaa91SNitin Gupta * score did not go down, i.e. no progress was made. 2865facdaa91SNitin Gupta */ 2866facdaa91SNitin Gupta proactive_defer = score < prev_score ? 2867facdaa91SNitin Gupta 0 : 1 << COMPACT_MAX_DEFER_SHIFT; 2868facdaa91SNitin Gupta } 2869698b1b30SVlastimil Babka } 2870698b1b30SVlastimil Babka 2871698b1b30SVlastimil Babka return 0; 2872698b1b30SVlastimil Babka } 2873698b1b30SVlastimil Babka 2874698b1b30SVlastimil Babka /* 2875698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 2876698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
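 *
 * A hedged sketch of the hotplug pairing (simplified; the real callers
 * live in the memory hotplug code, and the condition is hypothetical):
 *
 *   if (node_memory_came_online)        // hypothetical condition
 *           kcompactd_run(nid);         // no-op if already running
 *   else
 *           kcompactd_stop(nid);        // caller holds mem_hotplug_begin/end()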
2877698b1b30SVlastimil Babka */ 2878698b1b30SVlastimil Babka int kcompactd_run(int nid) 2879698b1b30SVlastimil Babka { 2880698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2881698b1b30SVlastimil Babka int ret = 0; 2882698b1b30SVlastimil Babka 2883698b1b30SVlastimil Babka if (pgdat->kcompactd) 2884698b1b30SVlastimil Babka return 0; 2885698b1b30SVlastimil Babka 2886698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 2887698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 2888698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 2889698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd); 2890698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 2891698b1b30SVlastimil Babka } 2892698b1b30SVlastimil Babka return ret; 2893698b1b30SVlastimil Babka } 2894698b1b30SVlastimil Babka 2895698b1b30SVlastimil Babka /* 2896698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 2897698b1b30SVlastimil Babka * hold mem_hotplug_begin/end(). 2898698b1b30SVlastimil Babka */ 2899698b1b30SVlastimil Babka void kcompactd_stop(int nid) 2900698b1b30SVlastimil Babka { 2901698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 2902698b1b30SVlastimil Babka 2903698b1b30SVlastimil Babka if (kcompactd) { 2904698b1b30SVlastimil Babka kthread_stop(kcompactd); 2905698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 2906698b1b30SVlastimil Babka } 2907698b1b30SVlastimil Babka } 2908698b1b30SVlastimil Babka 2909698b1b30SVlastimil Babka /* 2910698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but 2911698b1b30SVlastimil Babka * not required for correctness. So if the last cpu in a node goes 2912698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2913698b1b30SVlastimil Babka * restore their cpu bindings. 
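 *
 * (Worked example, CPU numbers hypothetical: if CPUs 0-3 make up node 0
 * and all four go offline, node 0's kcompactd may run anywhere; once
 * CPU 2 comes back online, kcompactd_cpu_online() below re-pins the
 * thread to node 0's cpumask.)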
2914698b1b30SVlastimil Babka */ 2915e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 2916698b1b30SVlastimil Babka { 2917698b1b30SVlastimil Babka int nid; 2918698b1b30SVlastimil Babka 2919698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2920698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2921698b1b30SVlastimil Babka const struct cpumask *mask; 2922698b1b30SVlastimil Babka 2923698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2924698b1b30SVlastimil Babka 2925698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2926698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2927698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2928698b1b30SVlastimil Babka } 2929e46b1db2SAnna-Maria Gleixner return 0; 2930698b1b30SVlastimil Babka } 2931698b1b30SVlastimil Babka 2932698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2933698b1b30SVlastimil Babka { 2934698b1b30SVlastimil Babka int nid; 2935e46b1db2SAnna-Maria Gleixner int ret; 2936e46b1db2SAnna-Maria Gleixner 2937e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2938e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 2939e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 2940e46b1db2SAnna-Maria Gleixner if (ret < 0) { 2941e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 2942e46b1db2SAnna-Maria Gleixner return ret; 2943e46b1db2SAnna-Maria Gleixner } 2944698b1b30SVlastimil Babka 2945698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2946698b1b30SVlastimil Babka kcompactd_run(nid); 2947698b1b30SVlastimil Babka return 0; 2948698b1b30SVlastimil Babka } 2949698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2950698b1b30SVlastimil Babka 2951ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 2952
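/*
 * Hedged userspace companion sketch (illustrative, not part of this
 * file): sysctl_compaction_handler() above backs
 * /proc/sys/vm/compact_memory, and sysfs_compact_node() backs
 * /sys/devices/system/node/nodeN/compact when CONFIG_SYSFS and
 * CONFIG_NUMA are set. Writing any value triggers a full compaction
 * pass:
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
 *
 *           if (fd < 0) {
 *                   perror("open"); // needs root and CONFIG_COMPACTION
 *                   return 1;
 *           }
 *           if (write(fd, "1", 1) != 1)
 *                   perror("write");
 *           close(fd);
 *           return 0;
 *   }
 */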