1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2748446bbSMel Gorman /* 3748446bbSMel Gorman * linux/mm/compaction.c 4748446bbSMel Gorman * 5748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 6748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 7748446bbSMel Gorman * lifting 8748446bbSMel Gorman * 9748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 10748446bbSMel Gorman */ 11698b1b30SVlastimil Babka #include <linux/cpu.h> 12748446bbSMel Gorman #include <linux/swap.h> 13748446bbSMel Gorman #include <linux/migrate.h> 14748446bbSMel Gorman #include <linux/compaction.h> 15748446bbSMel Gorman #include <linux/mm_inline.h> 16174cd4b1SIngo Molnar #include <linux/sched/signal.h> 17748446bbSMel Gorman #include <linux/backing-dev.h> 1876ab0f53SMel Gorman #include <linux/sysctl.h> 19ed4a6d7fSMel Gorman #include <linux/sysfs.h> 20194159fbSMinchan Kim #include <linux/page-isolation.h> 21b8c73fc2SAndrey Ryabinin #include <linux/kasan.h> 22698b1b30SVlastimil Babka #include <linux/kthread.h> 23698b1b30SVlastimil Babka #include <linux/freezer.h> 2483358eceSJoonsoo Kim #include <linux/page_owner.h> 25eb414681SJohannes Weiner #include <linux/psi.h> 26748446bbSMel Gorman #include "internal.h" 27748446bbSMel Gorman 28010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 29010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 30010fc29aSMinchan Kim { 31010fc29aSMinchan Kim count_vm_event(item); 32010fc29aSMinchan Kim } 33010fc29aSMinchan Kim 34010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 35010fc29aSMinchan Kim { 36010fc29aSMinchan Kim count_vm_events(item, delta); 37010fc29aSMinchan Kim } 38010fc29aSMinchan Kim #else 39010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 40010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 41010fc29aSMinchan Kim #endif 42010fc29aSMinchan Kim 43ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 44ff9543fdSMichal Nazarewicz 45b7aba698SMel Gorman #define CREATE_TRACE_POINTS 46b7aba698SMel Gorman #include <trace/events/compaction.h> 47b7aba698SMel Gorman 4806b6640aSVlastimil Babka #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) 4906b6640aSVlastimil Babka #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) 5006b6640aSVlastimil Babka #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) 5106b6640aSVlastimil Babka #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) 5206b6640aSVlastimil Babka 53748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 54748446bbSMel Gorman { 55748446bbSMel Gorman struct page *page, *next; 566bace090SVlastimil Babka unsigned long high_pfn = 0; 57748446bbSMel Gorman 58748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 596bace090SVlastimil Babka unsigned long pfn = page_to_pfn(page); 60748446bbSMel Gorman list_del(&page->lru); 61748446bbSMel Gorman __free_page(page); 626bace090SVlastimil Babka if (pfn > high_pfn) 636bace090SVlastimil Babka high_pfn = pfn; 64748446bbSMel Gorman } 65748446bbSMel Gorman 666bace090SVlastimil Babka return high_pfn; 67748446bbSMel Gorman } 68748446bbSMel Gorman 694469ab98SMel Gorman static void split_map_pages(struct list_head *list) 70ff9543fdSMichal Nazarewicz { 7166c64223SJoonsoo Kim unsigned int i, order, nr_pages; 7266c64223SJoonsoo 
Kim struct page *page, *next;
7366c64223SJoonsoo Kim LIST_HEAD(tmp_list);
74ff9543fdSMichal Nazarewicz
7566c64223SJoonsoo Kim list_for_each_entry_safe(page, next, list, lru) {
7666c64223SJoonsoo Kim list_del(&page->lru);
7766c64223SJoonsoo Kim
7866c64223SJoonsoo Kim order = page_private(page);
7966c64223SJoonsoo Kim nr_pages = 1 << order;
8066c64223SJoonsoo Kim
8146f24fd8SJoonsoo Kim post_alloc_hook(page, order, __GFP_MOVABLE);
8266c64223SJoonsoo Kim if (order)
8366c64223SJoonsoo Kim split_page(page, order);
8466c64223SJoonsoo Kim
8566c64223SJoonsoo Kim for (i = 0; i < nr_pages; i++) {
8666c64223SJoonsoo Kim list_add(&page->lru, &tmp_list);
8766c64223SJoonsoo Kim page++;
88ff9543fdSMichal Nazarewicz }
89ff9543fdSMichal Nazarewicz }
90ff9543fdSMichal Nazarewicz
9166c64223SJoonsoo Kim list_splice(&tmp_list, list);
9266c64223SJoonsoo Kim }
9366c64223SJoonsoo Kim
94bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION
9524e2716fSJoonsoo Kim
96bda807d4SMinchan Kim int PageMovable(struct page *page)
97bda807d4SMinchan Kim {
98bda807d4SMinchan Kim struct address_space *mapping;
99bda807d4SMinchan Kim
100bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page);
101bda807d4SMinchan Kim if (!__PageMovable(page))
102bda807d4SMinchan Kim return 0;
103bda807d4SMinchan Kim
104bda807d4SMinchan Kim mapping = page_mapping(page);
105bda807d4SMinchan Kim if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
106bda807d4SMinchan Kim return 1;
107bda807d4SMinchan Kim
108bda807d4SMinchan Kim return 0;
109bda807d4SMinchan Kim }
110bda807d4SMinchan Kim EXPORT_SYMBOL(PageMovable);
111bda807d4SMinchan Kim
112bda807d4SMinchan Kim void __SetPageMovable(struct page *page, struct address_space *mapping)
113bda807d4SMinchan Kim {
114bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page);
115bda807d4SMinchan Kim VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
116bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
117bda807d4SMinchan Kim }
118bda807d4SMinchan Kim EXPORT_SYMBOL(__SetPageMovable);
119bda807d4SMinchan Kim
120bda807d4SMinchan Kim void __ClearPageMovable(struct page *page)
121bda807d4SMinchan Kim {
122bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page);
123bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageMovable(page), page);
124bda807d4SMinchan Kim /*
125bda807d4SMinchan Kim * Clear the registered address_space value while keeping the
126bda807d4SMinchan Kim * PAGE_MAPPING_MOVABLE flag, so the VM can tell the driver released the
127bda807d4SMinchan Kim * page after isolation and migration does not try to put it back.
128bda807d4SMinchan Kim */
129bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)page->mapping &
130bda807d4SMinchan Kim PAGE_MAPPING_MOVABLE);
131bda807d4SMinchan Kim }
132bda807d4SMinchan Kim EXPORT_SYMBOL(__ClearPageMovable);
133bda807d4SMinchan Kim
13424e2716fSJoonsoo Kim /* Do not skip compaction more than 64 times */
13524e2716fSJoonsoo Kim #define COMPACT_MAX_DEFER_SHIFT 6
13624e2716fSJoonsoo Kim
13724e2716fSJoonsoo Kim /*
13824e2716fSJoonsoo Kim * Compaction is deferred when compaction fails to result in a page
13924e2716fSJoonsoo Kim * allocation success.
1 << compact_defer_limit compactions are skipped up 14024e2716fSJoonsoo Kim * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT 14124e2716fSJoonsoo Kim */ 14224e2716fSJoonsoo Kim void defer_compaction(struct zone *zone, int order) 14324e2716fSJoonsoo Kim { 14424e2716fSJoonsoo Kim zone->compact_considered = 0; 14524e2716fSJoonsoo Kim zone->compact_defer_shift++; 14624e2716fSJoonsoo Kim 14724e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 14824e2716fSJoonsoo Kim zone->compact_order_failed = order; 14924e2716fSJoonsoo Kim 15024e2716fSJoonsoo Kim if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) 15124e2716fSJoonsoo Kim zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; 15224e2716fSJoonsoo Kim 15324e2716fSJoonsoo Kim trace_mm_compaction_defer_compaction(zone, order); 15424e2716fSJoonsoo Kim } 15524e2716fSJoonsoo Kim 15624e2716fSJoonsoo Kim /* Returns true if compaction should be skipped this time */ 15724e2716fSJoonsoo Kim bool compaction_deferred(struct zone *zone, int order) 15824e2716fSJoonsoo Kim { 15924e2716fSJoonsoo Kim unsigned long defer_limit = 1UL << zone->compact_defer_shift; 16024e2716fSJoonsoo Kim 16124e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 16224e2716fSJoonsoo Kim return false; 16324e2716fSJoonsoo Kim 16424e2716fSJoonsoo Kim /* Avoid possible overflow */ 16524e2716fSJoonsoo Kim if (++zone->compact_considered > defer_limit) 16624e2716fSJoonsoo Kim zone->compact_considered = defer_limit; 16724e2716fSJoonsoo Kim 16824e2716fSJoonsoo Kim if (zone->compact_considered >= defer_limit) 16924e2716fSJoonsoo Kim return false; 17024e2716fSJoonsoo Kim 17124e2716fSJoonsoo Kim trace_mm_compaction_deferred(zone, order); 17224e2716fSJoonsoo Kim 17324e2716fSJoonsoo Kim return true; 17424e2716fSJoonsoo Kim } 17524e2716fSJoonsoo Kim 17624e2716fSJoonsoo Kim /* 17724e2716fSJoonsoo Kim * Update defer tracking counters after successful compaction of given order, 17824e2716fSJoonsoo Kim * which means an allocation either succeeded (alloc_success == true) or is 17924e2716fSJoonsoo Kim * expected to succeed. 18024e2716fSJoonsoo Kim */ 18124e2716fSJoonsoo Kim void compaction_defer_reset(struct zone *zone, int order, 18224e2716fSJoonsoo Kim bool alloc_success) 18324e2716fSJoonsoo Kim { 18424e2716fSJoonsoo Kim if (alloc_success) { 18524e2716fSJoonsoo Kim zone->compact_considered = 0; 18624e2716fSJoonsoo Kim zone->compact_defer_shift = 0; 18724e2716fSJoonsoo Kim } 18824e2716fSJoonsoo Kim if (order >= zone->compact_order_failed) 18924e2716fSJoonsoo Kim zone->compact_order_failed = order + 1; 19024e2716fSJoonsoo Kim 19124e2716fSJoonsoo Kim trace_mm_compaction_defer_reset(zone, order); 19224e2716fSJoonsoo Kim } 19324e2716fSJoonsoo Kim 19424e2716fSJoonsoo Kim /* Returns true if restarting compaction after many failures */ 19524e2716fSJoonsoo Kim bool compaction_restarting(struct zone *zone, int order) 19624e2716fSJoonsoo Kim { 19724e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 19824e2716fSJoonsoo Kim return false; 19924e2716fSJoonsoo Kim 20024e2716fSJoonsoo Kim return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && 20124e2716fSJoonsoo Kim zone->compact_considered >= 1UL << zone->compact_defer_shift; 20224e2716fSJoonsoo Kim } 20324e2716fSJoonsoo Kim 204bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. 
*/
205bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
206bb13ffebSMel Gorman struct page *page)
207bb13ffebSMel Gorman {
208bb13ffebSMel Gorman if (cc->ignore_skip_hint)
209bb13ffebSMel Gorman return true;
210bb13ffebSMel Gorman
211bb13ffebSMel Gorman return !get_pageblock_skip(page);
212bb13ffebSMel Gorman }
213bb13ffebSMel Gorman
21402333641SVlastimil Babka static void reset_cached_positions(struct zone *zone)
21502333641SVlastimil Babka {
21602333641SVlastimil Babka zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
21702333641SVlastimil Babka zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
218623446e4SJoonsoo Kim zone->compact_cached_free_pfn =
21906b6640aSVlastimil Babka pageblock_start_pfn(zone_end_pfn(zone) - 1);
22002333641SVlastimil Babka }
22102333641SVlastimil Babka
222bb13ffebSMel Gorman /*
223b527cfe5SVlastimil Babka * Compound pages of >= pageblock_order should consistently be skipped until
224b527cfe5SVlastimil Babka * released. It is always pointless to compact pages of such order (if they are
225b527cfe5SVlastimil Babka * migratable), and the pageblocks they occupy cannot contain any free pages.
22621dc7e02SDavid Rientjes */
227b527cfe5SVlastimil Babka static bool pageblock_skip_persistent(struct page *page)
22821dc7e02SDavid Rientjes {
229b527cfe5SVlastimil Babka if (!PageCompound(page))
23021dc7e02SDavid Rientjes return false;
231b527cfe5SVlastimil Babka
232b527cfe5SVlastimil Babka page = compound_head(page);
233b527cfe5SVlastimil Babka
234b527cfe5SVlastimil Babka if (compound_order(page) >= pageblock_order)
23521dc7e02SDavid Rientjes return true;
236b527cfe5SVlastimil Babka
237b527cfe5SVlastimil Babka return false;
23821dc7e02SDavid Rientjes }
23921dc7e02SDavid Rientjes
240e332f741SMel Gorman static bool
241e332f741SMel Gorman __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
242e332f741SMel Gorman bool check_target)
243e332f741SMel Gorman {
244e332f741SMel Gorman struct page *page = pfn_to_online_page(pfn);
2456b0868c8SMel Gorman struct page *block_page;
246e332f741SMel Gorman struct page *end_page;
247e332f741SMel Gorman unsigned long block_pfn;
248e332f741SMel Gorman
249e332f741SMel Gorman if (!page)
250e332f741SMel Gorman return false;
251e332f741SMel Gorman if (zone != page_zone(page))
252e332f741SMel Gorman return false;
253e332f741SMel Gorman if (pageblock_skip_persistent(page))
254e332f741SMel Gorman return false;
255e332f741SMel Gorman
256e332f741SMel Gorman /*
257e332f741SMel Gorman * If skip is already cleared do no further checking once the
258e332f741SMel Gorman * restart points have been set.
259e332f741SMel Gorman */
260e332f741SMel Gorman if (check_source && check_target && !get_pageblock_skip(page))
261e332f741SMel Gorman return true;
262e332f741SMel Gorman
263e332f741SMel Gorman /*
264e332f741SMel Gorman * If clearing skip for the target scanner, do not select a
265e332f741SMel Gorman * non-movable pageblock as the starting point.
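 * (The free scanner normally treats only movable pageblocks as suitable
 *  migration targets - see suitable_migration_target() - so there is little
 *  point in restarting it inside a non-movable block.)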
266e332f741SMel Gorman */ 267e332f741SMel Gorman if (!check_source && check_target && 268e332f741SMel Gorman get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 269e332f741SMel Gorman return false; 270e332f741SMel Gorman 2716b0868c8SMel Gorman /* Ensure the start of the pageblock or zone is online and valid */ 2726b0868c8SMel Gorman block_pfn = pageblock_start_pfn(pfn); 273a2e9a5afSVlastimil Babka block_pfn = max(block_pfn, zone->zone_start_pfn); 274a2e9a5afSVlastimil Babka block_page = pfn_to_online_page(block_pfn); 2756b0868c8SMel Gorman if (block_page) { 2766b0868c8SMel Gorman page = block_page; 2776b0868c8SMel Gorman pfn = block_pfn; 2786b0868c8SMel Gorman } 2796b0868c8SMel Gorman 2806b0868c8SMel Gorman /* Ensure the end of the pageblock or zone is online and valid */ 281a2e9a5afSVlastimil Babka block_pfn = pageblock_end_pfn(pfn) - 1; 2826b0868c8SMel Gorman block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); 2836b0868c8SMel Gorman end_page = pfn_to_online_page(block_pfn); 2846b0868c8SMel Gorman if (!end_page) 2856b0868c8SMel Gorman return false; 2866b0868c8SMel Gorman 287e332f741SMel Gorman /* 288e332f741SMel Gorman * Only clear the hint if a sample indicates there is either a 289e332f741SMel Gorman * free page or an LRU page in the block. One or other condition 290e332f741SMel Gorman * is necessary for the block to be a migration source/target. 291e332f741SMel Gorman */ 292e332f741SMel Gorman do { 293e332f741SMel Gorman if (pfn_valid_within(pfn)) { 294e332f741SMel Gorman if (check_source && PageLRU(page)) { 295e332f741SMel Gorman clear_pageblock_skip(page); 296e332f741SMel Gorman return true; 297e332f741SMel Gorman } 298e332f741SMel Gorman 299e332f741SMel Gorman if (check_target && PageBuddy(page)) { 300e332f741SMel Gorman clear_pageblock_skip(page); 301e332f741SMel Gorman return true; 302e332f741SMel Gorman } 303e332f741SMel Gorman } 304e332f741SMel Gorman 305e332f741SMel Gorman page += (1 << PAGE_ALLOC_COSTLY_ORDER); 306e332f741SMel Gorman pfn += (1 << PAGE_ALLOC_COSTLY_ORDER); 307a2e9a5afSVlastimil Babka } while (page <= end_page); 308e332f741SMel Gorman 309e332f741SMel Gorman return false; 310e332f741SMel Gorman } 311e332f741SMel Gorman 31221dc7e02SDavid Rientjes /* 313bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 314bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 315bb13ffebSMel Gorman * meet. 316bb13ffebSMel Gorman */ 31762997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 318bb13ffebSMel Gorman { 319e332f741SMel Gorman unsigned long migrate_pfn = zone->zone_start_pfn; 3206b0868c8SMel Gorman unsigned long free_pfn = zone_end_pfn(zone) - 1; 321e332f741SMel Gorman unsigned long reset_migrate = free_pfn; 322e332f741SMel Gorman unsigned long reset_free = migrate_pfn; 323e332f741SMel Gorman bool source_set = false; 324e332f741SMel Gorman bool free_set = false; 325e332f741SMel Gorman 326e332f741SMel Gorman if (!zone->compact_blockskip_flush) 327e332f741SMel Gorman return; 328bb13ffebSMel Gorman 32962997027SMel Gorman zone->compact_blockskip_flush = false; 330bb13ffebSMel Gorman 331e332f741SMel Gorman /* 332e332f741SMel Gorman * Walk the zone and update pageblock skip information. Source looks 333e332f741SMel Gorman * for PageLRU while target looks for PageBuddy. When the scanner 334e332f741SMel Gorman * is found, both PageBuddy and PageLRU are checked as the pageblock 335e332f741SMel Gorman * is suitable as both source and target. 
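 * (Until the first suitable block has been found for a scanner, only that
 *  scanner's own check is applied; once its restart point is set,
 *  __reset_isolation_pfn() will accept a block matching either check.)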
336e332f741SMel Gorman */ 337e332f741SMel Gorman for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages, 338e332f741SMel Gorman free_pfn -= pageblock_nr_pages) { 339bb13ffebSMel Gorman cond_resched(); 340bb13ffebSMel Gorman 341e332f741SMel Gorman /* Update the migrate PFN */ 342e332f741SMel Gorman if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && 343e332f741SMel Gorman migrate_pfn < reset_migrate) { 344e332f741SMel Gorman source_set = true; 345e332f741SMel Gorman reset_migrate = migrate_pfn; 346e332f741SMel Gorman zone->compact_init_migrate_pfn = reset_migrate; 347e332f741SMel Gorman zone->compact_cached_migrate_pfn[0] = reset_migrate; 348e332f741SMel Gorman zone->compact_cached_migrate_pfn[1] = reset_migrate; 349bb13ffebSMel Gorman } 35002333641SVlastimil Babka 351e332f741SMel Gorman /* Update the free PFN */ 352e332f741SMel Gorman if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && 353e332f741SMel Gorman free_pfn > reset_free) { 354e332f741SMel Gorman free_set = true; 355e332f741SMel Gorman reset_free = free_pfn; 356e332f741SMel Gorman zone->compact_init_free_pfn = reset_free; 357e332f741SMel Gorman zone->compact_cached_free_pfn = reset_free; 358e332f741SMel Gorman } 359e332f741SMel Gorman } 360e332f741SMel Gorman 361e332f741SMel Gorman /* Leave no distance if no suitable block was reset */ 362e332f741SMel Gorman if (reset_migrate >= reset_free) { 363e332f741SMel Gorman zone->compact_cached_migrate_pfn[0] = migrate_pfn; 364e332f741SMel Gorman zone->compact_cached_migrate_pfn[1] = migrate_pfn; 365e332f741SMel Gorman zone->compact_cached_free_pfn = free_pfn; 366e332f741SMel Gorman } 367bb13ffebSMel Gorman } 368bb13ffebSMel Gorman 36962997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 37062997027SMel Gorman { 37162997027SMel Gorman int zoneid; 37262997027SMel Gorman 37362997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 37462997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 37562997027SMel Gorman if (!populated_zone(zone)) 37662997027SMel Gorman continue; 37762997027SMel Gorman 37862997027SMel Gorman /* Only flush if a full compaction finished recently */ 37962997027SMel Gorman if (zone->compact_blockskip_flush) 38062997027SMel Gorman __reset_isolation_suitable(zone); 38162997027SMel Gorman } 38262997027SMel Gorman } 38362997027SMel Gorman 384bb13ffebSMel Gorman /* 385e380bebeSMel Gorman * Sets the pageblock skip bit if it was clear. Note that this is a hint as 386e380bebeSMel Gorman * locks are not required for read/writers. Returns true if it was already set. 
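 * Only a pageblock-aligned pfn is considered; any other pfn (or a call with
 * cc->ignore_skip_hint set) simply returns false.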
387e380bebeSMel Gorman */ 388e380bebeSMel Gorman static bool test_and_set_skip(struct compact_control *cc, struct page *page, 389e380bebeSMel Gorman unsigned long pfn) 390e380bebeSMel Gorman { 391e380bebeSMel Gorman bool skip; 392e380bebeSMel Gorman 393e380bebeSMel Gorman /* Do no update if skip hint is being ignored */ 394e380bebeSMel Gorman if (cc->ignore_skip_hint) 395e380bebeSMel Gorman return false; 396e380bebeSMel Gorman 397e380bebeSMel Gorman if (!IS_ALIGNED(pfn, pageblock_nr_pages)) 398e380bebeSMel Gorman return false; 399e380bebeSMel Gorman 400e380bebeSMel Gorman skip = get_pageblock_skip(page); 401e380bebeSMel Gorman if (!skip && !cc->no_set_skip_hint) 402e380bebeSMel Gorman set_pageblock_skip(page); 403e380bebeSMel Gorman 404e380bebeSMel Gorman return skip; 405e380bebeSMel Gorman } 406e380bebeSMel Gorman 407e380bebeSMel Gorman static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 408e380bebeSMel Gorman { 409e380bebeSMel Gorman struct zone *zone = cc->zone; 410e380bebeSMel Gorman 411e380bebeSMel Gorman pfn = pageblock_end_pfn(pfn); 412e380bebeSMel Gorman 413e380bebeSMel Gorman /* Set for isolation rather than compaction */ 414e380bebeSMel Gorman if (cc->no_set_skip_hint) 415e380bebeSMel Gorman return; 416e380bebeSMel Gorman 417e380bebeSMel Gorman if (pfn > zone->compact_cached_migrate_pfn[0]) 418e380bebeSMel Gorman zone->compact_cached_migrate_pfn[0] = pfn; 419e380bebeSMel Gorman if (cc->mode != MIGRATE_ASYNC && 420e380bebeSMel Gorman pfn > zone->compact_cached_migrate_pfn[1]) 421e380bebeSMel Gorman zone->compact_cached_migrate_pfn[1] = pfn; 422e380bebeSMel Gorman } 423e380bebeSMel Gorman 424e380bebeSMel Gorman /* 425bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 42662997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
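 * Note that only the free scanner's cached pfn is pulled back here; the
 * migrate scanner's cached positions are maintained by update_cached_migrate().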
427bb13ffebSMel Gorman */ 428c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 429d097a6f6SMel Gorman struct page *page, unsigned long pfn) 430bb13ffebSMel Gorman { 431c89511abSMel Gorman struct zone *zone = cc->zone; 4326815bf3fSJoonsoo Kim 4332583d671SVlastimil Babka if (cc->no_set_skip_hint) 4346815bf3fSJoonsoo Kim return; 4356815bf3fSJoonsoo Kim 436bb13ffebSMel Gorman if (!page) 437bb13ffebSMel Gorman return; 438bb13ffebSMel Gorman 439bb13ffebSMel Gorman set_pageblock_skip(page); 440c89511abSMel Gorman 44135979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 44235979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 443c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 444c89511abSMel Gorman } 445bb13ffebSMel Gorman #else 446bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 447bb13ffebSMel Gorman struct page *page) 448bb13ffebSMel Gorman { 449bb13ffebSMel Gorman return true; 450bb13ffebSMel Gorman } 451bb13ffebSMel Gorman 452b527cfe5SVlastimil Babka static inline bool pageblock_skip_persistent(struct page *page) 45321dc7e02SDavid Rientjes { 45421dc7e02SDavid Rientjes return false; 45521dc7e02SDavid Rientjes } 45621dc7e02SDavid Rientjes 45721dc7e02SDavid Rientjes static inline void update_pageblock_skip(struct compact_control *cc, 458d097a6f6SMel Gorman struct page *page, unsigned long pfn) 459bb13ffebSMel Gorman { 460bb13ffebSMel Gorman } 461e380bebeSMel Gorman 462e380bebeSMel Gorman static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 463e380bebeSMel Gorman { 464e380bebeSMel Gorman } 465e380bebeSMel Gorman 466e380bebeSMel Gorman static bool test_and_set_skip(struct compact_control *cc, struct page *page, 467e380bebeSMel Gorman unsigned long pfn) 468e380bebeSMel Gorman { 469e380bebeSMel Gorman return false; 470e380bebeSMel Gorman } 471bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 472bb13ffebSMel Gorman 4731f9efdefSVlastimil Babka /* 4748b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 475cb2dcaf0SMel Gorman * very heavily contended. For async compaction, trylock and record if the 476cb2dcaf0SMel Gorman * lock is contended. The lock will still be acquired but compaction will 477cb2dcaf0SMel Gorman * abort when the current block is finished regardless of success rate. 478cb2dcaf0SMel Gorman * Sync compaction acquires the lock. 4798b44d279SVlastimil Babka * 480cb2dcaf0SMel Gorman * Always returns true which makes it easier to track lock state in callers. 4811f9efdefSVlastimil Babka */ 482cb2dcaf0SMel Gorman static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags, 4838b44d279SVlastimil Babka struct compact_control *cc) 4848b44d279SVlastimil Babka { 485cb2dcaf0SMel Gorman /* Track if the lock is contended in async mode */ 486cb2dcaf0SMel Gorman if (cc->mode == MIGRATE_ASYNC && !cc->contended) { 487cb2dcaf0SMel Gorman if (spin_trylock_irqsave(lock, *flags)) 488cb2dcaf0SMel Gorman return true; 489cb2dcaf0SMel Gorman 490c3486f53SVlastimil Babka cc->contended = true; 4918b44d279SVlastimil Babka } 4921f9efdefSVlastimil Babka 493cb2dcaf0SMel Gorman spin_lock_irqsave(lock, *flags); 4948b44d279SVlastimil Babka return true; 4952a1402aaSMel Gorman } 4962a1402aaSMel Gorman 49785aa125fSMichal Nazarewicz /* 498c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 4998b44d279SVlastimil Babka * very heavily contended. 
The lock should be periodically unlocked to avoid 5008b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 5018b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 5028b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 5038b44d279SVlastimil Babka * aborts. Sync compaction schedules. 5048b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 5058b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 506c67fe375SMel Gorman * 5078b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 5088b44d279SVlastimil Babka * async compaction due to need_resched() 5098b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 5108b44d279SVlastimil Babka * scheduled) 511c67fe375SMel Gorman */ 5128b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 5138b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 514c67fe375SMel Gorman { 5158b44d279SVlastimil Babka if (*locked) { 5168b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 5178b44d279SVlastimil Babka *locked = false; 518c67fe375SMel Gorman } 519c67fe375SMel Gorman 5208b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 521c3486f53SVlastimil Babka cc->contended = true; 5228b44d279SVlastimil Babka return true; 5238b44d279SVlastimil Babka } 5248b44d279SVlastimil Babka 525cf66f070SMel Gorman cond_resched(); 526be976572SVlastimil Babka 527be976572SVlastimil Babka return false; 528be976572SVlastimil Babka } 529be976572SVlastimil Babka 530c67fe375SMel Gorman /* 5319e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 5329e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 5339e4be470SJerome Marchand * (even though it may still end up isolating some pages). 53485aa125fSMichal Nazarewicz */ 535f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 536e14c720eSVlastimil Babka unsigned long *start_pfn, 53785aa125fSMichal Nazarewicz unsigned long end_pfn, 53885aa125fSMichal Nazarewicz struct list_head *freelist, 5394fca9730SMel Gorman unsigned int stride, 54085aa125fSMichal Nazarewicz bool strict) 541748446bbSMel Gorman { 542b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 543d097a6f6SMel Gorman struct page *cursor; 544b8b2d825SXiubo Li unsigned long flags = 0; 545f40d1e42SMel Gorman bool locked = false; 546e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 54766c64223SJoonsoo Kim unsigned int order; 548748446bbSMel Gorman 5494fca9730SMel Gorman /* Strict mode is for isolation, speed is secondary */ 5504fca9730SMel Gorman if (strict) 5514fca9730SMel Gorman stride = 1; 5524fca9730SMel Gorman 553748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 554748446bbSMel Gorman 555f40d1e42SMel Gorman /* Isolate free pages. */ 5564fca9730SMel Gorman for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) { 55766c64223SJoonsoo Kim int isolated; 558748446bbSMel Gorman struct page *page = cursor; 559748446bbSMel Gorman 5608b44d279SVlastimil Babka /* 5618b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 5628b44d279SVlastimil Babka * contention, to give chance to IRQs. 
Abort if fatal signal 5638b44d279SVlastimil Babka * pending or async compaction detects need_resched() 5648b44d279SVlastimil Babka */ 5658b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 5668b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 5678b44d279SVlastimil Babka &locked, cc)) 5688b44d279SVlastimil Babka break; 5698b44d279SVlastimil Babka 570b7aba698SMel Gorman nr_scanned++; 571f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 5722af120bcSLaura Abbott goto isolate_fail; 5732af120bcSLaura Abbott 5749fcd6d2eSVlastimil Babka /* 5759fcd6d2eSVlastimil Babka * For compound pages such as THP and hugetlbfs, we can save 5769fcd6d2eSVlastimil Babka * potentially a lot of iterations if we skip them at once. 5779fcd6d2eSVlastimil Babka * The check is racy, but we can consider only valid values 5789fcd6d2eSVlastimil Babka * and the only danger is skipping too much. 5799fcd6d2eSVlastimil Babka */ 5809fcd6d2eSVlastimil Babka if (PageCompound(page)) { 58121dc7e02SDavid Rientjes const unsigned int order = compound_order(page); 5829fcd6d2eSVlastimil Babka 583d3c85badSVlastimil Babka if (likely(order < MAX_ORDER)) { 58421dc7e02SDavid Rientjes blockpfn += (1UL << order) - 1; 58521dc7e02SDavid Rientjes cursor += (1UL << order) - 1; 5869fcd6d2eSVlastimil Babka } 5879fcd6d2eSVlastimil Babka goto isolate_fail; 5889fcd6d2eSVlastimil Babka } 5899fcd6d2eSVlastimil Babka 590f40d1e42SMel Gorman if (!PageBuddy(page)) 5912af120bcSLaura Abbott goto isolate_fail; 592f40d1e42SMel Gorman 593f40d1e42SMel Gorman /* 59469b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 59569b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 59669b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 59769b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 59869b7189fSVlastimil Babka * recheck as well. 
59969b7189fSVlastimil Babka */ 60069b7189fSVlastimil Babka if (!locked) { 601cb2dcaf0SMel Gorman locked = compact_lock_irqsave(&cc->zone->lock, 6028b44d279SVlastimil Babka &flags, cc); 603f40d1e42SMel Gorman 604f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 605f40d1e42SMel Gorman if (!PageBuddy(page)) 6062af120bcSLaura Abbott goto isolate_fail; 60769b7189fSVlastimil Babka } 608748446bbSMel Gorman 60966c64223SJoonsoo Kim /* Found a free page, will break it into order-0 pages */ 61066c64223SJoonsoo Kim order = page_order(page); 61166c64223SJoonsoo Kim isolated = __isolate_free_page(page, order); 612a4f04f2cSDavid Rientjes if (!isolated) 613a4f04f2cSDavid Rientjes break; 61466c64223SJoonsoo Kim set_page_private(page, order); 615a4f04f2cSDavid Rientjes 616748446bbSMel Gorman total_isolated += isolated; 617a4f04f2cSDavid Rientjes cc->nr_freepages += isolated; 61866c64223SJoonsoo Kim list_add_tail(&page->lru, freelist); 61966c64223SJoonsoo Kim 620a4f04f2cSDavid Rientjes if (!strict && cc->nr_migratepages <= cc->nr_freepages) { 621932ff6bbSJoonsoo Kim blockpfn += isolated; 622932ff6bbSJoonsoo Kim break; 623932ff6bbSJoonsoo Kim } 624a4f04f2cSDavid Rientjes /* Advance to the end of split page */ 625748446bbSMel Gorman blockpfn += isolated - 1; 626748446bbSMel Gorman cursor += isolated - 1; 6272af120bcSLaura Abbott continue; 6282af120bcSLaura Abbott 6292af120bcSLaura Abbott isolate_fail: 6302af120bcSLaura Abbott if (strict) 6312af120bcSLaura Abbott break; 6322af120bcSLaura Abbott else 6332af120bcSLaura Abbott continue; 6342af120bcSLaura Abbott 635748446bbSMel Gorman } 636748446bbSMel Gorman 637a4f04f2cSDavid Rientjes if (locked) 638a4f04f2cSDavid Rientjes spin_unlock_irqrestore(&cc->zone->lock, flags); 639a4f04f2cSDavid Rientjes 6409fcd6d2eSVlastimil Babka /* 6419fcd6d2eSVlastimil Babka * There is a tiny chance that we have read bogus compound_order(), 6429fcd6d2eSVlastimil Babka * so be careful to not go outside of the pageblock. 6439fcd6d2eSVlastimil Babka */ 6449fcd6d2eSVlastimil Babka if (unlikely(blockpfn > end_pfn)) 6459fcd6d2eSVlastimil Babka blockpfn = end_pfn; 6469fcd6d2eSVlastimil Babka 647e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 648e34d85f0SJoonsoo Kim nr_scanned, total_isolated); 649e34d85f0SJoonsoo Kim 650e14c720eSVlastimil Babka /* Record how far we have got within the block */ 651e14c720eSVlastimil Babka *start_pfn = blockpfn; 652e14c720eSVlastimil Babka 653f40d1e42SMel Gorman /* 654f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 655f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 656f40d1e42SMel Gorman * returned and CMA will fail. 657f40d1e42SMel Gorman */ 6582af120bcSLaura Abbott if (strict && blockpfn < end_pfn) 659f40d1e42SMel Gorman total_isolated = 0; 660f40d1e42SMel Gorman 6617f354a54SDavid Rientjes cc->total_free_scanned += nr_scanned; 662397487dbSMel Gorman if (total_isolated) 663010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated); 664748446bbSMel Gorman return total_isolated; 665748446bbSMel Gorman } 666748446bbSMel Gorman 66785aa125fSMichal Nazarewicz /** 66885aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 669e8b098fcSMike Rapoport * @cc: Compaction control structure. 67085aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 67185aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 
67285aa125fSMichal Nazarewicz *
67385aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the
67485aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors and cause the function to
67585aa125fSMichal Nazarewicz * undo its actions and return zero.
67685aa125fSMichal Nazarewicz *
67785aa125fSMichal Nazarewicz * Otherwise, the function returns the one-past-the-last PFN of the isolated
67885aa125fSMichal Nazarewicz * pages (which may be greater than end_pfn if the end fell in the middle of
67985aa125fSMichal Nazarewicz * a free page).
68085aa125fSMichal Nazarewicz */
681ff9543fdSMichal Nazarewicz unsigned long
682bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
683bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn)
68485aa125fSMichal Nazarewicz {
685e1409c32SJoonsoo Kim unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
68685aa125fSMichal Nazarewicz LIST_HEAD(freelist);
68785aa125fSMichal Nazarewicz
6887d49d886SVlastimil Babka pfn = start_pfn;
68906b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn);
690e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn)
691e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn;
69206b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn);
6937d49d886SVlastimil Babka
6947d49d886SVlastimil Babka for (; pfn < end_pfn; pfn += isolated,
695e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn,
6967d49d886SVlastimil Babka block_end_pfn += pageblock_nr_pages) {
697e14c720eSVlastimil Babka /* Protect pfn from changing by isolate_freepages_block */
698e14c720eSVlastimil Babka unsigned long isolate_start_pfn = pfn;
6997d49d886SVlastimil Babka
70085aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn);
70185aa125fSMichal Nazarewicz
70258420016SJoonsoo Kim /*
70358420016SJoonsoo Kim * pfn could pass block_end_pfn if the isolated free page
70458420016SJoonsoo Kim * is larger than a pageblock. In this case, adjust the
70558420016SJoonsoo Kim * scanning range to the correct block.
70658420016SJoonsoo Kim */
70758420016SJoonsoo Kim if (pfn >= block_end_pfn) {
70806b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn);
70906b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn);
71058420016SJoonsoo Kim block_end_pfn = min(block_end_pfn, end_pfn);
71158420016SJoonsoo Kim }
71258420016SJoonsoo Kim
713e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn,
714e1409c32SJoonsoo Kim block_end_pfn, cc->zone))
7157d49d886SVlastimil Babka break;
7167d49d886SVlastimil Babka
717e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn,
7184fca9730SMel Gorman block_end_pfn, &freelist, 0, true);
71985aa125fSMichal Nazarewicz
72085aa125fSMichal Nazarewicz /*
72185aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if
72285aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or
72385aa125fSMichal Nazarewicz * non-free pages).
72485aa125fSMichal Nazarewicz */
72585aa125fSMichal Nazarewicz if (!isolated)
72685aa125fSMichal Nazarewicz break;
72785aa125fSMichal Nazarewicz
72885aa125fSMichal Nazarewicz /*
72985aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) *
73085aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order
73185aa125fSMichal Nazarewicz * page may span two pageblocks).
73285aa125fSMichal Nazarewicz */ 73385aa125fSMichal Nazarewicz } 73485aa125fSMichal Nazarewicz 73566c64223SJoonsoo Kim /* __isolate_free_page() does not map the pages */ 7364469ab98SMel Gorman split_map_pages(&freelist); 73785aa125fSMichal Nazarewicz 73885aa125fSMichal Nazarewicz if (pfn < end_pfn) { 73985aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 74085aa125fSMichal Nazarewicz release_freepages(&freelist); 74185aa125fSMichal Nazarewicz return 0; 74285aa125fSMichal Nazarewicz } 74385aa125fSMichal Nazarewicz 74485aa125fSMichal Nazarewicz /* We don't use freelists for anything. */ 74585aa125fSMichal Nazarewicz return pfn; 74685aa125fSMichal Nazarewicz } 74785aa125fSMichal Nazarewicz 748748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 7495f438eeeSAndrey Ryabinin static bool too_many_isolated(pg_data_t *pgdat) 750748446bbSMel Gorman { 751bc693045SMinchan Kim unsigned long active, inactive, isolated; 752748446bbSMel Gorman 7535f438eeeSAndrey Ryabinin inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + 7545f438eeeSAndrey Ryabinin node_page_state(pgdat, NR_INACTIVE_ANON); 7555f438eeeSAndrey Ryabinin active = node_page_state(pgdat, NR_ACTIVE_FILE) + 7565f438eeeSAndrey Ryabinin node_page_state(pgdat, NR_ACTIVE_ANON); 7575f438eeeSAndrey Ryabinin isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + 7585f438eeeSAndrey Ryabinin node_page_state(pgdat, NR_ISOLATED_ANON); 759748446bbSMel Gorman 760bc693045SMinchan Kim return isolated > (inactive + active) / 2; 761748446bbSMel Gorman } 762748446bbSMel Gorman 7632fe86e00SMichal Nazarewicz /** 764edc2ca61SVlastimil Babka * isolate_migratepages_block() - isolate all migrate-able pages within 765edc2ca61SVlastimil Babka * a single pageblock 7662fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 767edc2ca61SVlastimil Babka * @low_pfn: The first PFN to isolate 768edc2ca61SVlastimil Babka * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 769edc2ca61SVlastimil Babka * @isolate_mode: Isolation mode to be used. 7702fe86e00SMichal Nazarewicz * 7712fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 772edc2ca61SVlastimil Babka * [low_pfn, end_pfn). The range is expected to be within same pageblock. 773edc2ca61SVlastimil Babka * Returns zero if there is a fatal signal pending, otherwise PFN of the 774edc2ca61SVlastimil Babka * first page that was not scanned (which may be both less, equal to or more 775edc2ca61SVlastimil Babka * than end_pfn). 7762fe86e00SMichal Nazarewicz * 777edc2ca61SVlastimil Babka * The pages are isolated on cc->migratepages list (not required to be empty), 778edc2ca61SVlastimil Babka * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field 779edc2ca61SVlastimil Babka * is neither read nor updated. 
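 * Isolation normally stops once cc->nr_migratepages reaches
 * COMPACT_CLUSTER_MAX; a block that is being rescanned, or whose lock is
 * contended, may be scanned to its end regardless.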
780748446bbSMel Gorman */ 781edc2ca61SVlastimil Babka static unsigned long 782edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 783edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 784748446bbSMel Gorman { 7855f438eeeSAndrey Ryabinin pg_data_t *pgdat = cc->zone->zone_pgdat; 786b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 787fa9add64SHugh Dickins struct lruvec *lruvec; 788b8b2d825SXiubo Li unsigned long flags = 0; 7892a1402aaSMel Gorman bool locked = false; 790bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 791e34d85f0SJoonsoo Kim unsigned long start_pfn = low_pfn; 792fdd048e1SVlastimil Babka bool skip_on_failure = false; 793fdd048e1SVlastimil Babka unsigned long next_skip_pfn = 0; 794e380bebeSMel Gorman bool skip_updated = false; 795748446bbSMel Gorman 796748446bbSMel Gorman /* 797748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 798748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 799748446bbSMel Gorman * delay for some time until fewer pages are isolated 800748446bbSMel Gorman */ 8015f438eeeSAndrey Ryabinin while (unlikely(too_many_isolated(pgdat))) { 802f9e35b3bSMel Gorman /* async migration should just abort */ 803e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 8042fe86e00SMichal Nazarewicz return 0; 805f9e35b3bSMel Gorman 806748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 807748446bbSMel Gorman 808748446bbSMel Gorman if (fatal_signal_pending(current)) 8092fe86e00SMichal Nazarewicz return 0; 810748446bbSMel Gorman } 811748446bbSMel Gorman 812cf66f070SMel Gorman cond_resched(); 813aeef4b83SDavid Rientjes 814fdd048e1SVlastimil Babka if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { 815fdd048e1SVlastimil Babka skip_on_failure = true; 816fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 817fdd048e1SVlastimil Babka } 818fdd048e1SVlastimil Babka 819748446bbSMel Gorman /* Time to isolate some pages for migration */ 820748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 82129c0dde8SVlastimil Babka 822fdd048e1SVlastimil Babka if (skip_on_failure && low_pfn >= next_skip_pfn) { 823fdd048e1SVlastimil Babka /* 824fdd048e1SVlastimil Babka * We have isolated all migration candidates in the 825fdd048e1SVlastimil Babka * previous order-aligned block, and did not skip it due 826fdd048e1SVlastimil Babka * to failure. We should migrate the pages now and 827fdd048e1SVlastimil Babka * hopefully succeed compaction. 828fdd048e1SVlastimil Babka */ 829fdd048e1SVlastimil Babka if (nr_isolated) 830fdd048e1SVlastimil Babka break; 831fdd048e1SVlastimil Babka 832fdd048e1SVlastimil Babka /* 833fdd048e1SVlastimil Babka * We failed to isolate in the previous order-aligned 834fdd048e1SVlastimil Babka * block. Set the new boundary to the end of the 835fdd048e1SVlastimil Babka * current block. Note we can't simply increase 836fdd048e1SVlastimil Babka * next_skip_pfn by 1 << order, as low_pfn might have 837fdd048e1SVlastimil Babka * been incremented by a higher number due to skipping 838fdd048e1SVlastimil Babka * a compound or a high-order buddy page in the 839fdd048e1SVlastimil Babka * previous loop iteration. 
840fdd048e1SVlastimil Babka */ 841fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 842fdd048e1SVlastimil Babka } 843fdd048e1SVlastimil Babka 8448b44d279SVlastimil Babka /* 8458b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 846670105a2SMel Gorman * contention, to give chance to IRQs. Abort completely if 847670105a2SMel Gorman * a fatal signal is pending. 8488b44d279SVlastimil Babka */ 8498b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 850f4b7e272SAndrey Ryabinin && compact_unlock_should_abort(&pgdat->lru_lock, 851670105a2SMel Gorman flags, &locked, cc)) { 852670105a2SMel Gorman low_pfn = 0; 853670105a2SMel Gorman goto fatal_pending; 854670105a2SMel Gorman } 855b2eef8c0SAndrea Arcangeli 856748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 857fdd048e1SVlastimil Babka goto isolate_fail; 858b7aba698SMel Gorman nr_scanned++; 859748446bbSMel Gorman 860748446bbSMel Gorman page = pfn_to_page(low_pfn); 861dc908600SMel Gorman 862e380bebeSMel Gorman /* 863e380bebeSMel Gorman * Check if the pageblock has already been marked skipped. 864e380bebeSMel Gorman * Only the aligned PFN is checked as the caller isolates 865e380bebeSMel Gorman * COMPACT_CLUSTER_MAX at a time so the second call must 866e380bebeSMel Gorman * not falsely conclude that the block should be skipped. 867e380bebeSMel Gorman */ 868e380bebeSMel Gorman if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) { 869e380bebeSMel Gorman if (!cc->ignore_skip_hint && get_pageblock_skip(page)) { 870e380bebeSMel Gorman low_pfn = end_pfn; 871e380bebeSMel Gorman goto isolate_abort; 872e380bebeSMel Gorman } 873bb13ffebSMel Gorman valid_page = page; 874e380bebeSMel Gorman } 875bb13ffebSMel Gorman 876c122b208SJoonsoo Kim /* 87799c0fd5eSVlastimil Babka * Skip if free. We read page order here without zone lock 87899c0fd5eSVlastimil Babka * which is generally unsafe, but the race window is small and 87999c0fd5eSVlastimil Babka * the worst thing that can happen is that we skip some 88099c0fd5eSVlastimil Babka * potential isolation targets. 8816c14466cSMel Gorman */ 88299c0fd5eSVlastimil Babka if (PageBuddy(page)) { 88399c0fd5eSVlastimil Babka unsigned long freepage_order = page_order_unsafe(page); 88499c0fd5eSVlastimil Babka 88599c0fd5eSVlastimil Babka /* 88699c0fd5eSVlastimil Babka * Without lock, we cannot be sure that what we got is 88799c0fd5eSVlastimil Babka * a valid page order. Consider only values in the 88899c0fd5eSVlastimil Babka * valid order range to prevent low_pfn overflow. 88999c0fd5eSVlastimil Babka */ 89099c0fd5eSVlastimil Babka if (freepage_order > 0 && freepage_order < MAX_ORDER) 89199c0fd5eSVlastimil Babka low_pfn += (1UL << freepage_order) - 1; 892748446bbSMel Gorman continue; 89399c0fd5eSVlastimil Babka } 894748446bbSMel Gorman 8959927af74SMel Gorman /* 89629c0dde8SVlastimil Babka * Regardless of being on LRU, compound pages such as THP and 897*1da2f328SRik van Riel * hugetlbfs are not to be compacted unless we are attempting 898*1da2f328SRik van Riel * an allocation much larger than the huge page size (eg CMA). 899*1da2f328SRik van Riel * We can potentially save a lot of iterations if we skip them 900*1da2f328SRik van Riel * at once. The check is racy, but we can consider only valid 901*1da2f328SRik van Riel * values and the only danger is skipping too much. 
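 * (For example, on x86 with 4K base pages a 2MB THP is order 9, so low_pfn
 *  advances by 511 in a single iteration instead of scanning 512 pfns.)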
902bc835011SAndrea Arcangeli */ 903*1da2f328SRik van Riel if (PageCompound(page) && !cc->alloc_contig) { 90421dc7e02SDavid Rientjes const unsigned int order = compound_order(page); 90529c0dde8SVlastimil Babka 906d3c85badSVlastimil Babka if (likely(order < MAX_ORDER)) 90721dc7e02SDavid Rientjes low_pfn += (1UL << order) - 1; 908fdd048e1SVlastimil Babka goto isolate_fail; 9092a1402aaSMel Gorman } 9102a1402aaSMel Gorman 911bda807d4SMinchan Kim /* 912bda807d4SMinchan Kim * Check may be lockless but that's ok as we recheck later. 913bda807d4SMinchan Kim * It's possible to migrate LRU and non-lru movable pages. 914bda807d4SMinchan Kim * Skip any other type of page 915bda807d4SMinchan Kim */ 916bda807d4SMinchan Kim if (!PageLRU(page)) { 917bda807d4SMinchan Kim /* 918bda807d4SMinchan Kim * __PageMovable can return false positive so we need 919bda807d4SMinchan Kim * to verify it under page_lock. 920bda807d4SMinchan Kim */ 921bda807d4SMinchan Kim if (unlikely(__PageMovable(page)) && 922bda807d4SMinchan Kim !PageIsolated(page)) { 923bda807d4SMinchan Kim if (locked) { 924f4b7e272SAndrey Ryabinin spin_unlock_irqrestore(&pgdat->lru_lock, 925bda807d4SMinchan Kim flags); 926bda807d4SMinchan Kim locked = false; 927bda807d4SMinchan Kim } 928bda807d4SMinchan Kim 9299e5bcd61SYisheng Xie if (!isolate_movable_page(page, isolate_mode)) 930bda807d4SMinchan Kim goto isolate_success; 931bda807d4SMinchan Kim } 932bda807d4SMinchan Kim 933fdd048e1SVlastimil Babka goto isolate_fail; 934bda807d4SMinchan Kim } 93529c0dde8SVlastimil Babka 936119d6d59SDavid Rientjes /* 937119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 938119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 939119d6d59SDavid Rientjes * admittedly racy check. 940119d6d59SDavid Rientjes */ 941119d6d59SDavid Rientjes if (!page_mapping(page) && 942119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 943fdd048e1SVlastimil Babka goto isolate_fail; 944119d6d59SDavid Rientjes 94573e64c51SMichal Hocko /* 94673e64c51SMichal Hocko * Only allow to migrate anonymous pages in GFP_NOFS context 94773e64c51SMichal Hocko * because those do not depend on fs locks. 94873e64c51SMichal Hocko */ 94973e64c51SMichal Hocko if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) 95073e64c51SMichal Hocko goto isolate_fail; 95173e64c51SMichal Hocko 95269b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 95369b7189fSVlastimil Babka if (!locked) { 954f4b7e272SAndrey Ryabinin locked = compact_lock_irqsave(&pgdat->lru_lock, 9558b44d279SVlastimil Babka &flags, cc); 956e380bebeSMel Gorman 957e380bebeSMel Gorman /* Try get exclusive access under lock */ 958e380bebeSMel Gorman if (!skip_updated) { 959e380bebeSMel Gorman skip_updated = true; 960e380bebeSMel Gorman if (test_and_set_skip(cc, page, low_pfn)) 961e380bebeSMel Gorman goto isolate_abort; 962e380bebeSMel Gorman } 9632a1402aaSMel Gorman 96429c0dde8SVlastimil Babka /* Recheck PageLRU and PageCompound under lock */ 9652a1402aaSMel Gorman if (!PageLRU(page)) 966fdd048e1SVlastimil Babka goto isolate_fail; 96729c0dde8SVlastimil Babka 96829c0dde8SVlastimil Babka /* 96929c0dde8SVlastimil Babka * Page become compound since the non-locked check, 97029c0dde8SVlastimil Babka * and it's on LRU. It can only be a THP so the order 97129c0dde8SVlastimil Babka * is safe to read and it's 0 for tail pages. 
97229c0dde8SVlastimil Babka */ 973*1da2f328SRik van Riel if (unlikely(PageCompound(page) && !cc->alloc_contig)) { 974d8c6546bSMatthew Wilcox (Oracle) low_pfn += compound_nr(page) - 1; 975fdd048e1SVlastimil Babka goto isolate_fail; 976bc835011SAndrea Arcangeli } 97769b7189fSVlastimil Babka } 978bc835011SAndrea Arcangeli 979f4b7e272SAndrey Ryabinin lruvec = mem_cgroup_page_lruvec(page, pgdat); 980fa9add64SHugh Dickins 981748446bbSMel Gorman /* Try isolate the page */ 982edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 983fdd048e1SVlastimil Babka goto isolate_fail; 984748446bbSMel Gorman 985*1da2f328SRik van Riel /* The whole page is taken off the LRU; skip the tail pages. */ 986*1da2f328SRik van Riel if (PageCompound(page)) 987*1da2f328SRik van Riel low_pfn += compound_nr(page) - 1; 988bc835011SAndrea Arcangeli 989748446bbSMel Gorman /* Successfully isolated */ 990fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 991*1da2f328SRik van Riel mod_node_page_state(page_pgdat(page), 992*1da2f328SRik van Riel NR_ISOLATED_ANON + page_is_file_cache(page), 993*1da2f328SRik van Riel hpage_nr_pages(page)); 994b6c75016SJoonsoo Kim 995b6c75016SJoonsoo Kim isolate_success: 996fdd048e1SVlastimil Babka list_add(&page->lru, &cc->migratepages); 997748446bbSMel Gorman cc->nr_migratepages++; 998b7aba698SMel Gorman nr_isolated++; 999748446bbSMel Gorman 1000804d3121SMel Gorman /* 1001804d3121SMel Gorman * Avoid isolating too much unless this block is being 1002cb2dcaf0SMel Gorman * rescanned (e.g. dirty/writeback pages, parallel allocation) 1003cb2dcaf0SMel Gorman * or a lock is contended. For contention, isolate quickly to 1004cb2dcaf0SMel Gorman * potentially remove one source of contention. 1005804d3121SMel Gorman */ 1006cb2dcaf0SMel Gorman if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && 1007cb2dcaf0SMel Gorman !cc->rescan && !cc->contended) { 100831b8384aSHillf Danton ++low_pfn; 1009748446bbSMel Gorman break; 1010748446bbSMel Gorman } 1011fdd048e1SVlastimil Babka 1012fdd048e1SVlastimil Babka continue; 1013fdd048e1SVlastimil Babka isolate_fail: 1014fdd048e1SVlastimil Babka if (!skip_on_failure) 1015fdd048e1SVlastimil Babka continue; 1016fdd048e1SVlastimil Babka 1017fdd048e1SVlastimil Babka /* 1018fdd048e1SVlastimil Babka * We have isolated some pages, but then failed. Release them 1019fdd048e1SVlastimil Babka * instead of migrating, as we cannot form the cc->order buddy 1020fdd048e1SVlastimil Babka * page anyway. 1021fdd048e1SVlastimil Babka */ 1022fdd048e1SVlastimil Babka if (nr_isolated) { 1023fdd048e1SVlastimil Babka if (locked) { 1024f4b7e272SAndrey Ryabinin spin_unlock_irqrestore(&pgdat->lru_lock, flags); 1025fdd048e1SVlastimil Babka locked = false; 1026fdd048e1SVlastimil Babka } 1027fdd048e1SVlastimil Babka putback_movable_pages(&cc->migratepages); 1028fdd048e1SVlastimil Babka cc->nr_migratepages = 0; 1029fdd048e1SVlastimil Babka nr_isolated = 0; 1030fdd048e1SVlastimil Babka } 1031fdd048e1SVlastimil Babka 1032fdd048e1SVlastimil Babka if (low_pfn < next_skip_pfn) { 1033fdd048e1SVlastimil Babka low_pfn = next_skip_pfn - 1; 1034fdd048e1SVlastimil Babka /* 1035fdd048e1SVlastimil Babka * The check near the loop beginning would have updated 1036fdd048e1SVlastimil Babka * next_skip_pfn too, but this is a bit simpler. 
1037fdd048e1SVlastimil Babka */
1038fdd048e1SVlastimil Babka next_skip_pfn += 1UL << cc->order;
1039fdd048e1SVlastimil Babka }
104031b8384aSHillf Danton }
1041748446bbSMel Gorman
104299c0fd5eSVlastimil Babka /*
104399c0fd5eSVlastimil Babka * The PageBuddy() check could have potentially brought us outside
104499c0fd5eSVlastimil Babka * the range to be scanned.
104599c0fd5eSVlastimil Babka */
104699c0fd5eSVlastimil Babka if (unlikely(low_pfn > end_pfn))
104799c0fd5eSVlastimil Babka low_pfn = end_pfn;
104899c0fd5eSVlastimil Babka
1049e380bebeSMel Gorman isolate_abort:
1050c67fe375SMel Gorman if (locked)
1051f4b7e272SAndrey Ryabinin spin_unlock_irqrestore(&pgdat->lru_lock, flags);
1052748446bbSMel Gorman
105350b5b094SVlastimil Babka /*
1054804d3121SMel Gorman * Update the cached scanner pfn once the pageblock has been scanned.
1055804d3121SMel Gorman * Pages will either be migrated, in which case there is no point
1056804d3121SMel Gorman * scanning in the near future, or migration failed, in which case the
1057804d3121SMel Gorman * failure reason may persist. The block is marked for skipping if
1058804d3121SMel Gorman * there were no pages isolated in the block or if the block is
1059804d3121SMel Gorman * rescanned twice in a row.
106050b5b094SVlastimil Babka */
1061804d3121SMel Gorman if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
1062e380bebeSMel Gorman if (valid_page && !skip_updated)
1063e380bebeSMel Gorman set_pageblock_skip(valid_page);
1064e380bebeSMel Gorman update_cached_migrate(cc, low_pfn);
1065e380bebeSMel Gorman }
1066bb13ffebSMel Gorman
1067e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1068e34d85f0SJoonsoo Kim nr_scanned, nr_isolated);
1069b7aba698SMel Gorman
1070670105a2SMel Gorman fatal_pending:
10717f354a54SDavid Rientjes cc->total_migrate_scanned += nr_scanned;
1072397487dbSMel Gorman if (nr_isolated)
1073010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated);
1074397487dbSMel Gorman
10752fe86e00SMichal Nazarewicz return low_pfn;
10762fe86e00SMichal Nazarewicz }
10772fe86e00SMichal Nazarewicz
1078edc2ca61SVlastimil Babka /**
1079edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1080edc2ca61SVlastimil Babka * @cc: Compaction control structure.
1081edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating.
1082edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN.
1083edc2ca61SVlastimil Babka *
1084edc2ca61SVlastimil Babka * Returns zero if isolation fails fatally due to e.g. pending signal.
1085edc2ca61SVlastimil Babka * Otherwise, the function returns the one-past-the-last PFN of the isolated
1086edc2ca61SVlastimil Babka * pages (which may be greater than end_pfn if the end fell in the middle of a THP page).
1087edc2ca61SVlastimil Babka */
1088edc2ca61SVlastimil Babka unsigned long
1089edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1090edc2ca61SVlastimil Babka unsigned long end_pfn)
1091edc2ca61SVlastimil Babka {
1092e1409c32SJoonsoo Kim unsigned long pfn, block_start_pfn, block_end_pfn;
1093edc2ca61SVlastimil Babka
1094edc2ca61SVlastimil Babka /* Scan block by block.
First and last block may be incomplete */ 1095edc2ca61SVlastimil Babka pfn = start_pfn; 109606b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 1097e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 1098e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 109906b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 1100edc2ca61SVlastimil Babka 1101edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 1102e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1103edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 1104edc2ca61SVlastimil Babka 1105edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 1106edc2ca61SVlastimil Babka 1107e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 1108e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 1109edc2ca61SVlastimil Babka continue; 1110edc2ca61SVlastimil Babka 1111edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 1112edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 1113edc2ca61SVlastimil Babka 111414af4a5eSHugh Dickins if (!pfn) 1115edc2ca61SVlastimil Babka break; 11166ea41c0cSJoonsoo Kim 11176ea41c0cSJoonsoo Kim if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) 11186ea41c0cSJoonsoo Kim break; 1119edc2ca61SVlastimil Babka } 1120edc2ca61SVlastimil Babka 1121edc2ca61SVlastimil Babka return pfn; 1122edc2ca61SVlastimil Babka } 1123edc2ca61SVlastimil Babka 1124ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1125ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 1126018e9a49SAndrew Morton 1127b682debdSVlastimil Babka static bool suitable_migration_source(struct compact_control *cc, 1128b682debdSVlastimil Babka struct page *page) 1129b682debdSVlastimil Babka { 1130282722b0SVlastimil Babka int block_mt; 1131282722b0SVlastimil Babka 11329bebefd5SMel Gorman if (pageblock_skip_persistent(page)) 11339bebefd5SMel Gorman return false; 11349bebefd5SMel Gorman 1135282722b0SVlastimil Babka if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) 1136b682debdSVlastimil Babka return true; 1137b682debdSVlastimil Babka 1138282722b0SVlastimil Babka block_mt = get_pageblock_migratetype(page); 1139282722b0SVlastimil Babka 1140282722b0SVlastimil Babka if (cc->migratetype == MIGRATE_MOVABLE) 1141282722b0SVlastimil Babka return is_migrate_movable(block_mt); 1142282722b0SVlastimil Babka else 1143282722b0SVlastimil Babka return block_mt == cc->migratetype; 1144b682debdSVlastimil Babka } 1145b682debdSVlastimil Babka 1146018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */ 11479f7e3387SVlastimil Babka static bool suitable_migration_target(struct compact_control *cc, 11489f7e3387SVlastimil Babka struct page *page) 1149018e9a49SAndrew Morton { 1150018e9a49SAndrew Morton /* If the page is a large free page, then disallow migration */ 1151018e9a49SAndrew Morton if (PageBuddy(page)) { 1152018e9a49SAndrew Morton /* 1153018e9a49SAndrew Morton * We are checking page_order without zone->lock taken. But 1154018e9a49SAndrew Morton * the only small danger is that we skip a potentially suitable 1155018e9a49SAndrew Morton * pageblock, so it's not worth to check order for valid range. 
1156018e9a49SAndrew Morton */ 1157018e9a49SAndrew Morton if (page_order_unsafe(page) >= pageblock_order) 1158018e9a49SAndrew Morton return false; 1159018e9a49SAndrew Morton } 1160018e9a49SAndrew Morton 11611ef36db2SYisheng Xie if (cc->ignore_block_suitable) 11621ef36db2SYisheng Xie return true; 11631ef36db2SYisheng Xie 1164018e9a49SAndrew Morton /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1165b682debdSVlastimil Babka if (is_migrate_movable(get_pageblock_migratetype(page))) 1166018e9a49SAndrew Morton return true; 1167018e9a49SAndrew Morton 1168018e9a49SAndrew Morton /* Otherwise skip the block */ 1169018e9a49SAndrew Morton return false; 1170018e9a49SAndrew Morton } 1171018e9a49SAndrew Morton 117270b44595SMel Gorman static inline unsigned int 117370b44595SMel Gorman freelist_scan_limit(struct compact_control *cc) 117470b44595SMel Gorman { 1175dd7ef7bdSQian Cai unsigned short shift = BITS_PER_LONG - 1; 1176dd7ef7bdSQian Cai 1177dd7ef7bdSQian Cai return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; 117870b44595SMel Gorman } 117970b44595SMel Gorman 1180ff9543fdSMichal Nazarewicz /* 1181f2849aa0SVlastimil Babka * Test whether the free scanner has reached the same or lower pageblock than 1182f2849aa0SVlastimil Babka * the migration scanner, and compaction should thus terminate. 1183f2849aa0SVlastimil Babka */ 1184f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc) 1185f2849aa0SVlastimil Babka { 1186f2849aa0SVlastimil Babka return (cc->free_pfn >> pageblock_order) 1187f2849aa0SVlastimil Babka <= (cc->migrate_pfn >> pageblock_order); 1188f2849aa0SVlastimil Babka } 1189f2849aa0SVlastimil Babka 11905a811889SMel Gorman /* 11915a811889SMel Gorman * Used when scanning for a suitable migration target which scans freelists 11925a811889SMel Gorman * in reverse. Reorders the list such that the unscanned pages are scanned 11935a811889SMel Gorman * first on the next iteration of the free scanner 11945a811889SMel Gorman */ 11955a811889SMel Gorman static void 11965a811889SMel Gorman move_freelist_head(struct list_head *freelist, struct page *freepage) 11975a811889SMel Gorman { 11985a811889SMel Gorman LIST_HEAD(sublist); 11995a811889SMel Gorman 12005a811889SMel Gorman if (!list_is_last(freelist, &freepage->lru)) { 12015a811889SMel Gorman list_cut_before(&sublist, freelist, &freepage->lru); 12025a811889SMel Gorman if (!list_empty(&sublist)) 12035a811889SMel Gorman list_splice_tail(&sublist, freelist); 12045a811889SMel Gorman } 12055a811889SMel Gorman } 12065a811889SMel Gorman 12075a811889SMel Gorman /* 12085a811889SMel Gorman * Similar to move_freelist_head except used by the migration scanner 12095a811889SMel Gorman * when scanning forward. It's possible for these list operations to 12105a811889SMel Gorman * move against each other if they search the free list exactly in 12115a811889SMel Gorman * lockstep. 
12125a811889SMel Gorman */ 121370b44595SMel Gorman static void 121470b44595SMel Gorman move_freelist_tail(struct list_head *freelist, struct page *freepage) 121570b44595SMel Gorman { 121670b44595SMel Gorman LIST_HEAD(sublist); 121770b44595SMel Gorman 121870b44595SMel Gorman if (!list_is_first(freelist, &freepage->lru)) { 121970b44595SMel Gorman list_cut_position(&sublist, freelist, &freepage->lru); 122070b44595SMel Gorman if (!list_empty(&sublist)) 122170b44595SMel Gorman list_splice_tail(&sublist, freelist); 122270b44595SMel Gorman } 122370b44595SMel Gorman } 122470b44595SMel Gorman 12255a811889SMel Gorman static void 12265a811889SMel Gorman fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) 12275a811889SMel Gorman { 12285a811889SMel Gorman unsigned long start_pfn, end_pfn; 12295a811889SMel Gorman struct page *page = pfn_to_page(pfn); 12305a811889SMel Gorman 12315a811889SMel Gorman /* Do not search around if there are enough pages already */ 12325a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 12335a811889SMel Gorman return; 12345a811889SMel Gorman 12355a811889SMel Gorman /* Minimise scanning during async compaction */ 12365a811889SMel Gorman if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) 12375a811889SMel Gorman return; 12385a811889SMel Gorman 12395a811889SMel Gorman /* Pageblock boundaries */ 12405a811889SMel Gorman start_pfn = pageblock_start_pfn(pfn); 124160fce36aSMel Gorman end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1; 12425a811889SMel Gorman 12435a811889SMel Gorman /* Scan before */ 12445a811889SMel Gorman if (start_pfn != pfn) { 12454fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); 12465a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 12475a811889SMel Gorman return; 12485a811889SMel Gorman } 12495a811889SMel Gorman 12505a811889SMel Gorman /* Scan after */ 12515a811889SMel Gorman start_pfn = pfn + nr_isolated; 125260fce36aSMel Gorman if (start_pfn < end_pfn) 12534fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); 12545a811889SMel Gorman 12555a811889SMel Gorman /* Skip this pageblock in the future as it's full or nearly full */ 12565a811889SMel Gorman if (cc->nr_freepages < cc->nr_migratepages) 12575a811889SMel Gorman set_pageblock_skip(page); 12585a811889SMel Gorman } 12595a811889SMel Gorman 1260dbe2d4e4SMel Gorman /* Search orders in round-robin fashion */ 1261dbe2d4e4SMel Gorman static int next_search_order(struct compact_control *cc, int order) 1262dbe2d4e4SMel Gorman { 1263dbe2d4e4SMel Gorman order--; 1264dbe2d4e4SMel Gorman if (order < 0) 1265dbe2d4e4SMel Gorman order = cc->order - 1; 1266dbe2d4e4SMel Gorman 1267dbe2d4e4SMel Gorman /* Search wrapped around? 
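For example, if cc->order is 4 and cc->search_order starts at 2, the caller in fast_isolate_freepages() visits orders 2, 1 and 0, then wraps around to 3; the following call detects the return to the starting order, lowers cc->search_order to 1 for the next attempt, and returns -1 to end the scan.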
*/ 1268dbe2d4e4SMel Gorman if (order == cc->search_order) { 1269dbe2d4e4SMel Gorman cc->search_order--; 1270dbe2d4e4SMel Gorman if (cc->search_order < 0) 1271dbe2d4e4SMel Gorman cc->search_order = cc->order - 1; 1272dbe2d4e4SMel Gorman return -1; 1273dbe2d4e4SMel Gorman } 1274dbe2d4e4SMel Gorman 1275dbe2d4e4SMel Gorman return order; 1276dbe2d4e4SMel Gorman } 1277dbe2d4e4SMel Gorman 12785a811889SMel Gorman static unsigned long 12795a811889SMel Gorman fast_isolate_freepages(struct compact_control *cc) 12805a811889SMel Gorman { 12815a811889SMel Gorman unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); 12825a811889SMel Gorman unsigned int nr_scanned = 0; 12835a811889SMel Gorman unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; 12845a811889SMel Gorman unsigned long nr_isolated = 0; 12855a811889SMel Gorman unsigned long distance; 12865a811889SMel Gorman struct page *page = NULL; 12875a811889SMel Gorman bool scan_start = false; 12885a811889SMel Gorman int order; 12895a811889SMel Gorman 12905a811889SMel Gorman /* Full compaction passes in a negative order */ 12915a811889SMel Gorman if (cc->order <= 0) 12925a811889SMel Gorman return cc->free_pfn; 12935a811889SMel Gorman 12945a811889SMel Gorman /* 12955a811889SMel Gorman * If starting the scan, use a deeper search and use the highest 12965a811889SMel Gorman * PFN found if a suitable one is not found. 12975a811889SMel Gorman */ 1298e332f741SMel Gorman if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { 12995a811889SMel Gorman limit = pageblock_nr_pages >> 1; 13005a811889SMel Gorman scan_start = true; 13015a811889SMel Gorman } 13025a811889SMel Gorman 13035a811889SMel Gorman /* 13045a811889SMel Gorman * Preferred point is in the top quarter of the scan space but take 13055a811889SMel Gorman * a pfn from the top half if the search is problematic. 
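As a worked example with illustrative pfns: if cc->migrate_pfn is 0x10000 and cc->free_pfn is 0x50000, distance below is 0x40000, so low_pfn is the pageblock boundary near 0x40000 (the top quarter of the gap) and min_pfn the one near 0x30000 (the top half); a free page at or above low_pfn is taken immediately, while the highest candidate seen between min_pfn and low_pfn is only remembered as a fallback.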
13065a811889SMel Gorman */ 13075a811889SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn); 13085a811889SMel Gorman low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); 13095a811889SMel Gorman min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); 13105a811889SMel Gorman 13115a811889SMel Gorman if (WARN_ON_ONCE(min_pfn > low_pfn)) 13125a811889SMel Gorman low_pfn = min_pfn; 13135a811889SMel Gorman 1314dbe2d4e4SMel Gorman /* 1315dbe2d4e4SMel Gorman * Search starts from the last successful isolation order or the next 1316dbe2d4e4SMel Gorman * order to search after a previous failure 1317dbe2d4e4SMel Gorman */ 1318dbe2d4e4SMel Gorman cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); 1319dbe2d4e4SMel Gorman 1320dbe2d4e4SMel Gorman for (order = cc->search_order; 1321dbe2d4e4SMel Gorman !page && order >= 0; 1322dbe2d4e4SMel Gorman order = next_search_order(cc, order)) { 13235a811889SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 13245a811889SMel Gorman struct list_head *freelist; 13255a811889SMel Gorman struct page *freepage; 13265a811889SMel Gorman unsigned long flags; 13275a811889SMel Gorman unsigned int order_scanned = 0; 13285a811889SMel Gorman 13295a811889SMel Gorman if (!area->nr_free) 13305a811889SMel Gorman continue; 13315a811889SMel Gorman 13325a811889SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 13335a811889SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 13345a811889SMel Gorman list_for_each_entry_reverse(freepage, freelist, lru) { 13355a811889SMel Gorman unsigned long pfn; 13365a811889SMel Gorman 13375a811889SMel Gorman order_scanned++; 13385a811889SMel Gorman nr_scanned++; 13395a811889SMel Gorman pfn = page_to_pfn(freepage); 13405a811889SMel Gorman 13415a811889SMel Gorman if (pfn >= highest) 13425a811889SMel Gorman highest = pageblock_start_pfn(pfn); 13435a811889SMel Gorman 13445a811889SMel Gorman if (pfn >= low_pfn) { 13455a811889SMel Gorman cc->fast_search_fail = 0; 1346dbe2d4e4SMel Gorman cc->search_order = order; 13475a811889SMel Gorman page = freepage; 13485a811889SMel Gorman break; 13495a811889SMel Gorman } 13505a811889SMel Gorman 13515a811889SMel Gorman if (pfn >= min_pfn && pfn > high_pfn) { 13525a811889SMel Gorman high_pfn = pfn; 13535a811889SMel Gorman 13545a811889SMel Gorman /* Shorten the scan if a candidate is found */ 13555a811889SMel Gorman limit >>= 1; 13565a811889SMel Gorman } 13575a811889SMel Gorman 13585a811889SMel Gorman if (order_scanned >= limit) 13595a811889SMel Gorman break; 13605a811889SMel Gorman } 13615a811889SMel Gorman 13625a811889SMel Gorman /* Use a minimum pfn if a preferred one was not found */ 13635a811889SMel Gorman if (!page && high_pfn) { 13645a811889SMel Gorman page = pfn_to_page(high_pfn); 13655a811889SMel Gorman 13665a811889SMel Gorman /* Update freepage for the list reorder below */ 13675a811889SMel Gorman freepage = page; 13685a811889SMel Gorman } 13695a811889SMel Gorman 13705a811889SMel Gorman /* Reorder so a future search skips recent pages */ 13715a811889SMel Gorman move_freelist_head(freelist, freepage); 13725a811889SMel Gorman 13735a811889SMel Gorman /* Isolate the page if available */ 13745a811889SMel Gorman if (page) { 13755a811889SMel Gorman if (__isolate_free_page(page, order)) { 13765a811889SMel Gorman set_page_private(page, order); 13775a811889SMel Gorman nr_isolated = 1 << order; 13785a811889SMel Gorman cc->nr_freepages += nr_isolated; 13795a811889SMel Gorman list_add_tail(&page->lru, &cc->freepages); 13805a811889SMel Gorman 
count_compact_events(COMPACTISOLATED, nr_isolated); 13815a811889SMel Gorman } else { 13825a811889SMel Gorman /* If isolation fails, abort the search */ 13835b56d996SQian Cai order = cc->search_order + 1; 13845a811889SMel Gorman page = NULL; 13855a811889SMel Gorman } 13865a811889SMel Gorman } 13875a811889SMel Gorman 13885a811889SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 13895a811889SMel Gorman 13905a811889SMel Gorman /* 13915a811889SMel Gorman * Smaller scan on next order so the total scan is related 13925a811889SMel Gorman * to freelist_scan_limit. 13935a811889SMel Gorman */ 13945a811889SMel Gorman if (order_scanned >= limit) 13955a811889SMel Gorman limit = min(1U, limit >> 1); 13965a811889SMel Gorman } 13975a811889SMel Gorman 13985a811889SMel Gorman if (!page) { 13995a811889SMel Gorman cc->fast_search_fail++; 14005a811889SMel Gorman if (scan_start) { 14015a811889SMel Gorman /* 14025a811889SMel Gorman * Use the highest PFN found above min. If one was 14035a811889SMel Gorman * not found, be pessimistic for direct compaction 14045a811889SMel Gorman * and use the min mark. 14055a811889SMel Gorman */ 14065a811889SMel Gorman if (highest) { 14075a811889SMel Gorman page = pfn_to_page(highest); 14085a811889SMel Gorman cc->free_pfn = highest; 14095a811889SMel Gorman } else { 1410e577c8b6SSuzuki K Poulose if (cc->direct_compaction && pfn_valid(min_pfn)) { 14115a811889SMel Gorman page = pfn_to_page(min_pfn); 14125a811889SMel Gorman cc->free_pfn = min_pfn; 14135a811889SMel Gorman } 14145a811889SMel Gorman } 14155a811889SMel Gorman } 14165a811889SMel Gorman } 14175a811889SMel Gorman 1418d097a6f6SMel Gorman if (highest && highest >= cc->zone->compact_cached_free_pfn) { 1419d097a6f6SMel Gorman highest -= pageblock_nr_pages; 14205a811889SMel Gorman cc->zone->compact_cached_free_pfn = highest; 1421d097a6f6SMel Gorman } 14225a811889SMel Gorman 14235a811889SMel Gorman cc->total_free_scanned += nr_scanned; 14245a811889SMel Gorman if (!page) 14255a811889SMel Gorman return cc->free_pfn; 14265a811889SMel Gorman 14275a811889SMel Gorman low_pfn = page_to_pfn(page); 14285a811889SMel Gorman fast_isolate_around(cc, low_pfn, nr_isolated); 14295a811889SMel Gorman return low_pfn; 14305a811889SMel Gorman } 14315a811889SMel Gorman 1432f2849aa0SVlastimil Babka /* 1433ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 1434ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 
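The fast path below lets fast_isolate_freepages() probe the free lists directly; only when that yields nothing does the code fall back to scanning pageblocks from cc->free_pfn down towards the migration scanner.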
1435ff9543fdSMichal Nazarewicz */ 1436edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 1437ff9543fdSMichal Nazarewicz { 1438edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 1439ff9543fdSMichal Nazarewicz struct page *page; 1440c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 1441e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 1442c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 1443c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1444ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 14454fca9730SMel Gorman unsigned int stride; 14462fe86e00SMichal Nazarewicz 14475a811889SMel Gorman /* Try a small search of the free lists for a candidate */ 14485a811889SMel Gorman isolate_start_pfn = fast_isolate_freepages(cc); 14495a811889SMel Gorman if (cc->nr_freepages) 14505a811889SMel Gorman goto splitmap; 14515a811889SMel Gorman 1452ff9543fdSMichal Nazarewicz /* 1453ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 145449e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 1455e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 1456e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 1457c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 1458c96b9e50SVlastimil Babka * For ending point, take care when isolating in the last pageblock of 1459c96b9e50SVlastimil Babka * a zone which ends in the middle of a pageblock. 146049e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 146149e068f0SVlastimil Babka * is using. 1462ff9543fdSMichal Nazarewicz */ 1463e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 14645a811889SMel Gorman block_start_pfn = pageblock_start_pfn(isolate_start_pfn); 1465c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1466c96b9e50SVlastimil Babka zone_end_pfn(zone)); 146706b6640aSVlastimil Babka low_pfn = pageblock_end_pfn(cc->migrate_pfn); 14684fca9730SMel Gorman stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; 14692fe86e00SMichal Nazarewicz 1470ff9543fdSMichal Nazarewicz /* 1471ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 1472ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 1473ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 1474ff9543fdSMichal Nazarewicz */ 1475f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 1476c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 1477e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 1478e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 14794fca9730SMel Gorman unsigned long nr_isolated; 14804fca9730SMel Gorman 1481f6ea3adbSDavid Rientjes /* 1482f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 1483cb810ad2SMel Gorman * suitable migration targets, so periodically check resched. 
1484f6ea3adbSDavid Rientjes */ 1485cb810ad2SMel Gorman if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1486cf66f070SMel Gorman cond_resched(); 1487f6ea3adbSDavid Rientjes 14887d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 14897d49d886SVlastimil Babka zone); 14907d49d886SVlastimil Babka if (!page) 1491ff9543fdSMichal Nazarewicz continue; 1492ff9543fdSMichal Nazarewicz 1493ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 14949f7e3387SVlastimil Babka if (!suitable_migration_target(cc, page)) 1495ff9543fdSMichal Nazarewicz continue; 149668e3e926SLinus Torvalds 1497bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 1498bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 1499bb13ffebSMel Gorman continue; 1500bb13ffebSMel Gorman 1501e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 15024fca9730SMel Gorman nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, 15034fca9730SMel Gorman block_end_pfn, freelist, stride, false); 1504ff9543fdSMichal Nazarewicz 1505d097a6f6SMel Gorman /* Update the skip hint if the full pageblock was scanned */ 1506d097a6f6SMel Gorman if (isolate_start_pfn == block_end_pfn) 1507d097a6f6SMel Gorman update_pageblock_skip(cc, page, block_start_pfn); 1508d097a6f6SMel Gorman 1509cb2dcaf0SMel Gorman /* Are enough freepages isolated? */ 1510cb2dcaf0SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) { 1511a46cbf3bSDavid Rientjes if (isolate_start_pfn >= block_end_pfn) { 1512a46cbf3bSDavid Rientjes /* 1513a46cbf3bSDavid Rientjes * Restart at previous pageblock if more 1514a46cbf3bSDavid Rientjes * freepages can be isolated next time. 1515a46cbf3bSDavid Rientjes */ 1516f5f61a32SVlastimil Babka isolate_start_pfn = 1517e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1518a46cbf3bSDavid Rientjes } 1519be976572SVlastimil Babka break; 1520a46cbf3bSDavid Rientjes } else if (isolate_start_pfn < block_end_pfn) { 1521f5f61a32SVlastimil Babka /* 1522a46cbf3bSDavid Rientjes * If isolation failed early, do not continue 1523a46cbf3bSDavid Rientjes * needlessly. 1524f5f61a32SVlastimil Babka */ 1525a46cbf3bSDavid Rientjes break; 1526f5f61a32SVlastimil Babka } 15274fca9730SMel Gorman 15284fca9730SMel Gorman /* Adjust stride depending on isolation */ 15294fca9730SMel Gorman if (nr_isolated) { 15304fca9730SMel Gorman stride = 1; 15314fca9730SMel Gorman continue; 15324fca9730SMel Gorman } 15334fca9730SMel Gorman stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); 1534c89511abSMel Gorman } 1535ff9543fdSMichal Nazarewicz 15367ed695e0SVlastimil Babka /* 1537f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. 
Either we 1538f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1539f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1540f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 15417ed695e0SVlastimil Babka */ 1542f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 15435a811889SMel Gorman 15445a811889SMel Gorman splitmap: 15455a811889SMel Gorman /* __isolate_free_page() does not map the pages */ 15465a811889SMel Gorman split_map_pages(freelist); 1547748446bbSMel Gorman } 1548748446bbSMel Gorman 1549748446bbSMel Gorman /* 1550748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1551748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1552748446bbSMel Gorman */ 1553748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1554666feb21SMichal Hocko unsigned long data) 1555748446bbSMel Gorman { 1556748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1557748446bbSMel Gorman struct page *freepage; 1558748446bbSMel Gorman 1559748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1560edc2ca61SVlastimil Babka isolate_freepages(cc); 1561748446bbSMel Gorman 1562748446bbSMel Gorman if (list_empty(&cc->freepages)) 1563748446bbSMel Gorman return NULL; 1564748446bbSMel Gorman } 1565748446bbSMel Gorman 1566748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1567748446bbSMel Gorman list_del(&freepage->lru); 1568748446bbSMel Gorman cc->nr_freepages--; 1569748446bbSMel Gorman 1570748446bbSMel Gorman return freepage; 1571748446bbSMel Gorman } 1572748446bbSMel Gorman 1573748446bbSMel Gorman /* 1574d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1575d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1576d53aea3dSDavid Rientjes * special handling needed for NUMA. 1577d53aea3dSDavid Rientjes */ 1578d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1579d53aea3dSDavid Rientjes { 1580d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1581d53aea3dSDavid Rientjes 1582d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1583d53aea3dSDavid Rientjes cc->nr_freepages++; 1584d53aea3dSDavid Rientjes } 1585d53aea3dSDavid Rientjes 1586ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1587ff9543fdSMichal Nazarewicz typedef enum { 1588ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1589ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1590ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1591ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1592ff9543fdSMichal Nazarewicz 1593ff9543fdSMichal Nazarewicz /* 15945bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 15955bbe3547SEric B Munson * compactable pages. 
15965bbe3547SEric B Munson */ 15975bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 15985bbe3547SEric B Munson 159970b44595SMel Gorman static inline void 160070b44595SMel Gorman update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) 160170b44595SMel Gorman { 160270b44595SMel Gorman if (cc->fast_start_pfn == ULONG_MAX) 160370b44595SMel Gorman return; 160470b44595SMel Gorman 160570b44595SMel Gorman if (!cc->fast_start_pfn) 160670b44595SMel Gorman cc->fast_start_pfn = pfn; 160770b44595SMel Gorman 160870b44595SMel Gorman cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); 160970b44595SMel Gorman } 161070b44595SMel Gorman 161170b44595SMel Gorman static inline unsigned long 161270b44595SMel Gorman reinit_migrate_pfn(struct compact_control *cc) 161370b44595SMel Gorman { 161470b44595SMel Gorman if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) 161570b44595SMel Gorman return cc->migrate_pfn; 161670b44595SMel Gorman 161770b44595SMel Gorman cc->migrate_pfn = cc->fast_start_pfn; 161870b44595SMel Gorman cc->fast_start_pfn = ULONG_MAX; 161970b44595SMel Gorman 162070b44595SMel Gorman return cc->migrate_pfn; 162170b44595SMel Gorman } 162270b44595SMel Gorman 162370b44595SMel Gorman /* 162470b44595SMel Gorman * Briefly search the free lists for a migration source that already has 162570b44595SMel Gorman * some free pages to reduce the number of pages that need migration 162670b44595SMel Gorman * before a pageblock is free. 162770b44595SMel Gorman */ 162870b44595SMel Gorman static unsigned long fast_find_migrateblock(struct compact_control *cc) 162970b44595SMel Gorman { 163070b44595SMel Gorman unsigned int limit = freelist_scan_limit(cc); 163170b44595SMel Gorman unsigned int nr_scanned = 0; 163270b44595SMel Gorman unsigned long distance; 163370b44595SMel Gorman unsigned long pfn = cc->migrate_pfn; 163470b44595SMel Gorman unsigned long high_pfn; 163570b44595SMel Gorman int order; 163670b44595SMel Gorman 163770b44595SMel Gorman /* Skip hints are relied on to avoid repeats on the fast search */ 163870b44595SMel Gorman if (cc->ignore_skip_hint) 163970b44595SMel Gorman return pfn; 164070b44595SMel Gorman 164170b44595SMel Gorman /* 164270b44595SMel Gorman * If the migrate_pfn is not at the start of a zone or the start 164370b44595SMel Gorman * of a pageblock then assume this is a continuation of a previous 164470b44595SMel Gorman * scan restarted due to COMPACT_CLUSTER_MAX. 164570b44595SMel Gorman */ 164670b44595SMel Gorman if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) 164770b44595SMel Gorman return pfn; 164870b44595SMel Gorman 164970b44595SMel Gorman /* 165070b44595SMel Gorman * For smaller orders, just linearly scan as the number of pages 165170b44595SMel Gorman * to migrate should be relatively small and does not necessarily 165270b44595SMel Gorman * justify freeing up a large block for a small allocation. 165370b44595SMel Gorman */ 165470b44595SMel Gorman if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) 165570b44595SMel Gorman return pfn; 165670b44595SMel Gorman 165770b44595SMel Gorman /* 165870b44595SMel Gorman * Only allow kcompactd and direct requests for movable pages to 165970b44595SMel Gorman * quickly clear out a MOVABLE pageblock for allocation. This 166070b44595SMel Gorman * reduces the risk that a large movable pageblock is freed for 166170b44595SMel Gorman * an unmovable/reclaimable small allocation. 
166270b44595SMel Gorman */ 166370b44595SMel Gorman if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) 166470b44595SMel Gorman return pfn; 166570b44595SMel Gorman 166670b44595SMel Gorman /* 166770b44595SMel Gorman * When starting the migration scanner, pick any pageblock within the 166870b44595SMel Gorman * first half of the search space. Otherwise try and pick a pageblock 166970b44595SMel Gorman * within the first eighth to reduce the chances that a migration 167070b44595SMel Gorman * target later becomes a source. 167170b44595SMel Gorman */ 167270b44595SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn) >> 1; 167370b44595SMel Gorman if (cc->migrate_pfn != cc->zone->zone_start_pfn) 167470b44595SMel Gorman distance >>= 2; 167570b44595SMel Gorman high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); 167670b44595SMel Gorman 167770b44595SMel Gorman for (order = cc->order - 1; 167870b44595SMel Gorman order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit; 167970b44595SMel Gorman order--) { 168070b44595SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 168170b44595SMel Gorman struct list_head *freelist; 168270b44595SMel Gorman unsigned long flags; 168370b44595SMel Gorman struct page *freepage; 168470b44595SMel Gorman 168570b44595SMel Gorman if (!area->nr_free) 168670b44595SMel Gorman continue; 168770b44595SMel Gorman 168870b44595SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 168970b44595SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 169070b44595SMel Gorman list_for_each_entry(freepage, freelist, lru) { 169170b44595SMel Gorman unsigned long free_pfn; 169270b44595SMel Gorman 169370b44595SMel Gorman nr_scanned++; 169470b44595SMel Gorman free_pfn = page_to_pfn(freepage); 169570b44595SMel Gorman if (free_pfn < high_pfn) { 169670b44595SMel Gorman /* 169770b44595SMel Gorman * Avoid if skipped recently. Ideally it would 169870b44595SMel Gorman * move to the tail but even safe iteration of 169970b44595SMel Gorman * the list assumes an entry is deleted, not 170070b44595SMel Gorman * reordered. 170170b44595SMel Gorman */ 170270b44595SMel Gorman if (get_pageblock_skip(freepage)) { 170370b44595SMel Gorman if (list_is_last(freelist, &freepage->lru)) 170470b44595SMel Gorman break; 170570b44595SMel Gorman 170670b44595SMel Gorman continue; 170770b44595SMel Gorman } 170870b44595SMel Gorman 170970b44595SMel Gorman /* Reorder so a future search skips recent pages */ 171070b44595SMel Gorman move_freelist_tail(freelist, freepage); 171170b44595SMel Gorman 1712e380bebeSMel Gorman update_fast_start_pfn(cc, free_pfn); 171370b44595SMel Gorman pfn = pageblock_start_pfn(free_pfn); 171470b44595SMel Gorman cc->fast_search_fail = 0; 171570b44595SMel Gorman set_pageblock_skip(freepage); 171670b44595SMel Gorman break; 171770b44595SMel Gorman } 171870b44595SMel Gorman 171970b44595SMel Gorman if (nr_scanned >= limit) { 172070b44595SMel Gorman cc->fast_search_fail++; 172170b44595SMel Gorman move_freelist_tail(freelist, freepage); 172270b44595SMel Gorman break; 172370b44595SMel Gorman } 172470b44595SMel Gorman } 172570b44595SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 172670b44595SMel Gorman } 172770b44595SMel Gorman 172870b44595SMel Gorman cc->total_migrate_scanned += nr_scanned; 172970b44595SMel Gorman 173070b44595SMel Gorman /* 173170b44595SMel Gorman * If fast scanning failed then use a cached entry for a page block 173270b44595SMel Gorman * that had free pages as the basis for starting a linear scan. 
173370b44595SMel Gorman */ 173470b44595SMel Gorman if (pfn == cc->migrate_pfn) 173570b44595SMel Gorman pfn = reinit_migrate_pfn(cc); 173670b44595SMel Gorman 173770b44595SMel Gorman return pfn; 173870b44595SMel Gorman } 173970b44595SMel Gorman 17405bbe3547SEric B Munson /* 1741edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1742edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1743edc2ca61SVlastimil Babka * compact_control. 1744ff9543fdSMichal Nazarewicz */ 174532aaf055SPengfei Li static isolate_migrate_t isolate_migratepages(struct compact_control *cc) 1746ff9543fdSMichal Nazarewicz { 1747e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1748e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1749e1409c32SJoonsoo Kim unsigned long low_pfn; 1750edc2ca61SVlastimil Babka struct page *page; 1751edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 17525bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 17531d2047feSHugh Dickins (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 175470b44595SMel Gorman bool fast_find_block; 1755ff9543fdSMichal Nazarewicz 1756edc2ca61SVlastimil Babka /* 1757edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 175870b44595SMel Gorman * initialized by compact_zone(). The first failure will use 175970b44595SMel Gorman * the lowest PFN as the starting point for linear scanning. 1760edc2ca61SVlastimil Babka */ 176170b44595SMel Gorman low_pfn = fast_find_migrateblock(cc); 176206b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 176332aaf055SPengfei Li if (block_start_pfn < cc->zone->zone_start_pfn) 176432aaf055SPengfei Li block_start_pfn = cc->zone->zone_start_pfn; 1765ff9543fdSMichal Nazarewicz 176670b44595SMel Gorman /* 176770b44595SMel Gorman * fast_find_migrateblock marks a pageblock skipped so to avoid 176870b44595SMel Gorman * the isolation_suitable check below, check whether the fast 176970b44595SMel Gorman * search was successful. 177070b44595SMel Gorman */ 177170b44595SMel Gorman fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; 177270b44595SMel Gorman 1773ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 177406b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1775ff9543fdSMichal Nazarewicz 1776edc2ca61SVlastimil Babka /* 1777edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1778edc2ca61SVlastimil Babka * Do not cross the free scanner. 1779edc2ca61SVlastimil Babka */ 1780e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 178170b44595SMel Gorman fast_find_block = false, 1782e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1783e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1784e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1785edc2ca61SVlastimil Babka 1786edc2ca61SVlastimil Babka /* 1787edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1788edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1789cb810ad2SMel Gorman * need to schedule. 
1790edc2ca61SVlastimil Babka */ 1791cb810ad2SMel Gorman if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1792cf66f070SMel Gorman cond_resched(); 1793edc2ca61SVlastimil Babka 179432aaf055SPengfei Li page = pageblock_pfn_to_page(block_start_pfn, 179532aaf055SPengfei Li block_end_pfn, cc->zone); 17967d49d886SVlastimil Babka if (!page) 1797edc2ca61SVlastimil Babka continue; 1798edc2ca61SVlastimil Babka 1799e380bebeSMel Gorman /* 1800e380bebeSMel Gorman * If isolation recently failed, do not retry. Only check the 1801e380bebeSMel Gorman * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1802e380bebeSMel Gorman * to be visited multiple times. Assume skip was checked 1803e380bebeSMel Gorman * before making it "skip" so other compaction instances do 1804e380bebeSMel Gorman * not scan the same block. 1805e380bebeSMel Gorman */ 1806e380bebeSMel Gorman if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && 1807e380bebeSMel Gorman !fast_find_block && !isolation_suitable(cc, page)) 1808edc2ca61SVlastimil Babka continue; 1809edc2ca61SVlastimil Babka 1810edc2ca61SVlastimil Babka /* 18119bebefd5SMel Gorman * For async compaction, also only scan in MOVABLE blocks 18129bebefd5SMel Gorman * without huge pages. Async compaction is optimistic to see 18139bebefd5SMel Gorman * if the minimum amount of work satisfies the allocation. 18149bebefd5SMel Gorman * The cached PFN is updated as it's possible that all 18159bebefd5SMel Gorman * remaining blocks between source and target are unsuitable 18169bebefd5SMel Gorman * and the compaction scanners fail to meet. 1817edc2ca61SVlastimil Babka */ 18189bebefd5SMel Gorman if (!suitable_migration_source(cc, page)) { 18199bebefd5SMel Gorman update_cached_migrate(cc, block_end_pfn); 1820edc2ca61SVlastimil Babka continue; 18219bebefd5SMel Gorman } 1822ff9543fdSMichal Nazarewicz 1823ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1824e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1825e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1826edc2ca61SVlastimil Babka 1827cb2dcaf0SMel Gorman if (!low_pfn) 1828ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1829ff9543fdSMichal Nazarewicz 1830edc2ca61SVlastimil Babka /* 1831edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1832edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1833edc2ca61SVlastimil Babka * continue or not. 1834edc2ca61SVlastimil Babka */ 1835edc2ca61SVlastimil Babka break; 1836edc2ca61SVlastimil Babka } 1837edc2ca61SVlastimil Babka 1838f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1839f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1840ff9543fdSMichal Nazarewicz 1841edc2ca61SVlastimil Babka return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; 1842ff9543fdSMichal Nazarewicz } 1843ff9543fdSMichal Nazarewicz 184421c527a3SYaowei Bai /* 184521c527a3SYaowei Bai * order == -1 is expected when compacting via 184621c527a3SYaowei Bai * /proc/sys/vm/compact_memory 184721c527a3SYaowei Bai */ 184821c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 184921c527a3SYaowei Bai { 185021c527a3SYaowei Bai return order == -1; 185121c527a3SYaowei Bai } 185221c527a3SYaowei Bai 185340cacbcbSMel Gorman static enum compact_result __compact_finished(struct compact_control *cc) 1854748446bbSMel Gorman { 18558fb74b9fSMel Gorman unsigned int order; 1856d39773a0SVlastimil Babka const int migratetype = cc->migratetype; 1857cb2dcaf0SMel Gorman int ret; 1858748446bbSMel Gorman 1859753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1860f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 186155b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 186240cacbcbSMel Gorman reset_cached_positions(cc->zone); 186355b7c4c9SVlastimil Babka 186462997027SMel Gorman /* 186562997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1866accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 186762997027SMel Gorman * flag itself as the decision to be clear should be directly 186862997027SMel Gorman * based on an allocation request. 186962997027SMel Gorman */ 1870accf6242SVlastimil Babka if (cc->direct_compaction) 187140cacbcbSMel Gorman cc->zone->compact_blockskip_flush = true; 187262997027SMel Gorman 1873c8f7de0bSMichal Hocko if (cc->whole_zone) 1874748446bbSMel Gorman return COMPACT_COMPLETE; 1875c8f7de0bSMichal Hocko else 1876c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1877bb13ffebSMel Gorman } 1878748446bbSMel Gorman 187921c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 188056de7263SMel Gorman return COMPACT_CONTINUE; 188156de7263SMel Gorman 1882baf6a9a1SVlastimil Babka /* 1883efe771c7SMel Gorman * Always finish scanning a pageblock to reduce the possibility of 1884efe771c7SMel Gorman * fallbacks in the future. This is particularly important when 1885efe771c7SMel Gorman * migration source is unmovable/reclaimable but it's not worth 1886efe771c7SMel Gorman * special casing. 1887baf6a9a1SVlastimil Babka */ 1888efe771c7SMel Gorman if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 1889baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 1890baf6a9a1SVlastimil Babka 189156de7263SMel Gorman /* Direct compactor: Is a suitable page free? 
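For instance, an order-3 MIGRATE_MOVABLE request is treated as satisfied once any free_area of order 3 or higher has a page on its MOVABLE (or, with CMA, CMA) free list, or a block that find_suitable_fallback() reports as stealable.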
*/ 1892cb2dcaf0SMel Gorman ret = COMPACT_NO_SUITABLE_PAGE; 189356de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 189440cacbcbSMel Gorman struct free_area *area = &cc->zone->free_area[order]; 18952149cdaeSJoonsoo Kim bool can_steal; 18968fb74b9fSMel Gorman 189756de7263SMel Gorman /* Job done if page is free of the right migratetype */ 1898b03641afSDan Williams if (!free_area_empty(area, migratetype)) 1899cf378319SVlastimil Babka return COMPACT_SUCCESS; 190056de7263SMel Gorman 19012149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 19022149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 19032149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 1904b03641afSDan Williams !free_area_empty(area, MIGRATE_CMA)) 1905cf378319SVlastimil Babka return COMPACT_SUCCESS; 19062149cdaeSJoonsoo Kim #endif 19072149cdaeSJoonsoo Kim /* 19082149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 19092149cdaeSJoonsoo Kim * other migratetype buddy lists. 19102149cdaeSJoonsoo Kim */ 19112149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 1912baf6a9a1SVlastimil Babka true, &can_steal) != -1) { 1913baf6a9a1SVlastimil Babka 1914baf6a9a1SVlastimil Babka /* movable pages are OK in any pageblock */ 1915baf6a9a1SVlastimil Babka if (migratetype == MIGRATE_MOVABLE) 1916cf378319SVlastimil Babka return COMPACT_SUCCESS; 1917baf6a9a1SVlastimil Babka 1918baf6a9a1SVlastimil Babka /* 1919baf6a9a1SVlastimil Babka * We are stealing for a non-movable allocation. Make 1920baf6a9a1SVlastimil Babka * sure we finish compacting the current pageblock 1921baf6a9a1SVlastimil Babka * first so it is as free as possible and we won't 1922baf6a9a1SVlastimil Babka * have to steal another one soon. This only applies 1923baf6a9a1SVlastimil Babka * to sync compaction, as async compaction operates 1924baf6a9a1SVlastimil Babka * on pageblocks of the same migratetype. 1925baf6a9a1SVlastimil Babka */ 1926baf6a9a1SVlastimil Babka if (cc->mode == MIGRATE_ASYNC || 1927baf6a9a1SVlastimil Babka IS_ALIGNED(cc->migrate_pfn, 1928baf6a9a1SVlastimil Babka pageblock_nr_pages)) { 1929baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 1930baf6a9a1SVlastimil Babka } 1931baf6a9a1SVlastimil Babka 1932cb2dcaf0SMel Gorman ret = COMPACT_CONTINUE; 1933cb2dcaf0SMel Gorman break; 1934baf6a9a1SVlastimil Babka } 193556de7263SMel Gorman } 193656de7263SMel Gorman 1937cb2dcaf0SMel Gorman if (cc->contended || fatal_signal_pending(current)) 1938cb2dcaf0SMel Gorman ret = COMPACT_CONTENDED; 1939cb2dcaf0SMel Gorman 1940cb2dcaf0SMel Gorman return ret; 1941837d026dSJoonsoo Kim } 1942837d026dSJoonsoo Kim 194340cacbcbSMel Gorman static enum compact_result compact_finished(struct compact_control *cc) 1944837d026dSJoonsoo Kim { 1945837d026dSJoonsoo Kim int ret; 1946837d026dSJoonsoo Kim 194740cacbcbSMel Gorman ret = __compact_finished(cc); 194840cacbcbSMel Gorman trace_mm_compaction_finished(cc->zone, cc->order, ret); 1949837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1950837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1951837d026dSJoonsoo Kim 1952837d026dSJoonsoo Kim return ret; 1953748446bbSMel Gorman } 1954748446bbSMel Gorman 19553e7d3449SMel Gorman /* 19563e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 
19573e7d3449SMel Gorman * Returns 19583e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 1959cf378319SVlastimil Babka * COMPACT_SUCCESS - If the allocation would succeed without compaction 19603e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 19613e7d3449SMel Gorman */ 1962ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 1963c603844bSMel Gorman unsigned int alloc_flags, 196486a294a8SMichal Hocko int classzone_idx, 196586a294a8SMichal Hocko unsigned long wmark_target) 19663e7d3449SMel Gorman { 19673e7d3449SMel Gorman unsigned long watermark; 19683e7d3449SMel Gorman 196921c527a3SYaowei Bai if (is_via_compact_memory(order)) 19703957c776SMichal Hocko return COMPACT_CONTINUE; 19713957c776SMichal Hocko 1972a9214443SMel Gorman watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 1973ebff3980SVlastimil Babka /* 1974ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1975ebff3980SVlastimil Babka * should be no need for compaction at all. 1976ebff3980SVlastimil Babka */ 1977ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1978ebff3980SVlastimil Babka alloc_flags)) 1979cf378319SVlastimil Babka return COMPACT_SUCCESS; 1980ebff3980SVlastimil Babka 19813957c776SMichal Hocko /* 19829861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 1983984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 1984984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 1985984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 1986984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 1987984fdba6SVlastimil Babka * isolation. We however do use the direct compactor's classzone_idx to 1988984fdba6SVlastimil Babka * skip over zones where lowmem reserves would prevent allocation even 1989984fdba6SVlastimil Babka * if compaction succeeds. 19908348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 19918348faf9SVlastimil Babka * compaction to proceed to increase its chances. 1992d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 1993d883c6cfSJoonsoo Kim * suitable migration targets 19943e7d3449SMel Gorman */ 19958348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
19968348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 19978348faf9SVlastimil Babka watermark += compact_gap(order); 199886a294a8SMichal Hocko if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 1999d883c6cfSJoonsoo Kim ALLOC_CMA, wmark_target)) 20003e7d3449SMel Gorman return COMPACT_SKIPPED; 20013e7d3449SMel Gorman 2002cc5c9f09SVlastimil Babka return COMPACT_CONTINUE; 2003cc5c9f09SVlastimil Babka } 2004cc5c9f09SVlastimil Babka 2005cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order, 2006cc5c9f09SVlastimil Babka unsigned int alloc_flags, 2007cc5c9f09SVlastimil Babka int classzone_idx) 2008cc5c9f09SVlastimil Babka { 2009cc5c9f09SVlastimil Babka enum compact_result ret; 2010cc5c9f09SVlastimil Babka int fragindex; 2011cc5c9f09SVlastimil Babka 2012cc5c9f09SVlastimil Babka ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, 2013cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 20143e7d3449SMel Gorman /* 20153e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 20163e7d3449SMel Gorman * low memory or external fragmentation 20173e7d3449SMel Gorman * 2018ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 2019ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 20203e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 20213e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 20223e7d3449SMel Gorman * 202320311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 202420311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 202520311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 202620311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 202720311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 202820311420SVlastimil Babka * expense of system stability. 20293e7d3449SMel Gorman */ 203020311420SVlastimil Babka if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 20313e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 20323e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 2033cc5c9f09SVlastimil Babka ret = COMPACT_NOT_SUITABLE_ZONE; 20343e7d3449SMel Gorman } 20353e7d3449SMel Gorman 2036837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 2037837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 2038837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 2039837d026dSJoonsoo Kim 2040837d026dSJoonsoo Kim return ret; 2041837d026dSJoonsoo Kim } 2042837d026dSJoonsoo Kim 204386a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 204486a294a8SMichal Hocko int alloc_flags) 204586a294a8SMichal Hocko { 204686a294a8SMichal Hocko struct zone *zone; 204786a294a8SMichal Hocko struct zoneref *z; 204886a294a8SMichal Hocko 204986a294a8SMichal Hocko /* 205086a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 205186a294a8SMichal Hocko * retrying the reclaim. 
205286a294a8SMichal Hocko */ 205386a294a8SMichal Hocko for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 205486a294a8SMichal Hocko ac->nodemask) { 205586a294a8SMichal Hocko unsigned long available; 205686a294a8SMichal Hocko enum compact_result compact_result; 205786a294a8SMichal Hocko 205886a294a8SMichal Hocko /* 205986a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 206086a294a8SMichal Hocko * want to thrash just for a single high order allocation which 206186a294a8SMichal Hocko * is not even guaranteed to appear even if __compaction_suitable 206286a294a8SMichal Hocko * is happy about the watermark check. 206386a294a8SMichal Hocko */ 20645a1c84b4SMel Gorman available = zone_reclaimable_pages(zone) / order; 206586a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 206686a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 206786a294a8SMichal Hocko ac_classzone_idx(ac), available); 2068cc5c9f09SVlastimil Babka if (compact_result != COMPACT_SKIPPED) 206986a294a8SMichal Hocko return true; 207086a294a8SMichal Hocko } 207186a294a8SMichal Hocko 207286a294a8SMichal Hocko return false; 207386a294a8SMichal Hocko } 207486a294a8SMichal Hocko 20755e1f0f09SMel Gorman static enum compact_result 20765e1f0f09SMel Gorman compact_zone(struct compact_control *cc, struct capture_control *capc) 2077748446bbSMel Gorman { 2078ea7ab982SMichal Hocko enum compact_result ret; 207940cacbcbSMel Gorman unsigned long start_pfn = cc->zone->zone_start_pfn; 208040cacbcbSMel Gorman unsigned long end_pfn = zone_end_pfn(cc->zone); 2081566e54e1SMel Gorman unsigned long last_migrated_pfn; 2082e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 20838854c55fSMel Gorman bool update_cached; 2084748446bbSMel Gorman 2085a94b5252SYafang Shao /* 2086a94b5252SYafang Shao * These counters track activities during zone compaction. Initialize 2087a94b5252SYafang Shao * them before compacting a new zone. 2088a94b5252SYafang Shao */ 2089a94b5252SYafang Shao cc->total_migrate_scanned = 0; 2090a94b5252SYafang Shao cc->total_free_scanned = 0; 2091a94b5252SYafang Shao cc->nr_migratepages = 0; 2092a94b5252SYafang Shao cc->nr_freepages = 0; 2093a94b5252SYafang Shao INIT_LIST_HEAD(&cc->freepages); 2094a94b5252SYafang Shao INIT_LIST_HEAD(&cc->migratepages); 2095a94b5252SYafang Shao 2096d39773a0SVlastimil Babka cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); 209740cacbcbSMel Gorman ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 2098ebff3980SVlastimil Babka cc->classzone_idx); 20993e7d3449SMel Gorman /* Compaction is likely to fail */ 2100cf378319SVlastimil Babka if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 21013e7d3449SMel Gorman return ret; 2102c46649deSMichal Hocko 2103c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 2104c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 21053e7d3449SMel Gorman 2106c89511abSMel Gorman /* 2107d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 2108accf6242SVlastimil Babka * is about to be retried after being deferred. 2109d3132e4bSVlastimil Babka */ 211040cacbcbSMel Gorman if (compaction_restarting(cc->zone, cc->order)) 211140cacbcbSMel Gorman __reset_isolation_suitable(cc->zone); 2112d3132e4bSVlastimil Babka 2113d3132e4bSVlastimil Babka /* 2114c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. 
Use cached 211506ed2998SVlastimil Babka * information on where the scanners should start (unless we explicitly 211606ed2998SVlastimil Babka * want to compact the whole zone), but check that it is initialised 211706ed2998SVlastimil Babka * by ensuring the values are within zone boundaries. 2118c89511abSMel Gorman */ 211970b44595SMel Gorman cc->fast_start_pfn = 0; 212006ed2998SVlastimil Babka if (cc->whole_zone) { 212106ed2998SVlastimil Babka cc->migrate_pfn = start_pfn; 212206ed2998SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 212306ed2998SVlastimil Babka } else { 212440cacbcbSMel Gorman cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 212540cacbcbSMel Gorman cc->free_pfn = cc->zone->compact_cached_free_pfn; 2126623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 212706b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 212840cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = cc->free_pfn; 2129c89511abSMel Gorman } 2130623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2131c89511abSMel Gorman cc->migrate_pfn = start_pfn; 213240cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 213340cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2134c89511abSMel Gorman } 2135c8f7de0bSMichal Hocko 2136e332f741SMel Gorman if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2137c8f7de0bSMichal Hocko cc->whole_zone = true; 213806ed2998SVlastimil Babka } 2139c8f7de0bSMichal Hocko 2140566e54e1SMel Gorman last_migrated_pfn = 0; 2141748446bbSMel Gorman 21428854c55fSMel Gorman /* 21438854c55fSMel Gorman * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 21448854c55fSMel Gorman * the basis that some migrations will fail in ASYNC mode. However, 21458854c55fSMel Gorman * if the cached PFNs match and pageblocks are skipped due to having 21468854c55fSMel Gorman * no isolation candidates, then the sync state does not matter. 21478854c55fSMel Gorman * Until a pageblock with isolation candidates is found, keep the 21488854c55fSMel Gorman * cached PFNs in sync to avoid revisiting the same blocks. 21498854c55fSMel Gorman */ 21508854c55fSMel Gorman update_cached = !sync && 21518854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 21528854c55fSMel Gorman 215316c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 215416c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 21550eb927c0SMel Gorman 2156748446bbSMel Gorman migrate_prep_local(); 2157748446bbSMel Gorman 215840cacbcbSMel Gorman while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 21599d502c1cSMinchan Kim int err; 2160566e54e1SMel Gorman unsigned long start_pfn = cc->migrate_pfn; 2161748446bbSMel Gorman 2162804d3121SMel Gorman /* 2163804d3121SMel Gorman * Avoid multiple rescans which can happen if a page cannot be 2164804d3121SMel Gorman * isolated (dirty/writeback in async mode) or if the migrated 2165804d3121SMel Gorman * pages are being allocated before the pageblock is cleared. 2166804d3121SMel Gorman * The first rescan will capture the entire pageblock for 2167804d3121SMel Gorman * migration. If it fails, it'll be marked skip and scanning 2168804d3121SMel Gorman * will proceed as normal. 
2169804d3121SMel Gorman */ 2170804d3121SMel Gorman cc->rescan = false; 2171804d3121SMel Gorman if (pageblock_start_pfn(last_migrated_pfn) == 2172804d3121SMel Gorman pageblock_start_pfn(start_pfn)) { 2173804d3121SMel Gorman cc->rescan = true; 2174804d3121SMel Gorman } 2175804d3121SMel Gorman 217632aaf055SPengfei Li switch (isolate_migratepages(cc)) { 2177f9e35b3bSMel Gorman case ISOLATE_ABORT: 21782d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 21795733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 2180e64c5237SShaohua Li cc->nr_migratepages = 0; 2181566e54e1SMel Gorman last_migrated_pfn = 0; 2182f9e35b3bSMel Gorman goto out; 2183f9e35b3bSMel Gorman case ISOLATE_NONE: 21848854c55fSMel Gorman if (update_cached) { 21858854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = 21868854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0]; 21878854c55fSMel Gorman } 21888854c55fSMel Gorman 2189fdaf7f5cSVlastimil Babka /* 2190fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 2191fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 2192fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 2193fdaf7f5cSVlastimil Babka */ 2194fdaf7f5cSVlastimil Babka goto check_drain; 2195f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 21968854c55fSMel Gorman update_cached = false; 2197566e54e1SMel Gorman last_migrated_pfn = start_pfn; 2198f9e35b3bSMel Gorman ; 2199f9e35b3bSMel Gorman } 2200748446bbSMel Gorman 2201d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 2202e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 22037b2a2d4aSMel Gorman MR_COMPACTION); 2204748446bbSMel Gorman 2205f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 2206f8c9301fSVlastimil Babka &cc->migratepages); 2207748446bbSMel Gorman 2208f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 2209f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 22109d502c1cSMinchan Kim if (err) { 22115733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 22127ed695e0SVlastimil Babka /* 22137ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 22147ed695e0SVlastimil Babka * and we want compact_finished() to detect it 22157ed695e0SVlastimil Babka */ 2216f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 22172d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 22184bf2bba3SDavid Rientjes goto out; 2219748446bbSMel Gorman } 2220fdd048e1SVlastimil Babka /* 2221fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 2222fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 2223fdd048e1SVlastimil Babka */ 2224fdd048e1SVlastimil Babka if (cc->direct_compaction && 2225fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 2226fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 2227fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 2228fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 2229566e54e1SMel Gorman last_migrated_pfn = 0; 2230fdd048e1SVlastimil Babka } 22314bf2bba3SDavid Rientjes } 2232fdaf7f5cSVlastimil Babka 2233fdaf7f5cSVlastimil Babka check_drain: 2234fdaf7f5cSVlastimil Babka /* 2235fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 2236fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? 
If yes, 2237fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 2238fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 2239fdaf7f5cSVlastimil Babka * would succeed. 2240fdaf7f5cSVlastimil Babka */ 2241566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 2242fdaf7f5cSVlastimil Babka int cpu; 2243fdaf7f5cSVlastimil Babka unsigned long current_block_start = 224406b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 2245fdaf7f5cSVlastimil Babka 2246566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 2247fdaf7f5cSVlastimil Babka cpu = get_cpu(); 2248fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 224940cacbcbSMel Gorman drain_local_pages(cc->zone); 2250fdaf7f5cSVlastimil Babka put_cpu(); 2251fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 2252566e54e1SMel Gorman last_migrated_pfn = 0; 2253fdaf7f5cSVlastimil Babka } 2254fdaf7f5cSVlastimil Babka } 2255fdaf7f5cSVlastimil Babka 22565e1f0f09SMel Gorman /* Stop if a page has been captured */ 22575e1f0f09SMel Gorman if (capc && capc->page) { 22585e1f0f09SMel Gorman ret = COMPACT_SUCCESS; 22595e1f0f09SMel Gorman break; 22605e1f0f09SMel Gorman } 2261748446bbSMel Gorman } 2262748446bbSMel Gorman 2263f9e35b3bSMel Gorman out: 22646bace090SVlastimil Babka /* 22656bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 22666bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 22676bace090SVlastimil Babka */ 22686bace090SVlastimil Babka if (cc->nr_freepages > 0) { 22696bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 22706bace090SVlastimil Babka 22716bace090SVlastimil Babka cc->nr_freepages = 0; 22726bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 22736bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 227406b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 22756bace090SVlastimil Babka /* 22766bace090SVlastimil Babka * Only go back, not forward. 
The cached pfn might have been 22776bace090SVlastimil Babka * already reset to zone end in compact_finished() 22786bace090SVlastimil Babka */ 227940cacbcbSMel Gorman if (free_pfn > cc->zone->compact_cached_free_pfn) 228040cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = free_pfn; 22816bace090SVlastimil Babka } 2282748446bbSMel Gorman 22837f354a54SDavid Rientjes count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 22847f354a54SDavid Rientjes count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 22857f354a54SDavid Rientjes 228616c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 228716c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 22880eb927c0SMel Gorman 2289748446bbSMel Gorman return ret; 2290748446bbSMel Gorman } 229176ab0f53SMel Gorman 2292ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 2293c3486f53SVlastimil Babka gfp_t gfp_mask, enum compact_priority prio, 22945e1f0f09SMel Gorman unsigned int alloc_flags, int classzone_idx, 22955e1f0f09SMel Gorman struct page **capture) 229656de7263SMel Gorman { 2297ea7ab982SMichal Hocko enum compact_result ret; 229856de7263SMel Gorman struct compact_control cc = { 229956de7263SMel Gorman .order = order, 2300dbe2d4e4SMel Gorman .search_order = order, 23016d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 230256de7263SMel Gorman .zone = zone, 2303a5508cd8SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ? 2304a5508cd8SVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2305ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 2306ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 2307accf6242SVlastimil Babka .direct_compaction = true, 2308a8e025e5SVlastimil Babka .whole_zone = (prio == MIN_COMPACT_PRIORITY), 23099f7e3387SVlastimil Babka .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 23109f7e3387SVlastimil Babka .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 231156de7263SMel Gorman }; 23125e1f0f09SMel Gorman struct capture_control capc = { 23135e1f0f09SMel Gorman .cc = &cc, 23145e1f0f09SMel Gorman .page = NULL, 23155e1f0f09SMel Gorman }; 23165e1f0f09SMel Gorman 23175e1f0f09SMel Gorman if (capture) 23185e1f0f09SMel Gorman current->capture_control = &capc; 231956de7263SMel Gorman 23205e1f0f09SMel Gorman ret = compact_zone(&cc, &capc); 2321e64c5237SShaohua Li 2322e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 2323e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 2324e64c5237SShaohua Li 23255e1f0f09SMel Gorman *capture = capc.page; 23265e1f0f09SMel Gorman current->capture_control = NULL; 23275e1f0f09SMel Gorman 2328e64c5237SShaohua Li return ret; 232956de7263SMel Gorman } 233056de7263SMel Gorman 23315e771905SMel Gorman int sysctl_extfrag_threshold = 500; 23325e771905SMel Gorman 233356de7263SMel Gorman /** 233456de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 233556de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 23361a6d53a1SVlastimil Babka * @order: The order of the current allocation 23371a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 23381a6d53a1SVlastimil Babka * @ac: The context of current allocation 2339112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 234056de7263SMel Gorman * 234156de7263SMel Gorman * This is the main entry point for direct page compaction. 
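 * @capture: Pointer to a free page created by compaction, if any, will be
 * stored here.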
234256de7263SMel Gorman */ 2343ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2344c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 23455e1f0f09SMel Gorman enum compact_priority prio, struct page **capture) 234656de7263SMel Gorman { 234756de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 234856de7263SMel Gorman struct zoneref *z; 234956de7263SMel Gorman struct zone *zone; 23501d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 235156de7263SMel Gorman 235273e64c51SMichal Hocko /* 235373e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 235473e64c51SMichal Hocko * tricky context because the migration might require IO 235573e64c51SMichal Hocko */ 235673e64c51SMichal Hocko if (!may_perform_io) 235753853e2dSVlastimil Babka return COMPACT_SKIPPED; 235856de7263SMel Gorman 2359a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2360837d026dSJoonsoo Kim 236156de7263SMel Gorman /* Compact each zone in the list */ 23621a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 23631a6d53a1SVlastimil Babka ac->nodemask) { 2364ea7ab982SMichal Hocko enum compact_result status; 236556de7263SMel Gorman 2366a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 2367a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 23681d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 236953853e2dSVlastimil Babka continue; 23701d4746d3SMichal Hocko } 237153853e2dSVlastimil Babka 2372a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 23735e1f0f09SMel Gorman alloc_flags, ac_classzone_idx(ac), capture); 237456de7263SMel Gorman rc = max(status, rc); 237556de7263SMel Gorman 23767ceb009aSVlastimil Babka /* The allocation should succeed, stop compacting */ 23777ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 237853853e2dSVlastimil Babka /* 237953853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 238053853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 238153853e2dSVlastimil Babka * will repeat this with true if allocation indeed 238253853e2dSVlastimil Babka * succeeds in this zone. 238353853e2dSVlastimil Babka */ 238453853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 23851f9efdefSVlastimil Babka 2386c3486f53SVlastimil Babka break; 23871f9efdefSVlastimil Babka } 23881f9efdefSVlastimil Babka 2389a5508cd8SVlastimil Babka if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2390c3486f53SVlastimil Babka status == COMPACT_PARTIAL_SKIPPED)) 239153853e2dSVlastimil Babka /* 239253853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 239353853e2dSVlastimil Babka * so we defer compaction there. If it ends up 239453853e2dSVlastimil Babka * succeeding after all, it will be reset. 239553853e2dSVlastimil Babka */ 239653853e2dSVlastimil Babka defer_compaction(zone, order); 23971f9efdefSVlastimil Babka 23981f9efdefSVlastimil Babka /* 23991f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 24001f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. 
In that 2401c3486f53SVlastimil Babka * case do not try further zones 24021f9efdefSVlastimil Babka */ 2403c3486f53SVlastimil Babka if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2404c3486f53SVlastimil Babka || fatal_signal_pending(current)) 24051f9efdefSVlastimil Babka break; 24061f9efdefSVlastimil Babka } 24071f9efdefSVlastimil Babka 240856de7263SMel Gorman return rc; 240956de7263SMel Gorman } 241056de7263SMel Gorman 241156de7263SMel Gorman 241276ab0f53SMel Gorman /* Compact all zones within a node */ 24137103f16dSAndrew Morton static void compact_node(int nid) 24147be62de9SRik van Riel { 2415791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2416791cae96SVlastimil Babka int zoneid; 2417791cae96SVlastimil Babka struct zone *zone; 24187be62de9SRik van Riel struct compact_control cc = { 24197be62de9SRik van Riel .order = -1, 2420e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 242191ca9186SDavid Rientjes .ignore_skip_hint = true, 242206ed2998SVlastimil Babka .whole_zone = true, 242373e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 24247be62de9SRik van Riel }; 24257be62de9SRik van Riel 2426791cae96SVlastimil Babka 2427791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2428791cae96SVlastimil Babka 2429791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2430791cae96SVlastimil Babka if (!populated_zone(zone)) 2431791cae96SVlastimil Babka continue; 2432791cae96SVlastimil Babka 2433791cae96SVlastimil Babka cc.zone = zone; 2434791cae96SVlastimil Babka 24355e1f0f09SMel Gorman compact_zone(&cc, NULL); 2436791cae96SVlastimil Babka 2437791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2438791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2439791cae96SVlastimil Babka } 24407be62de9SRik van Riel } 24417be62de9SRik van Riel 244276ab0f53SMel Gorman /* Compact all nodes in the system */ 24437964c06dSJason Liu static void compact_nodes(void) 244476ab0f53SMel Gorman { 244576ab0f53SMel Gorman int nid; 244676ab0f53SMel Gorman 24478575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 24488575ec29SHugh Dickins lru_add_drain_all(); 24498575ec29SHugh Dickins 245076ab0f53SMel Gorman for_each_online_node(nid) 245176ab0f53SMel Gorman compact_node(nid); 245276ab0f53SMel Gorman } 245376ab0f53SMel Gorman 245476ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 245576ab0f53SMel Gorman int sysctl_compact_memory; 245676ab0f53SMel Gorman 2457fec4eb2cSYaowei Bai /* 2458fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 2459fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 2460fec4eb2cSYaowei Bai */ 246176ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 246276ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 246376ab0f53SMel Gorman { 246476ab0f53SMel Gorman if (write) 24657964c06dSJason Liu compact_nodes(); 246676ab0f53SMel Gorman 246776ab0f53SMel Gorman return 0; 246876ab0f53SMel Gorman } 2469ed4a6d7fSMel Gorman 2470ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 247174e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 247210fbcf4cSKay Sievers struct device_attribute *attr, 2473ed4a6d7fSMel Gorman const char *buf, size_t count) 2474ed4a6d7fSMel Gorman { 24758575ec29SHugh Dickins int nid = dev->id; 24768575ec29SHugh Dickins 24778575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 24788575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 
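/* Pages still sitting in the per-CPU LRU pagevecs are not yet PageLRU and cannot be isolated for migration; drain them first. */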
24798575ec29SHugh Dickins lru_add_drain_all(); 24808575ec29SHugh Dickins 24818575ec29SHugh Dickins compact_node(nid); 24828575ec29SHugh Dickins } 2483ed4a6d7fSMel Gorman 2484ed4a6d7fSMel Gorman return count; 2485ed4a6d7fSMel Gorman } 24860825a6f9SJoe Perches static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); 2487ed4a6d7fSMel Gorman 2488ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 2489ed4a6d7fSMel Gorman { 249010fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 2491ed4a6d7fSMel Gorman } 2492ed4a6d7fSMel Gorman 2493ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 2494ed4a6d7fSMel Gorman { 249510fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 2496ed4a6d7fSMel Gorman } 2497ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2498ff9543fdSMichal Nazarewicz 2499698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2500698b1b30SVlastimil Babka { 2501172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 2502698b1b30SVlastimil Babka } 2503698b1b30SVlastimil Babka 2504698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 2505698b1b30SVlastimil Babka { 2506698b1b30SVlastimil Babka int zoneid; 2507698b1b30SVlastimil Babka struct zone *zone; 2508698b1b30SVlastimil Babka enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 2509698b1b30SVlastimil Babka 25106cd9dc3eSChen Feng for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { 2511698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2512698b1b30SVlastimil Babka 2513698b1b30SVlastimil Babka if (!populated_zone(zone)) 2514698b1b30SVlastimil Babka continue; 2515698b1b30SVlastimil Babka 2516698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 2517698b1b30SVlastimil Babka classzone_idx) == COMPACT_CONTINUE) 2518698b1b30SVlastimil Babka return true; 2519698b1b30SVlastimil Babka } 2520698b1b30SVlastimil Babka 2521698b1b30SVlastimil Babka return false; 2522698b1b30SVlastimil Babka } 2523698b1b30SVlastimil Babka 2524698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 2525698b1b30SVlastimil Babka { 2526698b1b30SVlastimil Babka /* 2527698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 2528698b1b30SVlastimil Babka * order is allocatable. 
2529698b1b30SVlastimil Babka */ 2530698b1b30SVlastimil Babka int zoneid; 2531698b1b30SVlastimil Babka struct zone *zone; 2532698b1b30SVlastimil Babka struct compact_control cc = { 2533698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 2534dbe2d4e4SMel Gorman .search_order = pgdat->kcompactd_max_order, 2535698b1b30SVlastimil Babka .classzone_idx = pgdat->kcompactd_classzone_idx, 2536698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 2537a0647dc9SDavid Rientjes .ignore_skip_hint = false, 253873e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 2539698b1b30SVlastimil Babka }; 2540698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2541698b1b30SVlastimil Babka cc.classzone_idx); 25427f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 2543698b1b30SVlastimil Babka 25446cd9dc3eSChen Feng for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { 2545698b1b30SVlastimil Babka int status; 2546698b1b30SVlastimil Babka 2547698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2548698b1b30SVlastimil Babka if (!populated_zone(zone)) 2549698b1b30SVlastimil Babka continue; 2550698b1b30SVlastimil Babka 2551698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 2552698b1b30SVlastimil Babka continue; 2553698b1b30SVlastimil Babka 2554698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 2555698b1b30SVlastimil Babka COMPACT_CONTINUE) 2556698b1b30SVlastimil Babka continue; 2557698b1b30SVlastimil Babka 2558172400c6SVlastimil Babka if (kthread_should_stop()) 2559172400c6SVlastimil Babka return; 2560a94b5252SYafang Shao 2561a94b5252SYafang Shao cc.zone = zone; 25625e1f0f09SMel Gorman status = compact_zone(&cc, NULL); 2563698b1b30SVlastimil Babka 25647ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 2565698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 2566c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2567698b1b30SVlastimil Babka /* 2568bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 2569bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 2570bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 2571bc3106b2SDavid Rientjes * upcoming deferral. 2572bc3106b2SDavid Rientjes */ 2573bc3106b2SDavid Rientjes drain_all_pages(zone); 2574bc3106b2SDavid Rientjes 2575bc3106b2SDavid Rientjes /* 2576698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 2577698b1b30SVlastimil Babka * sync direct compaction does. 2578698b1b30SVlastimil Babka */ 2579698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 2580698b1b30SVlastimil Babka } 2581698b1b30SVlastimil Babka 25827f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 25837f354a54SDavid Rientjes cc.total_migrate_scanned); 25847f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 25857f354a54SDavid Rientjes cc.total_free_scanned); 25867f354a54SDavid Rientjes 2587698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2588698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2589698b1b30SVlastimil Babka } 2590698b1b30SVlastimil Babka 2591698b1b30SVlastimil Babka /* 2592698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. 
But remember 2593698b1b30SVlastimil Babka * the requested order/classzone_idx in case it was higher/tighter than 2594698b1b30SVlastimil Babka * our current ones 2595698b1b30SVlastimil Babka */ 2596698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2597698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2598698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 2599698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2600698b1b30SVlastimil Babka } 2601698b1b30SVlastimil Babka 2602698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 2603698b1b30SVlastimil Babka { 2604698b1b30SVlastimil Babka if (!order) 2605698b1b30SVlastimil Babka return; 2606698b1b30SVlastimil Babka 2607698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2608698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2609698b1b30SVlastimil Babka 2610698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx > classzone_idx) 2611698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = classzone_idx; 2612698b1b30SVlastimil Babka 26136818600fSDavidlohr Bueso /* 26146818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 26156818600fSDavidlohr Bueso * such that wakeups are not missed. 26166818600fSDavidlohr Bueso */ 26176818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2618698b1b30SVlastimil Babka return; 2619698b1b30SVlastimil Babka 2620698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2621698b1b30SVlastimil Babka return; 2622698b1b30SVlastimil Babka 2623698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2624698b1b30SVlastimil Babka classzone_idx); 2625698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2626698b1b30SVlastimil Babka } 2627698b1b30SVlastimil Babka 2628698b1b30SVlastimil Babka /* 2629698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2630698b1b30SVlastimil Babka * from the init process. 
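 * kcompactd sleeps on kcompactd_wait until wakeup_kcompactd() records a
 * pending order for the node, then compacts the node's zones via
 * kcompactd_do_work() with PSI memstall accounting around the work.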
2631698b1b30SVlastimil Babka */
2632698b1b30SVlastimil Babka static int kcompactd(void *p)
2633698b1b30SVlastimil Babka {
2634698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p;
2635698b1b30SVlastimil Babka struct task_struct *tsk = current;
2636698b1b30SVlastimil Babka
2637698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2638698b1b30SVlastimil Babka
2639698b1b30SVlastimil Babka if (!cpumask_empty(cpumask))
2640698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask);
2641698b1b30SVlastimil Babka
2642698b1b30SVlastimil Babka set_freezable();
2643698b1b30SVlastimil Babka
2644698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0;
2645698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2646698b1b30SVlastimil Babka
2647698b1b30SVlastimil Babka while (!kthread_should_stop()) {
2648eb414681SJohannes Weiner unsigned long pflags;
2649eb414681SJohannes Weiner
2650698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2651698b1b30SVlastimil Babka wait_event_freezable(pgdat->kcompactd_wait,
2652698b1b30SVlastimil Babka kcompactd_work_requested(pgdat));
2653698b1b30SVlastimil Babka
2654eb414681SJohannes Weiner psi_memstall_enter(&pflags);
2655698b1b30SVlastimil Babka kcompactd_do_work(pgdat);
2656eb414681SJohannes Weiner psi_memstall_leave(&pflags);
2657698b1b30SVlastimil Babka }
2658698b1b30SVlastimil Babka
2659698b1b30SVlastimil Babka return 0;
2660698b1b30SVlastimil Babka }
2661698b1b30SVlastimil Babka
2662698b1b30SVlastimil Babka /*
2663698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add.
2664698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
2665698b1b30SVlastimil Babka */
2666698b1b30SVlastimil Babka int kcompactd_run(int nid)
2667698b1b30SVlastimil Babka {
2668698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid);
2669698b1b30SVlastimil Babka int ret = 0;
2670698b1b30SVlastimil Babka
2671698b1b30SVlastimil Babka if (pgdat->kcompactd)
2672698b1b30SVlastimil Babka return 0;
2673698b1b30SVlastimil Babka
2674698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2675698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) {
2676698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid);
2677698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd);
2678698b1b30SVlastimil Babka pgdat->kcompactd = NULL;
2679698b1b30SVlastimil Babka }
2680698b1b30SVlastimil Babka return ret;
2681698b1b30SVlastimil Babka }
2682698b1b30SVlastimil Babka
2683698b1b30SVlastimil Babka /*
2684698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must
2685698b1b30SVlastimil Babka * hold mem_hotplug_begin/end().
2686698b1b30SVlastimil Babka */
2687698b1b30SVlastimil Babka void kcompactd_stop(int nid)
2688698b1b30SVlastimil Babka {
2689698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2690698b1b30SVlastimil Babka
2691698b1b30SVlastimil Babka if (kcompactd) {
2692698b1b30SVlastimil Babka kthread_stop(kcompactd);
2693698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL;
2694698b1b30SVlastimil Babka }
2695698b1b30SVlastimil Babka }
2696698b1b30SVlastimil Babka
2697698b1b30SVlastimil Babka /*
2698698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but
2699698b1b30SVlastimil Babka * not required for correctness.
So if the last cpu in a node goes 2700698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2701698b1b30SVlastimil Babka * restore their cpu bindings. 2702698b1b30SVlastimil Babka */ 2703e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 2704698b1b30SVlastimil Babka { 2705698b1b30SVlastimil Babka int nid; 2706698b1b30SVlastimil Babka 2707698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2708698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2709698b1b30SVlastimil Babka const struct cpumask *mask; 2710698b1b30SVlastimil Babka 2711698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2712698b1b30SVlastimil Babka 2713698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2714698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2715698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2716698b1b30SVlastimil Babka } 2717e46b1db2SAnna-Maria Gleixner return 0; 2718698b1b30SVlastimil Babka } 2719698b1b30SVlastimil Babka 2720698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2721698b1b30SVlastimil Babka { 2722698b1b30SVlastimil Babka int nid; 2723e46b1db2SAnna-Maria Gleixner int ret; 2724e46b1db2SAnna-Maria Gleixner 2725e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2726e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 2727e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 2728e46b1db2SAnna-Maria Gleixner if (ret < 0) { 2729e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 2730e46b1db2SAnna-Maria Gleixner return ret; 2731e46b1db2SAnna-Maria Gleixner } 2732698b1b30SVlastimil Babka 2733698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2734698b1b30SVlastimil Babka kcompactd_run(nid); 2735698b1b30SVlastimil Babka return 0; 2736698b1b30SVlastimil Babka } 2737698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2738698b1b30SVlastimil Babka 2739ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 2740
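/*
 * A minimal user-space sketch, assuming the usual procfs/sysfs mount points,
 * of driving the interfaces defined above. Writing any value to
 * /proc/sys/vm/compact_memory reaches sysctl_compaction_handler() and
 * compacts every online node:
 *
 *	int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 *
 * A single node can be compacted instead by writing to
 * /sys/devices/system/node/nodeN/compact, which is handled by
 * sysfs_compact_node() above.
 */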