// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)

static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
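/*
 * E.g. with order == pageblock_order, block_start_pfn() rounds a pfn down to
 * the first pfn of its pageblock and block_end_pfn() yields the first pfn of
 * the following pageblock.
 */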

/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif
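/*
 * For instance, on x86-64 with 4KiB base pages all three branches evaluate
 * to order 9 (2MiB), so the fragmentation score is computed with respect to
 * huge-page sized allocations.
 */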

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

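/*
 * Take the isolated free pages (each recorded with its buddy order in
 * page_private()), run the allocator's post-allocation hook on them and
 * split anything of order > 0 into order-0 pages before handing the list
 * back to the caller.
 */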
static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page)
{
	const struct movable_operations *mops;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return false;

	mops = page_movable_ops(page);
	if (mops)
		return true;

	return false;
}

void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * This page still has the type of a movable page, but it's
	 * actually not movable any more.
	 */
	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
}
EXPORT_SYMBOL(__ClearPageMovable);
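
/*
 * Note: the movable_operations pointer is stored in page->mapping with the
 * low PAGE_MAPPING_MOVABLE bit set, which is how movable (non-LRU) pages are
 * recognised by __PageMovable() and friends.
 */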

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift, compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
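
/*
 * In effect this is an exponential backoff: each failure bumps
 * compact_defer_shift, so on the order of 1 << compact_defer_shift attempts
 * (capped at 1 << COMPACT_MAX_DEFER_SHIFT == 64) are skipped before
 * compaction of that order is retried.
 */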

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

#ifdef CONFIG_SPARSEMEM
/*
 * If the PFN falls into an offline section, return the start PFN of the
 * next online section. If the PFN falls into an online section or if
 * there is no next online section, return 0.
 */
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	unsigned long start_nr = pfn_to_section_nr(start_pfn);

	if (online_section_nr(start_nr))
		return 0;

	while (++start_nr <= __highest_present_section_nr) {
		if (online_section_nr(start_nr))
			return section_nr_to_pfn(start_nr);
	}

	return 0;
}

/*
 * If the PFN falls into an offline section, return the end PFN of the
 * next online section in reverse. If the PFN falls into an online section
 * or if there is no next online section in reverse, return 0.
 */
static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	unsigned long start_nr = pfn_to_section_nr(start_pfn);

	if (!start_nr || online_section_nr(start_nr))
		return 0;

	while (start_nr-- > 0) {
		if (online_section_nr(start_nr))
			return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
	}

	return 0;
}
#else
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	return 0;
}

static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	return 0;
}
#endif

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	pfn = pageblock_end_pfn(pfn);

	/* Update where async and sync compaction should restart */
	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	set_pageblock_skip(page);

	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}
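
/*
 * Typical caller pattern (sketch, see the free page scanner below): the lock
 * is taken lazily and its state remembered, e.g.
 *
 *	if (!locked)
 *		locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc);
 *	...
 *	if (locked)
 *		spin_unlock_irqrestore(&cc->zone->lock, flags);
 */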

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
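
/*
 * The free page scanner calls this every COMPACT_CLUSTER_MAX pfns (see the
 * "blockpfn % COMPACT_CLUSTER_MAX" check below); the migration scanner
 * open-codes the same drop-lock/resched pattern so that IRQs are never
 * disabled for an unbounded stretch of the scan.
 */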

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *page;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	page = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
		int isolated;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending.
		 */
		if (!(blockpfn % COMPACT_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order <= MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				page += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking. */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		nr_scanned += isolated - 1;
		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		page += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
		}

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct compact_control *cc)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	bool too_many;

	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	/*
	 * Allow GFP_NOFS to isolate past the limit set for regular
	 * compaction runs. This prevents an ABBA deadlock when other
	 * compactors have already isolated to the limit, but are
	 * blocked on filesystem locks held by the GFP_NOFS thread.
	 */
	if (cc->gfp_mask & __GFP_FS) {
		inactive >>= 3;
		active >>= 3;
	}

	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}
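
/*
 * E.g. with 4000 inactive + 2000 active pages on the node, a GFP_NOFS caller
 * only sees "too many" once more than 3000 pages are isolated, while a
 * regular (__GFP_FS) caller has both counts scaled down by 8 and therefore
 * hits the limit much earlier.
 */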

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @mode:	Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case e.g. signal pending or congestion,
 * -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct folio *folio = NULL;
	struct page *page = NULL, *valid_page = NULL;
	struct address_space *mapping;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(cc))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the first PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && (pageblock_aligned(low_pfn) ||
				    low_pfn == cc->zone->zone_start_pfn)) {
			if (!isolation_suitable(cc, page)) {
				low_pfn = end_pfn;
				folio = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				 /* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += compound_nr(page) - 1;
				nr_scanned += compound_nr(page) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				folio = page_folio(page);
				low_pfn += folio_nr_pages(folio) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order <= MAX_ORDER) {
				low_pfn += (1UL << freepage_order) - 1;
				nr_scanned += (1UL << freepage_order) - 1;
			}
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order <= MAX_ORDER)) {
				low_pfn += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (isolate_movable_page(page, mode)) {
					folio = page_folio(page);
					goto isolate_success;
				}
			}

			goto isolate_fail;
		}

10549df41314SAlex Shi /*
1055b1baabd9SMatthew Wilcox (Oracle) * Be careful not to clear PageLRU until after we're
10566168d0daSAlex Shi * sure the page is not being freed elsewhere -- the
105769b7189fSVlastimil Babka * page release code relies on it.
10586168d0daSAlex Shi */
10596168d0daSAlex Shi folio = folio_get_nontail_page(page);
10606168d0daSAlex Shi if (unlikely(!folio))
10616168d0daSAlex Shi goto isolate_fail;
10626168d0daSAlex Shi
10636168d0daSAlex Shi /*
10646168d0daSAlex Shi * Migration will fail if an anonymous page is pinned in memory,
1065e809c3feSMatthew Wilcox (Oracle) * so avoid taking lru_lock and isolating it unnecessarily in an
1066e380bebeSMel Gorman * admittedly racy check.
1067e380bebeSMel Gorman */
1068e380bebeSMel Gorman mapping = folio_mapping(folio);
1069e380bebeSMel Gorman if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
1070e380bebeSMel Gorman goto isolate_fail_put;
1071e380bebeSMel Gorman
1072e380bebeSMel Gorman /*
10732a1402aaSMel Gorman * Only allow to migrate anonymous pages in GFP_NOFS context
107429c0dde8SVlastimil Babka * because those do not depend on fs locks.
107529c0dde8SVlastimil Babka */
107629c0dde8SVlastimil Babka if (!(cc->gfp_mask & __GFP_FS) && mapping)
107729c0dde8SVlastimil Babka goto isolate_fail_put;
107829c0dde8SVlastimil Babka
10791da2f328SRik van Riel /* Only take pages on LRU: a check now makes later tests safe */
1080d8c6546bSMatthew Wilcox (Oracle) if (!folio_test_lru(folio))
10819df41314SAlex Shi goto isolate_fail_put;
10829df41314SAlex Shi
1083bc835011SAndrea Arcangeli /* Compaction might skip unevictable pages but CMA takes them */
1084d99fd5feSAlex Shi if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
1085fa9add64SHugh Dickins goto isolate_fail_put;
10861da2f328SRik van Riel
10871da2f328SRik van Riel /*
10881da2f328SRik van Riel * To minimise LRU disruption, the caller can indicate with
1089bc835011SAndrea Arcangeli * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
1090748446bbSMel Gorman * it will be able to migrate without blocking - clean pages
109146ae6b2cSYu Zhao * for the most part. PageWriteback would require blocking.
10921da2f328SRik van Riel */
10939de4f22aSHuang Ying if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
10946c357848SMatthew Wilcox (Oracle) goto isolate_fail_put;
1095b6c75016SJoonsoo Kim
1096b6c75016SJoonsoo Kim if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
1097fdd048e1SVlastimil Babka bool migrate_dirty;
1098ae37c7ffSOscar Salvador
109938935861SZi Yan /*
110038935861SZi Yan * Only folios without mappings or that have
1101b717d6b9SWilliam Lam * a ->migrate_folio callback are possible to
1102748446bbSMel Gorman * migrate without blocking. However, we may
1103804d3121SMel Gorman * be racing with truncation, which can free
1104804d3121SMel Gorman * the mapping. Truncation holds the folio lock
1105cb2dcaf0SMel Gorman * until after the folio is removed from the page
1106cb2dcaf0SMel Gorman * cache so holding it ourselves is sufficient.
1107cb2dcaf0SMel Gorman */
1108804d3121SMel Gorman if (!folio_trylock(folio))
110938935861SZi Yan goto isolate_fail_put;
1110cb2dcaf0SMel Gorman
111131b8384aSHillf Danton mapping = folio_mapping(folio);
1112748446bbSMel Gorman migrate_dirty = !mapping ||
1113748446bbSMel Gorman mapping->a_ops->migrate_folio;
1114fdd048e1SVlastimil Babka folio_unlock(folio);
1115fdd048e1SVlastimil Babka if (!migrate_dirty)
11169df41314SAlex Shi goto isolate_fail_put;
11179df41314SAlex Shi }
11189df41314SAlex Shi
11199df41314SAlex Shi /* Try to isolate the folio */
11206168d0daSAlex Shi if (!folio_test_clear_lru(folio))
11216168d0daSAlex Shi goto isolate_fail_put;
11229df41314SAlex Shi
11239df41314SAlex Shi lruvec = folio_lruvec(folio);
11249df41314SAlex Shi
1125fdd048e1SVlastimil Babka /* If we already hold the lock, we can skip some rechecking */
1126369fa227SOscar Salvador if (lruvec != locked) {
1127fdd048e1SVlastimil Babka if (locked)
1128fdd048e1SVlastimil Babka unlock_page_lruvec_irqrestore(locked, flags);
1129fdd048e1SVlastimil Babka
1130fdd048e1SVlastimil Babka compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
1131fdd048e1SVlastimil Babka locked = lruvec;
1132fdd048e1SVlastimil Babka
1133fdd048e1SVlastimil Babka lruvec_memcg_debug(lruvec, folio);
1134fdd048e1SVlastimil Babka
1135fdd048e1SVlastimil Babka /*
11366168d0daSAlex Shi * Try to get exclusive access under lock. If marked for
11376168d0daSAlex Shi * skip, the scan is aborted unless the current context
1138fdd048e1SVlastimil Babka * is a rescan to reach the end of the pageblock.
1139fdd048e1SVlastimil Babka */
1140fdd048e1SVlastimil Babka if (!skip_updated && valid_page) {
1141fdd048e1SVlastimil Babka skip_updated = true;
1142fdd048e1SVlastimil Babka if (test_and_set_skip(cc, valid_page) &&
1143fdd048e1SVlastimil Babka !cc->finish_pageblock) {
1144fdd048e1SVlastimil Babka low_pfn = end_pfn;
1145fdd048e1SVlastimil Babka goto isolate_abort;
1146fdd048e1SVlastimil Babka }
1147fdd048e1SVlastimil Babka }
1148fdd048e1SVlastimil Babka
1149fdd048e1SVlastimil Babka /*
1150fdd048e1SVlastimil Babka * The folio became large since the non-locked check,
1151fdd048e1SVlastimil Babka * and it's on the LRU.
1152369fa227SOscar Salvador */
1153369fa227SOscar Salvador if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) {
1154369fa227SOscar Salvador low_pfn += folio_nr_pages(folio) - 1;
115531b8384aSHillf Danton nr_scanned += folio_nr_pages(folio) - 1;
1156748446bbSMel Gorman folio_set_lru(folio);
115799c0fd5eSVlastimil Babka goto isolate_fail_put;
115899c0fd5eSVlastimil Babka }
115999c0fd5eSVlastimil Babka }
116099c0fd5eSVlastimil Babka
116199c0fd5eSVlastimil Babka /* The folio is taken off the LRU */
116299c0fd5eSVlastimil Babka if (folio_test_large(folio))
116399c0fd5eSVlastimil Babka low_pfn += folio_nr_pages(folio) - 1;
11649df41314SAlex Shi
11659df41314SAlex Shi /* Successfully isolated */
1166e380bebeSMel Gorman lruvec_del_folio(lruvec, folio);
1167c67fe375SMel Gorman node_stat_mod_folio(folio,
11686168d0daSAlex Shi NR_ISOLATED_ANON + folio_is_file_lru(folio),
11699df41314SAlex Shi folio_nr_pages(folio));
11709df41314SAlex Shi
11719df41314SAlex Shi isolate_success:
11729df41314SAlex Shi list_add(&folio->lru, &cc->migratepages);
1173748446bbSMel Gorman isolate_success_no_list:
117450b5b094SVlastimil Babka cc->nr_migratepages += folio_nr_pages(folio);
1175804d3121SMel Gorman nr_isolated += folio_nr_pages(folio);
1176804d3121SMel Gorman nr_scanned += folio_nr_pages(folio) - 1;
1177804d3121SMel Gorman
1178804d3121SMel Gorman /*
1179804d3121SMel Gorman * Avoid isolating too much unless this block is being
1180804d3121SMel Gorman * fully scanned (e.g. dirty/writeback pages, parallel allocation)
118150b5b094SVlastimil Babka * or a lock is contended. For contention, isolate quickly to
1182804d3121SMel Gorman * potentially remove one source of contention.
1183e380bebeSMel Gorman */
1184e380bebeSMel Gorman if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
1185e380bebeSMel Gorman !cc->finish_pageblock && !cc->contended) {
1186e380bebeSMel Gorman ++low_pfn;
1187bb13ffebSMel Gorman break;
1188e34d85f0SJoonsoo Kim }
1189e34d85f0SJoonsoo Kim
1190b7aba698SMel Gorman continue;
1191670105a2SMel Gorman
11927f354a54SDavid Rientjes isolate_fail_put:
1193397487dbSMel Gorman /* Avoid potential deadlock in freeing page under lru_lock */
1194010fc29aSMinchan Kim if (locked) {
1195397487dbSMel Gorman unlock_page_lruvec_irqrestore(locked, flags);
1196c2ad7a1fSOscar Salvador locked = NULL;
1197c2ad7a1fSOscar Salvador }
1198c2ad7a1fSOscar Salvador folio_put(folio);
11992fe86e00SMichal Nazarewicz
12002fe86e00SMichal Nazarewicz isolate_fail:
1201edc2ca61SVlastimil Babka if (!skip_on_failure && ret != -ENOMEM)
1202edc2ca61SVlastimil Babka continue;
1203edc2ca61SVlastimil Babka
1204edc2ca61SVlastimil Babka /*
1205edc2ca61SVlastimil Babka * We have isolated some pages, but then failed. Release them
1206edc2ca61SVlastimil Babka * instead of migrating, as we cannot form the cc->order buddy
1207369fa227SOscar Salvador * page anyway.
1208369fa227SOscar Salvador */
1209edc2ca61SVlastimil Babka if (nr_isolated) {
1210c2ad7a1fSOscar Salvador if (locked) {
1211edc2ca61SVlastimil Babka unlock_page_lruvec_irqrestore(locked, flags);
1212edc2ca61SVlastimil Babka locked = NULL;
1213edc2ca61SVlastimil Babka }
1214e1409c32SJoonsoo Kim putback_movable_pages(&cc->migratepages);
1215c2ad7a1fSOscar Salvador cc->nr_migratepages = 0;
1216edc2ca61SVlastimil Babka nr_isolated = 0;
1217edc2ca61SVlastimil Babka }
1218edc2ca61SVlastimil Babka
121906b6640aSVlastimil Babka if (low_pfn < next_skip_pfn) {
1220e1409c32SJoonsoo Kim low_pfn = next_skip_pfn - 1;
1221e1409c32SJoonsoo Kim /*
122206b6640aSVlastimil Babka * The check near the loop beginning would have updated
1223edc2ca61SVlastimil Babka * next_skip_pfn too, but this is a bit simpler.
1224edc2ca61SVlastimil Babka */
1225e1409c32SJoonsoo Kim next_skip_pfn += 1UL << cc->order;
1226edc2ca61SVlastimil Babka }
1227edc2ca61SVlastimil Babka
1228edc2ca61SVlastimil Babka if (ret == -ENOMEM)
1229edc2ca61SVlastimil Babka break;
1230e1409c32SJoonsoo Kim }
1231e1409c32SJoonsoo Kim
1232edc2ca61SVlastimil Babka /*
1233edc2ca61SVlastimil Babka * The PageBuddy() check could have potentially brought us outside
1234c2ad7a1fSOscar Salvador * the range to be scanned.
1235edc2ca61SVlastimil Babka */
1236edc2ca61SVlastimil Babka if (unlikely(low_pfn > end_pfn))
1237c2ad7a1fSOscar Salvador low_pfn = end_pfn;
1238edc2ca61SVlastimil Babka
12396ea41c0cSJoonsoo Kim folio = NULL;
124038935861SZi Yan
12416ea41c0cSJoonsoo Kim isolate_abort:
1242edc2ca61SVlastimil Babka if (locked)
1243edc2ca61SVlastimil Babka unlock_page_lruvec_irqrestore(locked, flags);
1244c2ad7a1fSOscar Salvador if (folio) {
1245edc2ca61SVlastimil Babka folio_set_lru(folio);
1246edc2ca61SVlastimil Babka folio_put(folio);
1247ff9543fdSMichal Nazarewicz }
1248ff9543fdSMichal Nazarewicz
1249018e9a49SAndrew Morton /*
1250b682debdSVlastimil Babka * Update the cached scanner pfn once the pageblock has been scanned.
1251b682debdSVlastimil Babka * Pages will either be migrated, in which case there is no point
1252b682debdSVlastimil Babka * scanning in the near future, or migration failed, in which case the
1253282722b0SVlastimil Babka * failure reason may persist. The block is marked for skipping if
1254282722b0SVlastimil Babka * there were no pages isolated in the block or if the block is
12559bebefd5SMel Gorman * rescanned twice in a row.
12569bebefd5SMel Gorman */
12579bebefd5SMel Gorman if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
1258282722b0SVlastimil Babka if (!cc->no_set_skip_hint && valid_page && !skip_updated)
1259b682debdSVlastimil Babka set_pageblock_skip(valid_page);
1260b682debdSVlastimil Babka update_cached_migrate(cc, low_pfn);
1261282722b0SVlastimil Babka }
1262282722b0SVlastimil Babka
1263282722b0SVlastimil Babka trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1264282722b0SVlastimil Babka nr_scanned, nr_isolated);
1265282722b0SVlastimil Babka
1266282722b0SVlastimil Babka fatal_pending:
1267b682debdSVlastimil Babka cc->total_migrate_scanned += nr_scanned;
1268b682debdSVlastimil Babka if (nr_isolated)
1269018e9a49SAndrew Morton count_compact_events(COMPACTISOLATED, nr_isolated);
12709f7e3387SVlastimil Babka
12719f7e3387SVlastimil Babka cc->migrate_pfn = low_pfn;
1272018e9a49SAndrew Morton
1273018e9a49SAndrew Morton return ret;
1274018e9a49SAndrew Morton }
1275018e9a49SAndrew Morton
1276018e9a49SAndrew Morton /**
1277018e9a49SAndrew Morton * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1278018e9a49SAndrew Morton * @cc: Compaction control structure.
1279018e9a49SAndrew Morton * @start_pfn: The first PFN to start isolating.
1280ab130f91SMatthew Wilcox (Oracle) * @end_pfn: The one-past-last PFN.
1281018e9a49SAndrew Morton *
1282018e9a49SAndrew Morton * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
1283018e9a49SAndrew Morton * in case we could not allocate a page, or 0.
12841ef36db2SYisheng Xie */
12851ef36db2SYisheng Xie int
isolate_migratepages_range(struct compact_control * cc,unsigned long start_pfn,unsigned long end_pfn)12861ef36db2SYisheng Xie isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1287018e9a49SAndrew Morton unsigned long end_pfn)
1288b682debdSVlastimil Babka {
1289018e9a49SAndrew Morton unsigned long pfn, block_start_pfn, block_end_pfn;
1290018e9a49SAndrew Morton int ret = 0;
1291018e9a49SAndrew Morton
1292018e9a49SAndrew Morton /* Scan block by block. First and last block may be incomplete */
1293018e9a49SAndrew Morton pfn = start_pfn;
1294018e9a49SAndrew Morton block_start_pfn = pageblock_start_pfn(pfn);
129570b44595SMel Gorman if (block_start_pfn < cc->zone->zone_start_pfn)
129670b44595SMel Gorman block_start_pfn = cc->zone->zone_start_pfn;
129770b44595SMel Gorman block_end_pfn = pageblock_end_pfn(pfn);
1298dd7ef7bdSQian Cai
1299dd7ef7bdSQian Cai for (; pfn < end_pfn; pfn = block_end_pfn,
1300dd7ef7bdSQian Cai block_start_pfn = block_end_pfn,
130170b44595SMel Gorman block_end_pfn += pageblock_nr_pages) {
130270b44595SMel Gorman
1303ff9543fdSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn);
1304f2849aa0SVlastimil Babka
1305f2849aa0SVlastimil Babka if (!pageblock_pfn_to_page(block_start_pfn,
1306f2849aa0SVlastimil Babka block_end_pfn, cc->zone))
1307f2849aa0SVlastimil Babka continue;
1308f2849aa0SVlastimil Babka
1309f2849aa0SVlastimil Babka ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
1310f2849aa0SVlastimil Babka ISOLATE_UNEVICTABLE);
1311f2849aa0SVlastimil Babka
1312f2849aa0SVlastimil Babka if (ret)
13135a811889SMel Gorman break;
13145a811889SMel Gorman
13155a811889SMel Gorman if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
13165a811889SMel Gorman break;
13175a811889SMel Gorman }
13185a811889SMel Gorman
13195a811889SMel Gorman return ret;
13205a811889SMel Gorman }
13215a811889SMel Gorman
13225a811889SMel Gorman #endif /* CONFIG_COMPACTION || CONFIG_CMA */
13235a811889SMel Gorman #ifdef CONFIG_COMPACTION
13245a811889SMel Gorman
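/*
 * Determine whether a pageblock may serve as a source of pages to migrate.
 * Async direct compaction only takes blocks whose migratetype is compatible
 * with the allocation, to keep the opportunistic work to a minimum.
 */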
suitable_migration_source(struct compact_control * cc,struct page * page)13255a811889SMel Gorman static bool suitable_migration_source(struct compact_control *cc,
13265a811889SMel Gorman struct page *page)
13275a811889SMel Gorman {
13285a811889SMel Gorman int block_mt;
13295a811889SMel Gorman
13305a811889SMel Gorman if (pageblock_skip_persistent(page))
13315a811889SMel Gorman return false;
13325a811889SMel Gorman
13335a811889SMel Gorman if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
13345a811889SMel Gorman return true;
133570b44595SMel Gorman
133670b44595SMel Gorman block_mt = get_pageblock_migratetype(page);
133770b44595SMel Gorman
133870b44595SMel Gorman if (cc->migratetype == MIGRATE_MOVABLE)
133970b44595SMel Gorman return is_migrate_movable(block_mt);
134070b44595SMel Gorman else
134170b44595SMel Gorman return block_mt == cc->migratetype;
134270b44595SMel Gorman }
134370b44595SMel Gorman
134470b44595SMel Gorman /* Returns true if the page is within a block suitable for migration to */
suitable_migration_target(struct compact_control * cc,struct page * page)134570b44595SMel Gorman static bool suitable_migration_target(struct compact_control *cc,
13465a811889SMel Gorman struct page *page)
1347*be21b32aSNARIBAYASHI Akira {
13485a811889SMel Gorman /* If the page is a large free page, then disallow migration */
13495a811889SMel Gorman if (PageBuddy(page)) {
13506e2b7044SVlastimil Babka /*
13515a811889SMel Gorman * We are checking page_order without zone->lock taken. But
13525a811889SMel Gorman * the only small danger is that we skip a potentially suitable
13535a811889SMel Gorman * pageblock, so it's not worth checking the order against a valid range.
13545a811889SMel Gorman */
13555a811889SMel Gorman if (buddy_order_unsafe(page) >= pageblock_order)
13565a811889SMel Gorman return false;
13575a811889SMel Gorman }
13585a811889SMel Gorman
13595a811889SMel Gorman if (cc->ignore_block_suitable)
13605a811889SMel Gorman return true;
13616e2b7044SVlastimil Babka
13626e2b7044SVlastimil Babka /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
13636e2b7044SVlastimil Babka if (is_migrate_movable(get_pageblock_migratetype(page)))
13646e2b7044SVlastimil Babka return true;
13656e2b7044SVlastimil Babka
13666e2b7044SVlastimil Babka /* Otherwise skip the block */
13675a811889SMel Gorman return false;
13684fca9730SMel Gorman }
13695a811889SMel Gorman
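/*
 * Scan limit for the fast free-page search: each fast_search_fail halves the
 * number of freelist entries examined, down to a minimum of one.
 */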
13705a811889SMel Gorman static inline unsigned int
freelist_scan_limit(struct compact_control * cc)13715a811889SMel Gorman freelist_scan_limit(struct compact_control *cc)
13725a811889SMel Gorman {
1373*be21b32aSNARIBAYASHI Akira unsigned short shift = BITS_PER_LONG - 1;
1374*be21b32aSNARIBAYASHI Akira
13755a811889SMel Gorman return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
13765a811889SMel Gorman }
1377dbe2d4e4SMel Gorman
1378dbe2d4e4SMel Gorman /*
1379dbe2d4e4SMel Gorman * Test whether the free scanner has reached the same or lower pageblock than
1380dbe2d4e4SMel Gorman * the migration scanner, and compaction should thus terminate.
1381dbe2d4e4SMel Gorman */
compact_scanners_met(struct compact_control * cc)1382dbe2d4e4SMel Gorman static inline bool compact_scanners_met(struct compact_control *cc)
1383dbe2d4e4SMel Gorman {
1384dbe2d4e4SMel Gorman return (cc->free_pfn >> pageblock_order)
1385dbe2d4e4SMel Gorman <= (cc->migrate_pfn >> pageblock_order);
1386dbe2d4e4SMel Gorman }
1387dbe2d4e4SMel Gorman
1388dbe2d4e4SMel Gorman /*
1389dbe2d4e4SMel Gorman * Used when scanning for a suitable migration target which scans freelists
1390dbe2d4e4SMel Gorman * in reverse. Reorders the list such that the unscanned pages are scanned
1391dbe2d4e4SMel Gorman * first on the next iteration of the free scanner.
1392dbe2d4e4SMel Gorman */
1393dbe2d4e4SMel Gorman static void
move_freelist_head(struct list_head * freelist,struct page * freepage)1394dbe2d4e4SMel Gorman move_freelist_head(struct list_head *freelist, struct page *freepage)
13955a811889SMel Gorman {
13965a811889SMel Gorman LIST_HEAD(sublist);
13975a811889SMel Gorman
1398b55ca526SWonhyuk Yang if (!list_is_last(freelist, &freepage->lru)) {
13995a811889SMel Gorman list_cut_before(&sublist, freelist, &freepage->lru);
140074e21484SRokudo Yan list_splice_tail(&sublist, freelist);
14015a811889SMel Gorman }
14025a811889SMel Gorman }
14035a811889SMel Gorman
14045a811889SMel Gorman /*
14055a811889SMel Gorman * Similar to move_freelist_head except used by the migration scanner
14065a811889SMel Gorman * when scanning forward. It's possible for these list operations to
14075a811889SMel Gorman * move against each other if they search the free list exactly in
14085a811889SMel Gorman * lockstep.
14095a811889SMel Gorman */
14105a811889SMel Gorman static void
move_freelist_tail(struct list_head * freelist,struct page * freepage)14115a811889SMel Gorman move_freelist_tail(struct list_head *freelist, struct page *freepage)
14125a811889SMel Gorman {
14135a811889SMel Gorman LIST_HEAD(sublist);
14145a811889SMel Gorman
1415e332f741SMel Gorman if (!list_is_first(freelist, &freepage->lru)) {
14165a811889SMel Gorman list_cut_position(&sublist, freelist, &freepage->lru);
14175a811889SMel Gorman list_splice_tail(&sublist, freelist);
14185a811889SMel Gorman }
14195a811889SMel Gorman }
14205a811889SMel Gorman
14215a811889SMel Gorman static void
fast_isolate_around(struct compact_control * cc,unsigned long pfn)14225a811889SMel Gorman fast_isolate_around(struct compact_control *cc, unsigned long pfn)
14235a811889SMel Gorman {
14245a811889SMel Gorman unsigned long start_pfn, end_pfn;
14255a811889SMel Gorman struct page *page;
14265a811889SMel Gorman
14275a811889SMel Gorman /* Do not search around if there are enough pages already */
14285a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages)
14295a811889SMel Gorman return;
14305a811889SMel Gorman
1431dbe2d4e4SMel Gorman /* Minimise scanning during async compaction */
1432dbe2d4e4SMel Gorman if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1433dbe2d4e4SMel Gorman return;
1434dbe2d4e4SMel Gorman
1435dbe2d4e4SMel Gorman /* Pageblock boundaries */
1436dbe2d4e4SMel Gorman start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1437dbe2d4e4SMel Gorman end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1438dbe2d4e4SMel Gorman
1439dbe2d4e4SMel Gorman page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
14405a811889SMel Gorman if (!page)
14415a811889SMel Gorman return;
14425a811889SMel Gorman
14435a811889SMel Gorman isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
14445a811889SMel Gorman
144574e21484SRokudo Yan /* Skip this pageblock in the future as it's full or nearly full */
14465a811889SMel Gorman if (start_pfn == end_pfn && !cc->no_set_skip_hint)
14475a811889SMel Gorman set_pageblock_skip(page);
14485a811889SMel Gorman }
14495a811889SMel Gorman
14505a811889SMel Gorman /* Search orders in round-robin fashion */
next_search_order(struct compact_control * cc,int order)14515a811889SMel Gorman static int next_search_order(struct compact_control *cc, int order)
14525a811889SMel Gorman {
14535a811889SMel Gorman order--;
14545a811889SMel Gorman if (order < 0)
14555a811889SMel Gorman order = cc->order - 1;
14565a811889SMel Gorman
14575a811889SMel Gorman /* Search wrapped around? */
14585a811889SMel Gorman if (order == cc->search_order) {
14595a811889SMel Gorman cc->search_order--;
14606e2b7044SVlastimil Babka if (cc->search_order < 0)
14616e2b7044SVlastimil Babka cc->search_order = cc->order - 1;
14625a811889SMel Gorman return -1;
14635a811889SMel Gorman }
14645a811889SMel Gorman
1465dbe2d4e4SMel Gorman return order;
14665a811889SMel Gorman }
14675a811889SMel Gorman
fast_isolate_freepages(struct compact_control * cc)14685a811889SMel Gorman static void fast_isolate_freepages(struct compact_control *cc)
14695a811889SMel Gorman {
14705a811889SMel Gorman unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
14715a811889SMel Gorman unsigned int nr_scanned = 0, total_isolated = 0;
14725a811889SMel Gorman unsigned long low_pfn, min_pfn, highest = 0;
14735a811889SMel Gorman unsigned long nr_isolated = 0;
14745a811889SMel Gorman unsigned long distance;
14755a811889SMel Gorman struct page *page = NULL;
14765a811889SMel Gorman bool scan_start = false;
14775a811889SMel Gorman int order;
14785a811889SMel Gorman
14795a811889SMel Gorman /* Full compaction passes in a negative order */
14805a811889SMel Gorman if (cc->order <= 0)
14815a811889SMel Gorman return;
14825a811889SMel Gorman
14835a811889SMel Gorman /*
14845a811889SMel Gorman * If starting the scan, use a deeper search and use the highest
14855a811889SMel Gorman * PFN found if a suitable one is not found.
14865a811889SMel Gorman */
14875a811889SMel Gorman if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
14885a811889SMel Gorman limit = pageblock_nr_pages >> 1;
14895a811889SMel Gorman scan_start = true;
14905a811889SMel Gorman }
14915a811889SMel Gorman
14925a811889SMel Gorman /*
14935a811889SMel Gorman * Preferred point is in the top quarter of the scan space but take
14945a811889SMel Gorman * a pfn from the top half if the search is problematic.
14955a811889SMel Gorman */
14965a811889SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn);
1497b717d6b9SWilliam Lam low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
14985a811889SMel Gorman min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
14995a811889SMel Gorman
15005a811889SMel Gorman if (WARN_ON_ONCE(min_pfn > low_pfn))
15015a811889SMel Gorman low_pfn = min_pfn;
15025a811889SMel Gorman
15035b56d996SQian Cai /*
15045a811889SMel Gorman * Search starts from the last successful isolation order or the next
15055a811889SMel Gorman * order to search after a previous failure
15065a811889SMel Gorman */
15075a811889SMel Gorman cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
15085a811889SMel Gorman
15095a811889SMel Gorman for (order = cc->search_order;
15105a811889SMel Gorman !page && order >= 0;
1511b55ca526SWonhyuk Yang order = next_search_order(cc, order)) {
15125a811889SMel Gorman struct free_area *area = &cc->zone->free_area[order];
15135a811889SMel Gorman struct list_head *freelist;
15145a811889SMel Gorman struct page *freepage;
1515b55ca526SWonhyuk Yang unsigned long flags;
15165a811889SMel Gorman unsigned int order_scanned = 0;
15175a811889SMel Gorman unsigned long high_pfn = 0;
15185a811889SMel Gorman
15195a811889SMel Gorman if (!area->nr_free)
15205a811889SMel Gorman continue;
15215a811889SMel Gorman
15225a811889SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags);
1523f3867755SEthon Paul freelist = &area->free_list[MIGRATE_MOVABLE];
15245a811889SMel Gorman list_for_each_entry_reverse(freepage, freelist, buddy_list) {
15255a811889SMel Gorman unsigned long pfn;
1526ca2864e5SMiaohe Lin
15275a811889SMel Gorman order_scanned++;
15285a811889SMel Gorman nr_scanned++;
15295a811889SMel Gorman pfn = page_to_pfn(freepage);
1530e577c8b6SSuzuki K Poulose
153173a6e474SBaoquan He if (pfn >= highest)
15326e2b7044SVlastimil Babka highest = max(pageblock_start_pfn(pfn),
15336e2b7044SVlastimil Babka cc->zone->zone_start_pfn);
153473a6e474SBaoquan He
15355a811889SMel Gorman if (pfn >= low_pfn) {
15365a811889SMel Gorman cc->fast_search_fail = 0;
15375a811889SMel Gorman cc->search_order = order;
15385a811889SMel Gorman page = freepage;
15395a811889SMel Gorman break;
15405a811889SMel Gorman }
1541d097a6f6SMel Gorman
1542d097a6f6SMel Gorman if (pfn >= min_pfn && pfn > high_pfn) {
15435a811889SMel Gorman high_pfn = pfn;
1544d097a6f6SMel Gorman
15455a811889SMel Gorman /* Shorten the scan if a candidate is found */
15465a811889SMel Gorman limit >>= 1;
15475a811889SMel Gorman }
15485a811889SMel Gorman
15495a811889SMel Gorman if (order_scanned >= limit)
15505a811889SMel Gorman break;
1551*be21b32aSNARIBAYASHI Akira }
15525a811889SMel Gorman
15535a811889SMel Gorman /* Use a maximum candidate pfn if a preferred one was not found */
15545a811889SMel Gorman if (!page && high_pfn) {
1555f2849aa0SVlastimil Babka page = pfn_to_page(high_pfn);
1556ff9543fdSMichal Nazarewicz
1557ff9543fdSMichal Nazarewicz /* Update freepage for the list reorder below */
1558ff9543fdSMichal Nazarewicz freepage = page;
1559edc2ca61SVlastimil Babka }
1560ff9543fdSMichal Nazarewicz
1561edc2ca61SVlastimil Babka /* Reorder so a future search skips recent pages */
1562ff9543fdSMichal Nazarewicz move_freelist_head(freelist, freepage);
1563c96b9e50SVlastimil Babka
1564e14c720eSVlastimil Babka /* Isolate the page if available */
1565c96b9e50SVlastimil Babka if (page) {
1566c96b9e50SVlastimil Babka if (__isolate_free_page(page, order)) {
1567ff9543fdSMichal Nazarewicz set_page_private(page, order);
15684fca9730SMel Gorman nr_isolated = 1 << order;
15692fe86e00SMichal Nazarewicz nr_scanned += nr_isolated - 1;
15705a811889SMel Gorman total_isolated += nr_isolated;
157100bc102fSMiaohe Lin cc->nr_freepages += nr_isolated;
15725a811889SMel Gorman list_add_tail(&page->lru, &cc->freepages);
15735a811889SMel Gorman count_compact_events(COMPACTISOLATED, nr_isolated);
15745a811889SMel Gorman } else {
1575ff9543fdSMichal Nazarewicz /* If isolation fails, abort the search */
1576ff9543fdSMichal Nazarewicz order = cc->search_order + 1;
157749e068f0SVlastimil Babka page = NULL;
1578e14c720eSVlastimil Babka }
1579e14c720eSVlastimil Babka }
1580c96b9e50SVlastimil Babka
1581c96b9e50SVlastimil Babka spin_unlock_irqrestore(&cc->zone->lock, flags);
1582a1c1dbebSRandy Dunlap
158349e068f0SVlastimil Babka /* Skip fast search if enough freepages isolated */
158449e068f0SVlastimil Babka if (cc->nr_freepages >= cc->nr_migratepages)
1585ff9543fdSMichal Nazarewicz break;
1586e14c720eSVlastimil Babka
15875a811889SMel Gorman /*
1588c96b9e50SVlastimil Babka * Smaller scan on next order so the total scan is related
1589c96b9e50SVlastimil Babka * to freelist_scan_limit.
159006b6640aSVlastimil Babka */
15914fca9730SMel Gorman if (order_scanned >= limit)
15922fe86e00SMichal Nazarewicz limit = max(1U, limit >> 1);
1593ff9543fdSMichal Nazarewicz }
1594ff9543fdSMichal Nazarewicz
1595ff9543fdSMichal Nazarewicz trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
1596ff9543fdSMichal Nazarewicz nr_scanned, total_isolated);
1597ff9543fdSMichal Nazarewicz
1598f5f61a32SVlastimil Babka if (!page) {
1599c96b9e50SVlastimil Babka cc->fast_search_fail++;
1600e14c720eSVlastimil Babka if (scan_start) {
1601e14c720eSVlastimil Babka /*
16024fca9730SMel Gorman * Use the highest PFN found above min. If one was
16034fca9730SMel Gorman * not found, be pessimistic for direct compaction
1604f6ea3adbSDavid Rientjes * and use the min mark.
1605f6ea3adbSDavid Rientjes */
1606cb810ad2SMel Gorman if (highest >= min_pfn) {
1607f6ea3adbSDavid Rientjes page = pfn_to_page(highest);
1608c036ddffSMiaohe Lin cc->free_pfn = highest;
1609cf66f070SMel Gorman } else {
1610f6ea3adbSDavid Rientjes if (cc->direct_compaction && pfn_valid(min_pfn)) {
16117d49d886SVlastimil Babka page = pageblock_pfn_to_page(min_pfn,
16127d49d886SVlastimil Babka min(pageblock_end_pfn(min_pfn),
16137d49d886SVlastimil Babka zone_end_pfn(cc->zone)),
1614ff9543fdSMichal Nazarewicz cc->zone);
1615ff9543fdSMichal Nazarewicz cc->free_pfn = min_pfn;
1616ff9543fdSMichal Nazarewicz }
16179f7e3387SVlastimil Babka }
1618ff9543fdSMichal Nazarewicz }
161968e3e926SLinus Torvalds }
1620bb13ffebSMel Gorman
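	/*
	 * Cache one pageblock below the highest candidate found so the
	 * next free scan resumes just under it.
	 */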
1621bb13ffebSMel Gorman if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1622bb13ffebSMel Gorman highest -= pageblock_nr_pages;
1623bb13ffebSMel Gorman cc->zone->compact_cached_free_pfn = highest;
1624e14c720eSVlastimil Babka }
16254fca9730SMel Gorman
16264fca9730SMel Gorman cc->total_free_scanned += nr_scanned;
1627ff9543fdSMichal Nazarewicz if (!page)
1628d097a6f6SMel Gorman return;
1629d097a6f6SMel Gorman
1630d097a6f6SMel Gorman low_pfn = page_to_pfn(page);
1631d097a6f6SMel Gorman fast_isolate_around(cc, low_pfn);
1632cb2dcaf0SMel Gorman }
1633cb2dcaf0SMel Gorman
1634a46cbf3bSDavid Rientjes /*
1635a46cbf3bSDavid Rientjes * Based on information in the current compact_control, find blocks
1636a46cbf3bSDavid Rientjes * suitable for isolating free pages from and then isolate them.
1637a46cbf3bSDavid Rientjes */
isolate_freepages(struct compact_control * cc)1638a46cbf3bSDavid Rientjes static void isolate_freepages(struct compact_control *cc)
1639f5f61a32SVlastimil Babka {
1640e14c720eSVlastimil Babka struct zone *zone = cc->zone;
1641a46cbf3bSDavid Rientjes struct page *page;
1642be976572SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */
1643a46cbf3bSDavid Rientjes unsigned long isolate_start_pfn; /* exact pfn we start at */
1644f5f61a32SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */
1645a46cbf3bSDavid Rientjes unsigned long low_pfn; /* lowest pfn scanner is able to scan */
1646a46cbf3bSDavid Rientjes struct list_head *freelist = &cc->freepages;
1647f5f61a32SVlastimil Babka unsigned int stride;
1648a46cbf3bSDavid Rientjes
1649f5f61a32SVlastimil Babka /* Try a small search of the free lists for a candidate */
16504fca9730SMel Gorman fast_isolate_freepages(cc);
16514fca9730SMel Gorman if (cc->nr_freepages)
16524fca9730SMel Gorman goto splitmap;
16534fca9730SMel Gorman
16544fca9730SMel Gorman /*
16554fca9730SMel Gorman * Initialise the free scanner. The starting point is where we last
16564fca9730SMel Gorman * successfully isolated from, zone-cached value, or the end of the
1657c89511abSMel Gorman * zone when isolating for the first time. For looping we also need
1658ff9543fdSMichal Nazarewicz * this pfn aligned down to the pageblock boundary, because we do
16597ed695e0SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop.
1660f5f61a32SVlastimil Babka * For the ending point, take care when isolating in the last pageblock of a
1661f5f61a32SVlastimil Babka * zone which ends in the middle of a pageblock.
1662f5f61a32SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner
1663f5f61a32SVlastimil Babka * is using.
16647ed695e0SVlastimil Babka */
1665f5f61a32SVlastimil Babka isolate_start_pfn = cc->free_pfn;
16665a811889SMel Gorman block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
16675a811889SMel Gorman block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
16685a811889SMel Gorman zone_end_pfn(zone));
16695a811889SMel Gorman low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1670748446bbSMel Gorman stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1671748446bbSMel Gorman
1672748446bbSMel Gorman /*
1673748446bbSMel Gorman * Isolate free pages until enough are available to migrate the
1674748446bbSMel Gorman * pages on cc->migratepages. We stop searching if the migrate
1675748446bbSMel Gorman * and free page scanners meet or enough free pages are isolated.
1676748446bbSMel Gorman */
1677666feb21SMichal Hocko for (; block_start_pfn >= low_pfn;
1678748446bbSMel Gorman block_end_pfn = block_start_pfn,
1679748446bbSMel Gorman block_start_pfn -= pageblock_nr_pages,
1680748446bbSMel Gorman isolate_start_pfn = block_start_pfn) {
1681748446bbSMel Gorman unsigned long nr_isolated;
1682748446bbSMel Gorman
1683edc2ca61SVlastimil Babka /*
1684748446bbSMel Gorman * This can iterate a massively long zone without finding any
1685748446bbSMel Gorman * suitable migration targets, so periodically check if we need to reschedule.
1686748446bbSMel Gorman */
1687748446bbSMel Gorman if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
1688748446bbSMel Gorman cond_resched();
1689748446bbSMel Gorman
1690748446bbSMel Gorman page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1691748446bbSMel Gorman zone);
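		/*
		 * No usable struct page for this block (e.g. the section is
		 * offline); jump the scanner back past the hole.
		 */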
1692748446bbSMel Gorman if (!page) {
1693748446bbSMel Gorman unsigned long next_pfn;
1694748446bbSMel Gorman
1695748446bbSMel Gorman next_pfn = skip_offline_sections_reverse(block_start_pfn);
1696748446bbSMel Gorman if (next_pfn)
1697d53aea3dSDavid Rientjes block_start_pfn = max(next_pfn, low_pfn);
1698d53aea3dSDavid Rientjes
1699d53aea3dSDavid Rientjes continue;
1700d53aea3dSDavid Rientjes }
1701d53aea3dSDavid Rientjes
1702d53aea3dSDavid Rientjes /* Check the block is suitable for migration */
1703d53aea3dSDavid Rientjes if (!suitable_migration_target(cc, page))
1704d53aea3dSDavid Rientjes continue;
1705d53aea3dSDavid Rientjes
1706d53aea3dSDavid Rientjes /* If isolation recently failed, do not retry */
1707d53aea3dSDavid Rientjes if (!isolation_suitable(cc, page))
1708d53aea3dSDavid Rientjes continue;
1709ff9543fdSMichal Nazarewicz
1710ff9543fdSMichal Nazarewicz /* Found a block suitable for isolating free pages from. */
1711ff9543fdSMichal Nazarewicz nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1712ff9543fdSMichal Nazarewicz block_end_pfn, freelist, stride, false);
1713ff9543fdSMichal Nazarewicz
1714ff9543fdSMichal Nazarewicz /* Update the skip hint if the full pageblock was scanned */
1715ff9543fdSMichal Nazarewicz if (isolate_start_pfn == block_end_pfn)
1716ff9543fdSMichal Nazarewicz update_pageblock_skip(cc, page, block_start_pfn -
17175bbe3547SEric B Munson pageblock_nr_pages);
17185bbe3547SEric B Munson
17195bbe3547SEric B Munson /* Are enough freepages isolated? */
1720c7e0b3d0SThomas Gleixner if (cc->nr_freepages >= cc->nr_migratepages) {
17215bbe3547SEric B Munson if (isolate_start_pfn >= block_end_pfn) {
172270b44595SMel Gorman /*
172370b44595SMel Gorman * Restart at previous pageblock if more
172470b44595SMel Gorman * freepages can be isolated next time.
172570b44595SMel Gorman */
172670b44595SMel Gorman isolate_start_pfn =
172770b44595SMel Gorman block_start_pfn - pageblock_nr_pages;
172870b44595SMel Gorman }
172970b44595SMel Gorman break;
173070b44595SMel Gorman } else if (isolate_start_pfn < block_end_pfn) {
173170b44595SMel Gorman /*
173270b44595SMel Gorman * If isolation failed early, do not continue
173370b44595SMel Gorman * needlessly.
173470b44595SMel Gorman */
173570b44595SMel Gorman break;
173670b44595SMel Gorman }
173770b44595SMel Gorman
173870b44595SMel Gorman /* Adjust stride depending on isolation */
173970b44595SMel Gorman if (nr_isolated) {
174070b44595SMel Gorman stride = 1;
174170b44595SMel Gorman continue;
174270b44595SMel Gorman }
174370b44595SMel Gorman stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
174470b44595SMel Gorman }
174570b44595SMel Gorman
174670b44595SMel Gorman /*
174770b44595SMel Gorman * Record where the free scanner will restart next time. Either we
174870b44595SMel Gorman * broke from the loop and set isolate_start_pfn based on the last
174970b44595SMel Gorman * call to isolate_freepages_block(), or we met the migration scanner
175070b44595SMel Gorman * and the loop terminated due to isolate_start_pfn < low_pfn
175170b44595SMel Gorman */
175270b44595SMel Gorman cc->free_pfn = isolate_start_pfn;
175370b44595SMel Gorman
175470b44595SMel Gorman splitmap:
175570b44595SMel Gorman /* __isolate_free_page() does not map the pages */
175670b44595SMel Gorman split_map_pages(freelist);
175770b44595SMel Gorman }
175870b44595SMel Gorman
175915d28d0dSWonhyuk Yang /*
176070b44595SMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages
176170b44595SMel Gorman * from the isolated freelists in the block we are migrating to.
176270b44595SMel Gorman */
compaction_alloc(struct folio * src,unsigned long data)176370b44595SMel Gorman static struct folio *compaction_alloc(struct folio *src, unsigned long data)
176470b44595SMel Gorman {
176570b44595SMel Gorman struct compact_control *cc = (struct compact_control *)data;
176670b44595SMel Gorman struct folio *dst;
176770b44595SMel Gorman
176870b44595SMel Gorman if (list_empty(&cc->freepages)) {
176970b44595SMel Gorman isolate_freepages(cc);
177070b44595SMel Gorman
177170b44595SMel Gorman if (list_empty(&cc->freepages))
177270b44595SMel Gorman return NULL;
177370b44595SMel Gorman }
177470b44595SMel Gorman
177570b44595SMel Gorman dst = list_entry(cc->freepages.next, struct folio, lru);
177670b44595SMel Gorman list_del(&dst->lru);
177770b44595SMel Gorman cc->nr_freepages--;
177870b44595SMel Gorman
177970b44595SMel Gorman return dst;
178070b44595SMel Gorman }
178170b44595SMel Gorman
178270b44595SMel Gorman /*
178370b44595SMel Gorman * This is a migrate-callback that "frees" freepages back to the isolated
178470b44595SMel Gorman * freelist. All pages on the freelist are from the same zone, so there is no
178570b44595SMel Gorman * special handling needed for NUMA.
178670b44595SMel Gorman */
compaction_free(struct folio * dst,unsigned long data)178770b44595SMel Gorman static void compaction_free(struct folio *dst, unsigned long data)
178870b44595SMel Gorman {
178970b44595SMel Gorman struct compact_control *cc = (struct compact_control *)data;
179070b44595SMel Gorman
179170b44595SMel Gorman list_add(&dst->lru, &cc->freepages);
179270b44595SMel Gorman cc->nr_freepages++;
179370b44595SMel Gorman }
179470b44595SMel Gorman
179570b44595SMel Gorman /* possible outcome of isolate_migratepages */
179670b44595SMel Gorman typedef enum {
179770b44595SMel Gorman ISOLATE_ABORT, /* Abort compaction now */
179870b44595SMel Gorman ISOLATE_NONE, /* No pages isolated, continue scanning */
179970b44595SMel Gorman ISOLATE_SUCCESS, /* Pages isolated, migrate */
180070b44595SMel Gorman } isolate_migrate_t;
180170b44595SMel Gorman
180215d28d0dSWonhyuk Yang /*
180370b44595SMel Gorman * Allow userspace to control policy on scanning the unevictable LRU for
180470b44595SMel Gorman * compactable pages.
180570b44595SMel Gorman */
180670b44595SMel Gorman static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
180770b44595SMel Gorman /*
180870b44595SMel Gorman * Tunable for proactive compaction. It determines how
180970b44595SMel Gorman * aggressively the kernel should compact memory in the
181070b44595SMel Gorman * background. It takes values in the range [0, 100].
181170b44595SMel Gorman */
181270b44595SMel Gorman static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
181370b44595SMel Gorman static int sysctl_extfrag_threshold = 500;
181470b44595SMel Gorman static int __read_mostly sysctl_compact_memory;
181570b44595SMel Gorman
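/*
 * Track the lowest pfn at which the fast migration search found free pages.
 * A value of ULONG_MAX (set by reinit_migrate_pfn()) disables further updates.
 */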
181670b44595SMel Gorman static inline void
update_fast_start_pfn(struct compact_control * cc,unsigned long pfn)181715d28d0dSWonhyuk Yang update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
181815d28d0dSWonhyuk Yang {
181915d28d0dSWonhyuk Yang if (cc->fast_start_pfn == ULONG_MAX)
182015d28d0dSWonhyuk Yang return;
182115d28d0dSWonhyuk Yang
182270b44595SMel Gorman if (!cc->fast_start_pfn)
182370b44595SMel Gorman cc->fast_start_pfn = pfn;
182470b44595SMel Gorman
182570b44595SMel Gorman cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
182670b44595SMel Gorman }
182770b44595SMel Gorman
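/*
 * After a failed fast search, restart the migration scanner from the lowest
 * pfn recorded above and disable further tracking by setting it to ULONG_MAX.
 */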
182870b44595SMel Gorman static inline unsigned long
reinit_migrate_pfn(struct compact_control * cc)182970b44595SMel Gorman reinit_migrate_pfn(struct compact_control *cc)
183015d28d0dSWonhyuk Yang {
183170b44595SMel Gorman if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
183270b44595SMel Gorman return cc->migrate_pfn;
183370b44595SMel Gorman
183470b44595SMel Gorman cc->migrate_pfn = cc->fast_start_pfn;
183570b44595SMel Gorman cc->fast_start_pfn = ULONG_MAX;
1836e380bebeSMel Gorman
183770b44595SMel Gorman return cc->migrate_pfn;
1838bbe832b9SRei Yamamoto }
1839bbe832b9SRei Yamamoto
184070b44595SMel Gorman /*
184115d28d0dSWonhyuk Yang * Briefly search the free lists for a migration source that already has
184270b44595SMel Gorman * some free pages to reduce the number of pages that need migration
184370b44595SMel Gorman * before a pageblock is free.
184470b44595SMel Gorman */
fast_find_migrateblock(struct compact_control * cc)184570b44595SMel Gorman static unsigned long fast_find_migrateblock(struct compact_control *cc)
184670b44595SMel Gorman {
184770b44595SMel Gorman unsigned int limit = freelist_scan_limit(cc);
184870b44595SMel Gorman unsigned int nr_scanned = 0;
184970b44595SMel Gorman unsigned long distance;
185070b44595SMel Gorman unsigned long pfn = cc->migrate_pfn;
185170b44595SMel Gorman unsigned long high_pfn;
185270b44595SMel Gorman int order;
185370b44595SMel Gorman bool found_block = false;
185415d28d0dSWonhyuk Yang
185515d28d0dSWonhyuk Yang /* Skip hints are relied on to avoid repeats on the fast search */
185670b44595SMel Gorman if (cc->ignore_skip_hint)
185715d28d0dSWonhyuk Yang return pfn;
185870b44595SMel Gorman
185970b44595SMel Gorman /*
186070b44595SMel Gorman * If the pageblock should be finished then do not select a different
18615bbe3547SEric B Munson * pageblock.
1862edc2ca61SVlastimil Babka */
1863edc2ca61SVlastimil Babka if (cc->finish_pageblock)
1864edc2ca61SVlastimil Babka return pfn;
1865ff9543fdSMichal Nazarewicz
186632aaf055SPengfei Li /*
1867ff9543fdSMichal Nazarewicz * If the migrate_pfn is not at the start of a zone or the start
1868e1409c32SJoonsoo Kim * of a pageblock then assume this is a continuation of a previous
1869e1409c32SJoonsoo Kim * scan restarted due to COMPACT_CLUSTER_MAX.
1870e1409c32SJoonsoo Kim */
1871edc2ca61SVlastimil Babka if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1872edc2ca61SVlastimil Babka return pfn;
18735bbe3547SEric B Munson
18741d2047feSHugh Dickins /*
187570b44595SMel Gorman * For smaller orders, just linearly scan as the number of pages
1876ff9543fdSMichal Nazarewicz * to migrate should be relatively small and does not necessarily
1877edc2ca61SVlastimil Babka * justify freeing up a large block for a small allocation.
1878edc2ca61SVlastimil Babka */
187970b44595SMel Gorman if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
188070b44595SMel Gorman return pfn;
1881edc2ca61SVlastimil Babka
188270b44595SMel Gorman /*
188306b6640aSVlastimil Babka * Only allow kcompactd and direct requests for movable pages to
188432aaf055SPengfei Li * quickly clear out a MOVABLE pageblock for allocation. This
188532aaf055SPengfei Li * reduces the risk that a large movable pageblock is freed for
1886ff9543fdSMichal Nazarewicz * an unmovable/reclaimable small allocation.
188770b44595SMel Gorman */
188870b44595SMel Gorman if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
188970b44595SMel Gorman return pfn;
189070b44595SMel Gorman
189170b44595SMel Gorman /*
189270b44595SMel Gorman * first half of the search space. Otherwise try to pick a pageblock
189370b44595SMel Gorman * first half of the search space. Otherwise try and pick a pageblock
1894ff9543fdSMichal Nazarewicz * within the first eighth to reduce the chances that a migration
189506b6640aSVlastimil Babka * target later becomes a source.
1896ff9543fdSMichal Nazarewicz */
1897edc2ca61SVlastimil Babka distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1898edc2ca61SVlastimil Babka if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1899edc2ca61SVlastimil Babka distance >>= 2;
1900edc2ca61SVlastimil Babka high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1901e1409c32SJoonsoo Kim
190270b44595SMel Gorman for (order = cc->order - 1;
1903c2ad7a1fSOscar Salvador order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
1904e1409c32SJoonsoo Kim order--) {
1905e1409c32SJoonsoo Kim struct free_area *area = &cc->zone->free_area[order];
1906edc2ca61SVlastimil Babka struct list_head *freelist;
1907edc2ca61SVlastimil Babka unsigned long flags;
1908edc2ca61SVlastimil Babka struct page *freepage;
1909edc2ca61SVlastimil Babka
1910cb810ad2SMel Gorman if (!area->nr_free)
1911edc2ca61SVlastimil Babka continue;
1912c036ddffSMiaohe Lin
1913cf66f070SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags);
1914edc2ca61SVlastimil Babka freelist = &area->free_list[MIGRATE_MOVABLE];
191532aaf055SPengfei Li list_for_each_entry(freepage, freelist, buddy_list) {
191632aaf055SPengfei Li unsigned long free_pfn;
19177d49d886SVlastimil Babka
1918edc2ca61SVlastimil Babka if (nr_scanned++ >= limit) {
1919edc2ca61SVlastimil Babka move_freelist_tail(freelist, freepage);
1920e380bebeSMel Gorman break;
1921e380bebeSMel Gorman }
1922e380bebeSMel Gorman
1923e380bebeSMel Gorman free_pfn = page_to_pfn(freepage);
1924e380bebeSMel Gorman if (free_pfn < high_pfn) {
1925e380bebeSMel Gorman /*
1926e380bebeSMel Gorman * Avoid if skipped recently. Ideally it would
1927ee0913c4SKefeng Wang * move to the tail but even safe iteration of
1928e380bebeSMel Gorman * the list assumes an entry is deleted, not
1929edc2ca61SVlastimil Babka * reordered.
1930edc2ca61SVlastimil Babka */
1931edc2ca61SVlastimil Babka if (get_pageblock_skip(freepage))
1932556162bfSMiaohe Lin continue;
1933556162bfSMiaohe Lin
1934556162bfSMiaohe Lin /* Reorder so a future search skips recent pages */
1935556162bfSMiaohe Lin move_freelist_tail(freelist, freepage);
1936556162bfSMiaohe Lin
1937556162bfSMiaohe Lin update_fast_start_pfn(cc, free_pfn);
1938edc2ca61SVlastimil Babka pfn = pageblock_start_pfn(free_pfn);
19399bebefd5SMel Gorman if (pfn < cc->zone->zone_start_pfn)
19409bebefd5SMel Gorman pfn = cc->zone->zone_start_pfn;
1941edc2ca61SVlastimil Babka cc->fast_search_fail = 0;
19429bebefd5SMel Gorman found_block = true;
1943ff9543fdSMichal Nazarewicz break;
1944ff9543fdSMichal Nazarewicz }
1945c2ad7a1fSOscar Salvador }
1946c2ad7a1fSOscar Salvador spin_unlock_irqrestore(&cc->zone->lock, flags);
1947ff9543fdSMichal Nazarewicz }
1948ff9543fdSMichal Nazarewicz
1949edc2ca61SVlastimil Babka cc->total_migrate_scanned += nr_scanned;
1950edc2ca61SVlastimil Babka
1951edc2ca61SVlastimil Babka /*
1952edc2ca61SVlastimil Babka * If fast scanning failed then use a cached entry for a page block
1953edc2ca61SVlastimil Babka * that had free pages as the basis for starting a linear scan.
1954edc2ca61SVlastimil Babka */
1955edc2ca61SVlastimil Babka if (!found_block) {
1956edc2ca61SVlastimil Babka cc->fast_search_fail++;
1957edc2ca61SVlastimil Babka pfn = reinit_migrate_pfn(cc);
1958ff9543fdSMichal Nazarewicz }
1959ff9543fdSMichal Nazarewicz return pfn;
196021c527a3SYaowei Bai }
196121c527a3SYaowei Bai
196221c527a3SYaowei Bai /*
196321c527a3SYaowei Bai * Isolate all pages that can be migrated from the first suitable block,
196421c527a3SYaowei Bai * starting at the block pointed to by the migrate scanner pfn within
196521c527a3SYaowei Bai * compact_control.
196621c527a3SYaowei Bai */
isolate_migratepages(struct compact_control * cc)196721c527a3SYaowei Bai static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
196821c527a3SYaowei Bai {
1969b4a0215eSKefeng Wang unsigned long block_start_pfn;
1970b4a0215eSKefeng Wang unsigned long block_end_pfn;
1971b4a0215eSKefeng Wang unsigned long low_pfn;
1972b4a0215eSKefeng Wang struct page *page;
1973b4a0215eSKefeng Wang const isolate_mode_t isolate_mode =
1974b4a0215eSKefeng Wang (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1975facdaa91SNitin Gupta (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1976facdaa91SNitin Gupta bool fast_find_block;
1977b4a0215eSKefeng Wang
1978b4a0215eSKefeng Wang /*
1979b4a0215eSKefeng Wang * Start at where we last stopped, or the beginning of the zone as
1980b4a0215eSKefeng Wang * initialized by compact_zone(). The first failure will use
1981b4a0215eSKefeng Wang * the lowest PFN as the starting point for linear scanning.
1982b4a0215eSKefeng Wang */
1983b4a0215eSKefeng Wang low_pfn = fast_find_migrateblock(cc);
1984facdaa91SNitin Gupta block_start_pfn = pageblock_start_pfn(low_pfn);
1985facdaa91SNitin Gupta if (block_start_pfn < cc->zone->zone_start_pfn)
1986facdaa91SNitin Gupta block_start_pfn = cc->zone->zone_start_pfn;
1987facdaa91SNitin Gupta
198840d7e203SCharan Teja Reddy /*
198940d7e203SCharan Teja Reddy * fast_find_migrateblock() has already ensured the pageblock is not
199040d7e203SCharan Teja Reddy * set with a skipped flag, so to avoid the isolation_suitable check
199140d7e203SCharan Teja Reddy * below again, check whether the fast search was successful.
199240d7e203SCharan Teja Reddy */
199340d7e203SCharan Teja Reddy fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
199440d7e203SCharan Teja Reddy
199540d7e203SCharan Teja Reddy /* Only scan within a pageblock boundary */
199640d7e203SCharan Teja Reddy block_end_pfn = pageblock_end_pfn(low_pfn);
199740d7e203SCharan Teja Reddy
199840d7e203SCharan Teja Reddy /*
1999facdaa91SNitin Gupta * Iterate over whole pageblocks until we find the first suitable.
2000facdaa91SNitin Gupta * Do not cross the free scanner.
2001facdaa91SNitin Gupta */
2002facdaa91SNitin Gupta for (; block_end_pfn <= cc->free_pfn;
2003facdaa91SNitin Gupta fast_find_block = false,
2004facdaa91SNitin Gupta cc->migrate_pfn = low_pfn = block_end_pfn,
200540d7e203SCharan Teja Reddy block_start_pfn = block_end_pfn,
2006facdaa91SNitin Gupta block_end_pfn += pageblock_nr_pages) {
2007facdaa91SNitin Gupta
2008facdaa91SNitin Gupta /*
200940d7e203SCharan Teja Reddy * This can potentially iterate a massively long zone with
2010facdaa91SNitin Gupta * many pageblocks unsuitable, so periodically check if we
2011facdaa91SNitin Gupta * need to schedule.
2012facdaa91SNitin Gupta */
2013facdaa91SNitin Gupta if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
2014facdaa91SNitin Gupta cond_resched();
2015facdaa91SNitin Gupta
2016facdaa91SNitin Gupta page = pageblock_pfn_to_page(block_start_pfn,
2017facdaa91SNitin Gupta block_end_pfn, cc->zone);
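		/*
		 * No usable struct page for this block (e.g. the section is
		 * offline); advance the scanner past the hole.
		 */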
2018facdaa91SNitin Gupta if (!page) {
2019facdaa91SNitin Gupta unsigned long next_pfn;
2020d34c0a75SNitin Gupta
2021facdaa91SNitin Gupta next_pfn = skip_offline_sections(block_start_pfn);
2022d34c0a75SNitin Gupta if (next_pfn)
2023facdaa91SNitin Gupta block_end_pfn = min(next_pfn, cc->free_pfn);
2024facdaa91SNitin Gupta continue;
2025facdaa91SNitin Gupta }
2026facdaa91SNitin Gupta
2027facdaa91SNitin Gupta /*
2028facdaa91SNitin Gupta * If isolation recently failed, do not retry. Only check the
202940d7e203SCharan Teja Reddy * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
2030facdaa91SNitin Gupta * to be visited multiple times. Assume skip was checked
2031facdaa91SNitin Gupta * before making it "skip" so other compaction instances do
2032facdaa91SNitin Gupta * not scan the same block.
2033facdaa91SNitin Gupta */
2034facdaa91SNitin Gupta if ((pageblock_aligned(low_pfn) ||
2035d34c0a75SNitin Gupta low_pfn == cc->zone->zone_start_pfn) &&
2036facdaa91SNitin Gupta !fast_find_block && !isolation_suitable(cc, page))
2037d34c0a75SNitin Gupta continue;
2038facdaa91SNitin Gupta
2039facdaa91SNitin Gupta /*
2040f0953a1bSIngo Molnar * For async direct compaction, only scan the pageblocks of the
2041f0953a1bSIngo Molnar * same migratetype without huge pages. Async direct compaction
2042facdaa91SNitin Gupta * is optimistic to see if the minimum amount of work satisfies
2043facdaa91SNitin Gupta * the allocation. The cached PFN is updated as it's possible
2044d34c0a75SNitin Gupta * that all remaining blocks between source and target are
2045d34c0a75SNitin Gupta * unsuitable and the compaction scanners fail to meet.
2046facdaa91SNitin Gupta */
2047facdaa91SNitin Gupta if (!suitable_migration_source(cc, page)) {
2048facdaa91SNitin Gupta update_cached_migrate(cc, block_end_pfn);
2049facdaa91SNitin Gupta continue;
2050facdaa91SNitin Gupta }
2051facdaa91SNitin Gupta
2052facdaa91SNitin Gupta /* Perform the isolation */
2053facdaa91SNitin Gupta if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
2054facdaa91SNitin Gupta isolate_mode))
2055facdaa91SNitin Gupta return ISOLATE_ABORT;
2056facdaa91SNitin Gupta
2057facdaa91SNitin Gupta /*
2058facdaa91SNitin Gupta * Either we isolated something and proceed with migration. Or
205940cacbcbSMel Gorman * we failed and compact_zone should decide if we should
2060748446bbSMel Gorman * continue or not.
20618fb74b9fSMel Gorman */
2062d39773a0SVlastimil Babka break;
2063cb2dcaf0SMel Gorman }
2064748446bbSMel Gorman
2065753341a4SMel Gorman return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
2066f2849aa0SVlastimil Babka }
206755b7c4c9SVlastimil Babka
206840cacbcbSMel Gorman /*
206955b7c4c9SVlastimil Babka * order == -1 is expected when compacting via
207062997027SMel Gorman * /proc/sys/vm/compact_memory
207162997027SMel Gorman */
is_via_compact_memory(int order)2072accf6242SVlastimil Babka static inline bool is_via_compact_memory(int order)
207362997027SMel Gorman {
207462997027SMel Gorman return order == -1;
207562997027SMel Gorman }
2076accf6242SVlastimil Babka
207740cacbcbSMel Gorman /*
207862997027SMel Gorman * Determine whether kswapd is (or recently was!) running on this node.
2079c8f7de0bSMichal Hocko *
2080748446bbSMel Gorman * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
2081c8f7de0bSMichal Hocko * zero it.
2082c8f7de0bSMichal Hocko */
kswapd_is_running(pg_data_t * pgdat)2083bb13ffebSMel Gorman static bool kswapd_is_running(pg_data_t *pgdat)
2084748446bbSMel Gorman {
2085facdaa91SNitin Gupta bool running;
2086facdaa91SNitin Gupta
2087facdaa91SNitin Gupta pgdat_kswapd_lock(pgdat);
2088facdaa91SNitin Gupta running = pgdat->kswapd && task_is_running(pgdat->kswapd);
2089facdaa91SNitin Gupta pgdat_kswapd_unlock(pgdat);
2090facdaa91SNitin Gupta
2091facdaa91SNitin Gupta return running;
2092facdaa91SNitin Gupta }
2093facdaa91SNitin Gupta
2094facdaa91SNitin Gupta /*
2095facdaa91SNitin Gupta * A zone's fragmentation score is the external fragmentation with respect to
2096facdaa91SNitin Gupta * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
2097facdaa91SNitin Gupta */
fragmentation_score_zone(struct zone * zone)2098facdaa91SNitin Gupta static unsigned int fragmentation_score_zone(struct zone *zone)
2099facdaa91SNitin Gupta {
2100facdaa91SNitin Gupta return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
2101facdaa91SNitin Gupta }
2102facdaa91SNitin Gupta
2103facdaa91SNitin Gupta /*
210421c527a3SYaowei Bai * A weighted zone's fragmentation score is the external fragmentation
210556de7263SMel Gorman * with respect to COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
210656de7263SMel Gorman * returns a value in the range [0, 100].
2107baf6a9a1SVlastimil Babka *
2108efe771c7SMel Gorman * The scaling factor ensures that proactive compaction focuses on larger
2109efe771c7SMel Gorman * zones like ZONE_NORMAL, rather than smaller, specialized zones like
2110efe771c7SMel Gorman * ZONE_DMA32. For smaller zones, the score value remains close to zero,
2111efe771c7SMel Gorman * and thus never exceeds the high threshold for proactive compaction.
2112baf6a9a1SVlastimil Babka */
fragmentation_score_zone_weighted(struct zone * zone)2113ee0913c4SKefeng Wang static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
2114baf6a9a1SVlastimil Babka {
2115baf6a9a1SVlastimil Babka unsigned long score;
211656de7263SMel Gorman
2117cb2dcaf0SMel Gorman score = zone->present_pages * fragmentation_score_zone(zone);
211856de7263SMel Gorman return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
211940cacbcbSMel Gorman }
21202149cdaeSJoonsoo Kim
21218fb74b9fSMel Gorman /*
212256de7263SMel Gorman * The per-node proactive (background) compaction process is started by its
2123b03641afSDan Williams * corresponding kcompactd thread when the node's fragmentation score
2124cf378319SVlastimil Babka * exceeds the high threshold. The compaction process remains active until
212556de7263SMel Gorman * the node's score falls below the low threshold, or one of the back-off
21262149cdaeSJoonsoo Kim * conditions is met.
21272149cdaeSJoonsoo Kim */
fragmentation_score_node(pg_data_t * pgdat)21282149cdaeSJoonsoo Kim static unsigned int fragmentation_score_node(pg_data_t *pgdat)
2129b03641afSDan Williams {
2130cf378319SVlastimil Babka unsigned int score = 0;
21312149cdaeSJoonsoo Kim int zoneid;
21322149cdaeSJoonsoo Kim
21332149cdaeSJoonsoo Kim for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
21342149cdaeSJoonsoo Kim struct zone *zone;
21352149cdaeSJoonsoo Kim
21362149cdaeSJoonsoo Kim zone = &pgdat->node_zones[zoneid];
2137fa599c44SMiaohe Lin if (!populated_zone(zone))
2138baf6a9a1SVlastimil Babka continue;
2139fa599c44SMiaohe Lin score += fragmentation_score_zone_weighted(zone);
2140fa599c44SMiaohe Lin }
2141fa599c44SMiaohe Lin
2142fa599c44SMiaohe Lin return score;
2143fa599c44SMiaohe Lin }
2144fa599c44SMiaohe Lin
fragmentation_score_wmark(bool low)2145baf6a9a1SVlastimil Babka static unsigned int fragmentation_score_wmark(bool low)
2146baf6a9a1SVlastimil Babka {
2147baf6a9a1SVlastimil Babka unsigned int wmark_low;
2148baf6a9a1SVlastimil Babka
2149facdaa91SNitin Gupta /*
2150cb2dcaf0SMel Gorman * Cap the low watermark to avoid excessive compaction
2151cb2dcaf0SMel Gorman * activity in case a user sets the proactiveness tunable
2152cb2dcaf0SMel Gorman * close to 100 (maximum).
2153cb2dcaf0SMel Gorman */
2154837d026dSJoonsoo Kim wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
2155837d026dSJoonsoo Kim return low ? wmark_low : min(wmark_low + 10, 100U);
215640cacbcbSMel Gorman }
2157837d026dSJoonsoo Kim
should_proactive_compact_node(pg_data_t * pgdat)2158837d026dSJoonsoo Kim static bool should_proactive_compact_node(pg_data_t *pgdat)
2159837d026dSJoonsoo Kim {
216040cacbcbSMel Gorman int wmark_high;
216140cacbcbSMel Gorman
2162837d026dSJoonsoo Kim if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
2163837d026dSJoonsoo Kim return false;
2164837d026dSJoonsoo Kim
2165837d026dSJoonsoo Kim wmark_high = fragmentation_score_wmark(false);
2166748446bbSMel Gorman return fragmentation_score_node(pgdat) > wmark_high;
2167748446bbSMel Gorman }
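/*
 * Illustrative sketch (not part of this file's build): how the proactiveness
 * tunable maps to the low/high score watermarks used above. kcompactd goes
 * proactive once the node score exceeds "high" and keeps compacting until it
 * drops below "low"; the sweep values are just examples.
 */
#if 0
#include <stdio.h>

/* Mirrors fragmentation_score_wmark(): low = max(100 - p, 5), high = min(low + 10, 100). */
static unsigned int score_wmark(unsigned int proactiveness, int low)
{
	unsigned int wmark_low = 100U - proactiveness;

	if (wmark_low < 5U)
		wmark_low = 5U;
	if (low)
		return wmark_low;
	return wmark_low + 10 > 100U ? 100U : wmark_low + 10;
}

int main(void)
{
	unsigned int p;

	for (p = 0; p <= 100; p += 20)
		printf("proactiveness=%3u  low=%3u  high=%3u\n",
		       p, score_wmark(p, 1), score_wmark(p, 0));
	return 0;
}
#endif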
2168ea7ab982SMichal Hocko
__compact_finished(struct compact_control * cc)2169c603844bSMel Gorman static enum compact_result __compact_finished(struct compact_control *cc)
217097a225e6SJoonsoo Kim {
217186a294a8SMichal Hocko unsigned int order;
21723e7d3449SMel Gorman const int migratetype = cc->migratetype;
21733e7d3449SMel Gorman int ret;
21743e7d3449SMel Gorman
217521c527a3SYaowei Bai /* Compaction run completes if the migrate and free scanner meet */
21763957c776SMichal Hocko if (compact_scanners_met(cc)) {
21773957c776SMichal Hocko /* Let the next compaction start anew. */
2178a9214443SMel Gorman reset_cached_positions(cc->zone);
2179ebff3980SVlastimil Babka
2180ebff3980SVlastimil Babka /*
2181ebff3980SVlastimil Babka * Mark that the PG_migrate_skip information should be cleared
2182ebff3980SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the
218397a225e6SJoonsoo Kim * flag itself, as the decision to clear it should be based
2184ebff3980SVlastimil Babka * directly on an allocation request.
2185cf378319SVlastimil Babka */
2186ebff3980SVlastimil Babka if (cc->direct_compaction)
21873957c776SMichal Hocko cc->zone->compact_blockskip_flush = true;
21889861a62cSVlastimil Babka
2189984fdba6SVlastimil Babka if (cc->whole_zone)
2190984fdba6SVlastimil Babka return COMPACT_COMPLETE;
2191984fdba6SVlastimil Babka else
2192984fdba6SVlastimil Babka return COMPACT_PARTIAL_SKIPPED;
219397a225e6SJoonsoo Kim }
219497a225e6SJoonsoo Kim
219597a225e6SJoonsoo Kim if (cc->proactive_compaction) {
21968348faf9SVlastimil Babka int score, wmark_low;
21978348faf9SVlastimil Babka pg_data_t *pgdat;
2198d883c6cfSJoonsoo Kim
2199d883c6cfSJoonsoo Kim pgdat = cc->zone->zone_pgdat;
22003e7d3449SMel Gorman if (kswapd_is_running(pgdat))
22018348faf9SVlastimil Babka return COMPACT_PARTIAL_SKIPPED;
22028348faf9SVlastimil Babka
22038348faf9SVlastimil Babka score = fragmentation_score_zone(cc->zone);
220497a225e6SJoonsoo Kim wmark_low = fragmentation_score_wmark(true);
2205d883c6cfSJoonsoo Kim
22063e7d3449SMel Gorman if (score > wmark_low)
22073e7d3449SMel Gorman ret = COMPACT_CONTINUE;
2208cc5c9f09SVlastimil Babka else
2209cc5c9f09SVlastimil Babka ret = COMPACT_SUCCESS;
2210cc5c9f09SVlastimil Babka
22112b1a20c3SHui Su goto out;
22122b1a20c3SHui Su }
22132b1a20c3SHui Su
22142b1a20c3SHui Su if (is_via_compact_memory(cc->order))
22152b1a20c3SHui Su return COMPACT_CONTINUE;
22162b1a20c3SHui Su
22172b1a20c3SHui Su /*
2218cc5c9f09SVlastimil Babka * Always finish scanning a pageblock to reduce the possibility of
2219cc5c9f09SVlastimil Babka * fallbacks in the future. This is particularly important when
222097a225e6SJoonsoo Kim * migration source is unmovable/reclaimable but it's not worth
2221cc5c9f09SVlastimil Babka * special casing.
2222cc5c9f09SVlastimil Babka */
2223cc5c9f09SVlastimil Babka if (!pageblock_aligned(cc->migrate_pfn))
2224cc5c9f09SVlastimil Babka return COMPACT_CONTINUE;
222597a225e6SJoonsoo Kim
2226cc5c9f09SVlastimil Babka /* Direct compactor: Is a suitable page free? */
22273e7d3449SMel Gorman ret = COMPACT_NO_SUITABLE_PAGE;
22283e7d3449SMel Gorman for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
22293e7d3449SMel Gorman struct free_area *area = &cc->zone->free_area[order];
22303e7d3449SMel Gorman bool can_steal;
2231ebff3980SVlastimil Babka
2232ebff3980SVlastimil Babka /* Job done if page is free of the right migratetype */
22333e7d3449SMel Gorman if (!free_area_empty(area, migratetype))
22343e7d3449SMel Gorman return COMPACT_SUCCESS;
22353e7d3449SMel Gorman
223620311420SVlastimil Babka #ifdef CONFIG_CMA
223720311420SVlastimil Babka /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
223820311420SVlastimil Babka if (migratetype == MIGRATE_MOVABLE &&
223920311420SVlastimil Babka !free_area_empty(area, MIGRATE_CMA))
224020311420SVlastimil Babka return COMPACT_SUCCESS;
224120311420SVlastimil Babka #endif
22423e7d3449SMel Gorman /*
224320311420SVlastimil Babka * Job done if allocation would steal freepages from
22443e7d3449SMel Gorman * other migratetype buddy lists.
22453e7d3449SMel Gorman */
2246cc5c9f09SVlastimil Babka if (find_suitable_fallback(area, order, migratetype,
22473e7d3449SMel Gorman true, &can_steal) != -1)
22483e7d3449SMel Gorman /*
2249837d026dSJoonsoo Kim * Movable pages are OK in any pageblock. If we are
2250837d026dSJoonsoo Kim * stealing for a non-movable allocation, make sure
2251837d026dSJoonsoo Kim * we finish compacting the current pageblock first
2252837d026dSJoonsoo Kim * (which is assured by the above migrate_pfn align
2253837d026dSJoonsoo Kim * check) so it is as free as possible and we won't
2254837d026dSJoonsoo Kim * have to steal another one soon.
2255837d026dSJoonsoo Kim */
225686a294a8SMichal Hocko return COMPACT_SUCCESS;
225786a294a8SMichal Hocko }
225886a294a8SMichal Hocko
225986a294a8SMichal Hocko out:
226086a294a8SMichal Hocko if (cc->contended || fatal_signal_pending(current))
226186a294a8SMichal Hocko ret = COMPACT_CONTENDED;
226286a294a8SMichal Hocko
226386a294a8SMichal Hocko return ret;
226486a294a8SMichal Hocko }
226586a294a8SMichal Hocko
compact_finished(struct compact_control * cc)226697a225e6SJoonsoo Kim static enum compact_result compact_finished(struct compact_control *cc)
226797a225e6SJoonsoo Kim {
226886a294a8SMichal Hocko int ret;
226986a294a8SMichal Hocko
227086a294a8SMichal Hocko ret = __compact_finished(cc);
227186a294a8SMichal Hocko trace_mm_compaction_finished(cc->zone, cc->order, ret);
227286a294a8SMichal Hocko if (ret == COMPACT_NO_SUITABLE_PAGE)
227386a294a8SMichal Hocko ret = COMPACT_CONTINUE;
227486a294a8SMichal Hocko
227586a294a8SMichal Hocko return ret;
227686a294a8SMichal Hocko }
22775a1c84b4SMel Gorman
__compaction_suitable(struct zone * zone,int order,int highest_zoneidx,unsigned long wmark_target)227886a294a8SMichal Hocko static bool __compaction_suitable(struct zone *zone, int order,
227986a294a8SMichal Hocko int highest_zoneidx,
228097a225e6SJoonsoo Kim unsigned long wmark_target)
2281cff387d6SMiaohe Lin {
228286a294a8SMichal Hocko unsigned long watermark;
228386a294a8SMichal Hocko /*
228486a294a8SMichal Hocko * Watermarks for order-0 must be met for compaction to be able to
228586a294a8SMichal Hocko * isolate free pages for migration targets. This means that the
228686a294a8SMichal Hocko * watermark and alloc_flags have to match, or be more pessimistic than
228786a294a8SMichal Hocko * the check in __isolate_free_page(). We don't use the direct
22885e1f0f09SMel Gorman * compactor's alloc_flags, as they are not relevant for freepage
22895e1f0f09SMel Gorman * isolation. We however do use the direct compactor's highest_zoneidx
2290748446bbSMel Gorman * to skip over zones where lowmem reserves would prevent allocation
2291ea7ab982SMichal Hocko * even if compaction succeeds.
229240cacbcbSMel Gorman * For costly orders, we require low watermark instead of min for
229340cacbcbSMel Gorman * compaction to proceed to increase its chances.
2294566e54e1SMel Gorman * ALLOC_CMA is used, as pages in CMA pageblocks are considered
2295e0b9daebSDavid Rientjes * suitable migration targets
22968854c55fSMel Gorman */
229784b328aaSBaolin Wang watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2298748446bbSMel Gorman low_wmark_pages(zone) : min_wmark_pages(zone);
2299a94b5252SYafang Shao watermark += compact_gap(order);
2300a94b5252SYafang Shao return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2301a94b5252SYafang Shao ALLOC_CMA, wmark_target);
2302a94b5252SYafang Shao }
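/*
 * Illustrative sketch (not part of this file's build): the order-0 demand the
 * check above imposes. compact_gap() is assumed here to be 2UL << order
 * (room for the migration targets plus the allocation itself) and
 * PAGE_ALLOC_COSTLY_ORDER is assumed to be 3; the watermark figures are
 * made-up example values.
 */
#if 0
#include <stdio.h>

static unsigned long compact_gap_sketch(unsigned int order)
{
	return 2UL << order;	/* assumed definition, see comment above */
}

int main(void)
{
	unsigned long wmark_min = 1024, wmark_low = 1280;	/* example values */
	unsigned int order;

	for (order = 1; order <= 10; order++) {
		/* costly orders (> 3, assumed) must clear the low watermark */
		unsigned long base = order > 3 ? wmark_low : wmark_min;

		printf("order %2u: order-0 free must reach %4lu pages\n",
		       order, base + compact_gap_sketch(order));
	}
	return 0;
}
#endif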
2303a94b5252SYafang Shao
2304a94b5252SYafang Shao /*
2305a94b5252SYafang Shao * compaction_suitable: Is this suitable to run compaction on this zone now?
2306a94b5252SYafang Shao */
compaction_suitable(struct zone * zone,int order,int highest_zoneidx)2307a94b5252SYafang Shao bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
2308a94b5252SYafang Shao {
2309a94b5252SYafang Shao enum compact_result compact_result;
231001c0bfe0SWei Yang bool suitable;
231140cacbcbSMel Gorman
231297a225e6SJoonsoo Kim suitable = __compaction_suitable(zone, order, highest_zoneidx,
23133e7d3449SMel Gorman zone_page_state(zone, NR_FREE_PAGES));
2314cf378319SVlastimil Babka /*
23153e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to
2316c46649deSMichal Hocko * low memory or external fragmentation
2317c46649deSMichal Hocko *
2318c46649deSMichal Hocko * index of -1000 would imply allocations might succeed depending on
23193e7d3449SMel Gorman * watermarks, but we already failed the high-order watermark check
2320c89511abSMel Gorman * index towards 0 implies failure is due to lack of memory
2321d3132e4bSVlastimil Babka * index towards 1000 implies failure is due to fragmentation
2322accf6242SVlastimil Babka *
2323d3132e4bSVlastimil Babka * Only compact if a failure would be due to fragmentation. Also
232440cacbcbSMel Gorman * ignore fragindex for non-costly orders where the alternative to
232540cacbcbSMel Gorman * a successful reclaim/compaction is OOM. Fragindex and the
2326d3132e4bSVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
2327d3132e4bSVlastimil Babka * excessive compaction for costly orders, but it should not be at the
2328c89511abSMel Gorman * expense of system stability.
232906ed2998SVlastimil Babka */
233006ed2998SVlastimil Babka if (suitable) {
233106ed2998SVlastimil Babka compact_result = COMPACT_CONTINUE;
2332c89511abSMel Gorman if (order > PAGE_ALLOC_COSTLY_ORDER) {
233370b44595SMel Gorman int fragindex = fragmentation_index(zone, order);
233406ed2998SVlastimil Babka
233506ed2998SVlastimil Babka if (fragindex >= 0 &&
233606ed2998SVlastimil Babka fragindex <= sysctl_extfrag_threshold) {
233706ed2998SVlastimil Babka suitable = false;
233840cacbcbSMel Gorman compact_result = COMPACT_NOT_SUITABLE_ZONE;
233940cacbcbSMel Gorman }
2340623446e4SJoonsoo Kim }
234106b6640aSVlastimil Babka } else {
234240cacbcbSMel Gorman compact_result = COMPACT_SKIPPED;
2343c89511abSMel Gorman }
2344623446e4SJoonsoo Kim
2345c89511abSMel Gorman trace_mm_compaction_suitable(zone, order, compact_result);
234640cacbcbSMel Gorman
234740cacbcbSMel Gorman return suitable;
2348c89511abSMel Gorman }
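/*
 * Illustrative sketch (not part of this file's build): the fragindex
 * heuristic above for costly orders. A vm.extfrag_threshold of 500 is an
 * assumed example value. An index of -1000, or one above the threshold,
 * lets compaction continue; a low non-negative index means the failure
 * looks like a plain shortage of memory, so reclaim is preferred.
 */
#if 0
#include <stdio.h>

static const char *costly_order_decision(int fragindex, int extfrag_threshold)
{
	if (fragindex >= 0 && fragindex <= extfrag_threshold)
		return "skip compaction (reclaim instead)";
	return "continue with compaction";
}

int main(void)
{
	int samples[] = { -1000, 100, 500, 501, 900 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("fragindex %5d -> %s\n", samples[i],
		       costly_order_decision(samples[i], 500));
	return 0;
}
#endif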
2349c8f7de0bSMichal Hocko
compaction_zonelist_suitable(struct alloc_context * ac,int order,int alloc_flags)2350e332f741SMel Gorman bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2351c8f7de0bSMichal Hocko int alloc_flags)
235206ed2998SVlastimil Babka {
2353c8f7de0bSMichal Hocko struct zone *zone;
2354566e54e1SMel Gorman struct zoneref *z;
2355748446bbSMel Gorman
23568854c55fSMel Gorman /*
23578854c55fSMel Gorman * Make sure at least one zone would pass __compaction_suitable if we continue
23588854c55fSMel Gorman * retrying the reclaim.
23598854c55fSMel Gorman */
23608854c55fSMel Gorman for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
23618854c55fSMel Gorman ac->highest_zoneidx, ac->nodemask) {
23628854c55fSMel Gorman unsigned long available;
23638854c55fSMel Gorman
23648854c55fSMel Gorman /*
23658854c55fSMel Gorman * Do not consider all the reclaimable memory because we do not
23668854c55fSMel Gorman * want to trash just for a single high order allocation which
2367abd4349fSBaolin Wang * is even not guaranteed to appear even if __compaction_suitable
23680eb927c0SMel Gorman * is happy about the watermark check.
2369361a2a22SMinchan Kim */
2370361a2a22SMinchan Kim available = zone_reclaimable_pages(zone) / order;
2371748446bbSMel Gorman available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
237240cacbcbSMel Gorman if (__compaction_suitable(zone, order, ac->highest_zoneidx,
23739d502c1cSMinchan Kim available))
237419d3cf9dSYanfei Xu return true;
2375748446bbSMel Gorman }
2376804d3121SMel Gorman
2377804d3121SMel Gorman return false;
2378804d3121SMel Gorman }
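/*
 * Illustrative sketch (not part of this file's build): the "available"
 * estimate above only credits a 1/order fraction of reclaimable pages, so a
 * single costly allocation cannot justify reclaiming everything. The page
 * counts are made-up example values; order 9 corresponds to a 2MB THP with
 * 4K base pages.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long reclaimable = 200000, nr_free = 8000;	/* example zone */
	unsigned int order = 9;

	printf("pages credited to the suitability check: %lu\n",
	       reclaimable / order + nr_free);	/* 22222 + 8000 */
	return 0;
}
#endif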
2379804d3121SMel Gorman
2380804d3121SMel Gorman static enum compact_result
compact_zone(struct compact_control * cc,struct capture_control * capc)2381804d3121SMel Gorman compact_zone(struct compact_control *cc, struct capture_control *capc)
2382804d3121SMel Gorman {
2383804d3121SMel Gorman enum compact_result ret;
2384804d3121SMel Gorman unsigned long start_pfn = cc->zone->zone_start_pfn;
2385804d3121SMel Gorman unsigned long end_pfn = zone_end_pfn(cc->zone);
238619d3cf9dSYanfei Xu unsigned long last_migrated_pfn;
2387804d3121SMel Gorman const bool sync = cc->mode != MIGRATE_ASYNC;
2388804d3121SMel Gorman bool update_cached;
2389804d3121SMel Gorman unsigned int nr_succeeded = 0;
239032aaf055SPengfei Li
2391f9e35b3bSMel Gorman /*
23922d1e1041SVlastimil Babka * These counters track activities during zone compaction. Initialize
23935733c7d1SRafael Aquini * them before compacting a new zone.
2394e64c5237SShaohua Li */
2395f9e35b3bSMel Gorman cc->total_migrate_scanned = 0;
2396f9e35b3bSMel Gorman cc->total_free_scanned = 0;
23978854c55fSMel Gorman cc->nr_migratepages = 0;
23988854c55fSMel Gorman cc->nr_freepages = 0;
23998854c55fSMel Gorman INIT_LIST_HEAD(&cc->freepages);
24008854c55fSMel Gorman INIT_LIST_HEAD(&cc->migratepages);
24018854c55fSMel Gorman
2402fdaf7f5cSVlastimil Babka cc->migratetype = gfp_migratetype(cc->gfp_mask);
2403fdaf7f5cSVlastimil Babka
2404fdaf7f5cSVlastimil Babka if (!is_via_compact_memory(cc->order)) {
2405fdaf7f5cSVlastimil Babka unsigned long watermark;
2406fdaf7f5cSVlastimil Babka
2407fdaf7f5cSVlastimil Babka /* Allocation can already succeed, nothing to do */
2408f9e35b3bSMel Gorman watermark = wmark_pages(cc->zone,
24098854c55fSMel Gorman cc->alloc_flags & ALLOC_WMARK_MASK);
241019d3cf9dSYanfei Xu if (zone_watermark_ok(cc->zone, cc->order, watermark,
2411f9e35b3bSMel Gorman cc->highest_zoneidx, cc->alloc_flags))
2412748446bbSMel Gorman return COMPACT_SUCCESS;
2413d53aea3dSDavid Rientjes
2414e0b9daebSDavid Rientjes /* Compaction is likely to fail */
241584b328aaSBaolin Wang if (!compaction_suitable(cc->zone, cc->order,
2416748446bbSMel Gorman cc->highest_zoneidx))
2417abd4349fSBaolin Wang return COMPACT_SKIPPED;
2418748446bbSMel Gorman }
2419f8c9301fSVlastimil Babka
2420f8c9301fSVlastimil Babka /*
24219d502c1cSMinchan Kim * Clear pageblock skip if there were failures recently and compaction
24225733c7d1SRafael Aquini * is about to be retried after being deferred.
24237ed695e0SVlastimil Babka */
24247ed695e0SVlastimil Babka if (compaction_restarting(cc->zone, cc->order))
24257ed695e0SVlastimil Babka __reset_isolation_suitable(cc->zone);
24267ed695e0SVlastimil Babka
2427f2849aa0SVlastimil Babka /*
24282d1e1041SVlastimil Babka * Set up to move all movable pages to the end of the zone. Use cached
24294bf2bba3SDavid Rientjes * information on where the scanners should start (unless we explicitly
2430748446bbSMel Gorman * want to compact the whole zone), but check that it is initialised
2431fdd048e1SVlastimil Babka * by ensuring the values are within zone boundaries.
2432fdd048e1SVlastimil Babka */
2433fdd048e1SVlastimil Babka cc->fast_start_pfn = 0;
2434fdd048e1SVlastimil Babka if (cc->whole_zone) {
2435fdd048e1SVlastimil Babka cc->migrate_pfn = start_pfn;
2436fdd048e1SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2437fdd048e1SVlastimil Babka } else {
2438fdd048e1SVlastimil Babka cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2439fdd048e1SVlastimil Babka cc->free_pfn = cc->zone->compact_cached_free_pfn;
2440566e54e1SMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2441fdd048e1SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
24424bf2bba3SDavid Rientjes cc->zone->compact_cached_free_pfn = cc->free_pfn;
2443fdaf7f5cSVlastimil Babka }
2444fdaf7f5cSVlastimil Babka if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2445fdaf7f5cSVlastimil Babka cc->migrate_pfn = start_pfn;
2446fdaf7f5cSVlastimil Babka cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2447fdaf7f5cSVlastimil Babka cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2448fdaf7f5cSVlastimil Babka }
2449fdaf7f5cSVlastimil Babka
2450fdaf7f5cSVlastimil Babka if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2451fdaf7f5cSVlastimil Babka cc->whole_zone = true;
2452566e54e1SMel Gorman }
2453fdaf7f5cSVlastimil Babka
245406b6640aSVlastimil Babka last_migrated_pfn = 0;
2455fdaf7f5cSVlastimil Babka
2456566e54e1SMel Gorman /*
2457b01b2141SIngo Molnar * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2458fdaf7f5cSVlastimil Babka * the basis that some migrations will fail in ASYNC mode. However,
2459566e54e1SMel Gorman * if the cached PFNs match and pageblocks are skipped due to having
2460fdaf7f5cSVlastimil Babka * no isolation candidates, then the sync state does not matter.
2461fdaf7f5cSVlastimil Babka * Until a pageblock with isolation candidates is found, keep the
2462fdaf7f5cSVlastimil Babka * cached PFNs in sync to avoid revisiting the same blocks.
24635e1f0f09SMel Gorman */
24645e1f0f09SMel Gorman update_cached = !sync &&
24655e1f0f09SMel Gorman cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
24665e1f0f09SMel Gorman
24675e1f0f09SMel Gorman trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
2468748446bbSMel Gorman
2469748446bbSMel Gorman /* lru_add_drain_all could be expensive with involving other CPUs */
2470f9e35b3bSMel Gorman lru_add_drain();
24716bace090SVlastimil Babka
24726bace090SVlastimil Babka while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
24736bace090SVlastimil Babka int err;
24746bace090SVlastimil Babka unsigned long iteration_start_pfn = cc->migrate_pfn;
24756bace090SVlastimil Babka
24766bace090SVlastimil Babka /*
24776bace090SVlastimil Babka * Avoid multiple rescans of the same pageblock which can
24786bace090SVlastimil Babka * happen if a page cannot be isolated (dirty/writeback in
24796bace090SVlastimil Babka * async mode) or if the migrated pages are being allocated
24806bace090SVlastimil Babka * before the pageblock is cleared. The first rescan will
248106b6640aSVlastimil Babka * capture the entire pageblock for migration. If it fails,
24826bace090SVlastimil Babka * it'll be marked skip and scanning will proceed as normal.
24836bace090SVlastimil Babka */
24846bace090SVlastimil Babka cc->finish_pageblock = false;
24856bace090SVlastimil Babka if (pageblock_start_pfn(last_migrated_pfn) ==
248640cacbcbSMel Gorman pageblock_start_pfn(iteration_start_pfn)) {
248740cacbcbSMel Gorman cc->finish_pageblock = true;
24886bace090SVlastimil Babka }
2489748446bbSMel Gorman
24907f354a54SDavid Rientjes rescan:
24917f354a54SDavid Rientjes switch (isolate_migratepages(cc)) {
24927f354a54SDavid Rientjes case ISOLATE_ABORT:
2493abd4349fSBaolin Wang ret = COMPACT_CONTENDED;
24940eb927c0SMel Gorman putback_movable_pages(&cc->migratepages);
2495748446bbSMel Gorman cc->nr_migratepages = 0;
2496748446bbSMel Gorman goto out;
249776ab0f53SMel Gorman case ISOLATE_NONE:
2498ea7ab982SMichal Hocko if (update_cached) {
2499c3486f53SVlastimil Babka cc->zone->compact_cached_migrate_pfn[1] =
250097a225e6SJoonsoo Kim cc->zone->compact_cached_migrate_pfn[0];
25015e1f0f09SMel Gorman }
250256de7263SMel Gorman
2503ea7ab982SMichal Hocko /*
250456de7263SMel Gorman * We haven't isolated and migrated anything, but
250556de7263SMel Gorman * there might still be unflushed migrations from
2506dbe2d4e4SMel Gorman * previous cc->order aligned block.
25076d7ce559SDavid Rientjes */
250856de7263SMel Gorman goto check_drain;
2509a5508cd8SVlastimil Babka case ISOLATE_SUCCESS:
2510a5508cd8SVlastimil Babka update_cached = false;
2511ebff3980SVlastimil Babka last_migrated_pfn = max(cc->zone->zone_start_pfn,
251297a225e6SJoonsoo Kim pageblock_start_pfn(cc->migrate_pfn - 1));
2513accf6242SVlastimil Babka }
2514a8e025e5SVlastimil Babka
25159f7e3387SVlastimil Babka err = migrate_pages(&cc->migratepages, compaction_alloc,
25169f7e3387SVlastimil Babka compaction_free, (unsigned long)cc, cc->mode,
251756de7263SMel Gorman MR_COMPACTION, &nr_succeeded);
25185e1f0f09SMel Gorman
25195e1f0f09SMel Gorman trace_mm_compaction_migratepages(cc, nr_succeeded);
25205e1f0f09SMel Gorman
25215e1f0f09SMel Gorman /* All pages were either migrated or will be released */
25225e1f0f09SMel Gorman cc->nr_migratepages = 0;
2523b9e20f0dSVlastimil Babka if (err) {
2524b9e20f0dSVlastimil Babka putback_movable_pages(&cc->migratepages);
2525b9e20f0dSVlastimil Babka /*
2526b9e20f0dSVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet
2527b9e20f0dSVlastimil Babka * and we want compact_finished() to detect it
2528b9e20f0dSVlastimil Babka */
2529b9e20f0dSVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) {
253056de7263SMel Gorman ret = COMPACT_CONTENDED;
25315e1f0f09SMel Gorman goto out;
2532e64c5237SShaohua Li }
2533e64c5237SShaohua Li /*
2534e64c5237SShaohua Li * If an ASYNC or SYNC_LIGHT migration fails for a page
2535e64c5237SShaohua Li * within the pageblock_order-aligned block and
2536b9e20f0dSVlastimil Babka * fast_find_migrateblock may be used, then scan the
2537b9e20f0dSVlastimil Babka * remainder of the pageblock. This will mark the
2538b9e20f0dSVlastimil Babka * pageblock "skip" to avoid rescanning in the near
2539b9e20f0dSVlastimil Babka * future. This will isolate more pages than necessary
2540b9e20f0dSVlastimil Babka * for the request but avoid loops due to
2541b9e20f0dSVlastimil Babka * fast_find_migrateblock revisiting blocks that were
2542b9e20f0dSVlastimil Babka * recently partially scanned.
254306dac2f4SCharan Teja Reddy */
254406dac2f4SCharan Teja Reddy if (!pageblock_aligned(cc->migrate_pfn) &&
254506dac2f4SCharan Teja Reddy !cc->ignore_skip_hint && !cc->finish_pageblock &&
254606dac2f4SCharan Teja Reddy (cc->mode < MIGRATE_SYNC)) {
254706dac2f4SCharan Teja Reddy cc->finish_pageblock = true;
254806dac2f4SCharan Teja Reddy
254906dac2f4SCharan Teja Reddy /*
255006dac2f4SCharan Teja Reddy * Draining pcplists does not help THP if
25515e1f0f09SMel Gorman * any page failed to migrate. Even after
2552e64c5237SShaohua Li * drain, the pageblock will not be free.
255356de7263SMel Gorman */
255456de7263SMel Gorman if (cc->order == COMPACTION_HPAGE_ORDER)
25555e771905SMel Gorman last_migrated_pfn = 0;
25565e771905SMel Gorman
255756de7263SMel Gorman goto rescan;
255856de7263SMel Gorman }
255956de7263SMel Gorman }
25601a6d53a1SVlastimil Babka
25611a6d53a1SVlastimil Babka /* Stop if a page has been captured */
25621a6d53a1SVlastimil Babka if (capc && capc->page) {
2563112d2d29SYang Shi ret = COMPACT_SUCCESS;
25646467552cSVlastimil Babka break;
256556de7263SMel Gorman }
256656de7263SMel Gorman
256756de7263SMel Gorman check_drain:
2568ea7ab982SMichal Hocko /*
2569c603844bSMel Gorman * Has the migration scanner moved away from the previous
25705e1f0f09SMel Gorman * cc->order aligned block where we migrated from? If yes,
257156de7263SMel Gorman * flush the pages that were freed, so that they can merge and
2572fe573327SVasily Averin * compact_finished() can detect immediately if allocation
257356de7263SMel Gorman * would succeed.
257456de7263SMel Gorman */
25751d4746d3SMichal Hocko if (cc->order > 0 && last_migrated_pfn) {
257656de7263SMel Gorman unsigned long current_block_start =
257773e64c51SMichal Hocko block_start_pfn(cc->migrate_pfn, cc->order);
257873e64c51SMichal Hocko
257973e64c51SMichal Hocko if (last_migrated_pfn < current_block_start) {
258073e64c51SMichal Hocko lru_add_drain_cpu_zone(cc->zone);
258173e64c51SMichal Hocko /* No more flushing until we migrate again */
258253853e2dSVlastimil Babka last_migrated_pfn = 0;
258356de7263SMel Gorman }
2584a5508cd8SVlastimil Babka }
2585837d026dSJoonsoo Kim }
258656de7263SMel Gorman
258797a225e6SJoonsoo Kim out:
258897a225e6SJoonsoo Kim /*
2589ea7ab982SMichal Hocko * Release free pages and update where the free scanner should restart,
259056de7263SMel Gorman * so we don't leave any returned pages behind in the next attempt.
2591a8e025e5SVlastimil Babka */
2592a8e025e5SVlastimil Babka if (cc->nr_freepages > 0) {
25931d4746d3SMichal Hocko unsigned long free_pfn = release_freepages(&cc->freepages);
259453853e2dSVlastimil Babka
25951d4746d3SMichal Hocko cc->nr_freepages = 0;
259653853e2dSVlastimil Babka VM_BUG_ON(free_pfn == 0);
2597a5508cd8SVlastimil Babka /* The cached pfn is always the first in a pageblock */
259897a225e6SJoonsoo Kim free_pfn = pageblock_start_pfn(free_pfn);
259956de7263SMel Gorman /*
260056de7263SMel Gorman * Only go back, not forward. The cached pfn might have been
26017ceb009aSVlastimil Babka * already reset to zone end in compact_finished()
26027ceb009aSVlastimil Babka */
260353853e2dSVlastimil Babka if (free_pfn > cc->zone->compact_cached_free_pfn)
260453853e2dSVlastimil Babka cc->zone->compact_cached_free_pfn = free_pfn;
260553853e2dSVlastimil Babka }
260653853e2dSVlastimil Babka
260753853e2dSVlastimil Babka count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
260853853e2dSVlastimil Babka count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
260953853e2dSVlastimil Babka
26101f9efdefSVlastimil Babka trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
2611c3486f53SVlastimil Babka
26121f9efdefSVlastimil Babka VM_BUG_ON(!list_empty(&cc->freepages));
26131f9efdefSVlastimil Babka VM_BUG_ON(!list_empty(&cc->migratepages));
2614a5508cd8SVlastimil Babka
2615c3486f53SVlastimil Babka return ret;
261653853e2dSVlastimil Babka }
261753853e2dSVlastimil Babka
compact_zone_order(struct zone * zone,int order,gfp_t gfp_mask,enum compact_priority prio,unsigned int alloc_flags,int highest_zoneidx,struct page ** capture)261853853e2dSVlastimil Babka static enum compact_result compact_zone_order(struct zone *zone, int order,
261953853e2dSVlastimil Babka gfp_t gfp_mask, enum compact_priority prio,
262053853e2dSVlastimil Babka unsigned int alloc_flags, int highest_zoneidx,
262153853e2dSVlastimil Babka struct page **capture)
26221f9efdefSVlastimil Babka {
26231f9efdefSVlastimil Babka enum compact_result ret;
26241f9efdefSVlastimil Babka struct compact_control cc = {
26251f9efdefSVlastimil Babka .order = order,
2626c3486f53SVlastimil Babka .search_order = order,
26271f9efdefSVlastimil Babka .gfp_mask = gfp_mask,
2628c3486f53SVlastimil Babka .zone = zone,
2629c3486f53SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ?
26301f9efdefSVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
26311f9efdefSVlastimil Babka .alloc_flags = alloc_flags,
26321f9efdefSVlastimil Babka .highest_zoneidx = highest_zoneidx,
263356de7263SMel Gorman .direct_compaction = true,
263456de7263SMel Gorman .whole_zone = (prio == MIN_COMPACT_PRIORITY),
263556de7263SMel Gorman .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2636facdaa91SNitin Gupta .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2637facdaa91SNitin Gupta };
2638facdaa91SNitin Gupta struct capture_control capc = {
2639facdaa91SNitin Gupta .cc = &cc,
2640facdaa91SNitin Gupta .page = NULL,
2641facdaa91SNitin Gupta };
2642facdaa91SNitin Gupta
2643facdaa91SNitin Gupta /*
2644facdaa91SNitin Gupta * Make sure the structs are really initialized before we expose the
2645facdaa91SNitin Gupta * capture control, in case we are interrupted and the interrupt handler
2646facdaa91SNitin Gupta * frees a page.
2647facdaa91SNitin Gupta */
2648facdaa91SNitin Gupta barrier();
2649facdaa91SNitin Gupta WRITE_ONCE(current->capture_control, &capc);
2650facdaa91SNitin Gupta
2651facdaa91SNitin Gupta ret = compact_zone(&cc, &capc);
2652facdaa91SNitin Gupta
2653facdaa91SNitin Gupta /*
2654facdaa91SNitin Gupta * Make sure we hide capture control first before we read the captured
2655facdaa91SNitin Gupta * page pointer, otherwise an interrupt could free and capture a page
2656facdaa91SNitin Gupta * and we would leak it.
2657facdaa91SNitin Gupta */
2658facdaa91SNitin Gupta WRITE_ONCE(current->capture_control, NULL);
2659facdaa91SNitin Gupta *capture = READ_ONCE(capc.page);
2660facdaa91SNitin Gupta /*
2661facdaa91SNitin Gupta * Technically, it is also possible that compaction is skipped but
2662facdaa91SNitin Gupta * the page is still captured out of luck (an IRQ came and freed the page).
2663facdaa91SNitin Gupta * Returning COMPACT_SUCCESS in such cases helps in properly accounting
2664facdaa91SNitin Gupta * the COMPACT[STALL|FAIL] when compaction is skipped.
2665facdaa91SNitin Gupta */
2666facdaa91SNitin Gupta if (*capture)
2667facdaa91SNitin Gupta ret = COMPACT_SUCCESS;
2668facdaa91SNitin Gupta
2669facdaa91SNitin Gupta return ret;
2670facdaa91SNitin Gupta }
267156de7263SMel Gorman
267276ab0f53SMel Gorman /**
26737103f16dSAndrew Morton * try_to_compact_pages - Direct compact to satisfy a high-order allocation
26747be62de9SRik van Riel * @gfp_mask: The GFP mask of the current allocation
2675791cae96SVlastimil Babka * @order: The order of the current allocation
2676791cae96SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation
2677791cae96SVlastimil Babka * @ac: The context of current allocation
26787be62de9SRik van Riel * @prio: Determines how hard direct compaction should try to succeed
26797be62de9SRik van Riel * @capture: Pointer to free page created by compaction will be stored here
2680e0b9daebSDavid Rientjes *
268191ca9186SDavid Rientjes * This is the main entry point for direct page compaction.
268206ed2998SVlastimil Babka */
try_to_compact_pages(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,enum compact_priority prio,struct page ** capture)268373e64c51SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
26847be62de9SRik van Riel unsigned int alloc_flags, const struct alloc_context *ac,
26857be62de9SRik van Riel enum compact_priority prio, struct page **capture)
2686791cae96SVlastimil Babka {
2687791cae96SVlastimil Babka struct zoneref *z;
2688791cae96SVlastimil Babka struct zone *zone;
2689791cae96SVlastimil Babka enum compact_result rc = COMPACT_SKIPPED;
2690791cae96SVlastimil Babka
2691791cae96SVlastimil Babka if (!gfp_compaction_allowed(gfp_mask))
2692791cae96SVlastimil Babka return COMPACT_SKIPPED;
2693791cae96SVlastimil Babka
2694791cae96SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
26955e1f0f09SMel Gorman
2696791cae96SVlastimil Babka /* Compact each zone in the list */
2697791cae96SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2698791cae96SVlastimil Babka ac->highest_zoneidx, ac->nodemask) {
2699791cae96SVlastimil Babka enum compact_result status;
27007be62de9SRik van Riel
27017be62de9SRik van Riel if (prio > MIN_COMPACT_PRIORITY
270276ab0f53SMel Gorman && compaction_deferred(zone, order)) {
27037964c06dSJason Liu rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
270476ab0f53SMel Gorman continue;
270576ab0f53SMel Gorman }
270676ab0f53SMel Gorman
27078575ec29SHugh Dickins status = compact_zone_order(zone, order, gfp_mask, prio,
27088575ec29SHugh Dickins alloc_flags, ac->highest_zoneidx, capture);
27098575ec29SHugh Dickins rc = max(status, rc);
271076ab0f53SMel Gorman
271176ab0f53SMel Gorman /* The allocation should succeed, stop compacting */
271276ab0f53SMel Gorman if (status == COMPACT_SUCCESS) {
271376ab0f53SMel Gorman /*
2714fec4eb2cSYaowei Bai * We think the allocation will succeed in this zone,
2715facdaa91SNitin Gupta * but it is not certain, hence the false. The caller
2716facdaa91SNitin Gupta * will repeat this with true if allocation indeed
2717facdaa91SNitin Gupta * succeeds in this zone.
2718facdaa91SNitin Gupta */
2719d34c0a75SNitin Gupta compaction_defer_reset(zone, order, false);
2720facdaa91SNitin Gupta
272165d759c8SCharan Teja Reddy break;
272265d759c8SCharan Teja Reddy }
272365d759c8SCharan Teja Reddy
272465d759c8SCharan Teja Reddy if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
272565d759c8SCharan Teja Reddy status == COMPACT_PARTIAL_SKIPPED))
272665d759c8SCharan Teja Reddy /*
272765d759c8SCharan Teja Reddy * We think that allocation won't succeed in this zone
272865d759c8SCharan Teja Reddy * so we defer compaction there. If it ends up
272965d759c8SCharan Teja Reddy * succeeding after all, it will be reset.
273065d759c8SCharan Teja Reddy */
273165d759c8SCharan Teja Reddy defer_compaction(zone, order);
273265d759c8SCharan Teja Reddy
273365d759c8SCharan Teja Reddy /*
273465d759c8SCharan Teja Reddy * We might have stopped compacting due to need_resched() in
273565d759c8SCharan Teja Reddy * async compaction, or due to a fatal signal detected. In that
273665d759c8SCharan Teja Reddy * case do not try further zones
273765d759c8SCharan Teja Reddy */
273865d759c8SCharan Teja Reddy if ((prio == COMPACT_PRIO_ASYNC && need_resched())
273965d759c8SCharan Teja Reddy || fatal_signal_pending(current))
274065d759c8SCharan Teja Reddy break;
274165d759c8SCharan Teja Reddy }
274265d759c8SCharan Teja Reddy
274365d759c8SCharan Teja Reddy return rc;
274465d759c8SCharan Teja Reddy }
2745facdaa91SNitin Gupta
2746fec4eb2cSYaowei Bai /*
2747fec4eb2cSYaowei Bai * Compact all zones within a node until each zone's fragmentation score
2748fec4eb2cSYaowei Bai * falls within the proactive compaction thresholds (as determined by the
274976ab0f53SMel Gorman * proactiveness tunable).
275032927393SChristoph Hellwig *
275176ab0f53SMel Gorman * It is possible that the function returns before reaching score targets
275276ab0f53SMel Gorman * due to various back-off conditions, such as, contention on per-node or
27537964c06dSJason Liu * per-zone locks.
275476ab0f53SMel Gorman */
proactive_compact_node(pg_data_t * pgdat)275576ab0f53SMel Gorman static void proactive_compact_node(pg_data_t *pgdat)
275676ab0f53SMel Gorman {
2757ed4a6d7fSMel Gorman int zoneid;
2758ed4a6d7fSMel Gorman struct zone *zone;
275917adb230SYueHaibing struct compact_control cc = {
276010fbcf4cSKay Sievers .order = -1,
2761ed4a6d7fSMel Gorman .mode = MIGRATE_SYNC_LIGHT,
2762ed4a6d7fSMel Gorman .ignore_skip_hint = true,
27638575ec29SHugh Dickins .whole_zone = true,
27648575ec29SHugh Dickins .gfp_mask = GFP_KERNEL,
27658575ec29SHugh Dickins .proactive_compaction = true,
27668575ec29SHugh Dickins };
27678575ec29SHugh Dickins
27688575ec29SHugh Dickins for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
27698575ec29SHugh Dickins zone = &pgdat->node_zones[zoneid];
27708575ec29SHugh Dickins if (!populated_zone(zone))
2771ed4a6d7fSMel Gorman continue;
2772ed4a6d7fSMel Gorman
2773ed4a6d7fSMel Gorman cc.zone = zone;
277417adb230SYueHaibing
2775ed4a6d7fSMel Gorman compact_zone(&cc, NULL);
2776ed4a6d7fSMel Gorman
2777ed4a6d7fSMel Gorman count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
277810fbcf4cSKay Sievers cc.total_migrate_scanned);
2779ed4a6d7fSMel Gorman count_compact_events(KCOMPACTD_FREE_SCANNED,
2780ed4a6d7fSMel Gorman cc.total_free_scanned);
2781ed4a6d7fSMel Gorman }
2782ed4a6d7fSMel Gorman }
278310fbcf4cSKay Sievers
2784ed4a6d7fSMel Gorman /* Compact all zones within a node */
compact_node(int nid)2785ed4a6d7fSMel Gorman static void compact_node(int nid)
2786ff9543fdSMichal Nazarewicz {
2787698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid);
2788698b1b30SVlastimil Babka int zoneid;
278965d759c8SCharan Teja Reddy struct zone *zone;
279065d759c8SCharan Teja Reddy struct compact_control cc = {
2791698b1b30SVlastimil Babka .order = -1,
2792698b1b30SVlastimil Babka .mode = MIGRATE_SYNC,
2793698b1b30SVlastimil Babka .ignore_skip_hint = true,
2794698b1b30SVlastimil Babka .whole_zone = true,
2795698b1b30SVlastimil Babka .gfp_mask = GFP_KERNEL,
2796698b1b30SVlastimil Babka };
279797a225e6SJoonsoo Kim
2798698b1b30SVlastimil Babka
279997a225e6SJoonsoo Kim for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2800698b1b30SVlastimil Babka
2801698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid];
2802698b1b30SVlastimil Babka if (!populated_zone(zone))
2803698b1b30SVlastimil Babka continue;
2804698b1b30SVlastimil Babka
2805698b1b30SVlastimil Babka cc.zone = zone;
280697a225e6SJoonsoo Kim
2807698b1b30SVlastimil Babka compact_zone(&cc, NULL);
2808698b1b30SVlastimil Babka }
2809698b1b30SVlastimil Babka }
2810698b1b30SVlastimil Babka
2811698b1b30SVlastimil Babka /* Compact all nodes in the system */
compact_nodes(void)2812698b1b30SVlastimil Babka static void compact_nodes(void)
2813698b1b30SVlastimil Babka {
2814698b1b30SVlastimil Babka int nid;
2815698b1b30SVlastimil Babka
2816698b1b30SVlastimil Babka /* Flush pending updates to the LRU lists */
2817698b1b30SVlastimil Babka lru_add_drain_all();
2818698b1b30SVlastimil Babka
2819698b1b30SVlastimil Babka for_each_online_node(nid)
2820698b1b30SVlastimil Babka compact_node(nid);
2821698b1b30SVlastimil Babka }
2822698b1b30SVlastimil Babka
compaction_proactiveness_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)2823dbe2d4e4SMel Gorman static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
282497a225e6SJoonsoo Kim void *buffer, size_t *length, loff_t *ppos)
2825698b1b30SVlastimil Babka {
2826a0647dc9SDavid Rientjes int rc, nid;
282773e64c51SMichal Hocko
2828698b1b30SVlastimil Babka rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
2829698b1b30SVlastimil Babka if (rc)
283097a225e6SJoonsoo Kim return rc;
28317f354a54SDavid Rientjes
2832698b1b30SVlastimil Babka if (write && sysctl_compaction_proactiveness) {
283397a225e6SJoonsoo Kim for_each_online_node(nid) {
2834698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid);
2835698b1b30SVlastimil Babka
2836698b1b30SVlastimil Babka if (pgdat->proactive_compact_trigger)
2837698b1b30SVlastimil Babka continue;
2838698b1b30SVlastimil Babka
2839698b1b30SVlastimil Babka pgdat->proactive_compact_trigger = true;
2840698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1,
2841698b1b30SVlastimil Babka pgdat->nr_zones - 1);
2842698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait);
2843698b1b30SVlastimil Babka }
2844698b1b30SVlastimil Babka }
2845698b1b30SVlastimil Babka
2846698b1b30SVlastimil Babka return 0;
2847172400c6SVlastimil Babka }
2848172400c6SVlastimil Babka
2849a94b5252SYafang Shao /*
2850a94b5252SYafang Shao * This is the entry point for compacting all nodes via
28515e1f0f09SMel Gorman * /proc/sys/vm/compact_memory
2852698b1b30SVlastimil Babka */
sysctl_compaction_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)28537ceb009aSVlastimil Babka static int sysctl_compaction_handler(struct ctl_table *table, int write,
2854698b1b30SVlastimil Babka void *buffer, size_t *length, loff_t *ppos)
2855c8f7de0bSMichal Hocko {
2856698b1b30SVlastimil Babka int ret;
2857bc3106b2SDavid Rientjes
2858bc3106b2SDavid Rientjes ret = proc_dointvec(table, write, buffer, length, ppos);
2859bc3106b2SDavid Rientjes if (ret)
2860bc3106b2SDavid Rientjes return ret;
2861bc3106b2SDavid Rientjes
2862bc3106b2SDavid Rientjes if (sysctl_compact_memory != 1)
2863bc3106b2SDavid Rientjes return -EINVAL;
2864bc3106b2SDavid Rientjes
2865698b1b30SVlastimil Babka if (write)
2866698b1b30SVlastimil Babka compact_nodes();
2867698b1b30SVlastimil Babka
2868698b1b30SVlastimil Babka return 0;
2869698b1b30SVlastimil Babka }
2870698b1b30SVlastimil Babka
28717f354a54SDavid Rientjes #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
compact_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)28727f354a54SDavid Rientjes static ssize_t compact_store(struct device *dev,
28737f354a54SDavid Rientjes struct device_attribute *attr,
28747f354a54SDavid Rientjes const char *buf, size_t count)
28757f354a54SDavid Rientjes {
2876698b1b30SVlastimil Babka int nid = dev->id;
2877698b1b30SVlastimil Babka
2878698b1b30SVlastimil Babka if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
2879698b1b30SVlastimil Babka /* Flush pending updates to the LRU lists */
2880698b1b30SVlastimil Babka lru_add_drain_all();
2881698b1b30SVlastimil Babka
288297a225e6SJoonsoo Kim compact_node(nid);
288397a225e6SJoonsoo Kim }
2884698b1b30SVlastimil Babka
2885698b1b30SVlastimil Babka return count;
2886698b1b30SVlastimil Babka }
288797a225e6SJoonsoo Kim static DEVICE_ATTR_WO(compact);
288897a225e6SJoonsoo Kim
compaction_register_node(struct node * node)2889698b1b30SVlastimil Babka int compaction_register_node(struct node *node)
2890698b1b30SVlastimil Babka {
289197a225e6SJoonsoo Kim return device_create_file(&node->dev, &dev_attr_compact);
2892698b1b30SVlastimil Babka }
2893698b1b30SVlastimil Babka
compaction_unregister_node(struct node * node)2894698b1b30SVlastimil Babka void compaction_unregister_node(struct node *node)
2895698b1b30SVlastimil Babka {
2896698b1b30SVlastimil Babka device_remove_file(&node->dev, &dev_attr_compact);
2897698b1b30SVlastimil Babka }
2898698b1b30SVlastimil Babka #endif /* CONFIG_SYSFS && CONFIG_NUMA */
289997a225e6SJoonsoo Kim
kcompactd_work_requested(pg_data_t * pgdat)290097a225e6SJoonsoo Kim static inline bool kcompactd_work_requested(pg_data_t *pgdat)
2901698b1b30SVlastimil Babka {
29026818600fSDavidlohr Bueso return pgdat->kcompactd_max_order > 0 || kthread_should_stop() ||
29036818600fSDavidlohr Bueso pgdat->proactive_compact_trigger;
29046818600fSDavidlohr Bueso }
29056818600fSDavidlohr Bueso
kcompactd_node_suitable(pg_data_t * pgdat)29066818600fSDavidlohr Bueso static bool kcompactd_node_suitable(pg_data_t *pgdat)
2907698b1b30SVlastimil Babka {
2908698b1b30SVlastimil Babka int zoneid;
2909698b1b30SVlastimil Babka struct zone *zone;
2910698b1b30SVlastimil Babka enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
2911698b1b30SVlastimil Babka
2912698b1b30SVlastimil Babka for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
291397a225e6SJoonsoo Kim zone = &pgdat->node_zones[zoneid];
2914698b1b30SVlastimil Babka
2915698b1b30SVlastimil Babka if (!populated_zone(zone))
2916698b1b30SVlastimil Babka continue;
2917698b1b30SVlastimil Babka
2918698b1b30SVlastimil Babka /* Allocation can already succeed, check other zones */
2919698b1b30SVlastimil Babka if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
2920698b1b30SVlastimil Babka min_wmark_pages(zone),
2921698b1b30SVlastimil Babka highest_zoneidx, 0))
2922698b1b30SVlastimil Babka continue;
2923698b1b30SVlastimil Babka
2924698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order,
2925e1e92bfaSCharan Teja Reddy highest_zoneidx))
2926e1e92bfaSCharan Teja Reddy return true;
2927698b1b30SVlastimil Babka }
2928698b1b30SVlastimil Babka
2929698b1b30SVlastimil Babka return false;
2930698b1b30SVlastimil Babka }
2931698b1b30SVlastimil Babka
kcompactd_do_work(pg_data_t * pgdat)2932698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat)
2933698b1b30SVlastimil Babka {
2934698b1b30SVlastimil Babka /*
2935698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested
293697a225e6SJoonsoo Kim * order is allocatable.
2937698b1b30SVlastimil Babka */
2938698b1b30SVlastimil Babka int zoneid;
2939eb414681SJohannes Weiner struct zone *zone;
2940eb414681SJohannes Weiner struct compact_control cc = {
294165d759c8SCharan Teja Reddy .order = pgdat->kcompactd_max_order,
294265d759c8SCharan Teja Reddy .search_order = pgdat->kcompactd_max_order,
294365d759c8SCharan Teja Reddy .highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
294465d759c8SCharan Teja Reddy .mode = MIGRATE_SYNC_LIGHT,
294565d759c8SCharan Teja Reddy .ignore_skip_hint = false,
294665d759c8SCharan Teja Reddy .gfp_mask = GFP_KERNEL,
2947698b1b30SVlastimil Babka };
2948facdaa91SNitin Gupta trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
294965d759c8SCharan Teja Reddy cc.highest_zoneidx);
295065d759c8SCharan Teja Reddy count_compact_event(KCOMPACTD_WAKE);
2951698b1b30SVlastimil Babka
2952eb414681SJohannes Weiner for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
2953698b1b30SVlastimil Babka int status;
2954eb414681SJohannes Weiner
2955e1e92bfaSCharan Teja Reddy zone = &pgdat->node_zones[zoneid];
2956e1e92bfaSCharan Teja Reddy if (!populated_zone(zone))
2957e1e92bfaSCharan Teja Reddy continue;
2958e1e92bfaSCharan Teja Reddy
2959e1e92bfaSCharan Teja Reddy if (compaction_deferred(zone, cc.order))
2960e1e92bfaSCharan Teja Reddy continue;
2961e1e92bfaSCharan Teja Reddy
2962e1e92bfaSCharan Teja Reddy /* Allocation can already succeed, nothing to do */
2963facdaa91SNitin Gupta if (zone_watermark_ok(zone, cc.order,
2964facdaa91SNitin Gupta min_wmark_pages(zone), zoneid, 0))
2965facdaa91SNitin Gupta continue;
2966e1e92bfaSCharan Teja Reddy
2967e1e92bfaSCharan Teja Reddy if (!compaction_suitable(zone, cc.order, zoneid))
2968e1e92bfaSCharan Teja Reddy continue;
2969e1e92bfaSCharan Teja Reddy
2970e1e92bfaSCharan Teja Reddy if (kthread_should_stop())
2971facdaa91SNitin Gupta return;
2972facdaa91SNitin Gupta
2973facdaa91SNitin Gupta cc.zone = zone;
2974facdaa91SNitin Gupta status = compact_zone(&cc, NULL);
2975facdaa91SNitin Gupta
2976facdaa91SNitin Gupta if (status == COMPACT_SUCCESS) {
2977facdaa91SNitin Gupta compaction_defer_reset(zone, cc.order, false);
2978facdaa91SNitin Gupta } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
2979facdaa91SNitin Gupta /*
2980facdaa91SNitin Gupta * Buddy pages may become stranded on pcps that could
2981e1e92bfaSCharan Teja Reddy * otherwise coalesce on the zone's free area for
2982e1e92bfaSCharan Teja Reddy * order >= cc.order. This is ratelimited by the
2983e1e92bfaSCharan Teja Reddy * upcoming deferral.
2984facdaa91SNitin Gupta */
298565d759c8SCharan Teja Reddy drain_all_pages(zone);
298665d759c8SCharan Teja Reddy
2987698b1b30SVlastimil Babka /*
2988698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like
2989698b1b30SVlastimil Babka * sync direct compaction does.
2990698b1b30SVlastimil Babka */
2991698b1b30SVlastimil Babka defer_compaction(zone, cc.order);
2992698b1b30SVlastimil Babka }
2993698b1b30SVlastimil Babka
2994698b1b30SVlastimil Babka count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2995698b1b30SVlastimil Babka cc.total_migrate_scanned);
2996024c61eaSMiaohe Lin count_compact_events(KCOMPACTD_FREE_SCANNED,
2997698b1b30SVlastimil Babka cc.total_free_scanned);
2998698b1b30SVlastimil Babka }
2999698b1b30SVlastimil Babka
3000698b1b30SVlastimil Babka /*
3001024c61eaSMiaohe Lin * Regardless of success, we are done until woken up next. But remember
3002698b1b30SVlastimil Babka * the requested order/highest_zoneidx in case it was higher/tighter
3003698b1b30SVlastimil Babka * than our current ones
3004698b1b30SVlastimil Babka */
3005698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order)
3006698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0;
3007698b1b30SVlastimil Babka if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
3008698b1b30SVlastimil Babka pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3009698b1b30SVlastimil Babka }
3010698b1b30SVlastimil Babka
wakeup_kcompactd(pg_data_t * pgdat,int order,int highest_zoneidx)3011698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
3012e8da368aSYun-Ze Li {
3013698b1b30SVlastimil Babka if (!order)
3014698b1b30SVlastimil Babka return;
3015698b1b30SVlastimil Babka
3016698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order)
3017698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order;
3018698b1b30SVlastimil Babka
3019698b1b30SVlastimil Babka if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
3020698b1b30SVlastimil Babka pgdat->kcompactd_highest_zoneidx = highest_zoneidx;
3021698b1b30SVlastimil Babka
3022698b1b30SVlastimil Babka /*
3023698b1b30SVlastimil Babka * Pairs with implicit barrier in wait_event_freezable()
3024698b1b30SVlastimil Babka * such that wakeups are not missed.
3025698b1b30SVlastimil Babka */
3026698b1b30SVlastimil Babka if (!wq_has_sleeper(&pgdat->kcompactd_wait))
3027698b1b30SVlastimil Babka return;
3028698b1b30SVlastimil Babka
3029698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat))
3030e46b1db2SAnna-Maria Gleixner return;
3031698b1b30SVlastimil Babka
3032698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
3033698b1b30SVlastimil Babka highest_zoneidx);
3034698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait);
3035698b1b30SVlastimil Babka }
3036698b1b30SVlastimil Babka
3037698b1b30SVlastimil Babka /*
3038698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread
3039698b1b30SVlastimil Babka * from the init process.
3040698b1b30SVlastimil Babka */
kcompactd(void * p)3041698b1b30SVlastimil Babka static int kcompactd(void *p)
30423109de30SMiaohe Lin {
3043698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t *)p;
3044698b1b30SVlastimil Babka struct task_struct *tsk = current;
3045e46b1db2SAnna-Maria Gleixner long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
3046698b1b30SVlastimil Babka long timeout = default_timeout;
3047698b1b30SVlastimil Babka
3048698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3049698b1b30SVlastimil Babka
3050698b1b30SVlastimil Babka if (!cpumask_empty(cpumask))
3051e46b1db2SAnna-Maria Gleixner set_cpus_allowed_ptr(tsk, cpumask);
3052e46b1db2SAnna-Maria Gleixner
3053e46b1db2SAnna-Maria Gleixner set_freezable();
3054e46b1db2SAnna-Maria Gleixner
3055e46b1db2SAnna-Maria Gleixner pgdat->kcompactd_max_order = 0;
3056e46b1db2SAnna-Maria Gleixner pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3057e46b1db2SAnna-Maria Gleixner
3058e46b1db2SAnna-Maria Gleixner while (!kthread_should_stop()) {
3059e46b1db2SAnna-Maria Gleixner unsigned long pflags;
3060698b1b30SVlastimil Babka
3061698b1b30SVlastimil Babka /*
3062698b1b30SVlastimil Babka * Avoid the unnecessary wakeup for proactive compaction
3063698b1b30SVlastimil Babka * when it is disabled.
3064698b1b30SVlastimil Babka */
3065698b1b30SVlastimil Babka if (!sysctl_compaction_proactiveness)
3066698b1b30SVlastimil Babka timeout = MAX_SCHEDULE_TIMEOUT;
3067ff9543fdSMichal Nazarewicz trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
3068 if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
3069 kcompactd_work_requested(pgdat), timeout) &&
3070 !pgdat->proactive_compact_trigger) {
3071
3072 psi_memstall_enter(&pflags);
3073 kcompactd_do_work(pgdat);
3074 psi_memstall_leave(&pflags);
3075 /*
3076 * Reset the timeout value. The defer timeout from
3077 * proactive compaction is lost here but that is fine
3078 * because, if the zone's condition has changed substantially,
3079 * carrying on with the previous defer interval is
3080 * not useful.
3081 */
3082 timeout = default_timeout;
3083 continue;
3084 }
3085
3086 /*
3087 * Start the proactive work with default timeout. Based
3088 * on the fragmentation score, this timeout is updated.
3089 */
3090 timeout = default_timeout;
3091 if (should_proactive_compact_node(pgdat)) {
3092 unsigned int prev_score, score;
3093
3094 prev_score = fragmentation_score_node(pgdat);
3095 proactive_compact_node(pgdat);
3096 score = fragmentation_score_node(pgdat);
3097 /*
3098 * Defer proactive compaction if the fragmentation
3099 * score did not go down, i.e. no progress was made.
3100 */
3101 if (unlikely(score >= prev_score))
3102 timeout =
3103 default_timeout << COMPACT_MAX_DEFER_SHIFT;
3104 }
3105 if (unlikely(pgdat->proactive_compact_trigger))
3106 pgdat->proactive_compact_trigger = false;
3107 }
3108
3109 return 0;
3110 }
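/*
 * Illustrative sketch (not part of this file's build): the proactive wakeup
 * interval used by kcompactd() above, and how it backs off when a pass makes
 * no progress. The kernel works in jiffies via msecs_to_jiffies(); this
 * sketch stays in milliseconds, and COMPACT_MAX_DEFER_SHIFT is assumed to
 * be 6.
 */
#if 0
#include <stdio.h>

#define FRAG_CHECK_INTERVAL_MSEC	500	/* HPAGE_FRAG_CHECK_INTERVAL_MSEC */
#define ASSUMED_COMPACT_MAX_DEFER_SHIFT	6	/* assumption for this sketch */

int main(void)
{
	long timeout = FRAG_CHECK_INTERVAL_MSEC;

	printf("regular proactive wakeup : %5ld ms\n", timeout);
	/* score did not drop after a pass: defer the next wakeup */
	printf("deferred proactive wakeup: %5ld ms\n",
	       timeout << ASSUMED_COMPACT_MAX_DEFER_SHIFT);
	return 0;
}
#endif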
3111
3112 /*
3113 * This kcompactd start function will be called by init and node-hot-add.
3114 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
3115 */
kcompactd_run(int nid)3116 void __meminit kcompactd_run(int nid)
3117 {
3118 pg_data_t *pgdat = NODE_DATA(nid);
3119
3120 if (pgdat->kcompactd)
3121 return;
3122
3123 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
3124 if (IS_ERR(pgdat->kcompactd)) {
3125 pr_err("Failed to start kcompactd on node %d\n", nid);
3126 pgdat->kcompactd = NULL;
3127 }
3128 }
3129
3130 /*
3131 * Called by memory hotplug when all memory in a node is offlined. Caller must
3132 * be holding mem_hotplug_begin/done().
3133 */
kcompactd_stop(int nid)3134 void __meminit kcompactd_stop(int nid)
3135 {
3136 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
3137
3138 if (kcompactd) {
3139 kthread_stop(kcompactd);
3140 NODE_DATA(nid)->kcompactd = NULL;
3141 }
3142 }
3143
3144 /*
3145 * It's optimal to keep kcompactd on the same CPUs as their memory, but
3146 * not required for correctness. So if the last cpu in a node goes
3147 * away, its kcompactd is allowed to run anywhere; as the first one comes back,
3148 * restore its cpu binding.
3149 */
kcompactd_cpu_online(unsigned int cpu)3150 static int kcompactd_cpu_online(unsigned int cpu)
3151 {
3152 int nid;
3153
3154 for_each_node_state(nid, N_MEMORY) {
3155 pg_data_t *pgdat = NODE_DATA(nid);
3156 const struct cpumask *mask;
3157
3158 mask = cpumask_of_node(pgdat->node_id);
3159
3160 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3161 /* One of our CPUs online: restore mask */
3162 if (pgdat->kcompactd)
3163 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
3164 }
3165 return 0;
3166 }
3167
proc_dointvec_minmax_warn_RT_change(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)3168 static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
3169 int write, void *buffer, size_t *lenp, loff_t *ppos)
3170 {
3171 int ret, old;
3172
3173 if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
3174 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3175
3176 old = *(int *)table->data;
3177 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3178 if (ret)
3179 return ret;
3180 if (old != *(int *)table->data)
3181 pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
3182 table->procname, current->comm,
3183 task_pid_nr(current));
3184 return ret;
3185 }
3186
3187 static struct ctl_table vm_compaction[] = {
3188 {
3189 .procname = "compact_memory",
3190 .data = &sysctl_compact_memory,
3191 .maxlen = sizeof(int),
3192 .mode = 0200,
3193 .proc_handler = sysctl_compaction_handler,
3194 },
3195 {
3196 .procname = "compaction_proactiveness",
3197 .data = &sysctl_compaction_proactiveness,
3198 .maxlen = sizeof(sysctl_compaction_proactiveness),
3199 .mode = 0644,
3200 .proc_handler = compaction_proactiveness_sysctl_handler,
3201 .extra1 = SYSCTL_ZERO,
3202 .extra2 = SYSCTL_ONE_HUNDRED,
3203 },
3204 {
3205 .procname = "extfrag_threshold",
3206 .data = &sysctl_extfrag_threshold,
3207 .maxlen = sizeof(int),
3208 .mode = 0644,
3209 .proc_handler = proc_dointvec_minmax,
3210 .extra1 = SYSCTL_ZERO,
3211 .extra2 = SYSCTL_ONE_THOUSAND,
3212 },
3213 {
3214 .procname = "compact_unevictable_allowed",
3215 .data = &sysctl_compact_unevictable_allowed,
3216 .maxlen = sizeof(int),
3217 .mode = 0644,
3218 .proc_handler = proc_dointvec_minmax_warn_RT_change,
3219 .extra1 = SYSCTL_ZERO,
3220 .extra2 = SYSCTL_ONE,
3221 },
3222 { }
3223 };
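/*
 * Illustrative sketch (not part of this file's build): the table above is
 * registered under "vm", so the knobs appear as /proc/sys/vm/<procname>. A
 * minimal userspace program can request a one-off full compaction by writing
 * "1" to compact_memory (root required; the handler rejects any other value).
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/compact_memory", "w");

	if (!f) {
		perror("/proc/sys/vm/compact_memory");
		return 1;
	}
	fputs("1\n", f);	/* sysctl_compaction_handler() only accepts 1 */
	fclose(f);
	return 0;
}
#endif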
3224
kcompactd_init(void)3225 static int __init kcompactd_init(void)
3226 {
3227 int nid;
3228 int ret;
3229
3230 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3231 "mm/compaction:online",
3232 kcompactd_cpu_online, NULL);
3233 if (ret < 0) {
3234 pr_err("kcompactd: failed to register hotplug callbacks.\n");
3235 return ret;
3236 }
3237
3238 for_each_node_state(nid, N_MEMORY)
3239 kcompactd_run(nid);
3240 register_sysctl_init("vm", vm_compaction);
3241 return 0;
3242 }
3243 subsys_initcall(kcompactd_init)
3244
3245 #endif /* CONFIG_COMPACTION */
3246