1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17748446bbSMel Gorman #include "internal.h" 18748446bbSMel Gorman 19ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 20ff9543fdSMichal Nazarewicz 21b7aba698SMel Gorman #define CREATE_TRACE_POINTS 22b7aba698SMel Gorman #include <trace/events/compaction.h> 23b7aba698SMel Gorman 24748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 25748446bbSMel Gorman { 26748446bbSMel Gorman struct page *page, *next; 27748446bbSMel Gorman unsigned long count = 0; 28748446bbSMel Gorman 29748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 30748446bbSMel Gorman list_del(&page->lru); 31748446bbSMel Gorman __free_page(page); 32748446bbSMel Gorman count++; 33748446bbSMel Gorman } 34748446bbSMel Gorman 35748446bbSMel Gorman return count; 36748446bbSMel Gorman } 37748446bbSMel Gorman 38ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 39ff9543fdSMichal Nazarewicz { 40ff9543fdSMichal Nazarewicz struct page *page; 41ff9543fdSMichal Nazarewicz 42ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 43ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 44ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 45ff9543fdSMichal Nazarewicz } 46ff9543fdSMichal Nazarewicz } 47ff9543fdSMichal Nazarewicz 4847118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype) 4947118af0SMichal Nazarewicz { 5047118af0SMichal Nazarewicz return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; 5147118af0SMichal Nazarewicz } 5247118af0SMichal Nazarewicz 5385aa125fSMichal Nazarewicz /* 5485aa125fSMichal Nazarewicz * Isolate free pages onto a private freelist. Caller must hold zone->lock. 5585aa125fSMichal Nazarewicz * If @strict is true, will abort returning 0 on any invalid PFNs or non-free 5685aa125fSMichal Nazarewicz * pages inside of the pageblock (even though it may still end up isolating 5785aa125fSMichal Nazarewicz * some pages). 5885aa125fSMichal Nazarewicz */ 5985aa125fSMichal Nazarewicz static unsigned long isolate_freepages_block(unsigned long blockpfn, 6085aa125fSMichal Nazarewicz unsigned long end_pfn, 6185aa125fSMichal Nazarewicz struct list_head *freelist, 6285aa125fSMichal Nazarewicz bool strict) 63748446bbSMel Gorman { 64b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 65748446bbSMel Gorman struct page *cursor; 66748446bbSMel Gorman 67748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 68748446bbSMel Gorman 69748446bbSMel Gorman /* Isolate free pages. 
This assumes the block is valid */ 70748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 71748446bbSMel Gorman int isolated, i; 72748446bbSMel Gorman struct page *page = cursor; 73748446bbSMel Gorman 7485aa125fSMichal Nazarewicz if (!pfn_valid_within(blockpfn)) { 7585aa125fSMichal Nazarewicz if (strict) 7685aa125fSMichal Nazarewicz return 0; 77748446bbSMel Gorman continue; 7885aa125fSMichal Nazarewicz } 79b7aba698SMel Gorman nr_scanned++; 80748446bbSMel Gorman 8185aa125fSMichal Nazarewicz if (!PageBuddy(page)) { 8285aa125fSMichal Nazarewicz if (strict) 8385aa125fSMichal Nazarewicz return 0; 84748446bbSMel Gorman continue; 8585aa125fSMichal Nazarewicz } 86748446bbSMel Gorman 87748446bbSMel Gorman /* Found a free page, break it into order-0 pages */ 88748446bbSMel Gorman isolated = split_free_page(page); 8985aa125fSMichal Nazarewicz if (!isolated && strict) 9085aa125fSMichal Nazarewicz return 0; 91748446bbSMel Gorman total_isolated += isolated; 92748446bbSMel Gorman for (i = 0; i < isolated; i++) { 93748446bbSMel Gorman list_add(&page->lru, freelist); 94748446bbSMel Gorman page++; 95748446bbSMel Gorman } 96748446bbSMel Gorman 97748446bbSMel Gorman /* If a page was split, advance to the end of it */ 98748446bbSMel Gorman if (isolated) { 99748446bbSMel Gorman blockpfn += isolated - 1; 100748446bbSMel Gorman cursor += isolated - 1; 101748446bbSMel Gorman } 102748446bbSMel Gorman } 103748446bbSMel Gorman 104b7aba698SMel Gorman trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); 105748446bbSMel Gorman return total_isolated; 106748446bbSMel Gorman } 107748446bbSMel Gorman 10885aa125fSMichal Nazarewicz /** 10985aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 11085aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 11185aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 11285aa125fSMichal Nazarewicz * 11385aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the 11485aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors, cause function to 11585aa125fSMichal Nazarewicz * undo its actions and return zero. 11685aa125fSMichal Nazarewicz * 11785aa125fSMichal Nazarewicz * Otherwise, function returns one-past-the-last PFN of isolated page 11885aa125fSMichal Nazarewicz * (which may be greater then end_pfn if end fell in a middle of 11985aa125fSMichal Nazarewicz * a free page). 12085aa125fSMichal Nazarewicz */ 121ff9543fdSMichal Nazarewicz unsigned long 12285aa125fSMichal Nazarewicz isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) 12385aa125fSMichal Nazarewicz { 12485aa125fSMichal Nazarewicz unsigned long isolated, pfn, block_end_pfn, flags; 12585aa125fSMichal Nazarewicz struct zone *zone = NULL; 12685aa125fSMichal Nazarewicz LIST_HEAD(freelist); 12785aa125fSMichal Nazarewicz 12885aa125fSMichal Nazarewicz if (pfn_valid(start_pfn)) 12985aa125fSMichal Nazarewicz zone = page_zone(pfn_to_page(start_pfn)); 13085aa125fSMichal Nazarewicz 13185aa125fSMichal Nazarewicz for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) { 13285aa125fSMichal Nazarewicz if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn))) 13385aa125fSMichal Nazarewicz break; 13485aa125fSMichal Nazarewicz 13585aa125fSMichal Nazarewicz /* 13685aa125fSMichal Nazarewicz * On subsequent iterations ALIGN() is actually not needed, 13785aa125fSMichal Nazarewicz * but we keep it that we not to complicate the code. 
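 * For example, with pageblock_nr_pages of, say, 512, a start pfn of 1000
 * gives block_end_pfn = ALIGN(1001, 512) = 1024, i.e. the end of the
 * current pageblock; later iterations already start on a pageblock
 * boundary, so ALIGN() changes nothing there.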
13885aa125fSMichal Nazarewicz */ 13985aa125fSMichal Nazarewicz block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 14085aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn); 14185aa125fSMichal Nazarewicz 14285aa125fSMichal Nazarewicz spin_lock_irqsave(&zone->lock, flags); 14385aa125fSMichal Nazarewicz isolated = isolate_freepages_block(pfn, block_end_pfn, 14485aa125fSMichal Nazarewicz &freelist, true); 14585aa125fSMichal Nazarewicz spin_unlock_irqrestore(&zone->lock, flags); 14685aa125fSMichal Nazarewicz 14785aa125fSMichal Nazarewicz /* 14885aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 14985aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or 15085aa125fSMichal Nazarewicz * non-free pages). 15185aa125fSMichal Nazarewicz */ 15285aa125fSMichal Nazarewicz if (!isolated) 15385aa125fSMichal Nazarewicz break; 15485aa125fSMichal Nazarewicz 15585aa125fSMichal Nazarewicz /* 15685aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 15785aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order 15885aa125fSMichal Nazarewicz * page may span two pageblocks). 15985aa125fSMichal Nazarewicz */ 16085aa125fSMichal Nazarewicz } 16185aa125fSMichal Nazarewicz 16285aa125fSMichal Nazarewicz /* split_free_page does not map the pages */ 16385aa125fSMichal Nazarewicz map_pages(&freelist); 16485aa125fSMichal Nazarewicz 16585aa125fSMichal Nazarewicz if (pfn < end_pfn) { 16685aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 16785aa125fSMichal Nazarewicz release_freepages(&freelist); 16885aa125fSMichal Nazarewicz return 0; 16985aa125fSMichal Nazarewicz } 17085aa125fSMichal Nazarewicz 17185aa125fSMichal Nazarewicz /* We don't use freelists for anything. 
*/ 17285aa125fSMichal Nazarewicz return pfn; 17385aa125fSMichal Nazarewicz } 17485aa125fSMichal Nazarewicz 175748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */ 176748446bbSMel Gorman static void acct_isolated(struct zone *zone, struct compact_control *cc) 177748446bbSMel Gorman { 178748446bbSMel Gorman struct page *page; 179b9e84ac1SMinchan Kim unsigned int count[2] = { 0, }; 180748446bbSMel Gorman 181b9e84ac1SMinchan Kim list_for_each_entry(page, &cc->migratepages, lru) 182b9e84ac1SMinchan Kim count[!!page_is_file_cache(page)]++; 183748446bbSMel Gorman 184b9e84ac1SMinchan Kim __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); 185b9e84ac1SMinchan Kim __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); 186748446bbSMel Gorman } 187748446bbSMel Gorman 188748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 189748446bbSMel Gorman static bool too_many_isolated(struct zone *zone) 190748446bbSMel Gorman { 191bc693045SMinchan Kim unsigned long active, inactive, isolated; 192748446bbSMel Gorman 193748446bbSMel Gorman inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 194748446bbSMel Gorman zone_page_state(zone, NR_INACTIVE_ANON); 195bc693045SMinchan Kim active = zone_page_state(zone, NR_ACTIVE_FILE) + 196bc693045SMinchan Kim zone_page_state(zone, NR_ACTIVE_ANON); 197748446bbSMel Gorman isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 198748446bbSMel Gorman zone_page_state(zone, NR_ISOLATED_ANON); 199748446bbSMel Gorman 200bc693045SMinchan Kim return isolated > (inactive + active) / 2; 201748446bbSMel Gorman } 202748446bbSMel Gorman 2032fe86e00SMichal Nazarewicz /** 2042fe86e00SMichal Nazarewicz * isolate_migratepages_range() - isolate all migrate-able pages in range. 2052fe86e00SMichal Nazarewicz * @zone: Zone pages are in. 2062fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 2072fe86e00SMichal Nazarewicz * @low_pfn: The first PFN of the range. 2082fe86e00SMichal Nazarewicz * @end_pfn: The one-past-the-last PFN of the range. 2092fe86e00SMichal Nazarewicz * 2102fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 2112fe86e00SMichal Nazarewicz * [low_pfn, end_pfn). Returns zero if there is a fatal signal 2122fe86e00SMichal Nazarewicz * pending), otherwise PFN of the first page that was not scanned 2132fe86e00SMichal Nazarewicz * (which may be both less, equal to or more then end_pfn). 2142fe86e00SMichal Nazarewicz * 2152fe86e00SMichal Nazarewicz * Assumes that cc->migratepages is empty and cc->nr_migratepages is 2162fe86e00SMichal Nazarewicz * zero. 2172fe86e00SMichal Nazarewicz * 2182fe86e00SMichal Nazarewicz * Apart from cc->migratepages and cc->nr_migratetypes this function 2192fe86e00SMichal Nazarewicz * does not modify any cc's fields, in particular it does not modify 2202fe86e00SMichal Nazarewicz * (or read for that matter) cc->migrate_pfn. 
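 *
 * isolate_migratepages() below is the in-file caller; the function is
 * non-static so that outside users (for instance CMA's contiguous range
 * allocator) can reuse the same isolation logic over an arbitrary PFN range.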
221748446bbSMel Gorman */ 222ff9543fdSMichal Nazarewicz unsigned long 2232fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc, 2242fe86e00SMichal Nazarewicz unsigned long low_pfn, unsigned long end_pfn) 225748446bbSMel Gorman { 2269927af74SMel Gorman unsigned long last_pageblock_nr = 0, pageblock_nr; 227b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 228748446bbSMel Gorman struct list_head *migratelist = &cc->migratepages; 229*f3fd4a61SKonstantin Khlebnikov isolate_mode_t mode = 0; 230748446bbSMel Gorman 231748446bbSMel Gorman /* 232748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 233748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 234748446bbSMel Gorman * delay for some time until fewer pages are isolated 235748446bbSMel Gorman */ 236748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 237f9e35b3bSMel Gorman /* async migration should just abort */ 2385ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode != COMPACT_SYNC) 2392fe86e00SMichal Nazarewicz return 0; 240f9e35b3bSMel Gorman 241748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 242748446bbSMel Gorman 243748446bbSMel Gorman if (fatal_signal_pending(current)) 2442fe86e00SMichal Nazarewicz return 0; 245748446bbSMel Gorman } 246748446bbSMel Gorman 247748446bbSMel Gorman /* Time to isolate some pages for migration */ 248b2eef8c0SAndrea Arcangeli cond_resched(); 249748446bbSMel Gorman spin_lock_irq(&zone->lru_lock); 250748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 251748446bbSMel Gorman struct page *page; 252b2eef8c0SAndrea Arcangeli bool locked = true; 253b2eef8c0SAndrea Arcangeli 254b2eef8c0SAndrea Arcangeli /* give a chance to irqs before checking need_resched() */ 255b2eef8c0SAndrea Arcangeli if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) { 256b2eef8c0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock); 257b2eef8c0SAndrea Arcangeli locked = false; 258b2eef8c0SAndrea Arcangeli } 259b2eef8c0SAndrea Arcangeli if (need_resched() || spin_is_contended(&zone->lru_lock)) { 260b2eef8c0SAndrea Arcangeli if (locked) 261b2eef8c0SAndrea Arcangeli spin_unlock_irq(&zone->lru_lock); 262b2eef8c0SAndrea Arcangeli cond_resched(); 263b2eef8c0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock); 264b2eef8c0SAndrea Arcangeli if (fatal_signal_pending(current)) 265b2eef8c0SAndrea Arcangeli break; 266b2eef8c0SAndrea Arcangeli } else if (!locked) 267b2eef8c0SAndrea Arcangeli spin_lock_irq(&zone->lru_lock); 268b2eef8c0SAndrea Arcangeli 2690bf380bcSMel Gorman /* 2700bf380bcSMel Gorman * migrate_pfn does not necessarily start aligned to a 2710bf380bcSMel Gorman * pageblock. Ensure that pfn_valid is called when moving 2720bf380bcSMel Gorman * into a new MAX_ORDER_NR_PAGES range in case of large 2730bf380bcSMel Gorman * memory holes within the zone 2740bf380bcSMel Gorman */ 2750bf380bcSMel Gorman if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { 2760bf380bcSMel Gorman if (!pfn_valid(low_pfn)) { 2770bf380bcSMel Gorman low_pfn += MAX_ORDER_NR_PAGES - 1; 2780bf380bcSMel Gorman continue; 2790bf380bcSMel Gorman } 2800bf380bcSMel Gorman } 2810bf380bcSMel Gorman 282748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 283748446bbSMel Gorman continue; 284b7aba698SMel Gorman nr_scanned++; 285748446bbSMel Gorman 286dc908600SMel Gorman /* 287dc908600SMel Gorman * Get the page and ensure the page is within the same zone. 288dc908600SMel Gorman * See the comment in isolate_freepages about overlapping 289dc908600SMel Gorman * nodes. 
It is deliberate that the new zone lock is not taken 290dc908600SMel Gorman * as memory compaction should not move pages between nodes. 291dc908600SMel Gorman */ 292748446bbSMel Gorman page = pfn_to_page(low_pfn); 293dc908600SMel Gorman if (page_zone(page) != zone) 294dc908600SMel Gorman continue; 295dc908600SMel Gorman 296dc908600SMel Gorman /* Skip if free */ 297748446bbSMel Gorman if (PageBuddy(page)) 298748446bbSMel Gorman continue; 299748446bbSMel Gorman 3009927af74SMel Gorman /* 3019927af74SMel Gorman * For async migration, also only scan in MOVABLE blocks. Async 3029927af74SMel Gorman * migration is optimistic to see if the minimum amount of work 3039927af74SMel Gorman * satisfies the allocation 3049927af74SMel Gorman */ 3059927af74SMel Gorman pageblock_nr = low_pfn >> pageblock_order; 3065ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode != COMPACT_SYNC && 3075ceb9ce6SBartlomiej Zolnierkiewicz last_pageblock_nr != pageblock_nr && 30847118af0SMichal Nazarewicz !migrate_async_suitable(get_pageblock_migratetype(page))) { 3099927af74SMel Gorman low_pfn += pageblock_nr_pages; 3109927af74SMel Gorman low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; 3119927af74SMel Gorman last_pageblock_nr = pageblock_nr; 3129927af74SMel Gorman continue; 3139927af74SMel Gorman } 3149927af74SMel Gorman 315bc835011SAndrea Arcangeli if (!PageLRU(page)) 316bc835011SAndrea Arcangeli continue; 317bc835011SAndrea Arcangeli 318bc835011SAndrea Arcangeli /* 319bc835011SAndrea Arcangeli * PageLRU is set, and lru_lock excludes isolation, 320bc835011SAndrea Arcangeli * splitting and collapsing (collapsing has already 321bc835011SAndrea Arcangeli * happened if PageLRU is set). 322bc835011SAndrea Arcangeli */ 323bc835011SAndrea Arcangeli if (PageTransHuge(page)) { 324bc835011SAndrea Arcangeli low_pfn += (1 << compound_order(page)) - 1; 325bc835011SAndrea Arcangeli continue; 326bc835011SAndrea Arcangeli } 327bc835011SAndrea Arcangeli 3285ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode != COMPACT_SYNC) 329c8244935SMel Gorman mode |= ISOLATE_ASYNC_MIGRATE; 330c8244935SMel Gorman 331748446bbSMel Gorman /* Try isolate the page */ 332*f3fd4a61SKonstantin Khlebnikov if (__isolate_lru_page(page, mode) != 0) 333748446bbSMel Gorman continue; 334748446bbSMel Gorman 335bc835011SAndrea Arcangeli VM_BUG_ON(PageTransCompound(page)); 336bc835011SAndrea Arcangeli 337748446bbSMel Gorman /* Successfully isolated */ 338748446bbSMel Gorman del_page_from_lru_list(zone, page, page_lru(page)); 339748446bbSMel Gorman list_add(&page->lru, migratelist); 340748446bbSMel Gorman cc->nr_migratepages++; 341b7aba698SMel Gorman nr_isolated++; 342748446bbSMel Gorman 343748446bbSMel Gorman /* Avoid isolating too much */ 34431b8384aSHillf Danton if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) { 34531b8384aSHillf Danton ++low_pfn; 346748446bbSMel Gorman break; 347748446bbSMel Gorman } 34831b8384aSHillf Danton } 349748446bbSMel Gorman 350748446bbSMel Gorman acct_isolated(zone, cc); 351748446bbSMel Gorman 352748446bbSMel Gorman spin_unlock_irq(&zone->lru_lock); 353748446bbSMel Gorman 354b7aba698SMel Gorman trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 355b7aba698SMel Gorman 3562fe86e00SMichal Nazarewicz return low_pfn; 3572fe86e00SMichal Nazarewicz } 3582fe86e00SMichal Nazarewicz 359ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 360ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 3615ceb9ce6SBartlomiej Zolnierkiewicz /* 3625ceb9ce6SBartlomiej Zolnierkiewicz * Returns true if MIGRATE_UNMOVABLE pageblock was 
successfully 3635ceb9ce6SBartlomiej Zolnierkiewicz * converted to MIGRATE_MOVABLE type, false otherwise. 3645ceb9ce6SBartlomiej Zolnierkiewicz */ 3655ceb9ce6SBartlomiej Zolnierkiewicz static bool rescue_unmovable_pageblock(struct page *page) 3665ceb9ce6SBartlomiej Zolnierkiewicz { 3675ceb9ce6SBartlomiej Zolnierkiewicz unsigned long pfn, start_pfn, end_pfn; 3685ceb9ce6SBartlomiej Zolnierkiewicz struct page *start_page, *end_page; 369ff9543fdSMichal Nazarewicz 3705ceb9ce6SBartlomiej Zolnierkiewicz pfn = page_to_pfn(page); 3715ceb9ce6SBartlomiej Zolnierkiewicz start_pfn = pfn & ~(pageblock_nr_pages - 1); 3725ceb9ce6SBartlomiej Zolnierkiewicz end_pfn = start_pfn + pageblock_nr_pages; 3735ceb9ce6SBartlomiej Zolnierkiewicz 3745ceb9ce6SBartlomiej Zolnierkiewicz start_page = pfn_to_page(start_pfn); 3755ceb9ce6SBartlomiej Zolnierkiewicz end_page = pfn_to_page(end_pfn); 3765ceb9ce6SBartlomiej Zolnierkiewicz 3775ceb9ce6SBartlomiej Zolnierkiewicz /* Do not deal with pageblocks that overlap zones */ 3785ceb9ce6SBartlomiej Zolnierkiewicz if (page_zone(start_page) != page_zone(end_page)) 3795ceb9ce6SBartlomiej Zolnierkiewicz return false; 3805ceb9ce6SBartlomiej Zolnierkiewicz 3815ceb9ce6SBartlomiej Zolnierkiewicz for (page = start_page, pfn = start_pfn; page < end_page; pfn++, 3825ceb9ce6SBartlomiej Zolnierkiewicz page++) { 3835ceb9ce6SBartlomiej Zolnierkiewicz if (!pfn_valid_within(pfn)) 3845ceb9ce6SBartlomiej Zolnierkiewicz continue; 3855ceb9ce6SBartlomiej Zolnierkiewicz 3865ceb9ce6SBartlomiej Zolnierkiewicz if (PageBuddy(page)) { 3875ceb9ce6SBartlomiej Zolnierkiewicz int order = page_order(page); 3885ceb9ce6SBartlomiej Zolnierkiewicz 3895ceb9ce6SBartlomiej Zolnierkiewicz pfn += (1 << order) - 1; 3905ceb9ce6SBartlomiej Zolnierkiewicz page += (1 << order) - 1; 3915ceb9ce6SBartlomiej Zolnierkiewicz 3925ceb9ce6SBartlomiej Zolnierkiewicz continue; 3935ceb9ce6SBartlomiej Zolnierkiewicz } else if (page_count(page) == 0 || PageLRU(page)) 3945ceb9ce6SBartlomiej Zolnierkiewicz continue; 3955ceb9ce6SBartlomiej Zolnierkiewicz 3965ceb9ce6SBartlomiej Zolnierkiewicz return false; 3975ceb9ce6SBartlomiej Zolnierkiewicz } 3985ceb9ce6SBartlomiej Zolnierkiewicz 3995ceb9ce6SBartlomiej Zolnierkiewicz set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4005ceb9ce6SBartlomiej Zolnierkiewicz move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE); 4015ceb9ce6SBartlomiej Zolnierkiewicz return true; 4025ceb9ce6SBartlomiej Zolnierkiewicz } 4035ceb9ce6SBartlomiej Zolnierkiewicz 4045ceb9ce6SBartlomiej Zolnierkiewicz enum smt_result { 4055ceb9ce6SBartlomiej Zolnierkiewicz GOOD_AS_MIGRATION_TARGET, 4065ceb9ce6SBartlomiej Zolnierkiewicz FAIL_UNMOVABLE_TARGET, 4075ceb9ce6SBartlomiej Zolnierkiewicz FAIL_BAD_TARGET, 4085ceb9ce6SBartlomiej Zolnierkiewicz }; 4095ceb9ce6SBartlomiej Zolnierkiewicz 4105ceb9ce6SBartlomiej Zolnierkiewicz /* 4115ceb9ce6SBartlomiej Zolnierkiewicz * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block 4125ceb9ce6SBartlomiej Zolnierkiewicz * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page 4135ceb9ce6SBartlomiej Zolnierkiewicz * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise. 
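 *
 * FAIL_UNMOVABLE_TARGET is only returned for COMPACT_ASYNC_MOVABLE
 * compaction; the caller counts such blocks in cc->nr_pageblocks_skipped
 * so that a later COMPACT_ASYNC_UNMOVABLE pass can revisit them, at which
 * point rescue_unmovable_pageblock() gets a chance to convert the block
 * to MIGRATE_MOVABLE.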
4145ceb9ce6SBartlomiej Zolnierkiewicz */ 4155ceb9ce6SBartlomiej Zolnierkiewicz static enum smt_result suitable_migration_target(struct page *page, 4165ceb9ce6SBartlomiej Zolnierkiewicz struct compact_control *cc) 4172fe86e00SMichal Nazarewicz { 4182fe86e00SMichal Nazarewicz 419ff9543fdSMichal Nazarewicz int migratetype = get_pageblock_migratetype(page); 4202fe86e00SMichal Nazarewicz 421ff9543fdSMichal Nazarewicz /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 422ff9543fdSMichal Nazarewicz if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) 4235ceb9ce6SBartlomiej Zolnierkiewicz return FAIL_BAD_TARGET; 4242fe86e00SMichal Nazarewicz 425ff9543fdSMichal Nazarewicz /* If the page is a large free page, then allow migration */ 426ff9543fdSMichal Nazarewicz if (PageBuddy(page) && page_order(page) >= pageblock_order) 4275ceb9ce6SBartlomiej Zolnierkiewicz return GOOD_AS_MIGRATION_TARGET; 428ff9543fdSMichal Nazarewicz 42947118af0SMichal Nazarewicz /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 4305ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode != COMPACT_ASYNC_UNMOVABLE && 4315ceb9ce6SBartlomiej Zolnierkiewicz migrate_async_suitable(migratetype)) 4325ceb9ce6SBartlomiej Zolnierkiewicz return GOOD_AS_MIGRATION_TARGET; 4335ceb9ce6SBartlomiej Zolnierkiewicz 4345ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode == COMPACT_ASYNC_MOVABLE && 4355ceb9ce6SBartlomiej Zolnierkiewicz migratetype == MIGRATE_UNMOVABLE) 4365ceb9ce6SBartlomiej Zolnierkiewicz return FAIL_UNMOVABLE_TARGET; 4375ceb9ce6SBartlomiej Zolnierkiewicz 4385ceb9ce6SBartlomiej Zolnierkiewicz if (cc->mode != COMPACT_ASYNC_MOVABLE && 4395ceb9ce6SBartlomiej Zolnierkiewicz migratetype == MIGRATE_UNMOVABLE && 4405ceb9ce6SBartlomiej Zolnierkiewicz rescue_unmovable_pageblock(page)) 4415ceb9ce6SBartlomiej Zolnierkiewicz return GOOD_AS_MIGRATION_TARGET; 442ff9543fdSMichal Nazarewicz 443ff9543fdSMichal Nazarewicz /* Otherwise skip the block */ 4445ceb9ce6SBartlomiej Zolnierkiewicz return FAIL_BAD_TARGET; 4452fe86e00SMichal Nazarewicz } 4462fe86e00SMichal Nazarewicz 447ff9543fdSMichal Nazarewicz /* 448ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 449ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 450ff9543fdSMichal Nazarewicz */ 451ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone, 452ff9543fdSMichal Nazarewicz struct compact_control *cc) 453ff9543fdSMichal Nazarewicz { 454ff9543fdSMichal Nazarewicz struct page *page; 455ff9543fdSMichal Nazarewicz unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn; 456ff9543fdSMichal Nazarewicz unsigned long flags; 457ff9543fdSMichal Nazarewicz int nr_freepages = cc->nr_freepages; 458ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 4592fe86e00SMichal Nazarewicz 460ff9543fdSMichal Nazarewicz /* 461ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 462ff9543fdSMichal Nazarewicz * scanned from (or the end of the zone if starting). The low point 463ff9543fdSMichal Nazarewicz * is the end of the pageblock the migration scanner is using. 
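 *
 * The two scanners work towards each other: the migrate scanner moves up
 * from zone_start_pfn while the free scanner moves down from the zone end,
 * and compact_finished() declares the run complete once free_pfn drops to
 * migrate_pfn or below.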
464ff9543fdSMichal Nazarewicz */ 465ff9543fdSMichal Nazarewicz pfn = cc->free_pfn; 466ff9543fdSMichal Nazarewicz low_pfn = cc->migrate_pfn + pageblock_nr_pages; 4672fe86e00SMichal Nazarewicz 468ff9543fdSMichal Nazarewicz /* 469ff9543fdSMichal Nazarewicz * Take care that if the migration scanner is at the end of the zone 470ff9543fdSMichal Nazarewicz * that the free scanner does not accidentally move to the next zone 471ff9543fdSMichal Nazarewicz * in the next isolation cycle. 472ff9543fdSMichal Nazarewicz */ 473ff9543fdSMichal Nazarewicz high_pfn = min(low_pfn, pfn); 474ff9543fdSMichal Nazarewicz 475ff9543fdSMichal Nazarewicz zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 476ff9543fdSMichal Nazarewicz 477ff9543fdSMichal Nazarewicz /* 4785ceb9ce6SBartlomiej Zolnierkiewicz * isolate_freepages() may be called more than once during 4795ceb9ce6SBartlomiej Zolnierkiewicz * compact_zone_order() run and we want only the most recent 4805ceb9ce6SBartlomiej Zolnierkiewicz * count. 4815ceb9ce6SBartlomiej Zolnierkiewicz */ 4825ceb9ce6SBartlomiej Zolnierkiewicz cc->nr_pageblocks_skipped = 0; 4835ceb9ce6SBartlomiej Zolnierkiewicz 4845ceb9ce6SBartlomiej Zolnierkiewicz /* 485ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 486ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 487ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 488ff9543fdSMichal Nazarewicz */ 489ff9543fdSMichal Nazarewicz for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; 490ff9543fdSMichal Nazarewicz pfn -= pageblock_nr_pages) { 491ff9543fdSMichal Nazarewicz unsigned long isolated; 4925ceb9ce6SBartlomiej Zolnierkiewicz enum smt_result ret; 493ff9543fdSMichal Nazarewicz 494ff9543fdSMichal Nazarewicz if (!pfn_valid(pfn)) 495ff9543fdSMichal Nazarewicz continue; 496ff9543fdSMichal Nazarewicz 497ff9543fdSMichal Nazarewicz /* 498ff9543fdSMichal Nazarewicz * Check for overlapping nodes/zones. It's possible on some 499ff9543fdSMichal Nazarewicz * configurations to have a setup like 500ff9543fdSMichal Nazarewicz * node0 node1 node0 501ff9543fdSMichal Nazarewicz * i.e. it's possible that all pages within a zones range of 502ff9543fdSMichal Nazarewicz * pages do not belong to a single zone. 503ff9543fdSMichal Nazarewicz */ 504ff9543fdSMichal Nazarewicz page = pfn_to_page(pfn); 505ff9543fdSMichal Nazarewicz if (page_zone(page) != zone) 506ff9543fdSMichal Nazarewicz continue; 507ff9543fdSMichal Nazarewicz 508ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 5095ceb9ce6SBartlomiej Zolnierkiewicz ret = suitable_migration_target(page, cc); 5105ceb9ce6SBartlomiej Zolnierkiewicz if (ret != GOOD_AS_MIGRATION_TARGET) { 5115ceb9ce6SBartlomiej Zolnierkiewicz if (ret == FAIL_UNMOVABLE_TARGET) 5125ceb9ce6SBartlomiej Zolnierkiewicz cc->nr_pageblocks_skipped++; 513ff9543fdSMichal Nazarewicz continue; 5145ceb9ce6SBartlomiej Zolnierkiewicz } 515ff9543fdSMichal Nazarewicz /* 516ff9543fdSMichal Nazarewicz * Found a block suitable for isolating free pages from. Now 517ff9543fdSMichal Nazarewicz * we disabled interrupts, double check things are ok and 518ff9543fdSMichal Nazarewicz * isolate the pages. 
This is to minimise the time IRQs 519ff9543fdSMichal Nazarewicz * are disabled 520ff9543fdSMichal Nazarewicz */ 521ff9543fdSMichal Nazarewicz isolated = 0; 522ff9543fdSMichal Nazarewicz spin_lock_irqsave(&zone->lock, flags); 5235ceb9ce6SBartlomiej Zolnierkiewicz ret = suitable_migration_target(page, cc); 5245ceb9ce6SBartlomiej Zolnierkiewicz if (ret == GOOD_AS_MIGRATION_TARGET) { 525ff9543fdSMichal Nazarewicz end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); 526ff9543fdSMichal Nazarewicz isolated = isolate_freepages_block(pfn, end_pfn, 527ff9543fdSMichal Nazarewicz freelist, false); 528ff9543fdSMichal Nazarewicz nr_freepages += isolated; 5295ceb9ce6SBartlomiej Zolnierkiewicz } else if (ret == FAIL_UNMOVABLE_TARGET) 5305ceb9ce6SBartlomiej Zolnierkiewicz cc->nr_pageblocks_skipped++; 531ff9543fdSMichal Nazarewicz spin_unlock_irqrestore(&zone->lock, flags); 532ff9543fdSMichal Nazarewicz 533ff9543fdSMichal Nazarewicz /* 534ff9543fdSMichal Nazarewicz * Record the highest PFN we isolated pages from. When next 535ff9543fdSMichal Nazarewicz * looking for free pages, the search will restart here as 536ff9543fdSMichal Nazarewicz * page migration may have returned some pages to the allocator 537ff9543fdSMichal Nazarewicz */ 538ff9543fdSMichal Nazarewicz if (isolated) 539ff9543fdSMichal Nazarewicz high_pfn = max(high_pfn, pfn); 540ff9543fdSMichal Nazarewicz } 541ff9543fdSMichal Nazarewicz 542ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 543ff9543fdSMichal Nazarewicz map_pages(freelist); 544ff9543fdSMichal Nazarewicz 545ff9543fdSMichal Nazarewicz cc->free_pfn = high_pfn; 546ff9543fdSMichal Nazarewicz cc->nr_freepages = nr_freepages; 547748446bbSMel Gorman } 548748446bbSMel Gorman 549748446bbSMel Gorman /* 550748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 551748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 552748446bbSMel Gorman */ 553748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 554748446bbSMel Gorman unsigned long data, 555748446bbSMel Gorman int **result) 556748446bbSMel Gorman { 557748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 558748446bbSMel Gorman struct page *freepage; 559748446bbSMel Gorman 560748446bbSMel Gorman /* Isolate free pages if necessary */ 561748446bbSMel Gorman if (list_empty(&cc->freepages)) { 562748446bbSMel Gorman isolate_freepages(cc->zone, cc); 563748446bbSMel Gorman 564748446bbSMel Gorman if (list_empty(&cc->freepages)) 565748446bbSMel Gorman return NULL; 566748446bbSMel Gorman } 567748446bbSMel Gorman 568748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 569748446bbSMel Gorman list_del(&freepage->lru); 570748446bbSMel Gorman cc->nr_freepages--; 571748446bbSMel Gorman 572748446bbSMel Gorman return freepage; 573748446bbSMel Gorman } 574748446bbSMel Gorman 575748446bbSMel Gorman /* 576748446bbSMel Gorman * We cannot control nr_migratepages and nr_freepages fully when migration is 577748446bbSMel Gorman * running as migrate_pages() has no knowledge of compact_control. When 578748446bbSMel Gorman * migration is complete, we count the number of pages on the lists by hand. 
579748446bbSMel Gorman */ 580748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc) 581748446bbSMel Gorman { 582748446bbSMel Gorman int nr_migratepages = 0; 583748446bbSMel Gorman int nr_freepages = 0; 584748446bbSMel Gorman struct page *page; 585748446bbSMel Gorman 586748446bbSMel Gorman list_for_each_entry(page, &cc->migratepages, lru) 587748446bbSMel Gorman nr_migratepages++; 588748446bbSMel Gorman list_for_each_entry(page, &cc->freepages, lru) 589748446bbSMel Gorman nr_freepages++; 590748446bbSMel Gorman 591748446bbSMel Gorman cc->nr_migratepages = nr_migratepages; 592748446bbSMel Gorman cc->nr_freepages = nr_freepages; 593748446bbSMel Gorman } 594748446bbSMel Gorman 595ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 596ff9543fdSMichal Nazarewicz typedef enum { 597ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 598ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 599ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 600ff9543fdSMichal Nazarewicz } isolate_migrate_t; 601ff9543fdSMichal Nazarewicz 602ff9543fdSMichal Nazarewicz /* 603ff9543fdSMichal Nazarewicz * Isolate all pages that can be migrated from the block pointed to by 604ff9543fdSMichal Nazarewicz * the migrate scanner within compact_control. 605ff9543fdSMichal Nazarewicz */ 606ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 607ff9543fdSMichal Nazarewicz struct compact_control *cc) 608ff9543fdSMichal Nazarewicz { 609ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 610ff9543fdSMichal Nazarewicz 611ff9543fdSMichal Nazarewicz /* Do not scan outside zone boundaries */ 612ff9543fdSMichal Nazarewicz low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 613ff9543fdSMichal Nazarewicz 614ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 615ff9543fdSMichal Nazarewicz end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages); 616ff9543fdSMichal Nazarewicz 617ff9543fdSMichal Nazarewicz /* Do not cross the free scanner or scan within a memory hole */ 618ff9543fdSMichal Nazarewicz if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 619ff9543fdSMichal Nazarewicz cc->migrate_pfn = end_pfn; 620ff9543fdSMichal Nazarewicz return ISOLATE_NONE; 621ff9543fdSMichal Nazarewicz } 622ff9543fdSMichal Nazarewicz 623ff9543fdSMichal Nazarewicz /* Perform the isolation */ 624ff9543fdSMichal Nazarewicz low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn); 625ff9543fdSMichal Nazarewicz if (!low_pfn) 626ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 627ff9543fdSMichal Nazarewicz 628ff9543fdSMichal Nazarewicz cc->migrate_pfn = low_pfn; 629ff9543fdSMichal Nazarewicz 630ff9543fdSMichal Nazarewicz return ISOLATE_SUCCESS; 631ff9543fdSMichal Nazarewicz } 632ff9543fdSMichal Nazarewicz 633748446bbSMel Gorman static int compact_finished(struct zone *zone, 634748446bbSMel Gorman struct compact_control *cc) 635748446bbSMel Gorman { 63656de7263SMel Gorman unsigned int order; 6375a03b051SAndrea Arcangeli unsigned long watermark; 63856de7263SMel Gorman 639748446bbSMel Gorman if (fatal_signal_pending(current)) 640748446bbSMel Gorman return COMPACT_PARTIAL; 641748446bbSMel Gorman 642748446bbSMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 643748446bbSMel Gorman if (cc->free_pfn <= cc->migrate_pfn) 644748446bbSMel Gorman return COMPACT_COMPLETE; 645748446bbSMel Gorman 64682478fb7SJohannes Weiner /* 64782478fb7SJohannes 
Weiner * order == -1 is expected when compacting via 64882478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 64982478fb7SJohannes Weiner */ 65056de7263SMel Gorman if (cc->order == -1) 65156de7263SMel Gorman return COMPACT_CONTINUE; 65256de7263SMel Gorman 6533957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 6543957c776SMichal Hocko watermark = low_wmark_pages(zone); 6553957c776SMichal Hocko watermark += (1 << cc->order); 6563957c776SMichal Hocko 6573957c776SMichal Hocko if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 6583957c776SMichal Hocko return COMPACT_CONTINUE; 6593957c776SMichal Hocko 66056de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 66156de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 66256de7263SMel Gorman /* Job done if page is free of the right migratetype */ 66356de7263SMel Gorman if (!list_empty(&zone->free_area[order].free_list[cc->migratetype])) 66456de7263SMel Gorman return COMPACT_PARTIAL; 66556de7263SMel Gorman 66656de7263SMel Gorman /* Job done if allocation would set block type */ 66756de7263SMel Gorman if (order >= pageblock_order && zone->free_area[order].nr_free) 66856de7263SMel Gorman return COMPACT_PARTIAL; 66956de7263SMel Gorman } 67056de7263SMel Gorman 671748446bbSMel Gorman return COMPACT_CONTINUE; 672748446bbSMel Gorman } 673748446bbSMel Gorman 6743e7d3449SMel Gorman /* 6753e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 6763e7d3449SMel Gorman * Returns 6773e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 6783e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 6793e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 6803e7d3449SMel Gorman */ 6813e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order) 6823e7d3449SMel Gorman { 6833e7d3449SMel Gorman int fragindex; 6843e7d3449SMel Gorman unsigned long watermark; 6853e7d3449SMel Gorman 6863e7d3449SMel Gorman /* 6873957c776SMichal Hocko * order == -1 is expected when compacting via 6883957c776SMichal Hocko * /proc/sys/vm/compact_memory 6893957c776SMichal Hocko */ 6903957c776SMichal Hocko if (order == -1) 6913957c776SMichal Hocko return COMPACT_CONTINUE; 6923957c776SMichal Hocko 6933957c776SMichal Hocko /* 6943e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 6953e7d3449SMel Gorman * This is because during migration, copies of pages need to be 6963e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 6973e7d3449SMel Gorman */ 6983e7d3449SMel Gorman watermark = low_wmark_pages(zone) + (2UL << order); 6993e7d3449SMel Gorman if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 7003e7d3449SMel Gorman return COMPACT_SKIPPED; 7013e7d3449SMel Gorman 7023e7d3449SMel Gorman /* 7033e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 7043e7d3449SMel Gorman * low memory or external fragmentation 7053e7d3449SMel Gorman * 706a582a738SShaohua Li * index of -1000 implies allocations might succeed depending on 707a582a738SShaohua Li * watermarks 7083e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 7093e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 7103e7d3449SMel Gorman * 7113e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 
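 *
 * For example, with the default sysctl_extfrag_threshold of 500, a
 * fragindex of 250 (failure mostly due to lack of memory) leads to
 * COMPACT_SKIPPED, while a fragindex of 800 (failure mostly due to
 * fragmentation) lets compaction continue.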
7123e7d3449SMel Gorman */ 7133e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 7143e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 7153e7d3449SMel Gorman return COMPACT_SKIPPED; 7163e7d3449SMel Gorman 717a582a738SShaohua Li if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 718a582a738SShaohua Li 0, 0)) 7193e7d3449SMel Gorman return COMPACT_PARTIAL; 7203e7d3449SMel Gorman 7213e7d3449SMel Gorman return COMPACT_CONTINUE; 7223e7d3449SMel Gorman } 7233e7d3449SMel Gorman 724748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 725748446bbSMel Gorman { 726748446bbSMel Gorman int ret; 727748446bbSMel Gorman 7283e7d3449SMel Gorman ret = compaction_suitable(zone, cc->order); 7293e7d3449SMel Gorman switch (ret) { 7303e7d3449SMel Gorman case COMPACT_PARTIAL: 7313e7d3449SMel Gorman case COMPACT_SKIPPED: 7323e7d3449SMel Gorman /* Compaction is likely to fail */ 7333e7d3449SMel Gorman return ret; 7343e7d3449SMel Gorman case COMPACT_CONTINUE: 7353e7d3449SMel Gorman /* Fall through to compaction */ 7363e7d3449SMel Gorman ; 7373e7d3449SMel Gorman } 7383e7d3449SMel Gorman 739748446bbSMel Gorman /* Setup to move all movable pages to the end of the zone */ 740748446bbSMel Gorman cc->migrate_pfn = zone->zone_start_pfn; 741748446bbSMel Gorman cc->free_pfn = cc->migrate_pfn + zone->spanned_pages; 742748446bbSMel Gorman cc->free_pfn &= ~(pageblock_nr_pages-1); 743748446bbSMel Gorman 744748446bbSMel Gorman migrate_prep_local(); 745748446bbSMel Gorman 746748446bbSMel Gorman while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 747748446bbSMel Gorman unsigned long nr_migrate, nr_remaining; 7489d502c1cSMinchan Kim int err; 749748446bbSMel Gorman 750f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 751f9e35b3bSMel Gorman case ISOLATE_ABORT: 752f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 753f9e35b3bSMel Gorman goto out; 754f9e35b3bSMel Gorman case ISOLATE_NONE: 755748446bbSMel Gorman continue; 756f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 757f9e35b3bSMel Gorman ; 758f9e35b3bSMel Gorman } 759748446bbSMel Gorman 760748446bbSMel Gorman nr_migrate = cc->nr_migratepages; 7619d502c1cSMinchan Kim err = migrate_pages(&cc->migratepages, compaction_alloc, 7625ceb9ce6SBartlomiej Zolnierkiewicz (unsigned long)&cc->freepages, false, 7635ceb9ce6SBartlomiej Zolnierkiewicz (cc->mode == COMPACT_SYNC) ? 
MIGRATE_SYNC_LIGHT 7645ceb9ce6SBartlomiej Zolnierkiewicz : MIGRATE_ASYNC); 765748446bbSMel Gorman update_nr_listpages(cc); 766748446bbSMel Gorman nr_remaining = cc->nr_migratepages; 767748446bbSMel Gorman 768748446bbSMel Gorman count_vm_event(COMPACTBLOCKS); 769748446bbSMel Gorman count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining); 770748446bbSMel Gorman if (nr_remaining) 771748446bbSMel Gorman count_vm_events(COMPACTPAGEFAILED, nr_remaining); 772b7aba698SMel Gorman trace_mm_compaction_migratepages(nr_migrate - nr_remaining, 773b7aba698SMel Gorman nr_remaining); 774748446bbSMel Gorman 775748446bbSMel Gorman /* Release LRU pages not migrated */ 7769d502c1cSMinchan Kim if (err) { 777748446bbSMel Gorman putback_lru_pages(&cc->migratepages); 778748446bbSMel Gorman cc->nr_migratepages = 0; 779748446bbSMel Gorman } 780748446bbSMel Gorman 781748446bbSMel Gorman } 782748446bbSMel Gorman 783f9e35b3bSMel Gorman out: 784748446bbSMel Gorman /* Release free pages and check accounting */ 785748446bbSMel Gorman cc->nr_freepages -= release_freepages(&cc->freepages); 786748446bbSMel Gorman VM_BUG_ON(cc->nr_freepages != 0); 787748446bbSMel Gorman 788748446bbSMel Gorman return ret; 789748446bbSMel Gorman } 79076ab0f53SMel Gorman 791d43a87e6SKyungmin Park static unsigned long compact_zone_order(struct zone *zone, 79277f1fe6bSMel Gorman int order, gfp_t gfp_mask, 7935ceb9ce6SBartlomiej Zolnierkiewicz enum compact_mode mode, 7945ceb9ce6SBartlomiej Zolnierkiewicz unsigned long *nr_pageblocks_skipped) 79556de7263SMel Gorman { 79656de7263SMel Gorman struct compact_control cc = { 79756de7263SMel Gorman .nr_freepages = 0, 79856de7263SMel Gorman .nr_migratepages = 0, 79956de7263SMel Gorman .order = order, 80056de7263SMel Gorman .migratetype = allocflags_to_migratetype(gfp_mask), 80156de7263SMel Gorman .zone = zone, 8025ceb9ce6SBartlomiej Zolnierkiewicz .mode = mode, 80356de7263SMel Gorman }; 8045ceb9ce6SBartlomiej Zolnierkiewicz unsigned long rc; 8055ceb9ce6SBartlomiej Zolnierkiewicz 80656de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 80756de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 80856de7263SMel Gorman 8095ceb9ce6SBartlomiej Zolnierkiewicz rc = compact_zone(zone, &cc); 8105ceb9ce6SBartlomiej Zolnierkiewicz *nr_pageblocks_skipped = cc.nr_pageblocks_skipped; 8115ceb9ce6SBartlomiej Zolnierkiewicz 8125ceb9ce6SBartlomiej Zolnierkiewicz return rc; 81356de7263SMel Gorman } 81456de7263SMel Gorman 8155e771905SMel Gorman int sysctl_extfrag_threshold = 500; 8165e771905SMel Gorman 81756de7263SMel Gorman /** 81856de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 81956de7263SMel Gorman * @zonelist: The zonelist used for the current allocation 82056de7263SMel Gorman * @order: The order of the current allocation 82156de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 82256de7263SMel Gorman * @nodemask: The allowed nodes to allocate from 82377f1fe6bSMel Gorman * @sync: Whether migration is synchronous or not 82456de7263SMel Gorman * 82556de7263SMel Gorman * This is the main entry point for direct page compaction. 
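 *
 * If an asynchronous, MOVABLE-only pass runs to completion but had to skip
 * MIGRATE_UNMOVABLE pageblocks, the zone is retried in
 * COMPACT_ASYNC_UNMOVABLE mode so that rescue_unmovable_pageblock() can try
 * to convert those blocks.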
82656de7263SMel Gorman */ 82756de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist, 82877f1fe6bSMel Gorman int order, gfp_t gfp_mask, nodemask_t *nodemask, 82977f1fe6bSMel Gorman bool sync) 83056de7263SMel Gorman { 83156de7263SMel Gorman enum zone_type high_zoneidx = gfp_zone(gfp_mask); 83256de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 83356de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 83456de7263SMel Gorman struct zoneref *z; 83556de7263SMel Gorman struct zone *zone; 83656de7263SMel Gorman int rc = COMPACT_SKIPPED; 8375ceb9ce6SBartlomiej Zolnierkiewicz unsigned long nr_pageblocks_skipped; 8385ceb9ce6SBartlomiej Zolnierkiewicz enum compact_mode mode; 83956de7263SMel Gorman 84056de7263SMel Gorman /* 84156de7263SMel Gorman * Check whether it is worth even starting compaction. The order check is 84256de7263SMel Gorman * made because an assumption is made that the page allocator can satisfy 84356de7263SMel Gorman * the "cheaper" orders without taking special steps 84456de7263SMel Gorman */ 845c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 84656de7263SMel Gorman return rc; 84756de7263SMel Gorman 84856de7263SMel Gorman count_vm_event(COMPACTSTALL); 84956de7263SMel Gorman 85056de7263SMel Gorman /* Compact each zone in the list */ 85156de7263SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 85256de7263SMel Gorman nodemask) { 85356de7263SMel Gorman int status; 85456de7263SMel Gorman 8555ceb9ce6SBartlomiej Zolnierkiewicz mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; 8565ceb9ce6SBartlomiej Zolnierkiewicz retry: 8575ceb9ce6SBartlomiej Zolnierkiewicz status = compact_zone_order(zone, order, gfp_mask, mode, 8585ceb9ce6SBartlomiej Zolnierkiewicz &nr_pageblocks_skipped); 85956de7263SMel Gorman rc = max(status, rc); 86056de7263SMel Gorman 8613e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 8623e7d3449SMel Gorman if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) 86356de7263SMel Gorman break; 8645ceb9ce6SBartlomiej Zolnierkiewicz 8655ceb9ce6SBartlomiej Zolnierkiewicz if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) { 8665ceb9ce6SBartlomiej Zolnierkiewicz if (nr_pageblocks_skipped) { 8675ceb9ce6SBartlomiej Zolnierkiewicz mode = COMPACT_ASYNC_UNMOVABLE; 8685ceb9ce6SBartlomiej Zolnierkiewicz goto retry; 8695ceb9ce6SBartlomiej Zolnierkiewicz } 8705ceb9ce6SBartlomiej Zolnierkiewicz } 87156de7263SMel Gorman } 87256de7263SMel Gorman 87356de7263SMel Gorman return rc; 87456de7263SMel Gorman } 87556de7263SMel Gorman 87656de7263SMel Gorman 87776ab0f53SMel Gorman /* Compact all zones within a node */ 8787be62de9SRik van Riel static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 87976ab0f53SMel Gorman { 88076ab0f53SMel Gorman int zoneid; 88176ab0f53SMel Gorman struct zone *zone; 88276ab0f53SMel Gorman 88376ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 88476ab0f53SMel Gorman 88576ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 88676ab0f53SMel Gorman if (!populated_zone(zone)) 88776ab0f53SMel Gorman continue; 88876ab0f53SMel Gorman 8897be62de9SRik van Riel cc->nr_freepages = 0; 8907be62de9SRik van Riel cc->nr_migratepages = 0; 8917be62de9SRik van Riel cc->zone = zone; 8927be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 8937be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 89476ab0f53SMel Gorman 895aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 8967be62de9SRik 
van Riel compact_zone(zone, cc); 89776ab0f53SMel Gorman 898aff62249SRik van Riel if (cc->order > 0) { 899aff62249SRik van Riel int ok = zone_watermark_ok(zone, cc->order, 900aff62249SRik van Riel low_wmark_pages(zone), 0, 0); 901aff62249SRik van Riel if (ok && cc->order > zone->compact_order_failed) 902aff62249SRik van Riel zone->compact_order_failed = cc->order + 1; 903aff62249SRik van Riel /* Currently async compaction is never deferred. */ 9045ceb9ce6SBartlomiej Zolnierkiewicz else if (!ok && cc->mode == COMPACT_SYNC) 905aff62249SRik van Riel defer_compaction(zone, cc->order); 906aff62249SRik van Riel } 907aff62249SRik van Riel 9087be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages)); 9097be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages)); 91076ab0f53SMel Gorman } 91176ab0f53SMel Gorman 91276ab0f53SMel Gorman return 0; 91376ab0f53SMel Gorman } 91476ab0f53SMel Gorman 9157be62de9SRik van Riel int compact_pgdat(pg_data_t *pgdat, int order) 9167be62de9SRik van Riel { 9177be62de9SRik van Riel struct compact_control cc = { 9187be62de9SRik van Riel .order = order, 9195ceb9ce6SBartlomiej Zolnierkiewicz .mode = COMPACT_ASYNC_MOVABLE, 9207be62de9SRik van Riel }; 9217be62de9SRik van Riel 9227be62de9SRik van Riel return __compact_pgdat(pgdat, &cc); 9237be62de9SRik van Riel } 9247be62de9SRik van Riel 9257be62de9SRik van Riel static int compact_node(int nid) 9267be62de9SRik van Riel { 9277be62de9SRik van Riel struct compact_control cc = { 9287be62de9SRik van Riel .order = -1, 9295ceb9ce6SBartlomiej Zolnierkiewicz .mode = COMPACT_SYNC, 9307be62de9SRik van Riel }; 9317be62de9SRik van Riel 9328575ec29SHugh Dickins return __compact_pgdat(NODE_DATA(nid), &cc); 9337be62de9SRik van Riel } 9347be62de9SRik van Riel 93576ab0f53SMel Gorman /* Compact all nodes in the system */ 93676ab0f53SMel Gorman static int compact_nodes(void) 93776ab0f53SMel Gorman { 93876ab0f53SMel Gorman int nid; 93976ab0f53SMel Gorman 9408575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 9418575ec29SHugh Dickins lru_add_drain_all(); 9428575ec29SHugh Dickins 94376ab0f53SMel Gorman for_each_online_node(nid) 94476ab0f53SMel Gorman compact_node(nid); 94576ab0f53SMel Gorman 94676ab0f53SMel Gorman return COMPACT_COMPLETE; 94776ab0f53SMel Gorman } 94876ab0f53SMel Gorman 94976ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 95076ab0f53SMel Gorman int sysctl_compact_memory; 95176ab0f53SMel Gorman 95276ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */ 95376ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 95476ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 95576ab0f53SMel Gorman { 95676ab0f53SMel Gorman if (write) 95776ab0f53SMel Gorman return compact_nodes(); 95876ab0f53SMel Gorman 95976ab0f53SMel Gorman return 0; 96076ab0f53SMel Gorman } 961ed4a6d7fSMel Gorman 9625e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 9635e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 9645e771905SMel Gorman { 9655e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 9665e771905SMel Gorman 9675e771905SMel Gorman return 0; 9685e771905SMel Gorman } 9695e771905SMel Gorman 970ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 97110fbcf4cSKay Sievers ssize_t sysfs_compact_node(struct device *dev, 97210fbcf4cSKay Sievers struct device_attribute *attr, 973ed4a6d7fSMel Gorman const char *buf, size_t count) 
974ed4a6d7fSMel Gorman {
9758575ec29SHugh Dickins 	int nid = dev->id;
9768575ec29SHugh Dickins 
9778575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
9788575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
9798575ec29SHugh Dickins 		lru_add_drain_all();
9808575ec29SHugh Dickins 
9818575ec29SHugh Dickins 		compact_node(nid);
9828575ec29SHugh Dickins 	}
983ed4a6d7fSMel Gorman 
984ed4a6d7fSMel Gorman 	return count;
985ed4a6d7fSMel Gorman }
98610fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
987ed4a6d7fSMel Gorman 
988ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
989ed4a6d7fSMel Gorman {
99010fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
991ed4a6d7fSMel Gorman }
992ed4a6d7fSMel Gorman 
993ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
994ed4a6d7fSMel Gorman {
99510fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
996ed4a6d7fSMel Gorman }
997ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
998ff9543fdSMichal Nazarewicz 
999ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
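/*
 * Illustrative sketch, not part of the kernel source: the handlers above
 * expose compaction to userspace through /proc/sys/vm/compact_memory (the
 * written value is ignored and all online nodes are compacted) and, when
 * CONFIG_SYSFS and CONFIG_NUMA are enabled, through per-node
 * /sys/devices/system/node/node<id>/compact files. Assuming a kernel built
 * with CONFIG_COMPACTION, a minimal userspace trigger could look like:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int trigger_compaction(void)
 *	{
 *		int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, "1", 1);
 *		close(fd);
 *		return ret == 1 ? 0 : -1;
 *	}
 */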