/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, abort and return 0 on any invalid PFN or non-free
 * page inside the pageblock (even though some pages may still end up
 * isolated).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the
 * function to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell in
 * the middle of a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

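/*
 * Example: a minimal sketch of how a range allocator (CMA-style code,
 * for instance) might use isolate_freepages_range(). The surrounding
 * caller and the -EBUSY convention are illustrative assumptions, not
 * something defined in this file:
 *
 *	unsigned long ret;
 *
 *	ret = isolate_freepages_range(start_pfn, end_pfn);
 *	if (!ret)
 *		return -EBUSY;	(the range was not entirely free)
 *
 * On success, every page in [start_pfn, end_pfn) has been taken out of
 * the buddy allocator, split to order-0 and mapped, and now belongs to
 * the caller.
 */
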
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

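/*
 * Worked example of the ratio above (the figures are made up): with
 * 12000 inactive and 4000 active pages on the LRU lists,
 * too_many_isolated() returns true once more than
 * (12000 + 4000) / 2 = 8000 pages sit isolated off the LRU, i.e. once
 * the isolated pages outnumber half of what remains on the lists.
 */
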
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Take
		 * the zone lock, which disables interrupts, then double check
		 * that the block is still suitable and isolate the pages.
		 * Re-checking under the lock minimises the time IRQs are
		 * disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

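/*
 * Worked example for the watermark check in compact_finished() above
 * (the zone figures are illustrative): for an order-4 request against
 * a zone whose low watermark is 1024 pages, compaction keeps running
 * until at least 1024 + (1 << 4) = 1040 pages are free, and only then
 * starts checking the free lists for a page of a suitable order.
 */
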
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
						    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

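/*
 * Example reading of the fragmentation index used above, assuming the
 * default sysctl_extfrag_threshold of 500: an index of -1000 means a
 * suitably sized free page already exists, so only the watermark check
 * matters; an index of 250 (at or below the threshold) means failures
 * are mostly down to a lack of memory, so compaction is skipped in
 * favour of reclaim; an index of 800 points at external fragmentation,
 * which compaction can actually address.
 */
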
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						 nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
					int order, gfp_t gfp_mask,
					bool sync)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because we assume the page allocator can satisfy the
	 * "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}

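/*
 * Example: a condensed sketch of the call site in the page allocator's
 * slow path (__alloc_pages_direct_compact() in mm/page_alloc.c); error
 * handling and compaction-deferral logic are omitted here for brevity:
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask,
 *				  nodemask, sync_migration);
 *	if (rc != COMPACT_SKIPPED)
 *		(retry the allocation via get_page_from_freelist())
 */
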
/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

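/*
 * Example: triggering full-system compaction from user space through
 * the handler above (the written value itself is ignored):
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */
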
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */