/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}
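
/*
 * Returns true if the pageblock's migratetype makes it a candidate for
 * async compaction: only MIGRATE_MOVABLE and MIGRATE_CMA blocks are
 * likely to contain enough movable pages to be worth scanning.
 */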
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (need_resched() || spin_is_contended(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			if (cc->contended)
				*cc->contended = true;
			return false;
		}

		cond_resched();
		if (fatal_signal_pending(current))
			return false;
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}
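
/*
 * The trylock wrapper above is the "start unlocked" form used by the free
 * scanner: on return the lock is either held (true), or compaction has
 * decided to back off (false).
 */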

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause this function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
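
/*
 * At the time of writing, the only caller of isolate_freepages_range()
 * above is alloc_contig_range() (the CMA allocator); compaction itself
 * reaches isolate_freepages_block() through isolate_freepages() below.
 */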

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
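
/*
 * Worked example for too_many_isolated() above: with 800 pages on the
 * inactive lists and 200 on the active lists, it returns true once more
 * than 500 pages are isolated, throttling the migration scanner (async
 * compaction aborts instead of waiting).
 */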

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irqsave(&zone->lru_lock, flags);
	locked = true;
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			locked = false;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not
		 * taken, as memory compaction should not move pages between
		 * nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
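
/*
 * The next few helpers implement the free-page side of compaction:
 * scanning backwards from the end of the zone towards the migration
 * scanner and isolating free pages to use as migration targets.
 */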

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Returns the start pfn of the last page block in a zone. This is the starting
 * point for full compaction of a zone. Compaction searches for free pages from
 * the end of each zone, while isolate_freepages_block scans forward inside each
 * page block.
 */
static unsigned long start_free_pfn(struct zone *zone)
{
	unsigned long free_pfn;
	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
	free_pfn &= ~(pageblock_nr_pages-1);
	return free_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Now
		 * take the lock, recheck the block is still suitable and
		 * isolate the pages. This is to minimise the time IRQs
		 * are disabled
		 */
		isolated = 0;

		/*
		 * The zone lock must be held to isolate freepages. This
		 * is unfortunately a very coarse lock and can be heavily
		 * contended if there are parallel allocations or parallel
		 * compactions. For async compaction, do not spin on the
		 * lock.
		 */
		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
			break;
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			high_pfn = max(high_pfn, pfn);

			/*
			 * If the free scanner has wrapped, update
			 * compact_cached_free_pfn to point to the highest
			 * pageblock with free pages. This reduces excessive
			 * scanning of full pageblocks near the end of the
			 * zone
			 */
			if (cc->order > 0 && cc->wrapped)
				zone->compact_cached_free_pfn = high_pfn;
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;

	/* If compact_cached_free_pfn is reset then set it now */
	if (cc->order > 0 && !cc->wrapped &&
	    zone->compact_cached_free_pfn == start_free_pfn(zone))
		zone->compact_cached_free_pfn = high_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
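
/*
 * compact_zone() maps these to actions: ISOLATE_ABORT bails out with
 * COMPACT_PARTIAL, ISOLATE_NONE moves on to the next pageblock, and
 * ISOLATE_SUCCESS hands the isolated pages to migrate_pages().
 */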

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/*
	 * A full (order == -1) compaction run starts at the beginning and
	 * end of a zone; it completes when the migrate and free scanner meet.
	 * A partial (order > 0) compaction can start with the free scanner
	 * at a random point in the zone, and may have to restart.
	 */
	if (cc->free_pfn <= cc->migrate_pfn) {
		if (cc->order > 0 && !cc->wrapped) {
			/* We started partway through; restart at the end. */
			unsigned long free_pfn = start_free_pfn(zone);
			zone->compact_cached_free_pfn = free_pfn;
			cc->free_pfn = free_pfn;
			cc->wrapped = 1;
			return COMPACT_CONTINUE;
		}
		return COMPACT_COMPLETE;
	}

	/* We wrapped around and ended up where we started. */
	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;
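
	/*
	 * Worked example of the check below: for an order-3 request the
	 * zone must have low_wmark_pages(zone) + (2UL << 3) = watermark
	 * plus 16 free pages before compaction is attempted.
	 */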

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;

	if (cc->order > 0) {
		/* Incremental compaction. Start where the last one stopped. */
		cc->free_pfn = zone->compact_cached_free_pfn;
		cc->start_free_pfn = cc->free_pfn;
	} else {
		/* Order == -1 starts at the end of the zone. */
		cc->free_pfn = start_free_pfn(zone);
	}

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

/* Set up a compact_control for one zone and run compact_zone() on it */
static unsigned long compact_zone_order(struct zone *zone,
					int order, gfp_t gfp_mask,
					bool sync, bool *contended)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.contended = contended,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;
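
/*
 * extfrag_threshold is tunable via /proc/sys/vm/extfrag_threshold: direct
 * compaction is skipped when the zone's fragmentation index is at or below
 * this value (see compaction_suitable() above).
 */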

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Set to true if async compaction aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}
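
/*
 * Two callers drive the whole-node compaction below: compact_pgdat() from
 * kswapd (async, for a specific order) and compact_node() from the sysctl
 * and sysfs interfaces (sync, order == -1, i.e. full compaction).
 */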

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
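
/* Writing to /sys/devices/system/node/nodeN/compact triggers compaction of that node */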
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */