/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	unsigned int order;		/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;

	int compact_mode;
};

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
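/*
 * Illustrative sketch of the two scanners described above (assumed
 * numbers, not taken from this file): in a zone spanning PFNs 0-4096
 * with pageblock_nr_pages == 512, the scanners move towards each other
 * one pageblock at a time:
 *
 *	migrate_pfn:    0 -> 512 -> 1024 -> ...
 *	free_pfn:                   ... <- 3072 <- 3584 <- 4096
 *
 * Movable pages found by the migrate scanner are copied into free pages
 * found by the free scanner; the run ends when free_pfn <= migrate_pfn.
 */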
/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;
		nr_scanned++;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}
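/*
 * Worked example for the cursor arithmetic in isolate_freepages_block()
 * (assumed order, not from this file): if the scan hits a PageBuddy page
 * of order 3, split_free_page() yields 8 order-0 pages, all of which are
 * added to the freelist. blockpfn and cursor then advance by
 * isolated - 1 == 7, so the loop's own increment lands on the first PFN
 * after the split page rather than rescanning its tail pages.
 */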
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
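/*
 * Note on the mapping loop above: with CONFIG_DEBUG_PAGEALLOC, free
 * pages are unmapped from the kernel page tables, and split_free_page()
 * bypasses the normal allocation path that would remap them. The
 * arch_alloc_page()/kernel_map_pages() calls therefore redo that work
 * before the pages are handed out as migration targets.
 */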
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
				zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
				zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
				zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
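/*
 * Example of the throttling check above (assumed numbers): with 8000
 * active, 12000 inactive and 10500 isolated pages,
 * 10500 > (12000 + 8000) / 2 == 10000, so isolate_migratepages() backs
 * off in congestion_wait() until parallel reclaim or compaction puts
 * pages back on the LRU lists.
 */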
/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/* Get the page and skip it if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Try to isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return cc->nr_migratepages;
}
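/*
 * Illustration of the async pageblock skip above (assumed numbers):
 * with pageblock_nr_pages == 512, a non-MOVABLE page at low_pfn == 1000
 * sets low_pfn to ALIGN(1000 + 512, 512) - 1 == 1535; the for-loop's
 * low_pfn++ then resumes the scan at 1536, the first PFN of the next
 * pageblock.
 */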
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}
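/*
 * Example of why the recount above is needed (assumed numbers): if
 * migrate_pages() moves 28 of 32 isolated pages, the 4 failures remain
 * on cc->migratepages while 28 free pages were consumed through
 * compaction_alloc(). update_nr_listpages() resynchronises
 * nr_migratepages and nr_freepages with what is actually on the lists.
 */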
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/* Compaction run is not finished if the watermark is not met */
	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
		watermark = low_wmark_pages(zone);
	else
		watermark = high_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Generating only one page of the right order is not enough
	 * for kswapd, we must continue until we're above the high
	 * watermark as a pool for high order GFP_ATOMIC allocations
	 * too.
	 */
	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}
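/*
 * Worked example for the watermark test above (assumed numbers): for a
 * direct compactor with cc->order == 4 and low_wmark_pages(zone) ==
 * 1024, compaction keeps running until zone_watermark_ok() passes for
 * 1024 + (1 << 4) == 1040 pages, i.e. until an order-4 allocation could
 * succeed without dropping the zone below its low watermark.
 */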
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1 implies allocations might succeed depending on watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}
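/*
 * Example of how compaction_suitable() gates compact_zone() (assumed
 * numbers): with sysctl_extfrag_threshold at its default of 500, a
 * fragmentation index of 350 suggests failures stem from a genuine lack
 * of memory and compaction is skipped, while an index of 800 points at
 * external fragmentation and compaction continues.
 */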
unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync,
				 int compact_mode)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.compact_mode = compact_mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because we assume the page allocator can satisfy the
	 * "cheaper" orders without taking special steps.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
					    COMPACT_MODE_DIRECT_RECLAIM);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}
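/*
 * Usage sketch (an assumed caller, not shown in this file): the page
 * allocator slowpath is expected to invoke this before retrying a
 * high-order allocation, roughly:
 *
 *	status = try_to_compact_pages(zonelist, order, gfp_mask,
 *				      nodemask, sync_migration);
 *	if (status != COMPACT_SKIPPED)
 *		... retry get_page_from_freelist() ...
 */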
/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
			.order = -1,
			.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}
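/*
 * Usage note: compact_node() is reachable from userspace for all nodes
 * via
 *	echo 1 > /proc/sys/vm/compact_memory
 * or, on NUMA builds, per node through the sysfs file created below:
 *	echo 1 > /sys/devices/system/node/node0/compact
 */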
/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t count)
{
	compact_node(dev->id);

	return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */