/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
};
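
/*
 * Worked example of the scanner convergence described above: in a zone
 * spanning PFNs 0..1023, migrate_pfn starts at PFN 0 and free_pfn at the
 * top of the zone, rounded down to a pageblock boundary. Each cycle
 * isolates movable pages at low PFNs and free pages at high PFNs, so
 * migrate_pfn only grows and free_pfn only shrinks; the run terminates
 * once free_pfn <= migrate_pfn, i.e. the two scanners have met.
 */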

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;
		nr_scanned++;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}
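
/*
 * Example of the splitting loop above: if an order-2 buddy page is found
 * at blockpfn, split_free_page() returns 4, the four resulting order-0
 * pages are added to the private freelist, and blockpfn/cursor advance by
 * isolated - 1 == 3, so the loop increment lands on the first PFN past
 * the page that was split.
 */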

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that not all pages within a zone's
		 * range of PFNs belong to that zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from. Take
		 * the zone lock with IRQs disabled, double check the block is
		 * still suitable and only then isolate the pages. Rechecking
		 * under the lock minimises the time IRQs are disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page)) {
			isolated = isolate_freepages_block(zone, pfn, freelist);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
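
/*
 * Numeric example of the check above: with 6000 inactive, 4000 active and
 * 5100 isolated LRU pages, 5100 > (6000 + 4000) / 2 == 5000, so the
 * caller backs off until parallel reclaim/compaction puts pages back.
 */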

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read, for that matter) cc->migrate_pfn.
 */
static unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);
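
		/*
		 * Cadence example for the lock juggling above: with
		 * SWAP_CLUSTER_MAX == 32, the lru_lock is dropped once
		 * every 32 PFNs scanned so that pending IRQs can run, and
		 * it is reacquired immediately unless a reschedule or lock
		 * contention forces a cond_resched() first.
		 */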

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}
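
		/*
		 * Worked example of the skip arithmetic above, assuming
		 * pageblock_nr_pages == 512: for low_pfn == 1024 the two
		 * statements yield 1536, then ALIGN(1536, 512) - 1 == 1535,
		 * and the loop increment moves low_pfn to 1536, the start
		 * of the next pageblock; unaligned values are likewise
		 * rounded forward to a pageblock boundary.
		 */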

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}
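
/*
 * A sketch of how the callback below is reached, based on the call in
 * compact_zone(): migrate_pages(&cc->migratepages, compaction_alloc,
 * (unsigned long)cc, ...) invokes compaction_alloc() once per source
 * page, passing cc through the opaque data argument so the callback can
 * refill and consume cc->freepages.
 */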

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;
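
	/*
	 * Example of the watermark above: for an order-3 request against a
	 * zone whose low watermark is 1000 pages, compaction keeps running
	 * until zone_watermark_ok() passes for 1000 + (1 << 3) == 1008
	 * pages, i.e. the low watermark plus the size of the allocation.
	 */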

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;
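
	/*
	 * Example of the 2UL factor above: for order == 9, 2UL << 9 == 1024
	 * pages of headroom are required rather than the 512 being
	 * requested, covering both the target pages and the temporary
	 * copies that exist while migration is in flight.
	 */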

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}

	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				int order, gfp_t gfp_mask,
				bool sync)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist:	The zonelist used for the current allocation
 * @order:	The order of the current allocation
 * @gfp_mask:	The GFP mask of the current allocation
 * @nodemask:	The allowed nodes to allocate from
 * @sync:	Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
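
	/*
	 * Example of the gating below: a GFP_KERNEL allocation sets both
	 * __GFP_FS and __GFP_IO, so an order-2 GFP_KERNEL request may
	 * compact, while a GFP_NOIO or order-0 request returns
	 * COMPACT_SKIPPED immediately.
	 */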

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because it is assumed that the page allocator can satisfy
	 * the "cheaper" orders without taking special steps
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}


/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
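
/*
 * Usage sketch for the interfaces defined here, assuming the usual
 * procfs/sysfs mount points: "echo 1 > /proc/sys/vm/compact_memory"
 * compacts every online node via the sysctl handler above, while the
 * per-node device attribute created below allows compacting a single
 * node, e.g. "echo 1 > /sys/devices/system/node/node0/compact".
 */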
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */