/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

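/*
 * Illustrative note, not part of the original source: in non-strict mode,
 * holes and in-use pages are simply skipped, so scanning a block that
 * contains a single pinned page still isolates the free pages around it.
 * In strict mode the same block yields 0 and the caller is expected to
 * release anything that was already isolated, as isolate_freepages_range()
 * below does via release_freepages().
 */
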
/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

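/*
 * Worked example, illustrative only: on a zone with 600 active and 400
 * inactive LRU pages, too_many_isolated() starts returning true once more
 * than (600 + 400) / 2 = 500 pages are isolated. At that point the migrate
 * scanner below either aborts (async compaction) or waits for congestion
 * to ease before isolating more pages (sync compaction).
 */
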
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal pending,
 * otherwise the PFN of the first page that was not scanned (which may be
 * less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode != COMPACT_SYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (cc->mode != COMPACT_SYNC &&
		    last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (cc->mode != COMPACT_SYNC)
			mode |= ISOLATE_ASYNC_MIGRATE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
 * converted to MIGRATE_MOVABLE type, false otherwise.
 */
static bool rescue_unmovable_pageblock(struct page *page)
{
	unsigned long pfn, start_pfn, end_pfn;
	struct page *start_page, *end_page;

	pfn = page_to_pfn(page);
	start_pfn = pfn & ~(pageblock_nr_pages - 1);
	end_pfn = start_pfn + pageblock_nr_pages;

	start_page = pfn_to_page(start_pfn);
	end_page = pfn_to_page(end_pfn);

	/* Do not deal with pageblocks that overlap zones */
	if (page_zone(start_page) != page_zone(end_page))
		return false;

	for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
								  page++) {
		if (!pfn_valid_within(pfn))
			continue;

		if (PageBuddy(page)) {
			int order = page_order(page);

			pfn += (1 << order) - 1;
			page += (1 << order) - 1;

			continue;
		} else if (page_count(page) == 0 || PageLRU(page))
			continue;

		return false;
	}

	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
	return true;
}

enum smt_result {
	GOOD_AS_MIGRATION_TARGET,
	FAIL_UNMOVABLE_TARGET,
	FAIL_BAD_TARGET,
};

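/*
 * Illustrative summary, not part of the original source: in
 * COMPACT_ASYNC_MOVABLE mode an UNMOVABLE pageblock is merely counted as
 * skipped (FAIL_UNMOVABLE_TARGET), while in COMPACT_ASYNC_UNMOVABLE and
 * COMPACT_SYNC modes suitable_migration_target() below first tries
 * rescue_unmovable_pageblock() and treats the block as a good target if
 * the rescue succeeds.
 */
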
/*
 * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
 * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
 * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
 */
static enum smt_result suitable_migration_target(struct page *page,
				      struct compact_control *cc)
{

	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return FAIL_BAD_TARGET;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return GOOD_AS_MIGRATION_TARGET;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
	    migrate_async_suitable(migratetype))
		return GOOD_AS_MIGRATION_TARGET;

	if (cc->mode == COMPACT_ASYNC_MOVABLE &&
	    migratetype == MIGRATE_UNMOVABLE)
		return FAIL_UNMOVABLE_TARGET;

	if (cc->mode != COMPACT_ASYNC_MOVABLE &&
	    migratetype == MIGRATE_UNMOVABLE &&
	    rescue_unmovable_pageblock(page))
		return GOOD_AS_MIGRATION_TARGET;

	/* Otherwise skip the block */
	return FAIL_BAD_TARGET;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * isolate_freepages() may be called more than once during
	 * compact_zone_order() run and we want only the most recent
	 * count.
	 */
	cc->nr_pageblocks_skipped = 0;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;
		enum smt_result ret;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		ret = suitable_migration_target(page, cc);
		if (ret != GOOD_AS_MIGRATION_TARGET) {
			if (ret == FAIL_UNMOVABLE_TARGET)
				cc->nr_pageblocks_skipped++;
			continue;
		}

		/*
		 * Found a block suitable for isolating free pages from. Take
		 * the zone lock, double check under it that the block is
		 * still suitable and isolate the pages. The lock is only
		 * held per block to minimise the time IRQs are disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		ret = suitable_migration_target(page, cc);
		if (ret == GOOD_AS_MIGRATION_TARGET) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		} else if (ret == FAIL_UNMOVABLE_TARGET)
			cc->nr_pageblocks_skipped++;
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
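	/*
	 * Worked example, illustrative only: with the default
	 * sysctl_extfrag_threshold of 500, a fragindex of 400 (failure
	 * mostly due to lack of memory) causes the zone to be skipped,
	 * while a fragindex of 700 (failure mostly due to fragmentation)
	 * lets compaction continue.
	 */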
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)&cc->freepages, false,
				(cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
							   : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						 nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 enum compact_mode mode,
				 unsigned long *nr_pageblocks_skipped)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.mode = mode,
	};
	unsigned long rc;

	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	rc = compact_zone(zone, &cc);
	*nr_pageblocks_skipped = cc.nr_pageblocks_skipped;

	return rc;
}

int sysctl_extfrag_threshold = 500;

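/*
 * Illustrative usage, not part of the original source: a direct-compaction
 * caller in the page allocator would typically do something along the
 * lines of
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync);
 *
 * and retry the high-order allocation whenever the result is anything
 * other than COMPACT_SKIPPED.
 */
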
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	unsigned long nr_pageblocks_skipped;
	enum compact_mode mode;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because we assume the page allocator can satisfy the
	 * "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					nodemask) {
		int status;

		mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
retry:
		status = compact_zone_order(zone, order, gfp_mask, mode,
					    &nr_pageblocks_skipped);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;

		if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
			if (nr_pageblocks_skipped) {
				mode = COMPACT_ASYNC_UNMOVABLE;
				goto retry;
			}
		}
	}

	return rc;
}

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->mode == COMPACT_SYNC)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = COMPACT_ASYNC_MOVABLE,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = COMPACT_SYNC,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */