// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;

/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif
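
/*
 * For example, on x86-64 with 4K pages and transparent hugepages enabled
 * this evaluates to HPAGE_PMD_ORDER == 9, i.e. the fragmentation score is
 * computed against 2MB huge page sized allocations.
 */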

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
	 * flag so that VM can catch up released page by driver after isolation.
	 * With it, VM migration doesn't try to put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
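
/*
 * With COMPACT_MAX_DEFER_SHIFT == 6, each consecutive compaction failure
 * doubles the number of skipped attempts (1 << compact_defer_shift), so at
 * most 64 attempts are skipped before compaction is retried.
 */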

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}
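
/*
 * Note that the sampling step above is 1 << PAGE_ALLOC_COSTLY_ORDER pages
 * (8 pages, since PAGE_ALLOC_COSTLY_ORDER is 3), so clearing the hint only
 * touches a handful of pages per pageblock and may miss a sparse free or
 * LRU page, in which case the skip hint is simply left set.
 */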

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for readers/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}
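
/*
 * compact_cached_migrate_pfn[0] is the resume point for async compaction
 * and [1] the one for sync compaction, so an async pass above only advances
 * its own restart position while a sync pass advances both.
 */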

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
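
/*
 * Typical caller pattern, as used by isolate_freepages_block() below:
 *
 *	if (!(blockpfn % SWAP_CLUSTER_MAX)
 *	    && compact_unlock_should_abort(&cc->zone->lock, flags,
 *					   &locked, cc))
 *		break;
 */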

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
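
/*
 * For example, with 300 inactive and 100 active pages on the node's LRU
 * lists, isolation is considered excessive once more than 200 pages are
 * isolated, and the migration scanner below waits or backs off.
 */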

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case e.g. signal pending or congestion,
 * -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += compound_nr(page) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		if (!__isolate_lru_page_prepare(page, isolate_mode))
			goto isolate_fail_put;

		/* Try isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = mem_cgroup_page_lruvec(page);

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page);

			/* Try get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	cc->migrate_pfn = low_pfn;

	return ret;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
 * in case we could not allocate a page, or 0.
 */
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);

		if (ret)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return ret;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}
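
/*
 * For example, an async direct compaction run for a MIGRATE_UNMOVABLE
 * allocation only takes migration candidates from MIGRATE_UNMOVABLE
 * pageblocks, while a MIGRATE_MOVABLE request may also use CMA blocks.
 */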

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such that the unscanned pages are scanned
 * first on the next iteration of the free scanner
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}
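
/*
 * For example, scanning the freelist A-B-C-D-E in reverse and stopping at C
 * reorders it to C-D-E-A-B, so the still unscanned A and B are visited first
 * by the next reverse scan.
 */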

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;

	/* Scan before */
	if (start_pfn != pfn) {
		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
		if (cc->nr_freepages >= cc->nr_migratepages)
			return;
	}

	/* Scan after */
	start_pfn = pfn + nr_isolated;
	if (start_pfn < end_pfn)
		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (cc->nr_freepages < cc->nr_migratepages)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}
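
/*
 * For example, with cc->order == 5 and cc->search_order == 3 the orders are
 * visited as 3, 2, 1, 0, 4; once the walk wraps back to the starting order,
 * -1 is returned and the caller's loop below terminates.
 */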

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return cc->free_pfn;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, lru) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return cc->free_pfn;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn, nr_isolated);
	return low_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	isolate_start_pfn = fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}
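
/*
 * Note that the free scanner is only driven on demand: isolate_freepages()
 * runs from compaction_alloc() when the private freelist is empty, so free
 * page scanning happens lazily as migration consumes target pages.
 */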

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
#ifdef CONFIG_PREEMPT_RT
int sysctl_compact_unevictable_allowed __read_mostly = 0;
#else
int sysctl_compact_unevictable_allowed __read_mostly = 1;
#endif

static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}

/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, lru) {
			unsigned long free_pfn;

			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				cc->fast_search_fail = 0;
				found_block = true;
				set_pageblock_skip(freepage);
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock marks a pageblock skipped so to avoid
	 * the isolation_suitable check below, check whether the fast
	 * search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
1879 */ 1880 for (; block_end_pfn <= cc->free_pfn; 1881 fast_find_block = false, 1882 cc->migrate_pfn = low_pfn = block_end_pfn, 1883 block_start_pfn = block_end_pfn, 1884 block_end_pfn += pageblock_nr_pages) { 1885 1886 /* 1887 * This can potentially iterate a massively long zone with 1888 * many pageblocks unsuitable, so periodically check if we 1889 * need to schedule. 1890 */ 1891 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1892 cond_resched(); 1893 1894 page = pageblock_pfn_to_page(block_start_pfn, 1895 block_end_pfn, cc->zone); 1896 if (!page) 1897 continue; 1898 1899 /* 1900 * If isolation recently failed, do not retry. Only check the 1901 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1902 * to be visited multiple times. Assume skip was checked 1903 * before making it "skip" so other compaction instances do 1904 * not scan the same block. 1905 */ 1906 if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && 1907 !fast_find_block && !isolation_suitable(cc, page)) 1908 continue; 1909 1910 /* 1911 * For async compaction, also only scan in MOVABLE blocks 1912 * without huge pages. Async compaction is optimistic to see 1913 * if the minimum amount of work satisfies the allocation. 1914 * The cached PFN is updated as it's possible that all 1915 * remaining blocks between source and target are unsuitable 1916 * and the compaction scanners fail to meet. 1917 */ 1918 if (!suitable_migration_source(cc, page)) { 1919 update_cached_migrate(cc, block_end_pfn); 1920 continue; 1921 } 1922 1923 /* Perform the isolation */ 1924 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, 1925 isolate_mode)) 1926 return ISOLATE_ABORT; 1927 1928 /* 1929 * Either we isolated something and proceed with migration. Or 1930 * we failed and compact_zone should decide if we should 1931 * continue or not. 1932 */ 1933 break; 1934 } 1935 1936 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1937 } 1938 1939 /* 1940 * order == -1 is expected when compacting via 1941 * /proc/sys/vm/compact_memory 1942 */ 1943 static inline bool is_via_compact_memory(int order) 1944 { 1945 return order == -1; 1946 } 1947 1948 static bool kswapd_is_running(pg_data_t *pgdat) 1949 { 1950 return pgdat->kswapd && task_is_running(pgdat->kswapd); 1951 } 1952 1953 /* 1954 * A zone's fragmentation score is the external fragmentation wrt to the 1955 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. 1956 */ 1957 static unsigned int fragmentation_score_zone(struct zone *zone) 1958 { 1959 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); 1960 } 1961 1962 /* 1963 * A weighted zone's fragmentation score is the external fragmentation 1964 * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It 1965 * returns a value in the range [0, 100]. 1966 * 1967 * The scaling factor ensures that proactive compaction focuses on larger 1968 * zones like ZONE_NORMAL, rather than smaller, specialized zones like 1969 * ZONE_DMA32. For smaller zones, the score value remains close to zero, 1970 * and thus never exceeds the high threshold for proactive compaction. 
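 *
 * For illustration: even a fully fragmented 1GB ZONE_DMA32 on a 64GB node
 * contributes only on the order of one point (100 * 1/64) to the node-wide
 * score, so it cannot by itself trigger proactive compaction.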
1971 */ 1972 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) 1973 { 1974 unsigned long score; 1975 1976 score = zone->present_pages * fragmentation_score_zone(zone); 1977 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); 1978 } 1979 1980 /* 1981 * The per-node proactive (background) compaction process is started by its 1982 * corresponding kcompactd thread when the node's fragmentation score 1983 * exceeds the high threshold. The compaction process remains active till 1984 * the node's score falls below the low threshold, or one of the back-off 1985 * conditions is met. 1986 */ 1987 static unsigned int fragmentation_score_node(pg_data_t *pgdat) 1988 { 1989 unsigned int score = 0; 1990 int zoneid; 1991 1992 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 1993 struct zone *zone; 1994 1995 zone = &pgdat->node_zones[zoneid]; 1996 score += fragmentation_score_zone_weighted(zone); 1997 } 1998 1999 return score; 2000 } 2001 2002 static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) 2003 { 2004 unsigned int wmark_low; 2005 2006 /* 2007 * Cap the low watermark to avoid excessive compaction 2008 * activity in case a user sets the proactiveness tunable 2009 * close to 100 (maximum). 2010 */ 2011 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); 2012 return low ? wmark_low : min(wmark_low + 10, 100U); 2013 } 2014 2015 static bool should_proactive_compact_node(pg_data_t *pgdat) 2016 { 2017 int wmark_high; 2018 2019 if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) 2020 return false; 2021 2022 wmark_high = fragmentation_score_wmark(pgdat, false); 2023 return fragmentation_score_node(pgdat) > wmark_high; 2024 } 2025 2026 static enum compact_result __compact_finished(struct compact_control *cc) 2027 { 2028 unsigned int order; 2029 const int migratetype = cc->migratetype; 2030 int ret; 2031 2032 /* Compaction run completes if the migrate and free scanner meet */ 2033 if (compact_scanners_met(cc)) { 2034 /* Let the next compaction start anew. */ 2035 reset_cached_positions(cc->zone); 2036 2037 /* 2038 * Mark that the PG_migrate_skip information should be cleared 2039 * by kswapd when it goes to sleep. kcompactd does not set the 2040 * flag itself as the decision to be clear should be directly 2041 * based on an allocation request. 2042 */ 2043 if (cc->direct_compaction) 2044 cc->zone->compact_blockskip_flush = true; 2045 2046 if (cc->whole_zone) 2047 return COMPACT_COMPLETE; 2048 else 2049 return COMPACT_PARTIAL_SKIPPED; 2050 } 2051 2052 if (cc->proactive_compaction) { 2053 int score, wmark_low; 2054 pg_data_t *pgdat; 2055 2056 pgdat = cc->zone->zone_pgdat; 2057 if (kswapd_is_running(pgdat)) 2058 return COMPACT_PARTIAL_SKIPPED; 2059 2060 score = fragmentation_score_zone(cc->zone); 2061 wmark_low = fragmentation_score_wmark(pgdat, true); 2062 2063 if (score > wmark_low) 2064 ret = COMPACT_CONTINUE; 2065 else 2066 ret = COMPACT_SUCCESS; 2067 2068 goto out; 2069 } 2070 2071 if (is_via_compact_memory(cc->order)) 2072 return COMPACT_CONTINUE; 2073 2074 /* 2075 * Always finish scanning a pageblock to reduce the possibility of 2076 * fallbacks in the future. This is particularly important when 2077 * migration source is unmovable/reclaimable but it's not worth 2078 * special casing. 2079 */ 2080 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 2081 return COMPACT_CONTINUE; 2082 2083 /* Direct compactor: Is a suitable page free? 
*/ 2084 ret = COMPACT_NO_SUITABLE_PAGE; 2085 for (order = cc->order; order < MAX_ORDER; order++) { 2086 struct free_area *area = &cc->zone->free_area[order]; 2087 bool can_steal; 2088 2089 /* Job done if page is free of the right migratetype */ 2090 if (!free_area_empty(area, migratetype)) 2091 return COMPACT_SUCCESS; 2092 2093 #ifdef CONFIG_CMA 2094 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 2095 if (migratetype == MIGRATE_MOVABLE && 2096 !free_area_empty(area, MIGRATE_CMA)) 2097 return COMPACT_SUCCESS; 2098 #endif 2099 /* 2100 * Job done if allocation would steal freepages from 2101 * other migratetype buddy lists. 2102 */ 2103 if (find_suitable_fallback(area, order, migratetype, 2104 true, &can_steal) != -1) { 2105 2106 /* movable pages are OK in any pageblock */ 2107 if (migratetype == MIGRATE_MOVABLE) 2108 return COMPACT_SUCCESS; 2109 2110 /* 2111 * We are stealing for a non-movable allocation. Make 2112 * sure we finish compacting the current pageblock 2113 * first so it is as free as possible and we won't 2114 * have to steal another one soon. This only applies 2115 * to sync compaction, as async compaction operates 2116 * on pageblocks of the same migratetype. 2117 */ 2118 if (cc->mode == MIGRATE_ASYNC || 2119 IS_ALIGNED(cc->migrate_pfn, 2120 pageblock_nr_pages)) { 2121 return COMPACT_SUCCESS; 2122 } 2123 2124 ret = COMPACT_CONTINUE; 2125 break; 2126 } 2127 } 2128 2129 out: 2130 if (cc->contended || fatal_signal_pending(current)) 2131 ret = COMPACT_CONTENDED; 2132 2133 return ret; 2134 } 2135 2136 static enum compact_result compact_finished(struct compact_control *cc) 2137 { 2138 int ret; 2139 2140 ret = __compact_finished(cc); 2141 trace_mm_compaction_finished(cc->zone, cc->order, ret); 2142 if (ret == COMPACT_NO_SUITABLE_PAGE) 2143 ret = COMPACT_CONTINUE; 2144 2145 return ret; 2146 } 2147 2148 static enum compact_result __compaction_suitable(struct zone *zone, int order, 2149 unsigned int alloc_flags, 2150 int highest_zoneidx, 2151 unsigned long wmark_target) 2152 { 2153 unsigned long watermark; 2154 2155 if (is_via_compact_memory(order)) 2156 return COMPACT_CONTINUE; 2157 2158 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 2159 /* 2160 * If watermarks for high-order allocation are already met, there 2161 * should be no need for compaction at all. 2162 */ 2163 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 2164 alloc_flags)) 2165 return COMPACT_SUCCESS; 2166 2167 /* 2168 * Watermarks for order-0 must be met for compaction to be able to 2169 * isolate free pages for migration targets. This means that the 2170 * watermark and alloc_flags have to match, or be more pessimistic than 2171 * the check in __isolate_free_page(). We don't use the direct 2172 * compactor's alloc_flags, as they are not relevant for freepage 2173 * isolation. We however do use the direct compactor's highest_zoneidx 2174 * to skip over zones where lowmem reserves would prevent allocation 2175 * even if compaction succeeds. 2176 * For costly orders, we require low watermark instead of min for 2177 * compaction to proceed to increase its chances. 2178 * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2179 * suitable migration targets 2180 */ 2181 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
2182 low_wmark_pages(zone) : min_wmark_pages(zone); 2183 watermark += compact_gap(order); 2184 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2185 ALLOC_CMA, wmark_target)) 2186 return COMPACT_SKIPPED; 2187 2188 return COMPACT_CONTINUE; 2189 } 2190 2191 /* 2192 * compaction_suitable: Is this suitable to run compaction on this zone now? 2193 * Returns 2194 * COMPACT_SKIPPED - If there are too few free pages for compaction 2195 * COMPACT_SUCCESS - If the allocation would succeed without compaction 2196 * COMPACT_CONTINUE - If compaction should run now 2197 */ 2198 enum compact_result compaction_suitable(struct zone *zone, int order, 2199 unsigned int alloc_flags, 2200 int highest_zoneidx) 2201 { 2202 enum compact_result ret; 2203 int fragindex; 2204 2205 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, 2206 zone_page_state(zone, NR_FREE_PAGES)); 2207 /* 2208 * fragmentation index determines if allocation failures are due to 2209 * low memory or external fragmentation 2210 * 2211 * index of -1000 would imply allocations might succeed depending on 2212 * watermarks, but we already failed the high-order watermark check 2213 * index towards 0 implies failure is due to lack of memory 2214 * index towards 1000 implies failure is due to fragmentation 2215 * 2216 * Only compact if a failure would be due to fragmentation. Also 2217 * ignore fragindex for non-costly orders where the alternative to 2218 * a successful reclaim/compaction is OOM. Fragindex and the 2219 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent 2220 * excessive compaction for costly orders, but it should not be at the 2221 * expense of system stability. 2222 */ 2223 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 2224 fragindex = fragmentation_index(zone, order); 2225 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 2226 ret = COMPACT_NOT_SUITABLE_ZONE; 2227 } 2228 2229 trace_mm_compaction_suitable(zone, order, ret); 2230 if (ret == COMPACT_NOT_SUITABLE_ZONE) 2231 ret = COMPACT_SKIPPED; 2232 2233 return ret; 2234 } 2235 2236 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 2237 int alloc_flags) 2238 { 2239 struct zone *zone; 2240 struct zoneref *z; 2241 2242 /* 2243 * Make sure at least one zone would pass __compaction_suitable if we continue 2244 * retrying the reclaim. 2245 */ 2246 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2247 ac->highest_zoneidx, ac->nodemask) { 2248 unsigned long available; 2249 enum compact_result compact_result; 2250 2251 /* 2252 * Do not consider all the reclaimable memory because we do not 2253 * want to thrash just for a single high order allocation which 2254 * is not even guaranteed to appear, even if __compaction_suitable 2255 * is happy about the watermark check.
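 *
 * For example, for an order-3 request only a third of the zone's
 * reclaimable pages is credited on top of the snapshot of free pages
 * in the calculation below.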
2256 */ 2257 available = zone_reclaimable_pages(zone) / order; 2258 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 2259 compact_result = __compaction_suitable(zone, order, alloc_flags, 2260 ac->highest_zoneidx, available); 2261 if (compact_result != COMPACT_SKIPPED) 2262 return true; 2263 } 2264 2265 return false; 2266 } 2267 2268 static enum compact_result 2269 compact_zone(struct compact_control *cc, struct capture_control *capc) 2270 { 2271 enum compact_result ret; 2272 unsigned long start_pfn = cc->zone->zone_start_pfn; 2273 unsigned long end_pfn = zone_end_pfn(cc->zone); 2274 unsigned long last_migrated_pfn; 2275 const bool sync = cc->mode != MIGRATE_ASYNC; 2276 bool update_cached; 2277 2278 /* 2279 * These counters track activities during zone compaction. Initialize 2280 * them before compacting a new zone. 2281 */ 2282 cc->total_migrate_scanned = 0; 2283 cc->total_free_scanned = 0; 2284 cc->nr_migratepages = 0; 2285 cc->nr_freepages = 0; 2286 INIT_LIST_HEAD(&cc->freepages); 2287 INIT_LIST_HEAD(&cc->migratepages); 2288 2289 cc->migratetype = gfp_migratetype(cc->gfp_mask); 2290 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 2291 cc->highest_zoneidx); 2292 /* Compaction is unnecessary or is likely to fail */ 2293 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 2294 return ret; 2295 2296 /* huh, compaction_suitable is returning something unexpected */ 2297 VM_BUG_ON(ret != COMPACT_CONTINUE); 2298 2299 /* 2300 * Clear pageblock skip if there were failures recently and compaction 2301 * is about to be retried after being deferred. 2302 */ 2303 if (compaction_restarting(cc->zone, cc->order)) 2304 __reset_isolation_suitable(cc->zone); 2305 2306 /* 2307 * Set up to move all movable pages to the end of the zone. Use cached 2308 * information on where the scanners should start (unless we explicitly 2309 * want to compact the whole zone), but check that it is initialised 2310 * by ensuring the values are within zone boundaries. 2311 */ 2312 cc->fast_start_pfn = 0; 2313 if (cc->whole_zone) { 2314 cc->migrate_pfn = start_pfn; 2315 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2316 } else { 2317 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 2318 cc->free_pfn = cc->zone->compact_cached_free_pfn; 2319 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 2320 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2321 cc->zone->compact_cached_free_pfn = cc->free_pfn; 2322 } 2323 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2324 cc->migrate_pfn = start_pfn; 2325 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 2326 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2327 } 2328 2329 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2330 cc->whole_zone = true; 2331 } 2332 2333 last_migrated_pfn = 0; 2334 2335 /* 2336 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 2337 * the basis that some migrations will fail in ASYNC mode. However, 2338 * if the cached PFNs match and pageblocks are skipped due to having 2339 * no isolation candidates, then the sync state does not matter. 2340 * Until a pageblock with isolation candidates is found, keep the 2341 * cached PFNs in sync to avoid revisiting the same blocks.
2342 */ 2343 update_cached = !sync && 2344 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 2345 2346 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 2347 cc->free_pfn, end_pfn, sync); 2348 2349 /* lru_add_drain_all() could be expensive as it involves other CPUs */ 2350 lru_add_drain(); 2351 2352 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 2353 int err; 2354 unsigned long iteration_start_pfn = cc->migrate_pfn; 2355 2356 /* 2357 * Avoid multiple rescans which can happen if a page cannot be 2358 * isolated (dirty/writeback in async mode) or if the migrated 2359 * pages are being allocated before the pageblock is cleared. 2360 * The first rescan will capture the entire pageblock for 2361 * migration. If it fails, it'll be marked skip and scanning 2362 * will proceed as normal. 2363 */ 2364 cc->rescan = false; 2365 if (pageblock_start_pfn(last_migrated_pfn) == 2366 pageblock_start_pfn(iteration_start_pfn)) { 2367 cc->rescan = true; 2368 } 2369 2370 switch (isolate_migratepages(cc)) { 2371 case ISOLATE_ABORT: 2372 ret = COMPACT_CONTENDED; 2373 putback_movable_pages(&cc->migratepages); 2374 cc->nr_migratepages = 0; 2375 goto out; 2376 case ISOLATE_NONE: 2377 if (update_cached) { 2378 cc->zone->compact_cached_migrate_pfn[1] = 2379 cc->zone->compact_cached_migrate_pfn[0]; 2380 } 2381 2382 /* 2383 * We haven't isolated and migrated anything, but 2384 * there might still be unflushed migrations from 2385 * the previous cc->order aligned block. 2386 */ 2387 goto check_drain; 2388 case ISOLATE_SUCCESS: 2389 update_cached = false; 2390 last_migrated_pfn = iteration_start_pfn; 2391 } 2392 2393 err = migrate_pages(&cc->migratepages, compaction_alloc, 2394 compaction_free, (unsigned long)cc, cc->mode, 2395 MR_COMPACTION, NULL); 2396 2397 trace_mm_compaction_migratepages(cc->nr_migratepages, err, 2398 &cc->migratepages); 2399 2400 /* All pages were either migrated or will be released */ 2401 cc->nr_migratepages = 0; 2402 if (err) { 2403 putback_movable_pages(&cc->migratepages); 2404 /* 2405 * migrate_pages() may return -ENOMEM when scanners meet 2406 * and we want compact_finished() to detect it 2407 */ 2408 if (err == -ENOMEM && !compact_scanners_met(cc)) { 2409 ret = COMPACT_CONTENDED; 2410 goto out; 2411 } 2412 /* 2413 * We failed to migrate at least one page in the current 2414 * order-aligned block, so skip the rest of it. 2415 */ 2416 if (cc->direct_compaction && 2417 (cc->mode == MIGRATE_ASYNC)) { 2418 cc->migrate_pfn = block_end_pfn( 2419 cc->migrate_pfn - 1, cc->order); 2420 /* Draining pcplists is useless in this case */ 2421 last_migrated_pfn = 0; 2422 } 2423 } 2424 2425 check_drain: 2426 /* 2427 * Has the migration scanner moved away from the previous 2428 * cc->order aligned block where we migrated from? If yes, 2429 * flush the pages that were freed, so that they can merge and 2430 * compact_finished() can detect immediately if allocation 2431 * would succeed.
2432 */ 2433 if (cc->order > 0 && last_migrated_pfn) { 2434 unsigned long current_block_start = 2435 block_start_pfn(cc->migrate_pfn, cc->order); 2436 2437 if (last_migrated_pfn < current_block_start) { 2438 lru_add_drain_cpu_zone(cc->zone); 2439 /* No more flushing until we migrate again */ 2440 last_migrated_pfn = 0; 2441 } 2442 } 2443 2444 /* Stop if a page has been captured */ 2445 if (capc && capc->page) { 2446 ret = COMPACT_SUCCESS; 2447 break; 2448 } 2449 } 2450 2451 out: 2452 /* 2453 * Release free pages and update where the free scanner should restart, 2454 * so we don't leave any returned pages behind in the next attempt. 2455 */ 2456 if (cc->nr_freepages > 0) { 2457 unsigned long free_pfn = release_freepages(&cc->freepages); 2458 2459 cc->nr_freepages = 0; 2460 VM_BUG_ON(free_pfn == 0); 2461 /* The cached pfn is always the first in a pageblock */ 2462 free_pfn = pageblock_start_pfn(free_pfn); 2463 /* 2464 * Only go back, not forward. The cached pfn might have been 2465 * already reset to zone end in compact_finished() 2466 */ 2467 if (free_pfn > cc->zone->compact_cached_free_pfn) 2468 cc->zone->compact_cached_free_pfn = free_pfn; 2469 } 2470 2471 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 2472 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 2473 2474 trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 2475 cc->free_pfn, end_pfn, sync, ret); 2476 2477 return ret; 2478 } 2479 2480 static enum compact_result compact_zone_order(struct zone *zone, int order, 2481 gfp_t gfp_mask, enum compact_priority prio, 2482 unsigned int alloc_flags, int highest_zoneidx, 2483 struct page **capture) 2484 { 2485 enum compact_result ret; 2486 struct compact_control cc = { 2487 .order = order, 2488 .search_order = order, 2489 .gfp_mask = gfp_mask, 2490 .zone = zone, 2491 .mode = (prio == COMPACT_PRIO_ASYNC) ? 2492 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2493 .alloc_flags = alloc_flags, 2494 .highest_zoneidx = highest_zoneidx, 2495 .direct_compaction = true, 2496 .whole_zone = (prio == MIN_COMPACT_PRIORITY), 2497 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 2498 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 2499 }; 2500 struct capture_control capc = { 2501 .cc = &cc, 2502 .page = NULL, 2503 }; 2504 2505 /* 2506 * Make sure the structs are really initialized before we expose the 2507 * capture control, in case we are interrupted and the interrupt handler 2508 * frees a page. 2509 */ 2510 barrier(); 2511 WRITE_ONCE(current->capture_control, &capc); 2512 2513 ret = compact_zone(&cc, &capc); 2514 2515 VM_BUG_ON(!list_empty(&cc.freepages)); 2516 VM_BUG_ON(!list_empty(&cc.migratepages)); 2517 2518 /* 2519 * Make sure we hide capture control first before we read the captured 2520 * page pointer, otherwise an interrupt could free and capture a page 2521 * and we would leak it. 2522 */ 2523 WRITE_ONCE(current->capture_control, NULL); 2524 *capture = READ_ONCE(capc.page); 2525 /* 2526 * Technically, it is also possible that compaction is skipped but 2527 * the page is still captured out of luck(IRQ came and freed the page). 2528 * Returning COMPACT_SUCCESS in such cases helps in properly accounting 2529 * the COMPACT[STALL|FAIL] when compaction is skipped. 
2530 */ 2531 if (*capture) 2532 ret = COMPACT_SUCCESS; 2533 2534 return ret; 2535 } 2536 2537 int sysctl_extfrag_threshold = 500; 2538 2539 /** 2540 * try_to_compact_pages - Direct compact to satisfy a high-order allocation 2541 * @gfp_mask: The GFP mask of the current allocation 2542 * @order: The order of the current allocation 2543 * @alloc_flags: The allocation flags of the current allocation 2544 * @ac: The context of current allocation 2545 * @prio: Determines how hard direct compaction should try to succeed 2546 * @capture: Pointer to free page created by compaction will be stored here 2547 * 2548 * This is the main entry point for direct page compaction. 2549 */ 2550 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2551 unsigned int alloc_flags, const struct alloc_context *ac, 2552 enum compact_priority prio, struct page **capture) 2553 { 2554 int may_perform_io = gfp_mask & __GFP_IO; 2555 struct zoneref *z; 2556 struct zone *zone; 2557 enum compact_result rc = COMPACT_SKIPPED; 2558 2559 /* 2560 * Check if the GFP flags allow compaction - GFP_NOIO is really 2561 * tricky context because the migration might require IO 2562 */ 2563 if (!may_perform_io) 2564 return COMPACT_SKIPPED; 2565 2566 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2567 2568 /* Compact each zone in the list */ 2569 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2570 ac->highest_zoneidx, ac->nodemask) { 2571 enum compact_result status; 2572 2573 if (prio > MIN_COMPACT_PRIORITY 2574 && compaction_deferred(zone, order)) { 2575 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 2576 continue; 2577 } 2578 2579 status = compact_zone_order(zone, order, gfp_mask, prio, 2580 alloc_flags, ac->highest_zoneidx, capture); 2581 rc = max(status, rc); 2582 2583 /* The allocation should succeed, stop compacting */ 2584 if (status == COMPACT_SUCCESS) { 2585 /* 2586 * We think the allocation will succeed in this zone, 2587 * but it is not certain, hence the false. The caller 2588 * will repeat this with true if allocation indeed 2589 * succeeds in this zone. 2590 */ 2591 compaction_defer_reset(zone, order, false); 2592 2593 break; 2594 } 2595 2596 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2597 status == COMPACT_PARTIAL_SKIPPED)) 2598 /* 2599 * We think that allocation won't succeed in this zone 2600 * so we defer compaction there. If it ends up 2601 * succeeding after all, it will be reset. 2602 */ 2603 defer_compaction(zone, order); 2604 2605 /* 2606 * We might have stopped compacting due to need_resched() in 2607 * async compaction, or due to a fatal signal detected. In that 2608 * case do not try further zones 2609 */ 2610 if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2611 || fatal_signal_pending(current)) 2612 break; 2613 } 2614 2615 return rc; 2616 } 2617 2618 /* 2619 * Compact all zones within a node till each zone's fragmentation score 2620 * reaches within proactive compaction thresholds (as determined by the 2621 * proactiveness tunable). 2622 * 2623 * It is possible that the function returns before reaching score targets 2624 * due to various back-off conditions, such as, contention on per-node or 2625 * per-zone locks. 
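 *
 * With the default proactiveness of 20, for instance, compaction kicks in
 * once the node's fragmentation score exceeds 90 and keeps going until a
 * zone's score drops to 80 or below.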
2626 */ 2627 static void proactive_compact_node(pg_data_t *pgdat) 2628 { 2629 int zoneid; 2630 struct zone *zone; 2631 struct compact_control cc = { 2632 .order = -1, 2633 .mode = MIGRATE_SYNC_LIGHT, 2634 .ignore_skip_hint = true, 2635 .whole_zone = true, 2636 .gfp_mask = GFP_KERNEL, 2637 .proactive_compaction = true, 2638 }; 2639 2640 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2641 zone = &pgdat->node_zones[zoneid]; 2642 if (!populated_zone(zone)) 2643 continue; 2644 2645 cc.zone = zone; 2646 2647 compact_zone(&cc, NULL); 2648 2649 VM_BUG_ON(!list_empty(&cc.freepages)); 2650 VM_BUG_ON(!list_empty(&cc.migratepages)); 2651 } 2652 } 2653 2654 /* Compact all zones within a node */ 2655 static void compact_node(int nid) 2656 { 2657 pg_data_t *pgdat = NODE_DATA(nid); 2658 int zoneid; 2659 struct zone *zone; 2660 struct compact_control cc = { 2661 .order = -1, 2662 .mode = MIGRATE_SYNC, 2663 .ignore_skip_hint = true, 2664 .whole_zone = true, 2665 .gfp_mask = GFP_KERNEL, 2666 }; 2667 2668 2669 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2670 2671 zone = &pgdat->node_zones[zoneid]; 2672 if (!populated_zone(zone)) 2673 continue; 2674 2675 cc.zone = zone; 2676 2677 compact_zone(&cc, NULL); 2678 2679 VM_BUG_ON(!list_empty(&cc.freepages)); 2680 VM_BUG_ON(!list_empty(&cc.migratepages)); 2681 } 2682 } 2683 2684 /* Compact all nodes in the system */ 2685 static void compact_nodes(void) 2686 { 2687 int nid; 2688 2689 /* Flush pending updates to the LRU lists */ 2690 lru_add_drain_all(); 2691 2692 for_each_online_node(nid) 2693 compact_node(nid); 2694 } 2695 2696 /* 2697 * Tunable for proactive compaction. It determines how 2698 * aggressively the kernel should compact memory in the 2699 * background. It takes values in the range [0, 100]. 2700 */ 2701 unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 2702 2703 int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 2704 void *buffer, size_t *length, loff_t *ppos) 2705 { 2706 int rc, nid; 2707 2708 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 2709 if (rc) 2710 return rc; 2711 2712 if (write && sysctl_compaction_proactiveness) { 2713 for_each_online_node(nid) { 2714 pg_data_t *pgdat = NODE_DATA(nid); 2715 2716 if (pgdat->proactive_compact_trigger) 2717 continue; 2718 2719 pgdat->proactive_compact_trigger = true; 2720 wake_up_interruptible(&pgdat->kcompactd_wait); 2721 } 2722 } 2723 2724 return 0; 2725 } 2726 2727 /* 2728 * This is the entry point for compacting all nodes via 2729 * /proc/sys/vm/compact_memory 2730 */ 2731 int sysctl_compaction_handler(struct ctl_table *table, int write, 2732 void *buffer, size_t *length, loff_t *ppos) 2733 { 2734 if (write) 2735 compact_nodes(); 2736 2737 return 0; 2738 } 2739 2740 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 2741 static ssize_t compact_store(struct device *dev, 2742 struct device_attribute *attr, 2743 const char *buf, size_t count) 2744 { 2745 int nid = dev->id; 2746 2747 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 2748 /* Flush pending updates to the LRU lists */ 2749 lru_add_drain_all(); 2750 2751 compact_node(nid); 2752 } 2753 2754 return count; 2755 } 2756 static DEVICE_ATTR_WO(compact); 2757 2758 int compaction_register_node(struct node *node) 2759 { 2760 return device_create_file(&node->dev, &dev_attr_compact); 2761 } 2762 2763 void compaction_unregister_node(struct node *node) 2764 { 2765 return device_remove_file(&node->dev, &dev_attr_compact); 2766 } 2767 #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2768 2769 
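/*
 * For reference, the interfaces above are typically exercised from
 * userspace along the lines of:
 *
 *	echo 1 > /proc/sys/vm/compact_memory			(compact all nodes)
 *	echo 1 > /sys/devices/system/node/node0/compact		(compact one node)
 *	echo 30 > /proc/sys/vm/compaction_proactiveness		(tune proactiveness)
 */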
static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2770 { 2771 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 2772 pgdat->proactive_compact_trigger; 2773 } 2774 2775 static bool kcompactd_node_suitable(pg_data_t *pgdat) 2776 { 2777 int zoneid; 2778 struct zone *zone; 2779 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2780 2781 for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2782 zone = &pgdat->node_zones[zoneid]; 2783 2784 if (!populated_zone(zone)) 2785 continue; 2786 2787 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 2788 highest_zoneidx) == COMPACT_CONTINUE) 2789 return true; 2790 } 2791 2792 return false; 2793 } 2794 2795 static void kcompactd_do_work(pg_data_t *pgdat) 2796 { 2797 /* 2798 * With no special task, compact all zones so that a page of requested 2799 * order is allocatable. 2800 */ 2801 int zoneid; 2802 struct zone *zone; 2803 struct compact_control cc = { 2804 .order = pgdat->kcompactd_max_order, 2805 .search_order = pgdat->kcompactd_max_order, 2806 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2807 .mode = MIGRATE_SYNC_LIGHT, 2808 .ignore_skip_hint = false, 2809 .gfp_mask = GFP_KERNEL, 2810 }; 2811 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2812 cc.highest_zoneidx); 2813 count_compact_event(KCOMPACTD_WAKE); 2814 2815 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2816 int status; 2817 2818 zone = &pgdat->node_zones[zoneid]; 2819 if (!populated_zone(zone)) 2820 continue; 2821 2822 if (compaction_deferred(zone, cc.order)) 2823 continue; 2824 2825 if (compaction_suitable(zone, cc.order, 0, zoneid) != 2826 COMPACT_CONTINUE) 2827 continue; 2828 2829 if (kthread_should_stop()) 2830 return; 2831 2832 cc.zone = zone; 2833 status = compact_zone(&cc, NULL); 2834 2835 if (status == COMPACT_SUCCESS) { 2836 compaction_defer_reset(zone, cc.order, false); 2837 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2838 /* 2839 * Buddy pages may become stranded on pcps that could 2840 * otherwise coalesce on the zone's free area for 2841 * order >= cc.order. This is ratelimited by the 2842 * upcoming deferral. 2843 */ 2844 drain_all_pages(zone); 2845 2846 /* 2847 * We use sync migration mode here, so we defer like 2848 * sync direct compaction does. 2849 */ 2850 defer_compaction(zone, cc.order); 2851 } 2852 2853 count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 2854 cc.total_migrate_scanned); 2855 count_compact_events(KCOMPACTD_FREE_SCANNED, 2856 cc.total_free_scanned); 2857 2858 VM_BUG_ON(!list_empty(&cc.freepages)); 2859 VM_BUG_ON(!list_empty(&cc.migratepages)); 2860 } 2861 2862 /* 2863 * Regardless of success, we are done until woken up next. But remember 2864 * the requested order/highest_zoneidx in case it was higher/tighter 2865 * than our current ones 2866 */ 2867 if (pgdat->kcompactd_max_order <= cc.order) 2868 pgdat->kcompactd_max_order = 0; 2869 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 2870 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2871 } 2872 2873 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2874 { 2875 if (!order) 2876 return; 2877 2878 if (pgdat->kcompactd_max_order < order) 2879 pgdat->kcompactd_max_order = order; 2880 2881 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 2882 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2883 2884 /* 2885 * Pairs with implicit barrier in wait_event_freezable() 2886 * such that wakeups are not missed. 
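 * The sleeper is kcompactd() below, blocked in
 * wait_event_freezable_timeout() on kcompactd_wait.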
2887 */ 2888 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2889 return; 2890 2891 if (!kcompactd_node_suitable(pgdat)) 2892 return; 2893 2894 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2895 highest_zoneidx); 2896 wake_up_interruptible(&pgdat->kcompactd_wait); 2897 } 2898 2899 /* 2900 * The background compaction daemon, started as a kernel thread 2901 * from the init process. 2902 */ 2903 static int kcompactd(void *p) 2904 { 2905 pg_data_t *pgdat = (pg_data_t *)p; 2906 struct task_struct *tsk = current; 2907 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); 2908 long timeout = default_timeout; 2909 2910 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2911 2912 if (!cpumask_empty(cpumask)) 2913 set_cpus_allowed_ptr(tsk, cpumask); 2914 2915 set_freezable(); 2916 2917 pgdat->kcompactd_max_order = 0; 2918 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2919 2920 while (!kthread_should_stop()) { 2921 unsigned long pflags; 2922 2923 /* 2924 * Avoid the unnecessary wakeup for proactive compaction 2925 * when it is disabled. 2926 */ 2927 if (!sysctl_compaction_proactiveness) 2928 timeout = MAX_SCHEDULE_TIMEOUT; 2929 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2930 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, 2931 kcompactd_work_requested(pgdat), timeout) && 2932 !pgdat->proactive_compact_trigger) { 2933 2934 psi_memstall_enter(&pflags); 2935 kcompactd_do_work(pgdat); 2936 psi_memstall_leave(&pflags); 2937 /* 2938 * Reset the timeout value. The defer timeout from 2939 * proactive compaction is lost here but that is fine, 2940 * as when the condition of the zone has changed 2941 * substantially, carrying on with the previous defer 2942 * interval is not useful. 2943 */ 2944 timeout = default_timeout; 2945 continue; 2946 } 2947 2948 /* 2949 * Start the proactive work with default timeout. Based 2950 * on the fragmentation score, this timeout is updated. 2951 */ 2952 timeout = default_timeout; 2953 if (should_proactive_compact_node(pgdat)) { 2954 unsigned int prev_score, score; 2955 2956 prev_score = fragmentation_score_node(pgdat); 2957 proactive_compact_node(pgdat); 2958 score = fragmentation_score_node(pgdat); 2959 /* 2960 * Defer proactive compaction if the fragmentation 2961 * score did not go down, i.e. no progress was made. 2962 */ 2963 if (unlikely(score >= prev_score)) 2964 timeout = 2965 default_timeout << COMPACT_MAX_DEFER_SHIFT; 2966 } 2967 if (unlikely(pgdat->proactive_compact_trigger)) 2968 pgdat->proactive_compact_trigger = false; 2969 } 2970 2971 return 0; 2972 } 2973 2974 /* 2975 * This kcompactd start function will be called by init and node-hot-add. 2976 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added. 2977 */ 2978 int kcompactd_run(int nid) 2979 { 2980 pg_data_t *pgdat = NODE_DATA(nid); 2981 int ret = 0; 2982 2983 if (pgdat->kcompactd) 2984 return 0; 2985 2986 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 2987 if (IS_ERR(pgdat->kcompactd)) { 2988 pr_err("Failed to start kcompactd on node %d\n", nid); 2989 ret = PTR_ERR(pgdat->kcompactd); 2990 pgdat->kcompactd = NULL; 2991 } 2992 return ret; 2993 } 2994 2995 /* 2996 * Called by memory hotplug when all memory in a node is offlined. Caller must 2997 * hold mem_hotplug_begin/end().
2998 */ 2999 void kcompactd_stop(int nid) 3000 { 3001 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 3002 3003 if (kcompactd) { 3004 kthread_stop(kcompactd); 3005 NODE_DATA(nid)->kcompactd = NULL; 3006 } 3007 } 3008 3009 /* 3010 * It's optimal to keep kcompactd on the same CPUs as their memory, but 3011 * not required for correctness. So if the last cpu in a node goes 3012 * away, we get changed to run anywhere: as the first one comes back, 3013 * restore their cpu bindings. 3014 */ 3015 static int kcompactd_cpu_online(unsigned int cpu) 3016 { 3017 int nid; 3018 3019 for_each_node_state(nid, N_MEMORY) { 3020 pg_data_t *pgdat = NODE_DATA(nid); 3021 const struct cpumask *mask; 3022 3023 mask = cpumask_of_node(pgdat->node_id); 3024 3025 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3026 /* One of our CPUs online: restore mask */ 3027 set_cpus_allowed_ptr(pgdat->kcompactd, mask); 3028 } 3029 return 0; 3030 } 3031 3032 static int __init kcompactd_init(void) 3033 { 3034 int nid; 3035 int ret; 3036 3037 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3038 "mm/compaction:online", 3039 kcompactd_cpu_online, NULL); 3040 if (ret < 0) { 3041 pr_err("kcompactd: failed to register hotplug callbacks.\n"); 3042 return ret; 3043 } 3044 3045 for_each_node_state(nid, N_MEMORY) 3046 kcompactd_run(nid); 3047 return 0; 3048 } 3049 subsys_initcall(kcompactd_init) 3050 3051 #endif /* CONFIG_COMPACTION */ 3052