1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/mm/compaction.c 4 * 5 * Memory compaction for the reduction of external fragmentation. Note that 6 * this heavily depends upon page migration to do all the real heavy 7 * lifting 8 * 9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 10 */ 11 #include <linux/cpu.h> 12 #include <linux/swap.h> 13 #include <linux/migrate.h> 14 #include <linux/compaction.h> 15 #include <linux/mm_inline.h> 16 #include <linux/sched/signal.h> 17 #include <linux/backing-dev.h> 18 #include <linux/sysctl.h> 19 #include <linux/sysfs.h> 20 #include <linux/page-isolation.h> 21 #include <linux/kasan.h> 22 #include <linux/kthread.h> 23 #include <linux/freezer.h> 24 #include <linux/page_owner.h> 25 #include <linux/psi.h> 26 #include "internal.h" 27 28 #ifdef CONFIG_COMPACTION 29 /* 30 * Fragmentation score check interval for proactive compaction purposes. 31 */ 32 #define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500) 33 34 static inline void count_compact_event(enum vm_event_item item) 35 { 36 count_vm_event(item); 37 } 38 39 static inline void count_compact_events(enum vm_event_item item, long delta) 40 { 41 count_vm_events(item, delta); 42 } 43 #else 44 #define count_compact_event(item) do { } while (0) 45 #define count_compact_events(item, delta) do { } while (0) 46 #endif 47 48 #if defined CONFIG_COMPACTION || defined CONFIG_CMA 49 50 #define CREATE_TRACE_POINTS 51 #include <trace/events/compaction.h> 52 53 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) 54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) 55 56 /* 57 * Page order with-respect-to which proactive compaction 58 * calculates external fragmentation, which is used as 59 * the "fragmentation score" of a node/zone. 60 */ 61 #if defined CONFIG_TRANSPARENT_HUGEPAGE 62 #define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER 63 #elif defined CONFIG_HUGETLBFS 64 #define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER 65 #else 66 #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) 67 #endif 68 69 static unsigned long release_freepages(struct list_head *freelist) 70 { 71 struct page *page, *next; 72 unsigned long high_pfn = 0; 73 74 list_for_each_entry_safe(page, next, freelist, lru) { 75 unsigned long pfn = page_to_pfn(page); 76 list_del(&page->lru); 77 __free_page(page); 78 if (pfn > high_pfn) 79 high_pfn = pfn; 80 } 81 82 return high_pfn; 83 } 84 85 static void split_map_pages(struct list_head *list) 86 { 87 unsigned int i, order, nr_pages; 88 struct page *page, *next; 89 LIST_HEAD(tmp_list); 90 91 list_for_each_entry_safe(page, next, list, lru) { 92 list_del(&page->lru); 93 94 order = page_private(page); 95 nr_pages = 1 << order; 96 97 post_alloc_hook(page, order, __GFP_MOVABLE); 98 if (order) 99 split_page(page, order); 100 101 for (i = 0; i < nr_pages; i++) { 102 list_add(&page->lru, &tmp_list); 103 page++; 104 } 105 } 106 107 list_splice(&tmp_list, list); 108 } 109 110 #ifdef CONFIG_COMPACTION 111 bool PageMovable(struct page *page) 112 { 113 const struct movable_operations *mops; 114 115 VM_BUG_ON_PAGE(!PageLocked(page), page); 116 if (!__PageMovable(page)) 117 return false; 118 119 mops = page_movable_ops(page); 120 if (mops) 121 return true; 122 123 return false; 124 } 125 126 void __SetPageMovable(struct page *page, const struct movable_operations *mops) 127 { 128 VM_BUG_ON_PAGE(!PageLocked(page), page); 129 VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page); 130 page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); 131 } 132 
EXPORT_SYMBOL(__SetPageMovable); 133 134 void __ClearPageMovable(struct page *page) 135 { 136 VM_BUG_ON_PAGE(!PageMovable(page), page); 137 /* 138 * This page still has the type of a movable page, but it's 139 * actually not movable any more. 140 */ 141 page->mapping = (void *)PAGE_MAPPING_MOVABLE; 142 } 143 EXPORT_SYMBOL(__ClearPageMovable); 144 145 /* Do not skip compaction more than 64 times */ 146 #define COMPACT_MAX_DEFER_SHIFT 6 147 148 /* 149 * Compaction is deferred when compaction fails to result in a page 150 * allocation success. 1 << compact_defer_shift, compactions are skipped up 151 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT 152 */ 153 static void defer_compaction(struct zone *zone, int order) 154 { 155 zone->compact_considered = 0; 156 zone->compact_defer_shift++; 157 158 if (order < zone->compact_order_failed) 159 zone->compact_order_failed = order; 160 161 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) 162 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; 163 164 trace_mm_compaction_defer_compaction(zone, order); 165 } 166 167 /* Returns true if compaction should be skipped this time */ 168 static bool compaction_deferred(struct zone *zone, int order) 169 { 170 unsigned long defer_limit = 1UL << zone->compact_defer_shift; 171 172 if (order < zone->compact_order_failed) 173 return false; 174 175 /* Avoid possible overflow */ 176 if (++zone->compact_considered >= defer_limit) { 177 zone->compact_considered = defer_limit; 178 return false; 179 } 180 181 trace_mm_compaction_deferred(zone, order); 182 183 return true; 184 } 185 186 /* 187 * Update defer tracking counters after successful compaction of given order, 188 * which means an allocation either succeeded (alloc_success == true) or is 189 * expected to succeed. 190 */ 191 void compaction_defer_reset(struct zone *zone, int order, 192 bool alloc_success) 193 { 194 if (alloc_success) { 195 zone->compact_considered = 0; 196 zone->compact_defer_shift = 0; 197 } 198 if (order >= zone->compact_order_failed) 199 zone->compact_order_failed = order + 1; 200 201 trace_mm_compaction_defer_reset(zone, order); 202 } 203 204 /* Returns true if restarting compaction after many failures */ 205 static bool compaction_restarting(struct zone *zone, int order) 206 { 207 if (order < zone->compact_order_failed) 208 return false; 209 210 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && 211 zone->compact_considered >= 1UL << zone->compact_defer_shift; 212 } 213 214 /* Returns true if the pageblock should be scanned for pages to isolate. */ 215 static inline bool isolation_suitable(struct compact_control *cc, 216 struct page *page) 217 { 218 if (cc->ignore_skip_hint) 219 return true; 220 221 return !get_pageblock_skip(page); 222 } 223 224 static void reset_cached_positions(struct zone *zone) 225 { 226 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 227 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 228 zone->compact_cached_free_pfn = 229 pageblock_start_pfn(zone_end_pfn(zone) - 1); 230 } 231 232 /* 233 * Compound pages of >= pageblock_order should consistently be skipped until 234 * released. It is always pointless to compact pages of such order (if they are 235 * migratable), and the pageblocks they occupy cannot contain any free pages. 
236 */ 237 static bool pageblock_skip_persistent(struct page *page) 238 { 239 if (!PageCompound(page)) 240 return false; 241 242 page = compound_head(page); 243 244 if (compound_order(page) >= pageblock_order) 245 return true; 246 247 return false; 248 } 249 250 static bool 251 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, 252 bool check_target) 253 { 254 struct page *page = pfn_to_online_page(pfn); 255 struct page *block_page; 256 struct page *end_page; 257 unsigned long block_pfn; 258 259 if (!page) 260 return false; 261 if (zone != page_zone(page)) 262 return false; 263 if (pageblock_skip_persistent(page)) 264 return false; 265 266 /* 267 * If skip is already cleared do no further checking once the 268 * restart points have been set. 269 */ 270 if (check_source && check_target && !get_pageblock_skip(page)) 271 return true; 272 273 /* 274 * If clearing skip for the target scanner, do not select a 275 * non-movable pageblock as the starting point. 276 */ 277 if (!check_source && check_target && 278 get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 279 return false; 280 281 /* Ensure the start of the pageblock or zone is online and valid */ 282 block_pfn = pageblock_start_pfn(pfn); 283 block_pfn = max(block_pfn, zone->zone_start_pfn); 284 block_page = pfn_to_online_page(block_pfn); 285 if (block_page) { 286 page = block_page; 287 pfn = block_pfn; 288 } 289 290 /* Ensure the end of the pageblock or zone is online and valid */ 291 block_pfn = pageblock_end_pfn(pfn) - 1; 292 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); 293 end_page = pfn_to_online_page(block_pfn); 294 if (!end_page) 295 return false; 296 297 /* 298 * Only clear the hint if a sample indicates there is either a 299 * free page or an LRU page in the block. One or other condition 300 * is necessary for the block to be a migration source/target. 301 */ 302 do { 303 if (check_source && PageLRU(page)) { 304 clear_pageblock_skip(page); 305 return true; 306 } 307 308 if (check_target && PageBuddy(page)) { 309 clear_pageblock_skip(page); 310 return true; 311 } 312 313 page += (1 << PAGE_ALLOC_COSTLY_ORDER); 314 } while (page <= end_page); 315 316 return false; 317 } 318 319 /* 320 * This function is called to clear all cached information on pageblocks that 321 * should be skipped for page isolation when the migrate and free page scanner 322 * meet. 323 */ 324 static void __reset_isolation_suitable(struct zone *zone) 325 { 326 unsigned long migrate_pfn = zone->zone_start_pfn; 327 unsigned long free_pfn = zone_end_pfn(zone) - 1; 328 unsigned long reset_migrate = free_pfn; 329 unsigned long reset_free = migrate_pfn; 330 bool source_set = false; 331 bool free_set = false; 332 333 if (!zone->compact_blockskip_flush) 334 return; 335 336 zone->compact_blockskip_flush = false; 337 338 /* 339 * Walk the zone and update pageblock skip information. Source looks 340 * for PageLRU while target looks for PageBuddy. When the scanner 341 * is found, both PageBuddy and PageLRU are checked as the pageblock 342 * is suitable as both source and target. 
343 */ 344 for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages, 345 free_pfn -= pageblock_nr_pages) { 346 cond_resched(); 347 348 /* Update the migrate PFN */ 349 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && 350 migrate_pfn < reset_migrate) { 351 source_set = true; 352 reset_migrate = migrate_pfn; 353 zone->compact_init_migrate_pfn = reset_migrate; 354 zone->compact_cached_migrate_pfn[0] = reset_migrate; 355 zone->compact_cached_migrate_pfn[1] = reset_migrate; 356 } 357 358 /* Update the free PFN */ 359 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && 360 free_pfn > reset_free) { 361 free_set = true; 362 reset_free = free_pfn; 363 zone->compact_init_free_pfn = reset_free; 364 zone->compact_cached_free_pfn = reset_free; 365 } 366 } 367 368 /* Leave no distance if no suitable block was reset */ 369 if (reset_migrate >= reset_free) { 370 zone->compact_cached_migrate_pfn[0] = migrate_pfn; 371 zone->compact_cached_migrate_pfn[1] = migrate_pfn; 372 zone->compact_cached_free_pfn = free_pfn; 373 } 374 } 375 376 void reset_isolation_suitable(pg_data_t *pgdat) 377 { 378 int zoneid; 379 380 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 381 struct zone *zone = &pgdat->node_zones[zoneid]; 382 if (!populated_zone(zone)) 383 continue; 384 385 /* Only flush if a full compaction finished recently */ 386 if (zone->compact_blockskip_flush) 387 __reset_isolation_suitable(zone); 388 } 389 } 390 391 /* 392 * Sets the pageblock skip bit if it was clear. Note that this is a hint as 393 * locks are not required for read/writers. Returns true if it was already set. 394 */ 395 static bool test_and_set_skip(struct compact_control *cc, struct page *page) 396 { 397 bool skip; 398 399 /* Do not update if skip hint is being ignored */ 400 if (cc->ignore_skip_hint) 401 return false; 402 403 skip = get_pageblock_skip(page); 404 if (!skip && !cc->no_set_skip_hint) 405 set_pageblock_skip(page); 406 407 return skip; 408 } 409 410 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 411 { 412 struct zone *zone = cc->zone; 413 414 pfn = pageblock_end_pfn(pfn); 415 416 /* Set for isolation rather than compaction */ 417 if (cc->no_set_skip_hint) 418 return; 419 420 if (pfn > zone->compact_cached_migrate_pfn[0]) 421 zone->compact_cached_migrate_pfn[0] = pfn; 422 if (cc->mode != MIGRATE_ASYNC && 423 pfn > zone->compact_cached_migrate_pfn[1]) 424 zone->compact_cached_migrate_pfn[1] = pfn; 425 } 426 427 /* 428 * If no pages were isolated then mark this pageblock to be skipped in the 429 * future. The information is later cleared by __reset_isolation_suitable(). 
430 */ 431 static void update_pageblock_skip(struct compact_control *cc, 432 struct page *page, unsigned long pfn) 433 { 434 struct zone *zone = cc->zone; 435 436 if (cc->no_set_skip_hint) 437 return; 438 439 set_pageblock_skip(page); 440 441 /* Update where async and sync compaction should restart */ 442 if (pfn < zone->compact_cached_free_pfn) 443 zone->compact_cached_free_pfn = pfn; 444 } 445 #else 446 static inline bool isolation_suitable(struct compact_control *cc, 447 struct page *page) 448 { 449 return true; 450 } 451 452 static inline bool pageblock_skip_persistent(struct page *page) 453 { 454 return false; 455 } 456 457 static inline void update_pageblock_skip(struct compact_control *cc, 458 struct page *page, unsigned long pfn) 459 { 460 } 461 462 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 463 { 464 } 465 466 static bool test_and_set_skip(struct compact_control *cc, struct page *page) 467 { 468 return false; 469 } 470 #endif /* CONFIG_COMPACTION */ 471 472 /* 473 * Compaction requires the taking of some coarse locks that are potentially 474 * very heavily contended. For async compaction, trylock and record if the 475 * lock is contended. The lock will still be acquired but compaction will 476 * abort when the current block is finished regardless of success rate. 477 * Sync compaction acquires the lock. 478 * 479 * Always returns true which makes it easier to track lock state in callers. 480 */ 481 static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags, 482 struct compact_control *cc) 483 __acquires(lock) 484 { 485 /* Track if the lock is contended in async mode */ 486 if (cc->mode == MIGRATE_ASYNC && !cc->contended) { 487 if (spin_trylock_irqsave(lock, *flags)) 488 return true; 489 490 cc->contended = true; 491 } 492 493 spin_lock_irqsave(lock, *flags); 494 return true; 495 } 496 497 /* 498 * Compaction requires the taking of some coarse locks that are potentially 499 * very heavily contended. The lock should be periodically unlocked to avoid 500 * having disabled IRQs for a long time, even when there is nobody waiting on 501 * the lock. It might also be that allowing the IRQs will result in 502 * need_resched() becoming true. If scheduling is needed, compaction schedules. 503 * Either compaction type will also abort if a fatal signal is pending. 504 * In either case if the lock was locked, it is dropped and not regained. 505 * 506 * Returns true if compaction should abort due to fatal signal pending. 507 * Returns false when compaction can continue. 508 */ 509 static bool compact_unlock_should_abort(spinlock_t *lock, 510 unsigned long flags, bool *locked, struct compact_control *cc) 511 { 512 if (*locked) { 513 spin_unlock_irqrestore(lock, flags); 514 *locked = false; 515 } 516 517 if (fatal_signal_pending(current)) { 518 cc->contended = true; 519 return true; 520 } 521 522 cond_resched(); 523 524 return false; 525 } 526 527 /* 528 * Isolate free pages onto a private freelist. If @strict is true, will abort 529 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 530 * (even though it may still end up isolating some pages). 
531 */ 532 static unsigned long isolate_freepages_block(struct compact_control *cc, 533 unsigned long *start_pfn, 534 unsigned long end_pfn, 535 struct list_head *freelist, 536 unsigned int stride, 537 bool strict) 538 { 539 int nr_scanned = 0, total_isolated = 0; 540 struct page *cursor; 541 unsigned long flags = 0; 542 bool locked = false; 543 unsigned long blockpfn = *start_pfn; 544 unsigned int order; 545 546 /* Strict mode is for isolation, speed is secondary */ 547 if (strict) 548 stride = 1; 549 550 cursor = pfn_to_page(blockpfn); 551 552 /* Isolate free pages. */ 553 for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) { 554 int isolated; 555 struct page *page = cursor; 556 557 /* 558 * Periodically drop the lock (if held) regardless of its 559 * contention, to give chance to IRQs. Abort if fatal signal 560 * pending. 561 */ 562 if (!(blockpfn % COMPACT_CLUSTER_MAX) 563 && compact_unlock_should_abort(&cc->zone->lock, flags, 564 &locked, cc)) 565 break; 566 567 nr_scanned++; 568 569 /* 570 * For compound pages such as THP and hugetlbfs, we can save 571 * potentially a lot of iterations if we skip them at once. 572 * The check is racy, but we can consider only valid values 573 * and the only danger is skipping too much. 574 */ 575 if (PageCompound(page)) { 576 const unsigned int order = compound_order(page); 577 578 if (likely(order <= MAX_ORDER)) { 579 blockpfn += (1UL << order) - 1; 580 cursor += (1UL << order) - 1; 581 nr_scanned += (1UL << order) - 1; 582 } 583 goto isolate_fail; 584 } 585 586 if (!PageBuddy(page)) 587 goto isolate_fail; 588 589 /* If we already hold the lock, we can skip some rechecking. */ 590 if (!locked) { 591 locked = compact_lock_irqsave(&cc->zone->lock, 592 &flags, cc); 593 594 /* Recheck this is a buddy page under lock */ 595 if (!PageBuddy(page)) 596 goto isolate_fail; 597 } 598 599 /* Found a free page, will break it into order-0 pages */ 600 order = buddy_order(page); 601 isolated = __isolate_free_page(page, order); 602 if (!isolated) 603 break; 604 set_page_private(page, order); 605 606 nr_scanned += isolated - 1; 607 total_isolated += isolated; 608 cc->nr_freepages += isolated; 609 list_add_tail(&page->lru, freelist); 610 611 if (!strict && cc->nr_migratepages <= cc->nr_freepages) { 612 blockpfn += isolated; 613 break; 614 } 615 /* Advance to the end of split page */ 616 blockpfn += isolated - 1; 617 cursor += isolated - 1; 618 continue; 619 620 isolate_fail: 621 if (strict) 622 break; 623 else 624 continue; 625 626 } 627 628 if (locked) 629 spin_unlock_irqrestore(&cc->zone->lock, flags); 630 631 /* 632 * There is a tiny chance that we have read bogus compound_order(), 633 * so be careful to not go outside of the pageblock. 634 */ 635 if (unlikely(blockpfn > end_pfn)) 636 blockpfn = end_pfn; 637 638 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 639 nr_scanned, total_isolated); 640 641 /* Record how far we have got within the block */ 642 *start_pfn = blockpfn; 643 644 /* 645 * If strict isolation is requested by CMA then check that all the 646 * pages requested were isolated. If there were any failures, 0 is 647 * returned and CMA will fail. 648 */ 649 if (strict && blockpfn < end_pfn) 650 total_isolated = 0; 651 652 cc->total_free_scanned += nr_scanned; 653 if (total_isolated) 654 count_compact_events(COMPACTISOLATED, total_isolated); 655 return total_isolated; 656 } 657 658 /** 659 * isolate_freepages_range() - isolate free pages. 660 * @cc: Compaction control structure. 661 * @start_pfn: The first PFN to start isolating. 
662 * @end_pfn: The one-past-last PFN. 663 * 664 * Non-free pages, invalid PFNs, or zone boundaries within the 665 * [start_pfn, end_pfn) range are considered errors, cause function to 666 * undo its actions and return zero. 667 * 668 * Otherwise, function returns one-past-the-last PFN of isolated page 669 * (which may be greater then end_pfn if end fell in a middle of 670 * a free page). 671 */ 672 unsigned long 673 isolate_freepages_range(struct compact_control *cc, 674 unsigned long start_pfn, unsigned long end_pfn) 675 { 676 unsigned long isolated, pfn, block_start_pfn, block_end_pfn; 677 LIST_HEAD(freelist); 678 679 pfn = start_pfn; 680 block_start_pfn = pageblock_start_pfn(pfn); 681 if (block_start_pfn < cc->zone->zone_start_pfn) 682 block_start_pfn = cc->zone->zone_start_pfn; 683 block_end_pfn = pageblock_end_pfn(pfn); 684 685 for (; pfn < end_pfn; pfn += isolated, 686 block_start_pfn = block_end_pfn, 687 block_end_pfn += pageblock_nr_pages) { 688 /* Protect pfn from changing by isolate_freepages_block */ 689 unsigned long isolate_start_pfn = pfn; 690 691 block_end_pfn = min(block_end_pfn, end_pfn); 692 693 /* 694 * pfn could pass the block_end_pfn if isolated freepage 695 * is more than pageblock order. In this case, we adjust 696 * scanning range to right one. 697 */ 698 if (pfn >= block_end_pfn) { 699 block_start_pfn = pageblock_start_pfn(pfn); 700 block_end_pfn = pageblock_end_pfn(pfn); 701 block_end_pfn = min(block_end_pfn, end_pfn); 702 } 703 704 if (!pageblock_pfn_to_page(block_start_pfn, 705 block_end_pfn, cc->zone)) 706 break; 707 708 isolated = isolate_freepages_block(cc, &isolate_start_pfn, 709 block_end_pfn, &freelist, 0, true); 710 711 /* 712 * In strict mode, isolate_freepages_block() returns 0 if 713 * there are any holes in the block (ie. invalid PFNs or 714 * non-free pages). 715 */ 716 if (!isolated) 717 break; 718 719 /* 720 * If we managed to isolate pages, it is always (1 << n) * 721 * pageblock_nr_pages for some non-negative n. (Max order 722 * page may span two pageblocks). 723 */ 724 } 725 726 /* __isolate_free_page() does not map the pages */ 727 split_map_pages(&freelist); 728 729 if (pfn < end_pfn) { 730 /* Loop terminated early, cleanup. */ 731 release_freepages(&freelist); 732 return 0; 733 } 734 735 /* We don't use freelists for anything. */ 736 return pfn; 737 } 738 739 /* Similar to reclaim, but different enough that they don't share logic */ 740 static bool too_many_isolated(struct compact_control *cc) 741 { 742 pg_data_t *pgdat = cc->zone->zone_pgdat; 743 bool too_many; 744 745 unsigned long active, inactive, isolated; 746 747 inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + 748 node_page_state(pgdat, NR_INACTIVE_ANON); 749 active = node_page_state(pgdat, NR_ACTIVE_FILE) + 750 node_page_state(pgdat, NR_ACTIVE_ANON); 751 isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + 752 node_page_state(pgdat, NR_ISOLATED_ANON); 753 754 /* 755 * Allow GFP_NOFS to isolate past the limit set for regular 756 * compaction runs. This prevents an ABBA deadlock when other 757 * compactors have already isolated to the limit, but are 758 * blocked on filesystem locks held by the GFP_NOFS thread. 
759 */ 760 if (cc->gfp_mask & __GFP_FS) { 761 inactive >>= 3; 762 active >>= 3; 763 } 764 765 too_many = isolated > (inactive + active) / 2; 766 if (!too_many) 767 wake_throttle_isolated(pgdat); 768 769 return too_many; 770 } 771 772 /** 773 * isolate_migratepages_block() - isolate all migrate-able pages within 774 * a single pageblock 775 * @cc: Compaction control structure. 776 * @low_pfn: The first PFN to isolate 777 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 778 * @mode: Isolation mode to be used. 779 * 780 * Isolate all pages that can be migrated from the range specified by 781 * [low_pfn, end_pfn). The range is expected to be within same pageblock. 782 * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion, 783 * -ENOMEM in case we could not allocate a page, or 0. 784 * cc->migrate_pfn will contain the next pfn to scan. 785 * 786 * The pages are isolated on cc->migratepages list (not required to be empty), 787 * and cc->nr_migratepages is updated accordingly. 788 */ 789 static int 790 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 791 unsigned long end_pfn, isolate_mode_t mode) 792 { 793 pg_data_t *pgdat = cc->zone->zone_pgdat; 794 unsigned long nr_scanned = 0, nr_isolated = 0; 795 struct lruvec *lruvec; 796 unsigned long flags = 0; 797 struct lruvec *locked = NULL; 798 struct page *page = NULL, *valid_page = NULL; 799 struct address_space *mapping; 800 unsigned long start_pfn = low_pfn; 801 bool skip_on_failure = false; 802 unsigned long next_skip_pfn = 0; 803 bool skip_updated = false; 804 int ret = 0; 805 806 cc->migrate_pfn = low_pfn; 807 808 /* 809 * Ensure that there are not too many pages isolated from the LRU 810 * list by either parallel reclaimers or compaction. If there are, 811 * delay for some time until fewer pages are isolated 812 */ 813 while (unlikely(too_many_isolated(cc))) { 814 /* stop isolation if there are still pages not migrated */ 815 if (cc->nr_migratepages) 816 return -EAGAIN; 817 818 /* async migration should just abort */ 819 if (cc->mode == MIGRATE_ASYNC) 820 return -EAGAIN; 821 822 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); 823 824 if (fatal_signal_pending(current)) 825 return -EINTR; 826 } 827 828 cond_resched(); 829 830 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { 831 skip_on_failure = true; 832 next_skip_pfn = block_end_pfn(low_pfn, cc->order); 833 } 834 835 /* Time to isolate some pages for migration */ 836 for (; low_pfn < end_pfn; low_pfn++) { 837 838 if (skip_on_failure && low_pfn >= next_skip_pfn) { 839 /* 840 * We have isolated all migration candidates in the 841 * previous order-aligned block, and did not skip it due 842 * to failure. We should migrate the pages now and 843 * hopefully succeed compaction. 844 */ 845 if (nr_isolated) 846 break; 847 848 /* 849 * We failed to isolate in the previous order-aligned 850 * block. Set the new boundary to the end of the 851 * current block. Note we can't simply increase 852 * next_skip_pfn by 1 << order, as low_pfn might have 853 * been incremented by a higher number due to skipping 854 * a compound or a high-order buddy page in the 855 * previous loop iteration. 856 */ 857 next_skip_pfn = block_end_pfn(low_pfn, cc->order); 858 } 859 860 /* 861 * Periodically drop the lock (if held) regardless of its 862 * contention, to give chance to IRQs. Abort completely if 863 * a fatal signal is pending. 
864 */ 865 if (!(low_pfn % COMPACT_CLUSTER_MAX)) { 866 if (locked) { 867 unlock_page_lruvec_irqrestore(locked, flags); 868 locked = NULL; 869 } 870 871 if (fatal_signal_pending(current)) { 872 cc->contended = true; 873 ret = -EINTR; 874 875 goto fatal_pending; 876 } 877 878 cond_resched(); 879 } 880 881 nr_scanned++; 882 883 page = pfn_to_page(low_pfn); 884 885 /* 886 * Check if the pageblock has already been marked skipped. 887 * Only the aligned PFN is checked as the caller isolates 888 * COMPACT_CLUSTER_MAX at a time so the second call must 889 * not falsely conclude that the block should be skipped. 890 */ 891 if (!valid_page && pageblock_aligned(low_pfn)) { 892 if (!isolation_suitable(cc, page)) { 893 low_pfn = end_pfn; 894 page = NULL; 895 goto isolate_abort; 896 } 897 valid_page = page; 898 } 899 900 if (PageHuge(page) && cc->alloc_contig) { 901 if (locked) { 902 unlock_page_lruvec_irqrestore(locked, flags); 903 locked = NULL; 904 } 905 906 ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); 907 908 /* 909 * Fail isolation in case isolate_or_dissolve_huge_page() 910 * reports an error. In case of -ENOMEM, abort right away. 911 */ 912 if (ret < 0) { 913 /* Do not report -EBUSY down the chain */ 914 if (ret == -EBUSY) 915 ret = 0; 916 low_pfn += compound_nr(page) - 1; 917 nr_scanned += compound_nr(page) - 1; 918 goto isolate_fail; 919 } 920 921 if (PageHuge(page)) { 922 /* 923 * Hugepage was successfully isolated and placed 924 * on the cc->migratepages list. 925 */ 926 low_pfn += compound_nr(page) - 1; 927 goto isolate_success_no_list; 928 } 929 930 /* 931 * Ok, the hugepage was dissolved. Now these pages are 932 * Buddy and cannot be re-allocated because they are 933 * isolated. Fall-through as the check below handles 934 * Buddy pages. 935 */ 936 } 937 938 /* 939 * Skip if free. We read page order here without zone lock 940 * which is generally unsafe, but the race window is small and 941 * the worst thing that can happen is that we skip some 942 * potential isolation targets. 943 */ 944 if (PageBuddy(page)) { 945 unsigned long freepage_order = buddy_order_unsafe(page); 946 947 /* 948 * Without lock, we cannot be sure that what we got is 949 * a valid page order. Consider only values in the 950 * valid order range to prevent low_pfn overflow. 951 */ 952 if (freepage_order > 0 && freepage_order <= MAX_ORDER) { 953 low_pfn += (1UL << freepage_order) - 1; 954 nr_scanned += (1UL << freepage_order) - 1; 955 } 956 continue; 957 } 958 959 /* 960 * Regardless of being on LRU, compound pages such as THP and 961 * hugetlbfs are not to be compacted unless we are attempting 962 * an allocation much larger than the huge page size (eg CMA). 963 * We can potentially save a lot of iterations if we skip them 964 * at once. The check is racy, but we can consider only valid 965 * values and the only danger is skipping too much. 966 */ 967 if (PageCompound(page) && !cc->alloc_contig) { 968 const unsigned int order = compound_order(page); 969 970 if (likely(order <= MAX_ORDER)) { 971 low_pfn += (1UL << order) - 1; 972 nr_scanned += (1UL << order) - 1; 973 } 974 goto isolate_fail; 975 } 976 977 /* 978 * Check may be lockless but that's ok as we recheck later. 979 * It's possible to migrate LRU and non-lru movable pages. 980 * Skip any other type of page 981 */ 982 if (!PageLRU(page)) { 983 /* 984 * __PageMovable can return false positive so we need 985 * to verify it under page_lock. 
986 */ 987 if (unlikely(__PageMovable(page)) && 988 !PageIsolated(page)) { 989 if (locked) { 990 unlock_page_lruvec_irqrestore(locked, flags); 991 locked = NULL; 992 } 993 994 if (isolate_movable_page(page, mode)) 995 goto isolate_success; 996 } 997 998 goto isolate_fail; 999 } 1000 1001 /* 1002 * Be careful not to clear PageLRU until after we're 1003 * sure the page is not being freed elsewhere -- the 1004 * page release code relies on it. 1005 */ 1006 if (unlikely(!get_page_unless_zero(page))) 1007 goto isolate_fail; 1008 1009 /* 1010 * Migration will fail if an anonymous page is pinned in memory, 1011 * so avoid taking lru_lock and isolating it unnecessarily in an 1012 * admittedly racy check. 1013 */ 1014 mapping = page_mapping(page); 1015 if (!mapping && (page_count(page) - 1) > total_mapcount(page)) 1016 goto isolate_fail_put; 1017 1018 /* 1019 * Only allow to migrate anonymous pages in GFP_NOFS context 1020 * because those do not depend on fs locks. 1021 */ 1022 if (!(cc->gfp_mask & __GFP_FS) && mapping) 1023 goto isolate_fail_put; 1024 1025 /* Only take pages on LRU: a check now makes later tests safe */ 1026 if (!PageLRU(page)) 1027 goto isolate_fail_put; 1028 1029 /* Compaction might skip unevictable pages but CMA takes them */ 1030 if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page)) 1031 goto isolate_fail_put; 1032 1033 /* 1034 * To minimise LRU disruption, the caller can indicate with 1035 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages 1036 * it will be able to migrate without blocking - clean pages 1037 * for the most part. PageWriteback would require blocking. 1038 */ 1039 if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page)) 1040 goto isolate_fail_put; 1041 1042 if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) { 1043 bool migrate_dirty; 1044 1045 /* 1046 * Only pages without mappings or that have a 1047 * ->migrate_folio callback are possible to migrate 1048 * without blocking. However, we can be racing with 1049 * truncation so it's necessary to lock the page 1050 * to stabilise the mapping as truncation holds 1051 * the page lock until after the page is removed 1052 * from the page cache. 1053 */ 1054 if (!trylock_page(page)) 1055 goto isolate_fail_put; 1056 1057 mapping = page_mapping(page); 1058 migrate_dirty = !mapping || 1059 mapping->a_ops->migrate_folio; 1060 unlock_page(page); 1061 if (!migrate_dirty) 1062 goto isolate_fail_put; 1063 } 1064 1065 /* Try isolate the page */ 1066 if (!TestClearPageLRU(page)) 1067 goto isolate_fail_put; 1068 1069 lruvec = folio_lruvec(page_folio(page)); 1070 1071 /* If we already hold the lock, we can skip some rechecking */ 1072 if (lruvec != locked) { 1073 if (locked) 1074 unlock_page_lruvec_irqrestore(locked, flags); 1075 1076 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); 1077 locked = lruvec; 1078 1079 lruvec_memcg_debug(lruvec, page_folio(page)); 1080 1081 /* 1082 * Try get exclusive access under lock. If marked for 1083 * skip, the scan is aborted unless the current context 1084 * is a rescan to reach the end of the pageblock. 1085 */ 1086 if (!skip_updated && valid_page) { 1087 skip_updated = true; 1088 if (test_and_set_skip(cc, valid_page) && 1089 !cc->finish_pageblock) { 1090 goto isolate_abort; 1091 } 1092 } 1093 1094 /* 1095 * Page become compound since the non-locked check, 1096 * and it's on LRU. It can only be a THP so the order 1097 * is safe to read and it's 0 for tail pages. 
1098 */ 1099 if (unlikely(PageCompound(page) && !cc->alloc_contig)) { 1100 low_pfn += compound_nr(page) - 1; 1101 nr_scanned += compound_nr(page) - 1; 1102 SetPageLRU(page); 1103 goto isolate_fail_put; 1104 } 1105 } 1106 1107 /* The whole page is taken off the LRU; skip the tail pages. */ 1108 if (PageCompound(page)) 1109 low_pfn += compound_nr(page) - 1; 1110 1111 /* Successfully isolated */ 1112 del_page_from_lru_list(page, lruvec); 1113 mod_node_page_state(page_pgdat(page), 1114 NR_ISOLATED_ANON + page_is_file_lru(page), 1115 thp_nr_pages(page)); 1116 1117 isolate_success: 1118 list_add(&page->lru, &cc->migratepages); 1119 isolate_success_no_list: 1120 cc->nr_migratepages += compound_nr(page); 1121 nr_isolated += compound_nr(page); 1122 nr_scanned += compound_nr(page) - 1; 1123 1124 /* 1125 * Avoid isolating too much unless this block is being 1126 * fully scanned (e.g. dirty/writeback pages, parallel allocation) 1127 * or a lock is contended. For contention, isolate quickly to 1128 * potentially remove one source of contention. 1129 */ 1130 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && 1131 !cc->finish_pageblock && !cc->contended) { 1132 ++low_pfn; 1133 break; 1134 } 1135 1136 continue; 1137 1138 isolate_fail_put: 1139 /* Avoid potential deadlock in freeing page under lru_lock */ 1140 if (locked) { 1141 unlock_page_lruvec_irqrestore(locked, flags); 1142 locked = NULL; 1143 } 1144 put_page(page); 1145 1146 isolate_fail: 1147 if (!skip_on_failure && ret != -ENOMEM) 1148 continue; 1149 1150 /* 1151 * We have isolated some pages, but then failed. Release them 1152 * instead of migrating, as we cannot form the cc->order buddy 1153 * page anyway. 1154 */ 1155 if (nr_isolated) { 1156 if (locked) { 1157 unlock_page_lruvec_irqrestore(locked, flags); 1158 locked = NULL; 1159 } 1160 putback_movable_pages(&cc->migratepages); 1161 cc->nr_migratepages = 0; 1162 nr_isolated = 0; 1163 } 1164 1165 if (low_pfn < next_skip_pfn) { 1166 low_pfn = next_skip_pfn - 1; 1167 /* 1168 * The check near the loop beginning would have updated 1169 * next_skip_pfn too, but this is a bit simpler. 1170 */ 1171 next_skip_pfn += 1UL << cc->order; 1172 } 1173 1174 if (ret == -ENOMEM) 1175 break; 1176 } 1177 1178 /* 1179 * The PageBuddy() check could have potentially brought us outside 1180 * the range to be scanned. 1181 */ 1182 if (unlikely(low_pfn > end_pfn)) 1183 low_pfn = end_pfn; 1184 1185 page = NULL; 1186 1187 isolate_abort: 1188 if (locked) 1189 unlock_page_lruvec_irqrestore(locked, flags); 1190 if (page) { 1191 SetPageLRU(page); 1192 put_page(page); 1193 } 1194 1195 /* 1196 * Update the cached scanner pfn once the pageblock has been scanned. 1197 * Pages will either be migrated in which case there is no point 1198 * scanning in the near future or migration failed in which case the 1199 * failure reason may persist. The block is marked for skipping if 1200 * there were no pages isolated in the block or if the block is 1201 * rescanned twice in a row. 
1202 */ 1203 if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { 1204 if (!cc->no_set_skip_hint && valid_page && !skip_updated) 1205 set_pageblock_skip(valid_page); 1206 update_cached_migrate(cc, low_pfn); 1207 } 1208 1209 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 1210 nr_scanned, nr_isolated); 1211 1212 fatal_pending: 1213 cc->total_migrate_scanned += nr_scanned; 1214 if (nr_isolated) 1215 count_compact_events(COMPACTISOLATED, nr_isolated); 1216 1217 cc->migrate_pfn = low_pfn; 1218 1219 return ret; 1220 } 1221 1222 /** 1223 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 1224 * @cc: Compaction control structure. 1225 * @start_pfn: The first PFN to start isolating. 1226 * @end_pfn: The one-past-last PFN. 1227 * 1228 * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM 1229 * in case we could not allocate a page, or 0. 1230 */ 1231 int 1232 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 1233 unsigned long end_pfn) 1234 { 1235 unsigned long pfn, block_start_pfn, block_end_pfn; 1236 int ret = 0; 1237 1238 /* Scan block by block. First and last block may be incomplete */ 1239 pfn = start_pfn; 1240 block_start_pfn = pageblock_start_pfn(pfn); 1241 if (block_start_pfn < cc->zone->zone_start_pfn) 1242 block_start_pfn = cc->zone->zone_start_pfn; 1243 block_end_pfn = pageblock_end_pfn(pfn); 1244 1245 for (; pfn < end_pfn; pfn = block_end_pfn, 1246 block_start_pfn = block_end_pfn, 1247 block_end_pfn += pageblock_nr_pages) { 1248 1249 block_end_pfn = min(block_end_pfn, end_pfn); 1250 1251 if (!pageblock_pfn_to_page(block_start_pfn, 1252 block_end_pfn, cc->zone)) 1253 continue; 1254 1255 ret = isolate_migratepages_block(cc, pfn, block_end_pfn, 1256 ISOLATE_UNEVICTABLE); 1257 1258 if (ret) 1259 break; 1260 1261 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) 1262 break; 1263 } 1264 1265 return ret; 1266 } 1267 1268 #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1269 #ifdef CONFIG_COMPACTION 1270 1271 static bool suitable_migration_source(struct compact_control *cc, 1272 struct page *page) 1273 { 1274 int block_mt; 1275 1276 if (pageblock_skip_persistent(page)) 1277 return false; 1278 1279 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) 1280 return true; 1281 1282 block_mt = get_pageblock_migratetype(page); 1283 1284 if (cc->migratetype == MIGRATE_MOVABLE) 1285 return is_migrate_movable(block_mt); 1286 else 1287 return block_mt == cc->migratetype; 1288 } 1289 1290 /* Returns true if the page is within a block suitable for migration to */ 1291 static bool suitable_migration_target(struct compact_control *cc, 1292 struct page *page) 1293 { 1294 /* If the page is a large free page, then disallow migration */ 1295 if (PageBuddy(page)) { 1296 /* 1297 * We are checking page_order without zone->lock taken. But 1298 * the only small danger is that we skip a potentially suitable 1299 * pageblock, so it's not worth to check order for valid range. 
1300 */ 1301 if (buddy_order_unsafe(page) >= pageblock_order) 1302 return false; 1303 } 1304 1305 if (cc->ignore_block_suitable) 1306 return true; 1307 1308 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1309 if (is_migrate_movable(get_pageblock_migratetype(page))) 1310 return true; 1311 1312 /* Otherwise skip the block */ 1313 return false; 1314 } 1315 1316 static inline unsigned int 1317 freelist_scan_limit(struct compact_control *cc) 1318 { 1319 unsigned short shift = BITS_PER_LONG - 1; 1320 1321 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; 1322 } 1323 1324 /* 1325 * Test whether the free scanner has reached the same or lower pageblock than 1326 * the migration scanner, and compaction should thus terminate. 1327 */ 1328 static inline bool compact_scanners_met(struct compact_control *cc) 1329 { 1330 return (cc->free_pfn >> pageblock_order) 1331 <= (cc->migrate_pfn >> pageblock_order); 1332 } 1333 1334 /* 1335 * Used when scanning for a suitable migration target which scans freelists 1336 * in reverse. Reorders the list such as the unscanned pages are scanned 1337 * first on the next iteration of the free scanner 1338 */ 1339 static void 1340 move_freelist_head(struct list_head *freelist, struct page *freepage) 1341 { 1342 LIST_HEAD(sublist); 1343 1344 if (!list_is_last(freelist, &freepage->lru)) { 1345 list_cut_before(&sublist, freelist, &freepage->lru); 1346 list_splice_tail(&sublist, freelist); 1347 } 1348 } 1349 1350 /* 1351 * Similar to move_freelist_head except used by the migration scanner 1352 * when scanning forward. It's possible for these list operations to 1353 * move against each other if they search the free list exactly in 1354 * lockstep. 1355 */ 1356 static void 1357 move_freelist_tail(struct list_head *freelist, struct page *freepage) 1358 { 1359 LIST_HEAD(sublist); 1360 1361 if (!list_is_first(freelist, &freepage->lru)) { 1362 list_cut_position(&sublist, freelist, &freepage->lru); 1363 list_splice_tail(&sublist, freelist); 1364 } 1365 } 1366 1367 static void 1368 fast_isolate_around(struct compact_control *cc, unsigned long pfn) 1369 { 1370 unsigned long start_pfn, end_pfn; 1371 struct page *page; 1372 1373 /* Do not search around if there are enough pages already */ 1374 if (cc->nr_freepages >= cc->nr_migratepages) 1375 return; 1376 1377 /* Minimise scanning during async compaction */ 1378 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) 1379 return; 1380 1381 /* Pageblock boundaries */ 1382 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); 1383 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); 1384 1385 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); 1386 if (!page) 1387 return; 1388 1389 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); 1390 1391 /* Skip this pageblock in the future as it's full or nearly full */ 1392 if (start_pfn == end_pfn) 1393 set_pageblock_skip(page); 1394 1395 return; 1396 } 1397 1398 /* Search orders in round-robin fashion */ 1399 static int next_search_order(struct compact_control *cc, int order) 1400 { 1401 order--; 1402 if (order < 0) 1403 order = cc->order - 1; 1404 1405 /* Search wrapped around? 
*/ 1406 if (order == cc->search_order) { 1407 cc->search_order--; 1408 if (cc->search_order < 0) 1409 cc->search_order = cc->order - 1; 1410 return -1; 1411 } 1412 1413 return order; 1414 } 1415 1416 static void fast_isolate_freepages(struct compact_control *cc) 1417 { 1418 unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); 1419 unsigned int nr_scanned = 0, total_isolated = 0; 1420 unsigned long low_pfn, min_pfn, highest = 0; 1421 unsigned long nr_isolated = 0; 1422 unsigned long distance; 1423 struct page *page = NULL; 1424 bool scan_start = false; 1425 int order; 1426 1427 /* Full compaction passes in a negative order */ 1428 if (cc->order <= 0) 1429 return; 1430 1431 /* 1432 * If starting the scan, use a deeper search and use the highest 1433 * PFN found if a suitable one is not found. 1434 */ 1435 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { 1436 limit = pageblock_nr_pages >> 1; 1437 scan_start = true; 1438 } 1439 1440 /* 1441 * Preferred point is in the top quarter of the scan space but take 1442 * a pfn from the top half if the search is problematic. 1443 */ 1444 distance = (cc->free_pfn - cc->migrate_pfn); 1445 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); 1446 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); 1447 1448 if (WARN_ON_ONCE(min_pfn > low_pfn)) 1449 low_pfn = min_pfn; 1450 1451 /* 1452 * Search starts from the last successful isolation order or the next 1453 * order to search after a previous failure 1454 */ 1455 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); 1456 1457 for (order = cc->search_order; 1458 !page && order >= 0; 1459 order = next_search_order(cc, order)) { 1460 struct free_area *area = &cc->zone->free_area[order]; 1461 struct list_head *freelist; 1462 struct page *freepage; 1463 unsigned long flags; 1464 unsigned int order_scanned = 0; 1465 unsigned long high_pfn = 0; 1466 1467 if (!area->nr_free) 1468 continue; 1469 1470 spin_lock_irqsave(&cc->zone->lock, flags); 1471 freelist = &area->free_list[MIGRATE_MOVABLE]; 1472 list_for_each_entry_reverse(freepage, freelist, lru) { 1473 unsigned long pfn; 1474 1475 order_scanned++; 1476 nr_scanned++; 1477 pfn = page_to_pfn(freepage); 1478 1479 if (pfn >= highest) 1480 highest = max(pageblock_start_pfn(pfn), 1481 cc->zone->zone_start_pfn); 1482 1483 if (pfn >= low_pfn) { 1484 cc->fast_search_fail = 0; 1485 cc->search_order = order; 1486 page = freepage; 1487 break; 1488 } 1489 1490 if (pfn >= min_pfn && pfn > high_pfn) { 1491 high_pfn = pfn; 1492 1493 /* Shorten the scan if a candidate is found */ 1494 limit >>= 1; 1495 } 1496 1497 if (order_scanned >= limit) 1498 break; 1499 } 1500 1501 /* Use a minimum pfn if a preferred one was not found */ 1502 if (!page && high_pfn) { 1503 page = pfn_to_page(high_pfn); 1504 1505 /* Update freepage for the list reorder below */ 1506 freepage = page; 1507 } 1508 1509 /* Reorder to so a future search skips recent pages */ 1510 move_freelist_head(freelist, freepage); 1511 1512 /* Isolate the page if available */ 1513 if (page) { 1514 if (__isolate_free_page(page, order)) { 1515 set_page_private(page, order); 1516 nr_isolated = 1 << order; 1517 nr_scanned += nr_isolated - 1; 1518 total_isolated += nr_isolated; 1519 cc->nr_freepages += nr_isolated; 1520 list_add_tail(&page->lru, &cc->freepages); 1521 count_compact_events(COMPACTISOLATED, nr_isolated); 1522 } else { 1523 /* If isolation fails, abort the search */ 1524 order = cc->search_order + 1; 1525 page = NULL; 1526 } 1527 } 1528 1529 
spin_unlock_irqrestore(&cc->zone->lock, flags); 1530 1531 /* Skip fast search if enough freepages isolated */ 1532 if (cc->nr_freepages >= cc->nr_migratepages) 1533 break; 1534 1535 /* 1536 * Smaller scan on next order so the total scan is related 1537 * to freelist_scan_limit. 1538 */ 1539 if (order_scanned >= limit) 1540 limit = max(1U, limit >> 1); 1541 } 1542 1543 trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, 1544 nr_scanned, total_isolated); 1545 1546 if (!page) { 1547 cc->fast_search_fail++; 1548 if (scan_start) { 1549 /* 1550 * Use the highest PFN found above min. If one was 1551 * not found, be pessimistic for direct compaction 1552 * and use the min mark. 1553 */ 1554 if (highest >= min_pfn) { 1555 page = pfn_to_page(highest); 1556 cc->free_pfn = highest; 1557 } else { 1558 if (cc->direct_compaction && pfn_valid(min_pfn)) { 1559 page = pageblock_pfn_to_page(min_pfn, 1560 min(pageblock_end_pfn(min_pfn), 1561 zone_end_pfn(cc->zone)), 1562 cc->zone); 1563 cc->free_pfn = min_pfn; 1564 } 1565 } 1566 } 1567 } 1568 1569 if (highest && highest >= cc->zone->compact_cached_free_pfn) { 1570 highest -= pageblock_nr_pages; 1571 cc->zone->compact_cached_free_pfn = highest; 1572 } 1573 1574 cc->total_free_scanned += nr_scanned; 1575 if (!page) 1576 return; 1577 1578 low_pfn = page_to_pfn(page); 1579 fast_isolate_around(cc, low_pfn); 1580 } 1581 1582 /* 1583 * Based on information in the current compact_control, find blocks 1584 * suitable for isolating free pages from and then isolate them. 1585 */ 1586 static void isolate_freepages(struct compact_control *cc) 1587 { 1588 struct zone *zone = cc->zone; 1589 struct page *page; 1590 unsigned long block_start_pfn; /* start of current pageblock */ 1591 unsigned long isolate_start_pfn; /* exact pfn we start at */ 1592 unsigned long block_end_pfn; /* end of current pageblock */ 1593 unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1594 struct list_head *freelist = &cc->freepages; 1595 unsigned int stride; 1596 1597 /* Try a small search of the free lists for a candidate */ 1598 fast_isolate_freepages(cc); 1599 if (cc->nr_freepages) 1600 goto splitmap; 1601 1602 /* 1603 * Initialise the free scanner. The starting point is where we last 1604 * successfully isolated from, zone-cached value, or the end of the 1605 * zone when isolating for the first time. For looping we also need 1606 * this pfn aligned down to the pageblock boundary, because we do 1607 * block_start_pfn -= pageblock_nr_pages in the for loop. 1608 * For ending point, take care when isolating in last pageblock of a 1609 * zone which ends in the middle of a pageblock. 1610 * The low boundary is the end of the pageblock the migration scanner 1611 * is using. 1612 */ 1613 isolate_start_pfn = cc->free_pfn; 1614 block_start_pfn = pageblock_start_pfn(isolate_start_pfn); 1615 block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1616 zone_end_pfn(zone)); 1617 low_pfn = pageblock_end_pfn(cc->migrate_pfn); 1618 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; 1619 1620 /* 1621 * Isolate free pages until enough are available to migrate the 1622 * pages on cc->migratepages. We stop searching if the migrate 1623 * and free page scanners meet or enough free pages are isolated. 
1624 */ 1625 for (; block_start_pfn >= low_pfn; 1626 block_end_pfn = block_start_pfn, 1627 block_start_pfn -= pageblock_nr_pages, 1628 isolate_start_pfn = block_start_pfn) { 1629 unsigned long nr_isolated; 1630 1631 /* 1632 * This can iterate a massively long zone without finding any 1633 * suitable migration targets, so periodically check resched. 1634 */ 1635 if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) 1636 cond_resched(); 1637 1638 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 1639 zone); 1640 if (!page) 1641 continue; 1642 1643 /* Check the block is suitable for migration */ 1644 if (!suitable_migration_target(cc, page)) 1645 continue; 1646 1647 /* If isolation recently failed, do not retry */ 1648 if (!isolation_suitable(cc, page)) 1649 continue; 1650 1651 /* Found a block suitable for isolating free pages from. */ 1652 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, 1653 block_end_pfn, freelist, stride, false); 1654 1655 /* Update the skip hint if the full pageblock was scanned */ 1656 if (isolate_start_pfn == block_end_pfn) 1657 update_pageblock_skip(cc, page, block_start_pfn); 1658 1659 /* Are enough freepages isolated? */ 1660 if (cc->nr_freepages >= cc->nr_migratepages) { 1661 if (isolate_start_pfn >= block_end_pfn) { 1662 /* 1663 * Restart at previous pageblock if more 1664 * freepages can be isolated next time. 1665 */ 1666 isolate_start_pfn = 1667 block_start_pfn - pageblock_nr_pages; 1668 } 1669 break; 1670 } else if (isolate_start_pfn < block_end_pfn) { 1671 /* 1672 * If isolation failed early, do not continue 1673 * needlessly. 1674 */ 1675 break; 1676 } 1677 1678 /* Adjust stride depending on isolation */ 1679 if (nr_isolated) { 1680 stride = 1; 1681 continue; 1682 } 1683 stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); 1684 } 1685 1686 /* 1687 * Record where the free scanner will restart next time. Either we 1688 * broke from the loop and set isolate_start_pfn based on the last 1689 * call to isolate_freepages_block(), or we met the migration scanner 1690 * and the loop terminated due to isolate_start_pfn < low_pfn 1691 */ 1692 cc->free_pfn = isolate_start_pfn; 1693 1694 splitmap: 1695 /* __isolate_free_page() does not map the pages */ 1696 split_map_pages(freelist); 1697 } 1698 1699 /* 1700 * This is a migrate-callback that "allocates" freepages by taking pages 1701 * from the isolated freelists in the block we are migrating to. 1702 */ 1703 static struct folio *compaction_alloc(struct folio *src, unsigned long data) 1704 { 1705 struct compact_control *cc = (struct compact_control *)data; 1706 struct folio *dst; 1707 1708 if (list_empty(&cc->freepages)) { 1709 isolate_freepages(cc); 1710 1711 if (list_empty(&cc->freepages)) 1712 return NULL; 1713 } 1714 1715 dst = list_entry(cc->freepages.next, struct folio, lru); 1716 list_del(&dst->lru); 1717 cc->nr_freepages--; 1718 1719 return dst; 1720 } 1721 1722 /* 1723 * This is a migrate-callback that "frees" freepages back to the isolated 1724 * freelist. All pages on the freelist are from the same zone, so there is no 1725 * special handling needed for NUMA. 
1726 */ 1727 static void compaction_free(struct folio *dst, unsigned long data) 1728 { 1729 struct compact_control *cc = (struct compact_control *)data; 1730 1731 list_add(&dst->lru, &cc->freepages); 1732 cc->nr_freepages++; 1733 } 1734 1735 /* possible outcome of isolate_migratepages */ 1736 typedef enum { 1737 ISOLATE_ABORT, /* Abort compaction now */ 1738 ISOLATE_NONE, /* No pages isolated, continue scanning */ 1739 ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1740 } isolate_migrate_t; 1741 1742 /* 1743 * Allow userspace to control policy on scanning the unevictable LRU for 1744 * compactable pages. 1745 */ 1746 static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT; 1747 /* 1748 * Tunable for proactive compaction. It determines how 1749 * aggressively the kernel should compact memory in the 1750 * background. It takes values in the range [0, 100]. 1751 */ 1752 static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 1753 static int sysctl_extfrag_threshold = 500; 1754 static int __read_mostly sysctl_compact_memory; 1755 1756 static inline void 1757 update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) 1758 { 1759 if (cc->fast_start_pfn == ULONG_MAX) 1760 return; 1761 1762 if (!cc->fast_start_pfn) 1763 cc->fast_start_pfn = pfn; 1764 1765 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); 1766 } 1767 1768 static inline unsigned long 1769 reinit_migrate_pfn(struct compact_control *cc) 1770 { 1771 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) 1772 return cc->migrate_pfn; 1773 1774 cc->migrate_pfn = cc->fast_start_pfn; 1775 cc->fast_start_pfn = ULONG_MAX; 1776 1777 return cc->migrate_pfn; 1778 } 1779 1780 /* 1781 * Briefly search the free lists for a migration source that already has 1782 * some free pages to reduce the number of pages that need migration 1783 * before a pageblock is free. 1784 */ 1785 static unsigned long fast_find_migrateblock(struct compact_control *cc) 1786 { 1787 unsigned int limit = freelist_scan_limit(cc); 1788 unsigned int nr_scanned = 0; 1789 unsigned long distance; 1790 unsigned long pfn = cc->migrate_pfn; 1791 unsigned long high_pfn; 1792 int order; 1793 bool found_block = false; 1794 1795 /* Skip hints are relied on to avoid repeats on the fast search */ 1796 if (cc->ignore_skip_hint) 1797 return pfn; 1798 1799 /* 1800 * If the pageblock should be finished then do not select a different 1801 * pageblock. 1802 */ 1803 if (cc->finish_pageblock) 1804 return pfn; 1805 1806 /* 1807 * If the migrate_pfn is not at the start of a zone or the start 1808 * of a pageblock then assume this is a continuation of a previous 1809 * scan restarted due to COMPACT_CLUSTER_MAX. 1810 */ 1811 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) 1812 return pfn; 1813 1814 /* 1815 * For smaller orders, just linearly scan as the number of pages 1816 * to migrate should be relatively small and does not necessarily 1817 * justify freeing up a large block for a small allocation. 1818 */ 1819 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) 1820 return pfn; 1821 1822 /* 1823 * Only allow kcompactd and direct requests for movable pages to 1824 * quickly clear out a MOVABLE pageblock for allocation. This 1825 * reduces the risk that a large movable pageblock is freed for 1826 * an unmovable/reclaimable small allocation. 
1827 */ 1828 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) 1829 return pfn; 1830 1831 /* 1832 * When starting the migration scanner, pick any pageblock within the 1833 * first half of the search space. Otherwise try and pick a pageblock 1834 * within the first eighth to reduce the chances that a migration 1835 * target later becomes a source. 1836 */ 1837 distance = (cc->free_pfn - cc->migrate_pfn) >> 1; 1838 if (cc->migrate_pfn != cc->zone->zone_start_pfn) 1839 distance >>= 2; 1840 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); 1841 1842 for (order = cc->order - 1; 1843 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; 1844 order--) { 1845 struct free_area *area = &cc->zone->free_area[order]; 1846 struct list_head *freelist; 1847 unsigned long flags; 1848 struct page *freepage; 1849 1850 if (!area->nr_free) 1851 continue; 1852 1853 spin_lock_irqsave(&cc->zone->lock, flags); 1854 freelist = &area->free_list[MIGRATE_MOVABLE]; 1855 list_for_each_entry(freepage, freelist, lru) { 1856 unsigned long free_pfn; 1857 1858 if (nr_scanned++ >= limit) { 1859 move_freelist_tail(freelist, freepage); 1860 break; 1861 } 1862 1863 free_pfn = page_to_pfn(freepage); 1864 if (free_pfn < high_pfn) { 1865 /* 1866 * Avoid if skipped recently. Ideally it would 1867 * move to the tail but even safe iteration of 1868 * the list assumes an entry is deleted, not 1869 * reordered. 1870 */ 1871 if (get_pageblock_skip(freepage)) 1872 continue; 1873 1874 /* Reorder to so a future search skips recent pages */ 1875 move_freelist_tail(freelist, freepage); 1876 1877 update_fast_start_pfn(cc, free_pfn); 1878 pfn = pageblock_start_pfn(free_pfn); 1879 if (pfn < cc->zone->zone_start_pfn) 1880 pfn = cc->zone->zone_start_pfn; 1881 cc->fast_search_fail = 0; 1882 found_block = true; 1883 break; 1884 } 1885 } 1886 spin_unlock_irqrestore(&cc->zone->lock, flags); 1887 } 1888 1889 cc->total_migrate_scanned += nr_scanned; 1890 1891 /* 1892 * If fast scanning failed then use a cached entry for a page block 1893 * that had free pages as the basis for starting a linear scan. 1894 */ 1895 if (!found_block) { 1896 cc->fast_search_fail++; 1897 pfn = reinit_migrate_pfn(cc); 1898 } 1899 return pfn; 1900 } 1901 1902 /* 1903 * Isolate all pages that can be migrated from the first suitable block, 1904 * starting at the block pointed to by the migrate scanner pfn within 1905 * compact_control. 1906 */ 1907 static isolate_migrate_t isolate_migratepages(struct compact_control *cc) 1908 { 1909 unsigned long block_start_pfn; 1910 unsigned long block_end_pfn; 1911 unsigned long low_pfn; 1912 struct page *page; 1913 const isolate_mode_t isolate_mode = 1914 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 1915 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1916 bool fast_find_block; 1917 1918 /* 1919 * Start at where we last stopped, or beginning of the zone as 1920 * initialized by compact_zone(). The first failure will use 1921 * the lowest PFN as the starting point for linear scanning. 1922 */ 1923 low_pfn = fast_find_migrateblock(cc); 1924 block_start_pfn = pageblock_start_pfn(low_pfn); 1925 if (block_start_pfn < cc->zone->zone_start_pfn) 1926 block_start_pfn = cc->zone->zone_start_pfn; 1927 1928 /* 1929 * fast_find_migrateblock marks a pageblock skipped so to avoid 1930 * the isolation_suitable check below, check whether the fast 1931 * search was successful. 
1932 */ 1933 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; 1934 1935 /* Only scan within a pageblock boundary */ 1936 block_end_pfn = pageblock_end_pfn(low_pfn); 1937 1938 /* 1939 * Iterate over whole pageblocks until we find the first suitable. 1940 * Do not cross the free scanner. 1941 */ 1942 for (; block_end_pfn <= cc->free_pfn; 1943 fast_find_block = false, 1944 cc->migrate_pfn = low_pfn = block_end_pfn, 1945 block_start_pfn = block_end_pfn, 1946 block_end_pfn += pageblock_nr_pages) { 1947 1948 /* 1949 * This can potentially iterate a massively long zone with 1950 * many pageblocks unsuitable, so periodically check if we 1951 * need to schedule. 1952 */ 1953 if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) 1954 cond_resched(); 1955 1956 page = pageblock_pfn_to_page(block_start_pfn, 1957 block_end_pfn, cc->zone); 1958 if (!page) 1959 continue; 1960 1961 /* 1962 * If isolation recently failed, do not retry. Only check the 1963 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1964 * to be visited multiple times. Assume skip was checked 1965 * before making it "skip" so other compaction instances do 1966 * not scan the same block. 1967 */ 1968 if (pageblock_aligned(low_pfn) && 1969 !fast_find_block && !isolation_suitable(cc, page)) 1970 continue; 1971 1972 /* 1973 * For async direct compaction, only scan the pageblocks of the 1974 * same migratetype without huge pages. Async direct compaction 1975 * is optimistic to see if the minimum amount of work satisfies 1976 * the allocation. The cached PFN is updated as it's possible 1977 * that all remaining blocks between source and target are 1978 * unsuitable and the compaction scanners fail to meet. 1979 */ 1980 if (!suitable_migration_source(cc, page)) { 1981 update_cached_migrate(cc, block_end_pfn); 1982 continue; 1983 } 1984 1985 /* Perform the isolation */ 1986 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, 1987 isolate_mode)) 1988 return ISOLATE_ABORT; 1989 1990 /* 1991 * Either we isolated something and proceed with migration. Or 1992 * we failed and compact_zone should decide if we should 1993 * continue or not. 1994 */ 1995 break; 1996 } 1997 1998 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1999 } 2000 2001 /* 2002 * order == -1 is expected when compacting via 2003 * /proc/sys/vm/compact_memory 2004 */ 2005 static inline bool is_via_compact_memory(int order) 2006 { 2007 return order == -1; 2008 } 2009 2010 /* 2011 * Determine whether kswapd is (or recently was!) running on this node. 2012 * 2013 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't 2014 * zero it. 2015 */ 2016 static bool kswapd_is_running(pg_data_t *pgdat) 2017 { 2018 bool running; 2019 2020 pgdat_kswapd_lock(pgdat); 2021 running = pgdat->kswapd && task_is_running(pgdat->kswapd); 2022 pgdat_kswapd_unlock(pgdat); 2023 2024 return running; 2025 } 2026 2027 /* 2028 * A zone's fragmentation score is the external fragmentation wrt to the 2029 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. 2030 */ 2031 static unsigned int fragmentation_score_zone(struct zone *zone) 2032 { 2033 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); 2034 } 2035 2036 /* 2037 * A weighted zone's fragmentation score is the external fragmentation 2038 * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It 2039 * returns a value in the range [0, 100]. 
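 * As an illustrative example (numbers assumed, not measured): a zone holding three quarters of the node's present pages with a raw score of 40 contributes roughly 30 to the node score, while a tiny zone with the same raw score contributes close to zero.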
2040 * 2041 * The scaling factor ensures that proactive compaction focuses on larger 2042 * zones like ZONE_NORMAL, rather than smaller, specialized zones like 2043 * ZONE_DMA32. For smaller zones, the score value remains close to zero, 2044 * and thus never exceeds the high threshold for proactive compaction. 2045 */ 2046 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) 2047 { 2048 unsigned long score; 2049 2050 score = zone->present_pages * fragmentation_score_zone(zone); 2051 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); 2052 } 2053 2054 /* 2055 * The per-node proactive (background) compaction process is started by its 2056 * corresponding kcompactd thread when the node's fragmentation score 2057 * exceeds the high threshold. The compaction process remains active until 2058 * the node's score falls below the low threshold, or one of the back-off 2059 * conditions is met. 2060 */ 2061 static unsigned int fragmentation_score_node(pg_data_t *pgdat) 2062 { 2063 unsigned int score = 0; 2064 int zoneid; 2065 2066 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2067 struct zone *zone; 2068 2069 zone = &pgdat->node_zones[zoneid]; 2070 if (!populated_zone(zone)) 2071 continue; 2072 score += fragmentation_score_zone_weighted(zone); 2073 } 2074 2075 return score; 2076 } 2077 2078 static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) 2079 { 2080 unsigned int wmark_low; 2081 2082 /* 2083 * Cap the low watermark to avoid excessive compaction 2084 * activity in case a user sets the proactiveness tunable 2085 * close to 100 (maximum). 2086 */ 2087 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); 2088 return low ? wmark_low : min(wmark_low + 10, 100U); 2089 } 2090 2091 static bool should_proactive_compact_node(pg_data_t *pgdat) 2092 { 2093 int wmark_high; 2094 2095 if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) 2096 return false; 2097 2098 wmark_high = fragmentation_score_wmark(pgdat, false); 2099 return fragmentation_score_node(pgdat) > wmark_high; 2100 } 2101 2102 static enum compact_result __compact_finished(struct compact_control *cc) 2103 { 2104 unsigned int order; 2105 const int migratetype = cc->migratetype; 2106 int ret; 2107 2108 /* Compaction run completes if the migrate and free scanner meet */ 2109 if (compact_scanners_met(cc)) { 2110 /* Let the next compaction start anew. */ 2111 reset_cached_positions(cc->zone); 2112 2113 /* 2114 * Mark that the PG_migrate_skip information should be cleared 2115 * by kswapd when it goes to sleep. kcompactd does not set the 2116 * flag itself as the decision to clear it should be based 2117 * directly on an allocation request. 2118 */ 2119 if (cc->direct_compaction) 2120 cc->zone->compact_blockskip_flush = true; 2121 2122 if (cc->whole_zone) 2123 return COMPACT_COMPLETE; 2124 else 2125 return COMPACT_PARTIAL_SKIPPED; 2126 } 2127 2128 if (cc->proactive_compaction) { 2129 int score, wmark_low; 2130 pg_data_t *pgdat; 2131 2132 pgdat = cc->zone->zone_pgdat; 2133 if (kswapd_is_running(pgdat)) 2134 return COMPACT_PARTIAL_SKIPPED; 2135 2136 score = fragmentation_score_zone(cc->zone); 2137 wmark_low = fragmentation_score_wmark(pgdat, true); 2138 2139 if (score > wmark_low) 2140 ret = COMPACT_CONTINUE; 2141 else 2142 ret = COMPACT_SUCCESS; 2143 2144 goto out; 2145 } 2146 2147 if (is_via_compact_memory(cc->order)) 2148 return COMPACT_CONTINUE; 2149 2150 /* 2151 * Always finish scanning a pageblock to reduce the possibility of 2152 * fallbacks in the future.
This is particularly important when 2153 * migration source is unmovable/reclaimable but it's not worth 2154 * special casing. 2155 */ 2156 if (!pageblock_aligned(cc->migrate_pfn)) 2157 return COMPACT_CONTINUE; 2158 2159 /* Direct compactor: Is a suitable page free? */ 2160 ret = COMPACT_NO_SUITABLE_PAGE; 2161 for (order = cc->order; order <= MAX_ORDER; order++) { 2162 struct free_area *area = &cc->zone->free_area[order]; 2163 bool can_steal; 2164 2165 /* Job done if page is free of the right migratetype */ 2166 if (!free_area_empty(area, migratetype)) 2167 return COMPACT_SUCCESS; 2168 2169 #ifdef CONFIG_CMA 2170 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 2171 if (migratetype == MIGRATE_MOVABLE && 2172 !free_area_empty(area, MIGRATE_CMA)) 2173 return COMPACT_SUCCESS; 2174 #endif 2175 /* 2176 * Job done if allocation would steal freepages from 2177 * other migratetype buddy lists. 2178 */ 2179 if (find_suitable_fallback(area, order, migratetype, 2180 true, &can_steal) != -1) 2181 /* 2182 * Movable pages are OK in any pageblock. If we are 2183 * stealing for a non-movable allocation, make sure 2184 * we finish compacting the current pageblock first 2185 * (which is assured by the above migrate_pfn align 2186 * check) so it is as free as possible and we won't 2187 * have to steal another one soon. 2188 */ 2189 return COMPACT_SUCCESS; 2190 } 2191 2192 out: 2193 if (cc->contended || fatal_signal_pending(current)) 2194 ret = COMPACT_CONTENDED; 2195 2196 return ret; 2197 } 2198 2199 static enum compact_result compact_finished(struct compact_control *cc) 2200 { 2201 int ret; 2202 2203 ret = __compact_finished(cc); 2204 trace_mm_compaction_finished(cc->zone, cc->order, ret); 2205 if (ret == COMPACT_NO_SUITABLE_PAGE) 2206 ret = COMPACT_CONTINUE; 2207 2208 return ret; 2209 } 2210 2211 static bool __compaction_suitable(struct zone *zone, int order, 2212 int highest_zoneidx, 2213 unsigned long wmark_target) 2214 { 2215 unsigned long watermark; 2216 /* 2217 * Watermarks for order-0 must be met for compaction to be able to 2218 * isolate free pages for migration targets. This means that the 2219 * watermark and alloc_flags have to match, or be more pessimistic than 2220 * the check in __isolate_free_page(). We don't use the direct 2221 * compactor's alloc_flags, as they are not relevant for freepage 2222 * isolation. We however do use the direct compactor's highest_zoneidx 2223 * to skip over zones where lowmem reserves would prevent allocation 2224 * even if compaction succeeds. 2225 * For costly orders, we require low watermark instead of min for 2226 * compaction to proceed to increase its chances. 2227 * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2228 * suitable migration targets 2229 */ 2230 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 2231 low_wmark_pages(zone) : min_wmark_pages(zone); 2232 watermark += compact_gap(order); 2233 return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2234 ALLOC_CMA, wmark_target); 2235 } 2236 2237 /* 2238 * compaction_suitable: Is this suitable to run compaction on this zone now? 
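 * It returns true when the zone's free pages are above the relevant watermark plus compact_gap(order) and, for costly orders, the fragmentation index does not indicate that failures are caused by a lack of memory rather than by fragmentation.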
2239 */ 2240 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) 2241 { 2242 enum compact_result compact_result; 2243 bool suitable; 2244 2245 suitable = __compaction_suitable(zone, order, highest_zoneidx, 2246 zone_page_state(zone, NR_FREE_PAGES)); 2247 /* 2248 * fragmentation index determines if allocation failures are due to 2249 * low memory or external fragmentation 2250 * 2251 * index of -1000 would imply allocations might succeed depending on 2252 * watermarks, but we already failed the high-order watermark check 2253 * index towards 0 implies failure is due to lack of memory 2254 * index towards 1000 implies failure is due to fragmentation 2255 * 2256 * Only compact if a failure would be due to fragmentation. Also 2257 * ignore fragindex for non-costly orders where the alternative to 2258 * a successful reclaim/compaction is OOM. Fragindex and the 2259 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent 2260 * excessive compaction for costly orders, but it should not be at the 2261 * expense of system stability. 2262 */ 2263 if (suitable) { 2264 compact_result = COMPACT_CONTINUE; 2265 if (order > PAGE_ALLOC_COSTLY_ORDER) { 2266 int fragindex = fragmentation_index(zone, order); 2267 2268 if (fragindex >= 0 && 2269 fragindex <= sysctl_extfrag_threshold) { 2270 suitable = false; 2271 compact_result = COMPACT_NOT_SUITABLE_ZONE; 2272 } 2273 } 2274 } else { 2275 compact_result = COMPACT_SKIPPED; 2276 } 2277 2278 trace_mm_compaction_suitable(zone, order, compact_result); 2279 2280 return suitable; 2281 } 2282 2283 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 2284 int alloc_flags) 2285 { 2286 struct zone *zone; 2287 struct zoneref *z; 2288 2289 /* 2290 * Make sure at least one zone would pass __compaction_suitable if we continue 2291 * retrying the reclaim. 2292 */ 2293 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2294 ac->highest_zoneidx, ac->nodemask) { 2295 unsigned long available; 2296 2297 /* 2298 * Do not consider all the reclaimable memory because we do not 2299 * want to thrash just for a single high order allocation which 2300 * is not even guaranteed to succeed even if __compaction_suitable 2301 * is happy about the watermark check. 2302 */ 2303 available = zone_reclaimable_pages(zone) / order; 2304 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 2305 if (__compaction_suitable(zone, order, ac->highest_zoneidx, 2306 available)) 2307 return true; 2308 } 2309 2310 return false; 2311 } 2312 2313 static enum compact_result 2314 compact_zone(struct compact_control *cc, struct capture_control *capc) 2315 { 2316 enum compact_result ret; 2317 unsigned long start_pfn = cc->zone->zone_start_pfn; 2318 unsigned long end_pfn = zone_end_pfn(cc->zone); 2319 unsigned long last_migrated_pfn; 2320 const bool sync = cc->mode != MIGRATE_ASYNC; 2321 bool update_cached; 2322 unsigned int nr_succeeded = 0; 2323 2324 /* 2325 * These counters track activities during zone compaction. Initialize 2326 * them before compacting a new zone.
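 * The same compact_control is reused for several zones in a row by kcompactd, proactive compaction and the compact_memory sysctl, so stale per-zone state must not leak from one zone into the next.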
2327 */ 2328 cc->total_migrate_scanned = 0; 2329 cc->total_free_scanned = 0; 2330 cc->nr_migratepages = 0; 2331 cc->nr_freepages = 0; 2332 INIT_LIST_HEAD(&cc->freepages); 2333 INIT_LIST_HEAD(&cc->migratepages); 2334 2335 cc->migratetype = gfp_migratetype(cc->gfp_mask); 2336 2337 if (!is_via_compact_memory(cc->order)) { 2338 unsigned long watermark; 2339 2340 /* Allocation can already succeed, nothing to do */ 2341 watermark = wmark_pages(cc->zone, 2342 cc->alloc_flags & ALLOC_WMARK_MASK); 2343 if (zone_watermark_ok(cc->zone, cc->order, watermark, 2344 cc->highest_zoneidx, cc->alloc_flags)) 2345 return COMPACT_SUCCESS; 2346 2347 /* Compaction is likely to fail */ 2348 if (!compaction_suitable(cc->zone, cc->order, 2349 cc->highest_zoneidx)) 2350 return COMPACT_SKIPPED; 2351 } 2352 2353 /* 2354 * Clear pageblock skip if there were failures recently and compaction 2355 * is about to be retried after being deferred. 2356 */ 2357 if (compaction_restarting(cc->zone, cc->order)) 2358 __reset_isolation_suitable(cc->zone); 2359 2360 /* 2361 * Setup to move all movable pages to the end of the zone. Use cached 2362 * information on where the scanners should start (unless we explicitly 2363 * want to compact the whole zone), but check that it is initialised 2364 * by ensuring the values are within zone boundaries. 2365 */ 2366 cc->fast_start_pfn = 0; 2367 if (cc->whole_zone) { 2368 cc->migrate_pfn = start_pfn; 2369 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2370 } else { 2371 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 2372 cc->free_pfn = cc->zone->compact_cached_free_pfn; 2373 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 2374 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2375 cc->zone->compact_cached_free_pfn = cc->free_pfn; 2376 } 2377 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2378 cc->migrate_pfn = start_pfn; 2379 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 2380 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2381 } 2382 2383 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2384 cc->whole_zone = true; 2385 } 2386 2387 last_migrated_pfn = 0; 2388 2389 /* 2390 * The migrate scanner has separate cached PFNs for ASYNC and SYNC* migration on 2391 * the basis that some migrations will fail in ASYNC mode. However, 2392 * if the cached PFNs match and pageblocks are skipped due to having 2393 * no isolation candidates, then the sync state does not matter. 2394 * Until a pageblock with isolation candidates is found, keep the 2395 * cached PFNs in sync to avoid revisiting the same blocks. 2396 */ 2397 update_cached = !sync && 2398 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 2399 2400 trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); 2401 2402 /* lru_add_drain_all() could be expensive as it involves other CPUs */ 2403 lru_add_drain(); 2404 2405 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 2406 int err; 2407 unsigned long iteration_start_pfn = cc->migrate_pfn; 2408 2409 /* 2410 * Avoid multiple rescans of the same pageblock which can 2411 * happen if a page cannot be isolated (dirty/writeback in 2412 * async mode) or if the migrated pages are being allocated 2413 * before the pageblock is cleared. The first rescan will 2414 * capture the entire pageblock for migration. If it fails, 2415 * it'll be marked skip and scanning will proceed as normal.
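 * A rescan is detected by comparing the pageblock of last_migrated_pfn with the pageblock this iteration starts in; when they match, finish_pageblock is set so that the whole pageblock is captured.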
2416 */ 2417 cc->finish_pageblock = false; 2418 if (pageblock_start_pfn(last_migrated_pfn) == 2419 pageblock_start_pfn(iteration_start_pfn)) { 2420 cc->finish_pageblock = true; 2421 } 2422 2423 rescan: 2424 switch (isolate_migratepages(cc)) { 2425 case ISOLATE_ABORT: 2426 ret = COMPACT_CONTENDED; 2427 putback_movable_pages(&cc->migratepages); 2428 cc->nr_migratepages = 0; 2429 goto out; 2430 case ISOLATE_NONE: 2431 if (update_cached) { 2432 cc->zone->compact_cached_migrate_pfn[1] = 2433 cc->zone->compact_cached_migrate_pfn[0]; 2434 } 2435 2436 /* 2437 * We haven't isolated and migrated anything, but 2438 * there might still be unflushed migrations from 2439 * previous cc->order aligned block. 2440 */ 2441 goto check_drain; 2442 case ISOLATE_SUCCESS: 2443 update_cached = false; 2444 last_migrated_pfn = iteration_start_pfn; 2445 } 2446 2447 err = migrate_pages(&cc->migratepages, compaction_alloc, 2448 compaction_free, (unsigned long)cc, cc->mode, 2449 MR_COMPACTION, &nr_succeeded); 2450 2451 trace_mm_compaction_migratepages(cc, nr_succeeded); 2452 2453 /* All pages were either migrated or will be released */ 2454 cc->nr_migratepages = 0; 2455 if (err) { 2456 putback_movable_pages(&cc->migratepages); 2457 /* 2458 * migrate_pages() may return -ENOMEM when scanners meet 2459 * and we want compact_finished() to detect it 2460 */ 2461 if (err == -ENOMEM && !compact_scanners_met(cc)) { 2462 ret = COMPACT_CONTENDED; 2463 goto out; 2464 } 2465 /* 2466 * If an ASYNC or SYNC_LIGHT fails to migrate a page 2467 * within the current order-aligned block and 2468 * fast_find_migrateblock may be used then scan the 2469 * remainder of the pageblock. This will mark the 2470 * pageblock "skip" to avoid rescanning in the near 2471 * future. This will isolate more pages than necessary 2472 * for the request but avoid loops due to 2473 * fast_find_migrateblock revisiting blocks that were 2474 * recently partially scanned. 2475 */ 2476 if (!pageblock_aligned(cc->migrate_pfn) && 2477 !cc->ignore_skip_hint && !cc->finish_pageblock && 2478 (cc->mode < MIGRATE_SYNC)) { 2479 cc->finish_pageblock = true; 2480 2481 /* 2482 * Draining pcplists does not help THP if 2483 * any page failed to migrate. Even after 2484 * drain, the pageblock will not be free. 2485 */ 2486 if (cc->order == COMPACTION_HPAGE_ORDER) 2487 last_migrated_pfn = 0; 2488 2489 goto rescan; 2490 } 2491 } 2492 2493 /* Stop if a page has been captured */ 2494 if (capc && capc->page) { 2495 ret = COMPACT_SUCCESS; 2496 break; 2497 } 2498 2499 check_drain: 2500 /* 2501 * Has the migration scanner moved away from the previous 2502 * cc->order aligned block where we migrated from? If yes, 2503 * flush the pages that were freed, so that they can merge and 2504 * compact_finished() can detect immediately if allocation 2505 * would succeed. 2506 */ 2507 if (cc->order > 0 && last_migrated_pfn) { 2508 unsigned long current_block_start = 2509 block_start_pfn(cc->migrate_pfn, cc->order); 2510 2511 if (last_migrated_pfn < current_block_start) { 2512 lru_add_drain_cpu_zone(cc->zone); 2513 /* No more flushing until we migrate again */ 2514 last_migrated_pfn = 0; 2515 } 2516 } 2517 } 2518 2519 out: 2520 /* 2521 * Release free pages and update where the free scanner should restart, 2522 * so we don't leave any returned pages behind in the next attempt. 
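 * release_freepages() returns the highest PFN it freed; rounding that down to a pageblock boundary gives the point from which the free scanner can safely restart.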
2523 */ 2524 if (cc->nr_freepages > 0) { 2525 unsigned long free_pfn = release_freepages(&cc->freepages); 2526 2527 cc->nr_freepages = 0; 2528 VM_BUG_ON(free_pfn == 0); 2529 /* The cached pfn is always the first in a pageblock */ 2530 free_pfn = pageblock_start_pfn(free_pfn); 2531 /* 2532 * Only go back, not forward. The cached pfn might have been 2533 * already reset to zone end in compact_finished() 2534 */ 2535 if (free_pfn > cc->zone->compact_cached_free_pfn) 2536 cc->zone->compact_cached_free_pfn = free_pfn; 2537 } 2538 2539 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 2540 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 2541 2542 trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); 2543 2544 VM_BUG_ON(!list_empty(&cc->freepages)); 2545 VM_BUG_ON(!list_empty(&cc->migratepages)); 2546 2547 return ret; 2548 } 2549 2550 static enum compact_result compact_zone_order(struct zone *zone, int order, 2551 gfp_t gfp_mask, enum compact_priority prio, 2552 unsigned int alloc_flags, int highest_zoneidx, 2553 struct page **capture) 2554 { 2555 enum compact_result ret; 2556 struct compact_control cc = { 2557 .order = order, 2558 .search_order = order, 2559 .gfp_mask = gfp_mask, 2560 .zone = zone, 2561 .mode = (prio == COMPACT_PRIO_ASYNC) ? 2562 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2563 .alloc_flags = alloc_flags, 2564 .highest_zoneidx = highest_zoneidx, 2565 .direct_compaction = true, 2566 .whole_zone = (prio == MIN_COMPACT_PRIORITY), 2567 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 2568 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 2569 }; 2570 struct capture_control capc = { 2571 .cc = &cc, 2572 .page = NULL, 2573 }; 2574 2575 /* 2576 * Make sure the structs are really initialized before we expose the 2577 * capture control, in case we are interrupted and the interrupt handler 2578 * frees a page. 2579 */ 2580 barrier(); 2581 WRITE_ONCE(current->capture_control, &capc); 2582 2583 ret = compact_zone(&cc, &capc); 2584 2585 /* 2586 * Make sure we hide capture control first before we read the captured 2587 * page pointer, otherwise an interrupt could free and capture a page 2588 * and we would leak it. 2589 */ 2590 WRITE_ONCE(current->capture_control, NULL); 2591 *capture = READ_ONCE(capc.page); 2592 /* 2593 * Technically, it is also possible that compaction is skipped but 2594 * the page is still captured purely by luck (an IRQ came and freed the page). 2595 * Returning COMPACT_SUCCESS in such cases helps in properly accounting 2596 * the COMPACT[STALL|FAIL] when compaction is skipped. 2597 */ 2598 if (*capture) 2599 ret = COMPACT_SUCCESS; 2600 2601 return ret; 2602 } 2603 2604 /** 2605 * try_to_compact_pages - Direct compact to satisfy a high-order allocation 2606 * @gfp_mask: The GFP mask of the current allocation 2607 * @order: The order of the current allocation 2608 * @alloc_flags: The allocation flags of the current allocation 2609 * @ac: The context of current allocation 2610 * @prio: Determines how hard direct compaction should try to succeed 2611 * @capture: Pointer to free page created by compaction will be stored here 2612 * 2613 * This is the main entry point for direct page compaction.
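 *
 * Return: the highest compact_result seen across the zones that were tried, or COMPACT_SKIPPED when the gfp_mask does not allow IO.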
2614 */ 2615 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2616 unsigned int alloc_flags, const struct alloc_context *ac, 2617 enum compact_priority prio, struct page **capture) 2618 { 2619 int may_perform_io = (__force int)(gfp_mask & __GFP_IO); 2620 struct zoneref *z; 2621 struct zone *zone; 2622 enum compact_result rc = COMPACT_SKIPPED; 2623 2624 /* 2625 * Check if the GFP flags allow compaction - GFP_NOIO is really 2626 * tricky context because the migration might require IO 2627 */ 2628 if (!may_perform_io) 2629 return COMPACT_SKIPPED; 2630 2631 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2632 2633 /* Compact each zone in the list */ 2634 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2635 ac->highest_zoneidx, ac->nodemask) { 2636 enum compact_result status; 2637 2638 if (prio > MIN_COMPACT_PRIORITY 2639 && compaction_deferred(zone, order)) { 2640 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 2641 continue; 2642 } 2643 2644 status = compact_zone_order(zone, order, gfp_mask, prio, 2645 alloc_flags, ac->highest_zoneidx, capture); 2646 rc = max(status, rc); 2647 2648 /* The allocation should succeed, stop compacting */ 2649 if (status == COMPACT_SUCCESS) { 2650 /* 2651 * We think the allocation will succeed in this zone, 2652 * but it is not certain, hence the false. The caller 2653 * will repeat this with true if allocation indeed 2654 * succeeds in this zone. 2655 */ 2656 compaction_defer_reset(zone, order, false); 2657 2658 break; 2659 } 2660 2661 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2662 status == COMPACT_PARTIAL_SKIPPED)) 2663 /* 2664 * We think that allocation won't succeed in this zone 2665 * so we defer compaction there. If it ends up 2666 * succeeding after all, it will be reset. 2667 */ 2668 defer_compaction(zone, order); 2669 2670 /* 2671 * We might have stopped compacting due to need_resched() in 2672 * async compaction, or due to a fatal signal detected. In that 2673 * case do not try further zones 2674 */ 2675 if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2676 || fatal_signal_pending(current)) 2677 break; 2678 } 2679 2680 return rc; 2681 } 2682 2683 /* 2684 * Compact all zones within a node till each zone's fragmentation score 2685 * reaches within proactive compaction thresholds (as determined by the 2686 * proactiveness tunable). 2687 * 2688 * It is possible that the function returns before reaching score targets 2689 * due to various back-off conditions, such as, contention on per-node or 2690 * per-zone locks. 
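 * The back-off itself is handled in __compact_finished(): compaction of a zone stops early if kswapd starts running on the node, once the zone's score reaches the low watermark, or if the scanners report contention.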
2691 */ 2692 static void proactive_compact_node(pg_data_t *pgdat) 2693 { 2694 int zoneid; 2695 struct zone *zone; 2696 struct compact_control cc = { 2697 .order = -1, 2698 .mode = MIGRATE_SYNC_LIGHT, 2699 .ignore_skip_hint = true, 2700 .whole_zone = true, 2701 .gfp_mask = GFP_KERNEL, 2702 .proactive_compaction = true, 2703 }; 2704 2705 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2706 zone = &pgdat->node_zones[zoneid]; 2707 if (!populated_zone(zone)) 2708 continue; 2709 2710 cc.zone = zone; 2711 2712 compact_zone(&cc, NULL); 2713 2714 count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 2715 cc.total_migrate_scanned); 2716 count_compact_events(KCOMPACTD_FREE_SCANNED, 2717 cc.total_free_scanned); 2718 } 2719 } 2720 2721 /* Compact all zones within a node */ 2722 static void compact_node(int nid) 2723 { 2724 pg_data_t *pgdat = NODE_DATA(nid); 2725 int zoneid; 2726 struct zone *zone; 2727 struct compact_control cc = { 2728 .order = -1, 2729 .mode = MIGRATE_SYNC, 2730 .ignore_skip_hint = true, 2731 .whole_zone = true, 2732 .gfp_mask = GFP_KERNEL, 2733 }; 2734 2735 2736 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2737 2738 zone = &pgdat->node_zones[zoneid]; 2739 if (!populated_zone(zone)) 2740 continue; 2741 2742 cc.zone = zone; 2743 2744 compact_zone(&cc, NULL); 2745 } 2746 } 2747 2748 /* Compact all nodes in the system */ 2749 static void compact_nodes(void) 2750 { 2751 int nid; 2752 2753 /* Flush pending updates to the LRU lists */ 2754 lru_add_drain_all(); 2755 2756 for_each_online_node(nid) 2757 compact_node(nid); 2758 } 2759 2760 static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 2761 void *buffer, size_t *length, loff_t *ppos) 2762 { 2763 int rc, nid; 2764 2765 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 2766 if (rc) 2767 return rc; 2768 2769 if (write && sysctl_compaction_proactiveness) { 2770 for_each_online_node(nid) { 2771 pg_data_t *pgdat = NODE_DATA(nid); 2772 2773 if (pgdat->proactive_compact_trigger) 2774 continue; 2775 2776 pgdat->proactive_compact_trigger = true; 2777 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, 2778 pgdat->nr_zones - 1); 2779 wake_up_interruptible(&pgdat->kcompactd_wait); 2780 } 2781 } 2782 2783 return 0; 2784 } 2785 2786 /* 2787 * This is the entry point for compacting all nodes via 2788 * /proc/sys/vm/compact_memory 2789 */ 2790 static int sysctl_compaction_handler(struct ctl_table *table, int write, 2791 void *buffer, size_t *length, loff_t *ppos) 2792 { 2793 int ret; 2794 2795 ret = proc_dointvec(table, write, buffer, length, ppos); 2796 if (ret) 2797 return ret; 2798 2799 if (sysctl_compact_memory != 1) 2800 return -EINVAL; 2801 2802 if (write) 2803 compact_nodes(); 2804 2805 return 0; 2806 } 2807 2808 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 2809 static ssize_t compact_store(struct device *dev, 2810 struct device_attribute *attr, 2811 const char *buf, size_t count) 2812 { 2813 int nid = dev->id; 2814 2815 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 2816 /* Flush pending updates to the LRU lists */ 2817 lru_add_drain_all(); 2818 2819 compact_node(nid); 2820 } 2821 2822 return count; 2823 } 2824 static DEVICE_ATTR_WO(compact); 2825 2826 int compaction_register_node(struct node *node) 2827 { 2828 return device_create_file(&node->dev, &dev_attr_compact); 2829 } 2830 2831 void compaction_unregister_node(struct node *node) 2832 { 2833 return device_remove_file(&node->dev, &dev_attr_compact); 2834 } 2835 #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2836 2837 static 
inline bool kcompactd_work_requested(pg_data_t *pgdat) 2838 { 2839 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 2840 pgdat->proactive_compact_trigger; 2841 } 2842 2843 static bool kcompactd_node_suitable(pg_data_t *pgdat) 2844 { 2845 int zoneid; 2846 struct zone *zone; 2847 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2848 2849 for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2850 zone = &pgdat->node_zones[zoneid]; 2851 2852 if (!populated_zone(zone)) 2853 continue; 2854 2855 /* Allocation can already succeed, check other zones */ 2856 if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, 2857 min_wmark_pages(zone), 2858 highest_zoneidx, 0)) 2859 continue; 2860 2861 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 2862 highest_zoneidx)) 2863 return true; 2864 } 2865 2866 return false; 2867 } 2868 2869 static void kcompactd_do_work(pg_data_t *pgdat) 2870 { 2871 /* 2872 * With no special task, compact all zones so that a page of requested 2873 * order is allocatable. 2874 */ 2875 int zoneid; 2876 struct zone *zone; 2877 struct compact_control cc = { 2878 .order = pgdat->kcompactd_max_order, 2879 .search_order = pgdat->kcompactd_max_order, 2880 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2881 .mode = MIGRATE_SYNC_LIGHT, 2882 .ignore_skip_hint = false, 2883 .gfp_mask = GFP_KERNEL, 2884 }; 2885 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2886 cc.highest_zoneidx); 2887 count_compact_event(KCOMPACTD_WAKE); 2888 2889 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2890 int status; 2891 2892 zone = &pgdat->node_zones[zoneid]; 2893 if (!populated_zone(zone)) 2894 continue; 2895 2896 if (compaction_deferred(zone, cc.order)) 2897 continue; 2898 2899 /* Allocation can already succeed, nothing to do */ 2900 if (zone_watermark_ok(zone, cc.order, 2901 min_wmark_pages(zone), zoneid, 0)) 2902 continue; 2903 2904 if (!compaction_suitable(zone, cc.order, zoneid)) 2905 continue; 2906 2907 if (kthread_should_stop()) 2908 return; 2909 2910 cc.zone = zone; 2911 status = compact_zone(&cc, NULL); 2912 2913 if (status == COMPACT_SUCCESS) { 2914 compaction_defer_reset(zone, cc.order, false); 2915 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2916 /* 2917 * Buddy pages may become stranded on pcps that could 2918 * otherwise coalesce on the zone's free area for 2919 * order >= cc.order. This is ratelimited by the 2920 * upcoming deferral. 2921 */ 2922 drain_all_pages(zone); 2923 2924 /* 2925 * We use sync migration mode here, so we defer like 2926 * sync direct compaction does. 2927 */ 2928 defer_compaction(zone, cc.order); 2929 } 2930 2931 count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 2932 cc.total_migrate_scanned); 2933 count_compact_events(KCOMPACTD_FREE_SCANNED, 2934 cc.total_free_scanned); 2935 } 2936 2937 /* 2938 * Regardless of success, we are done until woken up next. 
But remember 2939 * the requested order/highest_zoneidx in case it was higher/tighter 2940 * than our current ones 2941 */ 2942 if (pgdat->kcompactd_max_order <= cc.order) 2943 pgdat->kcompactd_max_order = 0; 2944 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 2945 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2946 } 2947 2948 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2949 { 2950 if (!order) 2951 return; 2952 2953 if (pgdat->kcompactd_max_order < order) 2954 pgdat->kcompactd_max_order = order; 2955 2956 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 2957 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2958 2959 /* 2960 * Pairs with implicit barrier in wait_event_freezable() 2961 * such that wakeups are not missed. 2962 */ 2963 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2964 return; 2965 2966 if (!kcompactd_node_suitable(pgdat)) 2967 return; 2968 2969 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2970 highest_zoneidx); 2971 wake_up_interruptible(&pgdat->kcompactd_wait); 2972 } 2973 2974 /* 2975 * The background compaction daemon, started as a kernel thread 2976 * from the init process. 2977 */ 2978 static int kcompactd(void *p) 2979 { 2980 pg_data_t *pgdat = (pg_data_t *)p; 2981 struct task_struct *tsk = current; 2982 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); 2983 long timeout = default_timeout; 2984 2985 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2986 2987 if (!cpumask_empty(cpumask)) 2988 set_cpus_allowed_ptr(tsk, cpumask); 2989 2990 set_freezable(); 2991 2992 pgdat->kcompactd_max_order = 0; 2993 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2994 2995 while (!kthread_should_stop()) { 2996 unsigned long pflags; 2997 2998 /* 2999 * Avoid the unnecessary wakeup for proactive compaction 3000 * when it is disabled. 3001 */ 3002 if (!sysctl_compaction_proactiveness) 3003 timeout = MAX_SCHEDULE_TIMEOUT; 3004 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 3005 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, 3006 kcompactd_work_requested(pgdat), timeout) && 3007 !pgdat->proactive_compact_trigger) { 3008 3009 psi_memstall_enter(&pflags); 3010 kcompactd_do_work(pgdat); 3011 psi_memstall_leave(&pflags); 3012 /* 3013 * Reset the timeout value. The defer timeout from 3014 * proactive compaction is lost here but that is fine, 3015 * as the zone's condition may have changed substantially 3016 * and carrying on with the previous defer interval would 3017 * not be useful. 3018 */ 3019 timeout = default_timeout; 3020 continue; 3021 } 3022 3023 /* 3024 * Start the proactive work with default timeout. Based 3025 * on the fragmentation score, this timeout is updated. 3026 */ 3027 timeout = default_timeout; 3028 if (should_proactive_compact_node(pgdat)) { 3029 unsigned int prev_score, score; 3030 3031 prev_score = fragmentation_score_node(pgdat); 3032 proactive_compact_node(pgdat); 3033 score = fragmentation_score_node(pgdat); 3034 /* 3035 * Defer proactive compaction if the fragmentation 3036 * score did not go down, i.e. no progress was made. 3037 */ 3038 if (unlikely(score >= prev_score)) 3039 timeout = 3040 default_timeout << COMPACT_MAX_DEFER_SHIFT; 3041 } 3042 if (unlikely(pgdat->proactive_compact_trigger)) 3043 pgdat->proactive_compact_trigger = false; 3044 } 3045 3046 return 0; 3047 } 3048 3049 /* 3050 * This kcompactd start function will be called by init and node-hot-add. 3051 * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
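 * The CPU binding is restored by kcompactd_cpu_online() below when one of the node's CPUs comes back online.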
3052 */ 3053 void __meminit kcompactd_run(int nid) 3054 { 3055 pg_data_t *pgdat = NODE_DATA(nid); 3056 3057 if (pgdat->kcompactd) 3058 return; 3059 3060 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 3061 if (IS_ERR(pgdat->kcompactd)) { 3062 pr_err("Failed to start kcompactd on node %d\n", nid); 3063 pgdat->kcompactd = NULL; 3064 } 3065 } 3066 3067 /* 3068 * Called by memory hotplug when all memory in a node is offlined. Caller must 3069 * be holding mem_hotplug_begin/done(). 3070 */ 3071 void __meminit kcompactd_stop(int nid) 3072 { 3073 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 3074 3075 if (kcompactd) { 3076 kthread_stop(kcompactd); 3077 NODE_DATA(nid)->kcompactd = NULL; 3078 } 3079 } 3080 3081 /* 3082 * It's optimal to keep kcompactd on the same CPUs as their memory, but 3083 * not required for correctness. So if the last cpu in a node goes 3084 * away, we get changed to run anywhere: as the first one comes back, 3085 * restore their cpu bindings. 3086 */ 3087 static int kcompactd_cpu_online(unsigned int cpu) 3088 { 3089 int nid; 3090 3091 for_each_node_state(nid, N_MEMORY) { 3092 pg_data_t *pgdat = NODE_DATA(nid); 3093 const struct cpumask *mask; 3094 3095 mask = cpumask_of_node(pgdat->node_id); 3096 3097 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3098 /* One of our CPUs online: restore mask */ 3099 if (pgdat->kcompactd) 3100 set_cpus_allowed_ptr(pgdat->kcompactd, mask); 3101 } 3102 return 0; 3103 } 3104 3105 static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, 3106 int write, void *buffer, size_t *lenp, loff_t *ppos) 3107 { 3108 int ret, old; 3109 3110 if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write) 3111 return proc_dointvec_minmax(table, write, buffer, lenp, ppos); 3112 3113 old = *(int *)table->data; 3114 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 3115 if (ret) 3116 return ret; 3117 if (old != *(int *)table->data) 3118 pr_warn_once("sysctl attribute %s changed by %s[%d]\n", 3119 table->procname, current->comm, 3120 task_pid_nr(current)); 3121 return ret; 3122 } 3123 3124 static struct ctl_table vm_compaction[] = { 3125 { 3126 .procname = "compact_memory", 3127 .data = &sysctl_compact_memory, 3128 .maxlen = sizeof(int), 3129 .mode = 0200, 3130 .proc_handler = sysctl_compaction_handler, 3131 }, 3132 { 3133 .procname = "compaction_proactiveness", 3134 .data = &sysctl_compaction_proactiveness, 3135 .maxlen = sizeof(sysctl_compaction_proactiveness), 3136 .mode = 0644, 3137 .proc_handler = compaction_proactiveness_sysctl_handler, 3138 .extra1 = SYSCTL_ZERO, 3139 .extra2 = SYSCTL_ONE_HUNDRED, 3140 }, 3141 { 3142 .procname = "extfrag_threshold", 3143 .data = &sysctl_extfrag_threshold, 3144 .maxlen = sizeof(int), 3145 .mode = 0644, 3146 .proc_handler = proc_dointvec_minmax, 3147 .extra1 = SYSCTL_ZERO, 3148 .extra2 = SYSCTL_ONE_THOUSAND, 3149 }, 3150 { 3151 .procname = "compact_unevictable_allowed", 3152 .data = &sysctl_compact_unevictable_allowed, 3153 .maxlen = sizeof(int), 3154 .mode = 0644, 3155 .proc_handler = proc_dointvec_minmax_warn_RT_change, 3156 .extra1 = SYSCTL_ZERO, 3157 .extra2 = SYSCTL_ONE, 3158 }, 3159 { } 3160 }; 3161 3162 static int __init kcompactd_init(void) 3163 { 3164 int nid; 3165 int ret; 3166 3167 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3168 "mm/compaction:online", 3169 kcompactd_cpu_online, NULL); 3170 if (ret < 0) { 3171 pr_err("kcompactd: failed to register hotplug callbacks.\n"); 3172 return ret; 3173 } 3174 3175 for_each_node_state(nid, N_MEMORY) 3176 
kcompactd_run(nid); 3177 register_sysctl_init("vm", vm_compaction); 3178 return 0; 3179 } 3180 subsys_initcall(kcompactd_init) 3181 3182 #endif /* CONFIG_COMPACTION */ 3183