// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
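
/*
 * Quick illustration of the pfn helpers above, under the assumption that
 * pageblock_order == 9 (2MB pageblocks with 4K pages): for pfn 1000,
 * pageblock_start_pfn() rounds down to 512 and pageblock_end_pfn() rounds
 * up to 1024, bracketing the pageblock that contains the pfn.
 */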

/*
 * Page order with respect to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page
	 * released by the driver after isolation. With it, VM migration
	 * doesn't try to put the page back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
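
/*
 * Illustrative walk-through of the deferral backoff implemented below
 * (values assumed, not from a trace): after a failure, defer_compaction()
 * bumps compact_defer_shift, so compaction_deferred() then skips the next
 * 1 << compact_defer_shift attempts. Repeated failures double the window
 * each time until it saturates at 1 << COMPACT_MAX_DEFER_SHIFT == 64
 * skipped attempts per retry.
 */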

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compaction
 * attempts are skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}
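
/*
 * A note on the sampling in the helper below: assuming
 * PAGE_ALLOC_COSTLY_ORDER == 3, it inspects every 8th page of the
 * pageblock and clears the skip hint as soon as one sample looks like a
 * migration source (PageLRU) or target (PageBuddy), instead of checking
 * all pageblock_nr_pages pages.
 */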

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}
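
/*
 * The reset walk below is convergent: the migrate candidate advances up
 * from zone_start_pfn while the free candidate retreats down from the
 * zone end, one pageblock each per iteration, so the zone is traversed
 * at most once in total.
 */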

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. Once a scanner has
	 * found its starting pageblock, both PageBuddy and PageLRU are
	 * checked, as the pageblock is then suitable as both source and
	 * target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for readers/writers. Returns true if it was already
 * set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}
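
/*
 * Note on the cached migrate PFNs updated above: index 0 tracks the
 * restart point for async compaction and index 1 for sync compaction.
 * The sync entry is only advanced for non-MIGRATE_ASYNC modes, so an
 * async pass cannot push the sync scanner past blocks it never inspected.
 */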

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort: either type due to a pending
 * fatal signal, or async compaction due to need_resched().
 * Returns false when compaction can continue (sync compaction might have
 * scheduled).
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
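
/*
 * Typical use of the two helpers above, sketched from the free page
 * isolation loop below:
 *
 *	if (!(pfn % SWAP_CLUSTER_MAX) &&
 *	    compact_unlock_should_abort(&zone->lock, flags, &locked, cc))
 *		break;	// fatal signal, or async scan needs to reschedule
 *	...
 *	if (!locked)
 *		locked = compact_lock_irqsave(&zone->lock, &flags, cc);
 */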

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	bool too_many;

	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}
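
/*
 * Example of the limit above with invented numbers: a node with 800MB of
 * pages on its active plus inactive LRU lists tolerates up to 400MB of
 * isolated pages before isolation backs off, matching the
 * isolated > (inactive + active) / 2 test in too_many_isolated().
 */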

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns an errno, like -EAGAIN or -EINTR in case of e.g. a pending signal
 * or congestion, -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += compound_nr(page) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow anonymous pages to be migrated in GFP_NOFS
		 * context, because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		if (!__isolate_lru_page_prepare(page, isolate_mode))
			goto isolate_fail_put;

		/* Try to isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = folio_lruvec(page_folio(page));

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page_folio(page));

			/* Try to get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	cc->migrate_pfn = low_pfn;

	return ret;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
 * in case we could not allocate a page, or 0.
 */
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
			unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);

		if (ret)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return ret;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
					struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}
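
/*
 * In other words: only async direct compaction is picky about the source
 * block's migratetype above; MOVABLE requests accept any movable block,
 * other requests require an exact migratetype match, and everything else
 * (sync modes, kcompactd, proactive compaction) scans any block that is
 * not persistently skipped.
 */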

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a valid
		 * range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such that the unscanned pages are scanned
 * first on the next iteration of the free scanner.
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}
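
/*
 * Small illustration of move_freelist_head() above (hypothetical list):
 * given freelist A->B->C->D with freepage == C, the entries before C are
 * cut and spliced to the tail, yielding C->D->A->B, so the next reverse
 * scan from the tail visits the unscanned entries (B, then A) first.
 */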

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;

	/* Scan before */
	if (start_pfn != pfn) {
		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
		if (cc->nr_freepages >= cc->nr_migratepages)
			return;
	}

	/* Scan after */
	start_pfn = pfn + nr_isolated;
	if (start_pfn < end_pfn)
		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (cc->nr_freepages < cc->nr_migratepages)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}
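
/*
 * Example of the round-robin order walk above, with assumed values: for
 * cc->order == 5 and cc->search_order == 3, the loop in
 * fast_isolate_freepages() below visits orders 3, 2, 1, 0, 4 and then
 * next_search_order() signals a completed wrap by returning -1 while
 * advancing cc->search_order for the next attempt.
 */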

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return cc->free_pfn;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, lru) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so that a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return cc->free_pfn;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn, nr_isolated);
	return low_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	isolate_start_pfn = fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
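
/*
 * compaction_alloc() and compaction_free() above are the callback pair
 * handed to migrate_pages() later in this file: a migration target is
 * taken from cc->freepages, and on failure is returned there rather than
 * to the buddy allocator.
 */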

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
#ifdef CONFIG_PREEMPT_RT
int sysctl_compact_unevictable_allowed __read_mostly = 0;
#else
int sysctl_compact_unevictable_allowed __read_mostly = 1;
#endif

static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}

/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, lru) {
			unsigned long free_pfn;

			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so that a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				cc->fast_search_fail = 0;
				found_block = true;
				set_pageblock_skip(freepage);
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock marks a pageblock skipped so to avoid
	 * the isolation_suitable check below, check whether the fast
	 * search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);
1885 */
1886 for (; block_end_pfn <= cc->free_pfn;
1887 fast_find_block = false,
1888 cc->migrate_pfn = low_pfn = block_end_pfn,
1889 block_start_pfn = block_end_pfn,
1890 block_end_pfn += pageblock_nr_pages) {
1891
1892 /*
1893 * This can potentially iterate a massively long zone with
1894 * many pageblocks unsuitable, so periodically check if we
1895 * need to schedule.
1896 */
1897 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
1898 cond_resched();
1899
1900 page = pageblock_pfn_to_page(block_start_pfn,
1901 block_end_pfn, cc->zone);
1902 if (!page)
1903 continue;
1904
1905 /*
1906 * If isolation recently failed, do not retry. Only check the
1907 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
1908 * to be visited multiple times. Assume the skip hint was
1909 * checked before the block was marked "skip", so other
1910 * compaction instances do not scan the same block.
1911 */
1912 if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
1913 !fast_find_block && !isolation_suitable(cc, page))
1914 continue;
1915
1916 /*
1917 * For async compaction, also only scan in MOVABLE blocks
1918 * without huge pages. Async compaction is optimistic to see
1919 * if the minimum amount of work satisfies the allocation.
1920 * The cached PFN is updated as it's possible that all
1921 * remaining blocks between source and target are unsuitable
1922 * and the compaction scanners fail to meet.
1923 */
1924 if (!suitable_migration_source(cc, page)) {
1925 update_cached_migrate(cc, block_end_pfn);
1926 continue;
1927 }
1928
1929 /* Perform the isolation */
1930 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
1931 isolate_mode))
1932 return ISOLATE_ABORT;
1933
1934 /*
1935 * Either we isolated something and proceed with migration. Or
1936 * we failed and compact_zone should decide if we should
1937 * continue or not.
1938 */
1939 break;
1940 }
1941
1942 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1943 }
1944
1945 /*
1946 * order == -1 is expected when compacting via
1947 * /proc/sys/vm/compact_memory
1948 */
1949 static inline bool is_via_compact_memory(int order)
1950 {
1951 return order == -1;
1952 }
1953
1954 static bool kswapd_is_running(pg_data_t *pgdat)
1955 {
1956 return pgdat->kswapd && task_is_running(pgdat->kswapd);
1957 }
1958
1959 /*
1960 * A zone's fragmentation score is the external fragmentation with respect
1961 * to the COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
1962 */
1963 static unsigned int fragmentation_score_zone(struct zone *zone)
1964 {
1965 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
1966 }
1967
1968 /*
1969 * A weighted zone's fragmentation score is the external fragmentation
1970 * with respect to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
1971 * returns a value in the range [0, 100].
1972 *
1973 * The scaling factor ensures that proactive compaction focuses on larger
1974 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
1975 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
1976 * and thus never exceeds the high threshold for proactive compaction.
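 *
 * Worked example (made-up sizes): a 4 GiB zone on a 16 GiB node with
 * a raw fragmentation score of 80 contributes about 80 * 4/16 = 20
 * to the node score, while a 256 MiB zone with the same raw score
 * contributes only about 80 * 256/16384, i.e. roughly 1.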
1977 */
1978 static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
1979 {
1980 unsigned long score;
1981
1982 score = zone->present_pages * fragmentation_score_zone(zone);
1983 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
1984 }
1985
1986 /*
1987 * The per-node proactive (background) compaction process is started by its
1988 * corresponding kcompactd thread when the node's fragmentation score
1989 * exceeds the high threshold. The compaction process remains active till
1990 * the node's score falls below the low threshold, or one of the back-off
1991 * conditions is met.
1992 */
1993 static unsigned int fragmentation_score_node(pg_data_t *pgdat)
1994 {
1995 unsigned int score = 0;
1996 int zoneid;
1997
1998 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1999 struct zone *zone;
2000
2001 zone = &pgdat->node_zones[zoneid];
2002 score += fragmentation_score_zone_weighted(zone);
2003 }
2004
2005 return score;
2006 }
2007
2008 static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
2009 {
2010 unsigned int wmark_low;
2011
2012 /*
2013 * Cap the low watermark to avoid excessive compaction
2014 * activity in case a user sets the proactiveness tunable
2015 * close to 100 (maximum).
2016 */
2017 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
2018 return low ? wmark_low : min(wmark_low + 10, 100U);
2019 }
2020
2021 static bool should_proactive_compact_node(pg_data_t *pgdat)
2022 {
2023 int wmark_high;
2024
2025 if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
2026 return false;
2027
2028 wmark_high = fragmentation_score_wmark(pgdat, false);
2029 return fragmentation_score_node(pgdat) > wmark_high;
2030 }
2031
2032 static enum compact_result __compact_finished(struct compact_control *cc)
2033 {
2034 unsigned int order;
2035 const int migratetype = cc->migratetype;
2036 int ret;
2037
2038 /* Compaction run completes if the migrate and free scanner meet */
2039 if (compact_scanners_met(cc)) {
2040 /* Let the next compaction start anew. */
2041 reset_cached_positions(cc->zone);
2042
2043 /*
2044 * Mark that the PG_migrate_skip information should be cleared
2045 * by kswapd when it goes to sleep. kcompactd does not set the
2046 * flag itself as the decision to clear it should be directly
2047 * based on an allocation request.
2048 */
2049 if (cc->direct_compaction)
2050 cc->zone->compact_blockskip_flush = true;
2051
2052 if (cc->whole_zone)
2053 return COMPACT_COMPLETE;
2054 else
2055 return COMPACT_PARTIAL_SKIPPED;
2056 }
2057
2058 if (cc->proactive_compaction) {
2059 int score, wmark_low;
2060 pg_data_t *pgdat;
2061
2062 pgdat = cc->zone->zone_pgdat;
2063 if (kswapd_is_running(pgdat))
2064 return COMPACT_PARTIAL_SKIPPED;
2065
2066 score = fragmentation_score_zone(cc->zone);
2067 wmark_low = fragmentation_score_wmark(pgdat, true);
2068
2069 if (score > wmark_low)
2070 ret = COMPACT_CONTINUE;
2071 else
2072 ret = COMPACT_SUCCESS;
2073
2074 goto out;
2075 }
2076
2077 if (is_via_compact_memory(cc->order))
2078 return COMPACT_CONTINUE;
2079
2080 /*
2081 * Always finish scanning a pageblock to reduce the possibility of
2082 * fallbacks in the future. This is particularly important when the
2083 * migration source is unmovable/reclaimable but it's not worth
2084 * special casing.
2085 */
2086 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
2087 return COMPACT_CONTINUE;
2088
2089 /* Direct compactor: Is a suitable page free?
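 * (Informal reading of the loop below: a direct MIGRATE_MOVABLE
 * request of order 4, say, is declared COMPACT_SUCCESS as soon as any
 * of free_area[4] .. free_area[MAX_ORDER - 1] holds a free page of a
 * suitable migratetype, including MIGRATE_CMA with CONFIG_CMA=y.)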
*/ 2090 ret = COMPACT_NO_SUITABLE_PAGE; 2091 for (order = cc->order; order < MAX_ORDER; order++) { 2092 struct free_area *area = &cc->zone->free_area[order]; 2093 bool can_steal; 2094 2095 /* Job done if page is free of the right migratetype */ 2096 if (!free_area_empty(area, migratetype)) 2097 return COMPACT_SUCCESS; 2098 2099 #ifdef CONFIG_CMA 2100 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 2101 if (migratetype == MIGRATE_MOVABLE && 2102 !free_area_empty(area, MIGRATE_CMA)) 2103 return COMPACT_SUCCESS; 2104 #endif 2105 /* 2106 * Job done if allocation would steal freepages from 2107 * other migratetype buddy lists. 2108 */ 2109 if (find_suitable_fallback(area, order, migratetype, 2110 true, &can_steal) != -1) { 2111 2112 /* movable pages are OK in any pageblock */ 2113 if (migratetype == MIGRATE_MOVABLE) 2114 return COMPACT_SUCCESS; 2115 2116 /* 2117 * We are stealing for a non-movable allocation. Make 2118 * sure we finish compacting the current pageblock 2119 * first so it is as free as possible and we won't 2120 * have to steal another one soon. This only applies 2121 * to sync compaction, as async compaction operates 2122 * on pageblocks of the same migratetype. 2123 */ 2124 if (cc->mode == MIGRATE_ASYNC || 2125 IS_ALIGNED(cc->migrate_pfn, 2126 pageblock_nr_pages)) { 2127 return COMPACT_SUCCESS; 2128 } 2129 2130 ret = COMPACT_CONTINUE; 2131 break; 2132 } 2133 } 2134 2135 out: 2136 if (cc->contended || fatal_signal_pending(current)) 2137 ret = COMPACT_CONTENDED; 2138 2139 return ret; 2140 } 2141 2142 static enum compact_result compact_finished(struct compact_control *cc) 2143 { 2144 int ret; 2145 2146 ret = __compact_finished(cc); 2147 trace_mm_compaction_finished(cc->zone, cc->order, ret); 2148 if (ret == COMPACT_NO_SUITABLE_PAGE) 2149 ret = COMPACT_CONTINUE; 2150 2151 return ret; 2152 } 2153 2154 static enum compact_result __compaction_suitable(struct zone *zone, int order, 2155 unsigned int alloc_flags, 2156 int highest_zoneidx, 2157 unsigned long wmark_target) 2158 { 2159 unsigned long watermark; 2160 2161 if (is_via_compact_memory(order)) 2162 return COMPACT_CONTINUE; 2163 2164 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 2165 /* 2166 * If watermarks for high-order allocation are already met, there 2167 * should be no need for compaction at all. 2168 */ 2169 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 2170 alloc_flags)) 2171 return COMPACT_SUCCESS; 2172 2173 /* 2174 * Watermarks for order-0 must be met for compaction to be able to 2175 * isolate free pages for migration targets. This means that the 2176 * watermark and alloc_flags have to match, or be more pessimistic than 2177 * the check in __isolate_free_page(). We don't use the direct 2178 * compactor's alloc_flags, as they are not relevant for freepage 2179 * isolation. We however do use the direct compactor's highest_zoneidx 2180 * to skip over zones where lowmem reserves would prevent allocation 2181 * even if compaction succeeds. 2182 * For costly orders, we require low watermark instead of min for 2183 * compaction to proceed to increase its chances. 2184 * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2185 * suitable migration targets 2186 */ 2187 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
2188 low_wmark_pages(zone) : min_wmark_pages(zone);
2189 watermark += compact_gap(order);
2190 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2191 ALLOC_CMA, wmark_target))
2192 return COMPACT_SKIPPED;
2193
2194 return COMPACT_CONTINUE;
2195 }
2196
2197 /*
2198 * compaction_suitable: Is this suitable to run compaction on this zone now?
2199 * Returns
2200 * COMPACT_SKIPPED - If there are too few free pages for compaction
2201 * COMPACT_SUCCESS - If the allocation would succeed without compaction
2202 * COMPACT_CONTINUE - If compaction should run now
2203 */
2204 enum compact_result compaction_suitable(struct zone *zone, int order,
2205 unsigned int alloc_flags,
2206 int highest_zoneidx)
2207 {
2208 enum compact_result ret;
2209 int fragindex;
2210
2211 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
2212 zone_page_state(zone, NR_FREE_PAGES));
2213 /*
2214 * The fragmentation index determines if allocation failures are due to
2215 * low memory or external fragmentation:
2216 *
2217 * an index of -1000 would imply allocations might succeed depending on
2218 * watermarks, but we already failed the high-order watermark check;
2219 * an index towards 0 implies failure is due to lack of memory;
2220 * an index towards 1000 implies failure is due to fragmentation.
2221 *
2222 * Only compact if a failure would be due to fragmentation. Also
2223 * ignore fragindex for non-costly orders where the alternative to
2224 * a successful reclaim/compaction is OOM. The fragindex and the
2225 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
2226 * excessive compaction for costly orders, but they should not be at the
2227 * expense of system stability.
2228 */
2229 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
2230 fragindex = fragmentation_index(zone, order);
2231 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
2232 ret = COMPACT_NOT_SUITABLE_ZONE;
2233 }
2234
2235 trace_mm_compaction_suitable(zone, order, ret);
2236 if (ret == COMPACT_NOT_SUITABLE_ZONE)
2237 ret = COMPACT_SKIPPED;
2238
2239 return ret;
2240 }
2241
2242 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2243 int alloc_flags)
2244 {
2245 struct zone *zone;
2246 struct zoneref *z;
2247
2248 /*
2249 * Make sure at least one zone would pass __compaction_suitable if we
2250 * continue retrying the reclaim.
2251 */
2252 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2253 ac->highest_zoneidx, ac->nodemask) {
2254 unsigned long available;
2255 enum compact_result compact_result;
2256
2257 /*
2258 * Do not consider all the reclaimable memory because we do not
2259 * want to thrash just for a single high-order allocation which
2260 * is not even guaranteed to appear even if __compaction_suitable
2261 * is happy about the watermark check.
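 *
 * Worked example (made-up numbers): for an order-9 request against a
 * zone with 9 GiB reclaimable and 1 GiB free, the code below passes
 * an estimate of 9 GiB / 9 + 1 GiB = 2 GiB worth of pages to
 * __compaction_suitable() as the hypothetical watermark target.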
2262 */
2263 available = zone_reclaimable_pages(zone) / order;
2264 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2265 compact_result = __compaction_suitable(zone, order, alloc_flags,
2266 ac->highest_zoneidx, available);
2267 if (compact_result != COMPACT_SKIPPED)
2268 return true;
2269 }
2270
2271 return false;
2272 }
2273
2274 static enum compact_result
2275 compact_zone(struct compact_control *cc, struct capture_control *capc)
2276 {
2277 enum compact_result ret;
2278 unsigned long start_pfn = cc->zone->zone_start_pfn;
2279 unsigned long end_pfn = zone_end_pfn(cc->zone);
2280 unsigned long last_migrated_pfn;
2281 const bool sync = cc->mode != MIGRATE_ASYNC;
2282 bool update_cached;
2283
2284 /*
2285 * These counters track activities during zone compaction. Initialize
2286 * them before compacting a new zone.
2287 */
2288 cc->total_migrate_scanned = 0;
2289 cc->total_free_scanned = 0;
2290 cc->nr_migratepages = 0;
2291 cc->nr_freepages = 0;
2292 INIT_LIST_HEAD(&cc->freepages);
2293 INIT_LIST_HEAD(&cc->migratepages);
2294
2295 cc->migratetype = gfp_migratetype(cc->gfp_mask);
2296 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2297 cc->highest_zoneidx);
2298 /* Compaction is likely to fail */
2299 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
2300 return ret;
2301
2302 /* huh, compaction_suitable is returning something unexpected */
2303 VM_BUG_ON(ret != COMPACT_CONTINUE);
2304
2305 /*
2306 * Clear pageblock skip if there were failures recently and compaction
2307 * is about to be retried after being deferred.
2308 */
2309 if (compaction_restarting(cc->zone, cc->order))
2310 __reset_isolation_suitable(cc->zone);
2311
2312 /*
2313 * Setup to move all movable pages to the end of the zone. Use cached
2314 * information on where the scanners should start (unless we explicitly
2315 * want to compact the whole zone), but check that it is initialised
2316 * by ensuring the values are within zone boundaries.
2317 */
2318 cc->fast_start_pfn = 0;
2319 if (cc->whole_zone) {
2320 cc->migrate_pfn = start_pfn;
2321 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2322 } else {
2323 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2324 cc->free_pfn = cc->zone->compact_cached_free_pfn;
2325 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2326 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2327 cc->zone->compact_cached_free_pfn = cc->free_pfn;
2328 }
2329 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2330 cc->migrate_pfn = start_pfn;
2331 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2332 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2333 }
2334
2335 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2336 cc->whole_zone = true;
2337 }
2338
2339 last_migrated_pfn = 0;
2340
2341 /*
2342 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2343 * the basis that some migrations will fail in ASYNC mode. However,
2344 * if the cached PFNs match and pageblocks are skipped due to having
2345 * no isolation candidates, then the sync state does not matter.
2346 * Until a pageblock with isolation candidates is found, keep the
2347 * cached PFNs in sync to avoid revisiting the same blocks.
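 *
 * Informal sketch of the effect: if an ASYNC pass starts with
 * compact_cached_migrate_pfn[0] == compact_cached_migrate_pfn[1] and
 * each visited pageblock yields ISOLATE_NONE, the loop below copies
 * the ASYNC position into the SYNC slot as it goes, so a later SYNC
 * pass resumes where the ASYNC scan stopped instead of rescanning
 * the same empty blocks.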
2348 */
2349 update_cached = !sync &&
2350 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2351
2352 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
2353 cc->free_pfn, end_pfn, sync);
2354
2355 /* lru_add_drain_all() could be expensive, as it involves other CPUs */
2356 lru_add_drain();
2357
2358 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2359 int err;
2360 unsigned long iteration_start_pfn = cc->migrate_pfn;
2361
2362 /*
2363 * Avoid multiple rescans which can happen if a page cannot be
2364 * isolated (dirty/writeback in async mode) or if the migrated
2365 * pages are being allocated before the pageblock is cleared.
2366 * The first rescan will capture the entire pageblock for
2367 * migration. If it fails, it'll be marked skip and scanning
2368 * will proceed as normal.
2369 */
2370 cc->rescan = false;
2371 if (pageblock_start_pfn(last_migrated_pfn) ==
2372 pageblock_start_pfn(iteration_start_pfn)) {
2373 cc->rescan = true;
2374 }
2375
2376 switch (isolate_migratepages(cc)) {
2377 case ISOLATE_ABORT:
2378 ret = COMPACT_CONTENDED;
2379 putback_movable_pages(&cc->migratepages);
2380 cc->nr_migratepages = 0;
2381 goto out;
2382 case ISOLATE_NONE:
2383 if (update_cached) {
2384 cc->zone->compact_cached_migrate_pfn[1] =
2385 cc->zone->compact_cached_migrate_pfn[0];
2386 }
2387
2388 /*
2389 * We haven't isolated and migrated anything, but
2390 * there might still be unflushed migrations from
2391 * a previous cc->order aligned block.
2392 */
2393 goto check_drain;
2394 case ISOLATE_SUCCESS:
2395 update_cached = false;
2396 last_migrated_pfn = iteration_start_pfn;
2397 }
2398
2399 err = migrate_pages(&cc->migratepages, compaction_alloc,
2400 compaction_free, (unsigned long)cc, cc->mode,
2401 MR_COMPACTION, NULL);
2402
2403 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
2404 &cc->migratepages);
2405
2406 /* All pages were either migrated or will be released */
2407 cc->nr_migratepages = 0;
2408 if (err) {
2409 putback_movable_pages(&cc->migratepages);
2410 /*
2411 * migrate_pages() may return -ENOMEM when scanners meet
2412 * and we want compact_finished() to detect it
2413 */
2414 if (err == -ENOMEM && !compact_scanners_met(cc)) {
2415 ret = COMPACT_CONTENDED;
2416 goto out;
2417 }
2418 /*
2419 * We failed to migrate at least one page in the current
2420 * order-aligned block, so skip the rest of it.
2421 */
2422 if (cc->direct_compaction &&
2423 (cc->mode == MIGRATE_ASYNC)) {
2424 cc->migrate_pfn = block_end_pfn(
2425 cc->migrate_pfn - 1, cc->order);
2426 /* Draining pcplists is useless in this case */
2427 last_migrated_pfn = 0;
2428 }
2429 }
2430
2431 check_drain:
2432 /*
2433 * Has the migration scanner moved away from the previous
2434 * cc->order aligned block where we migrated from? If yes,
2435 * flush the pages that were freed, so that they can merge and
2436 * compact_finished() can detect immediately if allocation
2437 * would succeed.
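 *
 * Example (hypothetical PFNs): with cc->order == 9, migrating pages
 * from PFN 0x1000 records last_migrated_pfn there; once
 * cc->migrate_pfn crosses 0x1200, block_start_pfn() below evaluates
 * to 0x1200 > 0x1000, so the local per-cpu lists are flushed and the
 * freed pages get a chance to merge into an order-9 buddy.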
2438 */
2439 if (cc->order > 0 && last_migrated_pfn) {
2440 unsigned long current_block_start =
2441 block_start_pfn(cc->migrate_pfn, cc->order);
2442
2443 if (last_migrated_pfn < current_block_start) {
2444 lru_add_drain_cpu_zone(cc->zone);
2445 /* No more flushing until we migrate again */
2446 last_migrated_pfn = 0;
2447 }
2448 }
2449
2450 /* Stop if a page has been captured */
2451 if (capc && capc->page) {
2452 ret = COMPACT_SUCCESS;
2453 break;
2454 }
2455 }
2456
2457 out:
2458 /*
2459 * Release free pages and update where the free scanner should restart,
2460 * so we don't leave any returned pages behind in the next attempt.
2461 */
2462 if (cc->nr_freepages > 0) {
2463 unsigned long free_pfn = release_freepages(&cc->freepages);
2464
2465 cc->nr_freepages = 0;
2466 VM_BUG_ON(free_pfn == 0);
2467 /* The cached pfn is always the first in a pageblock */
2468 free_pfn = pageblock_start_pfn(free_pfn);
2469 /*
2470 * Only go back, not forward. The cached pfn might have been
2471 * already reset to zone end in compact_finished()
2472 */
2473 if (free_pfn > cc->zone->compact_cached_free_pfn)
2474 cc->zone->compact_cached_free_pfn = free_pfn;
2475 }
2476
2477 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2478 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2479
2480 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
2481 cc->free_pfn, end_pfn, sync, ret);
2482
2483 return ret;
2484 }
2485
2486 static enum compact_result compact_zone_order(struct zone *zone, int order,
2487 gfp_t gfp_mask, enum compact_priority prio,
2488 unsigned int alloc_flags, int highest_zoneidx,
2489 struct page **capture)
2490 {
2491 enum compact_result ret;
2492 struct compact_control cc = {
2493 .order = order,
2494 .search_order = order,
2495 .gfp_mask = gfp_mask,
2496 .zone = zone,
2497 .mode = (prio == COMPACT_PRIO_ASYNC) ?
2498 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
2499 .alloc_flags = alloc_flags,
2500 .highest_zoneidx = highest_zoneidx,
2501 .direct_compaction = true,
2502 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
2503 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2504 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2505 };
2506 struct capture_control capc = {
2507 .cc = &cc,
2508 .page = NULL,
2509 };
2510
2511 /*
2512 * Make sure the structs are really initialized before we expose the
2513 * capture control, in case we are interrupted and the interrupt handler
2514 * frees a page.
2515 */
2516 barrier();
2517 WRITE_ONCE(current->capture_control, &capc);
2518
2519 ret = compact_zone(&cc, &capc);
2520
2521 VM_BUG_ON(!list_empty(&cc.freepages));
2522 VM_BUG_ON(!list_empty(&cc.migratepages));
2523
2524 /*
2525 * Make sure we hide capture control first before we read the captured
2526 * page pointer, otherwise an interrupt could free and capture a page
2527 * and we would leak it.
2528 */
2529 WRITE_ONCE(current->capture_control, NULL);
2530 *capture = READ_ONCE(capc.page);
2531 /*
2532 * Technically, it is also possible that compaction is skipped but
2533 * the page is still captured out of luck (an IRQ came and freed the page).
2534 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
2535 * the COMPACT[STALL|FAIL] when compaction is skipped.
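 * (In other words: even when compact_zone() reported COMPACT_SKIPPED,
 * an interrupt may already have freed a page and routed it into
 * capc.page while the capture control was exposed, so the *capture
 * check below upgrades the return value.)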
2536 */
2537 if (*capture)
2538 ret = COMPACT_SUCCESS;
2539
2540 return ret;
2541 }
2542
2543 int sysctl_extfrag_threshold = 500;
2544
2545 /**
2546 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2547 * @gfp_mask: The GFP mask of the current allocation
2548 * @order: The order of the current allocation
2549 * @alloc_flags: The allocation flags of the current allocation
2550 * @ac: The context of the current allocation
2551 * @prio: Determines how hard direct compaction should try to succeed
2552 * @capture: A pointer to the free page created by compaction will be stored here
2553 *
2554 * This is the main entry point for direct page compaction.
2555 */
2556 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2557 unsigned int alloc_flags, const struct alloc_context *ac,
2558 enum compact_priority prio, struct page **capture)
2559 {
2560 int may_perform_io = gfp_mask & __GFP_IO;
2561 struct zoneref *z;
2562 struct zone *zone;
2563 enum compact_result rc = COMPACT_SKIPPED;
2564
2565 /*
2566 * Check if the GFP flags allow compaction - GFP_NOIO is a really
2567 * tricky context because the migration might require IO
2568 */
2569 if (!may_perform_io)
2570 return COMPACT_SKIPPED;
2571
2572 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2573
2574 /* Compact each zone in the list */
2575 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2576 ac->highest_zoneidx, ac->nodemask) {
2577 enum compact_result status;
2578
2579 if (prio > MIN_COMPACT_PRIORITY
2580 && compaction_deferred(zone, order)) {
2581 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
2582 continue;
2583 }
2584
2585 status = compact_zone_order(zone, order, gfp_mask, prio,
2586 alloc_flags, ac->highest_zoneidx, capture);
2587 rc = max(status, rc);
2588
2589 /* The allocation should succeed, stop compacting */
2590 if (status == COMPACT_SUCCESS) {
2591 /*
2592 * We think the allocation will succeed in this zone,
2593 * but it is not certain, hence the false. The caller
2594 * will repeat this with true if allocation indeed
2595 * succeeds in this zone.
2596 */
2597 compaction_defer_reset(zone, order, false);
2598
2599 break;
2600 }
2601
2602 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2603 status == COMPACT_PARTIAL_SKIPPED))
2604 /*
2605 * We think that allocation won't succeed in this zone
2606 * so we defer compaction there. If it ends up
2607 * succeeding after all, it will be reset.
2608 */
2609 defer_compaction(zone, order);
2610
2611 /*
2612 * We might have stopped compacting due to need_resched() in
2613 * async compaction, or due to a fatal signal being detected.
2614 * In that case do not try further zones.
2615 */
2616 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2617 || fatal_signal_pending(current))
2618 break;
2619 }
2620
2621 return rc;
2622 }
2623
2624 /*
2625 * Compact all zones within a node till each zone's fragmentation score
2626 * falls within the proactive compaction thresholds (as determined by the
2627 * proactiveness tunable).
2628 *
2629 * It is possible that the function returns before reaching score targets
2630 * due to various back-off conditions, such as contention on per-node or
2631 * per-zone locks.
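 *
 * For instance, __compact_finished() ends a proactive zone run with
 * COMPACT_PARTIAL_SKIPPED as soon as kswapd starts running on the
 * node, and with COMPACT_SUCCESS once the zone score no longer
 * exceeds the low watermark computed by fragmentation_score_wmark().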
2632 */ 2633 static void proactive_compact_node(pg_data_t *pgdat) 2634 { 2635 int zoneid; 2636 struct zone *zone; 2637 struct compact_control cc = { 2638 .order = -1, 2639 .mode = MIGRATE_SYNC_LIGHT, 2640 .ignore_skip_hint = true, 2641 .whole_zone = true, 2642 .gfp_mask = GFP_KERNEL, 2643 .proactive_compaction = true, 2644 }; 2645 2646 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2647 zone = &pgdat->node_zones[zoneid]; 2648 if (!populated_zone(zone)) 2649 continue; 2650 2651 cc.zone = zone; 2652 2653 compact_zone(&cc, NULL); 2654 2655 VM_BUG_ON(!list_empty(&cc.freepages)); 2656 VM_BUG_ON(!list_empty(&cc.migratepages)); 2657 } 2658 } 2659 2660 /* Compact all zones within a node */ 2661 static void compact_node(int nid) 2662 { 2663 pg_data_t *pgdat = NODE_DATA(nid); 2664 int zoneid; 2665 struct zone *zone; 2666 struct compact_control cc = { 2667 .order = -1, 2668 .mode = MIGRATE_SYNC, 2669 .ignore_skip_hint = true, 2670 .whole_zone = true, 2671 .gfp_mask = GFP_KERNEL, 2672 }; 2673 2674 2675 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2676 2677 zone = &pgdat->node_zones[zoneid]; 2678 if (!populated_zone(zone)) 2679 continue; 2680 2681 cc.zone = zone; 2682 2683 compact_zone(&cc, NULL); 2684 2685 VM_BUG_ON(!list_empty(&cc.freepages)); 2686 VM_BUG_ON(!list_empty(&cc.migratepages)); 2687 } 2688 } 2689 2690 /* Compact all nodes in the system */ 2691 static void compact_nodes(void) 2692 { 2693 int nid; 2694 2695 /* Flush pending updates to the LRU lists */ 2696 lru_add_drain_all(); 2697 2698 for_each_online_node(nid) 2699 compact_node(nid); 2700 } 2701 2702 /* 2703 * Tunable for proactive compaction. It determines how 2704 * aggressively the kernel should compact memory in the 2705 * background. It takes values in the range [0, 100]. 2706 */ 2707 unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 2708 2709 int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 2710 void *buffer, size_t *length, loff_t *ppos) 2711 { 2712 int rc, nid; 2713 2714 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 2715 if (rc) 2716 return rc; 2717 2718 if (write && sysctl_compaction_proactiveness) { 2719 for_each_online_node(nid) { 2720 pg_data_t *pgdat = NODE_DATA(nid); 2721 2722 if (pgdat->proactive_compact_trigger) 2723 continue; 2724 2725 pgdat->proactive_compact_trigger = true; 2726 wake_up_interruptible(&pgdat->kcompactd_wait); 2727 } 2728 } 2729 2730 return 0; 2731 } 2732 2733 /* 2734 * This is the entry point for compacting all nodes via 2735 * /proc/sys/vm/compact_memory 2736 */ 2737 int sysctl_compaction_handler(struct ctl_table *table, int write, 2738 void *buffer, size_t *length, loff_t *ppos) 2739 { 2740 if (write) 2741 compact_nodes(); 2742 2743 return 0; 2744 } 2745 2746 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 2747 static ssize_t compact_store(struct device *dev, 2748 struct device_attribute *attr, 2749 const char *buf, size_t count) 2750 { 2751 int nid = dev->id; 2752 2753 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 2754 /* Flush pending updates to the LRU lists */ 2755 lru_add_drain_all(); 2756 2757 compact_node(nid); 2758 } 2759 2760 return count; 2761 } 2762 static DEVICE_ATTR_WO(compact); 2763 2764 int compaction_register_node(struct node *node) 2765 { 2766 return device_create_file(&node->dev, &dev_attr_compact); 2767 } 2768 2769 void compaction_unregister_node(struct node *node) 2770 { 2771 return device_remove_file(&node->dev, &dev_attr_compact); 2772 } 2773 #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2774 2775 
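/*
 * Usage sketch for the knobs above (illustrative; assumes the sysctl
 * and sysfs entries are wired up as on a typical configuration):
 *
 *   echo 1 > /proc/sys/vm/compact_memory                  # all nodes
 *   echo 30 > /proc/sys/vm/compaction_proactiveness       # more proactive
 *   echo 1 > /sys/devices/system/node/node0/compact       # one NUMA node
 */
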
static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2776 { 2777 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 2778 pgdat->proactive_compact_trigger; 2779 } 2780 2781 static bool kcompactd_node_suitable(pg_data_t *pgdat) 2782 { 2783 int zoneid; 2784 struct zone *zone; 2785 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2786 2787 for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2788 zone = &pgdat->node_zones[zoneid]; 2789 2790 if (!populated_zone(zone)) 2791 continue; 2792 2793 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 2794 highest_zoneidx) == COMPACT_CONTINUE) 2795 return true; 2796 } 2797 2798 return false; 2799 } 2800 2801 static void kcompactd_do_work(pg_data_t *pgdat) 2802 { 2803 /* 2804 * With no special task, compact all zones so that a page of requested 2805 * order is allocatable. 2806 */ 2807 int zoneid; 2808 struct zone *zone; 2809 struct compact_control cc = { 2810 .order = pgdat->kcompactd_max_order, 2811 .search_order = pgdat->kcompactd_max_order, 2812 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2813 .mode = MIGRATE_SYNC_LIGHT, 2814 .ignore_skip_hint = false, 2815 .gfp_mask = GFP_KERNEL, 2816 }; 2817 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2818 cc.highest_zoneidx); 2819 count_compact_event(KCOMPACTD_WAKE); 2820 2821 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2822 int status; 2823 2824 zone = &pgdat->node_zones[zoneid]; 2825 if (!populated_zone(zone)) 2826 continue; 2827 2828 if (compaction_deferred(zone, cc.order)) 2829 continue; 2830 2831 if (compaction_suitable(zone, cc.order, 0, zoneid) != 2832 COMPACT_CONTINUE) 2833 continue; 2834 2835 if (kthread_should_stop()) 2836 return; 2837 2838 cc.zone = zone; 2839 status = compact_zone(&cc, NULL); 2840 2841 if (status == COMPACT_SUCCESS) { 2842 compaction_defer_reset(zone, cc.order, false); 2843 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2844 /* 2845 * Buddy pages may become stranded on pcps that could 2846 * otherwise coalesce on the zone's free area for 2847 * order >= cc.order. This is ratelimited by the 2848 * upcoming deferral. 2849 */ 2850 drain_all_pages(zone); 2851 2852 /* 2853 * We use sync migration mode here, so we defer like 2854 * sync direct compaction does. 2855 */ 2856 defer_compaction(zone, cc.order); 2857 } 2858 2859 count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 2860 cc.total_migrate_scanned); 2861 count_compact_events(KCOMPACTD_FREE_SCANNED, 2862 cc.total_free_scanned); 2863 2864 VM_BUG_ON(!list_empty(&cc.freepages)); 2865 VM_BUG_ON(!list_empty(&cc.migratepages)); 2866 } 2867 2868 /* 2869 * Regardless of success, we are done until woken up next. But remember 2870 * the requested order/highest_zoneidx in case it was higher/tighter 2871 * than our current ones 2872 */ 2873 if (pgdat->kcompactd_max_order <= cc.order) 2874 pgdat->kcompactd_max_order = 0; 2875 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 2876 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2877 } 2878 2879 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2880 { 2881 if (!order) 2882 return; 2883 2884 if (pgdat->kcompactd_max_order < order) 2885 pgdat->kcompactd_max_order = order; 2886 2887 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 2888 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2889 2890 /* 2891 * Pairs with implicit barrier in wait_event_freezable() 2892 * such that wakeups are not missed. 
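 *
 * Concretely: the updates of kcompactd_max_order and
 * kcompactd_highest_zoneidx above must be visible before the
 * wq_has_sleeper() check below, otherwise kcompactd could re-evaluate
 * kcompactd_work_requested() with stale values and sleep through the
 * wakeup.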
2893 */
2894 if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2895 return;
2896
2897 if (!kcompactd_node_suitable(pgdat))
2898 return;
2899
2900 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2901 highest_zoneidx);
2902 wake_up_interruptible(&pgdat->kcompactd_wait);
2903 }
2904
2905 /*
2906 * The background compaction daemon, started as a kernel thread
2907 * from the init process.
2908 */
2909 static int kcompactd(void *p)
2910 {
2911 pg_data_t *pgdat = (pg_data_t *)p;
2912 struct task_struct *tsk = current;
2913 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
2914 long timeout = default_timeout;
2915
2916 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2917
2918 if (!cpumask_empty(cpumask))
2919 set_cpus_allowed_ptr(tsk, cpumask);
2920
2921 set_freezable();
2922
2923 pgdat->kcompactd_max_order = 0;
2924 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
2925
2926 while (!kthread_should_stop()) {
2927 unsigned long pflags;
2928
2929 /*
2930 * Avoid the unnecessary wakeup for proactive compaction
2931 * when it is disabled.
2932 */
2933 if (!sysctl_compaction_proactiveness)
2934 timeout = MAX_SCHEDULE_TIMEOUT;
2935 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2936 if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
2937 kcompactd_work_requested(pgdat), timeout) &&
2938 !pgdat->proactive_compact_trigger) {
2939
2940 psi_memstall_enter(&pflags);
2941 kcompactd_do_work(pgdat);
2942 psi_memstall_leave(&pflags);
2943 /*
2944 * Reset the timeout value. The defer timeout from
2945 * proactive compaction is lost here but that is fine,
2946 * as when the condition of the zone has changed
2947 * substantially, carrying on with the previous defer
2948 * interval is not useful.
2949 */
2950 timeout = default_timeout;
2951 continue;
2952 }
2953
2954 /*
2955 * Start the proactive work with default timeout. Based
2956 * on the fragmentation score, this timeout is updated.
2957 */
2958 timeout = default_timeout;
2959 if (should_proactive_compact_node(pgdat)) {
2960 unsigned int prev_score, score;
2961
2962 prev_score = fragmentation_score_node(pgdat);
2963 proactive_compact_node(pgdat);
2964 score = fragmentation_score_node(pgdat);
2965 /*
2966 * Defer proactive compaction if the fragmentation
2967 * score did not go down, i.e. no progress was made.
2968 */
2969 if (unlikely(score >= prev_score))
2970 timeout =
2971 default_timeout << COMPACT_MAX_DEFER_SHIFT;
2972 }
2973 if (unlikely(pgdat->proactive_compact_trigger))
2974 pgdat->proactive_compact_trigger = false;
2975 }
2976
2977 return 0;
2978 }
2979
2980 /*
2981 * This kcompactd start function will be called by init and node-hot-add.
2982 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2983 */
2984 int kcompactd_run(int nid)
2985 {
2986 pg_data_t *pgdat = NODE_DATA(nid);
2987 int ret = 0;
2988
2989 if (pgdat->kcompactd)
2990 return 0;
2991
2992 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2993 if (IS_ERR(pgdat->kcompactd)) {
2994 pr_err("Failed to start kcompactd on node %d\n", nid);
2995 ret = PTR_ERR(pgdat->kcompactd);
2996 pgdat->kcompactd = NULL;
2997 }
2998 return ret;
2999 }
3000
3001 /*
3002 * Called by memory hotplug when all memory in a node is offlined. Caller must
3003 * hold mem_hotplug_begin/end().
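 *
 * (Note: kthread_stop() blocks until the kcompactd() loop observes
 * kthread_should_stop() and returns, so clearing the pointer
 * afterwards is safe.)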
3004 */
3005 void kcompactd_stop(int nid)
3006 {
3007 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
3008
3009 if (kcompactd) {
3010 kthread_stop(kcompactd);
3011 NODE_DATA(nid)->kcompactd = NULL;
3012 }
3013 }
3014
3015 /*
3016 * It's optimal to keep kcompactd threads on the same CPUs as their
3017 * memory, but that is not required for correctness. So if the last CPU
3018 * in a node goes away, kcompactd is allowed to run anywhere; as the
3019 * first one comes back, restore its CPU binding.
3020 */
3021 static int kcompactd_cpu_online(unsigned int cpu)
3022 {
3023 int nid;
3024
3025 for_each_node_state(nid, N_MEMORY) {
3026 pg_data_t *pgdat = NODE_DATA(nid);
3027 const struct cpumask *mask;
3028
3029 mask = cpumask_of_node(pgdat->node_id);
3030
3031 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3032 /* One of our CPUs online: restore mask */
3033 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
3034 }
3035 return 0;
3036 }
3037
3038 static int __init kcompactd_init(void)
3039 {
3040 int nid;
3041 int ret;
3042
3043 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3044 "mm/compaction:online",
3045 kcompactd_cpu_online, NULL);
3046 if (ret < 0) {
3047 pr_err("kcompactd: failed to register hotplug callbacks.\n");
3048 return ret;
3049 }
3050
3051 for_each_node_state(nid, N_MEMORY)
3052 kcompactd_run(nid);
3053 return 0;
3054 }
3055 subsys_initcall(kcompactd_init)
3056
3057 #endif /* CONFIG_COMPACTION */
3058