// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped, as
 * would have happened if the vma had been large enough during page fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}
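
/*
 * Illustrative usage (example only, assuming CONFIG_SYSFS and a 2M PMD,
 * i.e. HPAGE_PMD_NR == 512 with 4K base pages): these tunables are exposed
 * under /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 *
 *   echo 0   > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *   echo 16  > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap
 *   echo 128 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_shared
 *
 * Each value must be at most HPAGE_PMD_NR - 1, otherwise the store
 * handlers above reject it with -EINVAL.
 */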

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes eligible for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return, all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there are
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ?
			GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking it.
 * Returns an enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * The spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * ensure the copy_huge_page writes become visible before
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there are
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 *
 * Note that the following race exists:
 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 *     emptying A's ->pte_mapped_thp[] array.
 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 *     ->pte_mapped_thp[] array.
 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
 * Thus, it's possible the same address is added multiple times for the same
 * mm_struct.  Should this happen, we'll simply attempt
 * collapse_pte_mapped_thp() multiple times for the same address, under the same
 * exclusive mmap_lock, and assuming the first call is successful, subsequent
 * attempts will return quickly (without grabbing any additional locks) when
 * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
 * check, and since this is a rare occurrence, the cost of preventing this
 * "multiple-add" is thought to be more expensive than just handling it, should
 * it occur.
 */
static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	bool ret = false;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
		ret = true;
	}
	spin_unlock(&khugepaged_mm_lock);
	return ret;
}

/* hpage must be locked, and mmap_lock must be held in write */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_write_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}

/*
 * A note about locking:
 * Trying to take the page table spinlocks would be useless here because those
 * are only used to synchronize:
 *
 * - modifying terminal entries (ones that point to a data page, not to another
 *   page table)
 * - installing *new* non-terminal entries
 *
 * Instead, we need roughly the same kind of protection as free_pgtables() or
 * mm_take_all_locks() (but only for a single VMA):
 * The mmap lock together with this VMA's rmap locks covers all paths towards
 * the page table entries we're messing with here, except for hardware page
 * table walks and lockless_pages_from_mm().
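 * (Those lockless walkers are dealt with below: pmdp_collapse_flush() clears
 * the pmd and tlb_remove_table_sync_one() waits for any concurrent GUP-fast
 * walker to finish before the detached page table is freed.)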
 */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd;
	struct mmu_notifier_range range;

	mmap_assert_write_locked(mm);
	if (vma->vm_file)
		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
	/*
	 * All anon_vmas attached to the VMA have the same root and are
	 * therefore locked by the same lock.
	 */
	if (vma->anon_vma)
		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	tlb_remove_table_sync_one();
	mmu_notifier_invalidate_range_end(&range);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			    bool install_pmd)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd;
	spinlock_t *ptl;
	int count = 0, result = SCAN_FAIL;
	int i;

	mmap_assert_write_locked(mm);

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)
		return result;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return SCAN_PAGE_NULL;

	if (!PageHead(hpage)) {
		result = SCAN_FAIL;
		goto drop_hpage;
	}

	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_hpage;
	}

	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_PMD_NONE:
		/*
		 * All pte entries have been removed and pmd cleared.
		 * Skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_hpage;
	}

	/* Lock the vma before taking i_mmap and page table locks */
	vma_start_write(vma);

	/*
	 * We need to lock the mapping so that from here on, only GUP-fast and
	 * hardware page walks can access the parts of the page tables that
	 * we're operating on.
	 * See collapse_and_free_pmd().
	 */
	i_mmap_lock_write(vma->vm_file->f_mapping);

	/*
	 * This spinlock should be unnecessary: Nobody else should be accessing
	 * the page tables under spinlock protection here, only
	 * lockless_pages_from_mm() and the hardware page walker can access page
	 * tables while all the high-level locks are held in write mode.
	 */
	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)
		goto drop_immap;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		/* empty pte, skip */
		if (pte_none(ptent))
			continue;

		/* page swapped out, abort */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			goto abort;
		page_remove_rmap(page, vma, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: remove pte entries */
	/* we make no change to anon, but protect concurrent anon page lookup */
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);

	collapse_and_free_pmd(mm, vma, haddr, pmd);

	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd ?
			set_huge_pmd(vma, haddr, pmd, hpage)
			: SCAN_SUCCEED;

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return result;

abort:
	pte_unmap_unlock(start_pte, ptl);
drop_immap:
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(hpage_collapse_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		struct mmu_notifier_range range;
		struct mm_struct *mm;
		unsigned long addr;
		pmd_t *pmd, pgt_pmd;
		spinlock_t *pml;
		spinlock_t *ptl;
		bool skipped_uffd = false;

		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth removing
		 * page tables from, as PMD-mapping is likely to be split later.
		 */
		if (READ_ONCE(vma->anon_vma))
			continue;

		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK ||
		    vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;

		mm = vma->vm_mm;
		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
			continue;

		if (hpage_collapse_test_exit(mm))
			continue;
		/*
		 * When a vma is registered with uffd-wp, we cannot recycle
		 * the page table because there may be pte markers installed.
		 * Other vmas can still have the same file mapped hugely, but
		 * skip this one: it will always be mapped in small page size
		 * for uffd-wp registered ranges.
		 */
		if (userfaultfd_wp(vma))
			continue;

		/* PTEs were notified when unmapped; but now for the PMD? */
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
					addr, addr + HPAGE_PMD_SIZE);
		mmu_notifier_invalidate_range_start(&range);

		pml = pmd_lock(mm, pmd);
		ptl = pte_lockptr(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

		/*
		 * Huge page lock is still held, so normally the page table
		 * must remain empty; and we have already skipped anon_vma
		 * and userfaultfd_wp() vmas.  But since the mmap_lock is not
		 * held, it is still possible for a racing userfaultfd_ioctl()
		 * to have inserted ptes or markers.  Now that we hold ptlock,
		 * repeating the anon_vma check protects from one category,
		 * and repeating the userfaultfd_wp() check from another.
		 */
		if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
			skipped_uffd = true;
		} else {
			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
			pmdp_get_lockless_sync();
		}

		if (ptl != pml)
			spin_unlock(ptl);
		spin_unlock(pml);

		mmu_notifier_invalidate_range_end(&range);

		if (!skipped_uffd) {
			mm_dec_nr_ptes(mm);
			page_table_check_pte_clear_range(mm, addr, pgt_pmd);
			pte_free_defer(mm, pmd_pgtable(pgt_pmd));
		}
	}
	i_mmap_unlock_read(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @addr: virtual collapse start address
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @cc: collapse context and scratchpad
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache, locking old pages
 *    + swap/gup in pages if necessary;
 *  - copy data to new page
 *  - handle shmem holes
 *    + re-validate that holes weren't filled by someone else
 *    + check for userfaultfd
 *  - finalize updates to the page cache;
 *  - if replacing succeeds:
 *    + unlock huge page;
 *    + free old pages;
 *  - if replacing failed:
 *    + unlock old pages
 *    + unlock and free huge page;
 */
static int collapse_file(struct mm_struct *mm, unsigned long addr,
			 struct file *file, pgoff_t start,
			 struct collapse_control *cc)
{
	struct address_space *mapping = file->f_mapping;
	struct page *hpage;
	struct page *page;
	struct page *tmp;
	struct folio *folio;
	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr = 0;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op because most of the pages must be present
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto rollback;
		}
	} while (1);

	for (index = start; index < end; index++) {
		xas_set(&xas, index);
		page = xas_load(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_get_folio(mapping->host, index,
						    &folio, SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
				/* drain lru cache to help isolate_lru_page() */
				lru_add_drain();
				page = folio_file_page(folio, index);
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain lru cache to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 * This will be discovered on the first iteration.
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		folio = page_folio(page);

		if (folio_mapping(folio) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (folio_test_dirty(folio) ||
				  folio_test_writeback(folio))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
2035 */ 2036 result = SCAN_FAIL; 2037 goto out_unlock; 2038 } 2039 2040 if (!folio_isolate_lru(folio)) { 2041 result = SCAN_DEL_PAGE_LRU; 2042 goto out_unlock; 2043 } 2044 2045 if (!filemap_release_folio(folio, GFP_KERNEL)) { 2046 result = SCAN_PAGE_HAS_PRIVATE; 2047 folio_putback_lru(folio); 2048 goto out_unlock; 2049 } 2050 2051 if (folio_mapped(folio)) 2052 try_to_unmap(folio, 2053 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); 2054 2055 xas_lock_irq(&xas); 2056 2057 VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); 2058 2059 /* 2060 * We control three references to the page: 2061 * - we hold a pin on it; 2062 * - one reference from page cache; 2063 * - one from isolate_lru_page; 2064 * If those are the only references, then any new usage of the 2065 * page will have to fetch it from the page cache. That requires 2066 * locking the page to handle truncate, so any new usage will be 2067 * blocked until we unlock page after collapse/during rollback. 2068 */ 2069 if (page_count(page) != 3) { 2070 result = SCAN_PAGE_COUNT; 2071 xas_unlock_irq(&xas); 2072 putback_lru_page(page); 2073 goto out_unlock; 2074 } 2075 2076 /* 2077 * Accumulate the pages that are being collapsed. 2078 */ 2079 list_add_tail(&page->lru, &pagelist); 2080 continue; 2081 out_unlock: 2082 unlock_page(page); 2083 put_page(page); 2084 goto xa_unlocked; 2085 } 2086 2087 if (!is_shmem) { 2088 filemap_nr_thps_inc(mapping); 2089 /* 2090 * Paired with smp_mb() in do_dentry_open() to ensure 2091 * i_writecount is up to date and the update to nr_thps is 2092 * visible. Ensures the page cache will be truncated if the 2093 * file is opened writable. 2094 */ 2095 smp_mb(); 2096 if (inode_is_open_for_write(mapping->host)) { 2097 result = SCAN_FAIL; 2098 filemap_nr_thps_dec(mapping); 2099 } 2100 } 2101 2102 xa_locked: 2103 xas_unlock_irq(&xas); 2104 xa_unlocked: 2105 2106 /* 2107 * If collapse is successful, flush must be done now before copying. 2108 * If collapse is unsuccessful, does flush actually need to be done? 2109 * Do it anyway, to clear the state. 2110 */ 2111 try_to_unmap_flush(); 2112 2113 if (result != SCAN_SUCCEED) 2114 goto rollback; 2115 2116 /* 2117 * The old pages are locked, so they won't change anymore. 2118 */ 2119 index = start; 2120 list_for_each_entry(page, &pagelist, lru) { 2121 while (index < page->index) { 2122 clear_highpage(hpage + (index % HPAGE_PMD_NR)); 2123 index++; 2124 } 2125 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) { 2126 result = SCAN_COPY_MC; 2127 goto rollback; 2128 } 2129 index++; 2130 } 2131 while (index < end) { 2132 clear_highpage(hpage + (index % HPAGE_PMD_NR)); 2133 index++; 2134 } 2135 2136 if (nr_none) { 2137 struct vm_area_struct *vma; 2138 int nr_none_check = 0; 2139 2140 i_mmap_lock_read(mapping); 2141 xas_lock_irq(&xas); 2142 2143 xas_set(&xas, start); 2144 for (index = start; index < end; index++) { 2145 if (!xas_next(&xas)) { 2146 xas_store(&xas, XA_RETRY_ENTRY); 2147 if (xas_error(&xas)) { 2148 result = SCAN_STORE_FAILED; 2149 goto immap_locked; 2150 } 2151 nr_none_check++; 2152 } 2153 } 2154 2155 if (nr_none != nr_none_check) { 2156 result = SCAN_PAGE_FILLED; 2157 goto immap_locked; 2158 } 2159 2160 /* 2161 * If userspace observed a missing page in a VMA with a MODE_MISSING 2162 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that 2163 * page. If so, we need to roll back to avoid suppressing such an 2164 * event. 
Since wp/minor userfaultfds don't give userspace any 2165 * guarantees that the kernel doesn't fill a missing page with a zero 2166 * page, so they don't matter here. 2167 * 2168 * Any userfaultfds registered after this point will not be able to 2169 * observe any missing pages due to the previously inserted retry 2170 * entries. 2171 */ 2172 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { 2173 if (userfaultfd_missing(vma)) { 2174 result = SCAN_EXCEED_NONE_PTE; 2175 goto immap_locked; 2176 } 2177 } 2178 2179 immap_locked: 2180 i_mmap_unlock_read(mapping); 2181 if (result != SCAN_SUCCEED) { 2182 xas_set(&xas, start); 2183 for (index = start; index < end; index++) { 2184 if (xas_next(&xas) == XA_RETRY_ENTRY) 2185 xas_store(&xas, NULL); 2186 } 2187 2188 xas_unlock_irq(&xas); 2189 goto rollback; 2190 } 2191 } else { 2192 xas_lock_irq(&xas); 2193 } 2194 2195 nr = thp_nr_pages(hpage); 2196 if (is_shmem) 2197 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr); 2198 else 2199 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr); 2200 2201 if (nr_none) { 2202 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none); 2203 /* nr_none is always 0 for non-shmem. */ 2204 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none); 2205 } 2206 2207 /* 2208 * Mark hpage as uptodate before inserting it into the page cache so 2209 * that it isn't mistaken for an fallocated but unwritten page. 2210 */ 2211 folio = page_folio(hpage); 2212 folio_mark_uptodate(folio); 2213 folio_ref_add(folio, HPAGE_PMD_NR - 1); 2214 2215 if (is_shmem) 2216 folio_mark_dirty(folio); 2217 folio_add_lru(folio); 2218 2219 /* Join all the small entries into a single multi-index entry. */ 2220 xas_set_order(&xas, start, HPAGE_PMD_ORDER); 2221 xas_store(&xas, hpage); 2222 WARN_ON_ONCE(xas_error(&xas)); 2223 xas_unlock_irq(&xas); 2224 2225 /* 2226 * Remove pte page tables, so we can re-fault the page as huge. 2227 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp(). 2228 */ 2229 retract_page_tables(mapping, start); 2230 if (cc && !cc->is_khugepaged) 2231 result = SCAN_PTE_MAPPED_HUGEPAGE; 2232 unlock_page(hpage); 2233 2234 /* 2235 * The collapse has succeeded, so free the old pages. 2236 */ 2237 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 2238 list_del(&page->lru); 2239 page->mapping = NULL; 2240 ClearPageActive(page); 2241 ClearPageUnevictable(page); 2242 unlock_page(page); 2243 folio_put_refs(page_folio(page), 3); 2244 } 2245 2246 goto out; 2247 2248 rollback: 2249 /* Something went wrong: roll back page cache changes */ 2250 if (nr_none) { 2251 xas_lock_irq(&xas); 2252 mapping->nrpages -= nr_none; 2253 shmem_uncharge(mapping->host, nr_none); 2254 xas_unlock_irq(&xas); 2255 } 2256 2257 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 2258 list_del(&page->lru); 2259 unlock_page(page); 2260 putback_lru_page(page); 2261 put_page(page); 2262 } 2263 /* 2264 * Undo the updates of filemap_nr_thps_inc for non-SHMEM 2265 * file only. This undo is not needed unless failure is 2266 * due to SCAN_COPY_MC. 2267 */ 2268 if (!is_shmem && result == SCAN_COPY_MC) { 2269 filemap_nr_thps_dec(mapping); 2270 /* 2271 * Paired with smp_mb() in do_dentry_open() to 2272 * ensure the update to nr_thps is visible. 
2273 */ 2274 smp_mb(); 2275 } 2276 2277 hpage->mapping = NULL; 2278 2279 unlock_page(hpage); 2280 put_page(hpage); 2281 out: 2282 VM_BUG_ON(!list_empty(&pagelist)); 2283 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result); 2284 return result; 2285 } 2286 2287 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, 2288 struct file *file, pgoff_t start, 2289 struct collapse_control *cc) 2290 { 2291 struct page *page = NULL; 2292 struct address_space *mapping = file->f_mapping; 2293 XA_STATE(xas, &mapping->i_pages, start); 2294 int present, swap; 2295 int node = NUMA_NO_NODE; 2296 int result = SCAN_SUCCEED; 2297 2298 present = 0; 2299 swap = 0; 2300 memset(cc->node_load, 0, sizeof(cc->node_load)); 2301 nodes_clear(cc->alloc_nmask); 2302 rcu_read_lock(); 2303 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { 2304 if (xas_retry(&xas, page)) 2305 continue; 2306 2307 if (xa_is_value(page)) { 2308 ++swap; 2309 if (cc->is_khugepaged && 2310 swap > khugepaged_max_ptes_swap) { 2311 result = SCAN_EXCEED_SWAP_PTE; 2312 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 2313 break; 2314 } 2315 continue; 2316 } 2317 2318 /* 2319 * TODO: khugepaged should compact smaller compound pages 2320 * into a PMD sized page 2321 */ 2322 if (PageTransCompound(page)) { 2323 struct page *head = compound_head(page); 2324 2325 result = compound_order(head) == HPAGE_PMD_ORDER && 2326 head->index == start 2327 /* Maybe PMD-mapped */ 2328 ? SCAN_PTE_MAPPED_HUGEPAGE 2329 : SCAN_PAGE_COMPOUND; 2330 /* 2331 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing 2332 * by the caller won't touch the page cache, and so 2333 * it's safe to skip LRU and refcount checks before 2334 * returning. 2335 */ 2336 break; 2337 } 2338 2339 node = page_to_nid(page); 2340 if (hpage_collapse_scan_abort(node, cc)) { 2341 result = SCAN_SCAN_ABORT; 2342 break; 2343 } 2344 cc->node_load[node]++; 2345 2346 if (!PageLRU(page)) { 2347 result = SCAN_PAGE_LRU; 2348 break; 2349 } 2350 2351 if (page_count(page) != 2352 1 + page_mapcount(page) + page_has_private(page)) { 2353 result = SCAN_PAGE_COUNT; 2354 break; 2355 } 2356 2357 /* 2358 * We probably should check if the page is referenced here, but 2359 * nobody would transfer pte_young() to PageReferenced() for us. 2360 * And rmap walk here is just too costly... 
2361 */ 2362 2363 present++; 2364 2365 if (need_resched()) { 2366 xas_pause(&xas); 2367 cond_resched_rcu(); 2368 } 2369 } 2370 rcu_read_unlock(); 2371 2372 if (result == SCAN_SUCCEED) { 2373 if (cc->is_khugepaged && 2374 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 2375 result = SCAN_EXCEED_NONE_PTE; 2376 count_vm_event(THP_SCAN_EXCEED_NONE_PTE); 2377 } else { 2378 result = collapse_file(mm, addr, file, start, cc); 2379 } 2380 } 2381 2382 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); 2383 return result; 2384 } 2385 #else 2386 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, 2387 struct file *file, pgoff_t start, 2388 struct collapse_control *cc) 2389 { 2390 BUILD_BUG(); 2391 } 2392 2393 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot) 2394 { 2395 } 2396 2397 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm, 2398 unsigned long addr) 2399 { 2400 return false; 2401 } 2402 #endif 2403 2404 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, 2405 struct collapse_control *cc) 2406 __releases(&khugepaged_mm_lock) 2407 __acquires(&khugepaged_mm_lock) 2408 { 2409 struct vma_iterator vmi; 2410 struct khugepaged_mm_slot *mm_slot; 2411 struct mm_slot *slot; 2412 struct mm_struct *mm; 2413 struct vm_area_struct *vma; 2414 int progress = 0; 2415 2416 VM_BUG_ON(!pages); 2417 lockdep_assert_held(&khugepaged_mm_lock); 2418 *result = SCAN_FAIL; 2419 2420 if (khugepaged_scan.mm_slot) { 2421 mm_slot = khugepaged_scan.mm_slot; 2422 slot = &mm_slot->slot; 2423 } else { 2424 slot = list_entry(khugepaged_scan.mm_head.next, 2425 struct mm_slot, mm_node); 2426 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); 2427 khugepaged_scan.address = 0; 2428 khugepaged_scan.mm_slot = mm_slot; 2429 } 2430 spin_unlock(&khugepaged_mm_lock); 2431 khugepaged_collapse_pte_mapped_thps(mm_slot); 2432 2433 mm = slot->mm; 2434 /* 2435 * Don't wait for semaphore (to avoid long wait times). Just move to 2436 * the next mm on the list. 
2437 */ 2438 vma = NULL; 2439 if (unlikely(!mmap_read_trylock(mm))) 2440 goto breakouterloop_mmap_lock; 2441 2442 progress++; 2443 if (unlikely(hpage_collapse_test_exit(mm))) 2444 goto breakouterloop; 2445 2446 vma_iter_init(&vmi, mm, khugepaged_scan.address); 2447 for_each_vma(vmi, vma) { 2448 unsigned long hstart, hend; 2449 2450 cond_resched(); 2451 if (unlikely(hpage_collapse_test_exit(mm))) { 2452 progress++; 2453 break; 2454 } 2455 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { 2456 skip: 2457 progress++; 2458 continue; 2459 } 2460 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); 2461 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); 2462 if (khugepaged_scan.address > hend) 2463 goto skip; 2464 if (khugepaged_scan.address < hstart) 2465 khugepaged_scan.address = hstart; 2466 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2467 2468 while (khugepaged_scan.address < hend) { 2469 bool mmap_locked = true; 2470 2471 cond_resched(); 2472 if (unlikely(hpage_collapse_test_exit(mm))) 2473 goto breakouterloop; 2474 2475 VM_BUG_ON(khugepaged_scan.address < hstart || 2476 khugepaged_scan.address + HPAGE_PMD_SIZE > 2477 hend); 2478 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2479 struct file *file = get_file(vma->vm_file); 2480 pgoff_t pgoff = linear_page_index(vma, 2481 khugepaged_scan.address); 2482 2483 mmap_read_unlock(mm); 2484 *result = hpage_collapse_scan_file(mm, 2485 khugepaged_scan.address, 2486 file, pgoff, cc); 2487 mmap_locked = false; 2488 fput(file); 2489 } else { 2490 *result = hpage_collapse_scan_pmd(mm, vma, 2491 khugepaged_scan.address, 2492 &mmap_locked, 2493 cc); 2494 } 2495 switch (*result) { 2496 case SCAN_PTE_MAPPED_HUGEPAGE: { 2497 pmd_t *pmd; 2498 2499 *result = find_pmd_or_thp_or_none(mm, 2500 khugepaged_scan.address, 2501 &pmd); 2502 if (*result != SCAN_SUCCEED) 2503 break; 2504 if (!khugepaged_add_pte_mapped_thp(mm, 2505 khugepaged_scan.address)) 2506 break; 2507 } fallthrough; 2508 case SCAN_SUCCEED: 2509 ++khugepaged_pages_collapsed; 2510 break; 2511 default: 2512 break; 2513 } 2514 2515 /* move to next address */ 2516 khugepaged_scan.address += HPAGE_PMD_SIZE; 2517 progress += HPAGE_PMD_NR; 2518 if (!mmap_locked) 2519 /* 2520 * We released mmap_lock so break loop. Note 2521 * that we drop mmap_lock before all hugepage 2522 * allocations, so if allocation fails, we are 2523 * guaranteed to break here and report the 2524 * correct result back to caller. 2525 */ 2526 goto breakouterloop_mmap_lock; 2527 if (progress >= pages) 2528 goto breakouterloop; 2529 } 2530 } 2531 breakouterloop: 2532 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ 2533 breakouterloop_mmap_lock: 2534 2535 spin_lock(&khugepaged_mm_lock); 2536 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2537 /* 2538 * Release the current mm_slot if this mm is about to die, or 2539 * if we scanned all vmas of this mm. 2540 */ 2541 if (hpage_collapse_test_exit(mm) || !vma) { 2542 /* 2543 * Make sure that if mm_users is reaching zero while 2544 * khugepaged runs here, khugepaged_exit will find 2545 * mm_slot not pointing to the exiting mm. 
2546 */ 2547 if (slot->mm_node.next != &khugepaged_scan.mm_head) { 2548 slot = list_entry(slot->mm_node.next, 2549 struct mm_slot, mm_node); 2550 khugepaged_scan.mm_slot = 2551 mm_slot_entry(slot, struct khugepaged_mm_slot, slot); 2552 khugepaged_scan.address = 0; 2553 } else { 2554 khugepaged_scan.mm_slot = NULL; 2555 khugepaged_full_scans++; 2556 } 2557 2558 collect_mm_slot(mm_slot); 2559 } 2560 2561 return progress; 2562 } 2563 2564 static int khugepaged_has_work(void) 2565 { 2566 return !list_empty(&khugepaged_scan.mm_head) && 2567 hugepage_flags_enabled(); 2568 } 2569 2570 static int khugepaged_wait_event(void) 2571 { 2572 return !list_empty(&khugepaged_scan.mm_head) || 2573 kthread_should_stop(); 2574 } 2575 2576 static void khugepaged_do_scan(struct collapse_control *cc) 2577 { 2578 unsigned int progress = 0, pass_through_head = 0; 2579 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan); 2580 bool wait = true; 2581 int result = SCAN_SUCCEED; 2582 2583 lru_add_drain_all(); 2584 2585 while (true) { 2586 cond_resched(); 2587 2588 if (unlikely(kthread_should_stop() || try_to_freeze())) 2589 break; 2590 2591 spin_lock(&khugepaged_mm_lock); 2592 if (!khugepaged_scan.mm_slot) 2593 pass_through_head++; 2594 if (khugepaged_has_work() && 2595 pass_through_head < 2) 2596 progress += khugepaged_scan_mm_slot(pages - progress, 2597 &result, cc); 2598 else 2599 progress = pages; 2600 spin_unlock(&khugepaged_mm_lock); 2601 2602 if (progress >= pages) 2603 break; 2604 2605 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) { 2606 /* 2607 * If fail to allocate the first time, try to sleep for 2608 * a while. When hit again, cancel the scan. 2609 */ 2610 if (!wait) 2611 break; 2612 wait = false; 2613 khugepaged_alloc_sleep(); 2614 } 2615 } 2616 } 2617 2618 static bool khugepaged_should_wakeup(void) 2619 { 2620 return kthread_should_stop() || 2621 time_after_eq(jiffies, khugepaged_sleep_expire); 2622 } 2623 2624 static void khugepaged_wait_work(void) 2625 { 2626 if (khugepaged_has_work()) { 2627 const unsigned long scan_sleep_jiffies = 2628 msecs_to_jiffies(khugepaged_scan_sleep_millisecs); 2629 2630 if (!scan_sleep_jiffies) 2631 return; 2632 2633 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; 2634 wait_event_freezable_timeout(khugepaged_wait, 2635 khugepaged_should_wakeup(), 2636 scan_sleep_jiffies); 2637 return; 2638 } 2639 2640 if (hugepage_flags_enabled()) 2641 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 2642 } 2643 2644 static int khugepaged(void *none) 2645 { 2646 struct khugepaged_mm_slot *mm_slot; 2647 2648 set_freezable(); 2649 set_user_nice(current, MAX_NICE); 2650 2651 while (!kthread_should_stop()) { 2652 khugepaged_do_scan(&khugepaged_collapse_control); 2653 khugepaged_wait_work(); 2654 } 2655 2656 spin_lock(&khugepaged_mm_lock); 2657 mm_slot = khugepaged_scan.mm_slot; 2658 khugepaged_scan.mm_slot = NULL; 2659 if (mm_slot) 2660 collect_mm_slot(mm_slot); 2661 spin_unlock(&khugepaged_mm_lock); 2662 return 0; 2663 } 2664 2665 static void set_recommended_min_free_kbytes(void) 2666 { 2667 struct zone *zone; 2668 int nr_zones = 0; 2669 unsigned long recommended_min; 2670 2671 if (!hugepage_flags_enabled()) { 2672 calculate_min_free_kbytes(); 2673 goto update_wmarks; 2674 } 2675 2676 for_each_populated_zone(zone) { 2677 /* 2678 * We don't need to worry about fragmentation of 2679 * ZONE_MOVABLE since it only has movable pages. 
2680 */ 2681 if (zone_idx(zone) > gfp_zone(GFP_USER)) 2682 continue; 2683 2684 nr_zones++; 2685 } 2686 2687 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ 2688 recommended_min = pageblock_nr_pages * nr_zones * 2; 2689 2690 /* 2691 * Make sure that on average at least two pageblocks are almost free 2692 * of another type, one for a migratetype to fall back to and a 2693 * second to avoid subsequent fallbacks of other types There are 3 2694 * MIGRATE_TYPES we care about. 2695 */ 2696 recommended_min += pageblock_nr_pages * nr_zones * 2697 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 2698 2699 /* don't ever allow to reserve more than 5% of the lowmem */ 2700 recommended_min = min(recommended_min, 2701 (unsigned long) nr_free_buffer_pages() / 20); 2702 recommended_min <<= (PAGE_SHIFT-10); 2703 2704 if (recommended_min > min_free_kbytes) { 2705 if (user_min_free_kbytes >= 0) 2706 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", 2707 min_free_kbytes, recommended_min); 2708 2709 min_free_kbytes = recommended_min; 2710 } 2711 2712 update_wmarks: 2713 setup_per_zone_wmarks(); 2714 } 2715 2716 int start_stop_khugepaged(void) 2717 { 2718 int err = 0; 2719 2720 mutex_lock(&khugepaged_mutex); 2721 if (hugepage_flags_enabled()) { 2722 if (!khugepaged_thread) 2723 khugepaged_thread = kthread_run(khugepaged, NULL, 2724 "khugepaged"); 2725 if (IS_ERR(khugepaged_thread)) { 2726 pr_err("khugepaged: kthread_run(khugepaged) failed\n"); 2727 err = PTR_ERR(khugepaged_thread); 2728 khugepaged_thread = NULL; 2729 goto fail; 2730 } 2731 2732 if (!list_empty(&khugepaged_scan.mm_head)) 2733 wake_up_interruptible(&khugepaged_wait); 2734 } else if (khugepaged_thread) { 2735 kthread_stop(khugepaged_thread); 2736 khugepaged_thread = NULL; 2737 } 2738 set_recommended_min_free_kbytes(); 2739 fail: 2740 mutex_unlock(&khugepaged_mutex); 2741 return err; 2742 } 2743 2744 void khugepaged_min_free_kbytes_update(void) 2745 { 2746 mutex_lock(&khugepaged_mutex); 2747 if (hugepage_flags_enabled() && khugepaged_thread) 2748 set_recommended_min_free_kbytes(); 2749 mutex_unlock(&khugepaged_mutex); 2750 } 2751 2752 bool current_is_khugepaged(void) 2753 { 2754 return kthread_func(current) == khugepaged; 2755 } 2756 2757 static int madvise_collapse_errno(enum scan_result r) 2758 { 2759 /* 2760 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide 2761 * actionable feedback to caller, so they may take an appropriate 2762 * fallback measure depending on the nature of the failure. 2763 */ 2764 switch (r) { 2765 case SCAN_ALLOC_HUGE_PAGE_FAIL: 2766 return -ENOMEM; 2767 case SCAN_CGROUP_CHARGE_FAIL: 2768 case SCAN_EXCEED_NONE_PTE: 2769 return -EBUSY; 2770 /* Resource temporary unavailable - trying again might succeed */ 2771 case SCAN_PAGE_COUNT: 2772 case SCAN_PAGE_LOCK: 2773 case SCAN_PAGE_LRU: 2774 case SCAN_DEL_PAGE_LRU: 2775 case SCAN_PAGE_FILLED: 2776 return -EAGAIN; 2777 /* 2778 * Other: Trying again likely not to succeed / error intrinsic to 2779 * specified memory range. khugepaged likely won't be able to collapse 2780 * either. 
2781 */ 2782 default: 2783 return -EINVAL; 2784 } 2785 } 2786 2787 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, 2788 unsigned long start, unsigned long end) 2789 { 2790 struct collapse_control *cc; 2791 struct mm_struct *mm = vma->vm_mm; 2792 unsigned long hstart, hend, addr; 2793 int thps = 0, last_fail = SCAN_FAIL; 2794 bool mmap_locked = true; 2795 2796 BUG_ON(vma->vm_start > start); 2797 BUG_ON(vma->vm_end < end); 2798 2799 *prev = vma; 2800 2801 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) 2802 return -EINVAL; 2803 2804 cc = kmalloc(sizeof(*cc), GFP_KERNEL); 2805 if (!cc) 2806 return -ENOMEM; 2807 cc->is_khugepaged = false; 2808 2809 mmgrab(mm); 2810 lru_add_drain_all(); 2811 2812 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2813 hend = end & HPAGE_PMD_MASK; 2814 2815 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) { 2816 int result = SCAN_FAIL; 2817 2818 if (!mmap_locked) { 2819 cond_resched(); 2820 mmap_read_lock(mm); 2821 mmap_locked = true; 2822 result = hugepage_vma_revalidate(mm, addr, false, &vma, 2823 cc); 2824 if (result != SCAN_SUCCEED) { 2825 last_fail = result; 2826 goto out_nolock; 2827 } 2828 2829 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); 2830 } 2831 mmap_assert_locked(mm); 2832 memset(cc->node_load, 0, sizeof(cc->node_load)); 2833 nodes_clear(cc->alloc_nmask); 2834 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2835 struct file *file = get_file(vma->vm_file); 2836 pgoff_t pgoff = linear_page_index(vma, addr); 2837 2838 mmap_read_unlock(mm); 2839 mmap_locked = false; 2840 result = hpage_collapse_scan_file(mm, addr, file, pgoff, 2841 cc); 2842 fput(file); 2843 } else { 2844 result = hpage_collapse_scan_pmd(mm, vma, addr, 2845 &mmap_locked, cc); 2846 } 2847 if (!mmap_locked) 2848 *prev = NULL; /* Tell caller we dropped mmap_lock */ 2849 2850 handle_result: 2851 switch (result) { 2852 case SCAN_SUCCEED: 2853 case SCAN_PMD_MAPPED: 2854 ++thps; 2855 break; 2856 case SCAN_PTE_MAPPED_HUGEPAGE: 2857 BUG_ON(mmap_locked); 2858 BUG_ON(*prev); 2859 mmap_write_lock(mm); 2860 result = collapse_pte_mapped_thp(mm, addr, true); 2861 mmap_write_unlock(mm); 2862 goto handle_result; 2863 /* Whitelisted set of results where continuing OK */ 2864 case SCAN_PMD_NULL: 2865 case SCAN_PTE_NON_PRESENT: 2866 case SCAN_PTE_UFFD_WP: 2867 case SCAN_PAGE_RO: 2868 case SCAN_LACK_REFERENCED_PAGE: 2869 case SCAN_PAGE_NULL: 2870 case SCAN_PAGE_COUNT: 2871 case SCAN_PAGE_LOCK: 2872 case SCAN_PAGE_COMPOUND: 2873 case SCAN_PAGE_LRU: 2874 case SCAN_DEL_PAGE_LRU: 2875 last_fail = result; 2876 break; 2877 default: 2878 last_fail = result; 2879 /* Other error, exit */ 2880 goto out_maybelock; 2881 } 2882 } 2883 2884 out_maybelock: 2885 /* Caller expects us to hold mmap_lock on return */ 2886 if (!mmap_locked) 2887 mmap_read_lock(mm); 2888 out_nolock: 2889 mmap_assert_locked(mm); 2890 mmdrop(mm); 2891 kfree(cc); 2892 2893 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0 2894 : madvise_collapse_errno(last_fail); 2895 } 2896