// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

/*
 * If mmap_lock temporarily dropped, revalidate vma
 * before taking mmap_lock.
 * Returns enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}

static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
	if (!folio) {
		*foliop = NULL;
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*foliop = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	*foliop = folio;
	return SCAN_SUCCEED;
}

static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct folio *folio;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_folio(&folio, mm, cc);
	hpage = &folio->page;
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
	 * copy_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report a false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
1418 * 1419 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1420 */ 1421 1422 /* khugepaged_mm_lock actually not necessary for the below */ 1423 mm_slot_free(mm_slot_cache, mm_slot); 1424 mmdrop(mm); 1425 } 1426 } 1427 1428 #ifdef CONFIG_SHMEM 1429 /* hpage must be locked, and mmap_lock must be held */ 1430 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, 1431 pmd_t *pmdp, struct page *hpage) 1432 { 1433 struct vm_fault vmf = { 1434 .vma = vma, 1435 .address = addr, 1436 .flags = 0, 1437 .pmd = pmdp, 1438 }; 1439 1440 VM_BUG_ON(!PageTransHuge(hpage)); 1441 mmap_assert_locked(vma->vm_mm); 1442 1443 if (do_set_pmd(&vmf, hpage)) 1444 return SCAN_FAIL; 1445 1446 get_page(hpage); 1447 return SCAN_SUCCEED; 1448 } 1449 1450 /** 1451 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at 1452 * address haddr. 1453 * 1454 * @mm: process address space where collapse happens 1455 * @addr: THP collapse address 1456 * @install_pmd: If a huge PMD should be installed 1457 * 1458 * This function checks whether all the PTEs in the PMD are pointing to the 1459 * right THP. If so, retract the page table so the THP can refault in with 1460 * as pmd-mapped. Possibly install a huge PMD mapping the THP. 1461 */ 1462 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, 1463 bool install_pmd) 1464 { 1465 struct mmu_notifier_range range; 1466 bool notified = false; 1467 unsigned long haddr = addr & HPAGE_PMD_MASK; 1468 struct vm_area_struct *vma = vma_lookup(mm, haddr); 1469 struct page *hpage; 1470 pte_t *start_pte, *pte; 1471 pmd_t *pmd, pgt_pmd; 1472 spinlock_t *pml = NULL, *ptl; 1473 int nr_ptes = 0, result = SCAN_FAIL; 1474 int i; 1475 1476 mmap_assert_locked(mm); 1477 1478 /* First check VMA found, in case page tables are being torn down */ 1479 if (!vma || !vma->vm_file || 1480 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) 1481 return SCAN_VMA_CHECK; 1482 1483 /* Fast check before locking page if already PMD-mapped */ 1484 result = find_pmd_or_thp_or_none(mm, haddr, &pmd); 1485 if (result == SCAN_PMD_MAPPED) 1486 return result; 1487 1488 /* 1489 * If we are here, we've succeeded in replacing all the native pages 1490 * in the page cache with a single hugepage. If a mm were to fault-in 1491 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage 1492 * and map it by a PMD, regardless of sysfs THP settings. As such, let's 1493 * analogously elide sysfs THP settings here. 1494 */ 1495 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) 1496 return SCAN_VMA_CHECK; 1497 1498 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ 1499 if (userfaultfd_wp(vma)) 1500 return SCAN_PTE_UFFD_WP; 1501 1502 hpage = find_lock_page(vma->vm_file->f_mapping, 1503 linear_page_index(vma, haddr)); 1504 if (!hpage) 1505 return SCAN_PAGE_NULL; 1506 1507 if (!PageHead(hpage)) { 1508 result = SCAN_FAIL; 1509 goto drop_hpage; 1510 } 1511 1512 if (compound_order(hpage) != HPAGE_PMD_ORDER) { 1513 result = SCAN_PAGE_COMPOUND; 1514 goto drop_hpage; 1515 } 1516 1517 result = find_pmd_or_thp_or_none(mm, haddr, &pmd); 1518 switch (result) { 1519 case SCAN_SUCCEED: 1520 break; 1521 case SCAN_PMD_NONE: 1522 /* 1523 * All pte entries have been removed and pmd cleared. 1524 * Skip all the pte checks and just update the pmd mapping. 
1525 */ 1526 goto maybe_install_pmd; 1527 default: 1528 goto drop_hpage; 1529 } 1530 1531 result = SCAN_FAIL; 1532 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); 1533 if (!start_pte) /* mmap_lock + page lock should prevent this */ 1534 goto drop_hpage; 1535 1536 /* step 1: check all mapped PTEs are to the right huge page */ 1537 for (i = 0, addr = haddr, pte = start_pte; 1538 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 1539 struct page *page; 1540 pte_t ptent = ptep_get(pte); 1541 1542 /* empty pte, skip */ 1543 if (pte_none(ptent)) 1544 continue; 1545 1546 /* page swapped out, abort */ 1547 if (!pte_present(ptent)) { 1548 result = SCAN_PTE_NON_PRESENT; 1549 goto abort; 1550 } 1551 1552 page = vm_normal_page(vma, addr, ptent); 1553 if (WARN_ON_ONCE(page && is_zone_device_page(page))) 1554 page = NULL; 1555 /* 1556 * Note that uprobe, debugger, or MAP_PRIVATE may change the 1557 * page table, but the new page will not be a subpage of hpage. 1558 */ 1559 if (hpage + i != page) 1560 goto abort; 1561 } 1562 1563 pte_unmap_unlock(start_pte, ptl); 1564 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 1565 haddr, haddr + HPAGE_PMD_SIZE); 1566 mmu_notifier_invalidate_range_start(&range); 1567 notified = true; 1568 1569 /* 1570 * pmd_lock covers a wider range than ptl, and (if split from mm's 1571 * page_table_lock) ptl nests inside pml. The less time we hold pml, 1572 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA 1573 * inserts a valid as-if-COWed PTE without even looking up page cache. 1574 * So page lock of hpage does not protect from it, so we must not drop 1575 * ptl before pgt_pmd is removed, so uffd private needs pml taken now. 1576 */ 1577 if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED)) 1578 pml = pmd_lock(mm, pmd); 1579 1580 start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl); 1581 if (!start_pte) /* mmap_lock + page lock should prevent this */ 1582 goto abort; 1583 if (!pml) 1584 spin_lock(ptl); 1585 else if (ptl != pml) 1586 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); 1587 1588 /* step 2: clear page table and adjust rmap */ 1589 for (i = 0, addr = haddr, pte = start_pte; 1590 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { 1591 struct page *page; 1592 pte_t ptent = ptep_get(pte); 1593 1594 if (pte_none(ptent)) 1595 continue; 1596 /* 1597 * We dropped ptl after the first scan, to do the mmu_notifier: 1598 * page lock stops more PTEs of the hpage being faulted in, but 1599 * does not stop write faults COWing anon copies from existing 1600 * PTEs; and does not stop those being swapped out or migrated. 1601 */ 1602 if (!pte_present(ptent)) { 1603 result = SCAN_PTE_NON_PRESENT; 1604 goto abort; 1605 } 1606 page = vm_normal_page(vma, addr, ptent); 1607 if (hpage + i != page) 1608 goto abort; 1609 1610 /* 1611 * Must clear entry, or a racing truncate may re-remove it. 1612 * TLB flush can be left until pmdp_collapse_flush() does it. 1613 * PTE dirty? Shmem page is already dirty; file is read-only. 1614 */ 1615 ptep_clear(mm, addr, pte); 1616 page_remove_rmap(page, vma, false); 1617 nr_ptes++; 1618 } 1619 1620 pte_unmap(start_pte); 1621 if (!pml) 1622 spin_unlock(ptl); 1623 1624 /* step 3: set proper refcount and mm_counters. 
*/ 1625 if (nr_ptes) { 1626 page_ref_sub(hpage, nr_ptes); 1627 add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); 1628 } 1629 1630 /* step 4: remove empty page table */ 1631 if (!pml) { 1632 pml = pmd_lock(mm, pmd); 1633 if (ptl != pml) 1634 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); 1635 } 1636 pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd); 1637 pmdp_get_lockless_sync(); 1638 if (ptl != pml) 1639 spin_unlock(ptl); 1640 spin_unlock(pml); 1641 1642 mmu_notifier_invalidate_range_end(&range); 1643 1644 mm_dec_nr_ptes(mm); 1645 page_table_check_pte_clear_range(mm, haddr, pgt_pmd); 1646 pte_free_defer(mm, pmd_pgtable(pgt_pmd)); 1647 1648 maybe_install_pmd: 1649 /* step 5: install pmd entry */ 1650 result = install_pmd 1651 ? set_huge_pmd(vma, haddr, pmd, hpage) 1652 : SCAN_SUCCEED; 1653 goto drop_hpage; 1654 abort: 1655 if (nr_ptes) { 1656 flush_tlb_mm(mm); 1657 page_ref_sub(hpage, nr_ptes); 1658 add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); 1659 } 1660 if (start_pte) 1661 pte_unmap_unlock(start_pte, ptl); 1662 if (pml && pml != ptl) 1663 spin_unlock(pml); 1664 if (notified) 1665 mmu_notifier_invalidate_range_end(&range); 1666 drop_hpage: 1667 unlock_page(hpage); 1668 put_page(hpage); 1669 return result; 1670 } 1671 1672 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) 1673 { 1674 struct vm_area_struct *vma; 1675 1676 i_mmap_lock_read(mapping); 1677 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1678 struct mmu_notifier_range range; 1679 struct mm_struct *mm; 1680 unsigned long addr; 1681 pmd_t *pmd, pgt_pmd; 1682 spinlock_t *pml; 1683 spinlock_t *ptl; 1684 bool skipped_uffd = false; 1685 1686 /* 1687 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that 1688 * got written to. These VMAs are likely not worth removing 1689 * page tables from, as PMD-mapping is likely to be split later. 1690 */ 1691 if (READ_ONCE(vma->anon_vma)) 1692 continue; 1693 1694 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1695 if (addr & ~HPAGE_PMD_MASK || 1696 vma->vm_end < addr + HPAGE_PMD_SIZE) 1697 continue; 1698 1699 mm = vma->vm_mm; 1700 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED) 1701 continue; 1702 1703 if (hpage_collapse_test_exit(mm)) 1704 continue; 1705 /* 1706 * When a vma is registered with uffd-wp, we cannot recycle 1707 * the page table because there may be pte markers installed. 1708 * Other vmas can still have the same file mapped hugely, but 1709 * skip this one: it will always be mapped in small page size 1710 * for uffd-wp registered ranges. 1711 */ 1712 if (userfaultfd_wp(vma)) 1713 continue; 1714 1715 /* PTEs were notified when unmapped; but now for the PMD? */ 1716 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 1717 addr, addr + HPAGE_PMD_SIZE); 1718 mmu_notifier_invalidate_range_start(&range); 1719 1720 pml = pmd_lock(mm, pmd); 1721 ptl = pte_lockptr(mm, pmd); 1722 if (ptl != pml) 1723 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); 1724 1725 /* 1726 * Huge page lock is still held, so normally the page table 1727 * must remain empty; and we have already skipped anon_vma 1728 * and userfaultfd_wp() vmas. But since the mmap_lock is not 1729 * held, it is still possible for a racing userfaultfd_ioctl() 1730 * to have inserted ptes or markers. Now that we hold ptlock, 1731 * repeating the anon_vma check protects from one category, 1732 * and repeating the userfaultfd_wp() check from another. 
1733 */ 1734 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) { 1735 skipped_uffd = true; 1736 } else { 1737 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd); 1738 pmdp_get_lockless_sync(); 1739 } 1740 1741 if (ptl != pml) 1742 spin_unlock(ptl); 1743 spin_unlock(pml); 1744 1745 mmu_notifier_invalidate_range_end(&range); 1746 1747 if (!skipped_uffd) { 1748 mm_dec_nr_ptes(mm); 1749 page_table_check_pte_clear_range(mm, addr, pgt_pmd); 1750 pte_free_defer(mm, pmd_pgtable(pgt_pmd)); 1751 } 1752 } 1753 i_mmap_unlock_read(mapping); 1754 } 1755 1756 /** 1757 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one. 1758 * 1759 * @mm: process address space where collapse happens 1760 * @addr: virtual collapse start address 1761 * @file: file that collapse on 1762 * @start: collapse start address 1763 * @cc: collapse context and scratchpad 1764 * 1765 * Basic scheme is simple, details are more complex: 1766 * - allocate and lock a new huge page; 1767 * - scan page cache, locking old pages 1768 * + swap/gup in pages if necessary; 1769 * - copy data to new page 1770 * - handle shmem holes 1771 * + re-validate that holes weren't filled by someone else 1772 * + check for userfaultfd 1773 * - finalize updates to the page cache; 1774 * - if replacing succeeds: 1775 * + unlock huge page; 1776 * + free old pages; 1777 * - if replacing failed; 1778 * + unlock old pages 1779 * + unlock and free huge page; 1780 */ 1781 static int collapse_file(struct mm_struct *mm, unsigned long addr, 1782 struct file *file, pgoff_t start, 1783 struct collapse_control *cc) 1784 { 1785 struct address_space *mapping = file->f_mapping; 1786 struct page *page; 1787 struct page *tmp, *dst; 1788 struct folio *folio, *new_folio; 1789 pgoff_t index = 0, end = start + HPAGE_PMD_NR; 1790 LIST_HEAD(pagelist); 1791 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); 1792 int nr_none = 0, result = SCAN_SUCCEED; 1793 bool is_shmem = shmem_file(file); 1794 1795 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); 1796 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1797 1798 result = alloc_charge_folio(&new_folio, mm, cc); 1799 if (result != SCAN_SUCCEED) 1800 goto out; 1801 1802 __folio_set_locked(new_folio); 1803 if (is_shmem) 1804 __folio_set_swapbacked(new_folio); 1805 new_folio->index = start; 1806 new_folio->mapping = mapping; 1807 1808 /* 1809 * Ensure we have slots for all the pages in the range. This is 1810 * almost certainly a no-op because most of the pages must be present 1811 */ 1812 do { 1813 xas_lock_irq(&xas); 1814 xas_create_range(&xas); 1815 if (!xas_error(&xas)) 1816 break; 1817 xas_unlock_irq(&xas); 1818 if (!xas_nomem(&xas, GFP_KERNEL)) { 1819 result = SCAN_FAIL; 1820 goto rollback; 1821 } 1822 } while (1); 1823 1824 for (index = start; index < end; index++) { 1825 xas_set(&xas, index); 1826 page = xas_load(&xas); 1827 1828 VM_BUG_ON(index != xas.xa_index); 1829 if (is_shmem) { 1830 if (!page) { 1831 /* 1832 * Stop if extent has been truncated or 1833 * hole-punched, and is now completely 1834 * empty. 
1835 */ 1836 if (index == start) { 1837 if (!xas_next_entry(&xas, end - 1)) { 1838 result = SCAN_TRUNCATED; 1839 goto xa_locked; 1840 } 1841 } 1842 nr_none++; 1843 continue; 1844 } 1845 1846 if (xa_is_value(page) || !PageUptodate(page)) { 1847 xas_unlock_irq(&xas); 1848 /* swap in or instantiate fallocated page */ 1849 if (shmem_get_folio(mapping->host, index, 1850 &folio, SGP_NOALLOC)) { 1851 result = SCAN_FAIL; 1852 goto xa_unlocked; 1853 } 1854 /* drain lru cache to help isolate_lru_page() */ 1855 lru_add_drain(); 1856 page = folio_file_page(folio, index); 1857 } else if (trylock_page(page)) { 1858 get_page(page); 1859 xas_unlock_irq(&xas); 1860 } else { 1861 result = SCAN_PAGE_LOCK; 1862 goto xa_locked; 1863 } 1864 } else { /* !is_shmem */ 1865 if (!page || xa_is_value(page)) { 1866 xas_unlock_irq(&xas); 1867 page_cache_sync_readahead(mapping, &file->f_ra, 1868 file, index, 1869 end - index); 1870 /* drain lru cache to help isolate_lru_page() */ 1871 lru_add_drain(); 1872 page = find_lock_page(mapping, index); 1873 if (unlikely(page == NULL)) { 1874 result = SCAN_FAIL; 1875 goto xa_unlocked; 1876 } 1877 } else if (PageDirty(page)) { 1878 /* 1879 * khugepaged only works on read-only fd, 1880 * so this page is dirty because it hasn't 1881 * been flushed since first write. There 1882 * won't be new dirty pages. 1883 * 1884 * Trigger async flush here and hope the 1885 * writeback is done when khugepaged 1886 * revisits this page. 1887 * 1888 * This is a one-off situation. We are not 1889 * forcing writeback in loop. 1890 */ 1891 xas_unlock_irq(&xas); 1892 filemap_flush(mapping); 1893 result = SCAN_FAIL; 1894 goto xa_unlocked; 1895 } else if (PageWriteback(page)) { 1896 xas_unlock_irq(&xas); 1897 result = SCAN_FAIL; 1898 goto xa_unlocked; 1899 } else if (trylock_page(page)) { 1900 get_page(page); 1901 xas_unlock_irq(&xas); 1902 } else { 1903 result = SCAN_PAGE_LOCK; 1904 goto xa_locked; 1905 } 1906 } 1907 1908 /* 1909 * The page must be locked, so we can drop the i_pages lock 1910 * without racing with truncate. 1911 */ 1912 VM_BUG_ON_PAGE(!PageLocked(page), page); 1913 1914 /* make sure the page is up to date */ 1915 if (unlikely(!PageUptodate(page))) { 1916 result = SCAN_FAIL; 1917 goto out_unlock; 1918 } 1919 1920 /* 1921 * If file was truncated then extended, or hole-punched, before 1922 * we locked the first page, then a THP might be there already. 1923 * This will be discovered on the first iteration. 1924 */ 1925 if (PageTransCompound(page)) { 1926 struct page *head = compound_head(page); 1927 1928 result = compound_order(head) == HPAGE_PMD_ORDER && 1929 head->index == start 1930 /* Maybe PMD-mapped */ 1931 ? SCAN_PTE_MAPPED_HUGEPAGE 1932 : SCAN_PAGE_COMPOUND; 1933 goto out_unlock; 1934 } 1935 1936 folio = page_folio(page); 1937 1938 if (folio_mapping(folio) != mapping) { 1939 result = SCAN_TRUNCATED; 1940 goto out_unlock; 1941 } 1942 1943 if (!is_shmem && (folio_test_dirty(folio) || 1944 folio_test_writeback(folio))) { 1945 /* 1946 * khugepaged only works on read-only fd, so this 1947 * page is dirty because it hasn't been flushed 1948 * since first write. 
1949 */ 1950 result = SCAN_FAIL; 1951 goto out_unlock; 1952 } 1953 1954 if (!folio_isolate_lru(folio)) { 1955 result = SCAN_DEL_PAGE_LRU; 1956 goto out_unlock; 1957 } 1958 1959 if (!filemap_release_folio(folio, GFP_KERNEL)) { 1960 result = SCAN_PAGE_HAS_PRIVATE; 1961 folio_putback_lru(folio); 1962 goto out_unlock; 1963 } 1964 1965 if (folio_mapped(folio)) 1966 try_to_unmap(folio, 1967 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); 1968 1969 xas_lock_irq(&xas); 1970 1971 VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); 1972 1973 /* 1974 * We control three references to the page: 1975 * - we hold a pin on it; 1976 * - one reference from page cache; 1977 * - one from isolate_lru_page; 1978 * If those are the only references, then any new usage of the 1979 * page will have to fetch it from the page cache. That requires 1980 * locking the page to handle truncate, so any new usage will be 1981 * blocked until we unlock page after collapse/during rollback. 1982 */ 1983 if (page_count(page) != 3) { 1984 result = SCAN_PAGE_COUNT; 1985 xas_unlock_irq(&xas); 1986 putback_lru_page(page); 1987 goto out_unlock; 1988 } 1989 1990 /* 1991 * Accumulate the pages that are being collapsed. 1992 */ 1993 list_add_tail(&page->lru, &pagelist); 1994 continue; 1995 out_unlock: 1996 unlock_page(page); 1997 put_page(page); 1998 goto xa_unlocked; 1999 } 2000 2001 if (!is_shmem) { 2002 filemap_nr_thps_inc(mapping); 2003 /* 2004 * Paired with smp_mb() in do_dentry_open() to ensure 2005 * i_writecount is up to date and the update to nr_thps is 2006 * visible. Ensures the page cache will be truncated if the 2007 * file is opened writable. 2008 */ 2009 smp_mb(); 2010 if (inode_is_open_for_write(mapping->host)) { 2011 result = SCAN_FAIL; 2012 filemap_nr_thps_dec(mapping); 2013 } 2014 } 2015 2016 xa_locked: 2017 xas_unlock_irq(&xas); 2018 xa_unlocked: 2019 2020 /* 2021 * If collapse is successful, flush must be done now before copying. 2022 * If collapse is unsuccessful, does flush actually need to be done? 2023 * Do it anyway, to clear the state. 2024 */ 2025 try_to_unmap_flush(); 2026 2027 if (result == SCAN_SUCCEED && nr_none && 2028 !shmem_charge(mapping->host, nr_none)) 2029 result = SCAN_FAIL; 2030 if (result != SCAN_SUCCEED) { 2031 nr_none = 0; 2032 goto rollback; 2033 } 2034 2035 /* 2036 * The old pages are locked, so they won't change anymore. 2037 */ 2038 index = start; 2039 dst = folio_page(new_folio, 0); 2040 list_for_each_entry(page, &pagelist, lru) { 2041 while (index < page->index) { 2042 clear_highpage(dst); 2043 index++; 2044 dst++; 2045 } 2046 if (copy_mc_highpage(dst, page) > 0) { 2047 result = SCAN_COPY_MC; 2048 goto rollback; 2049 } 2050 index++; 2051 dst++; 2052 } 2053 while (index < end) { 2054 clear_highpage(dst); 2055 index++; 2056 dst++; 2057 } 2058 2059 if (nr_none) { 2060 struct vm_area_struct *vma; 2061 int nr_none_check = 0; 2062 2063 i_mmap_lock_read(mapping); 2064 xas_lock_irq(&xas); 2065 2066 xas_set(&xas, start); 2067 for (index = start; index < end; index++) { 2068 if (!xas_next(&xas)) { 2069 xas_store(&xas, XA_RETRY_ENTRY); 2070 if (xas_error(&xas)) { 2071 result = SCAN_STORE_FAILED; 2072 goto immap_locked; 2073 } 2074 nr_none_check++; 2075 } 2076 } 2077 2078 if (nr_none != nr_none_check) { 2079 result = SCAN_PAGE_FILLED; 2080 goto immap_locked; 2081 } 2082 2083 /* 2084 * If userspace observed a missing page in a VMA with 2085 * a MODE_MISSING userfaultfd, then it might expect a 2086 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to 2087 * roll back to avoid suppressing such an event. 
Since 2088 * wp/minor userfaultfds don't give userspace any 2089 * guarantees that the kernel doesn't fill a missing 2090 * page with a zero page, so they don't matter here. 2091 * 2092 * Any userfaultfds registered after this point will 2093 * not be able to observe any missing pages due to the 2094 * previously inserted retry entries. 2095 */ 2096 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { 2097 if (userfaultfd_missing(vma)) { 2098 result = SCAN_EXCEED_NONE_PTE; 2099 goto immap_locked; 2100 } 2101 } 2102 2103 immap_locked: 2104 i_mmap_unlock_read(mapping); 2105 if (result != SCAN_SUCCEED) { 2106 xas_set(&xas, start); 2107 for (index = start; index < end; index++) { 2108 if (xas_next(&xas) == XA_RETRY_ENTRY) 2109 xas_store(&xas, NULL); 2110 } 2111 2112 xas_unlock_irq(&xas); 2113 goto rollback; 2114 } 2115 } else { 2116 xas_lock_irq(&xas); 2117 } 2118 2119 if (is_shmem) 2120 __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR); 2121 else 2122 __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR); 2123 2124 if (nr_none) { 2125 __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none); 2126 /* nr_none is always 0 for non-shmem. */ 2127 __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none); 2128 } 2129 2130 /* 2131 * Mark new_folio as uptodate before inserting it into the 2132 * page cache so that it isn't mistaken for an fallocated but 2133 * unwritten page. 2134 */ 2135 folio_mark_uptodate(new_folio); 2136 folio_ref_add(new_folio, HPAGE_PMD_NR - 1); 2137 2138 if (is_shmem) 2139 folio_mark_dirty(new_folio); 2140 folio_add_lru(new_folio); 2141 2142 /* Join all the small entries into a single multi-index entry. */ 2143 xas_set_order(&xas, start, HPAGE_PMD_ORDER); 2144 xas_store(&xas, new_folio); 2145 WARN_ON_ONCE(xas_error(&xas)); 2146 xas_unlock_irq(&xas); 2147 2148 /* 2149 * Remove pte page tables, so we can re-fault the page as huge. 2150 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp(). 2151 */ 2152 retract_page_tables(mapping, start); 2153 if (cc && !cc->is_khugepaged) 2154 result = SCAN_PTE_MAPPED_HUGEPAGE; 2155 folio_unlock(new_folio); 2156 2157 /* 2158 * The collapse has succeeded, so free the old pages. 2159 */ 2160 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 2161 list_del(&page->lru); 2162 page->mapping = NULL; 2163 ClearPageActive(page); 2164 ClearPageUnevictable(page); 2165 unlock_page(page); 2166 folio_put_refs(page_folio(page), 3); 2167 } 2168 2169 goto out; 2170 2171 rollback: 2172 /* Something went wrong: roll back page cache changes */ 2173 if (nr_none) { 2174 xas_lock_irq(&xas); 2175 mapping->nrpages -= nr_none; 2176 xas_unlock_irq(&xas); 2177 shmem_uncharge(mapping->host, nr_none); 2178 } 2179 2180 list_for_each_entry_safe(page, tmp, &pagelist, lru) { 2181 list_del(&page->lru); 2182 unlock_page(page); 2183 putback_lru_page(page); 2184 put_page(page); 2185 } 2186 /* 2187 * Undo the updates of filemap_nr_thps_inc for non-SHMEM 2188 * file only. This undo is not needed unless failure is 2189 * due to SCAN_COPY_MC. 2190 */ 2191 if (!is_shmem && result == SCAN_COPY_MC) { 2192 filemap_nr_thps_dec(mapping); 2193 /* 2194 * Paired with smp_mb() in do_dentry_open() to 2195 * ensure the update to nr_thps is visible. 
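 * The counterpart increment (and its barrier) sits just after the
 * collapse loop above, where a concurrently opened writable fd causes
 * the collapse to be abandoned instead.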
2196 */ 2197 smp_mb(); 2198 } 2199 2200 new_folio->mapping = NULL; 2201 2202 folio_unlock(new_folio); 2203 folio_put(new_folio); 2204 out: 2205 VM_BUG_ON(!list_empty(&pagelist)); 2206 trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result); 2207 return result; 2208 } 2209 2210 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, 2211 struct file *file, pgoff_t start, 2212 struct collapse_control *cc) 2213 { 2214 struct page *page = NULL; 2215 struct address_space *mapping = file->f_mapping; 2216 XA_STATE(xas, &mapping->i_pages, start); 2217 int present, swap; 2218 int node = NUMA_NO_NODE; 2219 int result = SCAN_SUCCEED; 2220 2221 present = 0; 2222 swap = 0; 2223 memset(cc->node_load, 0, sizeof(cc->node_load)); 2224 nodes_clear(cc->alloc_nmask); 2225 rcu_read_lock(); 2226 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { 2227 if (xas_retry(&xas, page)) 2228 continue; 2229 2230 if (xa_is_value(page)) { 2231 ++swap; 2232 if (cc->is_khugepaged && 2233 swap > khugepaged_max_ptes_swap) { 2234 result = SCAN_EXCEED_SWAP_PTE; 2235 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); 2236 break; 2237 } 2238 continue; 2239 } 2240 2241 /* 2242 * TODO: khugepaged should compact smaller compound pages 2243 * into a PMD sized page 2244 */ 2245 if (PageTransCompound(page)) { 2246 struct page *head = compound_head(page); 2247 2248 result = compound_order(head) == HPAGE_PMD_ORDER && 2249 head->index == start 2250 /* Maybe PMD-mapped */ 2251 ? SCAN_PTE_MAPPED_HUGEPAGE 2252 : SCAN_PAGE_COMPOUND; 2253 /* 2254 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing 2255 * by the caller won't touch the page cache, and so 2256 * it's safe to skip LRU and refcount checks before 2257 * returning. 2258 */ 2259 break; 2260 } 2261 2262 node = page_to_nid(page); 2263 if (hpage_collapse_scan_abort(node, cc)) { 2264 result = SCAN_SCAN_ABORT; 2265 break; 2266 } 2267 cc->node_load[node]++; 2268 2269 if (!PageLRU(page)) { 2270 result = SCAN_PAGE_LRU; 2271 break; 2272 } 2273 2274 if (page_count(page) != 2275 1 + page_mapcount(page) + page_has_private(page)) { 2276 result = SCAN_PAGE_COUNT; 2277 break; 2278 } 2279 2280 /* 2281 * We probably should check if the page is referenced here, but 2282 * nobody would transfer pte_young() to PageReferenced() for us. 2283 * And rmap walk here is just too costly... 
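 * In practice this means the file-backed scan, unlike the anonymous
 * PMD scan, never fails with SCAN_LACK_REFERENCED_PAGE; recency of
 * access is simply not taken into account here.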
2284 */ 2285 2286 present++; 2287 2288 if (need_resched()) { 2289 xas_pause(&xas); 2290 cond_resched_rcu(); 2291 } 2292 } 2293 rcu_read_unlock(); 2294 2295 if (result == SCAN_SUCCEED) { 2296 if (cc->is_khugepaged && 2297 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 2298 result = SCAN_EXCEED_NONE_PTE; 2299 count_vm_event(THP_SCAN_EXCEED_NONE_PTE); 2300 } else { 2301 result = collapse_file(mm, addr, file, start, cc); 2302 } 2303 } 2304 2305 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); 2306 return result; 2307 } 2308 #else 2309 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, 2310 struct file *file, pgoff_t start, 2311 struct collapse_control *cc) 2312 { 2313 BUILD_BUG(); 2314 } 2315 #endif 2316 2317 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, 2318 struct collapse_control *cc) 2319 __releases(&khugepaged_mm_lock) 2320 __acquires(&khugepaged_mm_lock) 2321 { 2322 struct vma_iterator vmi; 2323 struct khugepaged_mm_slot *mm_slot; 2324 struct mm_slot *slot; 2325 struct mm_struct *mm; 2326 struct vm_area_struct *vma; 2327 int progress = 0; 2328 2329 VM_BUG_ON(!pages); 2330 lockdep_assert_held(&khugepaged_mm_lock); 2331 *result = SCAN_FAIL; 2332 2333 if (khugepaged_scan.mm_slot) { 2334 mm_slot = khugepaged_scan.mm_slot; 2335 slot = &mm_slot->slot; 2336 } else { 2337 slot = list_entry(khugepaged_scan.mm_head.next, 2338 struct mm_slot, mm_node); 2339 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); 2340 khugepaged_scan.address = 0; 2341 khugepaged_scan.mm_slot = mm_slot; 2342 } 2343 spin_unlock(&khugepaged_mm_lock); 2344 2345 mm = slot->mm; 2346 /* 2347 * Don't wait for semaphore (to avoid long wait times). Just move to 2348 * the next mm on the list. 2349 */ 2350 vma = NULL; 2351 if (unlikely(!mmap_read_trylock(mm))) 2352 goto breakouterloop_mmap_lock; 2353 2354 progress++; 2355 if (unlikely(hpage_collapse_test_exit(mm))) 2356 goto breakouterloop; 2357 2358 vma_iter_init(&vmi, mm, khugepaged_scan.address); 2359 for_each_vma(vmi, vma) { 2360 unsigned long hstart, hend; 2361 2362 cond_resched(); 2363 if (unlikely(hpage_collapse_test_exit(mm))) { 2364 progress++; 2365 break; 2366 } 2367 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { 2368 skip: 2369 progress++; 2370 continue; 2371 } 2372 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); 2373 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); 2374 if (khugepaged_scan.address > hend) 2375 goto skip; 2376 if (khugepaged_scan.address < hstart) 2377 khugepaged_scan.address = hstart; 2378 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2379 2380 while (khugepaged_scan.address < hend) { 2381 bool mmap_locked = true; 2382 2383 cond_resched(); 2384 if (unlikely(hpage_collapse_test_exit(mm))) 2385 goto breakouterloop; 2386 2387 VM_BUG_ON(khugepaged_scan.address < hstart || 2388 khugepaged_scan.address + HPAGE_PMD_SIZE > 2389 hend); 2390 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2391 struct file *file = get_file(vma->vm_file); 2392 pgoff_t pgoff = linear_page_index(vma, 2393 khugepaged_scan.address); 2394 2395 mmap_read_unlock(mm); 2396 mmap_locked = false; 2397 *result = hpage_collapse_scan_file(mm, 2398 khugepaged_scan.address, file, pgoff, cc); 2399 fput(file); 2400 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) { 2401 mmap_read_lock(mm); 2402 if (hpage_collapse_test_exit(mm)) 2403 goto breakouterloop; 2404 *result = collapse_pte_mapped_thp(mm, 2405 khugepaged_scan.address, false); 2406 if (*result == SCAN_PMD_MAPPED) 2407 *result 
= SCAN_SUCCEED; 2408 mmap_read_unlock(mm); 2409 } 2410 } else { 2411 *result = hpage_collapse_scan_pmd(mm, vma, 2412 khugepaged_scan.address, &mmap_locked, cc); 2413 } 2414 2415 if (*result == SCAN_SUCCEED) 2416 ++khugepaged_pages_collapsed; 2417 2418 /* move to next address */ 2419 khugepaged_scan.address += HPAGE_PMD_SIZE; 2420 progress += HPAGE_PMD_NR; 2421 if (!mmap_locked) 2422 /* 2423 * We released mmap_lock so break loop. Note 2424 * that we drop mmap_lock before all hugepage 2425 * allocations, so if allocation fails, we are 2426 * guaranteed to break here and report the 2427 * correct result back to caller. 2428 */ 2429 goto breakouterloop_mmap_lock; 2430 if (progress >= pages) 2431 goto breakouterloop; 2432 } 2433 } 2434 breakouterloop: 2435 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ 2436 breakouterloop_mmap_lock: 2437 2438 spin_lock(&khugepaged_mm_lock); 2439 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2440 /* 2441 * Release the current mm_slot if this mm is about to die, or 2442 * if we scanned all vmas of this mm. 2443 */ 2444 if (hpage_collapse_test_exit(mm) || !vma) { 2445 /* 2446 * Make sure that if mm_users is reaching zero while 2447 * khugepaged runs here, khugepaged_exit will find 2448 * mm_slot not pointing to the exiting mm. 2449 */ 2450 if (slot->mm_node.next != &khugepaged_scan.mm_head) { 2451 slot = list_entry(slot->mm_node.next, 2452 struct mm_slot, mm_node); 2453 khugepaged_scan.mm_slot = 2454 mm_slot_entry(slot, struct khugepaged_mm_slot, slot); 2455 khugepaged_scan.address = 0; 2456 } else { 2457 khugepaged_scan.mm_slot = NULL; 2458 khugepaged_full_scans++; 2459 } 2460 2461 collect_mm_slot(mm_slot); 2462 } 2463 2464 return progress; 2465 } 2466 2467 static int khugepaged_has_work(void) 2468 { 2469 return !list_empty(&khugepaged_scan.mm_head) && 2470 hugepage_flags_enabled(); 2471 } 2472 2473 static int khugepaged_wait_event(void) 2474 { 2475 return !list_empty(&khugepaged_scan.mm_head) || 2476 kthread_should_stop(); 2477 } 2478 2479 static void khugepaged_do_scan(struct collapse_control *cc) 2480 { 2481 unsigned int progress = 0, pass_through_head = 0; 2482 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan); 2483 bool wait = true; 2484 int result = SCAN_SUCCEED; 2485 2486 lru_add_drain_all(); 2487 2488 while (true) { 2489 cond_resched(); 2490 2491 if (unlikely(kthread_should_stop() || try_to_freeze())) 2492 break; 2493 2494 spin_lock(&khugepaged_mm_lock); 2495 if (!khugepaged_scan.mm_slot) 2496 pass_through_head++; 2497 if (khugepaged_has_work() && 2498 pass_through_head < 2) 2499 progress += khugepaged_scan_mm_slot(pages - progress, 2500 &result, cc); 2501 else 2502 progress = pages; 2503 spin_unlock(&khugepaged_mm_lock); 2504 2505 if (progress >= pages) 2506 break; 2507 2508 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) { 2509 /* 2510 * If fail to allocate the first time, try to sleep for 2511 * a while. When hit again, cancel the scan. 
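 * ('wait' starts out true, so the first allocation failure within a
 * khugepaged_do_scan() invocation clears it and sleeps via
 * khugepaged_alloc_sleep(); a second failure hits the break below and
 * ends this scan round.)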
2512 */ 2513 if (!wait) 2514 break; 2515 wait = false; 2516 khugepaged_alloc_sleep(); 2517 } 2518 } 2519 } 2520 2521 static bool khugepaged_should_wakeup(void) 2522 { 2523 return kthread_should_stop() || 2524 time_after_eq(jiffies, khugepaged_sleep_expire); 2525 } 2526 2527 static void khugepaged_wait_work(void) 2528 { 2529 if (khugepaged_has_work()) { 2530 const unsigned long scan_sleep_jiffies = 2531 msecs_to_jiffies(khugepaged_scan_sleep_millisecs); 2532 2533 if (!scan_sleep_jiffies) 2534 return; 2535 2536 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; 2537 wait_event_freezable_timeout(khugepaged_wait, 2538 khugepaged_should_wakeup(), 2539 scan_sleep_jiffies); 2540 return; 2541 } 2542 2543 if (hugepage_flags_enabled()) 2544 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 2545 } 2546 2547 static int khugepaged(void *none) 2548 { 2549 struct khugepaged_mm_slot *mm_slot; 2550 2551 set_freezable(); 2552 set_user_nice(current, MAX_NICE); 2553 2554 while (!kthread_should_stop()) { 2555 khugepaged_do_scan(&khugepaged_collapse_control); 2556 khugepaged_wait_work(); 2557 } 2558 2559 spin_lock(&khugepaged_mm_lock); 2560 mm_slot = khugepaged_scan.mm_slot; 2561 khugepaged_scan.mm_slot = NULL; 2562 if (mm_slot) 2563 collect_mm_slot(mm_slot); 2564 spin_unlock(&khugepaged_mm_lock); 2565 return 0; 2566 } 2567 2568 static void set_recommended_min_free_kbytes(void) 2569 { 2570 struct zone *zone; 2571 int nr_zones = 0; 2572 unsigned long recommended_min; 2573 2574 if (!hugepage_flags_enabled()) { 2575 calculate_min_free_kbytes(); 2576 goto update_wmarks; 2577 } 2578 2579 for_each_populated_zone(zone) { 2580 /* 2581 * We don't need to worry about fragmentation of 2582 * ZONE_MOVABLE since it only has movable pages. 2583 */ 2584 if (zone_idx(zone) > gfp_zone(GFP_USER)) 2585 continue; 2586 2587 nr_zones++; 2588 } 2589 2590 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ 2591 recommended_min = pageblock_nr_pages * nr_zones * 2; 2592 2593 /* 2594 * Make sure that on average at least two pageblocks are almost free 2595 * of another type, one for a migratetype to fall back to and a 2596 * second to avoid subsequent fallbacks of other types There are 3 2597 * MIGRATE_TYPES we care about. 
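 * As a worked example with illustrative values (4 KiB base pages and
 * 2 MiB pageblocks, so pageblock_nr_pages = 512; nr_zones = 2 from the
 * loop above; MIGRATE_PCPTYPES = 3): the base reservation is
 * 512 * 2 * 2 = 2048 pages, the term added below contributes
 * 512 * 2 * 3 * 3 = 9216 pages, for 11264 pages or about 44 MiB
 * before the 5%-of-lowmem clamp that follows.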
2598 */ 2599 recommended_min += pageblock_nr_pages * nr_zones * 2600 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 2601 2602 /* don't ever allow to reserve more than 5% of the lowmem */ 2603 recommended_min = min(recommended_min, 2604 (unsigned long) nr_free_buffer_pages() / 20); 2605 recommended_min <<= (PAGE_SHIFT-10); 2606 2607 if (recommended_min > min_free_kbytes) { 2608 if (user_min_free_kbytes >= 0) 2609 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", 2610 min_free_kbytes, recommended_min); 2611 2612 min_free_kbytes = recommended_min; 2613 } 2614 2615 update_wmarks: 2616 setup_per_zone_wmarks(); 2617 } 2618 2619 int start_stop_khugepaged(void) 2620 { 2621 int err = 0; 2622 2623 mutex_lock(&khugepaged_mutex); 2624 if (hugepage_flags_enabled()) { 2625 if (!khugepaged_thread) 2626 khugepaged_thread = kthread_run(khugepaged, NULL, 2627 "khugepaged"); 2628 if (IS_ERR(khugepaged_thread)) { 2629 pr_err("khugepaged: kthread_run(khugepaged) failed\n"); 2630 err = PTR_ERR(khugepaged_thread); 2631 khugepaged_thread = NULL; 2632 goto fail; 2633 } 2634 2635 if (!list_empty(&khugepaged_scan.mm_head)) 2636 wake_up_interruptible(&khugepaged_wait); 2637 } else if (khugepaged_thread) { 2638 kthread_stop(khugepaged_thread); 2639 khugepaged_thread = NULL; 2640 } 2641 set_recommended_min_free_kbytes(); 2642 fail: 2643 mutex_unlock(&khugepaged_mutex); 2644 return err; 2645 } 2646 2647 void khugepaged_min_free_kbytes_update(void) 2648 { 2649 mutex_lock(&khugepaged_mutex); 2650 if (hugepage_flags_enabled() && khugepaged_thread) 2651 set_recommended_min_free_kbytes(); 2652 mutex_unlock(&khugepaged_mutex); 2653 } 2654 2655 bool current_is_khugepaged(void) 2656 { 2657 return kthread_func(current) == khugepaged; 2658 } 2659 2660 static int madvise_collapse_errno(enum scan_result r) 2661 { 2662 /* 2663 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide 2664 * actionable feedback to caller, so they may take an appropriate 2665 * fallback measure depending on the nature of the failure. 2666 */ 2667 switch (r) { 2668 case SCAN_ALLOC_HUGE_PAGE_FAIL: 2669 return -ENOMEM; 2670 case SCAN_CGROUP_CHARGE_FAIL: 2671 case SCAN_EXCEED_NONE_PTE: 2672 return -EBUSY; 2673 /* Resource temporary unavailable - trying again might succeed */ 2674 case SCAN_PAGE_COUNT: 2675 case SCAN_PAGE_LOCK: 2676 case SCAN_PAGE_LRU: 2677 case SCAN_DEL_PAGE_LRU: 2678 case SCAN_PAGE_FILLED: 2679 return -EAGAIN; 2680 /* 2681 * Other: Trying again likely not to succeed / error intrinsic to 2682 * specified memory range. khugepaged likely won't be able to collapse 2683 * either. 
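 * A caller of madvise(MADV_COLLAPSE) can therefore treat -EAGAIN (and
 * to some extent -EBUSY and -ENOMEM) as conditions that may pass,
 * while the -EINVAL returned below usually reflects a property of the
 * range that retrying will not change.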
2684 */ 2685 default: 2686 return -EINVAL; 2687 } 2688 } 2689 2690 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, 2691 unsigned long start, unsigned long end) 2692 { 2693 struct collapse_control *cc; 2694 struct mm_struct *mm = vma->vm_mm; 2695 unsigned long hstart, hend, addr; 2696 int thps = 0, last_fail = SCAN_FAIL; 2697 bool mmap_locked = true; 2698 2699 BUG_ON(vma->vm_start > start); 2700 BUG_ON(vma->vm_end < end); 2701 2702 *prev = vma; 2703 2704 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) 2705 return -EINVAL; 2706 2707 cc = kmalloc(sizeof(*cc), GFP_KERNEL); 2708 if (!cc) 2709 return -ENOMEM; 2710 cc->is_khugepaged = false; 2711 2712 mmgrab(mm); 2713 lru_add_drain_all(); 2714 2715 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2716 hend = end & HPAGE_PMD_MASK; 2717 2718 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) { 2719 int result = SCAN_FAIL; 2720 2721 if (!mmap_locked) { 2722 cond_resched(); 2723 mmap_read_lock(mm); 2724 mmap_locked = true; 2725 result = hugepage_vma_revalidate(mm, addr, false, &vma, 2726 cc); 2727 if (result != SCAN_SUCCEED) { 2728 last_fail = result; 2729 goto out_nolock; 2730 } 2731 2732 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); 2733 } 2734 mmap_assert_locked(mm); 2735 memset(cc->node_load, 0, sizeof(cc->node_load)); 2736 nodes_clear(cc->alloc_nmask); 2737 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 2738 struct file *file = get_file(vma->vm_file); 2739 pgoff_t pgoff = linear_page_index(vma, addr); 2740 2741 mmap_read_unlock(mm); 2742 mmap_locked = false; 2743 result = hpage_collapse_scan_file(mm, addr, file, pgoff, 2744 cc); 2745 fput(file); 2746 } else { 2747 result = hpage_collapse_scan_pmd(mm, vma, addr, 2748 &mmap_locked, cc); 2749 } 2750 if (!mmap_locked) 2751 *prev = NULL; /* Tell caller we dropped mmap_lock */ 2752 2753 handle_result: 2754 switch (result) { 2755 case SCAN_SUCCEED: 2756 case SCAN_PMD_MAPPED: 2757 ++thps; 2758 break; 2759 case SCAN_PTE_MAPPED_HUGEPAGE: 2760 BUG_ON(mmap_locked); 2761 BUG_ON(*prev); 2762 mmap_read_lock(mm); 2763 result = collapse_pte_mapped_thp(mm, addr, true); 2764 mmap_read_unlock(mm); 2765 goto handle_result; 2766 /* Whitelisted set of results where continuing OK */ 2767 case SCAN_PMD_NULL: 2768 case SCAN_PTE_NON_PRESENT: 2769 case SCAN_PTE_UFFD_WP: 2770 case SCAN_PAGE_RO: 2771 case SCAN_LACK_REFERENCED_PAGE: 2772 case SCAN_PAGE_NULL: 2773 case SCAN_PAGE_COUNT: 2774 case SCAN_PAGE_LOCK: 2775 case SCAN_PAGE_COMPOUND: 2776 case SCAN_PAGE_LRU: 2777 case SCAN_DEL_PAGE_LRU: 2778 last_fail = result; 2779 break; 2780 default: 2781 last_fail = result; 2782 /* Other error, exit */ 2783 goto out_maybelock; 2784 } 2785 } 2786 2787 out_maybelock: 2788 /* Caller expects us to hold mmap_lock on return */ 2789 if (!mmap_locked) 2790 mmap_read_lock(mm); 2791 out_nolock: 2792 mmap_assert_locked(mm); 2793 mmdrop(mm); 2794 kfree(cc); 2795 2796 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0 2797 : madvise_collapse_errno(last_fail); 2798 } 2799