// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of
 * the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode).
		 * Stop here (after we return, all pagetables will be
		 * destroyed) until khugepaged has finished working on
		 * the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			    NR_ISOLATED_ANON + page_is_file_lru(page),
			    -compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
			      struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
		    !PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
		    ++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has already been unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				    NR_ISOLATED_ANON + page_is_file_lru(page),
				    compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(hugepage_flags_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before taking the mmap_lock again.
 * Returns 0 if it succeeds, otherwise returns a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if false is returned, mmap_lock will be released.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin will return with mmap_lock released
	 * when it fails. So we jump out_nolock directly in that case.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
						&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
				  &compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes becoming visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
		    ++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy; it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the time the child has
		 * the PageDoubleMap flag cleared and decrements the mapcount.
		 * So khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we can always retry the
		 * collapse later. However it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway the same
		 * check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				   referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
}

static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl;
	pmd_t pmd;

	mmap_assert_write_locked(mm);
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in
 * as pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP.
	 * Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
		return;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			goto abort;
		page_remove_rmap(page, vma, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	collapse_and_free_pmd(mm, vma, haddr, pmd);
drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) in as the PMD-mapping is likely to be
		 * split later.
		 *
		 * Note that vma->anon_vma check is racy: it can be set up after
		 * the check but before we took mmap_lock by the fault path.
		 * But page lock would prevent establishing any new ptes of the
		 * page, so we are safe.
		 *
		 * An alternative would be to drop the check, but check that
		 * the page table is clear before calling pmdp_collapse_flush()
		 * under ptl. It has a higher chance to recover the THP for the
		 * VMA, but has a higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			/*
			 * When a vma is registered with uffd-wp, we can't
			 * recycle the pmd pgtable because there can be pte
			 * markers installed. Just skip it, so the rest of the
			 * mm/vma can still have the same file mapped hugely;
			 * however, it will always be mapped in small page size
			 * for uffd-wp registered ranges.
			 */
			if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
				collapse_and_free_pmd(mm, vma, addr, pmd);
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @hpage: newly allocated huge page for collapse
 * @node: appointed node the new huge page is allocated from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
			  struct file *file, pgoff_t start,
			  struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	/*
	 * Ensure we have slots for all the pages in the range.
	 * This is almost certainly a no-op because most of the pages must
	 * be present.
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			try_to_unmap(page_folio(page),
					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
		__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

	/* Join all the small entries into a single multi-index entry */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, new_page);
xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded; now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		if (nr_none) {
			mapping->nrpages -= nr_none;
			shmem_uncharge(mapping->host, nr_none);
		}

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	/* TODO: tracepoints */
}

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif

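/*
 * Scan one quantum of work from the mm list: walk the VMAs of the current
 * mm_slot, scanning up to roughly @pages ptes (or file pages) for collapse
 * candidates, and advance the khugepaged_scan cursor so the next call
 * resumes where this one stopped.  Returns the amount of work done; called
 * and returns with khugepaged_mm_lock held, but drops it while scanning.
 */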
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for the semaphore (to avoid long wait times).  Just move
	 * to the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
skip:
			progress++;
			continue;
		}
		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_flags_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!hugepage_flags_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types.  There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

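/*
 * Worked example for the sizing in set_recommended_min_free_kbytes() above
 * (illustrative values, not taken from this file): with 4 KiB base pages and
 * 2 MiB pageblocks (pageblock_nr_pages = 512), one qualifying zone
 * (nr_zones = 1) and MIGRATE_PCPTYPES = 3:
 *
 *   recommended_min = 512 * 1 * 2  +  512 * 1 * 3 * 3  = 5632 pages
 *   5632 pages << (PAGE_SHIFT - 10)                    = 22528 kB (~22 MiB)
 *
 * The page count is capped at 5% of lowmem before the shift, and
 * min_free_kbytes is only ever raised, never lowered, by this calculation.
 */
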
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}
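
/*
 * For reference (not part of this file's logic): the khugepaged tunables
 * implemented here are exposed to userspace under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *
 * and start_stop_khugepaged() is expected to run whenever transparent
 * hugepages are enabled or disabled globally (e.g. via the "enabled" knob).
 */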