// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if at least one pte is mapped, i.e. as
 * it would have happened if the vma had been large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
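/*
 * Example (illustrative): the three max_ptes_* limits above are exposed
 * through the sysfs attributes defined below, under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/. A sketch of tuning one
 * of them from userspace:
 *
 *	echo 64 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap
 *
 * restricts collapse to ranges with at most 64 swapped-out ptes. The
 * defaults are established in khugepaged_init() below.
 */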
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* Last target selected in hpage_collapse_find_target_node() */
	int last_target_node;
};

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: address array of the corresponding pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the amount of free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);
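/*
 * Worked example (illustrative): on x86-64, HPAGE_PMD_NR is 512, so the
 * largest accepted max_ptes_none value is 511 -- a range may then be
 * collapsed even if only a single pte is populated. Writing 512 or more
 * fails with -EINVAL via the bounds check above.
 */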
static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes a good candidate for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged,
		 * in case it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
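/*
 * Illustrative userspace counterpart (a sketch, not kernel code): the
 * advice values handled above are issued roughly like this:
 *
 *	#include <sys/mman.h>
 *	#include <stddef.h>
 *
 *	// Ask khugepaged to consider a region for collapse.
 *	int hint_huge(void *addr, size_t len)
 *	{
 *		return madvise(addr, len, MADV_HUGEPAGE);
 *	}
 *
 * With the "madvise" THP policy, this is also what makes the vma eligible
 * for scanning at all.
 */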
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return, all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}
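/*
 * A sketch of the ordering the lock/unlock pair above guarantees
 * (illustrative timeline, assuming khugepaged raced with exit):
 *
 *	khugepaged				exiting task
 *	----------				------------
 *	mmap_read_lock(mm)
 *	hpage_collapse_test_exit() -> false
 *	... works on page tables ...		mmap_write_lock(mm) blocks
 *	mmap_read_unlock(mm)
 *						mmap_write_lock(mm) returns
 *						mmap_write_unlock(mm)
 *						page tables destroyed
 */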
static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			    NR_ISOLATED_ANON + page_is_file_lru(page),
			    -compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
			      struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
		    !PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check whether we have already dealt with this
			 * compound page.
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has already been unlinked
		 * from the page table tree, so this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
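		/*
		 * Worked example (illustrative): a base (order-0) anon page
		 * mapped by exactly one pte and not in the swap cache has
		 * total_mapcount() == 1, so is_refcount_suitable() expects
		 * page_count() == 1; a concurrent GUP pin (page_count() == 2)
		 * makes the check below fail.
		 */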
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				    NR_ISOLATED_ANON + page_is_file_lru(page),
				    compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
	.last_target_node = NUMA_NO_NODE,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= cc->last_target_node)
		for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == cc->node_load[nid]) {
				target_node = nid;
				break;
			}

	cc->last_target_node = target_node;
	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}
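/*
 * Worked example for the node balancing above (illustrative): suppose a
 * scan sees 256 pages on node 0 and 256 on node 1, and the previous
 * collapse targeted node 0 (last_target_node == 0). The first loop picks
 * node 0 again (first node with the max hits), but since target_node <=
 * last_target_node, the second loop advances to node 1, spreading
 * allocations across the tied nodes.
 */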
/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking the mmap_lock.
 * Returns an enum scan_result value.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * An anon vma is expected: the address may have been unmapped and
	 * then remapped to a file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmd_read_atomic(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}
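/*
 * A sketch of the typical caller pattern for the helpers above (the same
 * shape appears in collapse_huge_page() and hpage_collapse_scan_pmd()
 * below):
 *
 *	pmd_t *pmd;
 *	int result = find_pmd_or_thp_or_none(mm, address, &pmd);
 *
 *	if (result != SCAN_SUCCEED)
 *		return result;	// SCAN_PMD_NULL, SCAN_PMD_MAPPED, ...
 *	// pmd now points to a present, non-huge, sane pmd entry
 */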
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if the result is not SCAN_SUCCEED, the mmap_lock will have
 * been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here, and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain the LRU add pagevec to remove the extra pin on the swapped-in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	/* Only allocate from the target node */
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE) | __GFP_THISNODE;
	int node = hpage_collapse_find_target_node(cc);

	if (!hpage_collapse_alloc_page(hpage, gfp, node))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
		return SCAN_CGROUP_CHARGE_FAIL;
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
	return SCAN_SUCCEED;
}
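/*
 * Illustrative note on the gfp choice above: with the khugepaged defrag
 * flag set, khugepaged allocates with GFP_TRANSHUGE and may enter direct
 * reclaim/compaction; with it clear, GFP_TRANSHUGE_LIGHT avoids that.
 * Collapses not initiated by khugepaged always use GFP_TRANSHUGE, and
 * __GFP_THISNODE pins the allocation to the node chosen from
 * cc->node_load[].
 */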
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take a potentially long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with the mmap_lock
		 * released when it fails. So we jump to out_nolock directly
		 * in that case. Continuing to collapse would cause
		 * inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to the pagetables, with the exception of
	 * gup_fast, later handled by the ptep_clear_flush, and the VM,
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU, so we won't allow
	 * huge and small TLB entries for the same virtual address, to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects the PMD has changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked, so the anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
				  &compound_pagelist);
	pte_unmap(pte);
	/*
	 * The spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * prevent the copy_huge_page writes from becoming visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage) {
		mem_cgroup_uncharge(page_folio(hpage));
		put_page(hpage);
	}
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * the comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just keep it simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, take one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount, so
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry the
		 * collapse later. However, it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway, the
		 * same check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}
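/*
 * Worked example for the referenced check above (illustrative): on
 * x86-64 (HPAGE_PMD_NR == 512), a khugepaged-initiated collapse that
 * needs swap-in (unmapped > 0) proceeds only if at least 256 ptes were
 * seen as young; without swapped-out ptes, a single young pte suffices.
 */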
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
}

static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl;
	pmd_t pmd;

	mmap_assert_write_locked(mm);
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}
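/*
 * Illustrative scenario for the pte-mapped THP machinery below (a sketch,
 * not an exhaustive list of cases): collapse_file() has installed a huge
 * page in the page cache, but some mm still maps that range with 512
 * individual ptes, all pointing into the same compound page. The page
 * table then adds nothing; collapse_pte_mapped_thp() retracts it so the
 * next fault can map the THP with a single pmd.
 */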
/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for the mm
 *			     at address @addr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can be refaulted in
 * as pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the
	 * hugepage and map it with a PMD, regardless of sysfs THP settings.
	 * As such, let's analogously elide sysfs THP settings here.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return;

	/* Keep the pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	if (find_pmd_or_thp_or_none(mm, haddr, &pmd) != SCAN_SUCCEED)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			goto abort;
		page_remove_rmap(page, vma, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	collapse_and_free_pmd(mm, vma, haddr, pmd);
drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(hpage_collapse_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth the cost
		 * of taking mmap_write_lock(mm), as the PMD-mapping is
		 * likely to be split later anyway.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set
		 * up by the fault path after the check but before we took
		 * the mmap_lock. But the page lock would prevent
		 * establishing any new ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but to check
		 * that the page table is clear before calling
		 * pmdp_collapse_flush() under the ptl. That has a higher
		 * chance to recover the THP for the VMA, but has a higher
		 * cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
			continue;
		/*
		 * We need the exclusive mmap_lock to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * the mmap_lock while holding the page lock. The fault path
		 * does it in reverse order. Trylock is a way to avoid
		 * deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			/*
			 * When a vma is registered with uffd-wp, we can't
			 * recycle the pmd pgtable because there can be pte
			 * markers installed. Just skip it; the rest of the
			 * mm/vma can still have the same file mapped hugely,
			 * but it will always be mapped in small page size
			 * for uffd-wp registered ranges.
			 */
			if (!hpage_collapse_test_exit(mm) &&
			    !userfaultfd_wp(vma))
				collapse_and_free_pmd(mm, vma, addr, pmd);
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @cc: collapse context and scratchpad
 *
 * The basic scheme is simple, the details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan the page cache, replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static int collapse_file(struct mm_struct *mm, struct file *file,
			 pgoff_t start, struct collapse_control *cc)
{
	struct address_space *mapping = file->f_mapping;
	struct page *hpage;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;

	/*
	 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op, because most of the pages must be
	 * present.
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * At this point the hpage is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if the extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, hpage);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fds,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in a loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If the file was truncated then extended, or hole-punched,
		 * before we locked the first page, then a THP might be there
		 * already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fds, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			try_to_unmap(page_folio(page),
				     TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list so we can undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, hpage);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
	nr = thp_nr_pages(hpage);

	if (is_shmem)
		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
	}

	/* Join all the small entries into a single multi-index entry */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, hpage);
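	/*
	 * Illustrative note on the multi-index store above: after
	 * xas_set_order(&xas, start, HPAGE_PMD_ORDER), the single
	 * xas_store() makes the XArray return hpage for any lookup in
	 * [start, start + HPAGE_PMD_NR - 1], e.g.
	 *
	 *	xa_load(&mapping->i_pages, start + 7) == hpage
	 *
	 * replacing the individual entries stored in the loop above.
	 */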
xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(hpage + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(hpage);
		page_ref_add(hpage, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(hpage);
		lru_cache_add(hpage);

		/*
		 * Remove the pte page tables, so we can re-fault the page
		 * as huge.
		 */
		retract_page_tables(mapping, start);
		unlock_page(hpage);
		hpage = NULL;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		if (nr_none) {
			mapping->nrpages -= nr_none;
			shmem_uncharge(mapping->host, nr_none);
		}

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
							struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		hpage->mapping = NULL;
	}

	if (hpage)
		unlock_page(hpage);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (hpage) {
		mem_cgroup_uncharge(page_folio(hpage));
		put_page(hpage);
	}
	/* TODO: tracepoints */
	return result;
}

static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
				pgoff_t start, struct collapse_control *cc)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(cc->node_load, 0, sizeof(cc->node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			++swap;
			if (cc->is_khugepaged &&
			    swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD-sized page
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		cc->node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
				pgoff_t start, struct collapse_control *cc)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(cc->node_load, 0, sizeof(cc->node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			++swap;
			if (cc->is_khugepaged &&
			    swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		cc->node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (cc->is_khugepaged &&
		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		} else {
			result = collapse_file(mm, file, start, cc);
		}
	}

	/* TODO: tracepoints */
	return result;
}
#else
static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
				pgoff_t start, struct collapse_control *cc)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif
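
/*
 * A sketch of the reference accounting behind the page_count() test in
 * khugepaged_scan_file() above; illustrative, not an exhaustive list of
 * reference sources. A file page with no extra pins satisfies
 *
 *	page_count(page) == 1 + page_mapcount(page) + page_has_private(page)
 *
 * i.e. one reference held by the page cache, one per PTE mapping, and one
 * if buffer heads (private data) are attached. Any additional reference -
 * say a transient GUP pin - breaks the identity, and the scan bails out
 * with SCAN_PAGE_COUNT rather than racing with the unknown user.
 */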

static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
					    struct collapse_control *cc)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);
	*result = SCAN_FAIL;

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!hpage_collapse_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(hpage_collapse_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
skip:
			progress++;
			continue;
		}
		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			bool mmap_locked = true;

			cond_resched();
			if (unlikely(hpage_collapse_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				*result = khugepaged_scan_file(mm, file, pgoff,
							       cc);
				mmap_locked = false;
				fput(file);
			} else {
				*result = hpage_collapse_scan_pmd(mm, vma,
						khugepaged_scan.address,
						&mmap_locked, cc);
			}
			if (*result == SCAN_SUCCEED)
				++khugepaged_pages_collapsed;
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (!mmap_locked)
				/*
				 * We released mmap_lock so break loop. Note
				 * that we drop mmap_lock before all hugepage
				 * allocations, so if allocation fails, we are
				 * guaranteed to break here and report the
				 * correct result back to the caller.
				 */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (hpage_collapse_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
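
/*
 * Worked example for the progress accounting above, with illustrative
 * numbers: on x86-64 with 4K base pages, HPAGE_PMD_NR == 512, so each
 * PMD-sized range that is actually scanned adds 512 to progress. With a
 * quota of, say, pages == 4096, khugepaged_scan_mm_slot() covers at most
 * eight PMD ranges per call, while skipped vmas only add 1 each, so mms
 * full of unsuitable vmas are stepped over without consuming the quota.
 */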

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
	       hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
	       kthread_should_stop();
}

static void khugepaged_do_scan(struct collapse_control *cc)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;
	int result = SCAN_SUCCEED;

	lru_add_drain_all();

	while (true) {
		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &result, cc);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);

		if (progress >= pages)
			break;

		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
			/*
			 * If we fail to allocate the first time, try to
			 * sleep for a while. If we hit the failure again,
			 * cancel the scan.
			 */
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
	}
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_flags_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan(&khugepaged_collapse_control);
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!hugepage_flags_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* Never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}
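
/*
 * Worked example for the calculation above, under illustrative assumptions:
 * with 4K base pages a pageblock is 512 pages (2MiB), and MIGRATE_PCPTYPES
 * is 3. On a machine with two eligible zones (say DMA32 and Normal):
 *
 *	recommended_min  = 512 * 2 * 2         =  2048 pages
 *	recommended_min += 512 * 2 * 3 * 3     = 11264 pages in total
 *	recommended_min <<= (PAGE_SHIFT - 10)  = 45056 kbytes
 *
 * i.e. roughly 44MiB ends up reserved, unless that exceeds 5% of lowmem
 * or the administrator has already configured a larger min_free_kbytes.
 */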

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}

static int madvise_collapse_errno(enum scan_result r)
{
	/*
	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to
	 * provide actionable feedback to the caller, so that it may take an
	 * appropriate fallback measure depending on the nature of the
	 * failure.
	 */
	switch (r) {
	case SCAN_ALLOC_HUGE_PAGE_FAIL:
		return -ENOMEM;
	case SCAN_CGROUP_CHARGE_FAIL:
		return -EBUSY;
	/* Resource temporarily unavailable - trying again might succeed */
	case SCAN_PAGE_LOCK:
	case SCAN_PAGE_LRU:
		return -EAGAIN;
	/*
	 * Other: Trying again is unlikely to succeed / the error is
	 * intrinsic to the specified memory range. khugepaged likely won't
	 * be able to collapse it either.
	 */
	default:
		return -EINVAL;
	}
}

int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
		     unsigned long start, unsigned long end)
{
	struct collapse_control *cc;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hstart, hend, addr;
	int thps = 0, last_fail = SCAN_FAIL;
	bool mmap_locked = true;

	BUG_ON(vma->vm_start > start);
	BUG_ON(vma->vm_end < end);

	*prev = vma;

	/* TODO: Support file/shmem */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return -EINVAL;

	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return -EINVAL;

	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->is_khugepaged = false;
	cc->last_target_node = NUMA_NO_NODE;

	mmgrab(mm);
	lru_add_drain_all();

	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = end & HPAGE_PMD_MASK;

	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
		int result = SCAN_FAIL;

		if (!mmap_locked) {
			cond_resched();
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, &vma, cc);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
			}

			hend = vma->vm_end & HPAGE_PMD_MASK;
		}
		mmap_assert_locked(mm);
		memset(cc->node_load, 0, sizeof(cc->node_load));
		result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
						 cc);
		if (!mmap_locked)
			*prev = NULL;  /* Tell caller we dropped mmap_lock */

		switch (result) {
		case SCAN_SUCCEED:
		case SCAN_PMD_MAPPED:
			++thps;
			break;
		/* Whitelisted set of results where continuing is OK */
		case SCAN_PMD_NULL:
		case SCAN_PTE_NON_PRESENT:
		case SCAN_PTE_UFFD_WP:
		case SCAN_PAGE_RO:
		case SCAN_LACK_REFERENCED_PAGE:
		case SCAN_PAGE_NULL:
		case SCAN_PAGE_COUNT:
		case SCAN_PAGE_LOCK:
		case SCAN_PAGE_COMPOUND:
		case SCAN_PAGE_LRU:
			last_fail = result;
			break;
		default:
			last_fail = result;
			/* Other error, exit */
			goto out_maybelock;
		}
	}

out_maybelock:
	/* Caller expects us to hold mmap_lock on return */
	if (!mmap_locked)
		mmap_read_lock(mm);
out_nolock:
	mmap_assert_locked(mm);
	mmdrop(mm);
	kfree(cc);

	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
			: madvise_collapse_errno(last_fail);
}
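
/*
 * Userspace usage sketch for the MADV_COLLAPSE entry point above. This is
 * illustrative only: error handling is abbreviated, the fallback define is
 * for older headers, and a kernel with MADV_COLLAPSE support (6.1+) is
 * assumed.
 *
 *	#include <sys/mman.h>
 *
 *	#ifndef MADV_COLLAPSE
 *	#define MADV_COLLAPSE 25
 *	#endif
 *
 *	int try_collapse(void *buf, size_t len)
 *	{
 *		// Synchronous, best-effort collapse of [buf, buf + len).
 *		// Unlike most madvise() calls, the errno is actionable:
 *		// EAGAIN suggests a retry may succeed, ENOMEM/EBUSY mean
 *		// no huge page or memcg charge was available, and EINVAL
 *		// means the range is intrinsically uncollapsible.
 *		return madvise(buf, len, MADV_COLLAPSE);
 *	}
 *
 * A caller would typically allocate PMD-aligned memory first (e.g. with
 * aligned_alloc(2UL << 20, len)), touch it so the ptes are populated, and
 * only then request the collapse.
 */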