1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 #include <linux/mm.h> 4 #include <linux/sched.h> 5 #include <linux/mmu_notifier.h> 6 #include <linux/rmap.h> 7 #include <linux/swap.h> 8 #include <linux/mm_inline.h> 9 #include <linux/kthread.h> 10 #include <linux/khugepaged.h> 11 #include <linux/freezer.h> 12 #include <linux/mman.h> 13 #include <linux/hashtable.h> 14 #include <linux/userfaultfd_k.h> 15 #include <linux/page_idle.h> 16 #include <linux/swapops.h> 17 #include <linux/shmem_fs.h> 18 19 #include <asm/tlb.h> 20 #include <asm/pgalloc.h> 21 #include "internal.h" 22 23 enum scan_result { 24 SCAN_FAIL, 25 SCAN_SUCCEED, 26 SCAN_PMD_NULL, 27 SCAN_EXCEED_NONE_PTE, 28 SCAN_PTE_NON_PRESENT, 29 SCAN_PAGE_RO, 30 SCAN_LACK_REFERENCED_PAGE, 31 SCAN_PAGE_NULL, 32 SCAN_SCAN_ABORT, 33 SCAN_PAGE_COUNT, 34 SCAN_PAGE_LRU, 35 SCAN_PAGE_LOCK, 36 SCAN_PAGE_ANON, 37 SCAN_PAGE_COMPOUND, 38 SCAN_ANY_PROCESS, 39 SCAN_VMA_NULL, 40 SCAN_VMA_CHECK, 41 SCAN_ADDRESS_RANGE, 42 SCAN_SWAP_CACHE_PAGE, 43 SCAN_DEL_PAGE_LRU, 44 SCAN_ALLOC_HUGE_PAGE_FAIL, 45 SCAN_CGROUP_CHARGE_FAIL, 46 SCAN_EXCEED_SWAP_PTE, 47 SCAN_TRUNCATED, 48 }; 49 50 #define CREATE_TRACE_POINTS 51 #include <trace/events/huge_memory.h> 52 53 /* default scan 8*512 pte (or vmas) every 30 second */ 54 static unsigned int khugepaged_pages_to_scan __read_mostly; 55 static unsigned int khugepaged_pages_collapsed; 56 static unsigned int khugepaged_full_scans; 57 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; 58 /* during fragmentation poll the hugepage allocator once every minute */ 59 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; 60 static unsigned long khugepaged_sleep_expire; 61 static DEFINE_SPINLOCK(khugepaged_mm_lock); 62 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); 63 /* 64 * default collapse hugepages if there is at least one pte mapped like 65 * it would have happened if the vma was large enough during page 66 * fault. 67 */ 68 static unsigned int khugepaged_max_ptes_none __read_mostly; 69 static unsigned int khugepaged_max_ptes_swap __read_mostly; 70 71 #define MM_SLOTS_HASH_BITS 10 72 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 73 74 static struct kmem_cache *mm_slot_cache __read_mostly; 75 76 /** 77 * struct mm_slot - hash lookup from mm to mm_slot 78 * @hash: hash collision list 79 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head 80 * @mm: the mm that this information is valid for 81 */ 82 struct mm_slot { 83 struct hlist_node hash; 84 struct list_head mm_node; 85 struct mm_struct *mm; 86 }; 87 88 /** 89 * struct khugepaged_scan - cursor for scanning 90 * @mm_head: the head of the mm list to scan 91 * @mm_slot: the current mm_slot we are scanning 92 * @address: the next address inside that to be scanned 93 * 94 * There is only the one khugepaged_scan instance of this cursor structure. 
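 *
 * The mm_head list and the mm_slot cursor are manipulated under
 * khugepaged_mm_lock; the address cursor is only advanced by the
 * khugepaged thread itself. (This is a reading of the code below, not a
 * separately documented rule.)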
95 */ 96 struct khugepaged_scan { 97 struct list_head mm_head; 98 struct mm_slot *mm_slot; 99 unsigned long address; 100 }; 101 102 static struct khugepaged_scan khugepaged_scan = { 103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 104 }; 105 106 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 107 struct kobj_attribute *attr, 108 char *buf) 109 { 110 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); 111 } 112 113 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, 114 struct kobj_attribute *attr, 115 const char *buf, size_t count) 116 { 117 unsigned long msecs; 118 int err; 119 120 err = kstrtoul(buf, 10, &msecs); 121 if (err || msecs > UINT_MAX) 122 return -EINVAL; 123 124 khugepaged_scan_sleep_millisecs = msecs; 125 khugepaged_sleep_expire = 0; 126 wake_up_interruptible(&khugepaged_wait); 127 128 return count; 129 } 130 static struct kobj_attribute scan_sleep_millisecs_attr = 131 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, 132 scan_sleep_millisecs_store); 133 134 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, 135 struct kobj_attribute *attr, 136 char *buf) 137 { 138 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); 139 } 140 141 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, 142 struct kobj_attribute *attr, 143 const char *buf, size_t count) 144 { 145 unsigned long msecs; 146 int err; 147 148 err = kstrtoul(buf, 10, &msecs); 149 if (err || msecs > UINT_MAX) 150 return -EINVAL; 151 152 khugepaged_alloc_sleep_millisecs = msecs; 153 khugepaged_sleep_expire = 0; 154 wake_up_interruptible(&khugepaged_wait); 155 156 return count; 157 } 158 static struct kobj_attribute alloc_sleep_millisecs_attr = 159 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, 160 alloc_sleep_millisecs_store); 161 162 static ssize_t pages_to_scan_show(struct kobject *kobj, 163 struct kobj_attribute *attr, 164 char *buf) 165 { 166 return sprintf(buf, "%u\n", khugepaged_pages_to_scan); 167 } 168 static ssize_t pages_to_scan_store(struct kobject *kobj, 169 struct kobj_attribute *attr, 170 const char *buf, size_t count) 171 { 172 int err; 173 unsigned long pages; 174 175 err = kstrtoul(buf, 10, &pages); 176 if (err || !pages || pages > UINT_MAX) 177 return -EINVAL; 178 179 khugepaged_pages_to_scan = pages; 180 181 return count; 182 } 183 static struct kobj_attribute pages_to_scan_attr = 184 __ATTR(pages_to_scan, 0644, pages_to_scan_show, 185 pages_to_scan_store); 186 187 static ssize_t pages_collapsed_show(struct kobject *kobj, 188 struct kobj_attribute *attr, 189 char *buf) 190 { 191 return sprintf(buf, "%u\n", khugepaged_pages_collapsed); 192 } 193 static struct kobj_attribute pages_collapsed_attr = 194 __ATTR_RO(pages_collapsed); 195 196 static ssize_t full_scans_show(struct kobject *kobj, 197 struct kobj_attribute *attr, 198 char *buf) 199 { 200 return sprintf(buf, "%u\n", khugepaged_full_scans); 201 } 202 static struct kobj_attribute full_scans_attr = 203 __ATTR_RO(full_scans); 204 205 static ssize_t khugepaged_defrag_show(struct kobject *kobj, 206 struct kobj_attribute *attr, char *buf) 207 { 208 return single_hugepage_flag_show(kobj, attr, buf, 209 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 210 } 211 static ssize_t khugepaged_defrag_store(struct kobject *kobj, 212 struct kobj_attribute *attr, 213 const char *buf, size_t count) 214 { 215 return single_hugepage_flag_store(kobj, attr, buf, count, 216 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 217 } 218 static struct kobj_attribute 
khugepaged_defrag_attr = 219 __ATTR(defrag, 0644, khugepaged_defrag_show, 220 khugepaged_defrag_store); 221 222 /* 223 * max_ptes_none controls if khugepaged should collapse hugepages over 224 * any unmapped ptes in turn potentially increasing the memory 225 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not 226 * reduce the available free memory in the system as it 227 * runs. Increasing max_ptes_none will instead potentially reduce the 228 * free memory in the system during the khugepaged scan. 229 */ 230 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, 231 struct kobj_attribute *attr, 232 char *buf) 233 { 234 return sprintf(buf, "%u\n", khugepaged_max_ptes_none); 235 } 236 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, 237 struct kobj_attribute *attr, 238 const char *buf, size_t count) 239 { 240 int err; 241 unsigned long max_ptes_none; 242 243 err = kstrtoul(buf, 10, &max_ptes_none); 244 if (err || max_ptes_none > HPAGE_PMD_NR-1) 245 return -EINVAL; 246 247 khugepaged_max_ptes_none = max_ptes_none; 248 249 return count; 250 } 251 static struct kobj_attribute khugepaged_max_ptes_none_attr = 252 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, 253 khugepaged_max_ptes_none_store); 254 255 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, 256 struct kobj_attribute *attr, 257 char *buf) 258 { 259 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); 260 } 261 262 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, 263 struct kobj_attribute *attr, 264 const char *buf, size_t count) 265 { 266 int err; 267 unsigned long max_ptes_swap; 268 269 err = kstrtoul(buf, 10, &max_ptes_swap); 270 if (err || max_ptes_swap > HPAGE_PMD_NR-1) 271 return -EINVAL; 272 273 khugepaged_max_ptes_swap = max_ptes_swap; 274 275 return count; 276 } 277 278 static struct kobj_attribute khugepaged_max_ptes_swap_attr = 279 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, 280 khugepaged_max_ptes_swap_store); 281 282 static struct attribute *khugepaged_attr[] = { 283 &khugepaged_defrag_attr.attr, 284 &khugepaged_max_ptes_none_attr.attr, 285 &pages_to_scan_attr.attr, 286 &pages_collapsed_attr.attr, 287 &full_scans_attr.attr, 288 &scan_sleep_millisecs_attr.attr, 289 &alloc_sleep_millisecs_attr.attr, 290 &khugepaged_max_ptes_swap_attr.attr, 291 NULL, 292 }; 293 294 struct attribute_group khugepaged_attr_group = { 295 .attrs = khugepaged_attr, 296 .name = "khugepaged", 297 }; 298 299 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 300 301 int hugepage_madvise(struct vm_area_struct *vma, 302 unsigned long *vm_flags, int advice) 303 { 304 switch (advice) { 305 case MADV_HUGEPAGE: 306 #ifdef CONFIG_S390 307 /* 308 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 309 * can't handle this properly after s390_enable_sie, so we simply 310 * ignore the madvise to prevent qemu from causing a SIGSEGV. 311 */ 312 if (mm_has_pgste(vma->vm_mm)) 313 return 0; 314 #endif 315 *vm_flags &= ~VM_NOHUGEPAGE; 316 *vm_flags |= VM_HUGEPAGE; 317 /* 318 * If the vma become good for khugepaged to scan, 319 * register it here without waiting a page fault that 320 * may not happen any time soon. 
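		 *
		 * (Illustrative userspace sequence, not taken from this
		 *  file:
		 *
		 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		 *	madvise(buf, len, MADV_HUGEPAGE);
		 *
		 *  the madvise() call ends up here and registers the mm
		 *  with khugepaged right away.)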
321 */ 322 if (!(*vm_flags & VM_NO_KHUGEPAGED) && 323 khugepaged_enter_vma_merge(vma, *vm_flags)) 324 return -ENOMEM; 325 break; 326 case MADV_NOHUGEPAGE: 327 *vm_flags &= ~VM_HUGEPAGE; 328 *vm_flags |= VM_NOHUGEPAGE; 329 /* 330 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 331 * this vma even if we leave the mm registered in khugepaged if 332 * it got registered before VM_NOHUGEPAGE was set. 333 */ 334 break; 335 } 336 337 return 0; 338 } 339 340 int __init khugepaged_init(void) 341 { 342 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 343 sizeof(struct mm_slot), 344 __alignof__(struct mm_slot), 0, NULL); 345 if (!mm_slot_cache) 346 return -ENOMEM; 347 348 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; 349 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; 350 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; 351 352 return 0; 353 } 354 355 void __init khugepaged_destroy(void) 356 { 357 kmem_cache_destroy(mm_slot_cache); 358 } 359 360 static inline struct mm_slot *alloc_mm_slot(void) 361 { 362 if (!mm_slot_cache) /* initialization failed */ 363 return NULL; 364 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 365 } 366 367 static inline void free_mm_slot(struct mm_slot *mm_slot) 368 { 369 kmem_cache_free(mm_slot_cache, mm_slot); 370 } 371 372 static struct mm_slot *get_mm_slot(struct mm_struct *mm) 373 { 374 struct mm_slot *mm_slot; 375 376 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 377 if (mm == mm_slot->mm) 378 return mm_slot; 379 380 return NULL; 381 } 382 383 static void insert_to_mm_slots_hash(struct mm_struct *mm, 384 struct mm_slot *mm_slot) 385 { 386 mm_slot->mm = mm; 387 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 388 } 389 390 static inline int khugepaged_test_exit(struct mm_struct *mm) 391 { 392 return atomic_read(&mm->mm_users) == 0; 393 } 394 395 int __khugepaged_enter(struct mm_struct *mm) 396 { 397 struct mm_slot *mm_slot; 398 int wakeup; 399 400 mm_slot = alloc_mm_slot(); 401 if (!mm_slot) 402 return -ENOMEM; 403 404 /* __khugepaged_exit() must not run from under us */ 405 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); 406 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 407 free_mm_slot(mm_slot); 408 return 0; 409 } 410 411 spin_lock(&khugepaged_mm_lock); 412 insert_to_mm_slots_hash(mm, mm_slot); 413 /* 414 * Insert just behind the scanning cursor, to let the area settle 415 * down a little. 416 */ 417 wakeup = list_empty(&khugepaged_scan.mm_head); 418 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 419 spin_unlock(&khugepaged_mm_lock); 420 421 atomic_inc(&mm->mm_count); 422 if (wakeup) 423 wake_up_interruptible(&khugepaged_wait); 424 425 return 0; 426 } 427 428 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, 429 unsigned long vm_flags) 430 { 431 unsigned long hstart, hend; 432 if (!vma->anon_vma) 433 /* 434 * Not yet faulted in so we will register later in the 435 * page fault if needed. 
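		 *
		 * (Once it is faulted in, the checks below only register
		 *  vmas that contain at least one aligned PMD-sized range:
		 *  e.g. with a 2MB HPAGE_PMD_SIZE, a vma spanning
		 *  0x201000-0x600000 gives hstart = 0x400000 and
		 *  hend = 0x600000, so it qualifies, while a vma that
		 *  cannot cover an aligned 2MB range is skipped.)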
436 */ 437 return 0; 438 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) 439 /* khugepaged not yet working on file or special mappings */ 440 return 0; 441 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 442 hend = vma->vm_end & HPAGE_PMD_MASK; 443 if (hstart < hend) 444 return khugepaged_enter(vma, vm_flags); 445 return 0; 446 } 447 448 void __khugepaged_exit(struct mm_struct *mm) 449 { 450 struct mm_slot *mm_slot; 451 int free = 0; 452 453 spin_lock(&khugepaged_mm_lock); 454 mm_slot = get_mm_slot(mm); 455 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 456 hash_del(&mm_slot->hash); 457 list_del(&mm_slot->mm_node); 458 free = 1; 459 } 460 spin_unlock(&khugepaged_mm_lock); 461 462 if (free) { 463 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 464 free_mm_slot(mm_slot); 465 mmdrop(mm); 466 } else if (mm_slot) { 467 /* 468 * This is required to serialize against 469 * khugepaged_test_exit() (which is guaranteed to run 470 * under mmap sem read mode). Stop here (after we 471 * return all pagetables will be destroyed) until 472 * khugepaged has finished working on the pagetables 473 * under the mmap_sem. 474 */ 475 down_write(&mm->mmap_sem); 476 up_write(&mm->mmap_sem); 477 } 478 } 479 480 static void release_pte_page(struct page *page) 481 { 482 /* 0 stands for page_is_file_cache(page) == false */ 483 dec_node_page_state(page, NR_ISOLATED_ANON + 0); 484 unlock_page(page); 485 putback_lru_page(page); 486 } 487 488 static void release_pte_pages(pte_t *pte, pte_t *_pte) 489 { 490 while (--_pte >= pte) { 491 pte_t pteval = *_pte; 492 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) 493 release_pte_page(pte_page(pteval)); 494 } 495 } 496 497 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 498 unsigned long address, 499 pte_t *pte) 500 { 501 struct page *page = NULL; 502 pte_t *_pte; 503 int none_or_zero = 0, result = 0, referenced = 0; 504 bool writable = false; 505 506 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 507 _pte++, address += PAGE_SIZE) { 508 pte_t pteval = *_pte; 509 if (pte_none(pteval) || (pte_present(pteval) && 510 is_zero_pfn(pte_pfn(pteval)))) { 511 if (!userfaultfd_armed(vma) && 512 ++none_or_zero <= khugepaged_max_ptes_none) { 513 continue; 514 } else { 515 result = SCAN_EXCEED_NONE_PTE; 516 goto out; 517 } 518 } 519 if (!pte_present(pteval)) { 520 result = SCAN_PTE_NON_PRESENT; 521 goto out; 522 } 523 page = vm_normal_page(vma, address, pteval); 524 if (unlikely(!page)) { 525 result = SCAN_PAGE_NULL; 526 goto out; 527 } 528 529 VM_BUG_ON_PAGE(PageCompound(page), page); 530 VM_BUG_ON_PAGE(!PageAnon(page), page); 531 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 532 533 /* 534 * We can do it before isolate_lru_page because the 535 * page can't be freed from under us. NOTE: PG_lock 536 * is needed to serialize against split_huge_page 537 * when invoked from the VM. 538 */ 539 if (!trylock_page(page)) { 540 result = SCAN_PAGE_LOCK; 541 goto out; 542 } 543 544 /* 545 * cannot use mapcount: can't collapse if there's a gup pin. 546 * The page must only be referenced by the scanned process 547 * and page swap cache. 548 */ 549 if (page_count(page) != 1 + !!PageSwapCache(page)) { 550 unlock_page(page); 551 result = SCAN_PAGE_COUNT; 552 goto out; 553 } 554 if (pte_write(pteval)) { 555 writable = true; 556 } else { 557 if (PageSwapCache(page) && 558 !reuse_swap_page(page, NULL)) { 559 unlock_page(page); 560 result = SCAN_SWAP_CACHE_PAGE; 561 goto out; 562 } 563 /* 564 * Page is not in the swap cache. It can be collapsed 565 * into a THP. 
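			 *
			 * (If it had still been in the swap cache, the
			 *  reuse_swap_page() call above either confirmed
			 *  this mm is the only user or the collapse was
			 *  already aborted with SCAN_SWAP_CACHE_PAGE.)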
566 */ 567 } 568 569 /* 570 * Isolate the page to avoid collapsing an hugepage 571 * currently in use by the VM. 572 */ 573 if (isolate_lru_page(page)) { 574 unlock_page(page); 575 result = SCAN_DEL_PAGE_LRU; 576 goto out; 577 } 578 /* 0 stands for page_is_file_cache(page) == false */ 579 inc_node_page_state(page, NR_ISOLATED_ANON + 0); 580 VM_BUG_ON_PAGE(!PageLocked(page), page); 581 VM_BUG_ON_PAGE(PageLRU(page), page); 582 583 /* There should be enough young pte to collapse the page */ 584 if (pte_young(pteval) || 585 page_is_young(page) || PageReferenced(page) || 586 mmu_notifier_test_young(vma->vm_mm, address)) 587 referenced++; 588 } 589 if (likely(writable)) { 590 if (likely(referenced)) { 591 result = SCAN_SUCCEED; 592 trace_mm_collapse_huge_page_isolate(page, none_or_zero, 593 referenced, writable, result); 594 return 1; 595 } 596 } else { 597 result = SCAN_PAGE_RO; 598 } 599 600 out: 601 release_pte_pages(pte, _pte); 602 trace_mm_collapse_huge_page_isolate(page, none_or_zero, 603 referenced, writable, result); 604 return 0; 605 } 606 607 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 608 struct vm_area_struct *vma, 609 unsigned long address, 610 spinlock_t *ptl) 611 { 612 pte_t *_pte; 613 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 614 pte_t pteval = *_pte; 615 struct page *src_page; 616 617 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 618 clear_user_highpage(page, address); 619 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 620 if (is_zero_pfn(pte_pfn(pteval))) { 621 /* 622 * ptl mostly unnecessary. 623 */ 624 spin_lock(ptl); 625 /* 626 * paravirt calls inside pte_clear here are 627 * superfluous. 628 */ 629 pte_clear(vma->vm_mm, address, _pte); 630 spin_unlock(ptl); 631 } 632 } else { 633 src_page = pte_page(pteval); 634 copy_user_highpage(page, src_page, address, vma); 635 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 636 release_pte_page(src_page); 637 /* 638 * ptl mostly unnecessary, but preempt has to 639 * be disabled to update the per-cpu stats 640 * inside page_remove_rmap(). 641 */ 642 spin_lock(ptl); 643 /* 644 * paravirt calls inside pte_clear here are 645 * superfluous. 646 */ 647 pte_clear(vma->vm_mm, address, _pte); 648 page_remove_rmap(src_page, false); 649 spin_unlock(ptl); 650 free_page_and_swap_cache(src_page); 651 } 652 653 address += PAGE_SIZE; 654 page++; 655 } 656 } 657 658 static void khugepaged_alloc_sleep(void) 659 { 660 DEFINE_WAIT(wait); 661 662 add_wait_queue(&khugepaged_wait, &wait); 663 freezable_schedule_timeout_interruptible( 664 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 665 remove_wait_queue(&khugepaged_wait, &wait); 666 } 667 668 static int khugepaged_node_load[MAX_NUMNODES]; 669 670 static bool khugepaged_scan_abort(int nid) 671 { 672 int i; 673 674 /* 675 * If node_reclaim_mode is disabled, then no extra effort is made to 676 * allocate memory locally. 677 */ 678 if (!node_reclaim_mode) 679 return false; 680 681 /* If there is a count for this node already, it must be acceptable */ 682 if (khugepaged_node_load[nid]) 683 return false; 684 685 for (i = 0; i < MAX_NUMNODES; i++) { 686 if (!khugepaged_node_load[i]) 687 continue; 688 if (node_distance(nid, i) > RECLAIM_DISTANCE) 689 return true; 690 } 691 return false; 692 } 693 694 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ 695 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) 696 { 697 return khugepaged_defrag() ? 
GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT; 698 } 699 700 #ifdef CONFIG_NUMA 701 static int khugepaged_find_target_node(void) 702 { 703 static int last_khugepaged_target_node = NUMA_NO_NODE; 704 int nid, target_node = 0, max_value = 0; 705 706 /* find first node with max normal pages hit */ 707 for (nid = 0; nid < MAX_NUMNODES; nid++) 708 if (khugepaged_node_load[nid] > max_value) { 709 max_value = khugepaged_node_load[nid]; 710 target_node = nid; 711 } 712 713 /* do some balance if several nodes have the same hit record */ 714 if (target_node <= last_khugepaged_target_node) 715 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 716 nid++) 717 if (max_value == khugepaged_node_load[nid]) { 718 target_node = nid; 719 break; 720 } 721 722 last_khugepaged_target_node = target_node; 723 return target_node; 724 } 725 726 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 727 { 728 if (IS_ERR(*hpage)) { 729 if (!*wait) 730 return false; 731 732 *wait = false; 733 *hpage = NULL; 734 khugepaged_alloc_sleep(); 735 } else if (*hpage) { 736 put_page(*hpage); 737 *hpage = NULL; 738 } 739 740 return true; 741 } 742 743 static struct page * 744 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) 745 { 746 VM_BUG_ON_PAGE(*hpage, *hpage); 747 748 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); 749 if (unlikely(!*hpage)) { 750 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 751 *hpage = ERR_PTR(-ENOMEM); 752 return NULL; 753 } 754 755 prep_transhuge_page(*hpage); 756 count_vm_event(THP_COLLAPSE_ALLOC); 757 return *hpage; 758 } 759 #else 760 static int khugepaged_find_target_node(void) 761 { 762 return 0; 763 } 764 765 static inline struct page *alloc_khugepaged_hugepage(void) 766 { 767 struct page *page; 768 769 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), 770 HPAGE_PMD_ORDER); 771 if (page) 772 prep_transhuge_page(page); 773 return page; 774 } 775 776 static struct page *khugepaged_alloc_hugepage(bool *wait) 777 { 778 struct page *hpage; 779 780 do { 781 hpage = alloc_khugepaged_hugepage(); 782 if (!hpage) { 783 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 784 if (!*wait) 785 return NULL; 786 787 *wait = false; 788 khugepaged_alloc_sleep(); 789 } else 790 count_vm_event(THP_COLLAPSE_ALLOC); 791 } while (unlikely(!hpage) && likely(khugepaged_enabled())); 792 793 return hpage; 794 } 795 796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 797 { 798 if (!*hpage) 799 *hpage = khugepaged_alloc_hugepage(wait); 800 801 if (unlikely(!*hpage)) 802 return false; 803 804 return true; 805 } 806 807 static struct page * 808 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) 809 { 810 VM_BUG_ON(!*hpage); 811 812 return *hpage; 813 } 814 #endif 815 816 static bool hugepage_vma_check(struct vm_area_struct *vma) 817 { 818 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 819 (vma->vm_flags & VM_NOHUGEPAGE)) 820 return false; 821 if (shmem_file(vma->vm_file)) { 822 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 823 return false; 824 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, 825 HPAGE_PMD_NR); 826 } 827 if (!vma->anon_vma || vma->vm_ops) 828 return false; 829 if (is_vma_temporary_stack(vma)) 830 return false; 831 return !(vma->vm_flags & VM_NO_KHUGEPAGED); 832 } 833 834 /* 835 * If mmap_sem temporarily dropped, revalidate vma 836 * before taking mmap_sem. 837 * Return 0 if succeeds, otherwise return none-zero 838 * value (scan code). 
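 *
 * Both callers in this file, collapse_huge_page() and
 * __collapse_huge_page_swapin(), may have dropped and re-acquired
 * mmap_sem around a sleeping operation; any non-zero scan code makes
 * them abort the collapse attempt.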
839 */ 840 841 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) 842 { 843 struct vm_area_struct *vma; 844 unsigned long hstart, hend; 845 846 if (unlikely(khugepaged_test_exit(mm))) 847 return SCAN_ANY_PROCESS; 848 849 vma = find_vma(mm, address); 850 if (!vma) 851 return SCAN_VMA_NULL; 852 853 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 854 hend = vma->vm_end & HPAGE_PMD_MASK; 855 if (address < hstart || address + HPAGE_PMD_SIZE > hend) 856 return SCAN_ADDRESS_RANGE; 857 if (!hugepage_vma_check(vma)) 858 return SCAN_VMA_CHECK; 859 return 0; 860 } 861 862 /* 863 * Bring missing pages in from swap, to complete THP collapse. 864 * Only done if khugepaged_scan_pmd believes it is worthwhile. 865 * 866 * Called and returns without pte mapped or spinlocks held, 867 * but with mmap_sem held to protect against vma changes. 868 */ 869 870 static bool __collapse_huge_page_swapin(struct mm_struct *mm, 871 struct vm_area_struct *vma, 872 unsigned long address, pmd_t *pmd, 873 int referenced) 874 { 875 pte_t pteval; 876 int swapped_in = 0, ret = 0; 877 struct fault_env fe = { 878 .vma = vma, 879 .address = address, 880 .flags = FAULT_FLAG_ALLOW_RETRY, 881 .pmd = pmd, 882 }; 883 884 fe.pte = pte_offset_map(pmd, address); 885 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; 886 fe.pte++, fe.address += PAGE_SIZE) { 887 pteval = *fe.pte; 888 if (!is_swap_pte(pteval)) 889 continue; 890 swapped_in++; 891 /* we only decide to swapin, if there is enough young ptes */ 892 if (referenced < HPAGE_PMD_NR/2) { 893 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 894 return false; 895 } 896 ret = do_swap_page(&fe, pteval); 897 898 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ 899 if (ret & VM_FAULT_RETRY) { 900 down_read(&mm->mmap_sem); 901 if (hugepage_vma_revalidate(mm, address)) { 902 /* vma is no longer available, don't continue to swapin */ 903 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 904 return false; 905 } 906 /* check if the pmd is still valid */ 907 if (mm_find_pmd(mm, address) != pmd) 908 return false; 909 } 910 if (ret & VM_FAULT_ERROR) { 911 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); 912 return false; 913 } 914 /* pte is unmapped now, we need to map it */ 915 fe.pte = pte_offset_map(pmd, fe.address); 916 } 917 fe.pte--; 918 pte_unmap(fe.pte); 919 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); 920 return true; 921 } 922 923 static void collapse_huge_page(struct mm_struct *mm, 924 unsigned long address, 925 struct page **hpage, 926 struct vm_area_struct *vma, 927 int node, int referenced) 928 { 929 pmd_t *pmd, _pmd; 930 pte_t *pte; 931 pgtable_t pgtable; 932 struct page *new_page; 933 spinlock_t *pmd_ptl, *pte_ptl; 934 int isolated = 0, result = 0; 935 struct mem_cgroup *memcg; 936 unsigned long mmun_start; /* For mmu_notifiers */ 937 unsigned long mmun_end; /* For mmu_notifiers */ 938 gfp_t gfp; 939 940 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 941 942 /* Only allocate from the target node */ 943 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; 944 945 /* 946 * Before allocating the hugepage, release the mmap_sem read lock. 947 * The allocation can take potentially a long time if it involves 948 * sync compaction, and we do not need to hold the mmap_sem during 949 * that. We will recheck the vma after taking it again in write mode. 
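	 *
	 * (With /sys/kernel/mm/transparent_hugepage/khugepaged/defrag set,
	 *  the gfp mask chosen above is GFP_TRANSHUGE, so the allocation
	 *  may sleep in direct reclaim and compaction; with defrag clear it
	 *  is GFP_TRANSHUGE_LIGHT and fails quickly instead.)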
950 */ 951 up_read(&mm->mmap_sem); 952 new_page = khugepaged_alloc_page(hpage, gfp, node); 953 if (!new_page) { 954 result = SCAN_ALLOC_HUGE_PAGE_FAIL; 955 goto out_nolock; 956 } 957 958 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { 959 result = SCAN_CGROUP_CHARGE_FAIL; 960 goto out_nolock; 961 } 962 963 down_read(&mm->mmap_sem); 964 result = hugepage_vma_revalidate(mm, address); 965 if (result) { 966 mem_cgroup_cancel_charge(new_page, memcg, true); 967 up_read(&mm->mmap_sem); 968 goto out_nolock; 969 } 970 971 pmd = mm_find_pmd(mm, address); 972 if (!pmd) { 973 result = SCAN_PMD_NULL; 974 mem_cgroup_cancel_charge(new_page, memcg, true); 975 up_read(&mm->mmap_sem); 976 goto out_nolock; 977 } 978 979 /* 980 * __collapse_huge_page_swapin always returns with mmap_sem locked. 981 * If it fails, we release mmap_sem and jump out_nolock. 982 * Continuing to collapse causes inconsistency. 983 */ 984 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { 985 mem_cgroup_cancel_charge(new_page, memcg, true); 986 up_read(&mm->mmap_sem); 987 goto out_nolock; 988 } 989 990 up_read(&mm->mmap_sem); 991 /* 992 * Prevent all access to pagetables with the exception of 993 * gup_fast later handled by the ptep_clear_flush and the VM 994 * handled by the anon_vma lock + PG_lock. 995 */ 996 down_write(&mm->mmap_sem); 997 result = hugepage_vma_revalidate(mm, address); 998 if (result) 999 goto out; 1000 /* check if the pmd is still valid */ 1001 if (mm_find_pmd(mm, address) != pmd) 1002 goto out; 1003 1004 anon_vma_lock_write(vma->anon_vma); 1005 1006 pte = pte_offset_map(pmd, address); 1007 pte_ptl = pte_lockptr(mm, pmd); 1008 1009 mmun_start = address; 1010 mmun_end = address + HPAGE_PMD_SIZE; 1011 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1012 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 1013 /* 1014 * After this gup_fast can't run anymore. This also removes 1015 * any huge TLB entry from the CPU so we won't allow 1016 * huge and small TLB entries for the same virtual address 1017 * to avoid the risk of CPU bugs in that area. 1018 */ 1019 _pmd = pmdp_collapse_flush(vma, address, pmd); 1020 spin_unlock(pmd_ptl); 1021 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1022 1023 spin_lock(pte_ptl); 1024 isolated = __collapse_huge_page_isolate(vma, address, pte); 1025 spin_unlock(pte_ptl); 1026 1027 if (unlikely(!isolated)) { 1028 pte_unmap(pte); 1029 spin_lock(pmd_ptl); 1030 BUG_ON(!pmd_none(*pmd)); 1031 /* 1032 * We can only use set_pmd_at when establishing 1033 * hugepmds and never for establishing regular pmds that 1034 * points to regular pagetables. Use pmd_populate for that 1035 */ 1036 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 1037 spin_unlock(pmd_ptl); 1038 anon_vma_unlock_write(vma->anon_vma); 1039 result = SCAN_FAIL; 1040 goto out; 1041 } 1042 1043 /* 1044 * All pages are isolated and locked so anon_vma rmap 1045 * can't run anymore. 1046 */ 1047 anon_vma_unlock_write(vma->anon_vma); 1048 1049 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 1050 pte_unmap(pte); 1051 __SetPageUptodate(new_page); 1052 pgtable = pmd_pgtable(_pmd); 1053 1054 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 1055 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1056 1057 /* 1058 * spin_lock() below is not the equivalent of smp_wmb(), so 1059 * this is needed to avoid the copy_huge_page writes to become 1060 * visible after the set_pmd_at() write. 
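	 *
	 * (As read from this code, the requirement is that the data copied
	 *  by __collapse_huge_page_copy() is visible before the huge pmd
	 *  itself becomes visible via set_pmd_at().)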
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
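		 *
		 * (Same rule as in __collapse_huge_page_isolate(): an
		 *  extra reference here usually means a gup pin, e.g.
		 *  O_DIRECT or a driver holding the small page, and
		 *  collapsing under such a pin would be unsafe.)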
1180 */ 1181 if (page_count(page) != 1 + !!PageSwapCache(page)) { 1182 result = SCAN_PAGE_COUNT; 1183 goto out_unmap; 1184 } 1185 if (pte_young(pteval) || 1186 page_is_young(page) || PageReferenced(page) || 1187 mmu_notifier_test_young(vma->vm_mm, address)) 1188 referenced++; 1189 } 1190 if (writable) { 1191 if (referenced) { 1192 result = SCAN_SUCCEED; 1193 ret = 1; 1194 } else { 1195 result = SCAN_LACK_REFERENCED_PAGE; 1196 } 1197 } else { 1198 result = SCAN_PAGE_RO; 1199 } 1200 out_unmap: 1201 pte_unmap_unlock(pte, ptl); 1202 if (ret) { 1203 node = khugepaged_find_target_node(); 1204 /* collapse_huge_page will return with the mmap_sem released */ 1205 collapse_huge_page(mm, address, hpage, vma, node, referenced); 1206 } 1207 out: 1208 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, 1209 none_or_zero, result, unmapped); 1210 return ret; 1211 } 1212 1213 static void collect_mm_slot(struct mm_slot *mm_slot) 1214 { 1215 struct mm_struct *mm = mm_slot->mm; 1216 1217 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 1218 1219 if (khugepaged_test_exit(mm)) { 1220 /* free mm_slot */ 1221 hash_del(&mm_slot->hash); 1222 list_del(&mm_slot->mm_node); 1223 1224 /* 1225 * Not strictly needed because the mm exited already. 1226 * 1227 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1228 */ 1229 1230 /* khugepaged_mm_lock actually not necessary for the below */ 1231 free_mm_slot(mm_slot); 1232 mmdrop(mm); 1233 } 1234 } 1235 1236 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 1237 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) 1238 { 1239 struct vm_area_struct *vma; 1240 unsigned long addr; 1241 pmd_t *pmd, _pmd; 1242 1243 i_mmap_lock_write(mapping); 1244 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1245 /* probably overkill */ 1246 if (vma->anon_vma) 1247 continue; 1248 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 1249 if (addr & ~HPAGE_PMD_MASK) 1250 continue; 1251 if (vma->vm_end < addr + HPAGE_PMD_SIZE) 1252 continue; 1253 pmd = mm_find_pmd(vma->vm_mm, addr); 1254 if (!pmd) 1255 continue; 1256 /* 1257 * We need exclusive mmap_sem to retract page table. 1258 * If trylock fails we would end up with pte-mapped THP after 1259 * re-fault. Not ideal, but it's more important to not disturb 1260 * the system too much. 1261 */ 1262 if (down_write_trylock(&vma->vm_mm->mmap_sem)) { 1263 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); 1264 /* assume page table is clear */ 1265 _pmd = pmdp_collapse_flush(vma, addr, pmd); 1266 spin_unlock(ptl); 1267 up_write(&vma->vm_mm->mmap_sem); 1268 atomic_long_dec(&vma->vm_mm->nr_ptes); 1269 pte_free(vma->vm_mm, pmd_pgtable(_pmd)); 1270 } 1271 } 1272 i_mmap_unlock_write(mapping); 1273 } 1274 1275 /** 1276 * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 
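 * @mm: mm whose scan triggered the collapse (used for the memcg charge)
 * @mapping: address_space of the shmem file being collapsed
 * @start: first page offset of the PMD-aligned, HPAGE_PMD_NR sized range
 * @hpage: huge page to collapse into; set to NULL here on success
 * @node: NUMA node the huge page should be allocated on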
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and freeze a new huge page;
 *  - scan over the radix tree, replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unfreeze huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() |
		__GFP_OTHER_NODE | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	new_page->index = start;
	new_page->mapping = mapping;
	__SetPageSwapBacked(new_page);
	__SetPageLocked(new_page);
	BUG_ON(!page_ref_freeze(new_page, 1));

	/*
	 * At this point the new_page is 'frozen' (page_count() is zero),
	 * locked and not up-to-date. It's safe to insert it into the radix
	 * tree, because nobody would be able to map it or use it in any
	 * other way until we unfreeze it.
	 */

	index = start;
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Handle holes in the radix tree: charge it from shmem and
		 * insert relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			break;
		}
		nr_none += n;
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->tree_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			spin_unlock_irq(&mapping->tree_lock);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
			spin_lock_irq(&mapping->tree_lock);
		} else if (trylock_page(page)) {
			get_page(page);
		} else {
			result = SCAN_PAGE_LOCK;
			break;
		}

		/*
		 * The page must be locked, so we can drop the tree_lock
		 * without racing with truncate.
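		 *
		 * (Truncation takes the page lock before removing a page
		 *  from the radix tree, so with the lock held the
		 *  page_mapping() check below is stable; a truncation that
		 *  won the race is caught there as SCAN_TRUNCATED.)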
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);
		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}
		spin_unlock_irq(&mapping->tree_lock);

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_isolate_failed;
		}

		if (page_mapped(page))
			unmap_mapping_range(mapping, index << PAGE_SHIFT,
					PAGE_SIZE, 0);

		spin_lock_irq(&mapping->tree_lock);

		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			goto out_lru;
		}

		/*
		 * Add the page to the list to be able to undo the collapse
		 * if something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(slot,
				new_page + (index % HPAGE_PMD_NR));

		index++;
		continue;
out_lru:
		spin_unlock_irq(&mapping->tree_lock);
		putback_lru_page(page);
out_isolate_failed:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
out_unlock:
		unlock_page(page);
		put_page(page);
		break;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (result == SCAN_SUCCEED && index < end) {
		int n = end - index;

		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}

		for (; index < end; index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

tree_locked:
	spin_unlock_irq(&mapping->tree_lock);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		unsigned long flags;
		struct zone *zone = page_zone(new_page);

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			unlock_page(page);
			page_ref_unfreeze(page, 1);
			page->mapping = NULL;
			ClearPageActive(page);
			ClearPageUnevictable(page);
			put_page(page);
		}

		local_irq_save(flags);
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
		if (nr_none) {
			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
		}
		local_irq_restore(flags);

		/*
		 * Remove pte page tables, so we can re-fault
		 * the page as huge.
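		 *
		 * (retract_page_tables() only frees the now-empty pte
		 *  tables; the huge mapping itself is not installed here,
		 *  it is set up by the next page fault, which can then map
		 *  new_page with a single PMD entry.)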
1496 */ 1497 retract_page_tables(mapping, start); 1498 1499 /* Everything is ready, let's unfreeze the new_page */ 1500 set_page_dirty(new_page); 1501 SetPageUptodate(new_page); 1502 page_ref_unfreeze(new_page, HPAGE_PMD_NR); 1503 mem_cgroup_commit_charge(new_page, memcg, false, true); 1504 lru_cache_add_anon(new_page); 1505 unlock_page(new_page); 1506 1507 *hpage = NULL; 1508 } else { 1509 /* Something went wrong: rollback changes to the radix-tree */ 1510 shmem_uncharge(mapping->host, nr_none); 1511 spin_lock_irq(&mapping->tree_lock); 1512 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, 1513 start) { 1514 if (iter.index >= end) 1515 break; 1516 page = list_first_entry_or_null(&pagelist, 1517 struct page, lru); 1518 if (!page || iter.index < page->index) { 1519 if (!nr_none) 1520 break; 1521 /* Put holes back where they were */ 1522 radix_tree_replace_slot(slot, NULL); 1523 nr_none--; 1524 continue; 1525 } 1526 1527 VM_BUG_ON_PAGE(page->index != iter.index, page); 1528 1529 /* Unfreeze the page. */ 1530 list_del(&page->lru); 1531 page_ref_unfreeze(page, 2); 1532 radix_tree_replace_slot(slot, page); 1533 spin_unlock_irq(&mapping->tree_lock); 1534 putback_lru_page(page); 1535 unlock_page(page); 1536 spin_lock_irq(&mapping->tree_lock); 1537 } 1538 VM_BUG_ON(nr_none); 1539 spin_unlock_irq(&mapping->tree_lock); 1540 1541 /* Unfreeze new_page, caller would take care about freeing it */ 1542 page_ref_unfreeze(new_page, 1); 1543 mem_cgroup_cancel_charge(new_page, memcg, true); 1544 unlock_page(new_page); 1545 new_page->mapping = NULL; 1546 } 1547 out: 1548 VM_BUG_ON(!list_empty(&pagelist)); 1549 /* TODO: tracepoints */ 1550 } 1551 1552 static void khugepaged_scan_shmem(struct mm_struct *mm, 1553 struct address_space *mapping, 1554 pgoff_t start, struct page **hpage) 1555 { 1556 struct page *page = NULL; 1557 struct radix_tree_iter iter; 1558 void **slot; 1559 int present, swap; 1560 int node = NUMA_NO_NODE; 1561 int result = SCAN_SUCCEED; 1562 1563 present = 0; 1564 swap = 0; 1565 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 1566 rcu_read_lock(); 1567 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1568 if (iter.index >= start + HPAGE_PMD_NR) 1569 break; 1570 1571 page = radix_tree_deref_slot(slot); 1572 if (radix_tree_deref_retry(page)) { 1573 slot = radix_tree_iter_retry(&iter); 1574 continue; 1575 } 1576 1577 if (radix_tree_exception(page)) { 1578 if (++swap > khugepaged_max_ptes_swap) { 1579 result = SCAN_EXCEED_SWAP_PTE; 1580 break; 1581 } 1582 continue; 1583 } 1584 1585 if (PageTransCompound(page)) { 1586 result = SCAN_PAGE_COMPOUND; 1587 break; 1588 } 1589 1590 node = page_to_nid(page); 1591 if (khugepaged_scan_abort(node)) { 1592 result = SCAN_SCAN_ABORT; 1593 break; 1594 } 1595 khugepaged_node_load[node]++; 1596 1597 if (!PageLRU(page)) { 1598 result = SCAN_PAGE_LRU; 1599 break; 1600 } 1601 1602 if (page_count(page) != 1 + page_mapcount(page)) { 1603 result = SCAN_PAGE_COUNT; 1604 break; 1605 } 1606 1607 /* 1608 * We probably should check if the page is referenced here, but 1609 * nobody would transfer pte_young() to PageReferenced() for us. 1610 * And rmap walk here is just too costly... 
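		 *
		 * (Unlike khugepaged_scan_pmd(), which requires at least
		 *  one referenced pte before collapsing, this path decides
		 *  purely on how densely the range is populated.)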
1611 */ 1612 1613 present++; 1614 1615 if (need_resched()) { 1616 cond_resched_rcu(); 1617 slot = radix_tree_iter_next(&iter); 1618 } 1619 } 1620 rcu_read_unlock(); 1621 1622 if (result == SCAN_SUCCEED) { 1623 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { 1624 result = SCAN_EXCEED_NONE_PTE; 1625 } else { 1626 node = khugepaged_find_target_node(); 1627 collapse_shmem(mm, mapping, start, hpage, node); 1628 } 1629 } 1630 1631 /* TODO: tracepoints */ 1632 } 1633 #else 1634 static void khugepaged_scan_shmem(struct mm_struct *mm, 1635 struct address_space *mapping, 1636 pgoff_t start, struct page **hpage) 1637 { 1638 BUILD_BUG(); 1639 } 1640 #endif 1641 1642 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 1643 struct page **hpage) 1644 __releases(&khugepaged_mm_lock) 1645 __acquires(&khugepaged_mm_lock) 1646 { 1647 struct mm_slot *mm_slot; 1648 struct mm_struct *mm; 1649 struct vm_area_struct *vma; 1650 int progress = 0; 1651 1652 VM_BUG_ON(!pages); 1653 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 1654 1655 if (khugepaged_scan.mm_slot) 1656 mm_slot = khugepaged_scan.mm_slot; 1657 else { 1658 mm_slot = list_entry(khugepaged_scan.mm_head.next, 1659 struct mm_slot, mm_node); 1660 khugepaged_scan.address = 0; 1661 khugepaged_scan.mm_slot = mm_slot; 1662 } 1663 spin_unlock(&khugepaged_mm_lock); 1664 1665 mm = mm_slot->mm; 1666 down_read(&mm->mmap_sem); 1667 if (unlikely(khugepaged_test_exit(mm))) 1668 vma = NULL; 1669 else 1670 vma = find_vma(mm, khugepaged_scan.address); 1671 1672 progress++; 1673 for (; vma; vma = vma->vm_next) { 1674 unsigned long hstart, hend; 1675 1676 cond_resched(); 1677 if (unlikely(khugepaged_test_exit(mm))) { 1678 progress++; 1679 break; 1680 } 1681 if (!hugepage_vma_check(vma)) { 1682 skip: 1683 progress++; 1684 continue; 1685 } 1686 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1687 hend = vma->vm_end & HPAGE_PMD_MASK; 1688 if (hstart >= hend) 1689 goto skip; 1690 if (khugepaged_scan.address > hend) 1691 goto skip; 1692 if (khugepaged_scan.address < hstart) 1693 khugepaged_scan.address = hstart; 1694 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 1695 1696 while (khugepaged_scan.address < hend) { 1697 int ret; 1698 cond_resched(); 1699 if (unlikely(khugepaged_test_exit(mm))) 1700 goto breakouterloop; 1701 1702 VM_BUG_ON(khugepaged_scan.address < hstart || 1703 khugepaged_scan.address + HPAGE_PMD_SIZE > 1704 hend); 1705 if (shmem_file(vma->vm_file)) { 1706 struct file *file; 1707 pgoff_t pgoff = linear_page_index(vma, 1708 khugepaged_scan.address); 1709 if (!shmem_huge_enabled(vma)) 1710 goto skip; 1711 file = get_file(vma->vm_file); 1712 up_read(&mm->mmap_sem); 1713 ret = 1; 1714 khugepaged_scan_shmem(mm, file->f_mapping, 1715 pgoff, hpage); 1716 fput(file); 1717 } else { 1718 ret = khugepaged_scan_pmd(mm, vma, 1719 khugepaged_scan.address, 1720 hpage); 1721 } 1722 /* move to next address */ 1723 khugepaged_scan.address += HPAGE_PMD_SIZE; 1724 progress += HPAGE_PMD_NR; 1725 if (ret) 1726 /* we released mmap_sem so break loop */ 1727 goto breakouterloop_mmap_sem; 1728 if (progress >= pages) 1729 goto breakouterloop; 1730 } 1731 } 1732 breakouterloop: 1733 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 1734 breakouterloop_mmap_sem: 1735 1736 spin_lock(&khugepaged_mm_lock); 1737 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 1738 /* 1739 * Release the current mm_slot if this mm is about to die, or 1740 * if we scanned all vmas of this mm. 
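	 *
	 * (When __khugepaged_exit() skipped freeing the slot because it
	 *  was the one being scanned, the collect_mm_slot() call below is
	 *  what finally frees it and drops the mm reference.)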
1741 */ 1742 if (khugepaged_test_exit(mm) || !vma) { 1743 /* 1744 * Make sure that if mm_users is reaching zero while 1745 * khugepaged runs here, khugepaged_exit will find 1746 * mm_slot not pointing to the exiting mm. 1747 */ 1748 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 1749 khugepaged_scan.mm_slot = list_entry( 1750 mm_slot->mm_node.next, 1751 struct mm_slot, mm_node); 1752 khugepaged_scan.address = 0; 1753 } else { 1754 khugepaged_scan.mm_slot = NULL; 1755 khugepaged_full_scans++; 1756 } 1757 1758 collect_mm_slot(mm_slot); 1759 } 1760 1761 return progress; 1762 } 1763 1764 static int khugepaged_has_work(void) 1765 { 1766 return !list_empty(&khugepaged_scan.mm_head) && 1767 khugepaged_enabled(); 1768 } 1769 1770 static int khugepaged_wait_event(void) 1771 { 1772 return !list_empty(&khugepaged_scan.mm_head) || 1773 kthread_should_stop(); 1774 } 1775 1776 static void khugepaged_do_scan(void) 1777 { 1778 struct page *hpage = NULL; 1779 unsigned int progress = 0, pass_through_head = 0; 1780 unsigned int pages = khugepaged_pages_to_scan; 1781 bool wait = true; 1782 1783 barrier(); /* write khugepaged_pages_to_scan to local stack */ 1784 1785 while (progress < pages) { 1786 if (!khugepaged_prealloc_page(&hpage, &wait)) 1787 break; 1788 1789 cond_resched(); 1790 1791 if (unlikely(kthread_should_stop() || try_to_freeze())) 1792 break; 1793 1794 spin_lock(&khugepaged_mm_lock); 1795 if (!khugepaged_scan.mm_slot) 1796 pass_through_head++; 1797 if (khugepaged_has_work() && 1798 pass_through_head < 2) 1799 progress += khugepaged_scan_mm_slot(pages - progress, 1800 &hpage); 1801 else 1802 progress = pages; 1803 spin_unlock(&khugepaged_mm_lock); 1804 } 1805 1806 if (!IS_ERR_OR_NULL(hpage)) 1807 put_page(hpage); 1808 } 1809 1810 static bool khugepaged_should_wakeup(void) 1811 { 1812 return kthread_should_stop() || 1813 time_after_eq(jiffies, khugepaged_sleep_expire); 1814 } 1815 1816 static void khugepaged_wait_work(void) 1817 { 1818 if (khugepaged_has_work()) { 1819 const unsigned long scan_sleep_jiffies = 1820 msecs_to_jiffies(khugepaged_scan_sleep_millisecs); 1821 1822 if (!scan_sleep_jiffies) 1823 return; 1824 1825 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; 1826 wait_event_freezable_timeout(khugepaged_wait, 1827 khugepaged_should_wakeup(), 1828 scan_sleep_jiffies); 1829 return; 1830 } 1831 1832 if (khugepaged_enabled()) 1833 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 1834 } 1835 1836 static int khugepaged(void *none) 1837 { 1838 struct mm_slot *mm_slot; 1839 1840 set_freezable(); 1841 set_user_nice(current, MAX_NICE); 1842 1843 while (!kthread_should_stop()) { 1844 khugepaged_do_scan(); 1845 khugepaged_wait_work(); 1846 } 1847 1848 spin_lock(&khugepaged_mm_lock); 1849 mm_slot = khugepaged_scan.mm_slot; 1850 khugepaged_scan.mm_slot = NULL; 1851 if (mm_slot) 1852 collect_mm_slot(mm_slot); 1853 spin_unlock(&khugepaged_mm_lock); 1854 return 0; 1855 } 1856 1857 static void set_recommended_min_free_kbytes(void) 1858 { 1859 struct zone *zone; 1860 int nr_zones = 0; 1861 unsigned long recommended_min; 1862 1863 for_each_populated_zone(zone) 1864 nr_zones++; 1865 1866 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ 1867 recommended_min = pageblock_nr_pages * nr_zones * 2; 1868 1869 /* 1870 * Make sure that on average at least two pageblocks are almost free 1871 * of another type, one for a migratetype to fall back to and a 1872 * second to avoid subsequent fallbacks of other types There are 3 1873 * MIGRATE_TYPES we care about. 
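	 *
	 * (Worked example, assuming 4kB pages and 2MB pageblocks, i.e.
	 *  pageblock_nr_pages == 512, on a machine with one populated
	 *  zone: 512 * 1 * 2 = 1024 pages above plus
	 *  512 * 1 * 3 * 3 = 4608 pages here, about 22MB in total before
	 *  the 5% of lowmem clamp below.)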
1874 */ 1875 recommended_min += pageblock_nr_pages * nr_zones * 1876 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 1877 1878 /* don't ever allow to reserve more than 5% of the lowmem */ 1879 recommended_min = min(recommended_min, 1880 (unsigned long) nr_free_buffer_pages() / 20); 1881 recommended_min <<= (PAGE_SHIFT-10); 1882 1883 if (recommended_min > min_free_kbytes) { 1884 if (user_min_free_kbytes >= 0) 1885 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", 1886 min_free_kbytes, recommended_min); 1887 1888 min_free_kbytes = recommended_min; 1889 } 1890 setup_per_zone_wmarks(); 1891 } 1892 1893 int start_stop_khugepaged(void) 1894 { 1895 static struct task_struct *khugepaged_thread __read_mostly; 1896 static DEFINE_MUTEX(khugepaged_mutex); 1897 int err = 0; 1898 1899 mutex_lock(&khugepaged_mutex); 1900 if (khugepaged_enabled()) { 1901 if (!khugepaged_thread) 1902 khugepaged_thread = kthread_run(khugepaged, NULL, 1903 "khugepaged"); 1904 if (IS_ERR(khugepaged_thread)) { 1905 pr_err("khugepaged: kthread_run(khugepaged) failed\n"); 1906 err = PTR_ERR(khugepaged_thread); 1907 khugepaged_thread = NULL; 1908 goto fail; 1909 } 1910 1911 if (!list_empty(&khugepaged_scan.mm_head)) 1912 wake_up_interruptible(&khugepaged_wait); 1913 1914 set_recommended_min_free_kbytes(); 1915 } else if (khugepaged_thread) { 1916 kthread_stop(khugepaged_thread); 1917 khugepaged_thread = NULL; 1918 } 1919 fail: 1920 mutex_unlock(&khugepaged_mutex); 1921 return err; 1922 } 1923
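
/*
 * Example usage (not part of this file): start_stop_khugepaged() is
 * called from the sysfs handler for
 * /sys/kernel/mm/transparent_hugepage/enabled, e.g.
 *
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * while the knobs under /sys/kernel/mm/transparent_hugepage/khugepaged/
 * defined earlier in this file only tune a thread that is already
 * running.
 */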