xref: /openbmc/linux/mm/khugepaged.c (revision 7da4e2cb8b1ff8221759bfc7512d651ee69516dc)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/page_table_check.h>
20 #include <linux/swapops.h>
21 #include <linux/shmem_fs.h>
22 
23 #include <asm/tlb.h>
24 #include <asm/pgalloc.h>
25 #include "internal.h"
26 
27 enum scan_result {
28 	SCAN_FAIL,
29 	SCAN_SUCCEED,
30 	SCAN_PMD_NULL,
31 	SCAN_EXCEED_NONE_PTE,
32 	SCAN_EXCEED_SWAP_PTE,
33 	SCAN_EXCEED_SHARED_PTE,
34 	SCAN_PTE_NON_PRESENT,
35 	SCAN_PTE_UFFD_WP,
36 	SCAN_PAGE_RO,
37 	SCAN_LACK_REFERENCED_PAGE,
38 	SCAN_PAGE_NULL,
39 	SCAN_SCAN_ABORT,
40 	SCAN_PAGE_COUNT,
41 	SCAN_PAGE_LRU,
42 	SCAN_PAGE_LOCK,
43 	SCAN_PAGE_ANON,
44 	SCAN_PAGE_COMPOUND,
45 	SCAN_ANY_PROCESS,
46 	SCAN_VMA_NULL,
47 	SCAN_VMA_CHECK,
48 	SCAN_ADDRESS_RANGE,
49 	SCAN_DEL_PAGE_LRU,
50 	SCAN_ALLOC_HUGE_PAGE_FAIL,
51 	SCAN_CGROUP_CHARGE_FAIL,
52 	SCAN_TRUNCATED,
53 	SCAN_PAGE_HAS_PRIVATE,
54 };
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
58 
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
61 
62 /* by default, scan 8*512 ptes (or vmas) every 10 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72 /*
73  * by default, collapse hugepages if there is at least one pte mapped,
74  * just as would have happened had the vma been large enough during
75  * the page fault.
76  */
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
80 
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83 
84 static struct kmem_cache *mm_slot_cache __read_mostly;
85 
86 #define MAX_PTE_MAPPED_THP 8
87 
88 /**
89  * struct mm_slot - hash lookup from mm to mm_slot
90  * @hash: hash collision list
91  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92  * @mm: the mm that this information is valid for
93  * @nr_pte_mapped_thp: number of pte-mapped THPs
94  * @pte_mapped_thp: array of addresses of pte-mapped THPs
95  */
96 struct mm_slot {
97 	struct hlist_node hash;
98 	struct list_head mm_node;
99 	struct mm_struct *mm;
100 
101 	/* pte-mapped THP in this mm */
102 	int nr_pte_mapped_thp;
103 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
104 };
105 
106 /**
107  * struct khugepaged_scan - cursor for scanning
108  * @mm_head: the head of the mm list to scan
109  * @mm_slot: the current mm_slot we are scanning
110  * @address: the next address inside that mm to be scanned
111  *
112  * There is only the one khugepaged_scan instance of this cursor structure.
113  */
114 struct khugepaged_scan {
115 	struct list_head mm_head;
116 	struct mm_slot *mm_slot;
117 	unsigned long address;
118 };
119 
120 static struct khugepaged_scan khugepaged_scan = {
121 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122 };
123 
124 #ifdef CONFIG_SYSFS
125 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126 					 struct kobj_attribute *attr,
127 					 char *buf)
128 {
129 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130 }
131 
132 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133 					  struct kobj_attribute *attr,
134 					  const char *buf, size_t count)
135 {
136 	unsigned int msecs;
137 	int err;
138 
139 	err = kstrtouint(buf, 10, &msecs);
140 	if (err)
141 		return -EINVAL;
142 
143 	khugepaged_scan_sleep_millisecs = msecs;
144 	khugepaged_sleep_expire = 0;
145 	wake_up_interruptible(&khugepaged_wait);
146 
147 	return count;
148 }
149 static struct kobj_attribute scan_sleep_millisecs_attr =
150 	__ATTR_RW(scan_sleep_millisecs);
151 
152 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
153 					  struct kobj_attribute *attr,
154 					  char *buf)
155 {
156 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
157 }
158 
159 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
160 					   struct kobj_attribute *attr,
161 					   const char *buf, size_t count)
162 {
163 	unsigned int msecs;
164 	int err;
165 
166 	err = kstrtouint(buf, 10, &msecs);
167 	if (err)
168 		return -EINVAL;
169 
170 	khugepaged_alloc_sleep_millisecs = msecs;
171 	khugepaged_sleep_expire = 0;
172 	wake_up_interruptible(&khugepaged_wait);
173 
174 	return count;
175 }
176 static struct kobj_attribute alloc_sleep_millisecs_attr =
177 	__ATTR_RW(alloc_sleep_millisecs);
178 
179 static ssize_t pages_to_scan_show(struct kobject *kobj,
180 				  struct kobj_attribute *attr,
181 				  char *buf)
182 {
183 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
184 }
185 static ssize_t pages_to_scan_store(struct kobject *kobj,
186 				   struct kobj_attribute *attr,
187 				   const char *buf, size_t count)
188 {
189 	unsigned int pages;
190 	int err;
191 
192 	err = kstrtouint(buf, 10, &pages);
193 	if (err || !pages)
194 		return -EINVAL;
195 
196 	khugepaged_pages_to_scan = pages;
197 
198 	return count;
199 }
200 static struct kobj_attribute pages_to_scan_attr =
201 	__ATTR_RW(pages_to_scan);
202 
203 static ssize_t pages_collapsed_show(struct kobject *kobj,
204 				    struct kobj_attribute *attr,
205 				    char *buf)
206 {
207 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
208 }
209 static struct kobj_attribute pages_collapsed_attr =
210 	__ATTR_RO(pages_collapsed);
211 
212 static ssize_t full_scans_show(struct kobject *kobj,
213 			       struct kobj_attribute *attr,
214 			       char *buf)
215 {
216 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
217 }
218 static struct kobj_attribute full_scans_attr =
219 	__ATTR_RO(full_scans);
220 
221 static ssize_t defrag_show(struct kobject *kobj,
222 			   struct kobj_attribute *attr, char *buf)
223 {
224 	return single_hugepage_flag_show(kobj, attr, buf,
225 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
226 }
227 static ssize_t defrag_store(struct kobject *kobj,
228 			    struct kobj_attribute *attr,
229 			    const char *buf, size_t count)
230 {
231 	return single_hugepage_flag_store(kobj, attr, buf, count,
232 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
233 }
234 static struct kobj_attribute khugepaged_defrag_attr =
235 	__ATTR_RW(defrag);
236 
237 /*
238  * max_ptes_none controls whether khugepaged should collapse hugepages
239  * over any unmapped ptes, in turn potentially increasing the memory
240  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
241  * reduce the available free memory in the system as it runs.
242  * Increasing max_ptes_none will instead potentially reduce the free
243  * memory in the system during the khugepaged scan.
244  */
245 static ssize_t max_ptes_none_show(struct kobject *kobj,
246 				  struct kobj_attribute *attr,
247 				  char *buf)
248 {
249 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
250 }
251 static ssize_t max_ptes_none_store(struct kobject *kobj,
252 				   struct kobj_attribute *attr,
253 				   const char *buf, size_t count)
254 {
255 	int err;
256 	unsigned long max_ptes_none;
257 
258 	err = kstrtoul(buf, 10, &max_ptes_none);
259 	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
260 		return -EINVAL;
261 
262 	khugepaged_max_ptes_none = max_ptes_none;
263 
264 	return count;
265 }
266 static struct kobj_attribute khugepaged_max_ptes_none_attr =
267 	__ATTR_RW(max_ptes_none);
268 
269 static ssize_t max_ptes_swap_show(struct kobject *kobj,
270 				  struct kobj_attribute *attr,
271 				  char *buf)
272 {
273 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
274 }
275 
276 static ssize_t max_ptes_swap_store(struct kobject *kobj,
277 				   struct kobj_attribute *attr,
278 				   const char *buf, size_t count)
279 {
280 	int err;
281 	unsigned long max_ptes_swap;
282 
283 	err  = kstrtoul(buf, 10, &max_ptes_swap);
284 	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
285 		return -EINVAL;
286 
287 	khugepaged_max_ptes_swap = max_ptes_swap;
288 
289 	return count;
290 }
291 
292 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
293 	__ATTR_RW(max_ptes_swap);
294 
295 static ssize_t max_ptes_shared_show(struct kobject *kobj,
296 				    struct kobj_attribute *attr,
297 				    char *buf)
298 {
299 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
300 }
301 
302 static ssize_t max_ptes_shared_store(struct kobject *kobj,
303 				     struct kobj_attribute *attr,
304 				     const char *buf, size_t count)
305 {
306 	int err;
307 	unsigned long max_ptes_shared;
308 
309 	err  = kstrtoul(buf, 10, &max_ptes_shared);
310 	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
311 		return -EINVAL;
312 
313 	khugepaged_max_ptes_shared = max_ptes_shared;
314 
315 	return count;
316 }
317 
318 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
319 	__ATTR_RW(max_ptes_shared);
320 
321 static struct attribute *khugepaged_attr[] = {
322 	&khugepaged_defrag_attr.attr,
323 	&khugepaged_max_ptes_none_attr.attr,
324 	&khugepaged_max_ptes_swap_attr.attr,
325 	&khugepaged_max_ptes_shared_attr.attr,
326 	&pages_to_scan_attr.attr,
327 	&pages_collapsed_attr.attr,
328 	&full_scans_attr.attr,
329 	&scan_sleep_millisecs_attr.attr,
330 	&alloc_sleep_millisecs_attr.attr,
331 	NULL,
332 };
333 
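/*
 * These attributes are exposed via sysfs as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/<name>; the group is
 * registered by the THP sysfs init code in mm/huge_memory.c.
 */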
334 struct attribute_group khugepaged_attr_group = {
335 	.attrs = khugepaged_attr,
336 	.name = "khugepaged",
337 };
338 #endif /* CONFIG_SYSFS */
339 
340 int hugepage_madvise(struct vm_area_struct *vma,
341 		     unsigned long *vm_flags, int advice)
342 {
343 	switch (advice) {
344 	case MADV_HUGEPAGE:
345 #ifdef CONFIG_S390
346 		/*
347 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
348 		 * can't handle this properly after s390_enable_sie, so we simply
349 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
350 		 */
351 		if (mm_has_pgste(vma->vm_mm))
352 			return 0;
353 #endif
354 		*vm_flags &= ~VM_NOHUGEPAGE;
355 		*vm_flags |= VM_HUGEPAGE;
356 		/*
357 		 * If the vma becomes good for khugepaged to scan,
358 		 * register it here without waiting for a page fault
359 		 * that may not happen any time soon.
360 		 */
361 		khugepaged_enter_vma(vma, *vm_flags);
362 		break;
363 	case MADV_NOHUGEPAGE:
364 		*vm_flags &= ~VM_HUGEPAGE;
365 		*vm_flags |= VM_NOHUGEPAGE;
366 		/*
367 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
368 		 * this vma even if the mm stays registered in khugepaged (it
369 		 * may have got registered before VM_NOHUGEPAGE was set).
370 		 */
371 		break;
372 	}
373 
374 	return 0;
375 }
376 
377 int __init khugepaged_init(void)
378 {
379 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
380 					  sizeof(struct mm_slot),
381 					  __alignof__(struct mm_slot), 0, NULL);
382 	if (!mm_slot_cache)
383 		return -ENOMEM;
384 
385 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
386 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
387 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
388 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
389 
390 	return 0;
391 }
392 
393 void __init khugepaged_destroy(void)
394 {
395 	kmem_cache_destroy(mm_slot_cache);
396 }
397 
398 static inline struct mm_slot *alloc_mm_slot(void)
399 {
400 	if (!mm_slot_cache)	/* initialization failed */
401 		return NULL;
402 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
403 }
404 
405 static inline void free_mm_slot(struct mm_slot *mm_slot)
406 {
407 	kmem_cache_free(mm_slot_cache, mm_slot);
408 }
409 
410 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
411 {
412 	struct mm_slot *mm_slot;
413 
414 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
415 		if (mm == mm_slot->mm)
416 			return mm_slot;
417 
418 	return NULL;
419 }
420 
421 static void insert_to_mm_slots_hash(struct mm_struct *mm,
422 				    struct mm_slot *mm_slot)
423 {
424 	mm_slot->mm = mm;
425 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
426 }
427 
428 static inline int khugepaged_test_exit(struct mm_struct *mm)
429 {
430 	return atomic_read(&mm->mm_users) == 0;
431 }
432 
433 void __khugepaged_enter(struct mm_struct *mm)
434 {
435 	struct mm_slot *mm_slot;
436 	int wakeup;
437 
438 	mm_slot = alloc_mm_slot();
439 	if (!mm_slot)
440 		return;
441 
442 	/* __khugepaged_exit() must not run from under us */
443 	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
444 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
445 		free_mm_slot(mm_slot);
446 		return;
447 	}
448 
449 	spin_lock(&khugepaged_mm_lock);
450 	insert_to_mm_slots_hash(mm, mm_slot);
451 	/*
452 	 * Insert just behind the scanning cursor, to let the area settle
453 	 * down a little.
454 	 */
455 	wakeup = list_empty(&khugepaged_scan.mm_head);
456 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
457 	spin_unlock(&khugepaged_mm_lock);
458 
459 	mmgrab(mm);
460 	if (wakeup)
461 		wake_up_interruptible(&khugepaged_wait);
462 }
463 
464 void khugepaged_enter_vma(struct vm_area_struct *vma,
465 			  unsigned long vm_flags)
466 {
467 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
468 	    khugepaged_enabled()) {
469 		if (hugepage_vma_check(vma, vm_flags, false, false))
470 			__khugepaged_enter(vma->vm_mm);
471 	}
472 }
473 
474 void __khugepaged_exit(struct mm_struct *mm)
475 {
476 	struct mm_slot *mm_slot;
477 	int free = 0;
478 
479 	spin_lock(&khugepaged_mm_lock);
480 	mm_slot = get_mm_slot(mm);
481 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
482 		hash_del(&mm_slot->hash);
483 		list_del(&mm_slot->mm_node);
484 		free = 1;
485 	}
486 	spin_unlock(&khugepaged_mm_lock);
487 
488 	if (free) {
489 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
490 		free_mm_slot(mm_slot);
491 		mmdrop(mm);
492 	} else if (mm_slot) {
493 		/*
494 		 * This is required to serialize against
495 		 * khugepaged_test_exit() (which is guaranteed to run
496 		 * under mmap_lock read mode). Stop here (all pagetables
497 		 * will be destroyed after we return) until khugepaged
498 		 * has finished working on the pagetables under the
499 		 * mmap_lock.
500 		 */
501 		mmap_write_lock(mm);
502 		mmap_write_unlock(mm);
503 	}
504 }
505 
506 static void release_pte_page(struct page *page)
507 {
508 	mod_node_page_state(page_pgdat(page),
509 			NR_ISOLATED_ANON + page_is_file_lru(page),
510 			-compound_nr(page));
511 	unlock_page(page);
512 	putback_lru_page(page);
513 }
514 
515 static void release_pte_pages(pte_t *pte, pte_t *_pte,
516 		struct list_head *compound_pagelist)
517 {
518 	struct page *page, *tmp;
519 
520 	while (--_pte >= pte) {
521 		pte_t pteval = *_pte;
522 
523 		page = pte_page(pteval);
524 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
525 				!PageCompound(page))
526 			release_pte_page(page);
527 	}
528 
529 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
530 		list_del(&page->lru);
531 		release_pte_page(page);
532 	}
533 }
534 
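/*
 * A page is only safe to collapse if every reference to it is accounted
 * for by its mappings (total_mapcount), plus one reference per subpage
 * when it sits in the swap cache.  Any surplus refcount indicates a GUP
 * or other external pin that must block the collapse.
 */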
535 static bool is_refcount_suitable(struct page *page)
536 {
537 	int expected_refcount;
538 
539 	expected_refcount = total_mapcount(page);
540 	if (PageSwapCache(page))
541 		expected_refcount += compound_nr(page);
542 
543 	return page_count(page) == expected_refcount;
544 }
545 
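/*
 * Walk the HPAGE_PMD_NR ptes under the pte lock, enforcing the
 * max_ptes_none/max_ptes_shared limits, and trylock + LRU-isolate each
 * mapped anon page so it cannot be freed or migrated while it is copied
 * into the new hugepage.  Returns 1 with all pages locked and isolated
 * (compound pages queued on @compound_pagelist), or 0 after releasing
 * everything that was already isolated.
 */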
546 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
547 					unsigned long address,
548 					pte_t *pte,
549 					struct list_head *compound_pagelist)
550 {
551 	struct page *page = NULL;
552 	pte_t *_pte;
553 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
554 	bool writable = false;
555 
556 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
557 	     _pte++, address += PAGE_SIZE) {
558 		pte_t pteval = *_pte;
559 		if (pte_none(pteval) || (pte_present(pteval) &&
560 				is_zero_pfn(pte_pfn(pteval)))) {
561 			if (!userfaultfd_armed(vma) &&
562 			    ++none_or_zero <= khugepaged_max_ptes_none) {
563 				continue;
564 			} else {
565 				result = SCAN_EXCEED_NONE_PTE;
566 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
567 				goto out;
568 			}
569 		}
570 		if (!pte_present(pteval)) {
571 			result = SCAN_PTE_NON_PRESENT;
572 			goto out;
573 		}
574 		page = vm_normal_page(vma, address, pteval);
575 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
576 			result = SCAN_PAGE_NULL;
577 			goto out;
578 		}
579 
580 		VM_BUG_ON_PAGE(!PageAnon(page), page);
581 
582 		if (page_mapcount(page) > 1 &&
583 				++shared > khugepaged_max_ptes_shared) {
584 			result = SCAN_EXCEED_SHARED_PTE;
585 			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
586 			goto out;
587 		}
588 
589 		if (PageCompound(page)) {
590 			struct page *p;
591 			page = compound_head(page);
592 
593 			/*
594 			 * Check if we have dealt with the compound page
595 			 * already
596 			 */
597 			list_for_each_entry(p, compound_pagelist, lru) {
598 				if (page == p)
599 					goto next;
600 			}
601 		}
602 
603 		/*
604 		 * We can do it before isolate_lru_page because the
605 		 * page can't be freed from under us. NOTE: PG_lock
606 		 * is needed to serialize against split_huge_page
607 		 * when invoked from the VM.
608 		 */
609 		if (!trylock_page(page)) {
610 			result = SCAN_PAGE_LOCK;
611 			goto out;
612 		}
613 
614 		/*
615 		 * Check if the page has any GUP (or other external) pins.
616 		 *
617 		 * The page table that maps the page has been already unlinked
618 		 * from the page table tree and this process cannot get
619 		 * an additional pin on the page.
620 		 *
621 		 * New pins can come later if the page is shared across fork,
622 		 * but not from this process. The other process cannot write to
623 		 * the page, only trigger CoW.
624 		 */
625 		if (!is_refcount_suitable(page)) {
626 			unlock_page(page);
627 			result = SCAN_PAGE_COUNT;
628 			goto out;
629 		}
630 
631 		/*
632 		 * Isolate the page to avoid collapsing a hugepage
633 		 * currently in use by the VM.
634 		 */
635 		if (isolate_lru_page(page)) {
636 			unlock_page(page);
637 			result = SCAN_DEL_PAGE_LRU;
638 			goto out;
639 		}
640 		mod_node_page_state(page_pgdat(page),
641 				NR_ISOLATED_ANON + page_is_file_lru(page),
642 				compound_nr(page));
643 		VM_BUG_ON_PAGE(!PageLocked(page), page);
644 		VM_BUG_ON_PAGE(PageLRU(page), page);
645 
646 		if (PageCompound(page))
647 			list_add_tail(&page->lru, compound_pagelist);
648 next:
649 		/* There should be enough young ptes to collapse the page */
650 		if (pte_young(pteval) ||
651 		    page_is_young(page) || PageReferenced(page) ||
652 		    mmu_notifier_test_young(vma->vm_mm, address))
653 			referenced++;
654 
655 		if (pte_write(pteval))
656 			writable = true;
657 	}
658 
659 	if (unlikely(!writable)) {
660 		result = SCAN_PAGE_RO;
661 	} else if (unlikely(!referenced)) {
662 		result = SCAN_LACK_REFERENCED_PAGE;
663 	} else {
664 		result = SCAN_SUCCEED;
665 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
666 						    referenced, writable, result);
667 		return 1;
668 	}
669 out:
670 	release_pte_pages(pte, _pte, compound_pagelist);
671 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
672 					    referenced, writable, result);
673 	return 0;
674 }
675 
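/*
 * Copy the content of the isolated small pages into the new hugepage:
 * none/zero ptes become cleared subpages, everything else is copied,
 * unmapped from the old page (rmap dropped under @ptl) and released.
 * Compound source pages queued on @compound_pagelist are put back last.
 */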
676 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
677 				      struct vm_area_struct *vma,
678 				      unsigned long address,
679 				      spinlock_t *ptl,
680 				      struct list_head *compound_pagelist)
681 {
682 	struct page *src_page, *tmp;
683 	pte_t *_pte;
684 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
685 				_pte++, page++, address += PAGE_SIZE) {
686 		pte_t pteval = *_pte;
687 
688 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
689 			clear_user_highpage(page, address);
690 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
691 			if (is_zero_pfn(pte_pfn(pteval))) {
692 				/*
693 				 * ptl mostly unnecessary.
694 				 */
695 				spin_lock(ptl);
696 				ptep_clear(vma->vm_mm, address, _pte);
697 				spin_unlock(ptl);
698 			}
699 		} else {
700 			src_page = pte_page(pteval);
701 			copy_user_highpage(page, src_page, address, vma);
702 			if (!PageCompound(src_page))
703 				release_pte_page(src_page);
704 			/*
705 			 * ptl mostly unnecessary, but preempt has to
706 			 * be disabled to update the per-cpu stats
707 			 * inside page_remove_rmap().
708 			 */
709 			spin_lock(ptl);
710 			ptep_clear(vma->vm_mm, address, _pte);
711 			page_remove_rmap(src_page, vma, false);
712 			spin_unlock(ptl);
713 			free_page_and_swap_cache(src_page);
714 		}
715 	}
716 
717 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
718 		list_del(&src_page->lru);
719 		mod_node_page_state(page_pgdat(src_page),
720 				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
721 				    -compound_nr(src_page));
722 		unlock_page(src_page);
723 		free_swap_cache(src_page);
724 		putback_lru_page(src_page);
725 	}
726 }
727 
728 static void khugepaged_alloc_sleep(void)
729 {
730 	DEFINE_WAIT(wait);
731 
732 	add_wait_queue(&khugepaged_wait, &wait);
733 	freezable_schedule_timeout_interruptible(
734 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
735 	remove_wait_queue(&khugepaged_wait, &wait);
736 }
737 
738 static int khugepaged_node_load[MAX_NUMNODES];
739 
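/*
 * With node_reclaim_mode enabled, abort the scan when the candidate
 * page's node is farther than node_reclaim_distance from any node we
 * have already recorded in khugepaged_node_load[], so the collapsed
 * hugepage is not allocated across distant NUMA nodes.
 */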
740 static bool khugepaged_scan_abort(int nid)
741 {
742 	int i;
743 
744 	/*
745 	 * If node_reclaim_mode is disabled, then no extra effort is made to
746 	 * allocate memory locally.
747 	 */
748 	if (!node_reclaim_enabled())
749 		return false;
750 
751 	/* If there is a count for this node already, it must be acceptable */
752 	if (khugepaged_node_load[nid])
753 		return false;
754 
755 	for (i = 0; i < MAX_NUMNODES; i++) {
756 		if (!khugepaged_node_load[i])
757 			continue;
758 		if (node_distance(nid, i) > node_reclaim_distance)
759 			return true;
760 	}
761 	return false;
762 }
763 
764 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
765 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
766 {
767 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
768 }
769 
770 #ifdef CONFIG_NUMA
771 static int khugepaged_find_target_node(void)
772 {
773 	static int last_khugepaged_target_node = NUMA_NO_NODE;
774 	int nid, target_node = 0, max_value = 0;
775 
776 	/* find first node with max normal pages hit */
777 	for (nid = 0; nid < MAX_NUMNODES; nid++)
778 		if (khugepaged_node_load[nid] > max_value) {
779 			max_value = khugepaged_node_load[nid];
780 			target_node = nid;
781 		}
782 
783 	/* do some balancing if several nodes have the same hit record */
784 	if (target_node <= last_khugepaged_target_node)
785 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
786 				nid++)
787 			if (max_value == khugepaged_node_load[nid]) {
788 				target_node = nid;
789 				break;
790 			}
791 
792 	last_khugepaged_target_node = target_node;
793 	return target_node;
794 }
795 
796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
797 {
798 	if (IS_ERR(*hpage)) {
799 		if (!*wait)
800 			return false;
801 
802 		*wait = false;
803 		*hpage = NULL;
804 		khugepaged_alloc_sleep();
805 	} else if (*hpage) {
806 		put_page(*hpage);
807 		*hpage = NULL;
808 	}
809 
810 	return true;
811 }
812 
813 static struct page *
814 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
815 {
816 	VM_BUG_ON_PAGE(*hpage, *hpage);
817 
818 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
819 	if (unlikely(!*hpage)) {
820 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
821 		*hpage = ERR_PTR(-ENOMEM);
822 		return NULL;
823 	}
824 
825 	prep_transhuge_page(*hpage);
826 	count_vm_event(THP_COLLAPSE_ALLOC);
827 	return *hpage;
828 }
829 #else
830 static int khugepaged_find_target_node(void)
831 {
832 	return 0;
833 }
834 
835 static inline struct page *alloc_khugepaged_hugepage(void)
836 {
837 	struct page *page;
838 
839 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
840 			   HPAGE_PMD_ORDER);
841 	if (page)
842 		prep_transhuge_page(page);
843 	return page;
844 }
845 
846 static struct page *khugepaged_alloc_hugepage(bool *wait)
847 {
848 	struct page *hpage;
849 
850 	do {
851 		hpage = alloc_khugepaged_hugepage();
852 		if (!hpage) {
853 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
854 			if (!*wait)
855 				return NULL;
856 
857 			*wait = false;
858 			khugepaged_alloc_sleep();
859 		} else
860 			count_vm_event(THP_COLLAPSE_ALLOC);
861 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
862 
863 	return hpage;
864 }
865 
866 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
867 {
868 	/*
869 	 * If the hpage allocated earlier was briefly exposed in page cache
870 	 * before collapse_file() failed, it is possible that racing lookups
871 	 * have not yet completed, and would then be unpleasantly surprised by
872 	 * finding the hpage reused for the same mapping at a different offset.
873 	 * Just release the previous allocation if there is any danger of that.
874 	 */
875 	if (*hpage && page_count(*hpage) > 1) {
876 		put_page(*hpage);
877 		*hpage = NULL;
878 	}
879 
880 	if (!*hpage)
881 		*hpage = khugepaged_alloc_hugepage(wait);
882 
883 	if (unlikely(!*hpage))
884 		return false;
885 
886 	return true;
887 }
888 
889 static struct page *
890 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
891 {
892 	VM_BUG_ON(!*hpage);
893 
894 	return  *hpage;
895 }
896 #endif
897 
898 /*
899  * If the mmap_lock was temporarily dropped, revalidate the vma
900  * after re-acquiring the mmap_lock.
901  * Return 0 on success, otherwise return a non-zero
902  * value (scan code).
903  */
904 
905 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
906 		struct vm_area_struct **vmap)
907 {
908 	struct vm_area_struct *vma;
909 
910 	if (unlikely(khugepaged_test_exit(mm)))
911 		return SCAN_ANY_PROCESS;
912 
913 	*vmap = vma = find_vma(mm, address);
914 	if (!vma)
915 		return SCAN_VMA_NULL;
916 
917 	if (!transhuge_vma_suitable(vma, address))
918 		return SCAN_ADDRESS_RANGE;
919 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
920 		return SCAN_VMA_CHECK;
921 	/*
922 	 * An anon VMA is expected, but the address may be unmapped and then
923 	 * remapped to a file after khugepaged reacquired the mmap_lock.
924 	 *
925 	 * hugepage_vma_check may return true for qualified file
926 	 * vmas.
927 	 */
928 	if (!vma->anon_vma || !vma_is_anonymous(vma))
929 		return SCAN_VMA_CHECK;
930 	return 0;
931 }
932 
933 /*
934  * Bring missing pages in from swap, to complete THP collapse.
935  * Only done if khugepaged_scan_pmd believes it is worthwhile.
936  *
937  * Called and returns without pte mapped or spinlocks held.
938  * Note that if false is returned, mmap_lock will be released.
939  */
940 
941 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
942 					struct vm_area_struct *vma,
943 					unsigned long haddr, pmd_t *pmd,
944 					int referenced)
945 {
946 	int swapped_in = 0;
947 	vm_fault_t ret = 0;
948 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
949 
950 	for (address = haddr; address < end; address += PAGE_SIZE) {
951 		struct vm_fault vmf = {
952 			.vma = vma,
953 			.address = address,
954 			.pgoff = linear_page_index(vma, haddr),
955 			.flags = FAULT_FLAG_ALLOW_RETRY,
956 			.pmd = pmd,
957 		};
958 
959 		vmf.pte = pte_offset_map(pmd, address);
960 		vmf.orig_pte = *vmf.pte;
961 		if (!is_swap_pte(vmf.orig_pte)) {
962 			pte_unmap(vmf.pte);
963 			continue;
964 		}
965 		ret = do_swap_page(&vmf);
966 
967 		/*
968 		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
969 		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
970 		 * we do not retry here, and the swap entry will remain in the
971 		 * pagetable, resulting in a later failure.
972 		 */
973 		if (ret & VM_FAULT_RETRY) {
974 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
975 			return false;
976 		}
977 		if (ret & VM_FAULT_ERROR) {
978 			mmap_read_unlock(mm);
979 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
980 			return false;
981 		}
982 		swapped_in++;
983 	}
984 
985 	/* Drain the LRU add pagevec to remove the extra pin on the swapped-in pages */
986 	if (swapped_in)
987 		lru_add_drain();
988 
989 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
990 	return true;
991 }
992 
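/*
 * Collapse the pmd-sized range at @address into a newly allocated
 * hugepage: drop mmap_lock for the allocation and memcg charge,
 * revalidate the vma, swap in missing pages if @unmapped, then retake
 * mmap_lock in write mode, isolate and copy the small pages, and
 * install the huge pmd.  Called with mmap_lock held for read; returns
 * with mmap_lock released on every path.
 */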
993 static void collapse_huge_page(struct mm_struct *mm,
994 				   unsigned long address,
995 				   struct page **hpage,
996 				   int node, int referenced, int unmapped)
997 {
998 	LIST_HEAD(compound_pagelist);
999 	pmd_t *pmd, _pmd;
1000 	pte_t *pte;
1001 	pgtable_t pgtable;
1002 	struct page *new_page;
1003 	spinlock_t *pmd_ptl, *pte_ptl;
1004 	int isolated = 0, result = 0;
1005 	struct vm_area_struct *vma;
1006 	struct mmu_notifier_range range;
1007 	gfp_t gfp;
1008 
1009 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1010 
1011 	/* Only allocate from the target node */
1012 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1013 
1014 	/*
1015 	 * Before allocating the hugepage, release the mmap_lock read lock.
1016 	 * The allocation can potentially take a long time if it involves
1017 	 * sync compaction, and we do not need to hold the mmap_lock during
1018 	 * that. We will recheck the vma after taking it again in write mode.
1019 	 */
1020 	mmap_read_unlock(mm);
1021 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1022 	if (!new_page) {
1023 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1024 		goto out_nolock;
1025 	}
1026 
1027 	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
1028 		result = SCAN_CGROUP_CHARGE_FAIL;
1029 		goto out_nolock;
1030 	}
1031 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1032 
1033 	mmap_read_lock(mm);
1034 	result = hugepage_vma_revalidate(mm, address, &vma);
1035 	if (result) {
1036 		mmap_read_unlock(mm);
1037 		goto out_nolock;
1038 	}
1039 
1040 	pmd = mm_find_pmd(mm, address);
1041 	if (!pmd) {
1042 		result = SCAN_PMD_NULL;
1043 		mmap_read_unlock(mm);
1044 		goto out_nolock;
1045 	}
1046 
1047 	/*
1048 	 * __collapse_huge_page_swapin will return with mmap_lock released
1049 	 * when it fails. So we jump to out_nolock directly in that case.
1050 	 * Continuing to collapse causes inconsistency.
1051 	 */
1052 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1053 						     pmd, referenced)) {
1054 		goto out_nolock;
1055 	}
1056 
1057 	mmap_read_unlock(mm);
1058 	/*
1059 	 * Prevent all access to the pagetables, with the exception of
1060 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1061 	 * (handled by the anon_vma lock + PG_lock).
1062 	 */
1063 	mmap_write_lock(mm);
1064 	result = hugepage_vma_revalidate(mm, address, &vma);
1065 	if (result)
1066 		goto out_up_write;
1067 	/* check if the pmd is still valid */
1068 	if (mm_find_pmd(mm, address) != pmd)
1069 		goto out_up_write;
1070 
1071 	anon_vma_lock_write(vma->anon_vma);
1072 
1073 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1074 				address, address + HPAGE_PMD_SIZE);
1075 	mmu_notifier_invalidate_range_start(&range);
1076 
1077 	pte = pte_offset_map(pmd, address);
1078 	pte_ptl = pte_lockptr(mm, pmd);
1079 
1080 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1081 	/*
1082 	 * After this, gup_fast can't run anymore. This also removes
1083 	 * any huge TLB entry from the CPU, so we won't allow huge and
1084 	 * small TLB entries for the same virtual address, to avoid the
1085 	 * risk of CPU bugs in that area.
1086 	 */
1087 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1088 	spin_unlock(pmd_ptl);
1089 	mmu_notifier_invalidate_range_end(&range);
1090 
1091 	spin_lock(pte_ptl);
1092 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1093 			&compound_pagelist);
1094 	spin_unlock(pte_ptl);
1095 
1096 	if (unlikely(!isolated)) {
1097 		pte_unmap(pte);
1098 		spin_lock(pmd_ptl);
1099 		BUG_ON(!pmd_none(*pmd));
1100 		/*
1101 		 * We can only use set_pmd_at when establishing
1102 		 * hugepmds and never for establishing regular pmds that
1103 		 * point to regular pagetables. Use pmd_populate for that.
1104 		 */
1105 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1106 		spin_unlock(pmd_ptl);
1107 		anon_vma_unlock_write(vma->anon_vma);
1108 		result = SCAN_FAIL;
1109 		goto out_up_write;
1110 	}
1111 
1112 	/*
1113 	 * All pages are isolated and locked so anon_vma rmap
1114 	 * can't run anymore.
1115 	 */
1116 	anon_vma_unlock_write(vma->anon_vma);
1117 
1118 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1119 			&compound_pagelist);
1120 	pte_unmap(pte);
1121 	/*
1122 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1123 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1124 	 * keep the copy_huge_page writes from becoming visible after
1125 	 * the set_pmd_at() write.
1126 	 */
1127 	__SetPageUptodate(new_page);
1128 	pgtable = pmd_pgtable(_pmd);
1129 
1130 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1131 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1132 
1133 	spin_lock(pmd_ptl);
1134 	BUG_ON(!pmd_none(*pmd));
1135 	page_add_new_anon_rmap(new_page, vma, address);
1136 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1137 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1138 	set_pmd_at(mm, address, pmd, _pmd);
1139 	update_mmu_cache_pmd(vma, address, pmd);
1140 	spin_unlock(pmd_ptl);
1141 
1142 	*hpage = NULL;
1143 
1144 	khugepaged_pages_collapsed++;
1145 	result = SCAN_SUCCEED;
1146 out_up_write:
1147 	mmap_write_unlock(mm);
1148 out_nolock:
1149 	if (!IS_ERR_OR_NULL(*hpage))
1150 		mem_cgroup_uncharge(page_folio(*hpage));
1151 	trace_mm_collapse_huge_page(mm, isolated, result);
1152 	return;
1153 }
1154 
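/*
 * Scan the ptes under @address's pmd and decide whether the range is a
 * good collapse candidate (limits on none/swap/shared ptes, young and
 * writable pages, acceptable NUMA spread).  Returns 1 after calling
 * collapse_huge_page() (which releases the mmap_lock), 0 otherwise.
 */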
1155 static int khugepaged_scan_pmd(struct mm_struct *mm,
1156 			       struct vm_area_struct *vma,
1157 			       unsigned long address,
1158 			       struct page **hpage)
1159 {
1160 	pmd_t *pmd;
1161 	pte_t *pte, *_pte;
1162 	int ret = 0, result = 0, referenced = 0;
1163 	int none_or_zero = 0, shared = 0;
1164 	struct page *page = NULL;
1165 	unsigned long _address;
1166 	spinlock_t *ptl;
1167 	int node = NUMA_NO_NODE, unmapped = 0;
1168 	bool writable = false;
1169 
1170 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1171 
1172 	pmd = mm_find_pmd(mm, address);
1173 	if (!pmd) {
1174 		result = SCAN_PMD_NULL;
1175 		goto out;
1176 	}
1177 
1178 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1179 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1180 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1181 	     _pte++, _address += PAGE_SIZE) {
1182 		pte_t pteval = *_pte;
1183 		if (is_swap_pte(pteval)) {
1184 			if (++unmapped <= khugepaged_max_ptes_swap) {
1185 				/*
1186 				 * Always be strict with uffd-wp
1187 				 * enabled swap entries.  Please see
1188 				 * comment below for pte_uffd_wp().
1189 				 */
1190 				if (pte_swp_uffd_wp(pteval)) {
1191 					result = SCAN_PTE_UFFD_WP;
1192 					goto out_unmap;
1193 				}
1194 				continue;
1195 			} else {
1196 				result = SCAN_EXCEED_SWAP_PTE;
1197 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1198 				goto out_unmap;
1199 			}
1200 		}
1201 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1202 			if (!userfaultfd_armed(vma) &&
1203 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1204 				continue;
1205 			} else {
1206 				result = SCAN_EXCEED_NONE_PTE;
1207 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1208 				goto out_unmap;
1209 			}
1210 		}
1211 		if (pte_uffd_wp(pteval)) {
1212 			/*
1213 			 * Don't collapse the page if any of the small
1214 			 * PTEs are armed with uffd write protection.
1215 			 * Here we could also mark the new huge pmd as
1216 			 * write protected if any of the small ones is
1217 			 * marked, but that could bring unknown
1218 			 * userfault messages that fall outside of
1219 			 * the registered range.  So, just keep it simple.
1220 			 */
1221 			result = SCAN_PTE_UFFD_WP;
1222 			goto out_unmap;
1223 		}
1224 		if (pte_write(pteval))
1225 			writable = true;
1226 
1227 		page = vm_normal_page(vma, _address, pteval);
1228 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1229 			result = SCAN_PAGE_NULL;
1230 			goto out_unmap;
1231 		}
1232 
1233 		if (page_mapcount(page) > 1 &&
1234 				++shared > khugepaged_max_ptes_shared) {
1235 			result = SCAN_EXCEED_SHARED_PTE;
1236 			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1237 			goto out_unmap;
1238 		}
1239 
1240 		page = compound_head(page);
1241 
1242 		/*
1243 		 * Record which node the original page is from and save this
1244 		 * information to khugepaged_node_load[].
1245 		 * Khugepaged will allocate the hugepage from the node that has
1246 		 * the max hit record.
1247 		 */
1248 		node = page_to_nid(page);
1249 		if (khugepaged_scan_abort(node)) {
1250 			result = SCAN_SCAN_ABORT;
1251 			goto out_unmap;
1252 		}
1253 		khugepaged_node_load[node]++;
1254 		if (!PageLRU(page)) {
1255 			result = SCAN_PAGE_LRU;
1256 			goto out_unmap;
1257 		}
1258 		if (PageLocked(page)) {
1259 			result = SCAN_PAGE_LOCK;
1260 			goto out_unmap;
1261 		}
1262 		if (!PageAnon(page)) {
1263 			result = SCAN_PAGE_ANON;
1264 			goto out_unmap;
1265 		}
1266 
1267 		/*
1268 		 * Check if the page has any GUP (or other external) pins.
1269 		 *
1270 		 * Here the check is racy: it may see total_mapcount > refcount
1271 		 * in some cases.
1272 		 * For example, take one process with one forked child process.
1273 		 * The parent has the PMD split due to MADV_DONTNEED, then
1274 		 * the child is trying to unmap the whole PMD, but khugepaged
1275 		 * may be scanning the parent between the child clearing the
1276 		 * PageDoubleMap flag and decrementing the mapcount.  So
1277 		 * khugepaged may see total_mapcount > refcount.
1278 		 *
1279 		 * But such a case is ephemeral; we could always retry the
1280 		 * collapse later.  However, it may report a false positive if
1281 		 * the page has excessive GUP pins (i.e. 512).  Anyway, the same
1282 		 * check will be done again later, so the risk seems low.
1283 		 */
1284 		if (!is_refcount_suitable(page)) {
1285 			result = SCAN_PAGE_COUNT;
1286 			goto out_unmap;
1287 		}
1288 		if (pte_young(pteval) ||
1289 		    page_is_young(page) || PageReferenced(page) ||
1290 		    mmu_notifier_test_young(vma->vm_mm, address))
1291 			referenced++;
1292 	}
1293 	if (!writable) {
1294 		result = SCAN_PAGE_RO;
1295 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1296 		result = SCAN_LACK_REFERENCED_PAGE;
1297 	} else {
1298 		result = SCAN_SUCCEED;
1299 		ret = 1;
1300 	}
1301 out_unmap:
1302 	pte_unmap_unlock(pte, ptl);
1303 	if (ret) {
1304 		node = khugepaged_find_target_node();
1305 		/* collapse_huge_page will return with the mmap_lock released */
1306 		collapse_huge_page(mm, address, hpage, node,
1307 				referenced, unmapped);
1308 	}
1309 out:
1310 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1311 				     none_or_zero, result, unmapped);
1312 	return ret;
1313 }
1314 
1315 static void collect_mm_slot(struct mm_slot *mm_slot)
1316 {
1317 	struct mm_struct *mm = mm_slot->mm;
1318 
1319 	lockdep_assert_held(&khugepaged_mm_lock);
1320 
1321 	if (khugepaged_test_exit(mm)) {
1322 		/* free mm_slot */
1323 		hash_del(&mm_slot->hash);
1324 		list_del(&mm_slot->mm_node);
1325 
1326 		/*
1327 		 * Not strictly needed because the mm exited already.
1328 		 *
1329 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1330 		 */
1331 
1332 		/* khugepaged_mm_lock actually not necessary for the below */
1333 		free_mm_slot(mm_slot);
1334 		mmdrop(mm);
1335 	}
1336 }
1337 
1338 #ifdef CONFIG_SHMEM
1339 /*
1340  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1341  * khugepaged should try to collapse the page table.
1342  */
1343 static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1344 					  unsigned long addr)
1345 {
1346 	struct mm_slot *mm_slot;
1347 
1348 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1349 
1350 	spin_lock(&khugepaged_mm_lock);
1351 	mm_slot = get_mm_slot(mm);
1352 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1353 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1354 	spin_unlock(&khugepaged_mm_lock);
1355 }
1356 
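/*
 * Clear the pmd and flush the TLB under the pmd lock, then free the
 * no-longer-used pte page table.  The caller must hold mmap_lock in
 * write mode (asserted below).
 */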
1357 static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1358 				  unsigned long addr, pmd_t *pmdp)
1359 {
1360 	spinlock_t *ptl;
1361 	pmd_t pmd;
1362 
1363 	mmap_assert_write_locked(mm);
1364 	ptl = pmd_lock(vma->vm_mm, pmdp);
1365 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
1366 	spin_unlock(ptl);
1367 	mm_dec_nr_ptes(mm);
1368 	page_table_check_pte_clear_range(mm, addr, pmd);
1369 	pte_free(mm, pmd_pgtable(pmd));
1370 }
1371 
1372 /**
1373  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1374  * address haddr.
1375  *
1376  * @mm: process address space where collapse happens
1377  * @addr: THP collapse address
1378  *
1379  * This function checks whether all the PTEs in the PMD are pointing to the
1380  * right THP. If so, retract the page table so the THP can refault in
1381  * as pmd-mapped.
1382  */
1383 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1384 {
1385 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1386 	struct vm_area_struct *vma = find_vma(mm, haddr);
1387 	struct page *hpage;
1388 	pte_t *start_pte, *pte;
1389 	pmd_t *pmd;
1390 	spinlock_t *ptl;
1391 	int count = 0;
1392 	int i;
1393 
1394 	if (!vma || !vma->vm_file ||
1395 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1396 		return;
1397 
1398 	/*
1399 	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1400 	 * collapsed by this mm. But we can still collapse if the page is
1401 	 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
1402 	 * will not fail the vma for the missing VM_HUGEPAGE.
1403 	 */
1404 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
1405 		return;
1406 
1407 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1408 	if (userfaultfd_wp(vma))
1409 		return;
1410 
1411 	hpage = find_lock_page(vma->vm_file->f_mapping,
1412 			       linear_page_index(vma, haddr));
1413 	if (!hpage)
1414 		return;
1415 
1416 	if (!PageHead(hpage))
1417 		goto drop_hpage;
1418 
1419 	pmd = mm_find_pmd(mm, haddr);
1420 	if (!pmd)
1421 		goto drop_hpage;
1422 
1423 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1424 
1425 	/* step 1: check all mapped PTEs are to the right huge page */
1426 	for (i = 0, addr = haddr, pte = start_pte;
1427 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1428 		struct page *page;
1429 
1430 		/* empty pte, skip */
1431 		if (pte_none(*pte))
1432 			continue;
1433 
1434 		/* page swapped out, abort */
1435 		if (!pte_present(*pte))
1436 			goto abort;
1437 
1438 		page = vm_normal_page(vma, addr, *pte);
1439 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1440 			page = NULL;
1441 		/*
1442 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1443 		 * page table, but the new page will not be a subpage of hpage.
1444 		 */
1445 		if (hpage + i != page)
1446 			goto abort;
1447 		count++;
1448 	}
1449 
1450 	/* step 2: adjust rmap */
1451 	for (i = 0, addr = haddr, pte = start_pte;
1452 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1453 		struct page *page;
1454 
1455 		if (pte_none(*pte))
1456 			continue;
1457 		page = vm_normal_page(vma, addr, *pte);
1458 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1459 			goto abort;
1460 		page_remove_rmap(page, vma, false);
1461 	}
1462 
1463 	pte_unmap_unlock(start_pte, ptl);
1464 
1465 	/* step 3: set proper refcount and mm_counters. */
1466 	if (count) {
1467 		page_ref_sub(hpage, count);
1468 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1469 	}
1470 
1471 	/* step 4: collapse pmd */
1472 	collapse_and_free_pmd(mm, vma, haddr, pmd);
1473 drop_hpage:
1474 	unlock_page(hpage);
1475 	put_page(hpage);
1476 	return;
1477 
1478 abort:
1479 	pte_unmap_unlock(start_pte, ptl);
1480 	goto drop_hpage;
1481 }
1482 
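/*
 * Drain the per-mm list of addresses recorded by
 * khugepaged_add_pte_mapped_thp(), retrying the page-table collapse for
 * each one under a (trylocked) mmap_write_lock.
 */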
1483 static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1484 {
1485 	struct mm_struct *mm = mm_slot->mm;
1486 	int i;
1487 
1488 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1489 		return;
1490 
1491 	if (!mmap_write_trylock(mm))
1492 		return;
1493 
1494 	if (unlikely(khugepaged_test_exit(mm)))
1495 		goto out;
1496 
1497 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1498 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1499 
1500 out:
1501 	mm_slot->nr_pte_mapped_thp = 0;
1502 	mmap_write_unlock(mm);
1503 }
1504 
1505 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
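/*
 * For every VMA that maps @pgoff at a hugepage-aligned address, try to
 * take the mmap_lock (trylock, to avoid the page-lock vs mmap_lock
 * inversion described below) and retract the now-redundant pte page
 * table so the file THP can later be faulted in as pmd-mapped.
 */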
1506 {
1507 	struct vm_area_struct *vma;
1508 	struct mm_struct *mm;
1509 	unsigned long addr;
1510 	pmd_t *pmd;
1511 
1512 	i_mmap_lock_write(mapping);
1513 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1514 		/*
1515 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1516 		 * got written to. These VMAs are likely not worth the cost of
1517 		 * mmap_write_lock(mm), as the PMD-mapping is likely to be split
1518 		 * later.
1519 		 *
1520 		 * Note that the vma->anon_vma check is racy: it can be set up by
1521 		 * the fault path after the check but before we take the mmap_lock.
1522 		 * But the page lock would prevent establishing any new ptes of the
1523 		 * page, so we are safe.
1524 		 *
1525 		 * An alternative would be to drop the check, but to check that the
1526 		 * page table is clear before calling pmdp_collapse_flush() under
1527 		 * the ptl. That has a higher chance of recovering a THP for the
1528 		 * VMA, but has a higher cost too.
1529 		 */
1530 		if (vma->anon_vma)
1531 			continue;
1532 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1533 		if (addr & ~HPAGE_PMD_MASK)
1534 			continue;
1535 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1536 			continue;
1537 		mm = vma->vm_mm;
1538 		pmd = mm_find_pmd(mm, addr);
1539 		if (!pmd)
1540 			continue;
1541 		/*
1542 		 * We need an exclusive mmap_lock to retract the page table.
1543 		 *
1544 		 * We use trylock due to lock inversion: we need to acquire
1545 		 * the mmap_lock while holding the page lock. The fault path
1546 		 * does it in the reverse order. Trylock is a way to avoid deadlock.
1547 		 */
1548 		if (mmap_write_trylock(mm)) {
1549 			/*
1550 			 * When a vma is registered with uffd-wp, we can't
1551 			 * recycle the pmd pgtable because there can be pte
1552 			 * markers installed.  Only skip this pmd, so the rest of
1553 			 * the mm/vma can still have the same file mapped hugely;
1554 			 * however, it will always be mapped in small page sizes
1555 			 * for uffd-wp registered ranges.
1556 			 */
1557 			if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
1558 				collapse_and_free_pmd(mm, vma, addr, pmd);
1559 			mmap_write_unlock(mm);
1560 		} else {
1561 			/* Try again later */
1562 			khugepaged_add_pte_mapped_thp(mm, addr);
1563 		}
1564 	}
1565 	i_mmap_unlock_write(mapping);
1566 }
1567 
1568 /**
1569  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge page.
1570  *
1571  * @mm: process address space where collapse happens
1572  * @file: file that the collapse operates on
1573  * @start: collapse start offset (page index)
1574  * @hpage: newly allocated huge page for the collapse
1575  * @node: appointed node the new huge page is allocated from
1576  *
1577  * Basic scheme is simple, details are more complex:
1578  *  - allocate and lock a new huge page;
1579  *  - scan page cache replacing old pages with the new one
1580  *    + swap/gup in pages if necessary;
1581  *    + fill in gaps;
1582  *    + keep old pages around in case rollback is required;
1583  *  - if replacing succeeds:
1584  *    + copy data over;
1585  *    + free old pages;
1586  *    + unlock huge page;
1587  *  - if replacing failed:
1588  *    + put all pages back and unfreeze them;
1589  *    + restore gaps in the page cache;
1590  *    + unlock and free huge page;
1591  */
1592 static void collapse_file(struct mm_struct *mm,
1593 		struct file *file, pgoff_t start,
1594 		struct page **hpage, int node)
1595 {
1596 	struct address_space *mapping = file->f_mapping;
1597 	gfp_t gfp;
1598 	struct page *new_page;
1599 	pgoff_t index, end = start + HPAGE_PMD_NR;
1600 	LIST_HEAD(pagelist);
1601 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1602 	int nr_none = 0, result = SCAN_SUCCEED;
1603 	bool is_shmem = shmem_file(file);
1604 	int nr;
1605 
1606 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1607 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1608 
1609 	/* Only allocate from the target node */
1610 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1611 
1612 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1613 	if (!new_page) {
1614 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1615 		goto out;
1616 	}
1617 
1618 	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
1619 		result = SCAN_CGROUP_CHARGE_FAIL;
1620 		goto out;
1621 	}
1622 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1623 
1624 	/*
1625 	 * Ensure we have slots for all the pages in the range.  This is
1626 	 * almost certainly a no-op because most of the pages must be present.
1627 	 */
1628 	do {
1629 		xas_lock_irq(&xas);
1630 		xas_create_range(&xas);
1631 		if (!xas_error(&xas))
1632 			break;
1633 		xas_unlock_irq(&xas);
1634 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1635 			result = SCAN_FAIL;
1636 			goto out;
1637 		}
1638 	} while (1);
1639 
1640 	__SetPageLocked(new_page);
1641 	if (is_shmem)
1642 		__SetPageSwapBacked(new_page);
1643 	new_page->index = start;
1644 	new_page->mapping = mapping;
1645 
1646 	/*
1647 	 * At this point the new_page is locked and not up-to-date.
1648 	 * It's safe to insert it into the page cache, because nobody would
1649 	 * be able to map it or use it in another way until we unlock it.
1650 	 */
1651 
1652 	xas_set(&xas, start);
1653 	for (index = start; index < end; index++) {
1654 		struct page *page = xas_next(&xas);
1655 
1656 		VM_BUG_ON(index != xas.xa_index);
1657 		if (is_shmem) {
1658 			if (!page) {
1659 				/*
1660 				 * Stop if extent has been truncated or
1661 				 * hole-punched, and is now completely
1662 				 * empty.
1663 				 */
1664 				if (index == start) {
1665 					if (!xas_next_entry(&xas, end - 1)) {
1666 						result = SCAN_TRUNCATED;
1667 						goto xa_locked;
1668 					}
1669 					xas_set(&xas, index);
1670 				}
1671 				if (!shmem_charge(mapping->host, 1)) {
1672 					result = SCAN_FAIL;
1673 					goto xa_locked;
1674 				}
1675 				xas_store(&xas, new_page);
1676 				nr_none++;
1677 				continue;
1678 			}
1679 
1680 			if (xa_is_value(page) || !PageUptodate(page)) {
1681 				xas_unlock_irq(&xas);
1682 				/* swap in or instantiate fallocated page */
1683 				if (shmem_getpage(mapping->host, index, &page,
1684 						  SGP_NOALLOC)) {
1685 					result = SCAN_FAIL;
1686 					goto xa_unlocked;
1687 				}
1688 			} else if (trylock_page(page)) {
1689 				get_page(page);
1690 				xas_unlock_irq(&xas);
1691 			} else {
1692 				result = SCAN_PAGE_LOCK;
1693 				goto xa_locked;
1694 			}
1695 		} else {	/* !is_shmem */
1696 			if (!page || xa_is_value(page)) {
1697 				xas_unlock_irq(&xas);
1698 				page_cache_sync_readahead(mapping, &file->f_ra,
1699 							  file, index,
1700 							  end - index);
1701 				/* drain pagevecs to help isolate_lru_page() */
1702 				lru_add_drain();
1703 				page = find_lock_page(mapping, index);
1704 				if (unlikely(page == NULL)) {
1705 					result = SCAN_FAIL;
1706 					goto xa_unlocked;
1707 				}
1708 			} else if (PageDirty(page)) {
1709 				/*
1710 				 * khugepaged only works on a read-only fd,
1711 				 * so this page is dirty because it hasn't
1712 				 * been flushed since the first write. There
1713 				 * won't be new dirty pages.
1714 				 *
1715 				 * Trigger an async flush here and hope the
1716 				 * writeback is done when khugepaged
1717 				 * revisits this page.
1718 				 *
1719 				 * This is a one-off situation. We are not
1720 				 * forcing writeback in a loop.
1721 				 */
1722 				xas_unlock_irq(&xas);
1723 				filemap_flush(mapping);
1724 				result = SCAN_FAIL;
1725 				goto xa_unlocked;
1726 			} else if (PageWriteback(page)) {
1727 				xas_unlock_irq(&xas);
1728 				result = SCAN_FAIL;
1729 				goto xa_unlocked;
1730 			} else if (trylock_page(page)) {
1731 				get_page(page);
1732 				xas_unlock_irq(&xas);
1733 			} else {
1734 				result = SCAN_PAGE_LOCK;
1735 				goto xa_locked;
1736 			}
1737 		}
1738 
1739 		/*
1740 		 * The page must be locked, so we can drop the i_pages lock
1741 		 * without racing with truncate.
1742 		 */
1743 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1744 
1745 		/* make sure the page is up to date */
1746 		if (unlikely(!PageUptodate(page))) {
1747 			result = SCAN_FAIL;
1748 			goto out_unlock;
1749 		}
1750 
1751 		/*
1752 		 * If file was truncated then extended, or hole-punched, before
1753 		 * we locked the first page, then a THP might be there already.
1754 		 */
1755 		if (PageTransCompound(page)) {
1756 			result = SCAN_PAGE_COMPOUND;
1757 			goto out_unlock;
1758 		}
1759 
1760 		if (page_mapping(page) != mapping) {
1761 			result = SCAN_TRUNCATED;
1762 			goto out_unlock;
1763 		}
1764 
1765 		if (!is_shmem && (PageDirty(page) ||
1766 				  PageWriteback(page))) {
1767 			/*
1768 			 * khugepaged only works on a read-only fd, so this
1769 			 * page is dirty because it hasn't been flushed
1770 			 * since the first write.
1771 			 */
1772 			result = SCAN_FAIL;
1773 			goto out_unlock;
1774 		}
1775 
1776 		if (isolate_lru_page(page)) {
1777 			result = SCAN_DEL_PAGE_LRU;
1778 			goto out_unlock;
1779 		}
1780 
1781 		if (page_has_private(page) &&
1782 		    !try_to_release_page(page, GFP_KERNEL)) {
1783 			result = SCAN_PAGE_HAS_PRIVATE;
1784 			putback_lru_page(page);
1785 			goto out_unlock;
1786 		}
1787 
1788 		if (page_mapped(page))
1789 			try_to_unmap(page_folio(page),
1790 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1791 
1792 		xas_lock_irq(&xas);
1793 		xas_set(&xas, index);
1794 
1795 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1796 
1797 		/*
1798 		 * The page is expected to have page_count() == 3:
1799 		 *  - we hold a pin on it;
1800 		 *  - one reference from page cache;
1801 		 *  - one from isolate_lru_page;
1802 		 */
1803 		if (!page_ref_freeze(page, 3)) {
1804 			result = SCAN_PAGE_COUNT;
1805 			xas_unlock_irq(&xas);
1806 			putback_lru_page(page);
1807 			goto out_unlock;
1808 		}
1809 
1810 		/*
1811 		 * Add the page to the list to be able to undo the collapse if
1812 		 * something goes wrong.
1813 		 */
1814 		list_add_tail(&page->lru, &pagelist);
1815 
1816 		/* Finally, replace with the new page. */
1817 		xas_store(&xas, new_page);
1818 		continue;
1819 out_unlock:
1820 		unlock_page(page);
1821 		put_page(page);
1822 		goto xa_unlocked;
1823 	}
1824 	nr = thp_nr_pages(new_page);
1825 
1826 	if (is_shmem)
1827 		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
1828 	else {
1829 		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
1830 		filemap_nr_thps_inc(mapping);
1831 		/*
1832 		 * Paired with smp_mb() in do_dentry_open() to ensure
1833 		 * i_writecount is up to date and the update to nr_thps is
1834 		 * visible. Ensures the page cache will be truncated if the
1835 		 * file is opened writable.
1836 		 */
1837 		smp_mb();
1838 		if (inode_is_open_for_write(mapping->host)) {
1839 			result = SCAN_FAIL;
1840 			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1841 			filemap_nr_thps_dec(mapping);
1842 			goto xa_locked;
1843 		}
1844 	}
1845 
1846 	if (nr_none) {
1847 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1848 		/* nr_none is always 0 for non-shmem. */
1849 		__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1850 	}
1851 
1852 	/* Join all the small entries into a single multi-index entry */
1853 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
1854 	xas_store(&xas, new_page);
1855 xa_locked:
1856 	xas_unlock_irq(&xas);
1857 xa_unlocked:
1858 
1859 	/*
1860 	 * If collapse is successful, flush must be done now before copying.
1861 	 * If collapse is unsuccessful, does flush actually need to be done?
1862 	 * Do it anyway, to clear the state.
1863 	 */
1864 	try_to_unmap_flush();
1865 
1866 	if (result == SCAN_SUCCEED) {
1867 		struct page *page, *tmp;
1868 
1869 		/*
1870 		 * Replacing the old pages with the new one has succeeded; now we
1871 		 * need to copy the content and free the old pages.
1872 		 */
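		/*
		 * Indices in [start, end) with no page on the pagelist are the
		 * nr_none holes; zero-fill the matching subpage.  index %
		 * HPAGE_PMD_NR selects the subpage of new_page (start is
		 * HPAGE_PMD_NR-aligned, so this equals index - start).
		 */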
1873 		index = start;
1874 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1875 			while (index < page->index) {
1876 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1877 				index++;
1878 			}
1879 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1880 					page);
1881 			list_del(&page->lru);
1882 			page->mapping = NULL;
1883 			page_ref_unfreeze(page, 1);
1884 			ClearPageActive(page);
1885 			ClearPageUnevictable(page);
1886 			unlock_page(page);
1887 			put_page(page);
1888 			index++;
1889 		}
1890 		while (index < end) {
1891 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1892 			index++;
1893 		}
1894 
1895 		SetPageUptodate(new_page);
1896 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1897 		if (is_shmem)
1898 			set_page_dirty(new_page);
1899 		lru_cache_add(new_page);
1900 
1901 		/*
1902 		 * Remove pte page tables, so we can re-fault the page as huge.
1903 		 */
1904 		retract_page_tables(mapping, start);
1905 		*hpage = NULL;
1906 
1907 		khugepaged_pages_collapsed++;
1908 	} else {
1909 		struct page *page;
1910 
1911 		/* Something went wrong: roll back page cache changes */
1912 		xas_lock_irq(&xas);
1913 		if (nr_none) {
1914 			mapping->nrpages -= nr_none;
1915 			shmem_uncharge(mapping->host, nr_none);
1916 		}
1917 
1918 		xas_set(&xas, start);
1919 		xas_for_each(&xas, page, end - 1) {
1920 			page = list_first_entry_or_null(&pagelist,
1921 					struct page, lru);
1922 			if (!page || xas.xa_index < page->index) {
1923 				if (!nr_none)
1924 					break;
1925 				nr_none--;
1926 				/* Put holes back where they were */
1927 				xas_store(&xas, NULL);
1928 				continue;
1929 			}
1930 
1931 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1932 
1933 			/* Unfreeze the page. */
1934 			list_del(&page->lru);
1935 			page_ref_unfreeze(page, 2);
1936 			xas_store(&xas, page);
1937 			xas_pause(&xas);
1938 			xas_unlock_irq(&xas);
1939 			unlock_page(page);
1940 			putback_lru_page(page);
1941 			xas_lock_irq(&xas);
1942 		}
1943 		VM_BUG_ON(nr_none);
1944 		xas_unlock_irq(&xas);
1945 
1946 		new_page->mapping = NULL;
1947 	}
1948 
1949 	unlock_page(new_page);
1950 out:
1951 	VM_BUG_ON(!list_empty(&pagelist));
1952 	if (!IS_ERR_OR_NULL(*hpage))
1953 		mem_cgroup_uncharge(page_folio(*hpage));
1954 	/* TODO: tracepoints */
1955 }
1956 
1957 static void khugepaged_scan_file(struct mm_struct *mm,
1958 		struct file *file, pgoff_t start, struct page **hpage)
1959 {
1960 	struct page *page = NULL;
1961 	struct address_space *mapping = file->f_mapping;
1962 	XA_STATE(xas, &mapping->i_pages, start);
1963 	int present, swap;
1964 	int node = NUMA_NO_NODE;
1965 	int result = SCAN_SUCCEED;
1966 
1967 	present = 0;
1968 	swap = 0;
1969 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1970 	rcu_read_lock();
1971 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1972 		if (xas_retry(&xas, page))
1973 			continue;
1974 
1975 		if (xa_is_value(page)) {
1976 			if (++swap > khugepaged_max_ptes_swap) {
1977 				result = SCAN_EXCEED_SWAP_PTE;
1978 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1979 				break;
1980 			}
1981 			continue;
1982 		}
1983 
1984 		/*
1985 		 * XXX: khugepaged should compact smaller compound pages
1986 		 * into a PMD-sized page
1987 		 */
1988 		if (PageTransCompound(page)) {
1989 			result = SCAN_PAGE_COMPOUND;
1990 			break;
1991 		}
1992 
1993 		node = page_to_nid(page);
1994 		if (khugepaged_scan_abort(node)) {
1995 			result = SCAN_SCAN_ABORT;
1996 			break;
1997 		}
1998 		khugepaged_node_load[node]++;
1999 
2000 		if (!PageLRU(page)) {
2001 			result = SCAN_PAGE_LRU;
2002 			break;
2003 		}
2004 
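		/*
		 * Expect one reference from the page cache, one per PTE mapping
		 * and one if private (fs) data is attached.  Any extra reference
		 * means the page is pinned elsewhere and the collapse would fail
		 * to freeze it later.
		 */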
2005 		if (page_count(page) !=
2006 		    1 + page_mapcount(page) + page_has_private(page)) {
2007 			result = SCAN_PAGE_COUNT;
2008 			break;
2009 		}
2010 
2011 		/*
2012 		 * We probably should check if the page is referenced here, but
2013 		 * nobody would transfer pte_young() to PageReferenced() for us.
2014 		 * And rmap walk here is just too costly...
2015 		 */
2016 
2017 		present++;
2018 
2019 		if (need_resched()) {
2020 			xas_pause(&xas);
2021 			cond_resched_rcu();
2022 		}
2023 	}
2024 	rcu_read_unlock();
2025 
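	/*
	 * Entries absent from the page cache count as "none" PTEs, mirroring
	 * the limit used for anonymous memory: collapse only if at least
	 * HPAGE_PMD_NR - khugepaged_max_ptes_none pages are present.
	 */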
2026 	if (result == SCAN_SUCCEED) {
2027 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2028 			result = SCAN_EXCEED_NONE_PTE;
2029 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2030 		} else {
2031 			node = khugepaged_find_target_node();
2032 			collapse_file(mm, file, start, hpage, node);
2033 		}
2034 	}
2035 
2036 	/* TODO: tracepoints */
2037 }
2038 #else
2039 static void khugepaged_scan_file(struct mm_struct *mm,
2040 		struct file *file, pgoff_t start, struct page **hpage)
2041 {
2042 	BUILD_BUG();
2043 }
2044 
2045 static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2046 {
2047 }
2048 #endif
2049 
2050 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2051 					    struct page **hpage)
2052 	__releases(&khugepaged_mm_lock)
2053 	__acquires(&khugepaged_mm_lock)
2054 {
2055 	struct mm_slot *mm_slot;
2056 	struct mm_struct *mm;
2057 	struct vm_area_struct *vma;
2058 	int progress = 0;
2059 
2060 	VM_BUG_ON(!pages);
2061 	lockdep_assert_held(&khugepaged_mm_lock);
2062 
2063 	if (khugepaged_scan.mm_slot)
2064 		mm_slot = khugepaged_scan.mm_slot;
2065 	else {
2066 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2067 				     struct mm_slot, mm_node);
2068 		khugepaged_scan.address = 0;
2069 		khugepaged_scan.mm_slot = mm_slot;
2070 	}
2071 	spin_unlock(&khugepaged_mm_lock);
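	/*
	 * Collapse any pte-mapped THPs recorded for this mm before scanning
	 * its vmas; this runs without khugepaged_mm_lock held.
	 */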
2072 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2073 
2074 	mm = mm_slot->mm;
2075 	/*
2076 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
2077 	 * the next mm on the list.
2078 	 */
2079 	vma = NULL;
2080 	if (unlikely(!mmap_read_trylock(mm)))
2081 		goto breakouterloop_mmap_lock;
2082 	if (likely(!khugepaged_test_exit(mm)))
2083 		vma = find_vma(mm, khugepaged_scan.address);
2084 
2085 	progress++;
2086 	for (; vma; vma = vma->vm_next) {
2087 		unsigned long hstart, hend;
2088 
2089 		cond_resched();
2090 		if (unlikely(khugepaged_test_exit(mm))) {
2091 			progress++;
2092 			break;
2093 		}
2094 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
2095 skip:
2096 			progress++;
2097 			continue;
2098 		}
2099 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2100 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2101 		if (khugepaged_scan.address > hend)
2102 			goto skip;
2103 		if (khugepaged_scan.address < hstart)
2104 			khugepaged_scan.address = hstart;
2105 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2106 
2107 		while (khugepaged_scan.address < hend) {
2108 			int ret;
2109 			cond_resched();
2110 			if (unlikely(khugepaged_test_exit(mm)))
2111 				goto breakouterloop;
2112 
2113 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2114 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2115 				  hend);
2116 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2117 				struct file *file = get_file(vma->vm_file);
2118 				pgoff_t pgoff = linear_page_index(vma,
2119 						khugepaged_scan.address);
2120 
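				/*
				 * File collapse does not need mmap_lock, so
				 * drop it here; ret = 1 routes the exit below
				 * through breakouterloop_mmap_lock, skipping
				 * the duplicate mmap_read_unlock().
				 */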
2121 				mmap_read_unlock(mm);
2122 				ret = 1;
2123 				khugepaged_scan_file(mm, file, pgoff, hpage);
2124 				fput(file);
2125 			} else {
2126 				ret = khugepaged_scan_pmd(mm, vma,
2127 						khugepaged_scan.address,
2128 						hpage);
2129 			}
2130 			/* move to next address */
2131 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2132 			progress += HPAGE_PMD_NR;
2133 			if (ret)
2134 				/* we released mmap_lock so break loop */
2135 				goto breakouterloop_mmap_lock;
2136 			if (progress >= pages)
2137 				goto breakouterloop;
2138 		}
2139 	}
2140 breakouterloop:
2141 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2142 breakouterloop_mmap_lock:
2143 
2144 	spin_lock(&khugepaged_mm_lock);
2145 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2146 	/*
2147 	 * Release the current mm_slot if this mm is about to die, or
2148 	 * if we scanned all vmas of this mm.
2149 	 */
2150 	if (khugepaged_test_exit(mm) || !vma) {
2151 		/*
2152 		 * Make sure that if mm_users is reaching zero while
2153 		 * khugepaged runs here, khugepaged_exit will find
2154 		 * mm_slot not pointing to the exiting mm.
2155 		 */
2156 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2157 			khugepaged_scan.mm_slot = list_entry(
2158 				mm_slot->mm_node.next,
2159 				struct mm_slot, mm_node);
2160 			khugepaged_scan.address = 0;
2161 		} else {
2162 			khugepaged_scan.mm_slot = NULL;
2163 			khugepaged_full_scans++;
2164 		}
2165 
2166 		collect_mm_slot(mm_slot);
2167 	}
2168 
2169 	return progress;
2170 }
2171 
2172 static int khugepaged_has_work(void)
2173 {
2174 	return !list_empty(&khugepaged_scan.mm_head) &&
2175 		khugepaged_enabled();
2176 }
2177 
2178 static int khugepaged_wait_event(void)
2179 {
2180 	return !list_empty(&khugepaged_scan.mm_head) ||
2181 		kthread_should_stop();
2182 }
2183 
2184 static void khugepaged_do_scan(void)
2185 {
2186 	struct page *hpage = NULL;
2187 	unsigned int progress = 0, pass_through_head = 0;
2188 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2189 	bool wait = true;
2190 
2191 	lru_add_drain_all();
2192 
2193 	while (progress < pages) {
2194 		if (!khugepaged_prealloc_page(&hpage, &wait))
2195 			break;
2196 
2197 		cond_resched();
2198 
2199 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2200 			break;
2201 
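		/*
		 * pass_through_head counts how many times scanning (re)starts
		 * from the head of the mm list; once a second pass would begin,
		 * treat this invocation as done.
		 */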
2202 		spin_lock(&khugepaged_mm_lock);
2203 		if (!khugepaged_scan.mm_slot)
2204 			pass_through_head++;
2205 		if (khugepaged_has_work() &&
2206 		    pass_through_head < 2)
2207 			progress += khugepaged_scan_mm_slot(pages - progress,
2208 							    &hpage);
2209 		else
2210 			progress = pages;
2211 		spin_unlock(&khugepaged_mm_lock);
2212 	}
2213 
2214 	if (!IS_ERR_OR_NULL(hpage))
2215 		put_page(hpage);
2216 }
2217 
2218 static bool khugepaged_should_wakeup(void)
2219 {
2220 	return kthread_should_stop() ||
2221 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2222 }
2223 
2224 static void khugepaged_wait_work(void)
2225 {
2226 	if (khugepaged_has_work()) {
2227 		const unsigned long scan_sleep_jiffies =
2228 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2229 
2230 		if (!scan_sleep_jiffies)
2231 			return;
2232 
2233 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2234 		wait_event_freezable_timeout(khugepaged_wait,
2235 					     khugepaged_should_wakeup(),
2236 					     scan_sleep_jiffies);
2237 		return;
2238 	}
2239 
2240 	if (khugepaged_enabled())
2241 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2242 }
2243 
2244 static int khugepaged(void *none)
2245 {
2246 	struct mm_slot *mm_slot;
2247 
2248 	set_freezable();
2249 	set_user_nice(current, MAX_NICE);
2250 
2251 	while (!kthread_should_stop()) {
2252 		khugepaged_do_scan();
2253 		khugepaged_wait_work();
2254 	}
2255 
2256 	spin_lock(&khugepaged_mm_lock);
2257 	mm_slot = khugepaged_scan.mm_slot;
2258 	khugepaged_scan.mm_slot = NULL;
2259 	if (mm_slot)
2260 		collect_mm_slot(mm_slot);
2261 	spin_unlock(&khugepaged_mm_lock);
2262 	return 0;
2263 }
2264 
2265 static void set_recommended_min_free_kbytes(void)
2266 {
2267 	struct zone *zone;
2268 	int nr_zones = 0;
2269 	unsigned long recommended_min;
2270 
2271 	if (!khugepaged_enabled()) {
2272 		calculate_min_free_kbytes();
2273 		goto update_wmarks;
2274 	}
2275 
2276 	for_each_populated_zone(zone) {
2277 		/*
2278 		 * We don't need to worry about fragmentation of
2279 		 * ZONE_MOVABLE since it only has movable pages.
2280 		 */
2281 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2282 			continue;
2283 
2284 		nr_zones++;
2285 	}
2286 
2287 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2288 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2289 
2290 	/*
2291 	 * Make sure that on average at least two pageblocks are almost free
2292 	 * of another type, one for a migratetype to fall back to and a
2293 	 * second to avoid subsequent fallbacks of other types.  There are 3
2294 	 * MIGRATE_TYPES we care about.
2295 	 */
2296 	recommended_min += pageblock_nr_pages * nr_zones *
2297 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
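	/*
	 * Example with illustrative values only: for 2MB pageblocks on 4K
	 * pages (pageblock_nr_pages == 512), two qualifying zones and
	 * MIGRATE_PCPTYPES == 3, this asks for 512 * 2 * 2 + 512 * 2 * 3 * 3
	 * = 11264 pages (~44MB), before the 5% of lowmem cap below.
	 */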
2298 
2299 	/* don't ever allow reserving more than 5% of the lowmem */
2300 	recommended_min = min(recommended_min,
2301 			      (unsigned long) nr_free_buffer_pages() / 20);
2302 	recommended_min <<= (PAGE_SHIFT-10);
2303 
2304 	if (recommended_min > min_free_kbytes) {
2305 		if (user_min_free_kbytes >= 0)
2306 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2307 				min_free_kbytes, recommended_min);
2308 
2309 		min_free_kbytes = recommended_min;
2310 	}
2311 
2312 update_wmarks:
2313 	setup_per_zone_wmarks();
2314 }
2315 
2316 int start_stop_khugepaged(void)
2317 {
2318 	int err = 0;
2319 
2320 	mutex_lock(&khugepaged_mutex);
2321 	if (khugepaged_enabled()) {
2322 		if (!khugepaged_thread)
2323 			khugepaged_thread = kthread_run(khugepaged, NULL,
2324 							"khugepaged");
2325 		if (IS_ERR(khugepaged_thread)) {
2326 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2327 			err = PTR_ERR(khugepaged_thread);
2328 			khugepaged_thread = NULL;
2329 			goto fail;
2330 		}
2331 
2332 		if (!list_empty(&khugepaged_scan.mm_head))
2333 			wake_up_interruptible(&khugepaged_wait);
2334 	} else if (khugepaged_thread) {
2335 		kthread_stop(khugepaged_thread);
2336 		khugepaged_thread = NULL;
2337 	}
2338 	set_recommended_min_free_kbytes();
2339 fail:
2340 	mutex_unlock(&khugepaged_mutex);
2341 	return err;
2342 }
2343 
2344 void khugepaged_min_free_kbytes_update(void)
2345 {
2346 	mutex_lock(&khugepaged_mutex);
2347 	if (khugepaged_enabled() && khugepaged_thread)
2348 		set_recommended_min_free_kbytes();
2349 	mutex_unlock(&khugepaged_mutex);
2350 }
2351