// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped the
 * way it would have been mapped had the vma been large enough at page
 * fault time.
 *
 * Note that these limits are only respected if collapse was initiated by
 * khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

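/* Max number of pte-mapped THP addresses buffered per mm; see struct khugepaged_mm_slot */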
#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of pte-mapped THPs
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
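/*
 * These tunables live under /sys/kernel/mm/transparent_hugepage/khugepaged/
 * (see khugepaged_attr_group below). An illustrative shell session:
 *
 *   echo 256 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * Values above HPAGE_PMD_NR - 1 are rejected with -EINVAL by the store
 * handlers below.
 */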
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes eligible for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma, even if the mm stays registered in khugepaged
		 * because it was registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

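	/*
	 * For example, with 4 KiB base pages and 2 MiB PMDs (HPAGE_PMD_NR ==
	 * 512), these defaults work out to: scan 4096 pages per pass, and
	 * tolerate at most 511 empty, 64 swapped-out and 256 shared ptes per
	 * candidate range.
	 */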
	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

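/*
 * A page is only safe to collapse if its raw refcount is fully explained
 * by its mappings, plus one reference for the swap cache when present;
 * anything beyond that implies a GUP or other external pin. For example,
 * a base page mapped by two processes and sitting in the swap cache is
 * expected to have page_count() == 2 + 1.
 */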
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

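	/*
	 * Abort the scan if any node already hit is farther from @nid than
	 * node_reclaim_distance (RECLAIM_DISTANCE, typically 30): collapsing
	 * would then mix pages across a "remote enough" NUMA boundary.
	 */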
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
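/*
 * GFP_TRANSHUGE is GFP_TRANSHUGE_LIGHT plus __GFP_DIRECT_RECLAIM, so the
 * defrag flag only controls whether the allocation may stall for direct
 * reclaim/compaction.
 */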
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

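	/*
	 * Allow allocation fallback to every node tied for the maximum hit
	 * count; e.g. with node_load = {300, 212, 300} both node 0 and
	 * node 2 end up in alloc_nmask, while node 0 is the target.
	 */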
	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * after re-taking the mmap_lock.
 * Returns an enum scan_result value.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected: the address may be unmapped and then
	 * remapped to file after khugepaged re-acquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here, and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case.  Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to the pagetables, with the exception of
	 * gup_fast (handled later by the ptep_clear_flush) and the VM
	 * (handled by the anon_vma lock + PG_lock).
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * keep the copy_huge_page writes from becoming visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries.  Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range.  So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The check may be racy: it can see total_mapcount > refcount
		 * in some cases.  But such a state is ephemeral, and we can
		 * always retry the collapse later.  It may also report a
		 * false positive if the page has excessive GUP pins
		 * (i.e. 512), but since the same check will be done again
		 * later, the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 *
 * Note that the following race exists:
 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 *     emptying A's ->pte_mapped_thp[] array.
 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 *     ->pte_mapped_thp[] array.
 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
 * Thus, it's possible the same address is added multiple times for the same
 * mm_struct.  Should this happen, we'll simply attempt
 * collapse_pte_mapped_thp() multiple times for the same address, under the same
 * exclusive mmap_lock, and assuming the first call is successful, subsequent
 * attempts will return quickly (without grabbing any additional locks) when
 * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
 * check, and since this is a rare occurrence, the cost of preventing this
 * "multiple-add" is thought to be more expensive than just handling it, should
 * it occur.
 */
static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	bool ret = false;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
		ret = true;
	}
	spin_unlock(&khugepaged_mm_lock);
	return ret;
}

/* hpage must be locked, and mmap_lock must be held */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			    bool install_pmd)
{
	struct mmu_notifier_range range;
	bool notified = false;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, pgt_pmd;
	spinlock_t *pml, *ptl;
	int nr_ptes = 0, result = SCAN_FAIL;
	int i;

	mmap_assert_locked(mm);

	/* First check VMA found, in case page tables are being torn down */
	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)
		return result;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return SCAN_PAGE_NULL;

	if (!PageHead(hpage)) {
		result = SCAN_FAIL;
		goto drop_hpage;
	}

	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_hpage;
	}

	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_PMD_NONE:
		/*
		 * All pte entries have been removed and pmd cleared.
		 * Skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_hpage;
	}

	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto drop_hpage;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		/* empty pte, skip */
		if (pte_none(ptent))
			continue;

		/* page swapped out, abort */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
	}

	pte_unmap_unlock(start_pte, ptl);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	notified = true;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto abort;

	/* step 2: clear page table and adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * We dropped ptl after the first scan, to do the mmu_notifier:
		 * page lock stops more PTEs of the hpage being faulted in, but
		 * does not stop write faults COWing anon copies from existing
		 * PTEs; and does not stop those being swapped out or migrated.
		 */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}
		page = vm_normal_page(vma, addr, ptent);
		if (hpage + i != page)
			goto abort;

		/*
		 * Must clear entry, or a racing truncate may re-remove it.
		 * TLB flush can be left until pmdp_collapse_flush() does it.
		 * PTE dirty? Shmem page is already dirty; file is read-only.
		 */
		ptep_clear(mm, addr, pte);
		page_remove_rmap(page, vma, false);
		nr_ptes++;
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (nr_ptes) {
		page_ref_sub(hpage, nr_ptes);
		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
	}

	/* step 4: remove page table */

	/* Huge page lock is still held, so page table must remain empty */
	pml = pmd_lock(mm, pmd);
	if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	pmdp_get_lockless_sync();
	if (ptl != pml)
		spin_unlock(ptl);
	spin_unlock(pml);

	mmu_notifier_invalidate_range_end(&range);

	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
	pte_free_defer(mm, pmd_pgtable(pgt_pmd));

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
			? set_huge_pmd(vma, haddr, pmd, hpage)
			: SCAN_SUCCEED;
	goto drop_hpage;
abort:
	if (nr_ptes) {
		flush_tlb_mm(mm);
		page_ref_sub(hpage, nr_ptes);
		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
	}
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (notified)
		mmu_notifier_invalidate_range_end(&range);
drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return result;
}

static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(hpage_collapse_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		struct mmu_notifier_range range;
		struct mm_struct *mm;
		unsigned long addr;
		pmd_t *pmd, pgt_pmd;
		spinlock_t *pml;
		spinlock_t *ptl;
		bool skipped_uffd = false;

		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth removing
		 * page tables from, as PMD-mapping is likely to be split later.
		 */
		if (READ_ONCE(vma->anon_vma))
			continue;

		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK ||
		    vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;

		mm = vma->vm_mm;
		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
			continue;

		if (hpage_collapse_test_exit(mm))
			continue;
		/*
		 * When a vma is registered with uffd-wp, we cannot recycle
		 * the page table because there may be pte markers installed.
		 * Other vmas can still have the same file mapped hugely, but
		 * skip this one: it will always be mapped in small page size
		 * for uffd-wp registered ranges.
		 */
		if (userfaultfd_wp(vma))
			continue;

		/* PTEs were notified when unmapped; but now for the PMD? */
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
					addr, addr + HPAGE_PMD_SIZE);
		mmu_notifier_invalidate_range_start(&range);

		pml = pmd_lock(mm, pmd);
		ptl = pte_lockptr(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

		/*
		 * Huge page lock is still held, so normally the page table
		 * must remain empty; and we have already skipped anon_vma
		 * and userfaultfd_wp() vmas.  But since the mmap_lock is not
		 * held, it is still possible for a racing userfaultfd_ioctl()
		 * to have inserted ptes or markers.  Now that we hold ptlock,
		 * repeating the anon_vma check protects from one category,
		 * and repeating the userfaultfd_wp() check from another.
		 */
		if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
			skipped_uffd = true;
		} else {
			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
			pmdp_get_lockless_sync();
		}

		if (ptl != pml)
			spin_unlock(ptl);
		spin_unlock(pml);

		mmu_notifier_invalidate_range_end(&range);

		if (!skipped_uffd) {
			mm_dec_nr_ptes(mm);
			page_table_check_pte_clear_range(mm, addr, pgt_pmd);
			pte_free_defer(mm, pmd_pgtable(pgt_pmd));
		}
	}
	i_mmap_unlock_read(mapping);
}

1818 /**
1819  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1820  *
1821  * @mm: process address space where collapse happens
1822  * @addr: virtual collapse start address
1823  * @file: file that the collapse operates on
1824  * @start: page offset in @file at which the collapse starts
1825  * @cc: collapse context and scratchpad
1826  *
1827  * Basic scheme is simple, details are more complex:
1828  *  - allocate and lock a new huge page;
1829  *  - scan page cache, locking old pages
1830  *    + swap/gup in pages if necessary;
1831  *  - copy data to new page
1832  *  - handle shmem holes
1833  *    + re-validate that holes weren't filled by someone else
1834  *    + check for userfaultfd
1835  *  - finalize updates to the page cache;
1836  *  - if replacing succeeds:
1837  *    + unlock huge page;
1838  *    + free old pages;
1839  *  - if replacing failed:
1840  *    + unlock old pages
1841  *    + unlock and free huge page;
1842  */
1843 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1844 			 struct file *file, pgoff_t start,
1845 			 struct collapse_control *cc)
1846 {
1847 	struct address_space *mapping = file->f_mapping;
1848 	struct page *hpage;
1849 	struct page *page;
1850 	struct page *tmp;
1851 	struct folio *folio;
1852 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1853 	LIST_HEAD(pagelist);
1854 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1855 	int nr_none = 0, result = SCAN_SUCCEED;
1856 	bool is_shmem = shmem_file(file);
1857 	int nr = 0;
1858 
1859 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1860 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1861 
1862 	result = alloc_charge_hpage(&hpage, mm, cc);
1863 	if (result != SCAN_SUCCEED)
1864 		goto out;
1865 
1866 	__SetPageLocked(hpage);
1867 	if (is_shmem)
1868 		__SetPageSwapBacked(hpage);
1869 	hpage->index = start;
1870 	hpage->mapping = mapping;
1871 
1872 	/*
1873 	 * Ensure we have slots for all the pages in the range.  This is
1874 	 * almost certainly a no-op because most of the pages must be present.
1875 	 */
1876 	do {
1877 		xas_lock_irq(&xas);
1878 		xas_create_range(&xas);
1879 		if (!xas_error(&xas))
1880 			break;
1881 		xas_unlock_irq(&xas);
1882 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1883 			result = SCAN_FAIL;
1884 			goto rollback;
1885 		}
1886 	} while (1);
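	/*
	 * The loop above pre-allocates XArray nodes for the whole range up
	 * front, where it is still safe to drop the xa_lock, sleep in
	 * xas_nomem() and retry on memory pressure.
	 */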
1887 
1888 	for (index = start; index < end; index++) {
1889 		xas_set(&xas, index);
1890 		page = xas_load(&xas);
1891 
1892 		VM_BUG_ON(index != xas.xa_index);
1893 		if (is_shmem) {
1894 			if (!page) {
1895 				/*
1896 				 * Stop if extent has been truncated or
1897 				 * hole-punched, and is now completely
1898 				 * empty.
1899 				 */
1900 				if (index == start) {
1901 					if (!xas_next_entry(&xas, end - 1)) {
1902 						result = SCAN_TRUNCATED;
1903 						goto xa_locked;
1904 					}
1905 				}
1906 				if (!shmem_charge(mapping->host, 1)) {
1907 					result = SCAN_FAIL;
1908 					goto xa_locked;
1909 				}
1910 				nr_none++;
1911 				continue;
1912 			}
1913 
1914 			if (xa_is_value(page) || !PageUptodate(page)) {
1915 				xas_unlock_irq(&xas);
1916 				/* swap in or instantiate fallocated page */
1917 				if (shmem_get_folio(mapping->host, index,
1918 						&folio, SGP_NOALLOC)) {
1919 					result = SCAN_FAIL;
1920 					goto xa_unlocked;
1921 				}
1922 				/* drain lru cache to help folio_isolate_lru() */
1923 				lru_add_drain();
1924 				page = folio_file_page(folio, index);
1925 			} else if (trylock_page(page)) {
1926 				get_page(page);
1927 				xas_unlock_irq(&xas);
1928 			} else {
1929 				result = SCAN_PAGE_LOCK;
1930 				goto xa_locked;
1931 			}
1932 		} else {	/* !is_shmem */
1933 			if (!page || xa_is_value(page)) {
1934 				xas_unlock_irq(&xas);
1935 				page_cache_sync_readahead(mapping, &file->f_ra,
1936 							  file, index,
1937 							  end - index);
1938 				/* drain lru cache to help folio_isolate_lru() */
1939 				lru_add_drain();
1940 				page = find_lock_page(mapping, index);
1941 				if (unlikely(page == NULL)) {
1942 					result = SCAN_FAIL;
1943 					goto xa_unlocked;
1944 				}
1945 			} else if (PageDirty(page)) {
1946 				/*
1947 				 * khugepaged only works on read-only fd,
1948 				 * so this page is dirty because it hasn't
1949 				 * been flushed since first write. There
1950 				 * won't be new dirty pages.
1951 				 *
1952 				 * Trigger async flush here and hope the
1953 				 * writeback is done when khugepaged
1954 				 * revisits this page.
1955 				 *
1956 				 * This is a one-off situation. We are not
1957 				 * forcing writeback in a loop.
1958 				 */
1959 				xas_unlock_irq(&xas);
1960 				filemap_flush(mapping);
1961 				result = SCAN_FAIL;
1962 				goto xa_unlocked;
1963 			} else if (PageWriteback(page)) {
1964 				xas_unlock_irq(&xas);
1965 				result = SCAN_FAIL;
1966 				goto xa_unlocked;
1967 			} else if (trylock_page(page)) {
1968 				get_page(page);
1969 				xas_unlock_irq(&xas);
1970 			} else {
1971 				result = SCAN_PAGE_LOCK;
1972 				goto xa_locked;
1973 			}
1974 		}
1975 
1976 		/*
1977 		 * The page must be locked, so we can drop the i_pages lock
1978 		 * without racing with truncate.
1979 		 */
1980 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1981 
1982 		/* make sure the page is up to date */
1983 		if (unlikely(!PageUptodate(page))) {
1984 			result = SCAN_FAIL;
1985 			goto out_unlock;
1986 		}
1987 
1988 		/*
1989 		 * If file was truncated then extended, or hole-punched, before
1990 		 * we locked the first page, then a THP might be there already.
1991 		 * This will be discovered on the first iteration.
1992 		 */
1993 		if (PageTransCompound(page)) {
1994 			struct page *head = compound_head(page);
1995 
1996 			result = compound_order(head) == HPAGE_PMD_ORDER &&
1997 					head->index == start
1998 					/* Maybe PMD-mapped */
1999 					? SCAN_PTE_MAPPED_HUGEPAGE
2000 					: SCAN_PAGE_COMPOUND;
2001 			goto out_unlock;
2002 		}
2003 
2004 		folio = page_folio(page);
2005 
2006 		if (folio_mapping(folio) != mapping) {
2007 			result = SCAN_TRUNCATED;
2008 			goto out_unlock;
2009 		}
2010 
2011 		if (!is_shmem && (folio_test_dirty(folio) ||
2012 				  folio_test_writeback(folio))) {
2013 			/*
2014 			 * khugepaged only works on read-only fd, so this
2015 			 * page is dirty because it hasn't been flushed
2016 			 * since first write.
2017 			 */
2018 			result = SCAN_FAIL;
2019 			goto out_unlock;
2020 		}
2021 
2022 		if (!folio_isolate_lru(folio)) {
2023 			result = SCAN_DEL_PAGE_LRU;
2024 			goto out_unlock;
2025 		}
2026 
2027 		if (!filemap_release_folio(folio, GFP_KERNEL)) {
2028 			result = SCAN_PAGE_HAS_PRIVATE;
2029 			folio_putback_lru(folio);
2030 			goto out_unlock;
2031 		}
2032 
2033 		if (folio_mapped(folio))
2034 			try_to_unmap(folio,
2035 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
2036 
2037 		xas_lock_irq(&xas);
2038 
2039 		VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
2040 
2041 		/*
2042 		 * We control three references to the page:
2043 		 *  - we hold a pin on it;
2044 		 *  - one reference from page cache;
2045 		 *  - one from folio_isolate_lru();
2046 		 * If those are the only references, then any new usage of the
2047 		 * page will have to fetch it from the page cache. That requires
2048 		 * locking the page to handle truncate, so any new usage will be
2049 		 * blocked until we unlock page after collapse/during rollback.
2050 		 */
2051 		if (page_count(page) != 3) {
2052 			result = SCAN_PAGE_COUNT;
2053 			xas_unlock_irq(&xas);
2054 			putback_lru_page(page);
2055 			goto out_unlock;
2056 		}
2057 
2058 		/*
2059 		 * Accumulate the pages that are being collapsed.
2060 		 */
2061 		list_add_tail(&page->lru, &pagelist);
2062 		continue;
2063 out_unlock:
2064 		unlock_page(page);
2065 		put_page(page);
2066 		goto xa_unlocked;
2067 	}
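	/*
	 * On success, every small page in the range is now locked, isolated
	 * from the LRU, unmapped from all page tables and queued on
	 * @pagelist, and shmem holes have been charged and counted in
	 * nr_none.
	 */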
2068 
2069 	if (!is_shmem) {
2070 		filemap_nr_thps_inc(mapping);
2071 		/*
2072 		 * Paired with smp_mb() in do_dentry_open() to ensure
2073 		 * i_writecount is up to date and the update to nr_thps is
2074 		 * visible. Ensures the page cache will be truncated if the
2075 		 * file is opened writable.
2076 		 */
2077 		smp_mb();
2078 		if (inode_is_open_for_write(mapping->host)) {
2079 			result = SCAN_FAIL;
2080 			filemap_nr_thps_dec(mapping);
2081 		}
2082 	}
2083 
2084 xa_locked:
2085 	xas_unlock_irq(&xas);
2086 xa_unlocked:
2087 
2088 	/*
2089 	 * If collapse is successful, flush must be done now before copying.
2090 	 * If collapse is unsuccessful, does flush actually need to be done?
2091 	 * Do it anyway, to clear the state.
2092 	 */
2093 	try_to_unmap_flush();
2094 
2095 	if (result != SCAN_SUCCEED)
2096 		goto rollback;
2097 
2098 	/*
2099 	 * The old pages are locked, so they won't change anymore.
2100 	 */
2101 	index = start;
2102 	list_for_each_entry(page, &pagelist, lru) {
2103 		while (index < page->index) {
2104 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
2105 			index++;
2106 		}
2107 		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2108 			result = SCAN_COPY_MC;
2109 			goto rollback;
2110 		}
2111 		index++;
2112 	}
2113 	while (index < end) {
2114 		clear_highpage(hpage + (index % HPAGE_PMD_NR));
2115 		index++;
2116 	}
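	/*
	 * The copy above went through copy_mc_highpage() so that a machine
	 * check on poisoned source memory is survived and reported as
	 * SCAN_COPY_MC instead of crashing the kernel; holes in the range
	 * were zero-filled in the huge page.
	 */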
2117 
2118 	if (nr_none) {
2119 		struct vm_area_struct *vma;
2120 		int nr_none_check = 0;
2121 
2122 		i_mmap_lock_read(mapping);
2123 		xas_lock_irq(&xas);
2124 
2125 		xas_set(&xas, start);
2126 		for (index = start; index < end; index++) {
2127 			if (!xas_next(&xas)) {
2128 				xas_store(&xas, XA_RETRY_ENTRY);
2129 				if (xas_error(&xas)) {
2130 					result = SCAN_STORE_FAILED;
2131 					goto immap_locked;
2132 				}
2133 				nr_none_check++;
2134 			}
2135 		}
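		/*
		 * The XA_RETRY_ENTRY placeholders make concurrent lookups of
		 * these slots retry until we are done, so the holes can
		 * neither be observed nor filled while the re-checks below
		 * run; they are overwritten by the huge page entry on
		 * success, or cleared again on failure.
		 */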
2136 
2137 		if (nr_none != nr_none_check) {
2138 			result = SCAN_PAGE_FILLED;
2139 			goto immap_locked;
2140 		}
2141 
2142 		/*
2143 		 * If userspace observed a missing page in a VMA with a MODE_MISSING
2144 		 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
2145 		 * page. If so, we need to roll back to avoid suppressing such an
2146 		 * event. Wp/minor userfaultfds don't give userspace any guarantee
2147 		 * that the kernel won't fill a missing page with a zero page, so
2148 		 * they don't matter here.
2149 		 *
2150 		 * Any userfaultfds registered after this point will not be able to
2151 		 * observe any missing pages due to the previously inserted retry
2152 		 * entries.
2153 		 */
2154 		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2155 			if (userfaultfd_missing(vma)) {
2156 				result = SCAN_EXCEED_NONE_PTE;
2157 				goto immap_locked;
2158 			}
2159 		}
2160 
2161 immap_locked:
2162 		i_mmap_unlock_read(mapping);
2163 		if (result != SCAN_SUCCEED) {
2164 			xas_set(&xas, start);
2165 			for (index = start; index < end; index++) {
2166 				if (xas_next(&xas) == XA_RETRY_ENTRY)
2167 					xas_store(&xas, NULL);
2168 			}
2169 
2170 			xas_unlock_irq(&xas);
2171 			goto rollback;
2172 		}
2173 	} else {
2174 		xas_lock_irq(&xas);
2175 	}
2176 
2177 	nr = thp_nr_pages(hpage);
2178 	if (is_shmem)
2179 		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2180 	else
2181 		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
2182 
2183 	if (nr_none) {
2184 		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2185 		/* nr_none is always 0 for non-shmem. */
2186 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2187 	}
2188 
2189 	/*
2190 	 * Mark hpage as uptodate before inserting it into the page cache so
2191 	 * that it isn't mistaken for a fallocated but unwritten page.
2192 	 */
2193 	folio = page_folio(hpage);
2194 	folio_mark_uptodate(folio);
2195 	folio_ref_add(folio, HPAGE_PMD_NR - 1);
2196 
2197 	if (is_shmem)
2198 		folio_mark_dirty(folio);
2199 	folio_add_lru(folio);
2200 
2201 	/* Join all the small entries into a single multi-index entry. */
2202 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2203 	xas_store(&xas, hpage);
2204 	WARN_ON_ONCE(xas_error(&xas));
2205 	xas_unlock_irq(&xas);
2206 
2207 	/*
2208 	 * Remove pte page tables, so we can re-fault the page as huge.
2209 	 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2210 	 */
2211 	retract_page_tables(mapping, start);
2212 	if (cc && !cc->is_khugepaged)
2213 		result = SCAN_PTE_MAPPED_HUGEPAGE;
2214 	unlock_page(hpage);
2215 
2216 	/*
2217 	 * The collapse has succeeded, so free the old pages.
2218 	 */
2219 	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2220 		list_del(&page->lru);
2221 		page->mapping = NULL;
2222 		ClearPageActive(page);
2223 		ClearPageUnevictable(page);
2224 		unlock_page(page);
2225 		folio_put_refs(page_folio(page), 3);
2226 	}
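	/*
	 * folio_put_refs(..., 3) drops the three references counted above:
	 * our pin, the old page cache reference (the cache now points at the
	 * huge page) and the LRU isolation reference.
	 */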
2227 
2228 	goto out;
2229 
2230 rollback:
2231 	/* Something went wrong: roll back page cache changes */
2232 	if (nr_none) {
2233 		xas_lock_irq(&xas);
2234 		mapping->nrpages -= nr_none;
2235 		shmem_uncharge(mapping->host, nr_none);
2236 		xas_unlock_irq(&xas);
2237 	}
2238 
2239 	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2240 		list_del(&page->lru);
2241 		unlock_page(page);
2242 		putback_lru_page(page);
2243 		put_page(page);
2244 	}
2245 	/*
2246 	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2247 	 * files only. This undo is not needed unless failure is
2248 	 * due to SCAN_COPY_MC.
2249 	 */
2250 	if (!is_shmem && result == SCAN_COPY_MC) {
2251 		filemap_nr_thps_dec(mapping);
2252 		/*
2253 		 * Paired with smp_mb() in do_dentry_open() to
2254 		 * ensure the update to nr_thps is visible.
2255 		 */
2256 		smp_mb();
2257 	}
2258 
2259 	hpage->mapping = NULL;
2260 
2261 	unlock_page(hpage);
2262 	put_page(hpage);
2263 out:
2264 	VM_BUG_ON(!list_empty(&pagelist));
2265 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2266 	return result;
2267 }
2268 
2269 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2270 				    struct file *file, pgoff_t start,
2271 				    struct collapse_control *cc)
2272 {
2273 	struct page *page = NULL;
2274 	struct address_space *mapping = file->f_mapping;
2275 	XA_STATE(xas, &mapping->i_pages, start);
2276 	int present, swap;
2277 	int node = NUMA_NO_NODE;
2278 	int result = SCAN_SUCCEED;
2279 
2280 	present = 0;
2281 	swap = 0;
2282 	memset(cc->node_load, 0, sizeof(cc->node_load));
2283 	nodes_clear(cc->alloc_nmask);
2284 	rcu_read_lock();
2285 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2286 		if (xas_retry(&xas, page))
2287 			continue;
2288 
2289 		if (xa_is_value(page)) {
2290 			++swap;
2291 			if (cc->is_khugepaged &&
2292 			    swap > khugepaged_max_ptes_swap) {
2293 				result = SCAN_EXCEED_SWAP_PTE;
2294 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2295 				break;
2296 			}
2297 			continue;
2298 		}
2299 
2300 		/*
2301 		 * TODO: khugepaged should compact smaller compound pages
2302 		 * into a PMD-sized page
2303 		 */
2304 		if (PageTransCompound(page)) {
2305 			struct page *head = compound_head(page);
2306 
2307 			result = compound_order(head) == HPAGE_PMD_ORDER &&
2308 					head->index == start
2309 					/* Maybe PMD-mapped */
2310 					? SCAN_PTE_MAPPED_HUGEPAGE
2311 					: SCAN_PAGE_COMPOUND;
2312 			/*
2313 			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2314 			 * by the caller won't touch the page cache, and so
2315 			 * it's safe to skip LRU and refcount checks before
2316 			 * returning.
2317 			 */
2318 			break;
2319 		}
2320 
2321 		node = page_to_nid(page);
2322 		if (hpage_collapse_scan_abort(node, cc)) {
2323 			result = SCAN_SCAN_ABORT;
2324 			break;
2325 		}
2326 		cc->node_load[node]++;
2327 
2328 		if (!PageLRU(page)) {
2329 			result = SCAN_PAGE_LRU;
2330 			break;
2331 		}
2332 
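		/*
		 * Expected references: one from the page cache, one per PTE
		 * mapping it, plus one if buffer heads or other private data
		 * are attached.  Any extra pin would make the stricter
		 * refcount check in collapse_file() fail anyway.
		 */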
2333 		if (page_count(page) !=
2334 		    1 + page_mapcount(page) + page_has_private(page)) {
2335 			result = SCAN_PAGE_COUNT;
2336 			break;
2337 		}
2338 
2339 		/*
2340 		 * We probably should check if the page is referenced here, but
2341 		 * nobody would transfer pte_young() to PageReferenced() for us.
2342 		 * And rmap walk here is just too costly...
2343 		 */
2344 
2345 		present++;
2346 
2347 		if (need_resched()) {
2348 			xas_pause(&xas);
2349 			cond_resched_rcu();
2350 		}
2351 	}
2352 	rcu_read_unlock();
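	/*
	 * As with the anon scan, a file range is only collapsed when enough
	 * pages are already present: for khugepaged, holes count against
	 * khugepaged_max_ptes_none just as empty PTEs do.
	 */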
2353 
2354 	if (result == SCAN_SUCCEED) {
2355 		if (cc->is_khugepaged &&
2356 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2357 			result = SCAN_EXCEED_NONE_PTE;
2358 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2359 		} else {
2360 			result = collapse_file(mm, addr, file, start, cc);
2361 		}
2362 	}
2363 
2364 	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2365 	return result;
2366 }
2367 #else
2368 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2369 				    struct file *file, pgoff_t start,
2370 				    struct collapse_control *cc)
2371 {
2372 	BUILD_BUG();
2373 }
2374 
2375 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
2376 {
2377 }
2378 
2379 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2380 					  unsigned long addr)
2381 {
2382 	return false;
2383 }
2384 #endif
2385 
2386 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2387 					    struct collapse_control *cc)
2388 	__releases(&khugepaged_mm_lock)
2389 	__acquires(&khugepaged_mm_lock)
2390 {
2391 	struct vma_iterator vmi;
2392 	struct khugepaged_mm_slot *mm_slot;
2393 	struct mm_slot *slot;
2394 	struct mm_struct *mm;
2395 	struct vm_area_struct *vma;
2396 	int progress = 0;
2397 
2398 	VM_BUG_ON(!pages);
2399 	lockdep_assert_held(&khugepaged_mm_lock);
2400 	*result = SCAN_FAIL;
2401 
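	/*
	 * khugepaged_scan is a persistent cursor (mm_slot plus address), so
	 * each call to this function resumes where the previous one left off.
	 */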
2402 	if (khugepaged_scan.mm_slot) {
2403 		mm_slot = khugepaged_scan.mm_slot;
2404 		slot = &mm_slot->slot;
2405 	} else {
2406 		slot = list_entry(khugepaged_scan.mm_head.next,
2407 				     struct mm_slot, mm_node);
2408 		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2409 		khugepaged_scan.address = 0;
2410 		khugepaged_scan.mm_slot = mm_slot;
2411 	}
2412 	spin_unlock(&khugepaged_mm_lock);
2413 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2414 
2415 	mm = slot->mm;
2416 	/*
2417 	 * Don't block waiting for the mmap_lock (to avoid long wait times).
2418 	 * Just move to the next mm on the list.
2419 	 */
2420 	vma = NULL;
2421 	if (unlikely(!mmap_read_trylock(mm)))
2422 		goto breakouterloop_mmap_lock;
2423 
2424 	progress++;
2425 	if (unlikely(hpage_collapse_test_exit(mm)))
2426 		goto breakouterloop;
2427 
2428 	vma_iter_init(&vmi, mm, khugepaged_scan.address);
2429 	for_each_vma(vmi, vma) {
2430 		unsigned long hstart, hend;
2431 
2432 		cond_resched();
2433 		if (unlikely(hpage_collapse_test_exit(mm))) {
2434 			progress++;
2435 			break;
2436 		}
2437 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2438 skip:
2439 			progress++;
2440 			continue;
2441 		}
2442 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2443 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2444 		if (khugepaged_scan.address > hend)
2445 			goto skip;
2446 		if (khugepaged_scan.address < hstart)
2447 			khugepaged_scan.address = hstart;
2448 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2449 
2450 		while (khugepaged_scan.address < hend) {
2451 			bool mmap_locked = true;
2452 
2453 			cond_resched();
2454 			if (unlikely(hpage_collapse_test_exit(mm)))
2455 				goto breakouterloop;
2456 
2457 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2458 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2459 				  hend);
2460 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2461 				struct file *file = get_file(vma->vm_file);
2462 				pgoff_t pgoff = linear_page_index(vma,
2463 						khugepaged_scan.address);
2464 
2465 				mmap_read_unlock(mm);
2466 				*result = hpage_collapse_scan_file(mm,
2467 								   khugepaged_scan.address,
2468 								   file, pgoff, cc);
2469 				mmap_locked = false;
2470 				fput(file);
2471 			} else {
2472 				*result = hpage_collapse_scan_pmd(mm, vma,
2473 								  khugepaged_scan.address,
2474 								  &mmap_locked,
2475 								  cc);
2476 			}
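			/*
			 * SCAN_PTE_MAPPED_HUGEPAGE: a compound page already
			 * backs this range but is still mapped by PTEs.
			 * Record the address so that
			 * khugepaged_collapse_pte_mapped_thps() can install
			 * the PMD later under mmap_write_lock.
			 */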
2477 			switch (*result) {
2478 			case SCAN_PTE_MAPPED_HUGEPAGE: {
2479 				pmd_t *pmd;
2480 
2481 				*result = find_pmd_or_thp_or_none(mm,
2482 								  khugepaged_scan.address,
2483 								  &pmd);
2484 				if (*result != SCAN_SUCCEED)
2485 					break;
2486 				if (!khugepaged_add_pte_mapped_thp(mm,
2487 								   khugepaged_scan.address))
2488 					break;
2489 			} fallthrough;
2490 			case SCAN_SUCCEED:
2491 				++khugepaged_pages_collapsed;
2492 				break;
2493 			default:
2494 				break;
2495 			}
2496 
2497 			/* move to next address */
2498 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2499 			progress += HPAGE_PMD_NR;
2500 			if (!mmap_locked)
2501 				/*
2502 				 * We released mmap_lock so break loop.  Note
2503 				 * that we drop mmap_lock before all hugepage
2504 				 * allocations, so if allocation fails, we are
2505 				 * guaranteed to break here and report the
2506 				 * correct result back to caller.
2507 				 */
2508 				goto breakouterloop_mmap_lock;
2509 			if (progress >= pages)
2510 				goto breakouterloop;
2511 		}
2512 	}
2513 breakouterloop:
2514 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2515 breakouterloop_mmap_lock:
2516 
2517 	spin_lock(&khugepaged_mm_lock);
2518 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2519 	/*
2520 	 * Release the current mm_slot if this mm is about to die, or
2521 	 * if we scanned all vmas of this mm.
2522 	 */
2523 	if (hpage_collapse_test_exit(mm) || !vma) {
2524 		/*
2525 		 * Make sure that if mm_users is reaching zero while
2526 		 * khugepaged runs here, khugepaged_exit will find
2527 		 * mm_slot not pointing to the exiting mm.
2528 		 */
2529 		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2530 			slot = list_entry(slot->mm_node.next,
2531 					  struct mm_slot, mm_node);
2532 			khugepaged_scan.mm_slot =
2533 				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2534 			khugepaged_scan.address = 0;
2535 		} else {
2536 			khugepaged_scan.mm_slot = NULL;
2537 			khugepaged_full_scans++;
2538 		}
2539 
2540 		collect_mm_slot(mm_slot);
2541 	}
2542 
2543 	return progress;
2544 }
2545 
2546 static int khugepaged_has_work(void)
2547 {
2548 	return !list_empty(&khugepaged_scan.mm_head) &&
2549 		hugepage_flags_enabled();
2550 }
2551 
2552 static int khugepaged_wait_event(void)
2553 {
2554 	return !list_empty(&khugepaged_scan.mm_head) ||
2555 		kthread_should_stop();
2556 }
2557 
2558 static void khugepaged_do_scan(struct collapse_control *cc)
2559 {
2560 	unsigned int progress = 0, pass_through_head = 0;
2561 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2562 	bool wait = true;
2563 	int result = SCAN_SUCCEED;
2564 
2565 	lru_add_drain_all();
2566 
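	/*
	 * Scan up to 'pages' ptes per invocation, resuming from the stored
	 * cursor.  pass_through_head counts how many times the scan starts
	 * from the head of the mm list; hitting it twice means one full pass
	 * has completed, so stop rather than rescan the same mms.
	 */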
2567 	while (true) {
2568 		cond_resched();
2569 
2570 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2571 			break;
2572 
2573 		spin_lock(&khugepaged_mm_lock);
2574 		if (!khugepaged_scan.mm_slot)
2575 			pass_through_head++;
2576 		if (khugepaged_has_work() &&
2577 		    pass_through_head < 2)
2578 			progress += khugepaged_scan_mm_slot(pages - progress,
2579 							    &result, cc);
2580 		else
2581 			progress = pages;
2582 		spin_unlock(&khugepaged_mm_lock);
2583 
2584 		if (progress >= pages)
2585 			break;
2586 
2587 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2588 			/*
2589 			 * If we fail to allocate the first time, try to sleep for
2590 			 * a while.  When hit again, cancel the scan.
2591 			 */
2592 			if (!wait)
2593 				break;
2594 			wait = false;
2595 			khugepaged_alloc_sleep();
2596 		}
2597 	}
2598 }
2599 
2600 static bool khugepaged_should_wakeup(void)
2601 {
2602 	return kthread_should_stop() ||
2603 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2604 }
2605 
2606 static void khugepaged_wait_work(void)
2607 {
2608 	if (khugepaged_has_work()) {
2609 		const unsigned long scan_sleep_jiffies =
2610 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2611 
2612 		if (!scan_sleep_jiffies)
2613 			return;
2614 
2615 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2616 		wait_event_freezable_timeout(khugepaged_wait,
2617 					     khugepaged_should_wakeup(),
2618 					     scan_sleep_jiffies);
2619 		return;
2620 	}
2621 
2622 	if (hugepage_flags_enabled())
2623 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2624 }
2625 
2626 static int khugepaged(void *none)
2627 {
2628 	struct khugepaged_mm_slot *mm_slot;
2629 
2630 	set_freezable();
2631 	set_user_nice(current, MAX_NICE);
2632 
2633 	while (!kthread_should_stop()) {
2634 		khugepaged_do_scan(&khugepaged_collapse_control);
2635 		khugepaged_wait_work();
2636 	}
2637 
2638 	spin_lock(&khugepaged_mm_lock);
2639 	mm_slot = khugepaged_scan.mm_slot;
2640 	khugepaged_scan.mm_slot = NULL;
2641 	if (mm_slot)
2642 		collect_mm_slot(mm_slot);
2643 	spin_unlock(&khugepaged_mm_lock);
2644 	return 0;
2645 }
2646 
2647 static void set_recommended_min_free_kbytes(void)
2648 {
2649 	struct zone *zone;
2650 	int nr_zones = 0;
2651 	unsigned long recommended_min;
2652 
2653 	if (!hugepage_flags_enabled()) {
2654 		calculate_min_free_kbytes();
2655 		goto update_wmarks;
2656 	}
2657 
2658 	for_each_populated_zone(zone) {
2659 		/*
2660 		 * We don't need to worry about fragmentation of
2661 		 * ZONE_MOVABLE since it only has movable pages.
2662 		 */
2663 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2664 			continue;
2665 
2666 		nr_zones++;
2667 	}
2668 
2669 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2670 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2671 
2672 	/*
2673 	 * Make sure that on average at least two pageblocks are almost free
2674 	 * of another type, one for a migratetype to fall back to and a
2675 	 * second to avoid subsequent fallbacks of other types. There are 3
2676 	 * MIGRATE_TYPES we care about.
2677 	 */
2678 	recommended_min += pageblock_nr_pages * nr_zones *
2679 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
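	/*
	 * Worked example, assuming a typical x86-64 config (4KiB pages,
	 * 2MiB pageblocks so pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3)
	 * with 4 populated zones: 512 * 4 * 2 + 512 * 4 * 3 * 3 = 22528
	 * pages, i.e. 88MiB, before the 5%-of-lowmem cap below.
	 */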
2680 
2681 	/* never allow reserving more than 5% of the lowmem */
2682 	recommended_min = min(recommended_min,
2683 			      (unsigned long) nr_free_buffer_pages() / 20);
2684 	recommended_min <<= (PAGE_SHIFT-10);
2685 
2686 	if (recommended_min > min_free_kbytes) {
2687 		if (user_min_free_kbytes >= 0)
2688 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2689 				min_free_kbytes, recommended_min);
2690 
2691 		min_free_kbytes = recommended_min;
2692 	}
2693 
2694 update_wmarks:
2695 	setup_per_zone_wmarks();
2696 }
2697 
2698 int start_stop_khugepaged(void)
2699 {
2700 	int err = 0;
2701 
2702 	mutex_lock(&khugepaged_mutex);
2703 	if (hugepage_flags_enabled()) {
2704 		if (!khugepaged_thread)
2705 			khugepaged_thread = kthread_run(khugepaged, NULL,
2706 							"khugepaged");
2707 		if (IS_ERR(khugepaged_thread)) {
2708 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2709 			err = PTR_ERR(khugepaged_thread);
2710 			khugepaged_thread = NULL;
2711 			goto fail;
2712 		}
2713 
2714 		if (!list_empty(&khugepaged_scan.mm_head))
2715 			wake_up_interruptible(&khugepaged_wait);
2716 	} else if (khugepaged_thread) {
2717 		kthread_stop(khugepaged_thread);
2718 		khugepaged_thread = NULL;
2719 	}
2720 	set_recommended_min_free_kbytes();
2721 fail:
2722 	mutex_unlock(&khugepaged_mutex);
2723 	return err;
2724 }
2725 
2726 void khugepaged_min_free_kbytes_update(void)
2727 {
2728 	mutex_lock(&khugepaged_mutex);
2729 	if (hugepage_flags_enabled() && khugepaged_thread)
2730 		set_recommended_min_free_kbytes();
2731 	mutex_unlock(&khugepaged_mutex);
2732 }
2733 
2734 bool current_is_khugepaged(void)
2735 {
2736 	return kthread_func(current) == khugepaged;
2737 }
2738 
2739 static int madvise_collapse_errno(enum scan_result r)
2740 {
2741 	/*
2742 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2743 	 * actionable feedback to caller, so they may take an appropriate
2744 	 * fallback measure depending on the nature of the failure.
2745 	 */
2746 	switch (r) {
2747 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
2748 		return -ENOMEM;
2749 	case SCAN_CGROUP_CHARGE_FAIL:
2750 	case SCAN_EXCEED_NONE_PTE:
2751 		return -EBUSY;
2752 	/* Resource temporarily unavailable - trying again might succeed */
2753 	case SCAN_PAGE_COUNT:
2754 	case SCAN_PAGE_LOCK:
2755 	case SCAN_PAGE_LRU:
2756 	case SCAN_DEL_PAGE_LRU:
2757 	case SCAN_PAGE_FILLED:
2758 		return -EAGAIN;
2759 	/*
2760 	 * Other: trying again is unlikely to succeed, or the error is
2761 	 * intrinsic to the specified memory range; khugepaged likely won't
2762 	 * be able to collapse it either.
2763 	 */
2764 	default:
2765 		return -EINVAL;
2766 	}
2767 }
2768 
2769 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2770 		     unsigned long start, unsigned long end)
2771 {
2772 	struct collapse_control *cc;
2773 	struct mm_struct *mm = vma->vm_mm;
2774 	unsigned long hstart, hend, addr;
2775 	int thps = 0, last_fail = SCAN_FAIL;
2776 	bool mmap_locked = true;
2777 
2778 	BUG_ON(vma->vm_start > start);
2779 	BUG_ON(vma->vm_end < end);
2780 
2781 	*prev = vma;
2782 
2783 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2784 		return -EINVAL;
2785 
2786 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2787 	if (!cc)
2788 		return -ENOMEM;
2789 	cc->is_khugepaged = false;
2790 
2791 	mmgrab(mm);
2792 	lru_add_drain_all();
2793 
2794 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2795 	hend = end & HPAGE_PMD_MASK;
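	/*
	 * Only PMD-sized, PMD-aligned chunks fully inside [start, end) are
	 * attempted: hstart rounds up, hend rounds down.
	 */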
2796 
2797 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2798 		int result = SCAN_FAIL;
2799 
2800 		if (!mmap_locked) {
2801 			cond_resched();
2802 			mmap_read_lock(mm);
2803 			mmap_locked = true;
2804 			result = hugepage_vma_revalidate(mm, addr, false, &vma,
2805 							 cc);
2806 			if (result != SCAN_SUCCEED) {
2807 				last_fail = result;
2808 				goto out_nolock;
2809 			}
2810 
2811 			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2812 		}
2813 		mmap_assert_locked(mm);
2814 		memset(cc->node_load, 0, sizeof(cc->node_load));
2815 		nodes_clear(cc->alloc_nmask);
2816 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2817 			struct file *file = get_file(vma->vm_file);
2818 			pgoff_t pgoff = linear_page_index(vma, addr);
2819 
2820 			mmap_read_unlock(mm);
2821 			mmap_locked = false;
2822 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2823 							  cc);
2824 			fput(file);
2825 		} else {
2826 			result = hpage_collapse_scan_pmd(mm, vma, addr,
2827 							 &mmap_locked, cc);
2828 		}
2829 		if (!mmap_locked)
2830 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
2831 
2832 handle_result:
2833 		switch (result) {
2834 		case SCAN_SUCCEED:
2835 		case SCAN_PMD_MAPPED:
2836 			++thps;
2837 			break;
2838 		case SCAN_PTE_MAPPED_HUGEPAGE:
2839 			BUG_ON(mmap_locked);
2840 			BUG_ON(*prev);
2841 			mmap_read_lock(mm);
2842 			result = collapse_pte_mapped_thp(mm, addr, true);
2843 			mmap_read_unlock(mm);
2844 			goto handle_result;
2845 		/* Whitelisted set of results where continuing is OK */
2846 		case SCAN_PMD_NULL:
2847 		case SCAN_PTE_NON_PRESENT:
2848 		case SCAN_PTE_UFFD_WP:
2849 		case SCAN_PAGE_RO:
2850 		case SCAN_LACK_REFERENCED_PAGE:
2851 		case SCAN_PAGE_NULL:
2852 		case SCAN_PAGE_COUNT:
2853 		case SCAN_PAGE_LOCK:
2854 		case SCAN_PAGE_COMPOUND:
2855 		case SCAN_PAGE_LRU:
2856 		case SCAN_DEL_PAGE_LRU:
2857 			last_fail = result;
2858 			break;
2859 		default:
2860 			last_fail = result;
2861 			/* Other error, exit */
2862 			goto out_maybelock;
2863 		}
2864 	}
2865 
2866 out_maybelock:
2867 	/* Caller expects us to hold mmap_lock on return */
2868 	if (!mmap_locked)
2869 		mmap_read_lock(mm);
2870 out_nolock:
2871 	mmap_assert_locked(mm);
2872 	mmdrop(mm);
2873 	kfree(cc);
2874 
2875 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2876 			: madvise_collapse_errno(last_fail);
2877 }
2878