xref: /openbmc/linux/mm/khugepaged.c (revision b5f184fb)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21 
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
25 
26 enum scan_result {
27 	SCAN_FAIL,
28 	SCAN_SUCCEED,
29 	SCAN_PMD_NULL,
30 	SCAN_EXCEED_NONE_PTE,
31 	SCAN_EXCEED_SWAP_PTE,
32 	SCAN_EXCEED_SHARED_PTE,
33 	SCAN_PTE_NON_PRESENT,
34 	SCAN_PTE_UFFD_WP,
35 	SCAN_PAGE_RO,
36 	SCAN_LACK_REFERENCED_PAGE,
37 	SCAN_PAGE_NULL,
38 	SCAN_SCAN_ABORT,
39 	SCAN_PAGE_COUNT,
40 	SCAN_PAGE_LRU,
41 	SCAN_PAGE_LOCK,
42 	SCAN_PAGE_ANON,
43 	SCAN_PAGE_COMPOUND,
44 	SCAN_ANY_PROCESS,
45 	SCAN_VMA_NULL,
46 	SCAN_VMA_CHECK,
47 	SCAN_ADDRESS_RANGE,
48 	SCAN_SWAP_CACHE_PAGE,
49 	SCAN_DEL_PAGE_LRU,
50 	SCAN_ALLOC_HUGE_PAGE_FAIL,
51 	SCAN_CGROUP_CHARGE_FAIL,
52 	SCAN_TRUNCATED,
53 	SCAN_PAGE_HAS_PRIVATE,
54 };
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
58 
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
61 
62 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72 /*
73  * By default, collapse hugepages if at least one pte is mapped, just as
74  * would have happened if the vma had been large enough during the page
75  * fault.
76  */
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
80 
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83 
84 static struct kmem_cache *mm_slot_cache __read_mostly;
85 
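/*
 * Upper bound on the number of pte-mapped THP addresses remembered per
 * mm_slot for deferred collapse (see khugepaged_add_pte_mapped_thp() and
 * collapse_pte_mapped_thp() below).
 */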
86 #define MAX_PTE_MAPPED_THP 8
87 
88 /**
89  * struct mm_slot - hash lookup from mm to mm_slot
90  * @hash: hash collision list
91  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92  * @mm: the mm that this information is valid for
 93  * @nr_pte_mapped_thp: number of pte-mapped THPs
 94  * @pte_mapped_thp: array of addresses of pte-mapped THPs
95  */
96 struct mm_slot {
97 	struct hlist_node hash;
98 	struct list_head mm_node;
99 	struct mm_struct *mm;
100 
101 	/* pte-mapped THP in this mm */
102 	int nr_pte_mapped_thp;
103 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
104 };
105 
106 /**
107  * struct khugepaged_scan - cursor for scanning
108  * @mm_head: the head of the mm list to scan
109  * @mm_slot: the current mm_slot we are scanning
110  * @address: the next address inside that mm to be scanned
111  *
112  * There is only the one khugepaged_scan instance of this cursor structure.
113  */
114 struct khugepaged_scan {
115 	struct list_head mm_head;
116 	struct mm_slot *mm_slot;
117 	unsigned long address;
118 };
119 
120 static struct khugepaged_scan khugepaged_scan = {
121 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122 };
123 
124 #ifdef CONFIG_SYSFS
125 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126 					 struct kobj_attribute *attr,
127 					 char *buf)
128 {
129 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130 }
131 
132 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133 					  struct kobj_attribute *attr,
134 					  const char *buf, size_t count)
135 {
136 	unsigned int msecs;
137 	int err;
138 
139 	err = kstrtouint(buf, 10, &msecs);
140 	if (err)
141 		return -EINVAL;
142 
143 	khugepaged_scan_sleep_millisecs = msecs;
144 	khugepaged_sleep_expire = 0;
145 	wake_up_interruptible(&khugepaged_wait);
146 
147 	return count;
148 }
149 static struct kobj_attribute scan_sleep_millisecs_attr =
150 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
151 	       scan_sleep_millisecs_store);
152 
153 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
154 					  struct kobj_attribute *attr,
155 					  char *buf)
156 {
157 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
158 }
159 
160 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
161 					   struct kobj_attribute *attr,
162 					   const char *buf, size_t count)
163 {
164 	unsigned int msecs;
165 	int err;
166 
167 	err = kstrtouint(buf, 10, &msecs);
168 	if (err)
169 		return -EINVAL;
170 
171 	khugepaged_alloc_sleep_millisecs = msecs;
172 	khugepaged_sleep_expire = 0;
173 	wake_up_interruptible(&khugepaged_wait);
174 
175 	return count;
176 }
177 static struct kobj_attribute alloc_sleep_millisecs_attr =
178 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
179 	       alloc_sleep_millisecs_store);
180 
181 static ssize_t pages_to_scan_show(struct kobject *kobj,
182 				  struct kobj_attribute *attr,
183 				  char *buf)
184 {
185 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
186 }
187 static ssize_t pages_to_scan_store(struct kobject *kobj,
188 				   struct kobj_attribute *attr,
189 				   const char *buf, size_t count)
190 {
191 	unsigned int pages;
192 	int err;
193 
194 	err = kstrtouint(buf, 10, &pages);
195 	if (err || !pages)
196 		return -EINVAL;
197 
198 	khugepaged_pages_to_scan = pages;
199 
200 	return count;
201 }
202 static struct kobj_attribute pages_to_scan_attr =
203 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
204 	       pages_to_scan_store);
205 
206 static ssize_t pages_collapsed_show(struct kobject *kobj,
207 				    struct kobj_attribute *attr,
208 				    char *buf)
209 {
210 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
211 }
212 static struct kobj_attribute pages_collapsed_attr =
213 	__ATTR_RO(pages_collapsed);
214 
215 static ssize_t full_scans_show(struct kobject *kobj,
216 			       struct kobj_attribute *attr,
217 			       char *buf)
218 {
219 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
220 }
221 static struct kobj_attribute full_scans_attr =
222 	__ATTR_RO(full_scans);
223 
224 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
225 				      struct kobj_attribute *attr, char *buf)
226 {
227 	return single_hugepage_flag_show(kobj, attr, buf,
228 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
229 }
230 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
231 				       struct kobj_attribute *attr,
232 				       const char *buf, size_t count)
233 {
234 	return single_hugepage_flag_store(kobj, attr, buf, count,
235 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
236 }
237 static struct kobj_attribute khugepaged_defrag_attr =
238 	__ATTR(defrag, 0644, khugepaged_defrag_show,
239 	       khugepaged_defrag_store);
240 
241 /*
242  * max_ptes_none controls whether khugepaged should collapse hugepages
243  * over unmapped ptes, which can increase the memory footprint of the
244  * vmas. When max_ptes_none is 0, khugepaged does not reduce the free
245  * memory available in the system while it runs. Larger values of
246  * max_ptes_none can instead reduce free memory during the khugepaged
247  * scan.
248  */
249 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
250 					     struct kobj_attribute *attr,
251 					     char *buf)
252 {
253 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
254 }
255 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
256 					      struct kobj_attribute *attr,
257 					      const char *buf, size_t count)
258 {
259 	int err;
260 	unsigned long max_ptes_none;
261 
262 	err = kstrtoul(buf, 10, &max_ptes_none);
263 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
264 		return -EINVAL;
265 
266 	khugepaged_max_ptes_none = max_ptes_none;
267 
268 	return count;
269 }
270 static struct kobj_attribute khugepaged_max_ptes_none_attr =
271 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
272 	       khugepaged_max_ptes_none_store);
273 
274 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
275 					     struct kobj_attribute *attr,
276 					     char *buf)
277 {
278 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
279 }
280 
281 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
282 					      struct kobj_attribute *attr,
283 					      const char *buf, size_t count)
284 {
285 	int err;
286 	unsigned long max_ptes_swap;
287 
288 	err  = kstrtoul(buf, 10, &max_ptes_swap);
289 	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
290 		return -EINVAL;
291 
292 	khugepaged_max_ptes_swap = max_ptes_swap;
293 
294 	return count;
295 }
296 
297 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
298 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
299 	       khugepaged_max_ptes_swap_store);
300 
301 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
302 					       struct kobj_attribute *attr,
303 					       char *buf)
304 {
305 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
306 }
307 
308 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
309 					      struct kobj_attribute *attr,
310 					      const char *buf, size_t count)
311 {
312 	int err;
313 	unsigned long max_ptes_shared;
314 
315 	err  = kstrtoul(buf, 10, &max_ptes_shared);
316 	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
317 		return -EINVAL;
318 
319 	khugepaged_max_ptes_shared = max_ptes_shared;
320 
321 	return count;
322 }
323 
324 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
325 	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
326 	       khugepaged_max_ptes_shared_store);
327 
328 static struct attribute *khugepaged_attr[] = {
329 	&khugepaged_defrag_attr.attr,
330 	&khugepaged_max_ptes_none_attr.attr,
331 	&khugepaged_max_ptes_swap_attr.attr,
332 	&khugepaged_max_ptes_shared_attr.attr,
333 	&pages_to_scan_attr.attr,
334 	&pages_collapsed_attr.attr,
335 	&full_scans_attr.attr,
336 	&scan_sleep_millisecs_attr.attr,
337 	&alloc_sleep_millisecs_attr.attr,
338 	NULL,
339 };
340 
341 struct attribute_group khugepaged_attr_group = {
342 	.attrs = khugepaged_attr,
343 	.name = "khugepaged",
344 };
345 #endif /* CONFIG_SYSFS */
346 
347 int hugepage_madvise(struct vm_area_struct *vma,
348 		     unsigned long *vm_flags, int advice)
349 {
350 	switch (advice) {
351 	case MADV_HUGEPAGE:
352 #ifdef CONFIG_S390
353 		/*
354 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
355 		 * can't handle this properly after s390_enable_sie, so we simply
356 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
357 		 */
358 		if (mm_has_pgste(vma->vm_mm))
359 			return 0;
360 #endif
361 		*vm_flags &= ~VM_NOHUGEPAGE;
362 		*vm_flags |= VM_HUGEPAGE;
363 		/*
364 		 * If the vma becomes suitable for khugepaged to scan,
365 		 * register it here without waiting for a page fault that
366 		 * may not happen any time soon.
367 		 */
368 		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
369 				khugepaged_enter_vma_merge(vma, *vm_flags))
370 			return -ENOMEM;
371 		break;
372 	case MADV_NOHUGEPAGE:
373 		*vm_flags &= ~VM_HUGEPAGE;
374 		*vm_flags |= VM_NOHUGEPAGE;
375 		/*
376 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377 		 * this vma, even if the mm stays registered in khugepaged because
378 		 * it got registered before VM_NOHUGEPAGE was set.
379 		 */
380 		break;
381 	}
382 
383 	return 0;
384 }
385 
386 int __init khugepaged_init(void)
387 {
388 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389 					  sizeof(struct mm_slot),
390 					  __alignof__(struct mm_slot), 0, NULL);
391 	if (!mm_slot_cache)
392 		return -ENOMEM;
393 
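	/*
	 * Defaults: scan eight PMD ranges' worth of ptes per pass; allow a
	 * range to collapse with up to HPAGE_PMD_NR - 1 empty ptes,
	 * HPAGE_PMD_NR / 8 swapped-out ptes and HPAGE_PMD_NR / 2 shared ptes.
	 */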
394 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
395 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
396 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
397 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
398 
399 	return 0;
400 }
401 
402 void __init khugepaged_destroy(void)
403 {
404 	kmem_cache_destroy(mm_slot_cache);
405 }
406 
407 static inline struct mm_slot *alloc_mm_slot(void)
408 {
409 	if (!mm_slot_cache)	/* initialization failed */
410 		return NULL;
411 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
412 }
413 
414 static inline void free_mm_slot(struct mm_slot *mm_slot)
415 {
416 	kmem_cache_free(mm_slot_cache, mm_slot);
417 }
418 
419 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
420 {
421 	struct mm_slot *mm_slot;
422 
423 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
424 		if (mm == mm_slot->mm)
425 			return mm_slot;
426 
427 	return NULL;
428 }
429 
430 static void insert_to_mm_slots_hash(struct mm_struct *mm,
431 				    struct mm_slot *mm_slot)
432 {
433 	mm_slot->mm = mm;
434 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
435 }
436 
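/*
 * mm_users dropping to zero means the mm is being torn down; khugepaged
 * must then stop working on it and let the slot be released (see
 * __khugepaged_exit() and collect_mm_slot()).
 */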
437 static inline int khugepaged_test_exit(struct mm_struct *mm)
438 {
439 	return atomic_read(&mm->mm_users) == 0;
440 }
441 
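/*
 * A vma is eligible for khugepaged only if THP is enabled for it (madvised
 * VM_HUGEPAGE or system-wide "always") and neither VM_NOHUGEPAGE nor
 * MMF_DISABLE_THP is set.  shmem mappings, and VM_DENYWRITE file mappings
 * when CONFIG_READ_ONLY_THP_FOR_FS is enabled, are eligible if the mapping
 * is hugepage-aligned with respect to the file; anything else must be an
 * anonymous vma without vm_ops that is not a temporary stack and does not
 * carry VM_NO_KHUGEPAGED flags.
 */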
442 static bool hugepage_vma_check(struct vm_area_struct *vma,
443 			       unsigned long vm_flags)
444 {
445 	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
446 	    (vm_flags & VM_NOHUGEPAGE) ||
447 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
448 		return false;
449 
450 	if (shmem_file(vma->vm_file) ||
451 	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
452 	     vma->vm_file &&
453 	     (vm_flags & VM_DENYWRITE))) {
454 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
455 				HPAGE_PMD_NR);
456 	}
457 	if (!vma->anon_vma || vma->vm_ops)
458 		return false;
459 	if (vma_is_temporary_stack(vma))
460 		return false;
461 	return !(vm_flags & VM_NO_KHUGEPAGED);
462 }
463 
464 int __khugepaged_enter(struct mm_struct *mm)
465 {
466 	struct mm_slot *mm_slot;
467 	int wakeup;
468 
469 	mm_slot = alloc_mm_slot();
470 	if (!mm_slot)
471 		return -ENOMEM;
472 
473 	/* __khugepaged_exit() must not run from under us */
474 	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
475 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
476 		free_mm_slot(mm_slot);
477 		return 0;
478 	}
479 
480 	spin_lock(&khugepaged_mm_lock);
481 	insert_to_mm_slots_hash(mm, mm_slot);
482 	/*
483 	 * Insert just behind the scanning cursor, to let the area settle
484 	 * down a little.
485 	 */
486 	wakeup = list_empty(&khugepaged_scan.mm_head);
487 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
488 	spin_unlock(&khugepaged_mm_lock);
489 
490 	mmgrab(mm);
491 	if (wakeup)
492 		wake_up_interruptible(&khugepaged_wait);
493 
494 	return 0;
495 }
496 
497 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
498 			       unsigned long vm_flags)
499 {
500 	unsigned long hstart, hend;
501 
502 	/*
503 	 * khugepaged only supports read-only files for non-shmem mappings.
504 	 * khugepaged does not yet work on special mappings. And
505 	 * file-private shmem THP is not supported.
506 	 */
507 	if (!hugepage_vma_check(vma, vm_flags))
508 		return 0;
509 
510 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
511 	hend = vma->vm_end & HPAGE_PMD_MASK;
512 	if (hstart < hend)
513 		return khugepaged_enter(vma, vm_flags);
514 	return 0;
515 }
516 
517 void __khugepaged_exit(struct mm_struct *mm)
518 {
519 	struct mm_slot *mm_slot;
520 	int free = 0;
521 
522 	spin_lock(&khugepaged_mm_lock);
523 	mm_slot = get_mm_slot(mm);
524 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
525 		hash_del(&mm_slot->hash);
526 		list_del(&mm_slot->mm_node);
527 		free = 1;
528 	}
529 	spin_unlock(&khugepaged_mm_lock);
530 
531 	if (free) {
532 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
533 		free_mm_slot(mm_slot);
534 		mmdrop(mm);
535 	} else if (mm_slot) {
536 		/*
537 		 * This is required to serialize against
538 		 * khugepaged_test_exit() (which is guaranteed to run
539 		 * under mmap_lock read mode). Stop here (after we
540 		 * return, all pagetables will be destroyed) until
541 		 * khugepaged has finished working on the pagetables
542 		 * under the mmap_lock.
543 		 */
544 		mmap_write_lock(mm);
545 		mmap_write_unlock(mm);
546 	}
547 }
548 
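/*
 * Undo the isolation of a small page that was prepared for collapse: drop
 * the NR_ISOLATED accounting, unlock the page and put it back on the LRU.
 */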
549 static void release_pte_page(struct page *page)
550 {
551 	mod_node_page_state(page_pgdat(page),
552 			NR_ISOLATED_ANON + page_is_file_lru(page),
553 			-compound_nr(page));
554 	unlock_page(page);
555 	putback_lru_page(page);
556 }
557 
558 static void release_pte_pages(pte_t *pte, pte_t *_pte,
559 		struct list_head *compound_pagelist)
560 {
561 	struct page *page, *tmp;
562 
563 	while (--_pte >= pte) {
564 		pte_t pteval = *_pte;
565 
566 		page = pte_page(pteval);
567 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
568 				!PageCompound(page))
569 			release_pte_page(page);
570 	}
571 
572 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
573 		list_del(&page->lru);
574 		release_pte_page(page);
575 	}
576 }
577 
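/*
 * Without any external (e.g. GUP) pins, page_count() equals the number of
 * page table mappings plus, if the page sits in the swap cache, one
 * reference per subpage.  Anything above that indicates an extra pin.
 */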
578 static bool is_refcount_suitable(struct page *page)
579 {
580 	int expected_refcount;
581 
582 	expected_refcount = total_mapcount(page);
583 	if (PageSwapCache(page))
584 		expected_refcount += compound_nr(page);
585 
586 	return page_count(page) == expected_refcount;
587 }
588 
589 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
590 					unsigned long address,
591 					pte_t *pte,
592 					struct list_head *compound_pagelist)
593 {
594 	struct page *page = NULL;
595 	pte_t *_pte;
596 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
597 	bool writable = false;
598 
599 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
600 	     _pte++, address += PAGE_SIZE) {
601 		pte_t pteval = *_pte;
602 		if (pte_none(pteval) || (pte_present(pteval) &&
603 				is_zero_pfn(pte_pfn(pteval)))) {
604 			if (!userfaultfd_armed(vma) &&
605 			    ++none_or_zero <= khugepaged_max_ptes_none) {
606 				continue;
607 			} else {
608 				result = SCAN_EXCEED_NONE_PTE;
609 				goto out;
610 			}
611 		}
612 		if (!pte_present(pteval)) {
613 			result = SCAN_PTE_NON_PRESENT;
614 			goto out;
615 		}
616 		page = vm_normal_page(vma, address, pteval);
617 		if (unlikely(!page)) {
618 			result = SCAN_PAGE_NULL;
619 			goto out;
620 		}
621 
622 		VM_BUG_ON_PAGE(!PageAnon(page), page);
623 
624 		if (page_mapcount(page) > 1 &&
625 				++shared > khugepaged_max_ptes_shared) {
626 			result = SCAN_EXCEED_SHARED_PTE;
627 			goto out;
628 		}
629 
630 		if (PageCompound(page)) {
631 			struct page *p;
632 			page = compound_head(page);
633 
634 			/*
635 			 * Check if we have dealt with the compound page
636 			 * already
637 			 */
638 			list_for_each_entry(p, compound_pagelist, lru) {
639 				if (page == p)
640 					goto next;
641 			}
642 		}
643 
644 		/*
645 		 * We can do it before isolate_lru_page because the
646 		 * page can't be freed from under us. NOTE: PG_lock
647 		 * is needed to serialize against split_huge_page
648 		 * when invoked from the VM.
649 		 */
650 		if (!trylock_page(page)) {
651 			result = SCAN_PAGE_LOCK;
652 			goto out;
653 		}
654 
655 		/*
656 		 * Check if the page has any GUP (or other external) pins.
657 		 *
658 		 * The page table that maps the page has already been unlinked
659 		 * from the page table tree and this process cannot get
660 		 * an additional pin on the page.
661 		 *
662 		 * New pins can come later if the page is shared across fork,
663 		 * but not from this process. The other process cannot write to
664 		 * the page, only trigger CoW.
665 		 */
666 		if (!is_refcount_suitable(page)) {
667 			unlock_page(page);
668 			result = SCAN_PAGE_COUNT;
669 			goto out;
670 		}
671 		if (!pte_write(pteval) && PageSwapCache(page) &&
672 				!reuse_swap_page(page, NULL)) {
673 			/*
674 			 * Page is in the swap cache and cannot be re-used.
675 			 * It cannot be collapsed into a THP.
676 			 */
677 			unlock_page(page);
678 			result = SCAN_SWAP_CACHE_PAGE;
679 			goto out;
680 		}
681 
682 		/*
683 		 * Isolate the page to avoid collapsing a hugepage
684 		 * currently in use by the VM.
685 		 */
686 		if (isolate_lru_page(page)) {
687 			unlock_page(page);
688 			result = SCAN_DEL_PAGE_LRU;
689 			goto out;
690 		}
691 		mod_node_page_state(page_pgdat(page),
692 				NR_ISOLATED_ANON + page_is_file_lru(page),
693 				compound_nr(page));
694 		VM_BUG_ON_PAGE(!PageLocked(page), page);
695 		VM_BUG_ON_PAGE(PageLRU(page), page);
696 
697 		if (PageCompound(page))
698 			list_add_tail(&page->lru, compound_pagelist);
699 next:
700 		/* There should be enough young ptes to collapse the page */
701 		if (pte_young(pteval) ||
702 		    page_is_young(page) || PageReferenced(page) ||
703 		    mmu_notifier_test_young(vma->vm_mm, address))
704 			referenced++;
705 
706 		if (pte_write(pteval))
707 			writable = true;
708 	}
709 	if (likely(writable)) {
710 		if (likely(referenced)) {
711 			result = SCAN_SUCCEED;
712 			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
713 							    referenced, writable, result);
714 			return 1;
715 		}
716 	} else {
717 		result = SCAN_PAGE_RO;
718 	}
719 
720 out:
721 	release_pte_pages(pte, _pte, compound_pagelist);
722 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
723 					    referenced, writable, result);
724 	return 0;
725 }
726 
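/*
 * Copy the contents of the isolated small pages into the new huge page and
 * clear the old ptes: pte_none/zero-pfn entries become cleared subpages,
 * everything else is copied, unmapped (rmap removed) and freed.
 */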
727 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
728 				      struct vm_area_struct *vma,
729 				      unsigned long address,
730 				      spinlock_t *ptl,
731 				      struct list_head *compound_pagelist)
732 {
733 	struct page *src_page, *tmp;
734 	pte_t *_pte;
735 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
736 				_pte++, page++, address += PAGE_SIZE) {
737 		pte_t pteval = *_pte;
738 
739 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
740 			clear_user_highpage(page, address);
741 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
742 			if (is_zero_pfn(pte_pfn(pteval))) {
743 				/*
744 				 * ptl mostly unnecessary.
745 				 */
746 				spin_lock(ptl);
747 				/*
748 				 * paravirt calls inside pte_clear here are
749 				 * superfluous.
750 				 */
751 				pte_clear(vma->vm_mm, address, _pte);
752 				spin_unlock(ptl);
753 			}
754 		} else {
755 			src_page = pte_page(pteval);
756 			copy_user_highpage(page, src_page, address, vma);
757 			if (!PageCompound(src_page))
758 				release_pte_page(src_page);
759 			/*
760 			 * ptl mostly unnecessary, but preempt has to
761 			 * be disabled to update the per-cpu stats
762 			 * inside page_remove_rmap().
763 			 */
764 			spin_lock(ptl);
765 			/*
766 			 * paravirt calls inside pte_clear here are
767 			 * superfluous.
768 			 */
769 			pte_clear(vma->vm_mm, address, _pte);
770 			page_remove_rmap(src_page, false);
771 			spin_unlock(ptl);
772 			free_page_and_swap_cache(src_page);
773 		}
774 	}
775 
776 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
777 		list_del(&src_page->lru);
778 		release_pte_page(src_page);
779 	}
780 }
781 
782 static void khugepaged_alloc_sleep(void)
783 {
784 	DEFINE_WAIT(wait);
785 
786 	add_wait_queue(&khugepaged_wait, &wait);
787 	freezable_schedule_timeout_interruptible(
788 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
789 	remove_wait_queue(&khugepaged_wait, &wait);
790 }
791 
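/*
 * Per-scan histogram of the NUMA nodes that the scanned pages live on,
 * consulted by khugepaged_scan_abort() and khugepaged_find_target_node().
 */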
792 static int khugepaged_node_load[MAX_NUMNODES];
793 
794 static bool khugepaged_scan_abort(int nid)
795 {
796 	int i;
797 
798 	/*
799 	 * If node_reclaim_mode is disabled, then no extra effort is made to
800 	 * allocate memory locally.
801 	 */
802 	if (!node_reclaim_mode)
803 		return false;
804 
805 	/* If there is a count for this node already, it must be acceptable */
806 	if (khugepaged_node_load[nid])
807 		return false;
808 
809 	for (i = 0; i < MAX_NUMNODES; i++) {
810 		if (!khugepaged_node_load[i])
811 			continue;
812 		if (node_distance(nid, i) > node_reclaim_distance)
813 			return true;
814 	}
815 	return false;
816 }
817 
818 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
819 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
820 {
821 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
822 }
823 
824 #ifdef CONFIG_NUMA
825 static int khugepaged_find_target_node(void)
826 {
827 	static int last_khugepaged_target_node = NUMA_NO_NODE;
828 	int nid, target_node = 0, max_value = 0;
829 
830 	/* find first node with max normal pages hit */
831 	for (nid = 0; nid < MAX_NUMNODES; nid++)
832 		if (khugepaged_node_load[nid] > max_value) {
833 			max_value = khugepaged_node_load[nid];
834 			target_node = nid;
835 		}
836 
837 	/* do some balancing if several nodes have the same hit count */
838 	if (target_node <= last_khugepaged_target_node)
839 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
840 				nid++)
841 			if (max_value == khugepaged_node_load[nid]) {
842 				target_node = nid;
843 				break;
844 			}
845 
846 	last_khugepaged_target_node = target_node;
847 	return target_node;
848 }
849 
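/*
 * If the previous allocation attempt failed (*hpage is an ERR_PTR), sleep
 * once and then retry unless the caller asked not to wait; otherwise drop
 * any unused page left over from an earlier iteration.
 */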
850 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
851 {
852 	if (IS_ERR(*hpage)) {
853 		if (!*wait)
854 			return false;
855 
856 		*wait = false;
857 		*hpage = NULL;
858 		khugepaged_alloc_sleep();
859 	} else if (*hpage) {
860 		put_page(*hpage);
861 		*hpage = NULL;
862 	}
863 
864 	return true;
865 }
866 
867 static struct page *
868 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
869 {
870 	VM_BUG_ON_PAGE(*hpage, *hpage);
871 
872 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
873 	if (unlikely(!*hpage)) {
874 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
875 		*hpage = ERR_PTR(-ENOMEM);
876 		return NULL;
877 	}
878 
879 	prep_transhuge_page(*hpage);
880 	count_vm_event(THP_COLLAPSE_ALLOC);
881 	return *hpage;
882 }
883 #else
884 static int khugepaged_find_target_node(void)
885 {
886 	return 0;
887 }
888 
889 static inline struct page *alloc_khugepaged_hugepage(void)
890 {
891 	struct page *page;
892 
893 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
894 			   HPAGE_PMD_ORDER);
895 	if (page)
896 		prep_transhuge_page(page);
897 	return page;
898 }
899 
900 static struct page *khugepaged_alloc_hugepage(bool *wait)
901 {
902 	struct page *hpage;
903 
904 	do {
905 		hpage = alloc_khugepaged_hugepage();
906 		if (!hpage) {
907 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
908 			if (!*wait)
909 				return NULL;
910 
911 			*wait = false;
912 			khugepaged_alloc_sleep();
913 		} else
914 			count_vm_event(THP_COLLAPSE_ALLOC);
915 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
916 
917 	return hpage;
918 }
919 
920 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
921 {
922 	/*
923 	 * If the hpage allocated earlier was briefly exposed in page cache
924 	 * before collapse_file() failed, it is possible that racing lookups
925 	 * have not yet completed, and would then be unpleasantly surprised by
926 	 * finding the hpage reused for the same mapping at a different offset.
927 	 * Just release the previous allocation if there is any danger of that.
928 	 */
929 	if (*hpage && page_count(*hpage) > 1) {
930 		put_page(*hpage);
931 		*hpage = NULL;
932 	}
933 
934 	if (!*hpage)
935 		*hpage = khugepaged_alloc_hugepage(wait);
936 
937 	if (unlikely(!*hpage))
938 		return false;
939 
940 	return true;
941 }
942 
943 static struct page *
944 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
945 {
946 	VM_BUG_ON(!*hpage);
947 
948 	return  *hpage;
949 }
950 #endif
951 
952 /*
953  * If the mmap_lock was temporarily dropped, revalidate the vma
954  * after taking the mmap_lock again.
955  * Return 0 on success, otherwise return a non-zero
956  * value (scan code).
957  */
958 
959 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
960 		struct vm_area_struct **vmap)
961 {
962 	struct vm_area_struct *vma;
963 	unsigned long hstart, hend;
964 
965 	if (unlikely(khugepaged_test_exit(mm)))
966 		return SCAN_ANY_PROCESS;
967 
968 	*vmap = vma = find_vma(mm, address);
969 	if (!vma)
970 		return SCAN_VMA_NULL;
971 
972 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
973 	hend = vma->vm_end & HPAGE_PMD_MASK;
974 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
975 		return SCAN_ADDRESS_RANGE;
976 	if (!hugepage_vma_check(vma, vma->vm_flags))
977 		return SCAN_VMA_CHECK;
978 	/* Anon VMA expected */
979 	if (!vma->anon_vma || vma->vm_ops)
980 		return SCAN_VMA_CHECK;
981 	return 0;
982 }
983 
984 /*
985  * Bring missing pages in from swap, to complete THP collapse.
986  * Only done if khugepaged_scan_pmd believes it is worthwhile.
987  *
988  * Called and returns without pte mapped or spinlocks held,
989  * but with mmap_lock held to protect against vma changes.
990  */
991 
992 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
993 					struct vm_area_struct *vma,
994 					unsigned long haddr, pmd_t *pmd,
995 					int referenced)
996 {
997 	int swapped_in = 0;
998 	vm_fault_t ret = 0;
999 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1000 
1001 	for (address = haddr; address < end; address += PAGE_SIZE) {
1002 		struct vm_fault vmf = {
1003 			.vma = vma,
1004 			.address = address,
1005 			.pgoff = linear_page_index(vma, haddr),
1006 			.flags = FAULT_FLAG_ALLOW_RETRY,
1007 			.pmd = pmd,
1008 		};
1009 
1010 		vmf.pte = pte_offset_map(pmd, address);
1011 		vmf.orig_pte = *vmf.pte;
1012 		if (!is_swap_pte(vmf.orig_pte)) {
1013 			pte_unmap(vmf.pte);
1014 			continue;
1015 		}
1016 		swapped_in++;
1017 		ret = do_swap_page(&vmf);
1018 
1019 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1020 		if (ret & VM_FAULT_RETRY) {
1021 			mmap_read_lock(mm);
1022 			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1023 				/* vma is no longer available, don't continue to swapin */
1024 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1025 				return false;
1026 			}
1027 			/* check if the pmd is still valid */
1028 			if (mm_find_pmd(mm, haddr) != pmd) {
1029 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1030 				return false;
1031 			}
1032 		}
1033 		if (ret & VM_FAULT_ERROR) {
1034 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1035 			return false;
1036 		}
1037 	}
1038 
1039 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1040 	if (swapped_in)
1041 		lru_add_drain();
1042 
1043 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1044 	return true;
1045 }
1046 
1047 static void collapse_huge_page(struct mm_struct *mm,
1048 				   unsigned long address,
1049 				   struct page **hpage,
1050 				   int node, int referenced, int unmapped)
1051 {
1052 	LIST_HEAD(compound_pagelist);
1053 	pmd_t *pmd, _pmd;
1054 	pte_t *pte;
1055 	pgtable_t pgtable;
1056 	struct page *new_page;
1057 	spinlock_t *pmd_ptl, *pte_ptl;
1058 	int isolated = 0, result = 0;
1059 	struct vm_area_struct *vma;
1060 	struct mmu_notifier_range range;
1061 	gfp_t gfp;
1062 
1063 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1064 
1065 	/* Only allocate from the target node */
1066 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1067 
1068 	/*
1069 	 * Before allocating the hugepage, release the mmap_lock read lock.
1070 	 * The allocation can take potentially a long time if it involves
1071 	 * sync compaction, and we do not need to hold the mmap_lock during
1072 	 * that. We will recheck the vma after taking it again in write mode.
1073 	 */
1074 	mmap_read_unlock(mm);
1075 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1076 	if (!new_page) {
1077 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1078 		goto out_nolock;
1079 	}
1080 
1081 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1082 		result = SCAN_CGROUP_CHARGE_FAIL;
1083 		goto out_nolock;
1084 	}
1085 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1086 
1087 	mmap_read_lock(mm);
1088 	result = hugepage_vma_revalidate(mm, address, &vma);
1089 	if (result) {
1090 		mmap_read_unlock(mm);
1091 		goto out_nolock;
1092 	}
1093 
1094 	pmd = mm_find_pmd(mm, address);
1095 	if (!pmd) {
1096 		result = SCAN_PMD_NULL;
1097 		mmap_read_unlock(mm);
1098 		goto out_nolock;
1099 	}
1100 
1101 	/*
1102 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1103 	 * If it fails, we release mmap_lock and jump out_nolock.
1104 	 * Continuing to collapse causes inconsistency.
1105 	 */
1106 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1107 						     pmd, referenced)) {
1108 		mmap_read_unlock(mm);
1109 		goto out_nolock;
1110 	}
1111 
1112 	mmap_read_unlock(mm);
1113 	/*
1114 	 * Prevent all access to the pagetables, with the exception of
1115 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1116 	 * (handled by the anon_vma lock + PG_lock).
1117 	 */
1118 	mmap_write_lock(mm);
1119 	result = hugepage_vma_revalidate(mm, address, &vma);
1120 	if (result)
1121 		goto out;
1122 	/* check if the pmd is still valid */
1123 	if (mm_find_pmd(mm, address) != pmd)
1124 		goto out;
1125 
1126 	anon_vma_lock_write(vma->anon_vma);
1127 
1128 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1129 				address, address + HPAGE_PMD_SIZE);
1130 	mmu_notifier_invalidate_range_start(&range);
1131 
1132 	pte = pte_offset_map(pmd, address);
1133 	pte_ptl = pte_lockptr(mm, pmd);
1134 
1135 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1136 	/*
1137 	 * After this gup_fast can't run anymore. This also removes
1138 	 * any huge TLB entry from the CPU so we won't allow
1139 	 * huge and small TLB entries for the same virtual address
1140 	 * to avoid the risk of CPU bugs in that area.
1141 	 */
1142 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1143 	spin_unlock(pmd_ptl);
1144 	mmu_notifier_invalidate_range_end(&range);
1145 
1146 	spin_lock(pte_ptl);
1147 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1148 			&compound_pagelist);
1149 	spin_unlock(pte_ptl);
1150 
1151 	if (unlikely(!isolated)) {
1152 		pte_unmap(pte);
1153 		spin_lock(pmd_ptl);
1154 		BUG_ON(!pmd_none(*pmd));
1155 		/*
1156 		 * We can only use set_pmd_at when establishing
1157 		 * hugepmds and never for establishing regular pmds that
1158 		 * point to regular pagetables. Use pmd_populate for that.
1159 		 */
1160 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1161 		spin_unlock(pmd_ptl);
1162 		anon_vma_unlock_write(vma->anon_vma);
1163 		result = SCAN_FAIL;
1164 		goto out;
1165 	}
1166 
1167 	/*
1168 	 * All pages are isolated and locked so anon_vma rmap
1169 	 * can't run anymore.
1170 	 */
1171 	anon_vma_unlock_write(vma->anon_vma);
1172 
1173 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1174 			&compound_pagelist);
1175 	pte_unmap(pte);
1176 	__SetPageUptodate(new_page);
1177 	pgtable = pmd_pgtable(_pmd);
1178 
1179 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1180 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1181 
1182 	/*
1183 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1184 	 * this is needed to prevent the copy_huge_page writes from becoming
1185 	 * visible after the set_pmd_at() write.
1186 	 */
1187 	smp_wmb();
1188 
1189 	spin_lock(pmd_ptl);
1190 	BUG_ON(!pmd_none(*pmd));
1191 	page_add_new_anon_rmap(new_page, vma, address, true);
1192 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1193 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1194 	set_pmd_at(mm, address, pmd, _pmd);
1195 	update_mmu_cache_pmd(vma, address, pmd);
1196 	spin_unlock(pmd_ptl);
1197 
1198 	*hpage = NULL;
1199 
1200 	khugepaged_pages_collapsed++;
1201 	result = SCAN_SUCCEED;
1202 out_up_write:
1203 	mmap_write_unlock(mm);
1204 out_nolock:
1205 	if (!IS_ERR_OR_NULL(*hpage))
1206 		mem_cgroup_uncharge(*hpage);
1207 	trace_mm_collapse_huge_page(mm, isolated, result);
1208 	return;
1209 out:
1210 	goto out_up_write;
1211 }
1212 
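/*
 * Scan one PMD-sized range: walk its ptes, bail out on any condition that
 * makes collapse impossible or unprofitable, and record which nodes the
 * pages live on.  If the range qualifies, call collapse_huge_page(), which
 * returns with the mmap_lock released.  Returns 1 when a collapse was
 * attempted, 0 otherwise.
 */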
1213 static int khugepaged_scan_pmd(struct mm_struct *mm,
1214 			       struct vm_area_struct *vma,
1215 			       unsigned long address,
1216 			       struct page **hpage)
1217 {
1218 	pmd_t *pmd;
1219 	pte_t *pte, *_pte;
1220 	int ret = 0, result = 0, referenced = 0;
1221 	int none_or_zero = 0, shared = 0;
1222 	struct page *page = NULL;
1223 	unsigned long _address;
1224 	spinlock_t *ptl;
1225 	int node = NUMA_NO_NODE, unmapped = 0;
1226 	bool writable = false;
1227 
1228 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1229 
1230 	pmd = mm_find_pmd(mm, address);
1231 	if (!pmd) {
1232 		result = SCAN_PMD_NULL;
1233 		goto out;
1234 	}
1235 
1236 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1237 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1238 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1239 	     _pte++, _address += PAGE_SIZE) {
1240 		pte_t pteval = *_pte;
1241 		if (is_swap_pte(pteval)) {
1242 			if (++unmapped <= khugepaged_max_ptes_swap) {
1243 				/*
1244 				 * Always be strict with uffd-wp
1245 				 * enabled swap entries.  Please see
1246 				 * comment below for pte_uffd_wp().
1247 				 */
1248 				if (pte_swp_uffd_wp(pteval)) {
1249 					result = SCAN_PTE_UFFD_WP;
1250 					goto out_unmap;
1251 				}
1252 				continue;
1253 			} else {
1254 				result = SCAN_EXCEED_SWAP_PTE;
1255 				goto out_unmap;
1256 			}
1257 		}
1258 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1259 			if (!userfaultfd_armed(vma) &&
1260 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1261 				continue;
1262 			} else {
1263 				result = SCAN_EXCEED_NONE_PTE;
1264 				goto out_unmap;
1265 			}
1266 		}
1267 		if (!pte_present(pteval)) {
1268 			result = SCAN_PTE_NON_PRESENT;
1269 			goto out_unmap;
1270 		}
1271 		if (pte_uffd_wp(pteval)) {
1272 			/*
1273 			 * Don't collapse the page if any of the small
1274 			 * PTEs are armed with uffd write protection.
1275 			 * Here we can also mark the new huge pmd as
1276 			 * write protected if any of the small ones is
1277 			 * marked, but that could bring unexpected
1278 			 * userfault messages that fall outside of
1279 			 * the registered range.  So, just keep it simple.
1280 			 */
1281 			result = SCAN_PTE_UFFD_WP;
1282 			goto out_unmap;
1283 		}
1284 		if (pte_write(pteval))
1285 			writable = true;
1286 
1287 		page = vm_normal_page(vma, _address, pteval);
1288 		if (unlikely(!page)) {
1289 			result = SCAN_PAGE_NULL;
1290 			goto out_unmap;
1291 		}
1292 
1293 		if (page_mapcount(page) > 1 &&
1294 				++shared > khugepaged_max_ptes_shared) {
1295 			result = SCAN_EXCEED_SHARED_PTE;
1296 			goto out_unmap;
1297 		}
1298 
1299 		page = compound_head(page);
1300 
1301 		/*
1302 		 * Record which node the original page is from and save this
1303 		 * information to khugepaged_node_load[].
1304 		 * Khugepaged will allocate the hugepage from the node with the
1305 		 * most hits.
1306 		 */
1307 		node = page_to_nid(page);
1308 		if (khugepaged_scan_abort(node)) {
1309 			result = SCAN_SCAN_ABORT;
1310 			goto out_unmap;
1311 		}
1312 		khugepaged_node_load[node]++;
1313 		if (!PageLRU(page)) {
1314 			result = SCAN_PAGE_LRU;
1315 			goto out_unmap;
1316 		}
1317 		if (PageLocked(page)) {
1318 			result = SCAN_PAGE_LOCK;
1319 			goto out_unmap;
1320 		}
1321 		if (!PageAnon(page)) {
1322 			result = SCAN_PAGE_ANON;
1323 			goto out_unmap;
1324 		}
1325 
1326 		/*
1327 		 * Check if the page has any GUP (or other external) pins.
1328 		 *
1329 		 * Here the check is racy: it may see total_mapcount > refcount
1330 		 * in some cases.
1331 		 * For example, take one process with one forked child process.
1332 		 * The parent has the PMD split due to MADV_DONTNEED, then
1333 		 * the child tries to unmap the whole PMD, but khugepaged
1334 		 * may be scanning the parent between the child clearing the
1335 		 * PageDoubleMap flag and decrementing the mapcount.  So
1336 		 * khugepaged may see total_mapcount > refcount.
1337 		 *
1338 		 * But such a case is ephemeral; we could always retry the
1339 		 * collapse later.  However it may report a false positive if
1340 		 * the page has excessive GUP pins (e.g. 512).  Anyway, the same
1341 		 * check will be done again later, so the risk seems low.
1342 		 */
1343 		if (!is_refcount_suitable(page)) {
1344 			result = SCAN_PAGE_COUNT;
1345 			goto out_unmap;
1346 		}
1347 		if (pte_young(pteval) ||
1348 		    page_is_young(page) || PageReferenced(page) ||
1349 		    mmu_notifier_test_young(vma->vm_mm, address))
1350 			referenced++;
1351 	}
1352 	if (!writable) {
1353 		result = SCAN_PAGE_RO;
1354 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1355 		result = SCAN_LACK_REFERENCED_PAGE;
1356 	} else {
1357 		result = SCAN_SUCCEED;
1358 		ret = 1;
1359 	}
1360 out_unmap:
1361 	pte_unmap_unlock(pte, ptl);
1362 	if (ret) {
1363 		node = khugepaged_find_target_node();
1364 		/* collapse_huge_page will return with the mmap_lock released */
1365 		collapse_huge_page(mm, address, hpage, node,
1366 				referenced, unmapped);
1367 	}
1368 out:
1369 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1370 				     none_or_zero, result, unmapped);
1371 	return ret;
1372 }
1373 
1374 static void collect_mm_slot(struct mm_slot *mm_slot)
1375 {
1376 	struct mm_struct *mm = mm_slot->mm;
1377 
1378 	lockdep_assert_held(&khugepaged_mm_lock);
1379 
1380 	if (khugepaged_test_exit(mm)) {
1381 		/* free mm_slot */
1382 		hash_del(&mm_slot->hash);
1383 		list_del(&mm_slot->mm_node);
1384 
1385 		/*
1386 		 * Not strictly needed because the mm exited already.
1387 		 *
1388 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1389 		 */
1390 
1391 		/* khugepaged_mm_lock actually not necessary for the below */
1392 		free_mm_slot(mm_slot);
1393 		mmdrop(mm);
1394 	}
1395 }
1396 
1397 #ifdef CONFIG_SHMEM
1398 /*
1399  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1400  * khugepaged should try to collapse the page table.
1401  */
1402 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1403 					 unsigned long addr)
1404 {
1405 	struct mm_slot *mm_slot;
1406 
1407 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1408 
1409 	spin_lock(&khugepaged_mm_lock);
1410 	mm_slot = get_mm_slot(mm);
1411 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1412 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1413 	spin_unlock(&khugepaged_mm_lock);
1414 	return 0;
1415 }
1416 
1417 /**
1418  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1419  * address haddr.
1420  *
1421  * @mm: process address space where collapse happens
1422  * @addr: THP collapse address
1423  *
1424  * This function checks whether all the PTEs in the PMD are pointing to the
1425  * right THP. If so, retract the page table so the THP can refault in
1426  * as pmd-mapped.
1427  */
1428 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1429 {
1430 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1431 	struct vm_area_struct *vma = find_vma(mm, haddr);
1432 	struct page *hpage;
1433 	pte_t *start_pte, *pte;
1434 	pmd_t *pmd, _pmd;
1435 	spinlock_t *ptl;
1436 	int count = 0;
1437 	int i;
1438 
1439 	if (!vma || !vma->vm_file ||
1440 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1441 		return;
1442 
1443 	/*
1444 	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1445 	 * collapsed by this mm. But we can still collapse if the page is
1446 	 * a valid THP. Add the extra VM_HUGEPAGE so hugepage_vma_check()
1447 	 * will not fail the vma for missing VM_HUGEPAGE.
1448 	 */
1449 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1450 		return;
1451 
1452 	hpage = find_lock_page(vma->vm_file->f_mapping,
1453 			       linear_page_index(vma, haddr));
1454 	if (!hpage)
1455 		return;
1456 
1457 	if (!PageHead(hpage))
1458 		goto drop_hpage;
1459 
1460 	pmd = mm_find_pmd(mm, haddr);
1461 	if (!pmd)
1462 		goto drop_hpage;
1463 
1464 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1465 
1466 	/* step 1: check all mapped PTEs are to the right huge page */
1467 	for (i = 0, addr = haddr, pte = start_pte;
1468 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1469 		struct page *page;
1470 
1471 		/* empty pte, skip */
1472 		if (pte_none(*pte))
1473 			continue;
1474 
1475 		/* page swapped out, abort */
1476 		if (!pte_present(*pte))
1477 			goto abort;
1478 
1479 		page = vm_normal_page(vma, addr, *pte);
1480 
1481 		/*
1482 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1483 		 * page table, but the new page will not be a subpage of hpage.
1484 		 */
1485 		if (hpage + i != page)
1486 			goto abort;
1487 		count++;
1488 	}
1489 
1490 	/* step 2: adjust rmap */
1491 	for (i = 0, addr = haddr, pte = start_pte;
1492 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1493 		struct page *page;
1494 
1495 		if (pte_none(*pte))
1496 			continue;
1497 		page = vm_normal_page(vma, addr, *pte);
1498 		page_remove_rmap(page, false);
1499 	}
1500 
1501 	pte_unmap_unlock(start_pte, ptl);
1502 
1503 	/* step 3: set proper refcount and mm_counters. */
1504 	if (count) {
1505 		page_ref_sub(hpage, count);
1506 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1507 	}
1508 
1509 	/* step 4: collapse pmd */
1510 	ptl = pmd_lock(vma->vm_mm, pmd);
1511 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1512 	spin_unlock(ptl);
1513 	mm_dec_nr_ptes(mm);
1514 	pte_free(mm, pmd_pgtable(_pmd));
1515 
1516 drop_hpage:
1517 	unlock_page(hpage);
1518 	put_page(hpage);
1519 	return;
1520 
1521 abort:
1522 	pte_unmap_unlock(start_pte, ptl);
1523 	goto drop_hpage;
1524 }
1525 
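/*
 * Flush the list of deferred pte-mapped THP addresses for this mm, trying
 * to collapse each one.  Needs the mmap_lock for writing, so it backs off
 * with -EBUSY if the lock cannot be taken without blocking.
 */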
1526 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1527 {
1528 	struct mm_struct *mm = mm_slot->mm;
1529 	int i;
1530 
1531 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1532 		return 0;
1533 
1534 	if (!mmap_write_trylock(mm))
1535 		return -EBUSY;
1536 
1537 	if (unlikely(khugepaged_test_exit(mm)))
1538 		goto out;
1539 
1540 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1541 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1542 
1543 out:
1544 	mm_slot->nr_pte_mapped_thp = 0;
1545 	mmap_write_unlock(mm);
1546 	return 0;
1547 }
1548 
1549 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1550 {
1551 	struct vm_area_struct *vma;
1552 	struct mm_struct *mm;
1553 	unsigned long addr;
1554 	pmd_t *pmd, _pmd;
1555 
1556 	i_mmap_lock_write(mapping);
1557 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1558 		/*
1559 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1560 		 * got written to. These VMAs are likely not worth taking
1561 		 * mmap_write_lock(mm) for, as the PMD-mapping is likely to be
1562 		 * split later anyway.
1563 		 *
1564 		 * Note that the vma->anon_vma check is racy: the fault path can
1565 		 * set it up after the check but before we take the mmap_lock.
1566 		 * But the page lock would prevent establishing any new ptes of
1567 		 * the page, so we are safe.
1568 		 *
1569 		 * An alternative would be to drop the check, but check that the
1570 		 * page table is clear before calling pmdp_collapse_flush() under
1571 		 * the ptl. It has a higher chance to recover the THP for the
1572 		 * VMA, but also a higher cost.
1573 		 */
1574 		if (vma->anon_vma)
1575 			continue;
1576 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1577 		if (addr & ~HPAGE_PMD_MASK)
1578 			continue;
1579 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1580 			continue;
1581 		mm = vma->vm_mm;
1582 		pmd = mm_find_pmd(mm, addr);
1583 		if (!pmd)
1584 			continue;
1585 		/*
1586 		 * We need exclusive mmap_lock to retract page table.
1587 		 *
1588 		 * We use trylock due to lock inversion: we need to acquire
1589 		 * mmap_lock while holding page lock. Fault path does it in
1590 		 * reverse order. Trylock is a way to avoid deadlock.
1591 		 */
1592 		if (mmap_write_trylock(mm)) {
1593 			if (!khugepaged_test_exit(mm)) {
1594 				spinlock_t *ptl = pmd_lock(mm, pmd);
1595 				/* assume page table is clear */
1596 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1597 				spin_unlock(ptl);
1598 				mm_dec_nr_ptes(mm);
1599 				pte_free(mm, pmd_pgtable(_pmd));
1600 			}
1601 			mmap_write_unlock(mm);
1602 		} else {
1603 			/* Try again later */
1604 			khugepaged_add_pte_mapped_thp(mm, addr);
1605 		}
1606 	}
1607 	i_mmap_unlock_write(mapping);
1608 }
1609 
1610 /**
1611  * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1612  *
1613  * @mm: process address space where collapse happens
1614  * @file: file that the collapse happens on
1615  * @start: collapse start address
1616  * @hpage: newly allocated huge page for collapse
1617  * @node: node the new huge page is allocated from
1618  *
1619  * Basic scheme is simple, details are more complex:
1620  *  - allocate and lock a new huge page;
1621  *  - scan page cache replacing old pages with the new one
1622  *    + swap/gup in pages if necessary;
1623  *    + fill in gaps;
1624  *    + keep old pages around in case rollback is required;
1625  *  - if replacing succeeds:
1626  *    + copy data over;
1627  *    + free old pages;
1628  *    + unlock huge page;
1629  *  - if replacing failed;
1630  *  - if replacing failed:
1631  *    + restore gaps in the page cache;
1632  *    + unlock and free huge page;
1633  */
1634 static void collapse_file(struct mm_struct *mm,
1635 		struct file *file, pgoff_t start,
1636 		struct page **hpage, int node)
1637 {
1638 	struct address_space *mapping = file->f_mapping;
1639 	gfp_t gfp;
1640 	struct page *new_page;
1641 	pgoff_t index, end = start + HPAGE_PMD_NR;
1642 	LIST_HEAD(pagelist);
1643 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1644 	int nr_none = 0, result = SCAN_SUCCEED;
1645 	bool is_shmem = shmem_file(file);
1646 
1647 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1648 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1649 
1650 	/* Only allocate from the target node */
1651 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1652 
1653 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1654 	if (!new_page) {
1655 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1656 		goto out;
1657 	}
1658 
1659 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1660 		result = SCAN_CGROUP_CHARGE_FAIL;
1661 		goto out;
1662 	}
1663 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1664 
1665 	/* This will be less messy when we use multi-index entries */
1666 	do {
1667 		xas_lock_irq(&xas);
1668 		xas_create_range(&xas);
1669 		if (!xas_error(&xas))
1670 			break;
1671 		xas_unlock_irq(&xas);
1672 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1673 			result = SCAN_FAIL;
1674 			goto out;
1675 		}
1676 	} while (1);
1677 
1678 	__SetPageLocked(new_page);
1679 	if (is_shmem)
1680 		__SetPageSwapBacked(new_page);
1681 	new_page->index = start;
1682 	new_page->mapping = mapping;
1683 
1684 	/*
1685 	 * At this point the new_page is locked and not up-to-date.
1686 	 * It's safe to insert it into the page cache, because nobody would
1687 	 * be able to map it or use it in another way until we unlock it.
1688 	 */
1689 
1690 	xas_set(&xas, start);
1691 	for (index = start; index < end; index++) {
1692 		struct page *page = xas_next(&xas);
1693 
1694 		VM_BUG_ON(index != xas.xa_index);
1695 		if (is_shmem) {
1696 			if (!page) {
1697 				/*
1698 				 * Stop if extent has been truncated or
1699 				 * hole-punched, and is now completely
1700 				 * empty.
1701 				 */
1702 				if (index == start) {
1703 					if (!xas_next_entry(&xas, end - 1)) {
1704 						result = SCAN_TRUNCATED;
1705 						goto xa_locked;
1706 					}
1707 					xas_set(&xas, index);
1708 				}
1709 				if (!shmem_charge(mapping->host, 1)) {
1710 					result = SCAN_FAIL;
1711 					goto xa_locked;
1712 				}
1713 				xas_store(&xas, new_page);
1714 				nr_none++;
1715 				continue;
1716 			}
1717 
1718 			if (xa_is_value(page) || !PageUptodate(page)) {
1719 				xas_unlock_irq(&xas);
1720 				/* swap in or instantiate fallocated page */
1721 				if (shmem_getpage(mapping->host, index, &page,
1722 						  SGP_NOHUGE)) {
1723 					result = SCAN_FAIL;
1724 					goto xa_unlocked;
1725 				}
1726 			} else if (trylock_page(page)) {
1727 				get_page(page);
1728 				xas_unlock_irq(&xas);
1729 			} else {
1730 				result = SCAN_PAGE_LOCK;
1731 				goto xa_locked;
1732 			}
1733 		} else {	/* !is_shmem */
1734 			if (!page || xa_is_value(page)) {
1735 				xas_unlock_irq(&xas);
1736 				page_cache_sync_readahead(mapping, &file->f_ra,
1737 							  file, index,
1738 							  end - index);
1739 				/* drain pagevecs to help isolate_lru_page() */
1740 				lru_add_drain();
1741 				page = find_lock_page(mapping, index);
1742 				if (unlikely(page == NULL)) {
1743 					result = SCAN_FAIL;
1744 					goto xa_unlocked;
1745 				}
1746 			} else if (PageDirty(page)) {
1747 				/*
1748 				 * khugepaged only works on read-only fd,
1749 				 * so this page is dirty because it hasn't
1750 				 * been flushed since first write. There
1751 				 * won't be new dirty pages.
1752 				 *
1753 				 * Trigger async flush here and hope the
1754 				 * writeback is done when khugepaged
1755 				 * revisits this page.
1756 				 *
1757 				 * This is a one-off situation. We are not
1758 				 * forcing writeback in a loop.
1759 				 */
1760 				xas_unlock_irq(&xas);
1761 				filemap_flush(mapping);
1762 				result = SCAN_FAIL;
1763 				goto xa_unlocked;
1764 			} else if (trylock_page(page)) {
1765 				get_page(page);
1766 				xas_unlock_irq(&xas);
1767 			} else {
1768 				result = SCAN_PAGE_LOCK;
1769 				goto xa_locked;
1770 			}
1771 		}
1772 
1773 		/*
1774 		 * The page must be locked, so we can drop the i_pages lock
1775 		 * without racing with truncate.
1776 		 */
1777 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1778 
1779 		/* make sure the page is up to date */
1780 		if (unlikely(!PageUptodate(page))) {
1781 			result = SCAN_FAIL;
1782 			goto out_unlock;
1783 		}
1784 
1785 		/*
1786 		 * If file was truncated then extended, or hole-punched, before
1787 		 * we locked the first page, then a THP might be there already.
1788 		 */
1789 		if (PageTransCompound(page)) {
1790 			result = SCAN_PAGE_COMPOUND;
1791 			goto out_unlock;
1792 		}
1793 
1794 		if (page_mapping(page) != mapping) {
1795 			result = SCAN_TRUNCATED;
1796 			goto out_unlock;
1797 		}
1798 
1799 		if (!is_shmem && PageDirty(page)) {
1800 			/*
1801 			 * khugepaged only works on read-only fd, so this
1802 			 * page is dirty because it hasn't been flushed
1803 			 * since first write.
1804 			 */
1805 			result = SCAN_FAIL;
1806 			goto out_unlock;
1807 		}
1808 
1809 		if (isolate_lru_page(page)) {
1810 			result = SCAN_DEL_PAGE_LRU;
1811 			goto out_unlock;
1812 		}
1813 
1814 		if (page_has_private(page) &&
1815 		    !try_to_release_page(page, GFP_KERNEL)) {
1816 			result = SCAN_PAGE_HAS_PRIVATE;
1817 			putback_lru_page(page);
1818 			goto out_unlock;
1819 		}
1820 
1821 		if (page_mapped(page))
1822 			unmap_mapping_pages(mapping, index, 1, false);
1823 
1824 		xas_lock_irq(&xas);
1825 		xas_set(&xas, index);
1826 
1827 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1828 		VM_BUG_ON_PAGE(page_mapped(page), page);
1829 
1830 		/*
1831 		 * The page is expected to have page_count() == 3:
1832 		 *  - we hold a pin on it;
1833 		 *  - one reference from page cache;
1834 		 *  - one from isolate_lru_page;
1835 		 */
1836 		if (!page_ref_freeze(page, 3)) {
1837 			result = SCAN_PAGE_COUNT;
1838 			xas_unlock_irq(&xas);
1839 			putback_lru_page(page);
1840 			goto out_unlock;
1841 		}
1842 
1843 		/*
1844 		 * Add the page to the list to be able to undo the collapse if
1845 		 * something goes wrong.
1846 		 */
1847 		list_add_tail(&page->lru, &pagelist);
1848 
1849 		/* Finally, replace with the new page. */
1850 		xas_store(&xas, new_page);
1851 		continue;
1852 out_unlock:
1853 		unlock_page(page);
1854 		put_page(page);
1855 		goto xa_unlocked;
1856 	}
1857 
1858 	if (is_shmem)
1859 		__inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
1860 	else {
1861 		__inc_lruvec_page_state(new_page, NR_FILE_THPS);
1862 		filemap_nr_thps_inc(mapping);
1863 	}
1864 
1865 	if (nr_none) {
1866 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1867 		if (is_shmem)
1868 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1869 	}
1870 
1871 xa_locked:
1872 	xas_unlock_irq(&xas);
1873 xa_unlocked:
1874 
1875 	if (result == SCAN_SUCCEED) {
1876 		struct page *page, *tmp;
1877 
1878 		/*
1879 		 * Replacing the old pages with the new one has succeeded; now
1880 		 * we need to copy the contents and free the old pages.
1881 		 */
1882 		index = start;
1883 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
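			/*
			 * Indices before this page's index had no old page in
			 * the cache (they were the nr_none holes we filled);
			 * zero-fill the corresponding subpages of the THP.
			 */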
1884 			while (index < page->index) {
1885 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1886 				index++;
1887 			}
1888 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1889 					page);
1890 			list_del(&page->lru);
1891 			page->mapping = NULL;
1892 			page_ref_unfreeze(page, 1);
1893 			ClearPageActive(page);
1894 			ClearPageUnevictable(page);
1895 			unlock_page(page);
1896 			put_page(page);
1897 			index++;
1898 		}
1899 		while (index < end) {
1900 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1901 			index++;
1902 		}
1903 
1904 		SetPageUptodate(new_page);
1905 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1906 		if (is_shmem)
1907 			set_page_dirty(new_page);
1908 		lru_cache_add(new_page);
1909 
1910 		/*
1911 		 * Remove the pte page tables so the range can be re-faulted
1911 		 * as a huge page.
1912 		 */
1913 		retract_page_tables(mapping, start);
1914 		*hpage = NULL;
1915 
1916 		khugepaged_pages_collapsed++;
1917 	} else {
1918 		struct page *page;
1919 
1920 		/* Something went wrong: roll back page cache changes */
1921 		xas_lock_irq(&xas);
1922 		mapping->nrpages -= nr_none;
1923 
1924 		if (is_shmem)
1925 			shmem_uncharge(mapping->host, nr_none);
1926 
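		/*
		 * Walk the range again: slots that never had an old page were
		 * holes we filled with new_page, so restore them to NULL; the
		 * rest get their original pages back from the undo list.
		 */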
1927 		xas_set(&xas, start);
1928 		xas_for_each(&xas, page, end - 1) {
1929 			page = list_first_entry_or_null(&pagelist,
1930 					struct page, lru);
1931 			if (!page || xas.xa_index < page->index) {
1932 				if (!nr_none)
1933 					break;
1934 				nr_none--;
1935 				/* Put holes back where they were */
1936 				xas_store(&xas, NULL);
1937 				continue;
1938 			}
1939 
1940 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1941 
1942 			/* Unfreeze the page. */
1943 			list_del(&page->lru);
1944 			page_ref_unfreeze(page, 2);
1945 			xas_store(&xas, page);
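			/*
			 * Pause the iteration and drop the xarray lock so the
			 * page can be unlocked and put back on the LRU without
			 * holding a spinlock.
			 */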
1946 			xas_pause(&xas);
1947 			xas_unlock_irq(&xas);
1948 			unlock_page(page);
1949 			putback_lru_page(page);
1950 			xas_lock_irq(&xas);
1951 		}
1952 		VM_BUG_ON(nr_none);
1953 		xas_unlock_irq(&xas);
1954 
1955 		new_page->mapping = NULL;
1956 	}
1957 
1958 	unlock_page(new_page);
1959 out:
1960 	VM_BUG_ON(!list_empty(&pagelist));
1961 	if (!IS_ERR_OR_NULL(*hpage))
1962 		mem_cgroup_uncharge(*hpage);
1963 	/* TODO: tracepoints */
1964 }
1965 
1966 static void khugepaged_scan_file(struct mm_struct *mm,
1967 		struct file *file, pgoff_t start, struct page **hpage)
1968 {
1969 	struct page *page = NULL;
1970 	struct address_space *mapping = file->f_mapping;
1971 	XA_STATE(xas, &mapping->i_pages, start);
1972 	int present, swap;
1973 	int node = NUMA_NO_NODE;
1974 	int result = SCAN_SUCCEED;
1975 
1976 	present = 0;
1977 	swap = 0;
1978 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1979 	rcu_read_lock();
1980 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1981 		if (xas_retry(&xas, page))
1982 			continue;
1983 
1984 		if (xa_is_value(page)) {
1985 			if (++swap > khugepaged_max_ptes_swap) {
1986 				result = SCAN_EXCEED_SWAP_PTE;
1987 				break;
1988 			}
1989 			continue;
1990 		}
1991 
1992 		if (PageTransCompound(page)) {
1993 			result = SCAN_PAGE_COMPOUND;
1994 			break;
1995 		}
1996 
1997 		node = page_to_nid(page);
1998 		if (khugepaged_scan_abort(node)) {
1999 			result = SCAN_SCAN_ABORT;
2000 			break;
2001 		}
2002 		khugepaged_node_load[node]++;
2003 
2004 		if (!PageLRU(page)) {
2005 			result = SCAN_PAGE_LRU;
2006 			break;
2007 		}
2008 
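		/*
		 * Any reference beyond the page cache, the page tables and
		 * filesystem-private data means someone else is using the
		 * page; collapsing it now would be unsafe.
		 */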
2009 		if (page_count(page) !=
2010 		    1 + page_mapcount(page) + page_has_private(page)) {
2011 			result = SCAN_PAGE_COUNT;
2012 			break;
2013 		}
2014 
2015 		/*
2016 		 * We probably should check if the page is referenced here, but
2017 		 * nobody would transfer pte_young() to PageReferenced() for us,
2018 		 * and an rmap walk here is just too costly...
2019 		 */
2020 
2021 		present++;
2022 
2023 		if (need_resched()) {
2024 			xas_pause(&xas);
2025 			cond_resched_rcu();
2026 		}
2027 	}
2028 	rcu_read_unlock();
2029 
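	/*
	 * Missing pages in the range count against max_ptes_none, mirroring
	 * the limit applied to empty ptes for anonymous memory.
	 */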
2030 	if (result == SCAN_SUCCEED) {
2031 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2032 			result = SCAN_EXCEED_NONE_PTE;
2033 		} else {
2034 			node = khugepaged_find_target_node();
2035 			collapse_file(mm, file, start, hpage, node);
2036 		}
2037 	}
2038 
2039 	/* TODO: tracepoints */
2040 }
2041 #else
2042 static void khugepaged_scan_file(struct mm_struct *mm,
2043 		struct file *file, pgoff_t start, struct page **hpage)
2044 {
2045 	BUILD_BUG();
2046 }
2047 
2048 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2049 {
2050 	return 0;
2051 }
2052 #endif
2053 
2054 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2055 					    struct page **hpage)
2056 	__releases(&khugepaged_mm_lock)
2057 	__acquires(&khugepaged_mm_lock)
2058 {
2059 	struct mm_slot *mm_slot;
2060 	struct mm_struct *mm;
2061 	struct vm_area_struct *vma;
2062 	int progress = 0;
2063 
2064 	VM_BUG_ON(!pages);
2065 	lockdep_assert_held(&khugepaged_mm_lock);
2066 
2067 	if (khugepaged_scan.mm_slot) {
2068 		mm_slot = khugepaged_scan.mm_slot;
2069 	} else {
2070 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2071 				     struct mm_slot, mm_node);
2072 		khugepaged_scan.address = 0;
2073 		khugepaged_scan.mm_slot = mm_slot;
2074 	}
2075 	spin_unlock(&khugepaged_mm_lock);
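	/*
	 * First retry installing huge PMDs for any pte-mapped THPs recorded
	 * for this mm before scanning for new collapse candidates.
	 */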
2076 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2077 
2078 	mm = mm_slot->mm;
2079 	/*
2080 	 * Don't wait for the mmap lock (to avoid long wait times); just move
2081 	 * to the next mm on the list.
2082 	 */
2083 	vma = NULL;
2084 	if (unlikely(!mmap_read_trylock(mm)))
2085 		goto breakouterloop_mmap_lock;
2086 	if (likely(!khugepaged_test_exit(mm)))
2087 		vma = find_vma(mm, khugepaged_scan.address);
2088 
2089 	progress++;
2090 	for (; vma; vma = vma->vm_next) {
2091 		unsigned long hstart, hend;
2092 
2093 		cond_resched();
2094 		if (unlikely(khugepaged_test_exit(mm))) {
2095 			progress++;
2096 			break;
2097 		}
2098 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2099 skip:
2100 			progress++;
2101 			continue;
2102 		}
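		/*
		 * hstart/hend bound the VMA range rounded inward to huge page
		 * boundaries; only PMD ranges fully inside the VMA qualify.
		 */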
2103 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2104 		hend = vma->vm_end & HPAGE_PMD_MASK;
2105 		if (hstart >= hend)
2106 			goto skip;
2107 		if (khugepaged_scan.address > hend)
2108 			goto skip;
2109 		if (khugepaged_scan.address < hstart)
2110 			khugepaged_scan.address = hstart;
2111 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2112 		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2113 			goto skip;
2114 
2115 		while (khugepaged_scan.address < hend) {
2116 			int ret;
2117 			cond_resched();
2118 			if (unlikely(khugepaged_test_exit(mm)))
2119 				goto breakouterloop;
2120 
2121 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2122 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2123 				  hend);
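			/*
			 * File-backed (including shmem) ranges are scanned via
			 * the page cache; mmap_lock is dropped first, and ret
			 * is set so the loop below knows it was already
			 * released.
			 */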
2124 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2125 				struct file *file = get_file(vma->vm_file);
2126 				pgoff_t pgoff = linear_page_index(vma,
2127 						khugepaged_scan.address);
2128 
2129 				mmap_read_unlock(mm);
2130 				ret = 1;
2131 				khugepaged_scan_file(mm, file, pgoff, hpage);
2132 				fput(file);
2133 			} else {
2134 				ret = khugepaged_scan_pmd(mm, vma,
2135 						khugepaged_scan.address,
2136 						hpage);
2137 			}
2138 			/* move to next address */
2139 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2140 			progress += HPAGE_PMD_NR;
2141 			if (ret)
2142 				/* we released mmap_lock, so break out of the loop */
2143 				goto breakouterloop_mmap_lock;
2144 			if (progress >= pages)
2145 				goto breakouterloop;
2146 		}
2147 	}
2148 breakouterloop:
2149 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2150 breakouterloop_mmap_lock:
2151 
2152 	spin_lock(&khugepaged_mm_lock);
2153 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2154 	/*
2155 	 * Release the current mm_slot if this mm is about to die, or
2156 	 * if we scanned all vmas of this mm.
2157 	 */
2158 	if (khugepaged_test_exit(mm) || !vma) {
2159 		/*
2160 		 * Make sure that if mm_users is reaching zero while
2161 		 * khugepaged runs here, khugepaged_exit will find
2162 		 * mm_slot not pointing to the exiting mm.
2163 		 */
2164 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2165 			khugepaged_scan.mm_slot = list_entry(
2166 				mm_slot->mm_node.next,
2167 				struct mm_slot, mm_node);
2168 			khugepaged_scan.address = 0;
2169 		} else {
2170 			khugepaged_scan.mm_slot = NULL;
2171 			khugepaged_full_scans++;
2172 		}
2173 
2174 		collect_mm_slot(mm_slot);
2175 	}
2176 
2177 	return progress;
2178 }
2179 
2180 static int khugepaged_has_work(void)
2181 {
2182 	return !list_empty(&khugepaged_scan.mm_head) &&
2183 		khugepaged_enabled();
2184 }
2185 
2186 static int khugepaged_wait_event(void)
2187 {
2188 	return !list_empty(&khugepaged_scan.mm_head) ||
2189 		kthread_should_stop();
2190 }
2191 
2192 static void khugepaged_do_scan(void)
2193 {
2194 	struct page *hpage = NULL;
2195 	unsigned int progress = 0, pass_through_head = 0;
2196 	unsigned int pages = khugepaged_pages_to_scan;
2197 	bool wait = true;
2198 
2199 	barrier(); /* ensure khugepaged_pages_to_scan was read into the local 'pages' */
2200 
2201 	lru_add_drain_all();
2202 
2203 	while (progress < pages) {
2204 		if (!khugepaged_prealloc_page(&hpage, &wait))
2205 			break;
2206 
2207 		cond_resched();
2208 
2209 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2210 			break;
2211 
2212 		spin_lock(&khugepaged_mm_lock);
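		/*
		 * A NULL mm_slot means the scan is (re)starting from the head
		 * of the mm list; allow at most two passes through the head
		 * per invocation so this loop cannot spin forever.
		 */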
2213 		if (!khugepaged_scan.mm_slot)
2214 			pass_through_head++;
2215 		if (khugepaged_has_work() &&
2216 		    pass_through_head < 2)
2217 			progress += khugepaged_scan_mm_slot(pages - progress,
2218 							    &hpage);
2219 		else
2220 			progress = pages;
2221 		spin_unlock(&khugepaged_mm_lock);
2222 	}
2223 
2224 	if (!IS_ERR_OR_NULL(hpage))
2225 		put_page(hpage);
2226 }
2227 
2228 static bool khugepaged_should_wakeup(void)
2229 {
2230 	return kthread_should_stop() ||
2231 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2232 }
2233 
2234 static void khugepaged_wait_work(void)
2235 {
2236 	if (khugepaged_has_work()) {
2237 		const unsigned long scan_sleep_jiffies =
2238 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2239 
2240 		if (!scan_sleep_jiffies)
2241 			return;
2242 
2243 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2244 		wait_event_freezable_timeout(khugepaged_wait,
2245 					     khugepaged_should_wakeup(),
2246 					     scan_sleep_jiffies);
2247 		return;
2248 	}
2249 
2250 	if (khugepaged_enabled())
2251 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2252 }
2253 
2254 static int khugepaged(void *none)
2255 {
2256 	struct mm_slot *mm_slot;
2257 
2258 	set_freezable();
2259 	set_user_nice(current, MAX_NICE);
2260 
2261 	while (!kthread_should_stop()) {
2262 		khugepaged_do_scan();
2263 		khugepaged_wait_work();
2264 	}
2265 
2266 	spin_lock(&khugepaged_mm_lock);
2267 	mm_slot = khugepaged_scan.mm_slot;
2268 	khugepaged_scan.mm_slot = NULL;
2269 	if (mm_slot)
2270 		collect_mm_slot(mm_slot);
2271 	spin_unlock(&khugepaged_mm_lock);
2272 	return 0;
2273 }
2274 
2275 static void set_recommended_min_free_kbytes(void)
2276 {
2277 	struct zone *zone;
2278 	int nr_zones = 0;
2279 	unsigned long recommended_min;
2280 
2281 	for_each_populated_zone(zone) {
2282 		/*
2283 		 * We don't need to worry about fragmentation of
2284 		 * ZONE_MOVABLE since it only has movable pages.
2285 		 */
2286 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2287 			continue;
2288 
2289 		nr_zones++;
2290 	}
2291 
2292 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2293 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2294 
2295 	/*
2296 	 * Make sure that on average at least two pageblocks are almost free
2297 	 * of another type, one for a migratetype to fall back to and a
2298 	 * second to avoid subsequent fallbacks of other types. There are 3
2299 	 * MIGRATE_TYPES we care about.
2300 	 */
2301 	recommended_min += pageblock_nr_pages * nr_zones *
2302 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
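	/*
	 * Worked example (illustrative only): with one qualifying zone and
	 * 2MiB pageblocks (512 pages of 4KiB), this is 512 * 2 + 512 * 3 * 3
	 * = 5632 pages, i.e. 22528 kB, assuming the 5% lowmem cap below does
	 * not apply.
	 */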
2303 
2304 	/* never allow reserving more than 5% of the lowmem */
2305 	recommended_min = min(recommended_min,
2306 			      (unsigned long) nr_free_buffer_pages() / 20);
2307 	recommended_min <<= (PAGE_SHIFT-10);
2308 
2309 	if (recommended_min > min_free_kbytes) {
2310 		if (user_min_free_kbytes >= 0)
2311 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2312 				min_free_kbytes, recommended_min);
2313 
2314 		min_free_kbytes = recommended_min;
2315 	}
2316 	setup_per_zone_wmarks();
2317 }
2318 
2319 int start_stop_khugepaged(void)
2320 {
2321 	int err = 0;
2322 
2323 	mutex_lock(&khugepaged_mutex);
2324 	if (khugepaged_enabled()) {
2325 		if (!khugepaged_thread)
2326 			khugepaged_thread = kthread_run(khugepaged, NULL,
2327 							"khugepaged");
2328 		if (IS_ERR(khugepaged_thread)) {
2329 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2330 			err = PTR_ERR(khugepaged_thread);
2331 			khugepaged_thread = NULL;
2332 			goto fail;
2333 		}
2334 
2335 		if (!list_empty(&khugepaged_scan.mm_head))
2336 			wake_up_interruptible(&khugepaged_wait);
2337 
2338 		set_recommended_min_free_kbytes();
2339 	} else if (khugepaged_thread) {
2340 		kthread_stop(khugepaged_thread);
2341 		khugepaged_thread = NULL;
2342 	}
2343 fail:
2344 	mutex_unlock(&khugepaged_mutex);
2345 	return err;
2346 }
2347 
2348 void khugepaged_min_free_kbytes_update(void)
2349 {
2350 	mutex_lock(&khugepaged_mutex);
2351 	if (khugepaged_enabled() && khugepaged_thread)
2352 		set_recommended_min_free_kbytes();
2353 	mutex_unlock(&khugepaged_mutex);
2354 }
2355