xref: /openbmc/linux/mm/khugepaged.c (revision 11a163f2)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21 
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
25 
26 enum scan_result {
27 	SCAN_FAIL,
28 	SCAN_SUCCEED,
29 	SCAN_PMD_NULL,
30 	SCAN_EXCEED_NONE_PTE,
31 	SCAN_EXCEED_SWAP_PTE,
32 	SCAN_EXCEED_SHARED_PTE,
33 	SCAN_PTE_NON_PRESENT,
34 	SCAN_PTE_UFFD_WP,
35 	SCAN_PAGE_RO,
36 	SCAN_LACK_REFERENCED_PAGE,
37 	SCAN_PAGE_NULL,
38 	SCAN_SCAN_ABORT,
39 	SCAN_PAGE_COUNT,
40 	SCAN_PAGE_LRU,
41 	SCAN_PAGE_LOCK,
42 	SCAN_PAGE_ANON,
43 	SCAN_PAGE_COMPOUND,
44 	SCAN_ANY_PROCESS,
45 	SCAN_VMA_NULL,
46 	SCAN_VMA_CHECK,
47 	SCAN_ADDRESS_RANGE,
48 	SCAN_SWAP_CACHE_PAGE,
49 	SCAN_DEL_PAGE_LRU,
50 	SCAN_ALLOC_HUGE_PAGE_FAIL,
51 	SCAN_CGROUP_CHARGE_FAIL,
52 	SCAN_TRUNCATED,
53 	SCAN_PAGE_HAS_PRIVATE,
54 };
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
58 
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
61 
62 /* default scan 8*512 pte (or vmas) every 10 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72 /*
73  * by default, collapse a hugepage if there is at least one pte mapped,
74  * as would have happened if the vma had been large enough during the
75  * page fault.
76  */
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
80 
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83 
84 static struct kmem_cache *mm_slot_cache __read_mostly;
85 
86 #define MAX_PTE_MAPPED_THP 8
87 
88 /**
89  * struct mm_slot - hash lookup from mm to mm_slot
90  * @hash: hash collision list
91  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92  * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs in this mm
 * @pte_mapped_thp: addresses of the pte-mapped THPs queued for collapse
93  */
94 struct mm_slot {
95 	struct hlist_node hash;
96 	struct list_head mm_node;
97 	struct mm_struct *mm;
98 
99 	/* pte-mapped THP in this mm */
100 	int nr_pte_mapped_thp;
101 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
102 };
103 
104 /**
105  * struct khugepaged_scan - cursor for scanning
106  * @mm_head: the head of the mm list to scan
107  * @mm_slot: the current mm_slot we are scanning
108  * @address: the next address inside that to be scanned
109  *
110  * There is only the one khugepaged_scan instance of this cursor structure.
111  */
112 struct khugepaged_scan {
113 	struct list_head mm_head;
114 	struct mm_slot *mm_slot;
115 	unsigned long address;
116 };
117 
118 static struct khugepaged_scan khugepaged_scan = {
119 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
120 };
121 
122 #ifdef CONFIG_SYSFS
123 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
124 					 struct kobj_attribute *attr,
125 					 char *buf)
126 {
127 	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
128 }
129 
130 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
131 					  struct kobj_attribute *attr,
132 					  const char *buf, size_t count)
133 {
134 	unsigned long msecs;
135 	int err;
136 
137 	err = kstrtoul(buf, 10, &msecs);
138 	if (err || msecs > UINT_MAX)
139 		return -EINVAL;
140 
141 	khugepaged_scan_sleep_millisecs = msecs;
142 	khugepaged_sleep_expire = 0;
143 	wake_up_interruptible(&khugepaged_wait);
144 
145 	return count;
146 }
147 static struct kobj_attribute scan_sleep_millisecs_attr =
148 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
149 	       scan_sleep_millisecs_store);
150 
151 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
152 					  struct kobj_attribute *attr,
153 					  char *buf)
154 {
155 	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
156 }
157 
158 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
159 					   struct kobj_attribute *attr,
160 					   const char *buf, size_t count)
161 {
162 	unsigned long msecs;
163 	int err;
164 
165 	err = kstrtoul(buf, 10, &msecs);
166 	if (err || msecs > UINT_MAX)
167 		return -EINVAL;
168 
169 	khugepaged_alloc_sleep_millisecs = msecs;
170 	khugepaged_sleep_expire = 0;
171 	wake_up_interruptible(&khugepaged_wait);
172 
173 	return count;
174 }
175 static struct kobj_attribute alloc_sleep_millisecs_attr =
176 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
177 	       alloc_sleep_millisecs_store);
178 
179 static ssize_t pages_to_scan_show(struct kobject *kobj,
180 				  struct kobj_attribute *attr,
181 				  char *buf)
182 {
183 	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
184 }
185 static ssize_t pages_to_scan_store(struct kobject *kobj,
186 				   struct kobj_attribute *attr,
187 				   const char *buf, size_t count)
188 {
189 	int err;
190 	unsigned long pages;
191 
192 	err = kstrtoul(buf, 10, &pages);
193 	if (err || !pages || pages > UINT_MAX)
194 		return -EINVAL;
195 
196 	khugepaged_pages_to_scan = pages;
197 
198 	return count;
199 }
200 static struct kobj_attribute pages_to_scan_attr =
201 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
202 	       pages_to_scan_store);
203 
204 static ssize_t pages_collapsed_show(struct kobject *kobj,
205 				    struct kobj_attribute *attr,
206 				    char *buf)
207 {
208 	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
209 }
210 static struct kobj_attribute pages_collapsed_attr =
211 	__ATTR_RO(pages_collapsed);
212 
213 static ssize_t full_scans_show(struct kobject *kobj,
214 			       struct kobj_attribute *attr,
215 			       char *buf)
216 {
217 	return sprintf(buf, "%u\n", khugepaged_full_scans);
218 }
219 static struct kobj_attribute full_scans_attr =
220 	__ATTR_RO(full_scans);
221 
222 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
223 				      struct kobj_attribute *attr, char *buf)
224 {
225 	return single_hugepage_flag_show(kobj, attr, buf,
226 				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
227 }
228 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
229 				       struct kobj_attribute *attr,
230 				       const char *buf, size_t count)
231 {
232 	return single_hugepage_flag_store(kobj, attr, buf, count,
233 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
234 }
235 static struct kobj_attribute khugepaged_defrag_attr =
236 	__ATTR(defrag, 0644, khugepaged_defrag_show,
237 	       khugepaged_defrag_store);
238 
239 /*
240  * max_ptes_none controls whether khugepaged should collapse hugepages
241  * over unmapped (none) ptes, which potentially increases the memory
242  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
243  * reduce the available free memory in the system as it runs.
244  * Increasing max_ptes_none will instead potentially reduce the free
245  * memory in the system during the khugepaged scan.
246  */
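/*
 * Worked example (assuming x86-64 with 4KiB base pages, so
 * HPAGE_PMD_NR == 512): with the default max_ptes_none of 511, a single
 * present pte is enough to trigger a collapse, so up to 511 * 4KiB
 * (just under 2MiB) of previously unmapped address space can become
 * resident per collapsed PMD range.
 */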
247 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
248 					     struct kobj_attribute *attr,
249 					     char *buf)
250 {
251 	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
252 }
253 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
254 					      struct kobj_attribute *attr,
255 					      const char *buf, size_t count)
256 {
257 	int err;
258 	unsigned long max_ptes_none;
259 
260 	err = kstrtoul(buf, 10, &max_ptes_none);
261 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
262 		return -EINVAL;
263 
264 	khugepaged_max_ptes_none = max_ptes_none;
265 
266 	return count;
267 }
268 static struct kobj_attribute khugepaged_max_ptes_none_attr =
269 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
270 	       khugepaged_max_ptes_none_store);
271 
272 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
273 					     struct kobj_attribute *attr,
274 					     char *buf)
275 {
276 	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
277 }
278 
279 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
280 					      struct kobj_attribute *attr,
281 					      const char *buf, size_t count)
282 {
283 	int err;
284 	unsigned long max_ptes_swap;
285 
286 	err = kstrtoul(buf, 10, &max_ptes_swap);
287 	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
288 		return -EINVAL;
289 
290 	khugepaged_max_ptes_swap = max_ptes_swap;
291 
292 	return count;
293 }
294 
295 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
296 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
297 	       khugepaged_max_ptes_swap_store);
298 
299 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
300 					     struct kobj_attribute *attr,
301 					     char *buf)
302 {
303 	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
304 }
305 
306 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
307 					      struct kobj_attribute *attr,
308 					      const char *buf, size_t count)
309 {
310 	int err;
311 	unsigned long max_ptes_shared;
312 
313 	err = kstrtoul(buf, 10, &max_ptes_shared);
314 	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
315 		return -EINVAL;
316 
317 	khugepaged_max_ptes_shared = max_ptes_shared;
318 
319 	return count;
320 }
321 
322 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
323 	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
324 	       khugepaged_max_ptes_shared_store);
325 
326 static struct attribute *khugepaged_attr[] = {
327 	&khugepaged_defrag_attr.attr,
328 	&khugepaged_max_ptes_none_attr.attr,
329 	&khugepaged_max_ptes_swap_attr.attr,
330 	&khugepaged_max_ptes_shared_attr.attr,
331 	&pages_to_scan_attr.attr,
332 	&pages_collapsed_attr.attr,
333 	&full_scans_attr.attr,
334 	&scan_sleep_millisecs_attr.attr,
335 	&alloc_sleep_millisecs_attr.attr,
336 	NULL,
337 };
338 
339 struct attribute_group khugepaged_attr_group = {
340 	.attrs = khugepaged_attr,
341 	.name = "khugepaged",
342 };
343 #endif /* CONFIG_SYSFS */
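/*
 * Usage sketch: the attribute group above is registered under the
 * transparent_hugepage kobject, so (with sysfs mounted in the usual
 * place) the daemon can be tuned from the shell, e.g.:
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * The values shown are illustrative only.
 */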
344 
345 int hugepage_madvise(struct vm_area_struct *vma,
346 		     unsigned long *vm_flags, int advice)
347 {
348 	switch (advice) {
349 	case MADV_HUGEPAGE:
350 #ifdef CONFIG_S390
351 		/*
352 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
353 		 * can't handle this properly after s390_enable_sie, so we simply
354 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
355 		 */
356 		if (mm_has_pgste(vma->vm_mm))
357 			return 0;
358 #endif
359 		*vm_flags &= ~VM_NOHUGEPAGE;
360 		*vm_flags |= VM_HUGEPAGE;
361 		/*
362 		 * If the vma becomes good for khugepaged to scan,
363 		 * register it here without waiting for a page fault that
364 		 * may not happen any time soon.
365 		 */
366 		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
367 				khugepaged_enter_vma_merge(vma, *vm_flags))
368 			return -ENOMEM;
369 		break;
370 	case MADV_NOHUGEPAGE:
371 		*vm_flags &= ~VM_HUGEPAGE;
372 		*vm_flags |= VM_NOHUGEPAGE;
373 		/*
374 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
375 		 * this vma even if we leave the mm registered in khugepaged if
376 		 * it got registered before VM_NOHUGEPAGE was set.
377 		 */
378 		break;
379 	}
380 
381 	return 0;
382 }
383 
384 int __init khugepaged_init(void)
385 {
386 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
387 					  sizeof(struct mm_slot),
388 					  __alignof__(struct mm_slot), 0, NULL);
389 	if (!mm_slot_cache)
390 		return -ENOMEM;
391 
392 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
396 
397 	return 0;
398 }
399 
400 void __init khugepaged_destroy(void)
401 {
402 	kmem_cache_destroy(mm_slot_cache);
403 }
404 
405 static inline struct mm_slot *alloc_mm_slot(void)
406 {
407 	if (!mm_slot_cache)	/* initialization failed */
408 		return NULL;
409 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
410 }
411 
412 static inline void free_mm_slot(struct mm_slot *mm_slot)
413 {
414 	kmem_cache_free(mm_slot_cache, mm_slot);
415 }
416 
417 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
418 {
419 	struct mm_slot *mm_slot;
420 
421 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
422 		if (mm == mm_slot->mm)
423 			return mm_slot;
424 
425 	return NULL;
426 }
427 
428 static void insert_to_mm_slots_hash(struct mm_struct *mm,
429 				    struct mm_slot *mm_slot)
430 {
431 	mm_slot->mm = mm;
432 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
433 }
434 
435 static inline int khugepaged_test_exit(struct mm_struct *mm)
436 {
437 	return atomic_read(&mm->mm_users) == 0;
438 }
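/*
 * Note: khugepaged pins each registered mm with mmgrab() (an mm_count
 * reference) rather than with an mm_users reference, so mm_users == 0
 * here means the last real user is gone and exit_mmap() is tearing the
 * address space down.
 */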
439 
440 static bool hugepage_vma_check(struct vm_area_struct *vma,
441 			       unsigned long vm_flags)
442 {
443 	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
444 	    (vm_flags & VM_NOHUGEPAGE) ||
445 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
446 		return false;
447 
448 	if (shmem_file(vma->vm_file) ||
449 	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
450 	     vma->vm_file &&
451 	     (vm_flags & VM_DENYWRITE))) {
452 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
453 				HPAGE_PMD_NR);
454 	}
455 	if (!vma->anon_vma || vma->vm_ops)
456 		return false;
457 	if (vma_is_temporary_stack(vma))
458 		return false;
459 	return !(vm_flags & VM_NO_KHUGEPAGED);
460 }
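/*
 * In short: shmem mappings, and (with CONFIG_READ_ONLY_THP_FOR_FS)
 * file mappings with VM_DENYWRITE set (typically files being
 * executed), qualify when suitably hugepage-aligned within the file;
 * everything else must be ordinary anonymous memory.
 */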
461 
462 int __khugepaged_enter(struct mm_struct *mm)
463 {
464 	struct mm_slot *mm_slot;
465 	int wakeup;
466 
467 	mm_slot = alloc_mm_slot();
468 	if (!mm_slot)
469 		return -ENOMEM;
470 
471 	/* __khugepaged_exit() must not run from under us */
472 	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
473 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
474 		free_mm_slot(mm_slot);
475 		return 0;
476 	}
477 
478 	spin_lock(&khugepaged_mm_lock);
479 	insert_to_mm_slots_hash(mm, mm_slot);
480 	/*
481 	 * Insert just behind the scanning cursor, to let the area settle
482 	 * down a little.
483 	 */
484 	wakeup = list_empty(&khugepaged_scan.mm_head);
485 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
486 	spin_unlock(&khugepaged_mm_lock);
487 
488 	mmgrab(mm);
489 	if (wakeup)
490 		wake_up_interruptible(&khugepaged_wait);
491 
492 	return 0;
493 }
494 
495 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
496 			       unsigned long vm_flags)
497 {
498 	unsigned long hstart, hend;
499 
500 	/*
501 	 * khugepaged only supports read-only files for non-shmem files.
502 	 * khugepaged does not yet work on special mappings. And
503 	 * file-private shmem THP is not supported.
504 	 */
505 	if (!hugepage_vma_check(vma, vm_flags))
506 		return 0;
507 
508 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
509 	hend = vma->vm_end & HPAGE_PMD_MASK;
510 	if (hstart < hend)
511 		return khugepaged_enter(vma, vm_flags);
512 	return 0;
513 }
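/*
 * On the rounding above: hstart is vm_start rounded up and hend is
 * vm_end rounded down to a PMD boundary. E.g. with 2MiB huge pages a
 * vma spanning [0x1ff000, 0x401000) gives hstart == 0x200000 and
 * hend == 0x400000, so only vmas covering at least one fully aligned
 * PMD-sized range are registered.
 */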
514 
515 void __khugepaged_exit(struct mm_struct *mm)
516 {
517 	struct mm_slot *mm_slot;
518 	int free = 0;
519 
520 	spin_lock(&khugepaged_mm_lock);
521 	mm_slot = get_mm_slot(mm);
522 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
523 		hash_del(&mm_slot->hash);
524 		list_del(&mm_slot->mm_node);
525 		free = 1;
526 	}
527 	spin_unlock(&khugepaged_mm_lock);
528 
529 	if (free) {
530 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
531 		free_mm_slot(mm_slot);
532 		mmdrop(mm);
533 	} else if (mm_slot) {
534 		/*
535 		 * This is required to serialize against
536 		 * khugepaged_test_exit() (which is guaranteed to run
537 		 * under mmap_lock read mode). Stop here (after we
538 		 * return, all pagetables will be destroyed) until
539 		 * khugepaged has finished working on the pagetables
540 		 * under the mmap_lock.
541 		 */
542 		mmap_write_lock(mm);
543 		mmap_write_unlock(mm);
544 	}
545 }
546 
547 static void release_pte_page(struct page *page)
548 {
549 	mod_node_page_state(page_pgdat(page),
550 			NR_ISOLATED_ANON + page_is_file_lru(page),
551 			-compound_nr(page));
552 	unlock_page(page);
553 	putback_lru_page(page);
554 }
555 
556 static void release_pte_pages(pte_t *pte, pte_t *_pte,
557 		struct list_head *compound_pagelist)
558 {
559 	struct page *page, *tmp;
560 
561 	while (--_pte >= pte) {
562 		pte_t pteval = *_pte;
563 
564 		page = pte_page(pteval);
565 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
566 				!PageCompound(page))
567 			release_pte_page(page);
568 	}
569 
570 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
571 		list_del(&page->lru);
572 		release_pte_page(page);
573 	}
574 }
575 
576 static bool is_refcount_suitable(struct page *page)
577 {
578 	int expected_refcount;
579 
580 	expected_refcount = total_mapcount(page);
581 	if (PageSwapCache(page))
582 		expected_refcount += compound_nr(page);
583 
584 	return page_count(page) == expected_refcount;
585 }
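/*
 * Example: a base page mapped by two processes and sitting in the swap
 * cache has total_mapcount == 2 plus one swap cache reference, so
 * page_count() == 3 is exactly what is expected here; anything higher
 * indicates a GUP or other external pin.
 */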
586 
587 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
588 					unsigned long address,
589 					pte_t *pte,
590 					struct list_head *compound_pagelist)
591 {
592 	struct page *page = NULL;
593 	pte_t *_pte;
594 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
595 	bool writable = false;
596 
597 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
598 	     _pte++, address += PAGE_SIZE) {
599 		pte_t pteval = *_pte;
600 		if (pte_none(pteval) || (pte_present(pteval) &&
601 				is_zero_pfn(pte_pfn(pteval)))) {
602 			if (!userfaultfd_armed(vma) &&
603 			    ++none_or_zero <= khugepaged_max_ptes_none) {
604 				continue;
605 			} else {
606 				result = SCAN_EXCEED_NONE_PTE;
607 				goto out;
608 			}
609 		}
610 		if (!pte_present(pteval)) {
611 			result = SCAN_PTE_NON_PRESENT;
612 			goto out;
613 		}
614 		page = vm_normal_page(vma, address, pteval);
615 		if (unlikely(!page)) {
616 			result = SCAN_PAGE_NULL;
617 			goto out;
618 		}
619 
620 		VM_BUG_ON_PAGE(!PageAnon(page), page);
621 
622 		if (page_mapcount(page) > 1 &&
623 				++shared > khugepaged_max_ptes_shared) {
624 			result = SCAN_EXCEED_SHARED_PTE;
625 			goto out;
626 		}
627 
628 		if (PageCompound(page)) {
629 			struct page *p;
630 			page = compound_head(page);
631 
632 			/*
633 			 * Check if we have dealt with the compound page
634 			 * already
635 			 */
636 			list_for_each_entry(p, compound_pagelist, lru) {
637 				if (page == p)
638 					goto next;
639 			}
640 		}
641 
642 		/*
643 		 * We can do it before isolate_lru_page because the
644 		 * page can't be freed from under us. NOTE: PG_lock
645 		 * is needed to serialize against split_huge_page
646 		 * when invoked from the VM.
647 		 */
648 		if (!trylock_page(page)) {
649 			result = SCAN_PAGE_LOCK;
650 			goto out;
651 		}
652 
653 		/*
654 		 * Check if the page has any GUP (or other external) pins.
655 		 *
656 		 * The page table that maps the page has already been unlinked
657 		 * from the page table tree, and this process cannot get
658 		 * an additional pin on the page.
659 		 *
660 		 * New pins can come later if the page is shared across fork,
661 		 * but not from this process. The other process cannot write to
662 		 * the page, only trigger CoW.
663 		 */
664 		if (!is_refcount_suitable(page)) {
665 			unlock_page(page);
666 			result = SCAN_PAGE_COUNT;
667 			goto out;
668 		}
669 		if (!pte_write(pteval) && PageSwapCache(page) &&
670 				!reuse_swap_page(page, NULL)) {
671 			/*
672 			 * Page is in the swap cache and cannot be re-used.
673 			 * It cannot be collapsed into a THP.
674 			 */
675 			unlock_page(page);
676 			result = SCAN_SWAP_CACHE_PAGE;
677 			goto out;
678 		}
679 
680 		/*
681 		 * Isolate the page to avoid collapsing a hugepage
682 		 * currently in use by the VM.
683 		 */
684 		if (isolate_lru_page(page)) {
685 			unlock_page(page);
686 			result = SCAN_DEL_PAGE_LRU;
687 			goto out;
688 		}
689 		mod_node_page_state(page_pgdat(page),
690 				NR_ISOLATED_ANON + page_is_file_lru(page),
691 				compound_nr(page));
692 		VM_BUG_ON_PAGE(!PageLocked(page), page);
693 		VM_BUG_ON_PAGE(PageLRU(page), page);
694 
695 		if (PageCompound(page))
696 			list_add_tail(&page->lru, compound_pagelist);
697 next:
698 		/* There should be enough young ptes to collapse the page */
699 		if (pte_young(pteval) ||
700 		    page_is_young(page) || PageReferenced(page) ||
701 		    mmu_notifier_test_young(vma->vm_mm, address))
702 			referenced++;
703 
704 		if (pte_write(pteval))
705 			writable = true;
706 	}
707 	if (likely(writable)) {
708 		if (likely(referenced)) {
709 			result = SCAN_SUCCEED;
710 			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
711 							    referenced, writable, result);
712 			return 1;
713 		}
714 	} else {
715 		result = SCAN_PAGE_RO;
716 	}
717 
718 out:
719 	release_pte_pages(pte, _pte, compound_pagelist);
720 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
721 					    referenced, writable, result);
722 	return 0;
723 }
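/*
 * Return convention above: on success the scanned pages are left
 * isolated from the LRU and locked (compound pages queued on
 * @compound_pagelist) and 1 is returned; on any failure everything is
 * unwound via release_pte_pages() and 0 is returned.
 */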
724 
725 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
726 				      struct vm_area_struct *vma,
727 				      unsigned long address,
728 				      spinlock_t *ptl,
729 				      struct list_head *compound_pagelist)
730 {
731 	struct page *src_page, *tmp;
732 	pte_t *_pte;
733 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
734 				_pte++, page++, address += PAGE_SIZE) {
735 		pte_t pteval = *_pte;
736 
737 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
738 			clear_user_highpage(page, address);
739 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
740 			if (is_zero_pfn(pte_pfn(pteval))) {
741 				/*
742 				 * ptl mostly unnecessary.
743 				 */
744 				spin_lock(ptl);
745 				/*
746 				 * paravirt calls inside pte_clear here are
747 				 * superfluous.
748 				 */
749 				pte_clear(vma->vm_mm, address, _pte);
750 				spin_unlock(ptl);
751 			}
752 		} else {
753 			src_page = pte_page(pteval);
754 			copy_user_highpage(page, src_page, address, vma);
755 			if (!PageCompound(src_page))
756 				release_pte_page(src_page);
757 			/*
758 			 * ptl mostly unnecessary, but preempt has to
759 			 * be disabled to update the per-cpu stats
760 			 * inside page_remove_rmap().
761 			 */
762 			spin_lock(ptl);
763 			/*
764 			 * paravirt calls inside pte_clear here are
765 			 * superfluous.
766 			 */
767 			pte_clear(vma->vm_mm, address, _pte);
768 			page_remove_rmap(src_page, false);
769 			spin_unlock(ptl);
770 			free_page_and_swap_cache(src_page);
771 		}
772 	}
773 
774 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
775 		list_del(&src_page->lru);
776 		release_pte_page(src_page);
777 	}
778 }
779 
780 static void khugepaged_alloc_sleep(void)
781 {
782 	DEFINE_WAIT(wait);
783 
784 	add_wait_queue(&khugepaged_wait, &wait);
785 	freezable_schedule_timeout_interruptible(
786 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
787 	remove_wait_queue(&khugepaged_wait, &wait);
788 }
789 
790 static int khugepaged_node_load[MAX_NUMNODES];
791 
792 static bool khugepaged_scan_abort(int nid)
793 {
794 	int i;
795 
796 	/*
797 	 * If node_reclaim_mode is disabled, then no extra effort is made to
798 	 * allocate memory locally.
799 	 */
800 	if (!node_reclaim_mode)
801 		return false;
802 
803 	/* If there is a count for this node already, it must be acceptable */
804 	if (khugepaged_node_load[nid])
805 		return false;
806 
807 	for (i = 0; i < MAX_NUMNODES; i++) {
808 		if (!khugepaged_node_load[i])
809 			continue;
810 		if (node_distance(nid, i) > node_reclaim_distance)
811 			return true;
812 	}
813 	return false;
814 }
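/*
 * Example: on a typical two-socket machine with a remote SLIT distance
 * of 21 and the default node_reclaim_distance of 30, pages spread
 * across both nodes do not abort the scan; the scan aborts only when
 * previously-hit nodes are farther apart than node_reclaim_distance.
 */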
815 
816 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
817 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
818 {
819 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
820 }
821 
822 #ifdef CONFIG_NUMA
823 static int khugepaged_find_target_node(void)
824 {
825 	static int last_khugepaged_target_node = NUMA_NO_NODE;
826 	int nid, target_node = 0, max_value = 0;
827 
828 	/* find first node with max normal pages hit */
829 	for (nid = 0; nid < MAX_NUMNODES; nid++)
830 		if (khugepaged_node_load[nid] > max_value) {
831 			max_value = khugepaged_node_load[nid];
832 			target_node = nid;
833 		}
834 
835 	/* do some balance if several nodes have the same hit record */
836 	if (target_node <= last_khugepaged_target_node)
837 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
838 				nid++)
839 			if (max_value == khugepaged_node_load[nid]) {
840 				target_node = nid;
841 				break;
842 			}
843 
844 	last_khugepaged_target_node = target_node;
845 	return target_node;
846 }
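/*
 * Example of the balancing above: if nodes 0 and 2 tie for the max hit
 * count and node 0 was chosen on the previous pass, the second loop
 * starts scanning at node 1 and advances the target to node 2,
 * round-robining among equally loaded nodes.
 */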
847 
848 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
849 {
850 	if (IS_ERR(*hpage)) {
851 		if (!*wait)
852 			return false;
853 
854 		*wait = false;
855 		*hpage = NULL;
856 		khugepaged_alloc_sleep();
857 	} else if (*hpage) {
858 		put_page(*hpage);
859 		*hpage = NULL;
860 	}
861 
862 	return true;
863 }
864 
865 static struct page *
866 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
867 {
868 	VM_BUG_ON_PAGE(*hpage, *hpage);
869 
870 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
871 	if (unlikely(!*hpage)) {
872 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
873 		*hpage = ERR_PTR(-ENOMEM);
874 		return NULL;
875 	}
876 
877 	prep_transhuge_page(*hpage);
878 	count_vm_event(THP_COLLAPSE_ALLOC);
879 	return *hpage;
880 }
881 #else
882 static int khugepaged_find_target_node(void)
883 {
884 	return 0;
885 }
886 
887 static inline struct page *alloc_khugepaged_hugepage(void)
888 {
889 	struct page *page;
890 
891 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
892 			   HPAGE_PMD_ORDER);
893 	if (page)
894 		prep_transhuge_page(page);
895 	return page;
896 }
897 
898 static struct page *khugepaged_alloc_hugepage(bool *wait)
899 {
900 	struct page *hpage;
901 
902 	do {
903 		hpage = alloc_khugepaged_hugepage();
904 		if (!hpage) {
905 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
906 			if (!*wait)
907 				return NULL;
908 
909 			*wait = false;
910 			khugepaged_alloc_sleep();
911 		} else
912 			count_vm_event(THP_COLLAPSE_ALLOC);
913 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
914 
915 	return hpage;
916 }
917 
918 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
919 {
920 	/*
921 	 * If the hpage allocated earlier was briefly exposed in page cache
922 	 * before collapse_file() failed, it is possible that racing lookups
923 	 * have not yet completed, and would then be unpleasantly surprised by
924 	 * finding the hpage reused for the same mapping at a different offset.
925 	 * Just release the previous allocation if there is any danger of that.
926 	 */
927 	if (*hpage && page_count(*hpage) > 1) {
928 		put_page(*hpage);
929 		*hpage = NULL;
930 	}
931 
932 	if (!*hpage)
933 		*hpage = khugepaged_alloc_hugepage(wait);
934 
935 	if (unlikely(!*hpage))
936 		return false;
937 
938 	return true;
939 }
940 
941 static struct page *
942 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
943 {
944 	VM_BUG_ON(!*hpage);
945 
946 	return *hpage;
947 }
948 #endif
949 
950 /*
951  * If the mmap_lock was temporarily dropped, revalidate the vma
952  * after re-taking the mmap_lock.
953  * Return 0 on success, otherwise return a non-zero scan
954  * result code.
955  */
956 
957 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
958 		struct vm_area_struct **vmap)
959 {
960 	struct vm_area_struct *vma;
961 	unsigned long hstart, hend;
962 
963 	if (unlikely(khugepaged_test_exit(mm)))
964 		return SCAN_ANY_PROCESS;
965 
966 	*vmap = vma = find_vma(mm, address);
967 	if (!vma)
968 		return SCAN_VMA_NULL;
969 
970 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
971 	hend = vma->vm_end & HPAGE_PMD_MASK;
972 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
973 		return SCAN_ADDRESS_RANGE;
974 	if (!hugepage_vma_check(vma, vma->vm_flags))
975 		return SCAN_VMA_CHECK;
976 	/* Anon VMA expected */
977 	if (!vma->anon_vma || vma->vm_ops)
978 		return SCAN_VMA_CHECK;
979 	return 0;
980 }
981 
982 /*
983  * Bring missing pages in from swap, to complete THP collapse.
984  * Only done if khugepaged_scan_pmd believes it is worthwhile.
985  *
986  * Called and returns without pte mapped or spinlocks held,
987  * but with mmap_lock held to protect against vma changes.
988  */
989 
990 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
991 					struct vm_area_struct *vma,
992 					unsigned long address, pmd_t *pmd,
993 					int referenced)
994 {
995 	int swapped_in = 0;
996 	vm_fault_t ret = 0;
997 	struct vm_fault vmf = {
998 		.vma = vma,
999 		.address = address,
1000 		.flags = FAULT_FLAG_ALLOW_RETRY,
1001 		.pmd = pmd,
1002 		.pgoff = linear_page_index(vma, address),
1003 	};
1004 
1005 	vmf.pte = pte_offset_map(pmd, address);
1006 	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1007 			vmf.pte++, vmf.address += PAGE_SIZE) {
1008 		vmf.orig_pte = *vmf.pte;
1009 		if (!is_swap_pte(vmf.orig_pte))
1010 			continue;
1011 		swapped_in++;
1012 		ret = do_swap_page(&vmf);
1013 
1014 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1015 		if (ret & VM_FAULT_RETRY) {
1016 			mmap_read_lock(mm);
1017 			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
1018 				/* vma is no longer available, don't continue to swapin */
1019 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1020 				return false;
1021 			}
1022 			/* check if the pmd is still valid */
1023 			if (mm_find_pmd(mm, address) != pmd) {
1024 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1025 				return false;
1026 			}
1027 		}
1028 		if (ret & VM_FAULT_ERROR) {
1029 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1030 			return false;
1031 		}
1032 		/* pte is unmapped now, we need to map it */
1033 		vmf.pte = pte_offset_map(pmd, vmf.address);
1034 	}
1035 	vmf.pte--;
1036 	pte_unmap(vmf.pte);
1037 
1038 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1039 	if (swapped_in)
1040 		lru_add_drain();
1041 
1042 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1043 	return true;
1044 }
1045 
1046 static void collapse_huge_page(struct mm_struct *mm,
1047 				   unsigned long address,
1048 				   struct page **hpage,
1049 				   int node, int referenced, int unmapped)
1050 {
1051 	LIST_HEAD(compound_pagelist);
1052 	pmd_t *pmd, _pmd;
1053 	pte_t *pte;
1054 	pgtable_t pgtable;
1055 	struct page *new_page;
1056 	spinlock_t *pmd_ptl, *pte_ptl;
1057 	int isolated = 0, result = 0;
1058 	struct vm_area_struct *vma;
1059 	struct mmu_notifier_range range;
1060 	gfp_t gfp;
1061 
1062 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1063 
1064 	/* Only allocate from the target node */
1065 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1066 
1067 	/*
1068 	 * Before allocating the hugepage, release the mmap_lock read lock.
1069 	 * The allocation can take potentially a long time if it involves
1070 	 * sync compaction, and we do not need to hold the mmap_lock during
1071 	 * that. We will recheck the vma after taking it again in write mode.
1072 	 */
1073 	mmap_read_unlock(mm);
1074 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1075 	if (!new_page) {
1076 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1077 		goto out_nolock;
1078 	}
1079 
1080 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1081 		result = SCAN_CGROUP_CHARGE_FAIL;
1082 		goto out_nolock;
1083 	}
1084 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1085 
1086 	mmap_read_lock(mm);
1087 	result = hugepage_vma_revalidate(mm, address, &vma);
1088 	if (result) {
1089 		mmap_read_unlock(mm);
1090 		goto out_nolock;
1091 	}
1092 
1093 	pmd = mm_find_pmd(mm, address);
1094 	if (!pmd) {
1095 		result = SCAN_PMD_NULL;
1096 		mmap_read_unlock(mm);
1097 		goto out_nolock;
1098 	}
1099 
1100 	/*
1101 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1102 	 * If it fails, we release the mmap_lock and jump to out_nolock.
1103 	 * Continuing to collapse causes inconsistency.
1104 	 */
1105 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1106 						     pmd, referenced)) {
1107 		mmap_read_unlock(mm);
1108 		goto out_nolock;
1109 	}
1110 
1111 	mmap_read_unlock(mm);
1112 	/*
1113 	 * Prevent all access to pagetables with the exception of
1114 	 * gup_fast later handled by the ptep_clear_flush and the VM
1115 	 * handled by the anon_vma lock + PG_lock.
1116 	 */
1117 	mmap_write_lock(mm);
1118 	result = hugepage_vma_revalidate(mm, address, &vma);
1119 	if (result)
1120 		goto out;
1121 	/* check if the pmd is still valid */
1122 	if (mm_find_pmd(mm, address) != pmd)
1123 		goto out;
1124 
1125 	anon_vma_lock_write(vma->anon_vma);
1126 
1127 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1128 				address, address + HPAGE_PMD_SIZE);
1129 	mmu_notifier_invalidate_range_start(&range);
1130 
1131 	pte = pte_offset_map(pmd, address);
1132 	pte_ptl = pte_lockptr(mm, pmd);
1133 
1134 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1135 	/*
1136 	 * After this gup_fast can't run anymore. This also removes
1137 	 * any huge TLB entry from the CPU so we won't allow
1138 	 * huge and small TLB entries for the same virtual address
1139 	 * to avoid the risk of CPU bugs in that area.
1140 	 */
1141 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1142 	spin_unlock(pmd_ptl);
1143 	mmu_notifier_invalidate_range_end(&range);
1144 
1145 	spin_lock(pte_ptl);
1146 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1147 			&compound_pagelist);
1148 	spin_unlock(pte_ptl);
1149 
1150 	if (unlikely(!isolated)) {
1151 		pte_unmap(pte);
1152 		spin_lock(pmd_ptl);
1153 		BUG_ON(!pmd_none(*pmd));
1154 		/*
1155 		 * We can only use set_pmd_at when establishing
1156 		 * hugepmds and never for establishing regular pmds that
1157 		 * points to regular pagetables. Use pmd_populate for that
1158 		 */
1159 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1160 		spin_unlock(pmd_ptl);
1161 		anon_vma_unlock_write(vma->anon_vma);
1162 		result = SCAN_FAIL;
1163 		goto out;
1164 	}
1165 
1166 	/*
1167 	 * All pages are isolated and locked so anon_vma rmap
1168 	 * can't run anymore.
1169 	 */
1170 	anon_vma_unlock_write(vma->anon_vma);
1171 
1172 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1173 			&compound_pagelist);
1174 	pte_unmap(pte);
1175 	__SetPageUptodate(new_page);
1176 	pgtable = pmd_pgtable(_pmd);
1177 
1178 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1179 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1180 
1181 	/*
1182 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1183 	 * this is needed to avoid the copy_huge_page writes becoming
1184 	 * visible after the set_pmd_at() write.
1185 	 */
1186 	smp_wmb();
1187 
1188 	spin_lock(pmd_ptl);
1189 	BUG_ON(!pmd_none(*pmd));
1190 	page_add_new_anon_rmap(new_page, vma, address, true);
1191 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1192 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1193 	set_pmd_at(mm, address, pmd, _pmd);
1194 	update_mmu_cache_pmd(vma, address, pmd);
1195 	spin_unlock(pmd_ptl);
1196 
1197 	*hpage = NULL;
1198 
1199 	khugepaged_pages_collapsed++;
1200 	result = SCAN_SUCCEED;
1201 out_up_write:
1202 	mmap_write_unlock(mm);
1203 out_nolock:
1204 	if (!IS_ERR_OR_NULL(*hpage))
1205 		mem_cgroup_uncharge(*hpage);
1206 	trace_mm_collapse_huge_page(mm, isolated, result);
1207 	return;
1208 out:
1209 	goto out_up_write;
1210 }
1211 
1212 static int khugepaged_scan_pmd(struct mm_struct *mm,
1213 			       struct vm_area_struct *vma,
1214 			       unsigned long address,
1215 			       struct page **hpage)
1216 {
1217 	pmd_t *pmd;
1218 	pte_t *pte, *_pte;
1219 	int ret = 0, result = 0, referenced = 0;
1220 	int none_or_zero = 0, shared = 0;
1221 	struct page *page = NULL;
1222 	unsigned long _address;
1223 	spinlock_t *ptl;
1224 	int node = NUMA_NO_NODE, unmapped = 0;
1225 	bool writable = false;
1226 
1227 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1228 
1229 	pmd = mm_find_pmd(mm, address);
1230 	if (!pmd) {
1231 		result = SCAN_PMD_NULL;
1232 		goto out;
1233 	}
1234 
1235 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1236 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1237 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1238 	     _pte++, _address += PAGE_SIZE) {
1239 		pte_t pteval = *_pte;
1240 		if (is_swap_pte(pteval)) {
1241 			if (++unmapped <= khugepaged_max_ptes_swap) {
1242 				/*
1243 				 * Always be strict with uffd-wp
1244 				 * enabled swap entries.  Please see
1245 				 * comment below for pte_uffd_wp().
1246 				 */
1247 				if (pte_swp_uffd_wp(pteval)) {
1248 					result = SCAN_PTE_UFFD_WP;
1249 					goto out_unmap;
1250 				}
1251 				continue;
1252 			} else {
1253 				result = SCAN_EXCEED_SWAP_PTE;
1254 				goto out_unmap;
1255 			}
1256 		}
1257 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1258 			if (!userfaultfd_armed(vma) &&
1259 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1260 				continue;
1261 			} else {
1262 				result = SCAN_EXCEED_NONE_PTE;
1263 				goto out_unmap;
1264 			}
1265 		}
1266 		if (!pte_present(pteval)) {
1267 			result = SCAN_PTE_NON_PRESENT;
1268 			goto out_unmap;
1269 		}
1270 		if (pte_uffd_wp(pteval)) {
1271 			/*
1272 			 * Don't collapse the page if any of the small
1273 			 * PTEs are armed with uffd write protection.
1274 			 * Here we can also mark the new huge pmd as
1275 			 * write protected if any of the small ones is
1276 			 * marked, but that could bring unknown
1277 			 * userfault messages that fall outside of
1278 			 * the registered range.  So, just keep it simple.
1279 			 */
1280 			result = SCAN_PTE_UFFD_WP;
1281 			goto out_unmap;
1282 		}
1283 		if (pte_write(pteval))
1284 			writable = true;
1285 
1286 		page = vm_normal_page(vma, _address, pteval);
1287 		if (unlikely(!page)) {
1288 			result = SCAN_PAGE_NULL;
1289 			goto out_unmap;
1290 		}
1291 
1292 		if (page_mapcount(page) > 1 &&
1293 				++shared > khugepaged_max_ptes_shared) {
1294 			result = SCAN_EXCEED_SHARED_PTE;
1295 			goto out_unmap;
1296 		}
1297 
1298 		page = compound_head(page);
1299 
1300 		/*
1301 		 * Record which node the original page is from and save this
1302 		 * information to khugepaged_node_load[].
1303 		 * Khugepaged will allocate the hugepage from the node with
1304 		 * the max hit record.
1305 		 */
1306 		node = page_to_nid(page);
1307 		if (khugepaged_scan_abort(node)) {
1308 			result = SCAN_SCAN_ABORT;
1309 			goto out_unmap;
1310 		}
1311 		khugepaged_node_load[node]++;
1312 		if (!PageLRU(page)) {
1313 			result = SCAN_PAGE_LRU;
1314 			goto out_unmap;
1315 		}
1316 		if (PageLocked(page)) {
1317 			result = SCAN_PAGE_LOCK;
1318 			goto out_unmap;
1319 		}
1320 		if (!PageAnon(page)) {
1321 			result = SCAN_PAGE_ANON;
1322 			goto out_unmap;
1323 		}
1324 
1325 		/*
1326 		 * Check if the page has any GUP (or other external) pins.
1327 		 *
1328 		 * Here the check is racy; it may see total_mapcount > refcount
1329 		 * in some cases.
1330 		 * For example, take one process with one forked child process.
1331 		 * The parent has the PMD split due to MADV_DONTNEED, then
1332 		 * the child is trying to unmap the whole PMD, but khugepaged
1333 		 * may be scanning the parent after the child has cleared the
1334 		 * PageDoubleMap flag but before it decrements the mapcount.  So
1335 		 * khugepaged may see total_mapcount > refcount.
1336 		 *
1337 		 * But such a case is ephemeral; we can always retry the collapse
1338 		 * later.  However it may report a false positive if the page
1339 		 * has excessive GUP pins (i.e. 512).  Anyway the same check
1340 		 * will be done again later, so the risk seems low.
1341 		 */
1342 		if (!is_refcount_suitable(page)) {
1343 			result = SCAN_PAGE_COUNT;
1344 			goto out_unmap;
1345 		}
1346 		if (pte_young(pteval) ||
1347 		    page_is_young(page) || PageReferenced(page) ||
1348 		    mmu_notifier_test_young(vma->vm_mm, address))
1349 			referenced++;
1350 	}
1351 	if (!writable) {
1352 		result = SCAN_PAGE_RO;
1353 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1354 		result = SCAN_LACK_REFERENCED_PAGE;
1355 	} else {
1356 		result = SCAN_SUCCEED;
1357 		ret = 1;
1358 	}
1359 out_unmap:
1360 	pte_unmap_unlock(pte, ptl);
1361 	if (ret) {
1362 		node = khugepaged_find_target_node();
1363 		/* collapse_huge_page will return with the mmap_lock released */
1364 		collapse_huge_page(mm, address, hpage, node,
1365 				referenced, unmapped);
1366 	}
1367 out:
1368 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1369 				     none_or_zero, result, unmapped);
1370 	return ret;
1371 }
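/*
 * Summary of the verdict logic above: collapse proceeds only when at
 * least one pte is writable and the range looks hot; when swap-in work
 * would be needed (unmapped != 0), at least HPAGE_PMD_NR/2 referenced
 * ptes are required to justify the extra I/O.
 */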
1372 
1373 static void collect_mm_slot(struct mm_slot *mm_slot)
1374 {
1375 	struct mm_struct *mm = mm_slot->mm;
1376 
1377 	lockdep_assert_held(&khugepaged_mm_lock);
1378 
1379 	if (khugepaged_test_exit(mm)) {
1380 		/* free mm_slot */
1381 		hash_del(&mm_slot->hash);
1382 		list_del(&mm_slot->mm_node);
1383 
1384 		/*
1385 		 * Not strictly needed because the mm exited already.
1386 		 *
1387 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1388 		 */
1389 
1390 		/* khugepaged_mm_lock actually not necessary for the below */
1391 		free_mm_slot(mm_slot);
1392 		mmdrop(mm);
1393 	}
1394 }
1395 
1396 #ifdef CONFIG_SHMEM
1397 /*
1398  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1399  * khugepaged should try to collapse the page table.
1400  */
1401 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1402 					 unsigned long addr)
1403 {
1404 	struct mm_slot *mm_slot;
1405 
1406 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1407 
1408 	spin_lock(&khugepaged_mm_lock);
1409 	mm_slot = get_mm_slot(mm);
1410 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1411 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1412 	spin_unlock(&khugepaged_mm_lock);
1413 	return 0;
1414 }
1415 
1416 /**
1417  * collapse_pte_mapped_thp - try to collapse a pte-mapped THP for mm at addr
1418  *
1419  * This function checks whether all the PTEs in the PMD are pointing to the
1420  * right THP. If so, retract the page table so the THP can refault in
1421  * as pmd-mapped.
1422  */
1423 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1424 {
1425 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1426 	struct vm_area_struct *vma = find_vma(mm, haddr);
1427 	struct page *hpage;
1428 	pte_t *start_pte, *pte;
1429 	pmd_t *pmd, _pmd;
1430 	spinlock_t *ptl;
1431 	int count = 0;
1432 	int i;
1433 
1434 	if (!vma || !vma->vm_file ||
1435 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1436 		return;
1437 
1438 	/*
1439 	 * The vm_flags may not have VM_HUGEPAGE set if the page was not
1440 	 * collapsed by this mm. But we can still collapse if the page is
1441 	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1442 	 * will not fail the vma for missing VM_HUGEPAGE.
1443 	 */
1444 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1445 		return;
1446 
1447 	hpage = find_lock_page(vma->vm_file->f_mapping,
1448 			       linear_page_index(vma, haddr));
1449 	if (!hpage)
1450 		return;
1451 
1452 	if (!PageHead(hpage))
1453 		goto drop_hpage;
1454 
1455 	pmd = mm_find_pmd(mm, haddr);
1456 	if (!pmd)
1457 		goto drop_hpage;
1458 
1459 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1460 
1461 	/* step 1: check all mapped PTEs are to the right huge page */
1462 	for (i = 0, addr = haddr, pte = start_pte;
1463 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1464 		struct page *page;
1465 
1466 		/* empty pte, skip */
1467 		if (pte_none(*pte))
1468 			continue;
1469 
1470 		/* page swapped out, abort */
1471 		if (!pte_present(*pte))
1472 			goto abort;
1473 
1474 		page = vm_normal_page(vma, addr, *pte);
1475 
1476 		/*
1477 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1478 		 * page table, but the new page will not be a subpage of hpage.
1479 		 */
1480 		if (hpage + i != page)
1481 			goto abort;
1482 		count++;
1483 	}
1484 
1485 	/* step 2: adjust rmap */
1486 	for (i = 0, addr = haddr, pte = start_pte;
1487 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1488 		struct page *page;
1489 
1490 		if (pte_none(*pte))
1491 			continue;
1492 		page = vm_normal_page(vma, addr, *pte);
1493 		page_remove_rmap(page, false);
1494 	}
1495 
1496 	pte_unmap_unlock(start_pte, ptl);
1497 
1498 	/* step 3: set proper refcount and mm_counters. */
1499 	if (count) {
1500 		page_ref_sub(hpage, count);
1501 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1502 	}
1503 
1504 	/* step 4: collapse pmd */
1505 	ptl = pmd_lock(vma->vm_mm, pmd);
1506 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1507 	spin_unlock(ptl);
1508 	mm_dec_nr_ptes(mm);
1509 	pte_free(mm, pmd_pgtable(_pmd));
1510 
1511 drop_hpage:
1512 	unlock_page(hpage);
1513 	put_page(hpage);
1514 	return;
1515 
1516 abort:
1517 	pte_unmap_unlock(start_pte, ptl);
1518 	goto drop_hpage;
1519 }
1520 
1521 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1522 {
1523 	struct mm_struct *mm = mm_slot->mm;
1524 	int i;
1525 
1526 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1527 		return 0;
1528 
1529 	if (!mmap_write_trylock(mm))
1530 		return -EBUSY;
1531 
1532 	if (unlikely(khugepaged_test_exit(mm)))
1533 		goto out;
1534 
1535 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1536 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1537 
1538 out:
1539 	mm_slot->nr_pte_mapped_thp = 0;
1540 	mmap_write_unlock(mm);
1541 	return 0;
1542 }
1543 
1544 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1545 {
1546 	struct vm_area_struct *vma;
1547 	struct mm_struct *mm;
1548 	unsigned long addr;
1549 	pmd_t *pmd, _pmd;
1550 
1551 	i_mmap_lock_write(mapping);
1552 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1553 		/*
1554 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1555 		 * got written to. These VMAs are likely not worth the cost of
1556 		 * mmap_write_lock(mm), as the PMD-mapping is likely to be
1557 		 * split later anyway.
1558 		 *
1559 		 * Note that the vma->anon_vma check is racy: it can be set up
1560 		 * after the check but before we take mmap_lock in the fault path.
1561 		 * But the page lock would prevent establishing any new ptes of
1562 		 * the page, so we are safe.
1563 		 *
1564 		 * An alternative would be to drop the check, but check that the
1565 		 * page table is clear before calling pmdp_collapse_flush() under
1566 		 * ptl. That has a higher chance of recovering a THP for the VMA,
1567 		 * but has a higher cost too.
1568 		 */
1569 		if (vma->anon_vma)
1570 			continue;
1571 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1572 		if (addr & ~HPAGE_PMD_MASK)
1573 			continue;
1574 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1575 			continue;
1576 		mm = vma->vm_mm;
1577 		pmd = mm_find_pmd(mm, addr);
1578 		if (!pmd)
1579 			continue;
1580 		/*
1581 		 * We need exclusive mmap_lock to retract page table.
1582 		 *
1583 		 * We use trylock due to lock inversion: we need to acquire
1584 		 * mmap_lock while holding page lock. Fault path does it in
1585 		 * reverse order. Trylock is a way to avoid deadlock.
1586 		 */
1587 		if (mmap_write_trylock(mm)) {
1588 			if (!khugepaged_test_exit(mm)) {
1589 				spinlock_t *ptl = pmd_lock(mm, pmd);
1590 				/* assume page table is clear */
1591 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1592 				spin_unlock(ptl);
1593 				mm_dec_nr_ptes(mm);
1594 				pte_free(mm, pmd_pgtable(_pmd));
1595 			}
1596 			mmap_write_unlock(mm);
1597 		} else {
1598 			/* Try again later */
1599 			khugepaged_add_pte_mapped_thp(mm, addr);
1600 		}
1601 	}
1602 	i_mmap_unlock_write(mapping);
1603 }
1604 
1605 /**
1606  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1607  *
1608  * Basic scheme is simple, details are more complex:
1609  *  - allocate and lock a new huge page;
1610  *  - scan page cache replacing old pages with the new one
1611  *    + swap/gup in pages if necessary;
1612  *    + fill in gaps;
1613  *    + keep old pages around in case rollback is required;
1614  *  - if replacing succeeds:
1615  *    + copy data over;
1616  *    + free old pages;
1617  *    + unlock huge page;
1618  *  - if replacing failed:
1619  *    + put all pages back and unfreeze them;
1620  *    + restore gaps in the page cache;
1621  *    + unlock and free huge page;
1622  */
1623 static void collapse_file(struct mm_struct *mm,
1624 		struct file *file, pgoff_t start,
1625 		struct page **hpage, int node)
1626 {
1627 	struct address_space *mapping = file->f_mapping;
1628 	gfp_t gfp;
1629 	struct page *new_page;
1630 	pgoff_t index, end = start + HPAGE_PMD_NR;
1631 	LIST_HEAD(pagelist);
1632 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1633 	int nr_none = 0, result = SCAN_SUCCEED;
1634 	bool is_shmem = shmem_file(file);
1635 
1636 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1637 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1638 
1639 	/* Only allocate from the target node */
1640 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1641 
1642 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1643 	if (!new_page) {
1644 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1645 		goto out;
1646 	}
1647 
1648 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1649 		result = SCAN_CGROUP_CHARGE_FAIL;
1650 		goto out;
1651 	}
1652 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1653 
1654 	/* This will be less messy when we use multi-index entries */
1655 	do {
1656 		xas_lock_irq(&xas);
1657 		xas_create_range(&xas);
1658 		if (!xas_error(&xas))
1659 			break;
1660 		xas_unlock_irq(&xas);
1661 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1662 			result = SCAN_FAIL;
1663 			goto out;
1664 		}
1665 	} while (1);
1666 
1667 	__SetPageLocked(new_page);
1668 	if (is_shmem)
1669 		__SetPageSwapBacked(new_page);
1670 	new_page->index = start;
1671 	new_page->mapping = mapping;
1672 
1673 	/*
1674 	 * At this point the new_page is locked and not up-to-date.
1675 	 * It's safe to insert it into the page cache, because nobody would
1676 	 * be able to map it or use it in another way until we unlock it.
1677 	 */
1678 
1679 	xas_set(&xas, start);
1680 	for (index = start; index < end; index++) {
1681 		struct page *page = xas_next(&xas);
1682 
1683 		VM_BUG_ON(index != xas.xa_index);
1684 		if (is_shmem) {
1685 			if (!page) {
1686 				/*
1687 				 * Stop if extent has been truncated or
1688 				 * hole-punched, and is now completely
1689 				 * empty.
1690 				 */
1691 				if (index == start) {
1692 					if (!xas_next_entry(&xas, end - 1)) {
1693 						result = SCAN_TRUNCATED;
1694 						goto xa_locked;
1695 					}
1696 					xas_set(&xas, index);
1697 				}
1698 				if (!shmem_charge(mapping->host, 1)) {
1699 					result = SCAN_FAIL;
1700 					goto xa_locked;
1701 				}
1702 				xas_store(&xas, new_page);
1703 				nr_none++;
1704 				continue;
1705 			}
1706 
1707 			if (xa_is_value(page) || !PageUptodate(page)) {
1708 				xas_unlock_irq(&xas);
1709 				/* swap in or instantiate fallocated page */
1710 				if (shmem_getpage(mapping->host, index, &page,
1711 						  SGP_NOHUGE)) {
1712 					result = SCAN_FAIL;
1713 					goto xa_unlocked;
1714 				}
1715 			} else if (trylock_page(page)) {
1716 				get_page(page);
1717 				xas_unlock_irq(&xas);
1718 			} else {
1719 				result = SCAN_PAGE_LOCK;
1720 				goto xa_locked;
1721 			}
1722 		} else {	/* !is_shmem */
1723 			if (!page || xa_is_value(page)) {
1724 				xas_unlock_irq(&xas);
1725 				page_cache_sync_readahead(mapping, &file->f_ra,
1726 							  file, index,
1727 							  end - index);
1728 				/* drain pagevecs to help isolate_lru_page() */
1729 				lru_add_drain();
1730 				page = find_lock_page(mapping, index);
1731 				if (unlikely(page == NULL)) {
1732 					result = SCAN_FAIL;
1733 					goto xa_unlocked;
1734 				}
1735 			} else if (PageDirty(page)) {
1736 				/*
1737 				 * khugepaged only works on read-only fd,
1738 				 * so this page is dirty because it hasn't
1739 				 * been flushed since first write. There
1740 				 * won't be new dirty pages.
1741 				 *
1742 				 * Trigger async flush here and hope the
1743 				 * writeback is done when khugepaged
1744 				 * revisits this page.
1745 				 *
1746 				 * This is a one-off situation. We are not
1747 				 * forcing writeback in loop.
1748 				 */
1749 				xas_unlock_irq(&xas);
1750 				filemap_flush(mapping);
1751 				result = SCAN_FAIL;
1752 				goto xa_unlocked;
1753 			} else if (trylock_page(page)) {
1754 				get_page(page);
1755 				xas_unlock_irq(&xas);
1756 			} else {
1757 				result = SCAN_PAGE_LOCK;
1758 				goto xa_locked;
1759 			}
1760 		}
1761 
1762 		/*
1763 		 * The page must be locked, so we can drop the i_pages lock
1764 		 * without racing with truncate.
1765 		 */
1766 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1767 
1768 		/* make sure the page is up to date */
1769 		if (unlikely(!PageUptodate(page))) {
1770 			result = SCAN_FAIL;
1771 			goto out_unlock;
1772 		}
1773 
1774 		/*
1775 		 * If file was truncated then extended, or hole-punched, before
1776 		 * we locked the first page, then a THP might be there already.
1777 		 */
1778 		if (PageTransCompound(page)) {
1779 			result = SCAN_PAGE_COMPOUND;
1780 			goto out_unlock;
1781 		}
1782 
1783 		if (page_mapping(page) != mapping) {
1784 			result = SCAN_TRUNCATED;
1785 			goto out_unlock;
1786 		}
1787 
1788 		if (!is_shmem && PageDirty(page)) {
1789 			/*
1790 			 * khugepaged only works on read-only fd, so this
1791 			 * page is dirty because it hasn't been flushed
1792 			 * since first write.
1793 			 */
1794 			result = SCAN_FAIL;
1795 			goto out_unlock;
1796 		}
1797 
1798 		if (isolate_lru_page(page)) {
1799 			result = SCAN_DEL_PAGE_LRU;
1800 			goto out_unlock;
1801 		}
1802 
1803 		if (page_has_private(page) &&
1804 		    !try_to_release_page(page, GFP_KERNEL)) {
1805 			result = SCAN_PAGE_HAS_PRIVATE;
1806 			putback_lru_page(page);
1807 			goto out_unlock;
1808 		}
1809 
1810 		if (page_mapped(page))
1811 			unmap_mapping_pages(mapping, index, 1, false);
1812 
1813 		xas_lock_irq(&xas);
1814 		xas_set(&xas, index);
1815 
1816 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1817 		VM_BUG_ON_PAGE(page_mapped(page), page);
1818 
1819 		/*
1820 		 * The page is expected to have page_count() == 3:
1821 		 *  - we hold a pin on it;
1822 		 *  - one reference from page cache;
1823 		 *  - one from isolate_lru_page;
1824 		 */
1825 		if (!page_ref_freeze(page, 3)) {
1826 			result = SCAN_PAGE_COUNT;
1827 			xas_unlock_irq(&xas);
1828 			putback_lru_page(page);
1829 			goto out_unlock;
1830 		}
1831 
1832 		/*
1833 		 * Add the page to the list to be able to undo the collapse if
1834 		 * something goes wrong.
1835 		 */
1836 		list_add_tail(&page->lru, &pagelist);
1837 
1838 		/* Finally, replace with the new page. */
1839 		xas_store(&xas, new_page);
1840 		continue;
1841 out_unlock:
1842 		unlock_page(page);
1843 		put_page(page);
1844 		goto xa_unlocked;
1845 	}
1846 
1847 	if (is_shmem)
1848 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1849 	else {
1850 		__inc_node_page_state(new_page, NR_FILE_THPS);
1851 		filemap_nr_thps_inc(mapping);
1852 	}
1853 
1854 	if (nr_none) {
1855 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1856 		if (is_shmem)
1857 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1858 	}
1859 
1860 xa_locked:
1861 	xas_unlock_irq(&xas);
1862 xa_unlocked:
1863 
1864 	if (result == SCAN_SUCCEED) {
1865 		struct page *page, *tmp;
1866 
1867 		/*
1868 		 * Replacing old pages with new one has succeeded, now we
1869 		 * need to copy the content and free the old pages.
1870 		 */
1871 		index = start;
1872 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1873 			while (index < page->index) {
1874 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1875 				index++;
1876 			}
1877 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1878 					page);
1879 			list_del(&page->lru);
1880 			page->mapping = NULL;
1881 			page_ref_unfreeze(page, 1);
1882 			ClearPageActive(page);
1883 			ClearPageUnevictable(page);
1884 			unlock_page(page);
1885 			put_page(page);
1886 			index++;
1887 		}
1888 		while (index < end) {
1889 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1890 			index++;
1891 		}
1892 
1893 		SetPageUptodate(new_page);
1894 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1895 		if (is_shmem)
1896 			set_page_dirty(new_page);
1897 		lru_cache_add(new_page);
1898 
1899 		/*
1900 		 * Remove the pte page tables, so we can re-fault the range as huge.
1901 		 */
1902 		retract_page_tables(mapping, start);
1903 		*hpage = NULL;
1904 
1905 		khugepaged_pages_collapsed++;
1906 	} else {
1907 		struct page *page;
1908 
1909 		/* Something went wrong: roll back page cache changes */
1910 		xas_lock_irq(&xas);
1911 		mapping->nrpages -= nr_none;
1912 
1913 		if (is_shmem)
1914 			shmem_uncharge(mapping->host, nr_none);
1915 
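		/*
		 * Walk the range again: slots that never held a page were
		 * holes we accounted in nr_none and go back to NULL; pages
		 * still on the undo list are unfrozen, restored to their
		 * slot and returned to the LRU.
		 */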
1916 		xas_set(&xas, start);
1917 		xas_for_each(&xas, page, end - 1) {
1918 			page = list_first_entry_or_null(&pagelist,
1919 					struct page, lru);
1920 			if (!page || xas.xa_index < page->index) {
1921 				if (!nr_none)
1922 					break;
1923 				nr_none--;
1924 				/* Put holes back where they were */
1925 				xas_store(&xas, NULL);
1926 				continue;
1927 			}
1928 
1929 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1930 
1931 			/* Unfreeze the page. */
1932 			list_del(&page->lru);
1933 			page_ref_unfreeze(page, 2);
1934 			xas_store(&xas, page);
1935 			xas_pause(&xas);
1936 			xas_unlock_irq(&xas);
1937 			unlock_page(page);
1938 			putback_lru_page(page);
1939 			xas_lock_irq(&xas);
1940 		}
1941 		VM_BUG_ON(nr_none);
1942 		xas_unlock_irq(&xas);
1943 
1944 		new_page->mapping = NULL;
1945 	}
1946 
1947 	unlock_page(new_page);
1948 out:
1949 	VM_BUG_ON(!list_empty(&pagelist));
1950 	if (!IS_ERR_OR_NULL(*hpage))
1951 		mem_cgroup_uncharge(*hpage);
1952 	/* TODO: tracepoints */
1953 }
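/*
 * Illustrative sketch (not part of the original file): the
 * freeze/replace/unfreeze pattern collapse_file() uses above, reduced to a
 * single page cache slot.  page_ref_freeze(), xas_store() and
 * page_ref_unfreeze() are the real APIs; the helper itself is hypothetical.
 * The caller is assumed to hold the xa_lock with the old page locked and
 * unmapped, so that expected_refs covers every remaining reference.
 */
static bool example_replace_cache_slot(struct xa_state *xas,
				       struct page *old, struct page *new,
				       int expected_refs)
{
	/* Freeze the refcount to zero; fails if someone else holds a pin. */
	if (!page_ref_freeze(old, expected_refs))
		return false;
	/* No new reference can be taken now; swap the slot. */
	xas_store(xas, new);
	/* Hand the old page back to the caller with a single reference. */
	page_ref_unfreeze(old, 1);
	return true;
}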
1954 
1955 static void khugepaged_scan_file(struct mm_struct *mm,
1956 		struct file *file, pgoff_t start, struct page **hpage)
1957 {
1958 	struct page *page = NULL;
1959 	struct address_space *mapping = file->f_mapping;
1960 	XA_STATE(xas, &mapping->i_pages, start);
1961 	int present, swap;
1962 	int node = NUMA_NO_NODE;
1963 	int result = SCAN_SUCCEED;
1964 
1965 	present = 0;
1966 	swap = 0;
1967 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1968 	rcu_read_lock();
1969 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1970 		if (xas_retry(&xas, page))
1971 			continue;
1972 
1973 		if (xa_is_value(page)) {
1974 			if (++swap > khugepaged_max_ptes_swap) {
1975 				result = SCAN_EXCEED_SWAP_PTE;
1976 				break;
1977 			}
1978 			continue;
1979 		}
1980 
1981 		if (PageTransCompound(page)) {
1982 			result = SCAN_PAGE_COMPOUND;
1983 			break;
1984 		}
1985 
1986 		node = page_to_nid(page);
1987 		if (khugepaged_scan_abort(node)) {
1988 			result = SCAN_SCAN_ABORT;
1989 			break;
1990 		}
1991 		khugepaged_node_load[node]++;
1992 
1993 		if (!PageLRU(page)) {
1994 			result = SCAN_PAGE_LRU;
1995 			break;
1996 		}
1997 
1998 		if (page_count(page) !=
1999 		    1 + page_mapcount(page) + page_has_private(page)) {
2000 			result = SCAN_PAGE_COUNT;
2001 			break;
2002 		}
2003 
2004 		/*
2005 		 * We probably should check if the page is referenced here, but
2006 		 * nobody would transfer pte_young() to PageReferenced() for us.
2007 		 * And an rmap walk here is just too costly...
2008 		 */
2009 
2010 		present++;
2011 
2012 		if (need_resched()) {
2013 			xas_pause(&xas);
2014 			cond_resched_rcu();
2015 		}
2016 	}
2017 	rcu_read_unlock();
2018 
2019 	if (result == SCAN_SUCCEED) {
2020 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2021 			result = SCAN_EXCEED_NONE_PTE;
2022 		} else {
2023 			node = khugepaged_find_target_node();
2024 			collapse_file(mm, file, start, hpage, node);
2025 		}
2026 	}
2027 
2028 	/* TODO: tracepoints */
2029 }
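/*
 * Illustrative sketch: the RCU-protected XArray walk khugepaged_scan_file()
 * performs above, reduced to counting present pages in a range.  The XArray
 * and RCU calls are the real APIs; the helper itself is hypothetical.
 */
static int example_count_present_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	int present = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		/* The entry moved under us; retry this index. */
		if (xas_retry(&xas, page))
			continue;
		/* Value entries are swap/shadow entries, not pages. */
		if (xa_is_value(page))
			continue;
		present++;
		if (need_resched()) {
			/* Drop the walk state before we might sleep. */
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
	return present;
}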
2030 #else
2031 static void khugepaged_scan_file(struct mm_struct *mm,
2032 		struct file *file, pgoff_t start, struct page **hpage)
2033 {
2034 	BUILD_BUG();
2035 }
2036 
2037 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2038 {
2039 	return 0;
2040 }
2041 #endif
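/*
 * Illustrative sketch: why the BUILD_BUG() stub above is safe.  Every call
 * site guards khugepaged_scan_file() with IS_ENABLED(CONFIG_SHMEM), so with
 * CONFIG_SHMEM=n the branch is constant-false, the compiler discards the
 * call as dead code and the BUILD_BUG() never fires.  The helper below is
 * hypothetical.
 */
static void example_guarded_scan(struct mm_struct *mm, struct file *file,
				 pgoff_t pgoff, struct page **hpage)
{
	/* Compile-time constant condition: the dead branch is eliminated. */
	if (IS_ENABLED(CONFIG_SHMEM) && file)
		khugepaged_scan_file(mm, file, pgoff, hpage);
}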
2042 
2043 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2044 					    struct page **hpage)
2045 	__releases(&khugepaged_mm_lock)
2046 	__acquires(&khugepaged_mm_lock)
2047 {
2048 	struct mm_slot *mm_slot;
2049 	struct mm_struct *mm;
2050 	struct vm_area_struct *vma;
2051 	int progress = 0;
2052 
2053 	VM_BUG_ON(!pages);
2054 	lockdep_assert_held(&khugepaged_mm_lock);
2055 
2056 	if (khugepaged_scan.mm_slot)
2057 		mm_slot = khugepaged_scan.mm_slot;
2058 	else {
2059 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2060 				     struct mm_slot, mm_node);
2061 		khugepaged_scan.address = 0;
2062 		khugepaged_scan.mm_slot = mm_slot;
2063 	}
2064 	spin_unlock(&khugepaged_mm_lock);
2065 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2066 
2067 	mm = mm_slot->mm;
2068 	/*
2069 	 * Don't wait for the mmap lock (to avoid long wait times).  Just move to
2070 	 * the next mm on the list.
2071 	 */
2072 	vma = NULL;
2073 	if (unlikely(!mmap_read_trylock(mm)))
2074 		goto breakouterloop_mmap_lock;
2075 	if (likely(!khugepaged_test_exit(mm)))
2076 		vma = find_vma(mm, khugepaged_scan.address);
2077 
2078 	progress++;
2079 	for (; vma; vma = vma->vm_next) {
2080 		unsigned long hstart, hend;
2081 
2082 		cond_resched();
2083 		if (unlikely(khugepaged_test_exit(mm))) {
2084 			progress++;
2085 			break;
2086 		}
2087 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2088 skip:
2089 			progress++;
2090 			continue;
2091 		}
2092 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2093 		hend = vma->vm_end & HPAGE_PMD_MASK;
2094 		if (hstart >= hend)
2095 			goto skip;
2096 		if (khugepaged_scan.address > hend)
2097 			goto skip;
2098 		if (khugepaged_scan.address < hstart)
2099 			khugepaged_scan.address = hstart;
2100 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2101 		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2102 			goto skip;
2103 
2104 		while (khugepaged_scan.address < hend) {
2105 			int ret;
2106 			cond_resched();
2107 			if (unlikely(khugepaged_test_exit(mm)))
2108 				goto breakouterloop;
2109 
2110 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2111 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2112 				  hend);
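			/*
			 * For file-backed VMAs the scan drops the mmap lock,
			 * so take a reference on the file first to keep it
			 * alive across the unlock.
			 */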
2113 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2114 				struct file *file = get_file(vma->vm_file);
2115 				pgoff_t pgoff = linear_page_index(vma,
2116 						khugepaged_scan.address);
2117 
2118 				mmap_read_unlock(mm);
2119 				ret = 1;
2120 				khugepaged_scan_file(mm, file, pgoff, hpage);
2121 				fput(file);
2122 			} else {
2123 				ret = khugepaged_scan_pmd(mm, vma,
2124 						khugepaged_scan.address,
2125 						hpage);
2126 			}
2127 			/* move to next address */
2128 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2129 			progress += HPAGE_PMD_NR;
2130 			if (ret)
2131 				/* we released mmap_lock, so break out of the loop */
2132 				goto breakouterloop_mmap_lock;
2133 			if (progress >= pages)
2134 				goto breakouterloop;
2135 		}
2136 	}
2137 breakouterloop:
2138 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2139 breakouterloop_mmap_lock:
2140 
2141 	spin_lock(&khugepaged_mm_lock);
2142 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2143 	/*
2144 	 * Release the current mm_slot if this mm is about to die, or
2145 	 * if we scanned all vmas of this mm.
2146 	 */
2147 	if (khugepaged_test_exit(mm) || !vma) {
2148 		/*
2149 		 * Make sure that if mm_users reaches zero while khugepaged
2150 		 * runs here, khugepaged_exit() will find mm_slot not
2151 		 * pointing to the exiting mm.
2152 		 */
2153 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2154 			khugepaged_scan.mm_slot = list_entry(
2155 				mm_slot->mm_node.next,
2156 				struct mm_slot, mm_node);
2157 			khugepaged_scan.address = 0;
2158 		} else {
2159 			khugepaged_scan.mm_slot = NULL;
2160 			khugepaged_full_scans++;
2161 		}
2162 
2163 		collect_mm_slot(mm_slot);
2164 	}
2165 
2166 	return progress;
2167 }
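/*
 * Illustrative sketch: the PMD alignment arithmetic used in the VMA loop
 * above.  The VMA start is rounded up and the end rounded down to huge page
 * boundaries; only [hstart, hend) can hold PMD-mapped huge pages.  The
 * helper is hypothetical.
 */
static bool example_thp_range(unsigned long vm_start, unsigned long vm_end,
			      unsigned long *hstart, unsigned long *hend)
{
	/* Round up: first PMD-aligned address at or above vm_start. */
	*hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	/* Round down: last PMD boundary at or below vm_end. */
	*hend = vm_end & HPAGE_PMD_MASK;
	/*
	 * E.g. with 2MiB huge pages, vm_start = 0x201000 and
	 * vm_end = 0x600000 give hstart = 0x400000, hend = 0x600000:
	 * exactly one huge page fits.
	 */
	return *hstart < *hend;
}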
2168 
2169 static int khugepaged_has_work(void)
2170 {
2171 	return !list_empty(&khugepaged_scan.mm_head) &&
2172 		khugepaged_enabled();
2173 }
2174 
2175 static int khugepaged_wait_event(void)
2176 {
2177 	return !list_empty(&khugepaged_scan.mm_head) ||
2178 		kthread_should_stop();
2179 }
2180 
2181 static void khugepaged_do_scan(void)
2182 {
2183 	struct page *hpage = NULL;
2184 	unsigned int progress = 0, pass_through_head = 0;
2185 	unsigned int pages = khugepaged_pages_to_scan;
2186 	bool wait = true;
2187 
2188 	barrier(); /* snapshot khugepaged_pages_to_scan onto the local stack */
2189 
2190 	lru_add_drain_all();
2191 
2192 	while (progress < pages) {
2193 		if (!khugepaged_prealloc_page(&hpage, &wait))
2194 			break;
2195 
2196 		cond_resched();
2197 
2198 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2199 			break;
2200 
2201 		spin_lock(&khugepaged_mm_lock);
2202 		if (!khugepaged_scan.mm_slot)
2203 			pass_through_head++;
2204 		if (khugepaged_has_work() &&
2205 		    pass_through_head < 2)
2206 			progress += khugepaged_scan_mm_slot(pages - progress,
2207 							    &hpage);
2208 		else
2209 			progress = pages;
2210 		spin_unlock(&khugepaged_mm_lock);
2211 	}
2212 
2213 	if (!IS_ERR_OR_NULL(hpage))
2214 		put_page(hpage);
2215 }
2216 
2217 static bool khugepaged_should_wakeup(void)
2218 {
2219 	return kthread_should_stop() ||
2220 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2221 }
2222 
2223 static void khugepaged_wait_work(void)
2224 {
2225 	if (khugepaged_has_work()) {
2226 		const unsigned long scan_sleep_jiffies =
2227 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2228 
2229 		if (!scan_sleep_jiffies)
2230 			return;
2231 
2232 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2233 		wait_event_freezable_timeout(khugepaged_wait,
2234 					     khugepaged_should_wakeup(),
2235 					     scan_sleep_jiffies);
2236 		return;
2237 	}
2238 
2239 	if (khugepaged_enabled())
2240 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2241 }
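/*
 * Illustrative sketch: the absolute-deadline sleep khugepaged_wait_work()
 * implements above.  The expiry is recorded in jiffies up front, so a
 * spurious wakeup goes back to sleep for the remaining time instead of a
 * full period.  example_expire, example_wait and the helpers below are
 * hypothetical.
 */
static unsigned long example_expire;
static DECLARE_WAIT_QUEUE_HEAD(example_wait);

static bool example_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, example_expire);
}

static void example_sleep(unsigned long millisecs)
{
	const unsigned long timeout = msecs_to_jiffies(millisecs);

	/* Absolute deadline, re-checked by example_should_wakeup(). */
	example_expire = jiffies + timeout;
	wait_event_freezable_timeout(example_wait,
				     example_should_wakeup(), timeout);
}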
2242 
2243 static int khugepaged(void *none)
2244 {
2245 	struct mm_slot *mm_slot;
2246 
2247 	set_freezable();
2248 	set_user_nice(current, MAX_NICE);
2249 
2250 	while (!kthread_should_stop()) {
2251 		khugepaged_do_scan();
2252 		khugepaged_wait_work();
2253 	}
2254 
2255 	spin_lock(&khugepaged_mm_lock);
2256 	mm_slot = khugepaged_scan.mm_slot;
2257 	khugepaged_scan.mm_slot = NULL;
2258 	if (mm_slot)
2259 		collect_mm_slot(mm_slot);
2260 	spin_unlock(&khugepaged_mm_lock);
2261 	return 0;
2262 }
2263 
2264 static void set_recommended_min_free_kbytes(void)
2265 {
2266 	struct zone *zone;
2267 	int nr_zones = 0;
2268 	unsigned long recommended_min;
2269 
2270 	for_each_populated_zone(zone) {
2271 		/*
2272 		 * We don't need to worry about fragmentation of
2273 		 * ZONE_MOVABLE since it only has movable pages.
2274 		 */
2275 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2276 			continue;
2277 
2278 		nr_zones++;
2279 	}
2280 
2281 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2282 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2283 
2284 	/*
2285 	 * Make sure that on average at least two pageblocks are almost free
2286 	 * of another type, one for a migratetype to fall back to and a
2287 	 * second to avoid subsequent fallbacks of other types.  There are 3
2288 	 * MIGRATE_TYPES we care about.
2289 	 */
2290 	recommended_min += pageblock_nr_pages * nr_zones *
2291 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2292 
2293 	/* never allow more than 5% of the lowmem to be reserved */
2294 	recommended_min = min(recommended_min,
2295 			      (unsigned long) nr_free_buffer_pages() / 20);
2296 	recommended_min <<= (PAGE_SHIFT-10);
2297 
2298 	if (recommended_min > min_free_kbytes) {
2299 		if (user_min_free_kbytes >= 0)
2300 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2301 				min_free_kbytes, recommended_min);
2302 
2303 		min_free_kbytes = recommended_min;
2304 	}
2305 	setup_per_zone_wmarks();
2306 }
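/*
 * Worked example of the sizing above, with illustrative numbers: on a
 * machine with 4 populated non-movable zones and pageblock_nr_pages == 512
 * (2MiB pageblocks with 4KiB pages), the base reservation is
 * 512 * 4 * 2 = 4096 pages, plus 512 * 4 * 3 * 3 = 18432 pages for the
 * migratetype fallbacks, i.e. 22528 pages in total.  Shifted by
 * PAGE_SHIFT - 10 that is 90112 kB (88 MiB), before the 5%-of-lowmem
 * clamp is applied.
 */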
2307 
2308 int start_stop_khugepaged(void)
2309 {
2310 	int err = 0;
2311 
2312 	mutex_lock(&khugepaged_mutex);
2313 	if (khugepaged_enabled()) {
2314 		if (!khugepaged_thread)
2315 			khugepaged_thread = kthread_run(khugepaged, NULL,
2316 							"khugepaged");
2317 		if (IS_ERR(khugepaged_thread)) {
2318 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2319 			err = PTR_ERR(khugepaged_thread);
2320 			khugepaged_thread = NULL;
2321 			goto fail;
2322 		}
2323 
2324 		if (!list_empty(&khugepaged_scan.mm_head))
2325 			wake_up_interruptible(&khugepaged_wait);
2326 
2327 		set_recommended_min_free_kbytes();
2328 	} else if (khugepaged_thread) {
2329 		kthread_stop(khugepaged_thread);
2330 		khugepaged_thread = NULL;
2331 	}
2332 fail:
2333 	mutex_unlock(&khugepaged_mutex);
2334 	return err;
2335 }
2336 
2337 void khugepaged_min_free_kbytes_update(void)
2338 {
2339 	mutex_lock(&khugepaged_mutex);
2340 	if (khugepaged_enabled() && khugepaged_thread)
2341 		set_recommended_min_free_kbytes();
2342 	mutex_unlock(&khugepaged_mutex);
2343 }
2344