xref: /openbmc/linux/mm/khugepaged.c (revision 06701297)
1  // SPDX-License-Identifier: GPL-2.0
2  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3  
4  #include <linux/mm.h>
5  #include <linux/sched.h>
6  #include <linux/sched/mm.h>
7  #include <linux/sched/coredump.h>
8  #include <linux/mmu_notifier.h>
9  #include <linux/rmap.h>
10  #include <linux/swap.h>
11  #include <linux/mm_inline.h>
12  #include <linux/kthread.h>
13  #include <linux/khugepaged.h>
14  #include <linux/freezer.h>
15  #include <linux/mman.h>
16  #include <linux/hashtable.h>
17  #include <linux/userfaultfd_k.h>
18  #include <linux/page_idle.h>
19  #include <linux/swapops.h>
20  #include <linux/shmem_fs.h>
21  
22  #include <asm/tlb.h>
23  #include <asm/pgalloc.h>
24  #include "internal.h"
25  
26  enum scan_result {
27  	SCAN_FAIL,
28  	SCAN_SUCCEED,
29  	SCAN_PMD_NULL,
30  	SCAN_EXCEED_NONE_PTE,
31  	SCAN_EXCEED_SWAP_PTE,
32  	SCAN_EXCEED_SHARED_PTE,
33  	SCAN_PTE_NON_PRESENT,
34  	SCAN_PTE_UFFD_WP,
35  	SCAN_PAGE_RO,
36  	SCAN_LACK_REFERENCED_PAGE,
37  	SCAN_PAGE_NULL,
38  	SCAN_SCAN_ABORT,
39  	SCAN_PAGE_COUNT,
40  	SCAN_PAGE_LRU,
41  	SCAN_PAGE_LOCK,
42  	SCAN_PAGE_ANON,
43  	SCAN_PAGE_COMPOUND,
44  	SCAN_ANY_PROCESS,
45  	SCAN_VMA_NULL,
46  	SCAN_VMA_CHECK,
47  	SCAN_ADDRESS_RANGE,
48  	SCAN_SWAP_CACHE_PAGE,
49  	SCAN_DEL_PAGE_LRU,
50  	SCAN_ALLOC_HUGE_PAGE_FAIL,
51  	SCAN_CGROUP_CHARGE_FAIL,
52  	SCAN_TRUNCATED,
53  	SCAN_PAGE_HAS_PRIVATE,
54  };
55  
56  #define CREATE_TRACE_POINTS
57  #include <trace/events/huge_memory.h>
58  
59  static struct task_struct *khugepaged_thread __read_mostly;
60  static DEFINE_MUTEX(khugepaged_mutex);
61  
62  /* default scan 8*512 pte (or vmas) every 10 seconds */
63  static unsigned int khugepaged_pages_to_scan __read_mostly;
64  static unsigned int khugepaged_pages_collapsed;
65  static unsigned int khugepaged_full_scans;
66  static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67  /* during fragmentation poll the hugepage allocator once every minute */
68  static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69  static unsigned long khugepaged_sleep_expire;
70  static DEFINE_SPINLOCK(khugepaged_mm_lock);
71  static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72  /*
73   * By default, collapse hugepages if there is at least one pte mapped,
74   * as would have happened if the vma had been large enough during the
75   * page fault.
76   */
77  static unsigned int khugepaged_max_ptes_none __read_mostly;
78  static unsigned int khugepaged_max_ptes_swap __read_mostly;
79  static unsigned int khugepaged_max_ptes_shared __read_mostly;
80  
81  #define MM_SLOTS_HASH_BITS 10
82  static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83  
84  static struct kmem_cache *mm_slot_cache __read_mostly;
85  
86  #define MAX_PTE_MAPPED_THP 8
87  
88  /**
89   * struct mm_slot - hash lookup from mm to mm_slot
90   * @hash: hash collision list
91   * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92   * @mm: the mm that this information is valid for
93   * @nr_pte_mapped_thp: number of pte-mapped THPs
94   * @pte_mapped_thp: array of addresses of pte-mapped THPs
95   */
96  struct mm_slot {
97  	struct hlist_node hash;
98  	struct list_head mm_node;
99  	struct mm_struct *mm;
100  
101  	/* pte-mapped THP in this mm */
102  	int nr_pte_mapped_thp;
103  	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
104  };
105  
106  /**
107   * struct khugepaged_scan - cursor for scanning
108   * @mm_head: the head of the mm list to scan
109   * @mm_slot: the current mm_slot we are scanning
110   * @address: the next address inside that to be scanned
111   *
112   * There is only the one khugepaged_scan instance of this cursor structure.
113   */
114  struct khugepaged_scan {
115  	struct list_head mm_head;
116  	struct mm_slot *mm_slot;
117  	unsigned long address;
118  };
119  
120  static struct khugepaged_scan khugepaged_scan = {
121  	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122  };
123  
124  #ifdef CONFIG_SYSFS
125  static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126  					 struct kobj_attribute *attr,
127  					 char *buf)
128  {
129  	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130  }
131  
132  static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133  					  struct kobj_attribute *attr,
134  					  const char *buf, size_t count)
135  {
136  	unsigned int msecs;
137  	int err;
138  
139  	err = kstrtouint(buf, 10, &msecs);
140  	if (err)
141  		return -EINVAL;
142  
143  	khugepaged_scan_sleep_millisecs = msecs;
144  	khugepaged_sleep_expire = 0;
145  	wake_up_interruptible(&khugepaged_wait);
146  
147  	return count;
148  }
149  static struct kobj_attribute scan_sleep_millisecs_attr =
150  	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
151  	       scan_sleep_millisecs_store);
152  
153  static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
154  					  struct kobj_attribute *attr,
155  					  char *buf)
156  {
157  	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
158  }
159  
160  static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
161  					   struct kobj_attribute *attr,
162  					   const char *buf, size_t count)
163  {
164  	unsigned int msecs;
165  	int err;
166  
167  	err = kstrtouint(buf, 10, &msecs);
168  	if (err)
169  		return -EINVAL;
170  
171  	khugepaged_alloc_sleep_millisecs = msecs;
172  	khugepaged_sleep_expire = 0;
173  	wake_up_interruptible(&khugepaged_wait);
174  
175  	return count;
176  }
177  static struct kobj_attribute alloc_sleep_millisecs_attr =
178  	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
179  	       alloc_sleep_millisecs_store);
180  
181  static ssize_t pages_to_scan_show(struct kobject *kobj,
182  				  struct kobj_attribute *attr,
183  				  char *buf)
184  {
185  	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
186  }
187  static ssize_t pages_to_scan_store(struct kobject *kobj,
188  				   struct kobj_attribute *attr,
189  				   const char *buf, size_t count)
190  {
191  	unsigned int pages;
192  	int err;
193  
194  	err = kstrtouint(buf, 10, &pages);
195  	if (err || !pages)
196  		return -EINVAL;
197  
198  	khugepaged_pages_to_scan = pages;
199  
200  	return count;
201  }
202  static struct kobj_attribute pages_to_scan_attr =
203  	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
204  	       pages_to_scan_store);
205  
206  static ssize_t pages_collapsed_show(struct kobject *kobj,
207  				    struct kobj_attribute *attr,
208  				    char *buf)
209  {
210  	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
211  }
212  static struct kobj_attribute pages_collapsed_attr =
213  	__ATTR_RO(pages_collapsed);
214  
215  static ssize_t full_scans_show(struct kobject *kobj,
216  			       struct kobj_attribute *attr,
217  			       char *buf)
218  {
219  	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
220  }
221  static struct kobj_attribute full_scans_attr =
222  	__ATTR_RO(full_scans);
223  
224  static ssize_t khugepaged_defrag_show(struct kobject *kobj,
225  				      struct kobj_attribute *attr, char *buf)
226  {
227  	return single_hugepage_flag_show(kobj, attr, buf,
228  					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
229  }
230  static ssize_t khugepaged_defrag_store(struct kobject *kobj,
231  				       struct kobj_attribute *attr,
232  				       const char *buf, size_t count)
233  {
234  	return single_hugepage_flag_store(kobj, attr, buf, count,
235  				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
236  }
237  static struct kobj_attribute khugepaged_defrag_attr =
238  	__ATTR(defrag, 0644, khugepaged_defrag_show,
239  	       khugepaged_defrag_store);
240  
241  /*
242   * max_ptes_none controls whether khugepaged should collapse hugepages
243   * over any unmapped ptes, in turn potentially increasing the memory
244   * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
245   * reduce the available free memory in the system as it
246   * runs. Increasing max_ptes_none will instead potentially reduce the
247   * free memory in the system during the khugepaged scan.
248   */
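/*
 * Concretely (a worked example, assuming the common HPAGE_PMD_NR of
 * 512): the default max_ptes_none of HPAGE_PMD_NR - 1 = 511 lets
 * khugepaged collapse a PMD range with only a single pte populated,
 * while a value of 0 collapses only fully populated ranges.
 */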
249  static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
250  					     struct kobj_attribute *attr,
251  					     char *buf)
252  {
253  	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
254  }
255  static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
256  					      struct kobj_attribute *attr,
257  					      const char *buf, size_t count)
258  {
259  	int err;
260  	unsigned long max_ptes_none;
261  
262  	err = kstrtoul(buf, 10, &max_ptes_none);
263  	if (err || max_ptes_none > HPAGE_PMD_NR-1)
264  		return -EINVAL;
265  
266  	khugepaged_max_ptes_none = max_ptes_none;
267  
268  	return count;
269  }
270  static struct kobj_attribute khugepaged_max_ptes_none_attr =
271  	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
272  	       khugepaged_max_ptes_none_store);
273  
274  static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
275  					     struct kobj_attribute *attr,
276  					     char *buf)
277  {
278  	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
279  }
280  
281  static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
282  					      struct kobj_attribute *attr,
283  					      const char *buf, size_t count)
284  {
285  	int err;
286  	unsigned long max_ptes_swap;
287  
288  	err = kstrtoul(buf, 10, &max_ptes_swap);
289  	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
290  		return -EINVAL;
291  
292  	khugepaged_max_ptes_swap = max_ptes_swap;
293  
294  	return count;
295  }
296  
297  static struct kobj_attribute khugepaged_max_ptes_swap_attr =
298  	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
299  	       khugepaged_max_ptes_swap_store);
300  
301  static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
302  					       struct kobj_attribute *attr,
303  					       char *buf)
304  {
305  	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
306  }
307  
308  static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
309  					      struct kobj_attribute *attr,
310  					      const char *buf, size_t count)
311  {
312  	int err;
313  	unsigned long max_ptes_shared;
314  
315  	err = kstrtoul(buf, 10, &max_ptes_shared);
316  	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
317  		return -EINVAL;
318  
319  	khugepaged_max_ptes_shared = max_ptes_shared;
320  
321  	return count;
322  }
323  
324  static struct kobj_attribute khugepaged_max_ptes_shared_attr =
325  	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
326  	       khugepaged_max_ptes_shared_store);
327  
328  static struct attribute *khugepaged_attr[] = {
329  	&khugepaged_defrag_attr.attr,
330  	&khugepaged_max_ptes_none_attr.attr,
331  	&khugepaged_max_ptes_swap_attr.attr,
332  	&khugepaged_max_ptes_shared_attr.attr,
333  	&pages_to_scan_attr.attr,
334  	&pages_collapsed_attr.attr,
335  	&full_scans_attr.attr,
336  	&scan_sleep_millisecs_attr.attr,
337  	&alloc_sleep_millisecs_attr.attr,
338  	NULL,
339  };
340  
341  struct attribute_group khugepaged_attr_group = {
342  	.attrs = khugepaged_attr,
343  	.name = "khugepaged",
344  };
345  #endif /* CONFIG_SYSFS */
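
/*
 * The attributes above are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/. An illustrative
 * shell session (values shown are examples; defaults depend on
 * HPAGE_PMD_NR):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	4096
 *	# echo 100000 > \
 *		/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */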
346  
347  int hugepage_madvise(struct vm_area_struct *vma,
348  		     unsigned long *vm_flags, int advice)
349  {
350  	switch (advice) {
351  	case MADV_HUGEPAGE:
352  #ifdef CONFIG_S390
353  		/*
354  		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
355  		 * can't handle this properly after s390_enable_sie, so we simply
356  		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
357  		 */
358  		if (mm_has_pgste(vma->vm_mm))
359  			return 0;
360  #endif
361  		*vm_flags &= ~VM_NOHUGEPAGE;
362  		*vm_flags |= VM_HUGEPAGE;
363  		/*
364  		 * If the vma becomes suitable for khugepaged to scan,
365  		 * register it here without waiting for a page fault that
366  		 * may not happen any time soon.
367  		 */
368  		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
369  				khugepaged_enter_vma_merge(vma, *vm_flags))
370  			return -ENOMEM;
371  		break;
372  	case MADV_NOHUGEPAGE:
373  		*vm_flags &= ~VM_HUGEPAGE;
374  		*vm_flags |= VM_NOHUGEPAGE;
375  		/*
376  		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377  		 * this vma, even if the mm remains registered in khugepaged
378  		 * because it was registered before VM_NOHUGEPAGE was set.
379  		 */
380  		break;
381  	}
382  
383  	return 0;
384  }
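
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a process opts a range in or out with madvise(2), which reaches
 * hugepage_madvise() above:
 *
 *	#include <sys/mman.h>
 *
 *	madvise(addr, len, MADV_HUGEPAGE);	// ask khugepaged to scan
 *	madvise(addr, len, MADV_NOHUGEPAGE);	// opt the range back out
 */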
385  
386  int __init khugepaged_init(void)
387  {
388  	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389  					  sizeof(struct mm_slot),
390  					  __alignof__(struct mm_slot), 0, NULL);
391  	if (!mm_slot_cache)
392  		return -ENOMEM;
393  
394  	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
395  	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
396  	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
397  	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
398  
399  	return 0;
400  }
401  
402  void __init khugepaged_destroy(void)
403  {
404  	kmem_cache_destroy(mm_slot_cache);
405  }
406  
407  static inline struct mm_slot *alloc_mm_slot(void)
408  {
409  	if (!mm_slot_cache)	/* initialization failed */
410  		return NULL;
411  	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
412  }
413  
414  static inline void free_mm_slot(struct mm_slot *mm_slot)
415  {
416  	kmem_cache_free(mm_slot_cache, mm_slot);
417  }
418  
419  static struct mm_slot *get_mm_slot(struct mm_struct *mm)
420  {
421  	struct mm_slot *mm_slot;
422  
423  	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
424  		if (mm == mm_slot->mm)
425  			return mm_slot;
426  
427  	return NULL;
428  }
429  
430  static void insert_to_mm_slots_hash(struct mm_struct *mm,
431  				    struct mm_slot *mm_slot)
432  {
433  	mm_slot->mm = mm;
434  	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
435  }
436  
437  static inline int khugepaged_test_exit(struct mm_struct *mm)
438  {
439  	return atomic_read(&mm->mm_users) == 0;
440  }
441  
442  static bool hugepage_vma_check(struct vm_area_struct *vma,
443  			       unsigned long vm_flags)
444  {
445  	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
446  	    (vm_flags & VM_NOHUGEPAGE) ||
447  	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
448  		return false;
449  
450  	if (shmem_file(vma->vm_file) ||
451  	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
452  	     vma->vm_file &&
453  	     (vm_flags & VM_DENYWRITE))) {
454  		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
455  				HPAGE_PMD_NR);
456  	}
457  	if (!vma->anon_vma || vma->vm_ops)
458  		return false;
459  	if (vma_is_temporary_stack(vma))
460  		return false;
461  	return !(vm_flags & VM_NO_KHUGEPAGED);
462  }
463  
464  int __khugepaged_enter(struct mm_struct *mm)
465  {
466  	struct mm_slot *mm_slot;
467  	int wakeup;
468  
469  	mm_slot = alloc_mm_slot();
470  	if (!mm_slot)
471  		return -ENOMEM;
472  
473  	/* __khugepaged_exit() must not run from under us */
474  	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
475  	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
476  		free_mm_slot(mm_slot);
477  		return 0;
478  	}
479  
480  	spin_lock(&khugepaged_mm_lock);
481  	insert_to_mm_slots_hash(mm, mm_slot);
482  	/*
483  	 * Insert just behind the scanning cursor, to let the area settle
484  	 * down a little.
485  	 */
486  	wakeup = list_empty(&khugepaged_scan.mm_head);
487  	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
488  	spin_unlock(&khugepaged_mm_lock);
489  
490  	mmgrab(mm);
491  	if (wakeup)
492  		wake_up_interruptible(&khugepaged_wait);
493  
494  	return 0;
495  }
496  
497  int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
498  			       unsigned long vm_flags)
499  {
500  	unsigned long hstart, hend;
501  
502  	/*
503  	 * khugepaged only supports read-only files for non-shmem mappings.
504  	 * khugepaged does not yet work on special mappings. And
505  	 * file-private shmem THP is not supported.
506  	 */
507  	if (!hugepage_vma_check(vma, vm_flags))
508  		return 0;
509  
510  	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
511  	hend = vma->vm_end & HPAGE_PMD_MASK;
512  	if (hstart < hend)
513  		return khugepaged_enter(vma, vm_flags);
514  	return 0;
515  }
516  
517  void __khugepaged_exit(struct mm_struct *mm)
518  {
519  	struct mm_slot *mm_slot;
520  	int free = 0;
521  
522  	spin_lock(&khugepaged_mm_lock);
523  	mm_slot = get_mm_slot(mm);
524  	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
525  		hash_del(&mm_slot->hash);
526  		list_del(&mm_slot->mm_node);
527  		free = 1;
528  	}
529  	spin_unlock(&khugepaged_mm_lock);
530  
531  	if (free) {
532  		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
533  		free_mm_slot(mm_slot);
534  		mmdrop(mm);
535  	} else if (mm_slot) {
536  		/*
537  		 * This is required to serialize against
538  		 * khugepaged_test_exit() (which is guaranteed to run
539  		 * under mmap_lock read mode). Stop here (after we return,
540  		 * all pagetables will be destroyed) until
541  		 * khugepaged has finished working on the pagetables
542  		 * under the mmap_lock.
543  		 */
544  		mmap_write_lock(mm);
545  		mmap_write_unlock(mm);
546  	}
547  }
548  
549  static void release_pte_page(struct page *page)
550  {
551  	mod_node_page_state(page_pgdat(page),
552  			NR_ISOLATED_ANON + page_is_file_lru(page),
553  			-compound_nr(page));
554  	unlock_page(page);
555  	putback_lru_page(page);
556  }
557  
558  static void release_pte_pages(pte_t *pte, pte_t *_pte,
559  		struct list_head *compound_pagelist)
560  {
561  	struct page *page, *tmp;
562  
563  	while (--_pte >= pte) {
564  		pte_t pteval = *_pte;
565  
566  		page = pte_page(pteval);
567  		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
568  				!PageCompound(page))
569  			release_pte_page(page);
570  	}
571  
572  	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
573  		list_del(&page->lru);
574  		release_pte_page(page);
575  	}
576  }
577  
578  static bool is_refcount_suitable(struct page *page)
579  {
580  	int expected_refcount;
581  
582  	expected_refcount = total_mapcount(page);
583  	if (PageSwapCache(page))
584  		expected_refcount += compound_nr(page);
585  
586  	return page_count(page) == expected_refcount;
587  }
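
/*
 * For example, an order-0 anonymous page mapped by exactly one pte and
 * not in the swap cache has total_mapcount() == 1, so it is suitable
 * only if page_count() == 1; any extra reference (e.g. a GUP pin)
 * makes the page unsuitable and the collapse is aborted.
 */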
588  
589  static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
590  					unsigned long address,
591  					pte_t *pte,
592  					struct list_head *compound_pagelist)
593  {
594  	struct page *page = NULL;
595  	pte_t *_pte;
596  	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
597  	bool writable = false;
598  
599  	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
600  	     _pte++, address += PAGE_SIZE) {
601  		pte_t pteval = *_pte;
602  		if (pte_none(pteval) || (pte_present(pteval) &&
603  				is_zero_pfn(pte_pfn(pteval)))) {
604  			if (!userfaultfd_armed(vma) &&
605  			    ++none_or_zero <= khugepaged_max_ptes_none) {
606  				continue;
607  			} else {
608  				result = SCAN_EXCEED_NONE_PTE;
609  				goto out;
610  			}
611  		}
612  		if (!pte_present(pteval)) {
613  			result = SCAN_PTE_NON_PRESENT;
614  			goto out;
615  		}
616  		page = vm_normal_page(vma, address, pteval);
617  		if (unlikely(!page)) {
618  			result = SCAN_PAGE_NULL;
619  			goto out;
620  		}
621  
622  		VM_BUG_ON_PAGE(!PageAnon(page), page);
623  
624  		if (page_mapcount(page) > 1 &&
625  				++shared > khugepaged_max_ptes_shared) {
626  			result = SCAN_EXCEED_SHARED_PTE;
627  			goto out;
628  		}
629  
630  		if (PageCompound(page)) {
631  			struct page *p;
632  			page = compound_head(page);
633  
634  			/*
635  			 * Check if we have dealt with the compound page
636  			 * already
637  			 */
638  			list_for_each_entry(p, compound_pagelist, lru) {
639  				if (page == p)
640  					goto next;
641  			}
642  		}
643  
644  		/*
645  		 * We can do it before isolate_lru_page because the
646  		 * page can't be freed from under us. NOTE: PG_lock
647  		 * is needed to serialize against split_huge_page
648  		 * when invoked from the VM.
649  		 */
650  		if (!trylock_page(page)) {
651  			result = SCAN_PAGE_LOCK;
652  			goto out;
653  		}
654  
655  		/*
656  		 * Check if the page has any GUP (or other external) pins.
657  		 *
658  		 * The page table that maps the page has already been unlinked
659  		 * from the page table tree and this process cannot get
660  		 * an additional pin on the page.
661  		 *
662  		 * New pins can come later if the page is shared across fork,
663  		 * but not from this process. The other process cannot write to
664  		 * the page, only trigger CoW.
665  		 */
666  		if (!is_refcount_suitable(page)) {
667  			unlock_page(page);
668  			result = SCAN_PAGE_COUNT;
669  			goto out;
670  		}
671  		if (!pte_write(pteval) && PageSwapCache(page) &&
672  				!reuse_swap_page(page, NULL)) {
673  			/*
674  			 * Page is in the swap cache and cannot be re-used.
675  			 * It cannot be collapsed into a THP.
676  			 */
677  			unlock_page(page);
678  			result = SCAN_SWAP_CACHE_PAGE;
679  			goto out;
680  		}
681  
682  		/*
683  		 * Isolate the page to avoid collapsing a hugepage
684  		 * currently in use by the VM.
685  		 */
686  		if (isolate_lru_page(page)) {
687  			unlock_page(page);
688  			result = SCAN_DEL_PAGE_LRU;
689  			goto out;
690  		}
691  		mod_node_page_state(page_pgdat(page),
692  				NR_ISOLATED_ANON + page_is_file_lru(page),
693  				compound_nr(page));
694  		VM_BUG_ON_PAGE(!PageLocked(page), page);
695  		VM_BUG_ON_PAGE(PageLRU(page), page);
696  
697  		if (PageCompound(page))
698  			list_add_tail(&page->lru, compound_pagelist);
699  next:
700  		/* There should be enough young ptes to collapse the page */
701  		if (pte_young(pteval) ||
702  		    page_is_young(page) || PageReferenced(page) ||
703  		    mmu_notifier_test_young(vma->vm_mm, address))
704  			referenced++;
705  
706  		if (pte_write(pteval))
707  			writable = true;
708  	}
709  	if (likely(writable)) {
710  		if (likely(referenced)) {
711  			result = SCAN_SUCCEED;
712  			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
713  							    referenced, writable, result);
714  			return 1;
715  		}
716  	} else {
717  		result = SCAN_PAGE_RO;
718  	}
719  
720  out:
721  	release_pte_pages(pte, _pte, compound_pagelist);
722  	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
723  					    referenced, writable, result);
724  	return 0;
725  }
726  
727  static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
728  				      struct vm_area_struct *vma,
729  				      unsigned long address,
730  				      spinlock_t *ptl,
731  				      struct list_head *compound_pagelist)
732  {
733  	struct page *src_page, *tmp;
734  	pte_t *_pte;
735  	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
736  				_pte++, page++, address += PAGE_SIZE) {
737  		pte_t pteval = *_pte;
738  
739  		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
740  			clear_user_highpage(page, address);
741  			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
742  			if (is_zero_pfn(pte_pfn(pteval))) {
743  				/*
744  				 * ptl mostly unnecessary.
745  				 */
746  				spin_lock(ptl);
747  				/*
748  				 * paravirt calls inside pte_clear here are
749  				 * superfluous.
750  				 */
751  				pte_clear(vma->vm_mm, address, _pte);
752  				spin_unlock(ptl);
753  			}
754  		} else {
755  			src_page = pte_page(pteval);
756  			copy_user_highpage(page, src_page, address, vma);
757  			if (!PageCompound(src_page))
758  				release_pte_page(src_page);
759  			/*
760  			 * ptl mostly unnecessary, but preempt has to
761  			 * be disabled to update the per-cpu stats
762  			 * inside page_remove_rmap().
763  			 */
764  			spin_lock(ptl);
765  			/*
766  			 * paravirt calls inside pte_clear here are
767  			 * superfluous.
768  			 */
769  			pte_clear(vma->vm_mm, address, _pte);
770  			page_remove_rmap(src_page, false);
771  			spin_unlock(ptl);
772  			free_page_and_swap_cache(src_page);
773  		}
774  	}
775  
776  	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
777  		list_del(&src_page->lru);
778  		release_pte_page(src_page);
779  	}
780  }
781  
782  static void khugepaged_alloc_sleep(void)
783  {
784  	DEFINE_WAIT(wait);
785  
786  	add_wait_queue(&khugepaged_wait, &wait);
787  	freezable_schedule_timeout_interruptible(
788  		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
789  	remove_wait_queue(&khugepaged_wait, &wait);
790  }
791  
792  static int khugepaged_node_load[MAX_NUMNODES];
793  
794  static bool khugepaged_scan_abort(int nid)
795  {
796  	int i;
797  
798  	/*
799  	 * If node_reclaim_mode is disabled, then no extra effort is made to
800  	 * allocate memory locally.
801  	 */
802  	if (!node_reclaim_mode)
803  		return false;
804  
805  	/* If there is a count for this node already, it must be acceptable */
806  	if (khugepaged_node_load[nid])
807  		return false;
808  
809  	for (i = 0; i < MAX_NUMNODES; i++) {
810  		if (!khugepaged_node_load[i])
811  			continue;
812  		if (node_distance(nid, i) > node_reclaim_distance)
813  			return true;
814  	}
815  	return false;
816  }
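
/*
 * Example: with node_reclaim_mode enabled, if pages of the range have
 * already been counted on node 0 and node_distance(0, nid) exceeds
 * node_reclaim_distance, the scan is aborted rather than collapsing
 * into a hugepage whose subpages would be copied across distant nodes.
 */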
817  
818  /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
819  static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
820  {
821  	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
822  }
823  
824  #ifdef CONFIG_NUMA
825  static int khugepaged_find_target_node(void)
826  {
827  	static int last_khugepaged_target_node = NUMA_NO_NODE;
828  	int nid, target_node = 0, max_value = 0;
829  
830  	/* find the first node with the max normal-page hits */
831  	for (nid = 0; nid < MAX_NUMNODES; nid++)
832  		if (khugepaged_node_load[nid] > max_value) {
833  			max_value = khugepaged_node_load[nid];
834  			target_node = nid;
835  		}
836  
837  	/* do some balancing if several nodes have the same hit record */
838  	if (target_node <= last_khugepaged_target_node)
839  		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
840  				nid++)
841  			if (max_value == khugepaged_node_load[nid]) {
842  				target_node = nid;
843  				break;
844  			}
845  
846  	last_khugepaged_target_node = target_node;
847  	return target_node;
848  }
849  
850  static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
851  {
852  	if (IS_ERR(*hpage)) {
853  		if (!*wait)
854  			return false;
855  
856  		*wait = false;
857  		*hpage = NULL;
858  		khugepaged_alloc_sleep();
859  	} else if (*hpage) {
860  		put_page(*hpage);
861  		*hpage = NULL;
862  	}
863  
864  	return true;
865  }
866  
867  static struct page *
868  khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
869  {
870  	VM_BUG_ON_PAGE(*hpage, *hpage);
871  
872  	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
873  	if (unlikely(!*hpage)) {
874  		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
875  		*hpage = ERR_PTR(-ENOMEM);
876  		return NULL;
877  	}
878  
879  	prep_transhuge_page(*hpage);
880  	count_vm_event(THP_COLLAPSE_ALLOC);
881  	return *hpage;
882  }
883  #else
884  static int khugepaged_find_target_node(void)
885  {
886  	return 0;
887  }
888  
889  static inline struct page *alloc_khugepaged_hugepage(void)
890  {
891  	struct page *page;
892  
893  	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
894  			   HPAGE_PMD_ORDER);
895  	if (page)
896  		prep_transhuge_page(page);
897  	return page;
898  }
899  
900  static struct page *khugepaged_alloc_hugepage(bool *wait)
901  {
902  	struct page *hpage;
903  
904  	do {
905  		hpage = alloc_khugepaged_hugepage();
906  		if (!hpage) {
907  			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
908  			if (!*wait)
909  				return NULL;
910  
911  			*wait = false;
912  			khugepaged_alloc_sleep();
913  		} else
914  			count_vm_event(THP_COLLAPSE_ALLOC);
915  	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
916  
917  	return hpage;
918  }
919  
920  static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
921  {
922  	/*
923  	 * If the hpage allocated earlier was briefly exposed in page cache
924  	 * before collapse_file() failed, it is possible that racing lookups
925  	 * have not yet completed, and would then be unpleasantly surprised by
926  	 * finding the hpage reused for the same mapping at a different offset.
927  	 * Just release the previous allocation if there is any danger of that.
928  	 */
929  	if (*hpage && page_count(*hpage) > 1) {
930  		put_page(*hpage);
931  		*hpage = NULL;
932  	}
933  
934  	if (!*hpage)
935  		*hpage = khugepaged_alloc_hugepage(wait);
936  
937  	if (unlikely(!*hpage))
938  		return false;
939  
940  	return true;
941  }
942  
943  static struct page *
944  khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
945  {
946  	VM_BUG_ON(!*hpage);
947  
948  	return *hpage;
949  }
950  #endif
951  
952  /*
953   * If mmap_lock was temporarily dropped, revalidate the vma
954   * after taking mmap_lock again.
955   * Return 0 if it succeeds, otherwise return a non-zero
956   * value (scan code).
957   */
958  
959  static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
960  		struct vm_area_struct **vmap)
961  {
962  	struct vm_area_struct *vma;
963  	unsigned long hstart, hend;
964  
965  	if (unlikely(khugepaged_test_exit(mm)))
966  		return SCAN_ANY_PROCESS;
967  
968  	*vmap = vma = find_vma(mm, address);
969  	if (!vma)
970  		return SCAN_VMA_NULL;
971  
972  	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
973  	hend = vma->vm_end & HPAGE_PMD_MASK;
974  	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
975  		return SCAN_ADDRESS_RANGE;
976  	if (!hugepage_vma_check(vma, vma->vm_flags))
977  		return SCAN_VMA_CHECK;
978  	/* Anon VMA expected */
979  	if (!vma->anon_vma || vma->vm_ops)
980  		return SCAN_VMA_CHECK;
981  	return 0;
982  }
983  
984  /*
985   * Bring missing pages in from swap, to complete THP collapse.
986   * Only done if khugepaged_scan_pmd believes it is worthwhile.
987   *
988   * Called and returns without pte mapped or spinlocks held,
989   * but with mmap_lock held to protect against vma changes.
990   */
991  
992  static bool __collapse_huge_page_swapin(struct mm_struct *mm,
993  					struct vm_area_struct *vma,
994  					unsigned long address, pmd_t *pmd,
995  					int referenced)
996  {
997  	int swapped_in = 0;
998  	vm_fault_t ret = 0;
999  	struct vm_fault vmf = {
1000  		.vma = vma,
1001  		.address = address,
1002  		.flags = FAULT_FLAG_ALLOW_RETRY,
1003  		.pmd = pmd,
1004  		.pgoff = linear_page_index(vma, address),
1005  	};
1006  
1007  	vmf.pte = pte_offset_map(pmd, address);
1008  	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1009  			vmf.pte++, vmf.address += PAGE_SIZE) {
1010  		vmf.orig_pte = *vmf.pte;
1011  		if (!is_swap_pte(vmf.orig_pte))
1012  			continue;
1013  		swapped_in++;
1014  		ret = do_swap_page(&vmf);
1015  
1016  		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1017  		if (ret & VM_FAULT_RETRY) {
1018  			mmap_read_lock(mm);
1019  			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
1020  				/* vma is no longer available, don't continue to swapin */
1021  				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1022  				return false;
1023  			}
1024  			/* check if the pmd is still valid */
1025  			if (mm_find_pmd(mm, address) != pmd) {
1026  				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1027  				return false;
1028  			}
1029  		}
1030  		if (ret & VM_FAULT_ERROR) {
1031  			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1032  			return false;
1033  		}
1034  		/* pte is unmapped now, we need to map it */
1035  		vmf.pte = pte_offset_map(pmd, vmf.address);
1036  	}
1037  	vmf.pte--;
1038  	pte_unmap(vmf.pte);
1039  
1040  	/* Drain LRU add pagevec to remove extra pin on the swapped-in pages */
1041  	if (swapped_in)
1042  		lru_add_drain();
1043  
1044  	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1045  	return true;
1046  }
1047  
1048  static void collapse_huge_page(struct mm_struct *mm,
1049  				   unsigned long address,
1050  				   struct page **hpage,
1051  				   int node, int referenced, int unmapped)
1052  {
1053  	LIST_HEAD(compound_pagelist);
1054  	pmd_t *pmd, _pmd;
1055  	pte_t *pte;
1056  	pgtable_t pgtable;
1057  	struct page *new_page;
1058  	spinlock_t *pmd_ptl, *pte_ptl;
1059  	int isolated = 0, result = 0;
1060  	struct vm_area_struct *vma;
1061  	struct mmu_notifier_range range;
1062  	gfp_t gfp;
1063  
1064  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1065  
1066  	/* Only allocate from the target node */
1067  	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1068  
1069  	/*
1070  	 * Before allocating the hugepage, release the mmap_lock read lock.
1071  	 * The allocation can take potentially a long time if it involves
1072  	 * sync compaction, and we do not need to hold the mmap_lock during
1073  	 * that. We will recheck the vma after taking it again in write mode.
1074  	 */
1075  	mmap_read_unlock(mm);
1076  	new_page = khugepaged_alloc_page(hpage, gfp, node);
1077  	if (!new_page) {
1078  		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1079  		goto out_nolock;
1080  	}
1081  
1082  	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1083  		result = SCAN_CGROUP_CHARGE_FAIL;
1084  		goto out_nolock;
1085  	}
1086  	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1087  
1088  	mmap_read_lock(mm);
1089  	result = hugepage_vma_revalidate(mm, address, &vma);
1090  	if (result) {
1091  		mmap_read_unlock(mm);
1092  		goto out_nolock;
1093  	}
1094  
1095  	pmd = mm_find_pmd(mm, address);
1096  	if (!pmd) {
1097  		result = SCAN_PMD_NULL;
1098  		mmap_read_unlock(mm);
1099  		goto out_nolock;
1100  	}
1101  
1102  	/*
1103  	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1104  	 * If it fails, we release mmap_lock and jump out_nolock.
1105  	 * Continuing to collapse causes inconsistency.
1106  	 */
1107  	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1108  						     pmd, referenced)) {
1109  		mmap_read_unlock(mm);
1110  		goto out_nolock;
1111  	}
1112  
1113  	mmap_read_unlock(mm);
1114  	/*
1115  	 * Prevent all access to pagetables with the exception of
1116  	 * gup_fast, handled later by the ptep_clear_flush, and the VM,
1117  	 * handled by the anon_vma lock + PG_lock.
1118  	 */
1119  	mmap_write_lock(mm);
1120  	result = hugepage_vma_revalidate(mm, address, &vma);
1121  	if (result)
1122  		goto out;
1123  	/* check if the pmd is still valid */
1124  	if (mm_find_pmd(mm, address) != pmd)
1125  		goto out;
1126  
1127  	anon_vma_lock_write(vma->anon_vma);
1128  
1129  	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1130  				address, address + HPAGE_PMD_SIZE);
1131  	mmu_notifier_invalidate_range_start(&range);
1132  
1133  	pte = pte_offset_map(pmd, address);
1134  	pte_ptl = pte_lockptr(mm, pmd);
1135  
1136  	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1137  	/*
1138  	 * After this gup_fast can't run anymore. This also removes
1139  	 * any huge TLB entry from the CPU so we won't allow
1140  	 * huge and small TLB entries for the same virtual address
1141  	 * to avoid the risk of CPU bugs in that area.
1142  	 */
1143  	_pmd = pmdp_collapse_flush(vma, address, pmd);
1144  	spin_unlock(pmd_ptl);
1145  	mmu_notifier_invalidate_range_end(&range);
1146  
1147  	spin_lock(pte_ptl);
1148  	isolated = __collapse_huge_page_isolate(vma, address, pte,
1149  			&compound_pagelist);
1150  	spin_unlock(pte_ptl);
1151  
1152  	if (unlikely(!isolated)) {
1153  		pte_unmap(pte);
1154  		spin_lock(pmd_ptl);
1155  		BUG_ON(!pmd_none(*pmd));
1156  		/*
1157  		 * We can only use set_pmd_at when establishing
1158  		 * hugepmds and never for establishing regular pmds that
1159  		 * point to regular pagetables. Use pmd_populate for that.
1160  		 */
1161  		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1162  		spin_unlock(pmd_ptl);
1163  		anon_vma_unlock_write(vma->anon_vma);
1164  		result = SCAN_FAIL;
1165  		goto out;
1166  	}
1167  
1168  	/*
1169  	 * All pages are isolated and locked so anon_vma rmap
1170  	 * can't run anymore.
1171  	 */
1172  	anon_vma_unlock_write(vma->anon_vma);
1173  
1174  	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1175  			&compound_pagelist);
1176  	pte_unmap(pte);
1177  	__SetPageUptodate(new_page);
1178  	pgtable = pmd_pgtable(_pmd);
1179  
1180  	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1181  	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1182  
1183  	/*
1184  	 * spin_lock() below is not the equivalent of smp_wmb(), so
1185  	 * this is needed to prevent the copy_huge_page writes from
1186  	 * becoming visible after the set_pmd_at() write.
1187  	 */
1188  	smp_wmb();
1189  
1190  	spin_lock(pmd_ptl);
1191  	BUG_ON(!pmd_none(*pmd));
1192  	page_add_new_anon_rmap(new_page, vma, address, true);
1193  	lru_cache_add_inactive_or_unevictable(new_page, vma);
1194  	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1195  	set_pmd_at(mm, address, pmd, _pmd);
1196  	update_mmu_cache_pmd(vma, address, pmd);
1197  	spin_unlock(pmd_ptl);
1198  
1199  	*hpage = NULL;
1200  
1201  	khugepaged_pages_collapsed++;
1202  	result = SCAN_SUCCEED;
1203  out_up_write:
1204  	mmap_write_unlock(mm);
1205  out_nolock:
1206  	if (!IS_ERR_OR_NULL(*hpage))
1207  		mem_cgroup_uncharge(*hpage);
1208  	trace_mm_collapse_huge_page(mm, isolated, result);
1209  	return;
1210  out:
1211  	goto out_up_write;
1212  }
1213  
1214  static int khugepaged_scan_pmd(struct mm_struct *mm,
1215  			       struct vm_area_struct *vma,
1216  			       unsigned long address,
1217  			       struct page **hpage)
1218  {
1219  	pmd_t *pmd;
1220  	pte_t *pte, *_pte;
1221  	int ret = 0, result = 0, referenced = 0;
1222  	int none_or_zero = 0, shared = 0;
1223  	struct page *page = NULL;
1224  	unsigned long _address;
1225  	spinlock_t *ptl;
1226  	int node = NUMA_NO_NODE, unmapped = 0;
1227  	bool writable = false;
1228  
1229  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1230  
1231  	pmd = mm_find_pmd(mm, address);
1232  	if (!pmd) {
1233  		result = SCAN_PMD_NULL;
1234  		goto out;
1235  	}
1236  
1237  	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1238  	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1239  	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1240  	     _pte++, _address += PAGE_SIZE) {
1241  		pte_t pteval = *_pte;
1242  		if (is_swap_pte(pteval)) {
1243  			if (++unmapped <= khugepaged_max_ptes_swap) {
1244  				/*
1245  				 * Always be strict with uffd-wp
1246  				 * enabled swap entries.  Please see
1247  				 * comment below for pte_uffd_wp().
1248  				 */
1249  				if (pte_swp_uffd_wp(pteval)) {
1250  					result = SCAN_PTE_UFFD_WP;
1251  					goto out_unmap;
1252  				}
1253  				continue;
1254  			} else {
1255  				result = SCAN_EXCEED_SWAP_PTE;
1256  				goto out_unmap;
1257  			}
1258  		}
1259  		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1260  			if (!userfaultfd_armed(vma) &&
1261  			    ++none_or_zero <= khugepaged_max_ptes_none) {
1262  				continue;
1263  			} else {
1264  				result = SCAN_EXCEED_NONE_PTE;
1265  				goto out_unmap;
1266  			}
1267  		}
1268  		if (!pte_present(pteval)) {
1269  			result = SCAN_PTE_NON_PRESENT;
1270  			goto out_unmap;
1271  		}
1272  		if (pte_uffd_wp(pteval)) {
1273  			/*
1274  			 * Don't collapse the page if any of the small
1275  			 * PTEs are armed with uffd write protection.
1276  			 * Here we could also mark the new huge pmd as
1277  			 * write protected if any of the small ones is
1278  			 * marked, but that could bring unknown
1279  			 * userfault messages that fall outside of
1280  			 * the registered range.  So, just keep it simple.
1281  			 */
1282  			result = SCAN_PTE_UFFD_WP;
1283  			goto out_unmap;
1284  		}
1285  		if (pte_write(pteval))
1286  			writable = true;
1287  
1288  		page = vm_normal_page(vma, _address, pteval);
1289  		if (unlikely(!page)) {
1290  			result = SCAN_PAGE_NULL;
1291  			goto out_unmap;
1292  		}
1293  
1294  		if (page_mapcount(page) > 1 &&
1295  				++shared > khugepaged_max_ptes_shared) {
1296  			result = SCAN_EXCEED_SHARED_PTE;
1297  			goto out_unmap;
1298  		}
1299  
1300  		page = compound_head(page);
1301  
1302  		/*
1303  		 * Record which node the original page is from and save this
1304  		 * information to khugepaged_node_load[].
1305  		 * Khugepaged will allocate a hugepage from the node that has
1306  		 * the max hit record.
1307  		 */
1308  		node = page_to_nid(page);
1309  		if (khugepaged_scan_abort(node)) {
1310  			result = SCAN_SCAN_ABORT;
1311  			goto out_unmap;
1312  		}
1313  		khugepaged_node_load[node]++;
1314  		if (!PageLRU(page)) {
1315  			result = SCAN_PAGE_LRU;
1316  			goto out_unmap;
1317  		}
1318  		if (PageLocked(page)) {
1319  			result = SCAN_PAGE_LOCK;
1320  			goto out_unmap;
1321  		}
1322  		if (!PageAnon(page)) {
1323  			result = SCAN_PAGE_ANON;
1324  			goto out_unmap;
1325  		}
1326  
1327  		/*
1328  		 * Check if the page has any GUP (or other external) pins.
1329  		 *
1330  		 * Here the check is racy: it may see total_mapcount > refcount
1331  		 * in some cases.
1332  		 * For example, one process with one forked child process.
1333  		 * The parent has the PMD split due to MADV_DONTNEED, then
1334  		 * the child is trying to unmap the whole PMD, but khugepaged
1335  		 * may be scanning the parent between the child clearing the
1336  		 * PageDoubleMap flag and decrementing the mapcount.  So
1337  		 * khugepaged may see total_mapcount > refcount.
1338  		 *
1339  		 * But such a case is ephemeral; we can always retry the
1340  		 * collapse later.  However it may report a false positive if
1341  		 * the page has excessive GUP pins (i.e. 512).  Anyway the
1342  		 * same check will be done again later, so the risk seems low.
1343  		 */
1344  		if (!is_refcount_suitable(page)) {
1345  			result = SCAN_PAGE_COUNT;
1346  			goto out_unmap;
1347  		}
1348  		if (pte_young(pteval) ||
1349  		    page_is_young(page) || PageReferenced(page) ||
1350  		    mmu_notifier_test_young(vma->vm_mm, address))
1351  			referenced++;
1352  	}
1353  	if (!writable) {
1354  		result = SCAN_PAGE_RO;
1355  	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1356  		result = SCAN_LACK_REFERENCED_PAGE;
1357  	} else {
1358  		result = SCAN_SUCCEED;
1359  		ret = 1;
1360  	}
1361  out_unmap:
1362  	pte_unmap_unlock(pte, ptl);
1363  	if (ret) {
1364  		node = khugepaged_find_target_node();
1365  		/* collapse_huge_page will return with the mmap_lock released */
1366  		collapse_huge_page(mm, address, hpage, node,
1367  				referenced, unmapped);
1368  	}
1369  out:
1370  	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1371  				     none_or_zero, result, unmapped);
1372  	return ret;
1373  }
1374  
1375  static void collect_mm_slot(struct mm_slot *mm_slot)
1376  {
1377  	struct mm_struct *mm = mm_slot->mm;
1378  
1379  	lockdep_assert_held(&khugepaged_mm_lock);
1380  
1381  	if (khugepaged_test_exit(mm)) {
1382  		/* free mm_slot */
1383  		hash_del(&mm_slot->hash);
1384  		list_del(&mm_slot->mm_node);
1385  
1386  		/*
1387  		 * Not strictly needed because the mm exited already.
1388  		 *
1389  		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1390  		 */
1391  
1392  		/* khugepaged_mm_lock actually not necessary for the below */
1393  		free_mm_slot(mm_slot);
1394  		mmdrop(mm);
1395  	}
1396  }
1397  
1398  #ifdef CONFIG_SHMEM
1399  /*
1400   * Notify khugepaged that the given addr of the mm is a pte-mapped THP.
1401   * Then khugepaged should try to collapse the page table.
1402   */
1403  static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1404  					 unsigned long addr)
1405  {
1406  	struct mm_slot *mm_slot;
1407  
1408  	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1409  
1410  	spin_lock(&khugepaged_mm_lock);
1411  	mm_slot = get_mm_slot(mm);
1412  	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1413  		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1414  	spin_unlock(&khugepaged_mm_lock);
1415  	return 0;
1416  }
1417  
1418  /**
1419   * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1420   * address haddr.
1421   *
1422   * @mm: process address space where collapse happens
1423   * @addr: THP collapse address
1424   *
1425   * This function checks whether all the PTEs in the PMD are pointing to the
1426   * right THP. If so, retract the page table so the THP can refault in
1427   * as pmd-mapped.
1428   */
1429  void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1430  {
1431  	unsigned long haddr = addr & HPAGE_PMD_MASK;
1432  	struct vm_area_struct *vma = find_vma(mm, haddr);
1433  	struct page *hpage;
1434  	pte_t *start_pte, *pte;
1435  	pmd_t *pmd, _pmd;
1436  	spinlock_t *ptl;
1437  	int count = 0;
1438  	int i;
1439  
1440  	if (!vma || !vma->vm_file ||
1441  	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1442  		return;
1443  
1444  	/*
1445  	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1446  	 * collapsed by this mm. But we can still collapse if the page is
1447  	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1448  	 * will not fail the vma for missing VM_HUGEPAGE.
1449  	 */
1450  	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1451  		return;
1452  
1453  	hpage = find_lock_page(vma->vm_file->f_mapping,
1454  			       linear_page_index(vma, haddr));
1455  	if (!hpage)
1456  		return;
1457  
1458  	if (!PageHead(hpage))
1459  		goto drop_hpage;
1460  
1461  	pmd = mm_find_pmd(mm, haddr);
1462  	if (!pmd)
1463  		goto drop_hpage;
1464  
1465  	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1466  
1467  	/* step 1: check all mapped PTEs are to the right huge page */
1468  	for (i = 0, addr = haddr, pte = start_pte;
1469  	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1470  		struct page *page;
1471  
1472  		/* empty pte, skip */
1473  		if (pte_none(*pte))
1474  			continue;
1475  
1476  		/* page swapped out, abort */
1477  		if (!pte_present(*pte))
1478  			goto abort;
1479  
1480  		page = vm_normal_page(vma, addr, *pte);
1481  
1482  		/*
1483  		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1484  		 * page table, but the new page will not be a subpage of hpage.
1485  		 */
1486  		if (hpage + i != page)
1487  			goto abort;
1488  		count++;
1489  	}
1490  
1491  	/* step 2: adjust rmap */
1492  	for (i = 0, addr = haddr, pte = start_pte;
1493  	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1494  		struct page *page;
1495  
1496  		if (pte_none(*pte))
1497  			continue;
1498  		page = vm_normal_page(vma, addr, *pte);
1499  		page_remove_rmap(page, false);
1500  	}
1501  
1502  	pte_unmap_unlock(start_pte, ptl);
1503  
1504  	/* step 3: set proper refcount and mm_counters. */
1505  	if (count) {
1506  		page_ref_sub(hpage, count);
1507  		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1508  	}
1509  
1510  	/* step 4: collapse pmd */
1511  	ptl = pmd_lock(vma->vm_mm, pmd);
1512  	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1513  	spin_unlock(ptl);
1514  	mm_dec_nr_ptes(mm);
1515  	pte_free(mm, pmd_pgtable(_pmd));
1516  
1517  drop_hpage:
1518  	unlock_page(hpage);
1519  	put_page(hpage);
1520  	return;
1521  
1522  abort:
1523  	pte_unmap_unlock(start_pte, ptl);
1524  	goto drop_hpage;
1525  }
1526  
1527  static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1528  {
1529  	struct mm_struct *mm = mm_slot->mm;
1530  	int i;
1531  
1532  	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1533  		return 0;
1534  
1535  	if (!mmap_write_trylock(mm))
1536  		return -EBUSY;
1537  
1538  	if (unlikely(khugepaged_test_exit(mm)))
1539  		goto out;
1540  
1541  	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1542  		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1543  
1544  out:
1545  	mm_slot->nr_pte_mapped_thp = 0;
1546  	mmap_write_unlock(mm);
1547  	return 0;
1548  }
1549  
1550  static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1551  {
1552  	struct vm_area_struct *vma;
1553  	struct mm_struct *mm;
1554  	unsigned long addr;
1555  	pmd_t *pmd, _pmd;
1556  
1557  	i_mmap_lock_write(mapping);
1558  	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1559  		/*
1560  		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1561  		 * got written to. These VMAs are likely not worth investing
1562  		 * mmap_write_lock(mm) in, as the PMD-mapping is likely to be
1563  		 * split later.
1564  		 *
1565  		 * Note that the vma->anon_vma check is racy: it can be set up
1566  		 * after the check but before we take mmap_lock by the fault
1567  		 * path. But the page lock would prevent establishing any new
1568  		 * ptes of the page, so we are safe.
1569  		 *
1570  		 * An alternative would be to drop the check, but to check that
1571  		 * the page table is clear before calling pmdp_collapse_flush()
1572  		 * under ptl. It has a higher chance of recovering a THP for
1573  		 * the VMA, but has a higher cost too.
1574  		 */
1575  		if (vma->anon_vma)
1576  			continue;
1577  		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1578  		if (addr & ~HPAGE_PMD_MASK)
1579  			continue;
1580  		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1581  			continue;
1582  		mm = vma->vm_mm;
1583  		pmd = mm_find_pmd(mm, addr);
1584  		if (!pmd)
1585  			continue;
1586  		/*
1587  		 * We need exclusive mmap_lock to retract page table.
1588  		 *
1589  		 * We use trylock due to lock inversion: we need to acquire
1590  		 * mmap_lock while holding page lock. Fault path does it in
1591  		 * reverse order. Trylock is a way to avoid deadlock.
1592  		 */
1593  		if (mmap_write_trylock(mm)) {
1594  			if (!khugepaged_test_exit(mm)) {
1595  				spinlock_t *ptl = pmd_lock(mm, pmd);
1596  				/* assume page table is clear */
1597  				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1598  				spin_unlock(ptl);
1599  				mm_dec_nr_ptes(mm);
1600  				pte_free(mm, pmd_pgtable(_pmd));
1601  			}
1602  			mmap_write_unlock(mm);
1603  		} else {
1604  			/* Try again later */
1605  			khugepaged_add_pte_mapped_thp(mm, addr);
1606  		}
1607  	}
1608  	i_mmap_unlock_write(mapping);
1609  }
1610  
1611  /**
1612   * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1613   *
1614   * @mm: process address space where collapse happens
1615   * @file: file that the collapse operates on
1616   * @start: collapse start address
1617   * @hpage: newly allocated huge page for collapse
1618   * @node: appointed node the new huge page is allocated from
1619   *
1620   * Basic scheme is simple, details are more complex:
1621   *  - allocate and lock a new huge page;
1622   *  - scan page cache replacing old pages with the new one
1623   *    + swap/gup in pages if necessary;
1624   *    + fill in gaps;
1625   *    + keep old pages around in case rollback is required;
1626   *  - if replacing succeeds:
1627   *    + copy data over;
1628   *    + free old pages;
1629   *    + unlock huge page;
1630   *  - if replacing failed:
1631   *    + put all pages back and unfreeze them;
1632   *    + restore gaps in the page cache;
1633   *    + unlock and free huge page;
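 *
 * (For example, on x86-64 with 4K base pages a successful collapse
 * replaces HPAGE_PMD_NR == 512 page cache entries with a single
 * PMD-sized, 2MB huge page.)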
1634   */
1635  static void collapse_file(struct mm_struct *mm,
1636  		struct file *file, pgoff_t start,
1637  		struct page **hpage, int node)
1638  {
1639  	struct address_space *mapping = file->f_mapping;
1640  	gfp_t gfp;
1641  	struct page *new_page;
1642  	pgoff_t index, end = start + HPAGE_PMD_NR;
1643  	LIST_HEAD(pagelist);
1644  	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1645  	int nr_none = 0, result = SCAN_SUCCEED;
1646  	bool is_shmem = shmem_file(file);
1647  
1648  	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1649  	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1650  
1651  	/* Only allocate from the target node */
1652  	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1653  
1654  	new_page = khugepaged_alloc_page(hpage, gfp, node);
1655  	if (!new_page) {
1656  		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1657  		goto out;
1658  	}
1659  
1660  	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1661  		result = SCAN_CGROUP_CHARGE_FAIL;
1662  		goto out;
1663  	}
1664  	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1665  
1666  	/* This will be less messy when we use multi-index entries */
1667  	do {
1668  		xas_lock_irq(&xas);
1669  		xas_create_range(&xas);
1670  		if (!xas_error(&xas))
1671  			break;
1672  		xas_unlock_irq(&xas);
1673  		if (!xas_nomem(&xas, GFP_KERNEL)) {
1674  			result = SCAN_FAIL;
1675  			goto out;
1676  		}
1677  	} while (1);
1678  
1679  	__SetPageLocked(new_page);
1680  	if (is_shmem)
1681  		__SetPageSwapBacked(new_page);
1682  	new_page->index = start;
1683  	new_page->mapping = mapping;
1684  
1685  	/*
1686  	 * At this point the new_page is locked and not up-to-date.
1687  	 * It's safe to insert it into the page cache, because nobody would
1688  	 * be able to map it or use it in another way until we unlock it.
1689  	 */
1690  
1691  	xas_set(&xas, start);
1692  	for (index = start; index < end; index++) {
1693  		struct page *page = xas_next(&xas);
1694  
1695  		VM_BUG_ON(index != xas.xa_index);
1696  		if (is_shmem) {
1697  			if (!page) {
1698  				/*
1699  				 * Stop if extent has been truncated or
1700  				 * hole-punched, and is now completely
1701  				 * empty.
1702  				 */
1703  				if (index == start) {
1704  					if (!xas_next_entry(&xas, end - 1)) {
1705  						result = SCAN_TRUNCATED;
1706  						goto xa_locked;
1707  					}
1708  					xas_set(&xas, index);
1709  				}
1710  				if (!shmem_charge(mapping->host, 1)) {
1711  					result = SCAN_FAIL;
1712  					goto xa_locked;
1713  				}
1714  				xas_store(&xas, new_page);
1715  				nr_none++;
1716  				continue;
1717  			}
1718  
1719  			if (xa_is_value(page) || !PageUptodate(page)) {
1720  				xas_unlock_irq(&xas);
1721  				/* swap in or instantiate fallocated page */
1722  				if (shmem_getpage(mapping->host, index, &page,
1723  						  SGP_NOHUGE)) {
1724  					result = SCAN_FAIL;
1725  					goto xa_unlocked;
1726  				}
1727  			} else if (trylock_page(page)) {
1728  				get_page(page);
1729  				xas_unlock_irq(&xas);
1730  			} else {
1731  				result = SCAN_PAGE_LOCK;
1732  				goto xa_locked;
1733  			}
1734  		} else {	/* !is_shmem */
1735  			if (!page || xa_is_value(page)) {
1736  				xas_unlock_irq(&xas);
1737  				page_cache_sync_readahead(mapping, &file->f_ra,
1738  							  file, index,
1739  							  end - index);
1740  				/* drain pagevecs to help isolate_lru_page() */
1741  				lru_add_drain();
1742  				page = find_lock_page(mapping, index);
1743  				if (unlikely(page == NULL)) {
1744  					result = SCAN_FAIL;
1745  					goto xa_unlocked;
1746  				}
1747  			} else if (PageDirty(page)) {
1748  				/*
1749  				 * khugepaged only works on read-only fds,
1750  				 * so this page is dirty because it hasn't
1751  				 * been flushed since the first write. There
1752  				 * won't be new dirty pages.
1753  				 *
1754  				 * Trigger async flush here and hope the
1755  				 * writeback is done when khugepaged
1756  				 * revisits this page.
1757  				 *
1758  				 * This is a one-off situation. We are not
1759  				 * forcing writeback in a loop.
1760  				 */
1761  				xas_unlock_irq(&xas);
1762  				filemap_flush(mapping);
1763  				result = SCAN_FAIL;
1764  				goto xa_unlocked;
1765  			} else if (trylock_page(page)) {
1766  				get_page(page);
1767  				xas_unlock_irq(&xas);
1768  			} else {
1769  				result = SCAN_PAGE_LOCK;
1770  				goto xa_locked;
1771  			}
1772  		}
1773  
1774  		/*
1775  		 * The page must be locked, so we can drop the i_pages lock
1776  		 * without racing with truncate.
1777  		 */
1778  		VM_BUG_ON_PAGE(!PageLocked(page), page);
1779  
1780  		/* make sure the page is up to date */
1781  		if (unlikely(!PageUptodate(page))) {
1782  			result = SCAN_FAIL;
1783  			goto out_unlock;
1784  		}
1785  
1786  		/*
1787  		 * If file was truncated then extended, or hole-punched, before
1788  		 * we locked the first page, then a THP might be there already.
1789  		 */
1790  		if (PageTransCompound(page)) {
1791  			result = SCAN_PAGE_COMPOUND;
1792  			goto out_unlock;
1793  		}
1794  
1795  		if (page_mapping(page) != mapping) {
1796  			result = SCAN_TRUNCATED;
1797  			goto out_unlock;
1798  		}
1799  
1800  		if (!is_shmem && PageDirty(page)) {
1801  			/*
1802  			 * khugepaged only works on read-only fds, so this
1803  			 * page is dirty because it hasn't been flushed
1804  			 * since the first write.
1805  			 */
1806  			result = SCAN_FAIL;
1807  			goto out_unlock;
1808  		}
1809  
1810  		if (isolate_lru_page(page)) {
1811  			result = SCAN_DEL_PAGE_LRU;
1812  			goto out_unlock;
1813  		}
1814  
1815  		if (page_has_private(page) &&
1816  		    !try_to_release_page(page, GFP_KERNEL)) {
1817  			result = SCAN_PAGE_HAS_PRIVATE;
1818  			putback_lru_page(page);
1819  			goto out_unlock;
1820  		}
1821  
1822  		if (page_mapped(page))
1823  			unmap_mapping_pages(mapping, index, 1, false);
1824  
1825  		xas_lock_irq(&xas);
1826  		xas_set(&xas, index);
1827  
1828  		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1829  		VM_BUG_ON_PAGE(page_mapped(page), page);
1830  
1831  		/*
1832  		 * The page is expected to have page_count() == 3:
1833  		 *  - we hold a pin on it;
1834  		 *  - one reference from the page cache;
1835  		 *  - one from isolate_lru_page().
1836  		 */
1837  		if (!page_ref_freeze(page, 3)) {
1838  			result = SCAN_PAGE_COUNT;
1839  			xas_unlock_irq(&xas);
1840  			putback_lru_page(page);
1841  			goto out_unlock;
1842  		}
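		/*
		 * A note on the freeze semantics relied on above:
		 * page_ref_freeze(page, 3) atomically drops the refcount
		 * from 3 to 0 only if it is exactly 3, so any extra
		 * transient pin (e.g. a concurrent GUP) makes it fail and
		 * we back out instead of collapsing a page in use elsewhere.
		 */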
1843  
1844  		/*
1845  		 * Add the page to the list so we can undo the collapse if
1846  		 * something goes wrong.
1847  		 */
1848  		list_add_tail(&page->lru, &pagelist);
1849  
1850  		/* Finally, replace with the new page. */
1851  		xas_store(&xas, new_page);
1852  		continue;
1853  out_unlock:
1854  		unlock_page(page);
1855  		put_page(page);
1856  		goto xa_unlocked;
1857  	}
1858  
1859  	if (is_shmem)
1860  		__inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
1861  	else {
1862  		__inc_lruvec_page_state(new_page, NR_FILE_THPS);
1863  		filemap_nr_thps_inc(mapping);
1864  	}
1865  
1866  	if (nr_none) {
1867  		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1868  		if (is_shmem)
1869  			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1870  	}
1871  
1872  xa_locked:
1873  	xas_unlock_irq(&xas);
1874  xa_unlocked:
1875  
1876  	if (result == SCAN_SUCCEED) {
1877  		struct page *page, *tmp;
1878  
1879  		/*
1880  		 * Replacing the old pages with the new one has succeeded,
1881  		 * so now we need to copy their contents and free the old pages.
1882  		 */
1883  		index = start;
1884  		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1885  			while (index < page->index) {
1886  				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1887  				index++;
1888  			}
1889  			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1890  					page);
1891  			list_del(&page->lru);
1892  			page->mapping = NULL;
1893  			page_ref_unfreeze(page, 1);
1894  			ClearPageActive(page);
1895  			ClearPageUnevictable(page);
1896  			unlock_page(page);
1897  			put_page(page);
1898  			index++;
1899  		}
1900  		while (index < end) {
1901  			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1902  			index++;
1903  		}
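		/*
		 * A worked example of the subpage arithmetic above
		 * (illustrative numbers): new_page is the head of a compound
		 * page of HPAGE_PMD_NR subpages (512 with 4KiB pages), and
		 * new_page + k is its k-th subpage. With start == 1024, page
		 * cache index 1027 lands in subpage 1027 % 512 == 3; holes
		 * are cleared, everything else is copied from the old pages.
		 */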
1904  
1905  		SetPageUptodate(new_page);
1906  		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1907  		if (is_shmem)
1908  			set_page_dirty(new_page);
1909  		lru_cache_add(new_page);
1910  
1911  		/*
1912  		 * Remove pte page tables, so we can re-fault the page as huge.
1913  		 */
1914  		retract_page_tables(mapping, start);
1915  		*hpage = NULL;
1916  
1917  		khugepaged_pages_collapsed++;
1918  	} else {
1919  		struct page *page;
1920  
1921  		/* Something went wrong: roll back page cache changes */
1922  		xas_lock_irq(&xas);
1923  		mapping->nrpages -= nr_none;
1924  
1925  		if (is_shmem)
1926  			shmem_uncharge(mapping->host, nr_none);
1927  
1928  		xas_set(&xas, start);
1929  		xas_for_each(&xas, page, end - 1) {
1930  			page = list_first_entry_or_null(&pagelist,
1931  					struct page, lru);
1932  			if (!page || xas.xa_index < page->index) {
1933  				if (!nr_none)
1934  					break;
1935  				nr_none--;
1936  				/* Put holes back where they were */
1937  				xas_store(&xas, NULL);
1938  				continue;
1939  			}
1940  
1941  			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1942  
1943  			/* Unfreeze to 2 refs: the page cache's and our pin. */
1944  			list_del(&page->lru);
1945  			page_ref_unfreeze(page, 2);
1946  			xas_store(&xas, page);
1947  			xas_pause(&xas);
1948  			xas_unlock_irq(&xas);
1949  			unlock_page(page);
1950  			putback_lru_page(page);
1951  			xas_lock_irq(&xas);
1952  		}
1953  		VM_BUG_ON(nr_none);
1954  		xas_unlock_irq(&xas);
1955  
1956  		new_page->mapping = NULL;
1957  	}
1958  
1959  	unlock_page(new_page);
1960  out:
1961  	VM_BUG_ON(!list_empty(&pagelist));
1962  	if (!IS_ERR_OR_NULL(*hpage))
1963  		mem_cgroup_uncharge(*hpage);
1964  	/* TODO: tracepoints */
1965  }
1966  
1967  static void khugepaged_scan_file(struct mm_struct *mm,
1968  		struct file *file, pgoff_t start, struct page **hpage)
1969  {
1970  	struct page *page = NULL;
1971  	struct address_space *mapping = file->f_mapping;
1972  	XA_STATE(xas, &mapping->i_pages, start);
1973  	int present, swap;
1974  	int node = NUMA_NO_NODE;
1975  	int result = SCAN_SUCCEED;
1976  
1977  	present = 0;
1978  	swap = 0;
1979  	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1980  	rcu_read_lock();
1981  	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1982  		if (xas_retry(&xas, page))
1983  			continue;
1984  
1985  		if (xa_is_value(page)) {
1986  			if (++swap > khugepaged_max_ptes_swap) {
1987  				result = SCAN_EXCEED_SWAP_PTE;
1988  				break;
1989  			}
1990  			continue;
1991  		}
1992  
1993  		if (PageTransCompound(page)) {
1994  			result = SCAN_PAGE_COMPOUND;
1995  			break;
1996  		}
1997  
1998  		node = page_to_nid(page);
1999  		if (khugepaged_scan_abort(node)) {
2000  			result = SCAN_SCAN_ABORT;
2001  			break;
2002  		}
2003  		khugepaged_node_load[node]++;
2004  
2005  		if (!PageLRU(page)) {
2006  			result = SCAN_PAGE_LRU;
2007  			break;
2008  		}
2009  
2010  		if (page_count(page) !=
2011  		    1 + page_mapcount(page) + page_has_private(page)) {
2012  			result = SCAN_PAGE_COUNT;
2013  			break;
2014  		}
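		/*
		 * A sketch of the identity checked above: an idle page-cache
		 * page holds one reference from the page cache, one per PTE
		 * mapping it (page_mapcount()), and one more if private data
		 * such as buffer heads is attached (page_has_private()).
		 * Anything beyond 1 + mapcount + private means someone else
		 * has a transient pin and collapsing now could race.
		 */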
2015  
2016  		/*
2017  		 * We probably should check if the page is referenced here, but
2018  		 * nobody would transfer pte_young() to PageReferenced() for us.
2019  		 * And an rmap walk here is just too costly...
2020  		 */
2021  
2022  		present++;
2023  
2024  		if (need_resched()) {
2025  			xas_pause(&xas);
2026  			cond_resched_rcu();
2027  		}
2028  	}
2029  	rcu_read_unlock();
2030  
2031  	if (result == SCAN_SUCCEED) {
2032  		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2033  			result = SCAN_EXCEED_NONE_PTE;
2034  		} else {
2035  			node = khugepaged_find_target_node();
2036  			collapse_file(mm, file, start, hpage, node);
2037  		}
2038  	}
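	/*
	 * A worked example of the threshold above (values illustrative):
	 * with HPAGE_PMD_NR == 512 and khugepaged_max_ptes_none == 511,
	 * a single present page is enough to attempt the collapse, while
	 * max_ptes_none == 0 requires all 512 pages to be present.
	 */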
2039  
2040  	/* TODO: tracepoints */
2041  }
2042  #else
2043  static void khugepaged_scan_file(struct mm_struct *mm,
2044  		struct file *file, pgoff_t start, struct page **hpage)
2045  {
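	/*
	 * This stub is only built without CONFIG_SHMEM; the sole caller
	 * guards the call with IS_ENABLED(CONFIG_SHMEM), so the call is
	 * compiled out and BUILD_BUG() never trips.
	 */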
2046  	BUILD_BUG();
2047  }
2048  
2049  static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2050  {
2051  	return 0;
2052  }
2053  #endif
2054  
2055  static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2056  					    struct page **hpage)
2057  	__releases(&khugepaged_mm_lock)
2058  	__acquires(&khugepaged_mm_lock)
2059  {
2060  	struct mm_slot *mm_slot;
2061  	struct mm_struct *mm;
2062  	struct vm_area_struct *vma;
2063  	int progress = 0;
2064  
2065  	VM_BUG_ON(!pages);
2066  	lockdep_assert_held(&khugepaged_mm_lock);
2067  
2068  	if (khugepaged_scan.mm_slot)
2069  		mm_slot = khugepaged_scan.mm_slot;
2070  	else {
2071  		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2072  				     struct mm_slot, mm_node);
2073  		khugepaged_scan.address = 0;
2074  		khugepaged_scan.mm_slot = mm_slot;
2075  	}
2076  	spin_unlock(&khugepaged_mm_lock);
2077  	khugepaged_collapse_pte_mapped_thps(mm_slot);
2078  
2079  	mm = mm_slot->mm;
2080  	/*
2081  	 * Don't wait for the mmap lock (to avoid long wait times).  Just
2082  	 * move on to the next mm on the list.
2083  	 */
2084  	vma = NULL;
2085  	if (unlikely(!mmap_read_trylock(mm)))
2086  		goto breakouterloop_mmap_lock;
2087  	if (likely(!khugepaged_test_exit(mm)))
2088  		vma = find_vma(mm, khugepaged_scan.address);
2089  
2090  	progress++;
2091  	for (; vma; vma = vma->vm_next) {
2092  		unsigned long hstart, hend;
2093  
2094  		cond_resched();
2095  		if (unlikely(khugepaged_test_exit(mm))) {
2096  			progress++;
2097  			break;
2098  		}
2099  		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2100  skip:
2101  			progress++;
2102  			continue;
2103  		}
2104  		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2105  		hend = vma->vm_end & HPAGE_PMD_MASK;
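		/*
		 * A worked example of the rounding above (illustrative
		 * addresses, 2MiB huge pages): hstart rounds vm_start up to
		 * a PMD boundary and hend rounds vm_end down, so vm_start
		 * 0x201000 gives hstart 0x400000 and vm_end 0x7ff000 gives
		 * hend 0x600000, leaving one collapsible PMD-sized range.
		 */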
2106  		if (hstart >= hend)
2107  			goto skip;
2108  		if (khugepaged_scan.address > hend)
2109  			goto skip;
2110  		if (khugepaged_scan.address < hstart)
2111  			khugepaged_scan.address = hstart;
2112  		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2113  		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2114  			goto skip;
2115  
2116  		while (khugepaged_scan.address < hend) {
2117  			int ret;
2118  			cond_resched();
2119  			if (unlikely(khugepaged_test_exit(mm)))
2120  				goto breakouterloop;
2121  
2122  			VM_BUG_ON(khugepaged_scan.address < hstart ||
2123  				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2124  				  hend);
2125  			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2126  				struct file *file = get_file(vma->vm_file);
2127  				pgoff_t pgoff = linear_page_index(vma,
2128  						khugepaged_scan.address);
2129  
2130  				mmap_read_unlock(mm);
2131  				ret = 1;
2132  				khugepaged_scan_file(mm, file, pgoff, hpage);
2133  				fput(file);
2134  			} else {
2135  				ret = khugepaged_scan_pmd(mm, vma,
2136  						khugepaged_scan.address,
2137  						hpage);
2138  			}
2139  			/* move to next address */
2140  			khugepaged_scan.address += HPAGE_PMD_SIZE;
2141  			progress += HPAGE_PMD_NR;
2142  			if (ret)
2143  				/* we released mmap_lock so break loop */
2144  				goto breakouterloop_mmap_lock;
2145  			if (progress >= pages)
2146  				goto breakouterloop;
2147  		}
2148  	}
2149  breakouterloop:
2150  	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2151  breakouterloop_mmap_lock:
2152  
2153  	spin_lock(&khugepaged_mm_lock);
2154  	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2155  	/*
2156  	 * Release the current mm_slot if this mm is about to die, or
2157  	 * if we scanned all vmas of this mm.
2158  	 */
2159  	if (khugepaged_test_exit(mm) || !vma) {
2160  		/*
2161  		 * Make sure that if mm_users is reaching zero while
2162  		 * khugepaged runs here, khugepaged_exit will find
2163  		 * mm_slot not pointing to the exiting mm.
2164  		 */
2165  		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2166  			khugepaged_scan.mm_slot = list_entry(
2167  				mm_slot->mm_node.next,
2168  				struct mm_slot, mm_node);
2169  			khugepaged_scan.address = 0;
2170  		} else {
2171  			khugepaged_scan.mm_slot = NULL;
2172  			khugepaged_full_scans++;
2173  		}
2174  
2175  		collect_mm_slot(mm_slot);
2176  	}
2177  
2178  	return progress;
2179  }
2180  
2181  static int khugepaged_has_work(void)
2182  {
2183  	return !list_empty(&khugepaged_scan.mm_head) &&
2184  		khugepaged_enabled();
2185  }
2186  
2187  static int khugepaged_wait_event(void)
2188  {
2189  	return !list_empty(&khugepaged_scan.mm_head) ||
2190  		kthread_should_stop();
2191  }
2192  
2193  static void khugepaged_do_scan(void)
2194  {
2195  	struct page *hpage = NULL;
2196  	unsigned int progress = 0, pass_through_head = 0;
2197  	unsigned int pages = khugepaged_pages_to_scan;
2198  	bool wait = true;
2199  
2200  	barrier(); /* ensure khugepaged_pages_to_scan is copied to the local just once */
2201  
2202  	lru_add_drain_all();
2203  
2204  	while (progress < pages) {
2205  		if (!khugepaged_prealloc_page(&hpage, &wait))
2206  			break;
2207  
2208  		cond_resched();
2209  
2210  		if (unlikely(kthread_should_stop() || try_to_freeze()))
2211  			break;
2212  
2213  		spin_lock(&khugepaged_mm_lock);
2214  		if (!khugepaged_scan.mm_slot)
2215  			pass_through_head++;
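		/*
		 * A NULL mm_slot means the previous pass reached the end of
		 * the mm list, so we are about to restart from the head;
		 * counting these restarts and stopping at the second keeps
		 * one scan batch from walking the list more than once.
		 */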
2216  		if (khugepaged_has_work() &&
2217  		    pass_through_head < 2)
2218  			progress += khugepaged_scan_mm_slot(pages - progress,
2219  							    &hpage);
2220  		else
2221  			progress = pages;
2222  		spin_unlock(&khugepaged_mm_lock);
2223  	}
2224  
2225  	if (!IS_ERR_OR_NULL(hpage))
2226  		put_page(hpage);
2227  }
2228  
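/*
 * Note: time_after_eq() compares jiffies using wraparound-safe signed
 * arithmetic, so this wakeup test stays correct across a jiffies overflow.
 */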
2229  static bool khugepaged_should_wakeup(void)
2230  {
2231  	return kthread_should_stop() ||
2232  	       time_after_eq(jiffies, khugepaged_sleep_expire);
2233  }
2234  
2235  static void khugepaged_wait_work(void)
2236  {
2237  	if (khugepaged_has_work()) {
2238  		const unsigned long scan_sleep_jiffies =
2239  			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2240  
2241  		if (!scan_sleep_jiffies)
2242  			return;
2243  
2244  		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2245  		wait_event_freezable_timeout(khugepaged_wait,
2246  					     khugepaged_should_wakeup(),
2247  					     scan_sleep_jiffies);
2248  		return;
2249  	}
2250  
2251  	if (khugepaged_enabled())
2252  		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2253  }
2254  
2255  static int khugepaged(void *none)
2256  {
2257  	struct mm_slot *mm_slot;
2258  
2259  	set_freezable();
2260  	set_user_nice(current, MAX_NICE);
2261  
2262  	while (!kthread_should_stop()) {
2263  		khugepaged_do_scan();
2264  		khugepaged_wait_work();
2265  	}
2266  
2267  	spin_lock(&khugepaged_mm_lock);
2268  	mm_slot = khugepaged_scan.mm_slot;
2269  	khugepaged_scan.mm_slot = NULL;
2270  	if (mm_slot)
2271  		collect_mm_slot(mm_slot);
2272  	spin_unlock(&khugepaged_mm_lock);
2273  	return 0;
2274  }
2275  
2276  static void set_recommended_min_free_kbytes(void)
2277  {
2278  	struct zone *zone;
2279  	int nr_zones = 0;
2280  	unsigned long recommended_min;
2281  
2282  	for_each_populated_zone(zone) {
2283  		/*
2284  		 * We don't need to worry about fragmentation of
2285  		 * ZONE_MOVABLE since it only has movable pages.
2286  		 */
2287  		if (zone_idx(zone) > gfp_zone(GFP_USER))
2288  			continue;
2289  
2290  		nr_zones++;
2291  	}
2292  
2293  	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2294  	recommended_min = pageblock_nr_pages * nr_zones * 2;
2295  
2296  	/*
2297  	 * Make sure that on average at least two pageblocks are almost free
2298  	 * of another type, one for a migratetype to fall back to and a
2299  	 * second to avoid subsequent fallbacks of other types. There are 3
2300  	 * MIGRATE_TYPES we care about.
2301  	 */
2302  	recommended_min += pageblock_nr_pages * nr_zones *
2303  			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2304  
2305  	/* don't ever allow reserving more than 5% of the lowmem */
2306  	recommended_min = min(recommended_min,
2307  			      (unsigned long) nr_free_buffer_pages() / 20);
2308  	recommended_min <<= (PAGE_SHIFT-10);
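	/*
	 * A worked example with assumed values (4KiB pages, one populated
	 * zone, pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3):
	 * 512 * 1 * 2 + 512 * 1 * 3 * 3 == 5632 pages, shifted left by
	 * PAGE_SHIFT - 10 == 2 gives 22528 kbytes, subject to the
	 * 5%-of-lowmem clamp above.
	 */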
2309  
2310  	if (recommended_min > min_free_kbytes) {
2311  		if (user_min_free_kbytes >= 0)
2312  			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2313  				min_free_kbytes, recommended_min);
2314  
2315  		min_free_kbytes = recommended_min;
2316  	}
2317  	setup_per_zone_wmarks();
2318  }
2319  
2320  int start_stop_khugepaged(void)
2321  {
2322  	int err = 0;
2323  
2324  	mutex_lock(&khugepaged_mutex);
2325  	if (khugepaged_enabled()) {
2326  		if (!khugepaged_thread)
2327  			khugepaged_thread = kthread_run(khugepaged, NULL,
2328  							"khugepaged");
2329  		if (IS_ERR(khugepaged_thread)) {
2330  			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2331  			err = PTR_ERR(khugepaged_thread);
2332  			khugepaged_thread = NULL;
2333  			goto fail;
2334  		}
2335  
2336  		if (!list_empty(&khugepaged_scan.mm_head))
2337  			wake_up_interruptible(&khugepaged_wait);
2338  
2339  		set_recommended_min_free_kbytes();
2340  	} else if (khugepaged_thread) {
2341  		kthread_stop(khugepaged_thread);
2342  		khugepaged_thread = NULL;
2343  	}
2344  fail:
2345  	mutex_unlock(&khugepaged_mutex);
2346  	return err;
2347  }
2348  
2349  void khugepaged_min_free_kbytes_update(void)
2350  {
2351  	mutex_lock(&khugepaged_mutex);
2352  	if (khugepaged_enabled() && khugepaged_thread)
2353  		set_recommended_min_free_kbytes();
2354  	mutex_unlock(&khugepaged_mutex);
2355  }
2356