// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped the way
 * it would have been had the vma been large enough at page-fault time.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
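
/*
 * Illustrative defaults (set in khugepaged_init() below, assuming 4K base
 * pages and 2M PMD-sized hugepages, so HPAGE_PMD_NR == 512):
 *
 *	khugepaged_pages_to_scan	= 512 * 8 = 4096
 *	khugepaged_max_ptes_none	= 512 - 1 = 511
 *	khugepaged_max_ptes_swap	= 512 / 8 = 64
 *	khugepaged_max_ptes_shared	= 512 / 2 = 256
 *
 * All of these are runtime-tunable through the sysfs handlers further
 * down in this file, e.g.:
 *
 *	echo 64 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */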

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);
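
/*
 * For example (illustrative, assuming 4K pages so HPAGE_PMD_NR == 512):
 *
 *	echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * restricts khugepaged to collapsing only fully populated 2M ranges,
 * while the default of 511 lets it collapse a range with a single
 * present pte, zero-filling the remaining slots of the hugepage.
 */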

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
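
/*
 * This group is registered from hugepage_init() in mm/huge_memory.c under
 * the transparent_hugepage kobject, so the attributes above show up as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/{defrag,max_ptes_none,...}.
 */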
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
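
/*
 * Userspace reaches hugepage_madvise() through madvise(2). A minimal,
 * illustrative caller (not part of this file) would be:
 *
 *	len = 2UL << 20;
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(ptr, len, MADV_HUGEPAGE);	// sets VM_HUGEPAGE above
 */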

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
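
/*
 * Worked example (illustrative): a base (order-0) anonymous page mapped
 * by this process only and also sitting in the swap cache has
 * total_mapcount() == 1 and compound_nr() == 1, so the only acceptable
 * refcount is 2: one for the mapping, one for the swap cache. Any
 * higher count implies a GUP or other external pin.
 */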

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				    NR_ISOLATED_ANON + page_is_file_lru(page),
				    compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
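
/*
 * Illustrative example: with node_reclaim_mode enabled, if the scan has
 * already counted pages on node 0 and now hits a page on node 2, and
 * node_distance(2, 0) exceeds node_reclaim_distance (RECLAIM_DISTANCE,
 * typically 30), the scan aborts rather than collapse memory spread
 * across distant NUMA nodes into one hugepage on a single node.
 */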

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
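
/*
 * For example (illustrative): after a scan that produced
 * node_load = { [0] = 100, [1] = 412 }, node 1 is chosen as the target,
 * and every online node whose load ties the maximum is also set in
 * cc->alloc_nmask as an allocation fallback.
 */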
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * after re-acquiring it.
 * Returns enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, but the address may be unmapped and then
	 * remapped to a file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

10341fec6890SMatthew Wilcox (Oracle) /* Drain LRU cache to remove extra pin on the swapped in pages */
1035ae2c5d80SKirill A. Shutemov if (swapped_in)
1036ae2c5d80SKirill A. Shutemov lru_add_drain();
1037ae2c5d80SKirill A. Shutemov
1038895f5ee4SHugh Dickins result = SCAN_SUCCEED;
1039895f5ee4SHugh Dickins out:
1040895f5ee4SHugh Dickins trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1041895f5ee4SHugh Dickins return result;
1042b46e756fSKirill A. Shutemov }
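/*
 * NOTE on the locking contract, mirrored from collapse_huge_page()
 * below: on any failure __collapse_huge_page_swapin() has already
 * dropped mmap_lock, so the caller must not unlock again:
 *
 *	result = __collapse_huge_page_swapin(mm, vma, address, pmd,
 *					     referenced);
 *	if (result != SCAN_SUCCEED)
 *		goto out_nolock;	(mmap_lock already released)
 */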
1043b46e756fSKirill A. Shutemov
104403e36dbaSMatthew Wilcox (Oracle) static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
10459710a78aSZach O'Keefe struct collapse_control *cc)
10469710a78aSZach O'Keefe {
10477d8faaf1SZach O'Keefe gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1048e031ff96SYang Shi GFP_TRANSHUGE);
10497d2c4385SZach O'Keefe int node = hpage_collapse_find_target_node(cc);
105094c02ad7SPeter Xu struct folio *folio;
10519710a78aSZach O'Keefe
1052281a0312SMatthew Wilcox (Oracle) folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1053281a0312SMatthew Wilcox (Oracle) if (!folio) {
105403e36dbaSMatthew Wilcox (Oracle) *foliop = NULL;
1055281a0312SMatthew Wilcox (Oracle) count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
10569710a78aSZach O'Keefe return SCAN_ALLOC_HUGE_PAGE_FAIL;
1057a1afee6cSVishal Moola (Oracle) }
105894c02ad7SPeter Xu
1059281a0312SMatthew Wilcox (Oracle) count_vm_event(THP_COLLAPSE_ALLOC);
106094c02ad7SPeter Xu if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
106194c02ad7SPeter Xu folio_put(folio);
106203e36dbaSMatthew Wilcox (Oracle) *foliop = NULL;
10639710a78aSZach O'Keefe return SCAN_CGROUP_CHARGE_FAIL;
106494c02ad7SPeter Xu }
106594c02ad7SPeter Xu
1066a1afee6cSVishal Moola (Oracle) count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1067a1afee6cSVishal Moola (Oracle)
106803e36dbaSMatthew Wilcox (Oracle) *foliop = folio;
10699710a78aSZach O'Keefe return SCAN_SUCCEED;
10709710a78aSZach O'Keefe }
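/*
 * NOTE: a sketch of the expected calling pattern, which both callers
 * below follow: on success the folio is already charged to mm's memcg;
 * on failure *foliop is NULL and there is nothing to uncharge or free:
 *
 *	result = alloc_charge_folio(&folio, mm, cc);
 *	if (result != SCAN_SUCCEED)
 *		goto out_nolock;
 */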
10719710a78aSZach O'Keefe
107250ad2f24SZach O'Keefe static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
107350ad2f24SZach O'Keefe int referenced, int unmapped,
107450ad2f24SZach O'Keefe struct collapse_control *cc)
1075b46e756fSKirill A. Shutemov {
10765503fbf2SKirill A. Shutemov LIST_HEAD(compound_pagelist);
1077b46e756fSKirill A. Shutemov pmd_t *pmd, _pmd;
1078b46e756fSKirill A. Shutemov pte_t *pte;
1079b46e756fSKirill A. Shutemov pgtable_t pgtable;
10804ba70817SMatthew Wilcox (Oracle) struct folio *folio;
108150ad2f24SZach O'Keefe struct page *hpage;
1082b46e756fSKirill A. Shutemov spinlock_t *pmd_ptl, *pte_ptl;
108350ad2f24SZach O'Keefe int result = SCAN_FAIL;
1084c131f751SKirill A. Shutemov struct vm_area_struct *vma;
1085ac46d4f3SJérôme Glisse struct mmu_notifier_range range;
1086b46e756fSKirill A. Shutemov
1087b46e756fSKirill A. Shutemov VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1088b46e756fSKirill A. Shutemov
1089988ddb71SKirill A. Shutemov /*
1090c1e8d7c6SMichel Lespinasse * Before allocating the hugepage, release the mmap_lock read lock.
1091988ddb71SKirill A. Shutemov * The allocation can take potentially a long time if it involves
1092c1e8d7c6SMichel Lespinasse * sync compaction, and we do not need to hold the mmap_lock during
1093988ddb71SKirill A. Shutemov * that. We will recheck the vma after taking it again in write mode.
1094988ddb71SKirill A. Shutemov */
1095d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1096b46e756fSKirill A. Shutemov
109703e36dbaSMatthew Wilcox (Oracle) result = alloc_charge_folio(&folio, mm, cc);
109803e36dbaSMatthew Wilcox (Oracle) hpage = &folio->page;
10999710a78aSZach O'Keefe if (result != SCAN_SUCCEED)
1100b46e756fSKirill A. Shutemov goto out_nolock;
1101b46e756fSKirill A. Shutemov
1102d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
110334488399SZach O'Keefe result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
110450ad2f24SZach O'Keefe if (result != SCAN_SUCCEED) {
1105d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1106b46e756fSKirill A. Shutemov goto out_nolock;
1107b46e756fSKirill A. Shutemov }
1108b46e756fSKirill A. Shutemov
110950722804SZach O'Keefe result = find_pmd_or_thp_or_none(mm, address, &pmd);
111050722804SZach O'Keefe if (result != SCAN_SUCCEED) {
1111d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1112b46e756fSKirill A. Shutemov goto out_nolock;
1113b46e756fSKirill A. Shutemov }
1114b46e756fSKirill A. Shutemov
111550ad2f24SZach O'Keefe if (unmapped) {
1116b46e756fSKirill A. Shutemov /*
111750ad2f24SZach O'Keefe * __collapse_huge_page_swapin will return with mmap_lock
111850ad2f24SZach O'Keefe * released when it fails. So we jump out_nolock directly in
111950ad2f24SZach O'Keefe * that case. Continuing to collapse causes inconsistency.
1120b46e756fSKirill A. Shutemov */
112150ad2f24SZach O'Keefe result = __collapse_huge_page_swapin(mm, vma, address, pmd,
112250ad2f24SZach O'Keefe referenced);
112350ad2f24SZach O'Keefe if (result != SCAN_SUCCEED)
1124b46e756fSKirill A. Shutemov goto out_nolock;
1125b46e756fSKirill A. Shutemov }
1126b46e756fSKirill A. Shutemov
1127d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1128b46e756fSKirill A. Shutemov /*
1129b46e756fSKirill A. Shutemov * Prevent all access to pagetables with the exception of
1130b46e756fSKirill A. Shutemov * gup_fast later handled by the ptep_clear_flush and the VM
1131b46e756fSKirill A. Shutemov * handled by the anon_vma lock + PG_lock.
1132b46e756fSKirill A. Shutemov */
1133d8ed45c5SMichel Lespinasse mmap_write_lock(mm);
113434488399SZach O'Keefe result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
113550ad2f24SZach O'Keefe if (result != SCAN_SUCCEED)
113618d24a7cSMiaohe Lin goto out_up_write;
1137b46e756fSKirill A. Shutemov /* check if the pmd is still valid */
113850722804SZach O'Keefe result = check_pmd_still_valid(mm, address, pmd);
113950722804SZach O'Keefe if (result != SCAN_SUCCEED)
114018d24a7cSMiaohe Lin goto out_up_write;
1141b46e756fSKirill A. Shutemov
114255fd6fccSSuren Baghdasaryan vma_start_write(vma);
1143b46e756fSKirill A. Shutemov anon_vma_lock_write(vma->anon_vma);
1144b46e756fSKirill A. Shutemov
11457d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
11467d4a8be0SAlistair Popple address + HPAGE_PMD_SIZE);
1147ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range);
1148ec649c9dSVille Syrjälä
1149b46e756fSKirill A. Shutemov pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1150b46e756fSKirill A. Shutemov /*
115170cbc3ccSYang Shi * This removes any huge TLB entry from the CPU so we won't allow
115270cbc3ccSYang Shi * huge and small TLB entries for the same virtual address,
115370cbc3ccSYang Shi * avoiding the risk of CPU bugs in that area.
115470cbc3ccSYang Shi *
115570cbc3ccSYang Shi * Parallel fast GUP is fine since fast GUP will back off when
115670cbc3ccSYang Shi * it detects PMD is changed.
1157b46e756fSKirill A. Shutemov */
1158b46e756fSKirill A. Shutemov _pmd = pmdp_collapse_flush(vma, address, pmd);
1159b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl);
1160ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range);
11612ba99c5eSJann Horn tlb_remove_table_sync_one();
1162b46e756fSKirill A. Shutemov
1163895f5ee4SHugh Dickins pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1164895f5ee4SHugh Dickins if (pte) {
1165d8ea7cc8SZach O'Keefe result = __collapse_huge_page_isolate(vma, address, pte, cc,
11665503fbf2SKirill A. Shutemov &compound_pagelist);
1167b46e756fSKirill A. Shutemov spin_unlock(pte_ptl);
1168895f5ee4SHugh Dickins } else {
1169895f5ee4SHugh Dickins result = SCAN_PMD_NULL;
1170895f5ee4SHugh Dickins }
1171b46e756fSKirill A. Shutemov
117250ad2f24SZach O'Keefe if (unlikely(result != SCAN_SUCCEED)) {
1173895f5ee4SHugh Dickins if (pte)
1174b46e756fSKirill A. Shutemov pte_unmap(pte);
1175b46e756fSKirill A. Shutemov spin_lock(pmd_ptl);
1176b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd));
1177b46e756fSKirill A. Shutemov /*
1178b46e756fSKirill A. Shutemov * We can only use set_pmd_at when establishing
1179b46e756fSKirill A. Shutemov * hugepmds and never for establishing regular pmds that
1180b46e756fSKirill A. Shutemov * points to regular pagetables. Use pmd_populate for that
1181b46e756fSKirill A. Shutemov */
1182b46e756fSKirill A. Shutemov pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1183b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl);
1184b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma);
118518d24a7cSMiaohe Lin goto out_up_write;
1186b46e756fSKirill A. Shutemov }
1187b46e756fSKirill A. Shutemov
1188b46e756fSKirill A. Shutemov /*
1189b46e756fSKirill A. Shutemov * All pages are isolated and locked so anon_vma rmap
1190b46e756fSKirill A. Shutemov * can't run anymore.
1191b46e756fSKirill A. Shutemov */
1192b46e756fSKirill A. Shutemov anon_vma_unlock_write(vma->anon_vma);
1193b46e756fSKirill A. Shutemov
119498c76c9fSJiaqi Yan result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
119598c76c9fSJiaqi Yan vma, address, pte_ptl,
11965503fbf2SKirill A. Shutemov &compound_pagelist);
1197b46e756fSKirill A. Shutemov pte_unmap(pte);
119898c76c9fSJiaqi Yan if (unlikely(result != SCAN_SUCCEED))
119998c76c9fSJiaqi Yan goto out_up_write;
120098c76c9fSJiaqi Yan
1201588d01f9SMiaohe Lin /*
12024ba70817SMatthew Wilcox (Oracle) * The smp_wmb() inside __folio_mark_uptodate() ensures the
12034ba70817SMatthew Wilcox (Oracle) * copy_huge_page writes become visible before the set_pmd_at()
12044ba70817SMatthew Wilcox (Oracle) * write.
1205588d01f9SMiaohe Lin */
12064ba70817SMatthew Wilcox (Oracle) __folio_mark_uptodate(folio);
1207b46e756fSKirill A. Shutemov pgtable = pmd_pgtable(_pmd);
1208b46e756fSKirill A. Shutemov
120950ad2f24SZach O'Keefe _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1210f55e1014SLinus Torvalds _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1211b46e756fSKirill A. Shutemov
1212b46e756fSKirill A. Shutemov spin_lock(pmd_ptl);
1213b46e756fSKirill A. Shutemov BUG_ON(!pmd_none(*pmd));
12144ba70817SMatthew Wilcox (Oracle) folio_add_new_anon_rmap(folio, vma, address);
12154ba70817SMatthew Wilcox (Oracle) folio_add_lru_vma(folio, vma);
1216b46e756fSKirill A. Shutemov pgtable_trans_huge_deposit(mm, pmd, pgtable);
1217b46e756fSKirill A. Shutemov set_pmd_at(mm, address, pmd, _pmd);
1218b46e756fSKirill A. Shutemov update_mmu_cache_pmd(vma, address, pmd);
1219b46e756fSKirill A. Shutemov spin_unlock(pmd_ptl);
1220b46e756fSKirill A. Shutemov
122150ad2f24SZach O'Keefe hpage = NULL;
1222b46e756fSKirill A. Shutemov
1223b46e756fSKirill A. Shutemov result = SCAN_SUCCEED;
1224b46e756fSKirill A. Shutemov out_up_write:
1225d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
1226b46e756fSKirill A. Shutemov out_nolock:
12277cb1d7efSPeter Xu if (hpage)
122850ad2f24SZach O'Keefe put_page(hpage);
122950ad2f24SZach O'Keefe trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
123050ad2f24SZach O'Keefe return result;
1231b46e756fSKirill A. Shutemov }
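/*
 * Summary of the lock choreography above, derived from the code and
 * kept here only as a reading aid:
 *
 *	mmap_read_unlock()	allocation may sync-compact; don't hold it
 *	mmap_read_lock()	revalidate vma + pmd, swap in if needed
 *	mmap_read_unlock()
 *	mmap_write_lock()	revalidate again, then:
 *	    pmd_lock() -> pmdp_collapse_flush() -> pte_offset_map_lock(),
 *	    isolate and copy pages, set_pmd_at() the new huge pmd
 *	mmap_write_unlock()
 */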
1232b46e756fSKirill A. Shutemov
12337d2c4385SZach O'Keefe static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1234b46e756fSKirill A. Shutemov struct vm_area_struct *vma,
123550ad2f24SZach O'Keefe unsigned long address, bool *mmap_locked,
123634d6b470SZach O'Keefe struct collapse_control *cc)
1237b46e756fSKirill A. Shutemov {
1238b46e756fSKirill A. Shutemov pmd_t *pmd;
1239b46e756fSKirill A. Shutemov pte_t *pte, *_pte;
124050ad2f24SZach O'Keefe int result = SCAN_FAIL, referenced = 0;
124171a2c112SKirill A. Shutemov int none_or_zero = 0, shared = 0;
1242b46e756fSKirill A. Shutemov struct page *page = NULL;
1243b46e756fSKirill A. Shutemov unsigned long _address;
1244b46e756fSKirill A. Shutemov spinlock_t *ptl;
1245b46e756fSKirill A. Shutemov int node = NUMA_NO_NODE, unmapped = 0;
12460db501f7SEbru Akagunduz bool writable = false;
1247b46e756fSKirill A. Shutemov
1248b46e756fSKirill A. Shutemov VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1249b46e756fSKirill A. Shutemov
125050722804SZach O'Keefe result = find_pmd_or_thp_or_none(mm, address, &pmd);
125150722804SZach O'Keefe if (result != SCAN_SUCCEED)
1252b46e756fSKirill A. Shutemov goto out;
1253b46e756fSKirill A. Shutemov
125434d6b470SZach O'Keefe memset(cc->node_load, 0, sizeof(cc->node_load));
1255e031ff96SYang Shi nodes_clear(cc->alloc_nmask);
1256b46e756fSKirill A. Shutemov pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1257895f5ee4SHugh Dickins if (!pte) {
1258895f5ee4SHugh Dickins result = SCAN_PMD_NULL;
1259895f5ee4SHugh Dickins goto out;
1260895f5ee4SHugh Dickins }
1261895f5ee4SHugh Dickins
1262b46e756fSKirill A. Shutemov for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1263b46e756fSKirill A. Shutemov _pte++, _address += PAGE_SIZE) {
1264c33c7948SRyan Roberts pte_t pteval = ptep_get(_pte);
1265b46e756fSKirill A. Shutemov if (is_swap_pte(pteval)) {
1266d8ea7cc8SZach O'Keefe ++unmapped;
1267d8ea7cc8SZach O'Keefe if (!cc->is_khugepaged ||
1268d8ea7cc8SZach O'Keefe unmapped <= khugepaged_max_ptes_swap) {
1269e1e267c7SPeter Xu /*
1270e1e267c7SPeter Xu * Always be strict with uffd-wp
1271e1e267c7SPeter Xu * enabled swap entries. Please see
1272e1e267c7SPeter Xu * comment below for pte_uffd_wp().
1273e1e267c7SPeter Xu */
12742bad466cSPeter Xu if (pte_swp_uffd_wp_any(pteval)) {
1275e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP;
1276e1e267c7SPeter Xu goto out_unmap;
1277e1e267c7SPeter Xu }
1278b46e756fSKirill A. Shutemov continue;
1279b46e756fSKirill A. Shutemov } else {
1280b46e756fSKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE;
1281e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1282b46e756fSKirill A. Shutemov goto out_unmap;
1283b46e756fSKirill A. Shutemov }
1284b46e756fSKirill A. Shutemov }
1285b46e756fSKirill A. Shutemov if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1286d8ea7cc8SZach O'Keefe ++none_or_zero;
1287b46e756fSKirill A. Shutemov if (!userfaultfd_armed(vma) &&
1288d8ea7cc8SZach O'Keefe (!cc->is_khugepaged ||
1289d8ea7cc8SZach O'Keefe none_or_zero <= khugepaged_max_ptes_none)) {
1290b46e756fSKirill A. Shutemov continue;
1291b46e756fSKirill A. Shutemov } else {
1292b46e756fSKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE;
1293e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1294b46e756fSKirill A. Shutemov goto out_unmap;
1295b46e756fSKirill A. Shutemov }
1296b46e756fSKirill A. Shutemov }
1297e1e267c7SPeter Xu if (pte_uffd_wp(pteval)) {
1298e1e267c7SPeter Xu /*
1299e1e267c7SPeter Xu * Don't collapse the page if any of the small
1300e1e267c7SPeter Xu * PTEs are armed with uffd write protection.
1301e1e267c7SPeter Xu * Here we can also mark the new huge pmd as
1302e1e267c7SPeter Xu * write protected if any of the small ones is
13038958b249SHaitao Shi * marked, but that could bring unknown
1304e1e267c7SPeter Xu * userfault messages that fall outside of
1305e1e267c7SPeter Xu * the registered range. So, just be simple.
1306e1e267c7SPeter Xu */
1307e1e267c7SPeter Xu result = SCAN_PTE_UFFD_WP;
1308e1e267c7SPeter Xu goto out_unmap;
1309e1e267c7SPeter Xu }
1310b46e756fSKirill A. Shutemov if (pte_write(pteval))
1311b46e756fSKirill A. Shutemov writable = true;
1312b46e756fSKirill A. Shutemov
1313b46e756fSKirill A. Shutemov page = vm_normal_page(vma, _address, pteval);
13143218f871SAlex Sierra if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1315b46e756fSKirill A. Shutemov result = SCAN_PAGE_NULL;
1316b46e756fSKirill A. Shutemov goto out_unmap;
1317b46e756fSKirill A. Shutemov }
1318b46e756fSKirill A. Shutemov
1319d8ea7cc8SZach O'Keefe if (page_mapcount(page) > 1) {
1320d8ea7cc8SZach O'Keefe ++shared;
1321d8ea7cc8SZach O'Keefe if (cc->is_khugepaged &&
1322d8ea7cc8SZach O'Keefe shared > khugepaged_max_ptes_shared) {
132371a2c112SKirill A. Shutemov result = SCAN_EXCEED_SHARED_PTE;
1324e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
132571a2c112SKirill A. Shutemov goto out_unmap;
132671a2c112SKirill A. Shutemov }
1327d8ea7cc8SZach O'Keefe }
132871a2c112SKirill A. Shutemov
13295503fbf2SKirill A. Shutemov page = compound_head(page);
1330b46e756fSKirill A. Shutemov
1331b46e756fSKirill A. Shutemov /*
1332b46e756fSKirill A. Shutemov * Record which node the original page is from and save this
133334d6b470SZach O'Keefe * information to cc->node_load[].
13340b8f0d87SQuanfa Fu * Khugepaged will allocate the hugepage from the node that has
1335b46e756fSKirill A. Shutemov * the max hit record.
1336b46e756fSKirill A. Shutemov */
1337b46e756fSKirill A. Shutemov node = page_to_nid(page);
13387d2c4385SZach O'Keefe if (hpage_collapse_scan_abort(node, cc)) {
1339b46e756fSKirill A. Shutemov result = SCAN_SCAN_ABORT;
1340b46e756fSKirill A. Shutemov goto out_unmap;
1341b46e756fSKirill A. Shutemov }
134234d6b470SZach O'Keefe cc->node_load[node]++;
1343b46e756fSKirill A. Shutemov if (!PageLRU(page)) {
1344b46e756fSKirill A. Shutemov result = SCAN_PAGE_LRU;
1345b46e756fSKirill A. Shutemov goto out_unmap;
1346b46e756fSKirill A. Shutemov }
1347b46e756fSKirill A. Shutemov if (PageLocked(page)) {
1348b46e756fSKirill A. Shutemov result = SCAN_PAGE_LOCK;
1349b46e756fSKirill A. Shutemov goto out_unmap;
1350b46e756fSKirill A. Shutemov }
1351b46e756fSKirill A. Shutemov if (!PageAnon(page)) {
1352b46e756fSKirill A. Shutemov result = SCAN_PAGE_ANON;
1353b46e756fSKirill A. Shutemov goto out_unmap;
1354b46e756fSKirill A. Shutemov }
1355b46e756fSKirill A. Shutemov
1356b46e756fSKirill A. Shutemov /*
13579445689fSKirill A. Shutemov * Check if the page has any GUP (or other external) pins.
13589445689fSKirill A. Shutemov *
1359cb67f428SHugh Dickins * Here the check may be racy:
1360cb67f428SHugh Dickins * it may see total_mapcount > refcount in some cases.
13619445689fSKirill A. Shutemov * But such a case is ephemeral and we can always retry collapse
13629445689fSKirill A. Shutemov * later. However it may report false positives if the page
13639445689fSKirill A. Shutemov * has excessive GUP pins (i.e. 512). Anyway the same check
13649445689fSKirill A. Shutemov * will be done again later; the risk seems low.
1365b46e756fSKirill A. Shutemov */
13669445689fSKirill A. Shutemov if (!is_refcount_suitable(page)) {
1367b46e756fSKirill A. Shutemov result = SCAN_PAGE_COUNT;
1368b46e756fSKirill A. Shutemov goto out_unmap;
1369b46e756fSKirill A. Shutemov }
1370d8ea7cc8SZach O'Keefe
1371d8ea7cc8SZach O'Keefe /*
1372d8ea7cc8SZach O'Keefe * If collapse was initiated by khugepaged, check that there is
1373d8ea7cc8SZach O'Keefe * enough young PTEs to justify collapsing the page
1374d8ea7cc8SZach O'Keefe */
1375d8ea7cc8SZach O'Keefe if (cc->is_khugepaged &&
1376d8ea7cc8SZach O'Keefe (pte_young(pteval) || page_is_young(page) ||
1377d8ea7cc8SZach O'Keefe PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1378d8ea7cc8SZach O'Keefe address)))
13790db501f7SEbru Akagunduz referenced++;
1380b46e756fSKirill A. Shutemov }
1381ffe945e6SKirill A. Shutemov if (!writable) {
1382ffe945e6SKirill A. Shutemov result = SCAN_PAGE_RO;
1383d8ea7cc8SZach O'Keefe } else if (cc->is_khugepaged &&
1384d8ea7cc8SZach O'Keefe (!referenced ||
1385d8ea7cc8SZach O'Keefe (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1386ffe945e6SKirill A. Shutemov result = SCAN_LACK_REFERENCED_PAGE;
1387ffe945e6SKirill A. Shutemov } else {
1388b46e756fSKirill A. Shutemov result = SCAN_SUCCEED;
1389b46e756fSKirill A. Shutemov }
1390b46e756fSKirill A. Shutemov out_unmap:
1391b46e756fSKirill A. Shutemov pte_unmap_unlock(pte, ptl);
139250ad2f24SZach O'Keefe if (result == SCAN_SUCCEED) {
139350ad2f24SZach O'Keefe result = collapse_huge_page(mm, address, referenced,
139450ad2f24SZach O'Keefe unmapped, cc);
1395c1e8d7c6SMichel Lespinasse /* collapse_huge_page will return with the mmap_lock released */
139650ad2f24SZach O'Keefe *mmap_locked = false;
1397b46e756fSKirill A. Shutemov }
1398b46e756fSKirill A. Shutemov out:
1399b46e756fSKirill A. Shutemov trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1400b46e756fSKirill A. Shutemov none_or_zero, result, unmapped);
140150ad2f24SZach O'Keefe return result;
1402b46e756fSKirill A. Shutemov }
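/*
 * NOTE: the cc->is_khugepaged checks above mean that the
 * khugepaged_max_ptes_{none,swap,shared} limits and the referenced-pte
 * heuristic only gate the background daemon; a MADV_COLLAPSE caller
 * (is_khugepaged == false) bypasses them. For example, with
 * max_ptes_none set to 0, khugepaged only collapses fully populated
 * ranges, while madvise(MADV_COLLAPSE) on a sparse range can still
 * succeed if the remaining checks pass.
 */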
1403b46e756fSKirill A. Shutemov
1404b26e2701SQi Zheng static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1405b46e756fSKirill A. Shutemov {
1406b26e2701SQi Zheng struct mm_slot *slot = &mm_slot->slot;
1407b26e2701SQi Zheng struct mm_struct *mm = slot->mm;
1408b46e756fSKirill A. Shutemov
140935f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock);
1410b46e756fSKirill A. Shutemov
14117d2c4385SZach O'Keefe if (hpage_collapse_test_exit(mm)) {
1412b46e756fSKirill A. Shutemov /* free mm_slot */
1413b26e2701SQi Zheng hash_del(&slot->hash);
1414b26e2701SQi Zheng list_del(&slot->mm_node);
1415b46e756fSKirill A. Shutemov
1416b46e756fSKirill A. Shutemov /*
1417b46e756fSKirill A. Shutemov * Not strictly needed because the mm exited already.
1418b46e756fSKirill A. Shutemov *
1419b46e756fSKirill A. Shutemov * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1420b46e756fSKirill A. Shutemov */
1421b46e756fSKirill A. Shutemov
1422b46e756fSKirill A. Shutemov /* khugepaged_mm_lock actually not necessary for the below */
1423b26e2701SQi Zheng mm_slot_free(mm_slot_cache, mm_slot);
1424b46e756fSKirill A. Shutemov mmdrop(mm);
1425b46e756fSKirill A. Shutemov }
1426b46e756fSKirill A. Shutemov }
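/*
 * Expected caller context for collect_mm_slot() (the scan loop, not
 * shown in this excerpt) -- the lockdep assertion above encodes it:
 *
 *	spin_lock(&khugepaged_mm_lock);
 *	...
 *	collect_mm_slot(mm_slot);
 *	spin_unlock(&khugepaged_mm_lock);
 */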
1427b46e756fSKirill A. Shutemov
1428396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM
14291043173eSHugh Dickins /* hpage must be locked, and mmap_lock must be held */
143034488399SZach O'Keefe static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
143134488399SZach O'Keefe pmd_t *pmdp, struct page *hpage)
143234488399SZach O'Keefe {
143334488399SZach O'Keefe struct vm_fault vmf = {
143434488399SZach O'Keefe .vma = vma,
143534488399SZach O'Keefe .address = addr,
143634488399SZach O'Keefe .flags = 0,
143734488399SZach O'Keefe .pmd = pmdp,
143834488399SZach O'Keefe };
143934488399SZach O'Keefe
144034488399SZach O'Keefe VM_BUG_ON(!PageTransHuge(hpage));
14411043173eSHugh Dickins mmap_assert_locked(vma->vm_mm);
144234488399SZach O'Keefe
144334488399SZach O'Keefe if (do_set_pmd(&vmf, hpage))
144434488399SZach O'Keefe return SCAN_FAIL;
144534488399SZach O'Keefe
144634488399SZach O'Keefe get_page(hpage);
144734488399SZach O'Keefe return SCAN_SUCCEED;
144827e1f827SSong Liu }
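/*
 * NOTE: the get_page() above gives the newly installed PMD mapping its
 * own reference on hpage; the reference the caller took with
 * find_lock_page() is dropped again at drop_hpage in
 * collapse_pte_mapped_thp() below.
 */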
144927e1f827SSong Liu
145027e1f827SSong Liu /**
1451336e6b53SAlex Shi * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1452336e6b53SAlex Shi * address haddr.
1453336e6b53SAlex Shi *
1454336e6b53SAlex Shi * @mm: process address space where collapse happens
1455336e6b53SAlex Shi * @addr: THP collapse address
145634488399SZach O'Keefe * @install_pmd: Whether a huge PMD should be installed
145727e1f827SSong Liu *
145827e1f827SSong Liu * This function checks whether all the PTEs in the PMD are pointing to the
145927e1f827SSong Liu * right THP. If so, retract the page table so the THP can refault in with
146034488399SZach O'Keefe * as pmd-mapped. Possibly install a huge PMD mapping the THP.
146127e1f827SSong Liu */
146234488399SZach O'Keefe int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
146334488399SZach O'Keefe bool install_pmd)
146427e1f827SSong Liu {
14651043173eSHugh Dickins struct mmu_notifier_range range;
14661043173eSHugh Dickins bool notified = false;
146727e1f827SSong Liu unsigned long haddr = addr & HPAGE_PMD_MASK;
146894d815b2SLiam R. Howlett struct vm_area_struct *vma = vma_lookup(mm, haddr);
1469119a5fc1SHugh Dickins struct page *hpage;
147027e1f827SSong Liu pte_t *start_pte, *pte;
14711043173eSHugh Dickins pmd_t *pmd, pgt_pmd;
1472a9846049SHugh Dickins spinlock_t *pml = NULL, *ptl;
14731043173eSHugh Dickins int nr_ptes = 0, result = SCAN_FAIL;
147427e1f827SSong Liu int i;
147527e1f827SSong Liu
14761043173eSHugh Dickins mmap_assert_locked(mm);
14771043173eSHugh Dickins
14781043173eSHugh Dickins /* First check VMA found, in case page tables are being torn down */
14791043173eSHugh Dickins if (!vma || !vma->vm_file ||
14801043173eSHugh Dickins !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
14811043173eSHugh Dickins return SCAN_VMA_CHECK;
148258ac9a89SZach O'Keefe
148334488399SZach O'Keefe /* Fast check before locking page if already PMD-mapped */
148458ac9a89SZach O'Keefe result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
148534488399SZach O'Keefe if (result == SCAN_PMD_MAPPED)
148634488399SZach O'Keefe return result;
148758ac9a89SZach O'Keefe
148827e1f827SSong Liu /*
1489a7f4e6e4SZach O'Keefe * If we are here, we've succeeded in replacing all the native pages
1490a7f4e6e4SZach O'Keefe * in the page cache with a single hugepage. If a mm were to fault-in
1491a7f4e6e4SZach O'Keefe * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1492a7f4e6e4SZach O'Keefe * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1493a7f4e6e4SZach O'Keefe * analogously elide sysfs THP settings here.
149427e1f827SSong Liu */
1495a7f4e6e4SZach O'Keefe if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
149634488399SZach O'Keefe return SCAN_VMA_CHECK;
149727e1f827SSong Liu
1498deb4c93aSPeter Xu /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1499deb4c93aSPeter Xu if (userfaultfd_wp(vma))
150034488399SZach O'Keefe return SCAN_PTE_UFFD_WP;
1501deb4c93aSPeter Xu
1502119a5fc1SHugh Dickins hpage = find_lock_page(vma->vm_file->f_mapping,
1503119a5fc1SHugh Dickins linear_page_index(vma, haddr));
1504119a5fc1SHugh Dickins if (!hpage)
150534488399SZach O'Keefe return SCAN_PAGE_NULL;
1506119a5fc1SHugh Dickins
150734488399SZach O'Keefe if (!PageHead(hpage)) {
150834488399SZach O'Keefe result = SCAN_FAIL;
1509119a5fc1SHugh Dickins goto drop_hpage;
151034488399SZach O'Keefe }
1511119a5fc1SHugh Dickins
151234488399SZach O'Keefe if (compound_order(hpage) != HPAGE_PMD_ORDER) {
151334488399SZach O'Keefe result = SCAN_PAGE_COMPOUND;
1514119a5fc1SHugh Dickins goto drop_hpage;
151534488399SZach O'Keefe }
1516780a4b6fSZach O'Keefe
15171043173eSHugh Dickins result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
151834488399SZach O'Keefe switch (result) {
151934488399SZach O'Keefe case SCAN_SUCCEED:
152034488399SZach O'Keefe break;
152134488399SZach O'Keefe case SCAN_PMD_NONE:
152234488399SZach O'Keefe /*
15231d65b771SHugh Dickins * All pte entries have been removed and pmd cleared.
15241d65b771SHugh Dickins * Skip all the pte checks and just update the pmd mapping.
152534488399SZach O'Keefe */
152634488399SZach O'Keefe goto maybe_install_pmd;
152734488399SZach O'Keefe default:
152827e1f827SSong Liu goto drop_hpage;
152934488399SZach O'Keefe }
153027e1f827SSong Liu
153134488399SZach O'Keefe result = SCAN_FAIL;
1532895f5ee4SHugh Dickins start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
15331043173eSHugh Dickins if (!start_pte) /* mmap_lock + page lock should prevent this */
15341043173eSHugh Dickins goto drop_hpage;
153527e1f827SSong Liu
153627e1f827SSong Liu /* step 1: check all mapped PTEs are to the right huge page */
153727e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte;
153827e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
153927e1f827SSong Liu struct page *page;
1540c33c7948SRyan Roberts pte_t ptent = ptep_get(pte);
154127e1f827SSong Liu
154227e1f827SSong Liu /* empty pte, skip */
1543c33c7948SRyan Roberts if (pte_none(ptent))
154427e1f827SSong Liu continue;
154527e1f827SSong Liu
154627e1f827SSong Liu /* page swapped out, abort */
1547c33c7948SRyan Roberts if (!pte_present(ptent)) {
154834488399SZach O'Keefe result = SCAN_PTE_NON_PRESENT;
154927e1f827SSong Liu goto abort;
155034488399SZach O'Keefe }
155127e1f827SSong Liu
1552c33c7948SRyan Roberts page = vm_normal_page(vma, addr, ptent);
15533218f871SAlex Sierra if (WARN_ON_ONCE(page && is_zone_device_page(page)))
15543218f871SAlex Sierra page = NULL;
155527e1f827SSong Liu /*
1556119a5fc1SHugh Dickins * Note that uprobe, debugger, or MAP_PRIVATE may change the
1557119a5fc1SHugh Dickins * page table, but the new page will not be a subpage of hpage.
155827e1f827SSong Liu */
1559119a5fc1SHugh Dickins if (hpage + i != page)
156027e1f827SSong Liu goto abort;
156127e1f827SSong Liu }
156227e1f827SSong Liu
15631043173eSHugh Dickins pte_unmap_unlock(start_pte, ptl);
15641043173eSHugh Dickins mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
15651043173eSHugh Dickins haddr, haddr + HPAGE_PMD_SIZE);
15661043173eSHugh Dickins mmu_notifier_invalidate_range_start(&range);
15671043173eSHugh Dickins notified = true;
1568a9846049SHugh Dickins
1569a9846049SHugh Dickins /*
1570a9846049SHugh Dickins * pmd_lock covers a wider range than ptl, and (if split from mm's
1571a9846049SHugh Dickins * page_table_lock) ptl nests inside pml. The less time we hold pml,
1572a9846049SHugh Dickins * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1573a9846049SHugh Dickins * inserts a valid as-if-COWed PTE without even looking up page cache.
1574a9846049SHugh Dickins * So the page lock of hpage does not protect from it, and we must not
1575a9846049SHugh Dickins * drop ptl before pgt_pmd is removed; uffd private thus needs pml now.
1576a9846049SHugh Dickins */
1577a9846049SHugh Dickins if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1578a9846049SHugh Dickins pml = pmd_lock(mm, pmd);
1579a9846049SHugh Dickins
1580a9846049SHugh Dickins start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
15811043173eSHugh Dickins if (!start_pte) /* mmap_lock + page lock should prevent this */
15821043173eSHugh Dickins goto abort;
1583a9846049SHugh Dickins if (!pml)
1584a9846049SHugh Dickins spin_lock(ptl);
1585a9846049SHugh Dickins else if (ptl != pml)
1586a9846049SHugh Dickins spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
15871043173eSHugh Dickins
15881043173eSHugh Dickins /* step 2: clear page table and adjust rmap */
158927e1f827SSong Liu for (i = 0, addr = haddr, pte = start_pte;
159027e1f827SSong Liu i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
159127e1f827SSong Liu struct page *page;
1592c33c7948SRyan Roberts pte_t ptent = ptep_get(pte);
159327e1f827SSong Liu
1594c33c7948SRyan Roberts if (pte_none(ptent))
159527e1f827SSong Liu continue;
15961043173eSHugh Dickins /*
15971043173eSHugh Dickins * We dropped ptl after the first scan, to do the mmu_notifier:
15981043173eSHugh Dickins * page lock stops more PTEs of the hpage being faulted in, but
15991043173eSHugh Dickins * does not stop write faults COWing anon copies from existing
16001043173eSHugh Dickins * PTEs; and does not stop those being swapped out or migrated.
16011043173eSHugh Dickins */
16021043173eSHugh Dickins if (!pte_present(ptent)) {
16031043173eSHugh Dickins result = SCAN_PTE_NON_PRESENT;
16043218f871SAlex Sierra goto abort;
16051043173eSHugh Dickins }
16061043173eSHugh Dickins page = vm_normal_page(vma, addr, ptent);
16071043173eSHugh Dickins if (hpage + i != page)
16081043173eSHugh Dickins goto abort;
16091043173eSHugh Dickins
16101043173eSHugh Dickins /*
16111043173eSHugh Dickins * Must clear entry, or a racing truncate may re-remove it.
16121043173eSHugh Dickins * TLB flush can be left until pmdp_collapse_flush() does it.
16131043173eSHugh Dickins * PTE dirty? Shmem page is already dirty; file is read-only.
16141043173eSHugh Dickins */
16151043173eSHugh Dickins ptep_clear(mm, addr, pte);
1616cea86fe2SHugh Dickins page_remove_rmap(page, vma, false);
16171043173eSHugh Dickins nr_ptes++;
161827e1f827SSong Liu }
161927e1f827SSong Liu
1620a9846049SHugh Dickins pte_unmap(start_pte);
1621a9846049SHugh Dickins if (!pml)
1622a9846049SHugh Dickins spin_unlock(ptl);
162327e1f827SSong Liu
162427e1f827SSong Liu /* step 3: set proper refcount and mm_counters. */
16251043173eSHugh Dickins if (nr_ptes) {
16261043173eSHugh Dickins page_ref_sub(hpage, nr_ptes);
16271043173eSHugh Dickins add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
162827e1f827SSong Liu }
162927e1f827SSong Liu
1630a9846049SHugh Dickins /* step 4: remove empty page table */
1631a9846049SHugh Dickins if (!pml) {
16321043173eSHugh Dickins pml = pmd_lock(mm, pmd);
16331043173eSHugh Dickins if (ptl != pml)
16341043173eSHugh Dickins spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1635a9846049SHugh Dickins }
16361043173eSHugh Dickins pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
16371043173eSHugh Dickins pmdp_get_lockless_sync();
16381043173eSHugh Dickins if (ptl != pml)
16391043173eSHugh Dickins spin_unlock(ptl);
16401043173eSHugh Dickins spin_unlock(pml);
1641ab0c3f12SHugh Dickins
16421043173eSHugh Dickins mmu_notifier_invalidate_range_end(&range);
164334488399SZach O'Keefe
16441043173eSHugh Dickins mm_dec_nr_ptes(mm);
16451043173eSHugh Dickins page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
16461043173eSHugh Dickins pte_free_defer(mm, pmd_pgtable(pgt_pmd));
16478d3c106eSJann Horn
164834488399SZach O'Keefe maybe_install_pmd:
164934488399SZach O'Keefe /* step 5: install pmd entry */
165034488399SZach O'Keefe result = install_pmd
165134488399SZach O'Keefe ? set_huge_pmd(vma, haddr, pmd, hpage)
165234488399SZach O'Keefe : SCAN_SUCCEED;
16531043173eSHugh Dickins goto drop_hpage;
16541043173eSHugh Dickins abort:
16551043173eSHugh Dickins if (nr_ptes) {
16561043173eSHugh Dickins flush_tlb_mm(mm);
16571043173eSHugh Dickins page_ref_sub(hpage, nr_ptes);
16581043173eSHugh Dickins add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
16591043173eSHugh Dickins }
16601043173eSHugh Dickins if (start_pte)
16611043173eSHugh Dickins pte_unmap_unlock(start_pte, ptl);
1662a9846049SHugh Dickins if (pml && pml != ptl)
1663a9846049SHugh Dickins spin_unlock(pml);
16641043173eSHugh Dickins if (notified)
16651043173eSHugh Dickins mmu_notifier_invalidate_range_end(&range);
1666119a5fc1SHugh Dickins drop_hpage:
1667119a5fc1SHugh Dickins unlock_page(hpage);
1668119a5fc1SHugh Dickins put_page(hpage);
166934488399SZach O'Keefe return result;
167027e1f827SSong Liu }
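/*
 * Recap of collapse_pte_mapped_thp() above, following its own step
 * comments:
 *	step 1: every present pte must map the matching subpage of hpage
 *	step 2: clear the ptes and adjust rmap (ptl held; pml too for
 *		uffd-armed private VMAs)
 *	step 3: fix hpage's refcount and the mm file counters
 *	step 4: pmdp_collapse_flush() the emptied page table and free it
 *		via pte_free_defer()
 *	step 5: optionally install a huge pmd with set_huge_pmd()
 */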
167127e1f827SSong Liu
16721d65b771SHugh Dickins static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1673f3f0e1d2SKirill A. Shutemov {
1674f3f0e1d2SKirill A. Shutemov struct vm_area_struct *vma;
1675f3f0e1d2SKirill A. Shutemov
16761d65b771SHugh Dickins i_mmap_lock_read(mapping);
1677f3f0e1d2SKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
16781d65b771SHugh Dickins struct mmu_notifier_range range;
16791d65b771SHugh Dickins struct mm_struct *mm;
16801d65b771SHugh Dickins unsigned long addr;
16811d65b771SHugh Dickins pmd_t *pmd, pgt_pmd;
16821d65b771SHugh Dickins spinlock_t *pml;
16831d65b771SHugh Dickins spinlock_t *ptl;
16841d65b771SHugh Dickins bool skipped_uffd = false;
168534488399SZach O'Keefe
168627e1f827SSong Liu /*
168727e1f827SSong Liu * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
16881d65b771SHugh Dickins * got written to. These VMAs are likely not worth removing
16891d65b771SHugh Dickins * page tables from, as the PMD mapping is likely to be split later.
169027e1f827SSong Liu */
16911d65b771SHugh Dickins if (READ_ONCE(vma->anon_vma))
16921d65b771SHugh Dickins continue;
16931d65b771SHugh Dickins
1694f3f0e1d2SKirill A. Shutemov addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
169534488399SZach O'Keefe if (addr & ~HPAGE_PMD_MASK ||
16961d65b771SHugh Dickins vma->vm_end < addr + HPAGE_PMD_SIZE)
169734488399SZach O'Keefe continue;
16981d65b771SHugh Dickins
16991d65b771SHugh Dickins mm = vma->vm_mm;
17001d65b771SHugh Dickins if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
17011d65b771SHugh Dickins continue;
17021d65b771SHugh Dickins
17031d65b771SHugh Dickins if (hpage_collapse_test_exit(mm))
17041d65b771SHugh Dickins continue;
17051d65b771SHugh Dickins /*
17061d65b771SHugh Dickins * When a vma is registered with uffd-wp, we cannot recycle
17071d65b771SHugh Dickins * the page table because there may be pte markers installed.
17081d65b771SHugh Dickins * Other vmas can still have the same file mapped hugely, but
17091d65b771SHugh Dickins * skip this one: it will always be mapped in small page size
17101d65b771SHugh Dickins * for uffd-wp registered ranges.
17111d65b771SHugh Dickins */
17121d65b771SHugh Dickins if (userfaultfd_wp(vma))
17131d65b771SHugh Dickins continue;
17141d65b771SHugh Dickins
17151d65b771SHugh Dickins /* PTEs were notified when unmapped; but now for the PMD? */
17161d65b771SHugh Dickins mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
17171d65b771SHugh Dickins addr, addr + HPAGE_PMD_SIZE);
17181d65b771SHugh Dickins mmu_notifier_invalidate_range_start(&range);
17191d65b771SHugh Dickins
17201d65b771SHugh Dickins pml = pmd_lock(mm, pmd);
17211d65b771SHugh Dickins ptl = pte_lockptr(mm, pmd);
17221d65b771SHugh Dickins if (ptl != pml)
17231d65b771SHugh Dickins spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
17241d65b771SHugh Dickins
17251d65b771SHugh Dickins /*
17261d65b771SHugh Dickins * Huge page lock is still held, so normally the page table
17271d65b771SHugh Dickins * must remain empty; and we have already skipped anon_vma
17281d65b771SHugh Dickins * and userfaultfd_wp() vmas. But since the mmap_lock is not
17291d65b771SHugh Dickins * held, it is still possible for a racing userfaultfd_ioctl()
17301d65b771SHugh Dickins * to have inserted ptes or markers. Now that we hold ptlock,
17311d65b771SHugh Dickins * repeating the anon_vma check protects from one category,
17321d65b771SHugh Dickins * and repeating the userfaultfd_wp() check from another.
17331d65b771SHugh Dickins */
17341d65b771SHugh Dickins if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
17351d65b771SHugh Dickins skipped_uffd = true;
17361d65b771SHugh Dickins } else {
17371d65b771SHugh Dickins pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
17381d65b771SHugh Dickins pmdp_get_lockless_sync();
173934488399SZach O'Keefe }
17401d65b771SHugh Dickins
17411d65b771SHugh Dickins if (ptl != pml)
17421d65b771SHugh Dickins spin_unlock(ptl);
17431d65b771SHugh Dickins spin_unlock(pml);
17441d65b771SHugh Dickins
17451d65b771SHugh Dickins mmu_notifier_invalidate_range_end(&range);
17461d65b771SHugh Dickins
17471d65b771SHugh Dickins if (!skipped_uffd) {
17481d65b771SHugh Dickins mm_dec_nr_ptes(mm);
17491d65b771SHugh Dickins page_table_check_pte_clear_range(mm, addr, pgt_pmd);
17501d65b771SHugh Dickins pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1751f3f0e1d2SKirill A. Shutemov }
17521d65b771SHugh Dickins }
17531d65b771SHugh Dickins i_mmap_unlock_read(mapping);
1754f3f0e1d2SKirill A. Shutemov }
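/*
 * A hypothetical userspace view of retract_page_tables() (names and
 * sizes below are illustrative only): the range still maps the same
 * file contents, only the empty page table is gone, so the next
 * access can refault the THP as pmd-mapped:
 *
 *	void *p = mmap(NULL, 2ul << 20, PROT_READ, MAP_SHARED, fd, 0);
 *	madvise(p, 2ul << 20, MADV_COLLAPSE);
 *	... later loads of *p can be served by a single huge pmd ...
 */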
1755f3f0e1d2SKirill A. Shutemov
1756f3f0e1d2SKirill A. Shutemov /**
175799cb0dbdSSong Liu * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1758f3f0e1d2SKirill A. Shutemov *
1759336e6b53SAlex Shi * @mm: process address space where collapse happens
176034488399SZach O'Keefe * @addr: virtual collapse start address
1761336e6b53SAlex Shi * @file: file that the collapse operates on
1762336e6b53SAlex Shi * @start: starting page offset of the collapse range
17639710a78aSZach O'Keefe * @cc: collapse context and scratchpad
1764336e6b53SAlex Shi *
1765f3f0e1d2SKirill A. Shutemov * Basic scheme is simple, details are more complex:
176687c460a0SHugh Dickins * - allocate and lock a new huge page;
1767a2e17cc2SDavid Stevens * - scan page cache, locking old pages
176899cb0dbdSSong Liu * + swap/gup in pages if necessary;
1769a2e17cc2SDavid Stevens * - copy data to new page
1770a2e17cc2SDavid Stevens * - handle shmem holes
1771a2e17cc2SDavid Stevens * + re-validate that holes weren't filled by someone else
1772a2e17cc2SDavid Stevens * + check for userfaultfd
1773ac492b9cSDavid Stevens * - finalize updates to the page cache;
177477da9389SMatthew Wilcox * - if replacing succeeds:
177587c460a0SHugh Dickins * + unlock huge page;
1776a2e17cc2SDavid Stevens * + free old pages;
1777f3f0e1d2SKirill A. Shutemov * - if replacing failed:
1778a2e17cc2SDavid Stevens * + unlock old pages
177987c460a0SHugh Dickins * + unlock and free huge page;
1780f3f0e1d2SKirill A. Shutemov */
178134488399SZach O'Keefe static int collapse_file(struct mm_struct *mm, unsigned long addr,
1782579c571eSSong Liu struct file *file, pgoff_t start,
178334488399SZach O'Keefe struct collapse_control *cc)
1784f3f0e1d2SKirill A. Shutemov {
1785579c571eSSong Liu struct address_space *mapping = file->f_mapping;
178612904d95SJiaqi Yan struct page *page;
178736249a0bSMatthew Wilcox (Oracle) struct page *tmp, *dst;
178803e36dbaSMatthew Wilcox (Oracle) struct folio *folio, *new_folio;
17894c9473e8SGautam Menghani pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1790f3f0e1d2SKirill A. Shutemov LIST_HEAD(pagelist);
179177da9389SMatthew Wilcox XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1792f3f0e1d2SKirill A. Shutemov int nr_none = 0, result = SCAN_SUCCEED;
179399cb0dbdSSong Liu bool is_shmem = shmem_file(file);
1794f3f0e1d2SKirill A. Shutemov
179599cb0dbdSSong Liu VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1796f3f0e1d2SKirill A. Shutemov VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1797f3f0e1d2SKirill A. Shutemov
179803e36dbaSMatthew Wilcox (Oracle) result = alloc_charge_folio(&new_folio, mm, cc);
17999710a78aSZach O'Keefe if (result != SCAN_SUCCEED)
1800f3f0e1d2SKirill A. Shutemov goto out;
1801f3f0e1d2SKirill A. Shutemov
180236249a0bSMatthew Wilcox (Oracle) __folio_set_locked(new_folio);
1803cae106ddSDavid Stevens if (is_shmem)
180436249a0bSMatthew Wilcox (Oracle) __folio_set_swapbacked(new_folio);
180536249a0bSMatthew Wilcox (Oracle) new_folio->index = start;
180636249a0bSMatthew Wilcox (Oracle) new_folio->mapping = mapping;
1807cae106ddSDavid Stevens
18086b24ca4aSMatthew Wilcox (Oracle) /*
18096b24ca4aSMatthew Wilcox (Oracle) * Ensure we have slots for all the pages in the range. This is
18106b24ca4aSMatthew Wilcox (Oracle) * almost certainly a no-op because most of the pages must be present
18116b24ca4aSMatthew Wilcox (Oracle) */
181295feeabbSHugh Dickins do {
181395feeabbSHugh Dickins xas_lock_irq(&xas);
181495feeabbSHugh Dickins xas_create_range(&xas);
181595feeabbSHugh Dickins if (!xas_error(&xas))
181695feeabbSHugh Dickins break;
181795feeabbSHugh Dickins xas_unlock_irq(&xas);
181895feeabbSHugh Dickins if (!xas_nomem(&xas, GFP_KERNEL)) {
181995feeabbSHugh Dickins result = SCAN_FAIL;
1820cae106ddSDavid Stevens goto rollback;
182195feeabbSHugh Dickins }
182295feeabbSHugh Dickins } while (1);
182395feeabbSHugh Dickins
182477da9389SMatthew Wilcox for (index = start; index < end; index++) {
1825e8c716bcSHugh Dickins xas_set(&xas, index);
1826e8c716bcSHugh Dickins page = xas_load(&xas);
182777da9389SMatthew Wilcox
182877da9389SMatthew Wilcox VM_BUG_ON(index != xas.xa_index);
182999cb0dbdSSong Liu if (is_shmem) {
183077da9389SMatthew Wilcox if (!page) {
1831701270faSHugh Dickins /*
183299cb0dbdSSong Liu * Stop if extent has been truncated or
183399cb0dbdSSong Liu * hole-punched, and is now completely
183499cb0dbdSSong Liu * empty.
1835701270faSHugh Dickins */
1836701270faSHugh Dickins if (index == start) {
1837701270faSHugh Dickins if (!xas_next_entry(&xas, end - 1)) {
1838701270faSHugh Dickins result = SCAN_TRUNCATED;
1839042a3082SHugh Dickins goto xa_locked;
1840701270faSHugh Dickins }
1841701270faSHugh Dickins }
184277da9389SMatthew Wilcox nr_none++;
184377da9389SMatthew Wilcox continue;
1844f3f0e1d2SKirill A. Shutemov }
1845f3f0e1d2SKirill A. Shutemov
18463159f943SMatthew Wilcox if (xa_is_value(page) || !PageUptodate(page)) {
184777da9389SMatthew Wilcox xas_unlock_irq(&xas);
1848f3f0e1d2SKirill A. Shutemov /* swap in or instantiate fallocated page */
18497459c149SMatthew Wilcox (Oracle) if (shmem_get_folio(mapping->host, index,
18507459c149SMatthew Wilcox (Oracle) &folio, SGP_NOALLOC)) {
1851f3f0e1d2SKirill A. Shutemov result = SCAN_FAIL;
185277da9389SMatthew Wilcox goto xa_unlocked;
1853f3f0e1d2SKirill A. Shutemov }
18541fec6890SMatthew Wilcox (Oracle) /* drain lru cache to help isolate_lru_page() */
1855efa3d814SDavid Stevens lru_add_drain();
18567459c149SMatthew Wilcox (Oracle) page = folio_file_page(folio, index);
1857f3f0e1d2SKirill A. Shutemov } else if (trylock_page(page)) {
1858f3f0e1d2SKirill A. Shutemov get_page(page);
1859042a3082SHugh Dickins xas_unlock_irq(&xas);
1860f3f0e1d2SKirill A. Shutemov } else {
1861f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LOCK;
1862042a3082SHugh Dickins goto xa_locked;
1863f3f0e1d2SKirill A. Shutemov }
186499cb0dbdSSong Liu } else { /* !is_shmem */
186599cb0dbdSSong Liu if (!page || xa_is_value(page)) {
186699cb0dbdSSong Liu xas_unlock_irq(&xas);
186799cb0dbdSSong Liu page_cache_sync_readahead(mapping, &file->f_ra,
186899cb0dbdSSong Liu file, index,
1869e5a59d30SDavid Howells end - index);
18701fec6890SMatthew Wilcox (Oracle) /* drain lru cache to help isolate_lru_page() */
187199cb0dbdSSong Liu lru_add_drain();
187299cb0dbdSSong Liu page = find_lock_page(mapping, index);
187399cb0dbdSSong Liu if (unlikely(page == NULL)) {
187499cb0dbdSSong Liu result = SCAN_FAIL;
187599cb0dbdSSong Liu goto xa_unlocked;
187699cb0dbdSSong Liu }
187775f36069SSong Liu } else if (PageDirty(page)) {
187875f36069SSong Liu /*
187975f36069SSong Liu * khugepaged only works on a read-only fd,
188075f36069SSong Liu * so this page is dirty because it hasn't
188175f36069SSong Liu * been flushed since first write. There
188275f36069SSong Liu * won't be new dirty pages.
188375f36069SSong Liu *
188475f36069SSong Liu * Trigger async flush here and hope the
188575f36069SSong Liu * writeback is done when khugepaged
188675f36069SSong Liu * revisits this page.
188775f36069SSong Liu *
188875f36069SSong Liu * This is a one-off situation. We are not
188975f36069SSong Liu * forcing writeback in loop.
189075f36069SSong Liu */
189175f36069SSong Liu xas_unlock_irq(&xas);
189275f36069SSong Liu filemap_flush(mapping);
189375f36069SSong Liu result = SCAN_FAIL;
189475f36069SSong Liu goto xa_unlocked;
189574c42e1bSRongwei Wang } else if (PageWriteback(page)) {
189674c42e1bSRongwei Wang xas_unlock_irq(&xas);
189774c42e1bSRongwei Wang result = SCAN_FAIL;
189874c42e1bSRongwei Wang goto xa_unlocked;
189999cb0dbdSSong Liu } else if (trylock_page(page)) {
190099cb0dbdSSong Liu get_page(page);
190199cb0dbdSSong Liu xas_unlock_irq(&xas);
190299cb0dbdSSong Liu } else {
190399cb0dbdSSong Liu result = SCAN_PAGE_LOCK;
190499cb0dbdSSong Liu goto xa_locked;
190599cb0dbdSSong Liu }
190699cb0dbdSSong Liu }
1907f3f0e1d2SKirill A. Shutemov
1908f3f0e1d2SKirill A. Shutemov /*
1909b93b0163SMatthew Wilcox * The page must be locked, so we can drop the i_pages lock
1910f3f0e1d2SKirill A. Shutemov * without racing with truncate.
1911f3f0e1d2SKirill A. Shutemov */
1912f3f0e1d2SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page);
19134655e5e5SSong Liu
19144655e5e5SSong Liu /* make sure the page is up to date */
19154655e5e5SSong Liu if (unlikely(!PageUptodate(page))) {
19164655e5e5SSong Liu result = SCAN_FAIL;
19174655e5e5SSong Liu goto out_unlock;
19184655e5e5SSong Liu }
191906a5e126SHugh Dickins
192006a5e126SHugh Dickins /*
192106a5e126SHugh Dickins * If file was truncated then extended, or hole-punched, before
192206a5e126SHugh Dickins * we locked the first page, then a THP might be there already.
192358ac9a89SZach O'Keefe * This will be discovered on the first iteration.
192406a5e126SHugh Dickins */
192506a5e126SHugh Dickins if (PageTransCompound(page)) {
192658ac9a89SZach O'Keefe struct page *head = compound_head(page);
192758ac9a89SZach O'Keefe
192858ac9a89SZach O'Keefe result = compound_order(head) == HPAGE_PMD_ORDER &&
192958ac9a89SZach O'Keefe head->index == start
193058ac9a89SZach O'Keefe /* Maybe PMD-mapped */
193158ac9a89SZach O'Keefe ? SCAN_PTE_MAPPED_HUGEPAGE
193258ac9a89SZach O'Keefe : SCAN_PAGE_COMPOUND;
193306a5e126SHugh Dickins goto out_unlock;
193406a5e126SHugh Dickins }
1935f3f0e1d2SKirill A. Shutemov
193664ab3195SVishal Moola (Oracle) folio = page_folio(page);
193764ab3195SVishal Moola (Oracle)
193864ab3195SVishal Moola (Oracle) if (folio_mapping(folio) != mapping) {
1939f3f0e1d2SKirill A. Shutemov result = SCAN_TRUNCATED;
1940f3f0e1d2SKirill A. Shutemov goto out_unlock;
1941f3f0e1d2SKirill A. Shutemov }
1942f3f0e1d2SKirill A. Shutemov
194364ab3195SVishal Moola (Oracle) if (!is_shmem && (folio_test_dirty(folio) ||
194464ab3195SVishal Moola (Oracle) folio_test_writeback(folio))) {
19454655e5e5SSong Liu /*
19464655e5e5SSong Liu * khugepaged only works on a read-only fd, so this
19474655e5e5SSong Liu * page is dirty because it hasn't been flushed
19484655e5e5SSong Liu * since first write.
19494655e5e5SSong Liu */
19504655e5e5SSong Liu result = SCAN_FAIL;
19514655e5e5SSong Liu goto out_unlock;
19524655e5e5SSong Liu }
19534655e5e5SSong Liu
1954be2d5756SBaolin Wang if (!folio_isolate_lru(folio)) {
1955f3f0e1d2SKirill A. Shutemov result = SCAN_DEL_PAGE_LRU;
1956042a3082SHugh Dickins goto out_unlock;
1957f3f0e1d2SKirill A. Shutemov }
1958f3f0e1d2SKirill A. Shutemov
19590201ebf2SDavid Howells if (!filemap_release_folio(folio, GFP_KERNEL)) {
196099cb0dbdSSong Liu result = SCAN_PAGE_HAS_PRIVATE;
196164ab3195SVishal Moola (Oracle) folio_putback_lru(folio);
196299cb0dbdSSong Liu goto out_unlock;
196399cb0dbdSSong Liu }
196499cb0dbdSSong Liu
196564ab3195SVishal Moola (Oracle) if (folio_mapped(folio))
196664ab3195SVishal Moola (Oracle) try_to_unmap(folio,
1967869f7ee6SMatthew Wilcox (Oracle) TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1968f3f0e1d2SKirill A. Shutemov
196977da9389SMatthew Wilcox xas_lock_irq(&xas);
1970f3f0e1d2SKirill A. Shutemov
1971e8c716bcSHugh Dickins VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
1972f3f0e1d2SKirill A. Shutemov
1973f3f0e1d2SKirill A. Shutemov /*
1974a2e17cc2SDavid Stevens * We control three references to the page:
1975f3f0e1d2SKirill A. Shutemov * - we hold a pin on it;
197677da9389SMatthew Wilcox * - one reference from page cache;
1977f3f0e1d2SKirill A. Shutemov * - one from isolate_lru_page;
1978a2e17cc2SDavid Stevens * If those are the only references, then any new usage of the
1979a2e17cc2SDavid Stevens * page will have to fetch it from the page cache. That requires
1980a2e17cc2SDavid Stevens * locking the page to handle truncate, so any new usage will be
1981a2e17cc2SDavid Stevens * blocked until we unlock page after collapse/during rollback.
1982f3f0e1d2SKirill A. Shutemov */
1983a2e17cc2SDavid Stevens if (page_count(page) != 3) {
1984f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT;
1985042a3082SHugh Dickins xas_unlock_irq(&xas);
1986042a3082SHugh Dickins putback_lru_page(page);
1987042a3082SHugh Dickins goto out_unlock;
1988f3f0e1d2SKirill A. Shutemov }
1989f3f0e1d2SKirill A. Shutemov
1990f3f0e1d2SKirill A. Shutemov /*
1991a2e17cc2SDavid Stevens * Accumulate the pages that are being collapsed.
1992f3f0e1d2SKirill A. Shutemov */
1993f3f0e1d2SKirill A. Shutemov list_add_tail(&page->lru, &pagelist);
1994f3f0e1d2SKirill A. Shutemov continue;
1995f3f0e1d2SKirill A. Shutemov out_unlock:
1996f3f0e1d2SKirill A. Shutemov unlock_page(page);
1997f3f0e1d2SKirill A. Shutemov put_page(page);
1998042a3082SHugh Dickins goto xa_unlocked;
1999f3f0e1d2SKirill A. Shutemov }
2000f3f0e1d2SKirill A. Shutemov
200112904d95SJiaqi Yan if (!is_shmem) {
200209d91cdaSSong Liu filemap_nr_thps_inc(mapping);
2003eb6ecbedSCollin Fijalkovich /*
2004eb6ecbedSCollin Fijalkovich * Paired with smp_mb() in do_dentry_open() to ensure
2005eb6ecbedSCollin Fijalkovich * i_writecount is up to date and the update to nr_thps is
2006eb6ecbedSCollin Fijalkovich * visible. Ensures the page cache will be truncated if the
2007eb6ecbedSCollin Fijalkovich * file is opened writable.
2008eb6ecbedSCollin Fijalkovich */
2009eb6ecbedSCollin Fijalkovich smp_mb();
2010eb6ecbedSCollin Fijalkovich if (inode_is_open_for_write(mapping->host)) {
2011eb6ecbedSCollin Fijalkovich result = SCAN_FAIL;
2012eb6ecbedSCollin Fijalkovich filemap_nr_thps_dec(mapping);
2013eb6ecbedSCollin Fijalkovich }
201409d91cdaSSong Liu }
201599cb0dbdSSong Liu
2016042a3082SHugh Dickins xa_locked:
2017042a3082SHugh Dickins xas_unlock_irq(&xas);
201877da9389SMatthew Wilcox xa_unlocked:
2019042a3082SHugh Dickins
20206d9df8a5SHugh Dickins /*
20216d9df8a5SHugh Dickins * If collapse is successful, flush must be done now before copying.
20226d9df8a5SHugh Dickins * If collapse is unsuccessful, does flush actually need to be done?
20236d9df8a5SHugh Dickins * Do it anyway, to clear the state.
20246d9df8a5SHugh Dickins */
20256d9df8a5SHugh Dickins try_to_unmap_flush();
20266d9df8a5SHugh Dickins
2027509f0069SHugh Dickins if (result == SCAN_SUCCEED && nr_none &&
2028509f0069SHugh Dickins !shmem_charge(mapping->host, nr_none))
2029509f0069SHugh Dickins result = SCAN_FAIL;
2030509f0069SHugh Dickins if (result != SCAN_SUCCEED) {
2031509f0069SHugh Dickins nr_none = 0;
2032cae106ddSDavid Stevens goto rollback;
2033509f0069SHugh Dickins }
2034cae106ddSDavid Stevens
2035f3f0e1d2SKirill A. Shutemov /*
2036a2e17cc2SDavid Stevens * The old pages are locked, so they won't change anymore.
2037f3f0e1d2SKirill A. Shutemov */
20382af8ff29SHugh Dickins index = start;
203936249a0bSMatthew Wilcox (Oracle) dst = folio_page(new_folio, 0);
204012904d95SJiaqi Yan list_for_each_entry(page, &pagelist, lru) {
20412af8ff29SHugh Dickins while (index < page->index) {
204236249a0bSMatthew Wilcox (Oracle) clear_highpage(dst);
20432af8ff29SHugh Dickins index++;
204436249a0bSMatthew Wilcox (Oracle) dst++;
20452af8ff29SHugh Dickins }
204636249a0bSMatthew Wilcox (Oracle) if (copy_mc_highpage(dst, page) > 0) {
204712904d95SJiaqi Yan result = SCAN_COPY_MC;
2048cae106ddSDavid Stevens goto rollback;
204912904d95SJiaqi Yan }
205012904d95SJiaqi Yan index++;
205136249a0bSMatthew Wilcox (Oracle) dst++;
205212904d95SJiaqi Yan }
2053cae106ddSDavid Stevens while (index < end) {
205436249a0bSMatthew Wilcox (Oracle) clear_highpage(dst);
205512904d95SJiaqi Yan index++;
205636249a0bSMatthew Wilcox (Oracle) dst++;
205712904d95SJiaqi Yan }
205812904d95SJiaqi Yan
2059ac492b9cSDavid Stevens if (nr_none) {
2060ac492b9cSDavid Stevens struct vm_area_struct *vma;
2061ac492b9cSDavid Stevens int nr_none_check = 0;
2062ac492b9cSDavid Stevens
2063ac492b9cSDavid Stevens i_mmap_lock_read(mapping);
2064ac492b9cSDavid Stevens xas_lock_irq(&xas);
2065ac492b9cSDavid Stevens
2066ac492b9cSDavid Stevens xas_set(&xas, start);
2067ac492b9cSDavid Stevens for (index = start; index < end; index++) {
2068ac492b9cSDavid Stevens if (!xas_next(&xas)) {
2069ac492b9cSDavid Stevens xas_store(&xas, XA_RETRY_ENTRY);
2070ac492b9cSDavid Stevens if (xas_error(&xas)) {
2071ac492b9cSDavid Stevens result = SCAN_STORE_FAILED;
2072ac492b9cSDavid Stevens goto immap_locked;
2073ac492b9cSDavid Stevens }
2074ac492b9cSDavid Stevens nr_none_check++;
2075ac492b9cSDavid Stevens }
2076ac492b9cSDavid Stevens }
2077ac492b9cSDavid Stevens
2078ac492b9cSDavid Stevens if (nr_none != nr_none_check) {
2079ac492b9cSDavid Stevens result = SCAN_PAGE_FILLED;
2080ac492b9cSDavid Stevens goto immap_locked;
2081ac492b9cSDavid Stevens }
2082ac492b9cSDavid Stevens
208312904d95SJiaqi Yan /*
208436249a0bSMatthew Wilcox (Oracle) * If userspace observed a missing page in a VMA with
208536249a0bSMatthew Wilcox (Oracle) * a MODE_MISSING userfaultfd, then it might expect a
208636249a0bSMatthew Wilcox (Oracle) * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
208736249a0bSMatthew Wilcox (Oracle) * roll back to avoid suppressing such an event. The
208836249a0bSMatthew Wilcox (Oracle) * wp/minor userfaultfd modes don't give userspace any
208936249a0bSMatthew Wilcox (Oracle) * guarantees that the kernel doesn't fill a missing
209036249a0bSMatthew Wilcox (Oracle) * page with a zero page, so they don't matter here.
2091ac492b9cSDavid Stevens *
209236249a0bSMatthew Wilcox (Oracle) * Any userfaultfds registered after this point will
209336249a0bSMatthew Wilcox (Oracle) * not be able to observe any missing pages due to the
209436249a0bSMatthew Wilcox (Oracle) * previously inserted retry entries.
209512904d95SJiaqi Yan */
2096ac492b9cSDavid Stevens vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2097ac492b9cSDavid Stevens if (userfaultfd_missing(vma)) {
2098ac492b9cSDavid Stevens result = SCAN_EXCEED_NONE_PTE;
2099ac492b9cSDavid Stevens goto immap_locked;
2100ac492b9cSDavid Stevens }
2101ac492b9cSDavid Stevens }
2102ac492b9cSDavid Stevens
2103ac492b9cSDavid Stevens immap_locked:
2104ac492b9cSDavid Stevens i_mmap_unlock_read(mapping);
2105ac492b9cSDavid Stevens if (result != SCAN_SUCCEED) {
2106ac492b9cSDavid Stevens xas_set(&xas, start);
2107ac492b9cSDavid Stevens for (index = start; index < end; index++) {
2108ac492b9cSDavid Stevens if (xas_next(&xas) == XA_RETRY_ENTRY)
2109ac492b9cSDavid Stevens xas_store(&xas, NULL);
2110ac492b9cSDavid Stevens }
2111ac492b9cSDavid Stevens
2112ac492b9cSDavid Stevens xas_unlock_irq(&xas);
2113ac492b9cSDavid Stevens goto rollback;
2114ac492b9cSDavid Stevens }
2115ac492b9cSDavid Stevens } else {
2116ac492b9cSDavid Stevens xas_lock_irq(&xas);
21172af8ff29SHugh Dickins }
211812904d95SJiaqi Yan
211912904d95SJiaqi Yan if (is_shmem)
212036249a0bSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
212112904d95SJiaqi Yan else
212236249a0bSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
212312904d95SJiaqi Yan
212412904d95SJiaqi Yan if (nr_none) {
212536249a0bSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
212612904d95SJiaqi Yan /* nr_none is always 0 for non-shmem. */
212736249a0bSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
2128f3f0e1d2SKirill A. Shutemov }
2129f3f0e1d2SKirill A. Shutemov
2130a2e17cc2SDavid Stevens /*
213136249a0bSMatthew Wilcox (Oracle) * Mark new_folio as uptodate before inserting it into the
213236249a0bSMatthew Wilcox (Oracle)	 * page cache so that it isn't mistaken for a fallocated but
213336249a0bSMatthew Wilcox (Oracle) * unwritten page.
2134a2e17cc2SDavid Stevens */
213536249a0bSMatthew Wilcox (Oracle) folio_mark_uptodate(new_folio);
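	/*
	 * The page cache holds one reference per subpage index covered
	 * by the multi-index entry: the allocation reference plus the
	 * HPAGE_PMD_NR - 1 added here account for them.
	 */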
213636249a0bSMatthew Wilcox (Oracle) folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2137284a344eSVishal Moola (Oracle)
21386058eaecSJohannes Weiner if (is_shmem)
213936249a0bSMatthew Wilcox (Oracle) folio_mark_dirty(new_folio);
214036249a0bSMatthew Wilcox (Oracle) folio_add_lru(new_folio);
2141f3f0e1d2SKirill A. Shutemov
2142a2e17cc2SDavid Stevens /* Join all the small entries into a single multi-index entry. */
2143a2e17cc2SDavid Stevens xas_set_order(&xas, start, HPAGE_PMD_ORDER);
214436249a0bSMatthew Wilcox (Oracle) xas_store(&xas, new_folio);
21450175ab61SHugh Dickins WARN_ON_ONCE(xas_error(&xas));
2146a2e17cc2SDavid Stevens xas_unlock_irq(&xas);
2147a2e17cc2SDavid Stevens
2148042a3082SHugh Dickins /*
2149042a3082SHugh Dickins * Remove pte page tables, so we can re-fault the page as huge.
21501d65b771SHugh Dickins * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2151042a3082SHugh Dickins */
21521d65b771SHugh Dickins retract_page_tables(mapping, start);
21531d65b771SHugh Dickins if (cc && !cc->is_khugepaged)
21541d65b771SHugh Dickins result = SCAN_PTE_MAPPED_HUGEPAGE;
215536249a0bSMatthew Wilcox (Oracle) folio_unlock(new_folio);
2156ac492b9cSDavid Stevens
2157ac492b9cSDavid Stevens /*
2158ac492b9cSDavid Stevens * The collapse has succeeded, so free the old pages.
2159ac492b9cSDavid Stevens */
2160ac492b9cSDavid Stevens list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2161ac492b9cSDavid Stevens list_del(&page->lru);
2162ac492b9cSDavid Stevens page->mapping = NULL;
2163ac492b9cSDavid Stevens ClearPageActive(page);
2164ac492b9cSDavid Stevens ClearPageUnevictable(page);
2165ac492b9cSDavid Stevens unlock_page(page);
2166a2e17cc2SDavid Stevens folio_put_refs(page_folio(page), 3);
2167ac492b9cSDavid Stevens }
2168ac492b9cSDavid Stevens
2169cae106ddSDavid Stevens goto out;
2170cae106ddSDavid Stevens
2171cae106ddSDavid Stevens rollback:
217277da9389SMatthew Wilcox /* Something went wrong: roll back page cache changes */
21732f55f070SMiaohe Lin if (nr_none) {
2174a2e17cc2SDavid Stevens xas_lock_irq(&xas);
2175aaa52e34SHugh Dickins mapping->nrpages -= nr_none;
217677da9389SMatthew Wilcox xas_unlock_irq(&xas);
2177509f0069SHugh Dickins shmem_uncharge(mapping->host, nr_none);
2178a2e17cc2SDavid Stevens }
2179a2e17cc2SDavid Stevens
2180a2e17cc2SDavid Stevens list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2181a2e17cc2SDavid Stevens list_del(&page->lru);
2182f3f0e1d2SKirill A. Shutemov unlock_page(page);
2183042a3082SHugh Dickins putback_lru_page(page);
2184a2e17cc2SDavid Stevens put_page(page);
2185f3f0e1d2SKirill A. Shutemov }
218612904d95SJiaqi Yan /*
218712904d95SJiaqi Yan * Undo the updates of filemap_nr_thps_inc for non-SHMEM
218812904d95SJiaqi Yan * file only. This undo is not needed unless failure is
218912904d95SJiaqi Yan * due to SCAN_COPY_MC.
219012904d95SJiaqi Yan */
219112904d95SJiaqi Yan if (!is_shmem && result == SCAN_COPY_MC) {
219212904d95SJiaqi Yan filemap_nr_thps_dec(mapping);
219312904d95SJiaqi Yan /*
219412904d95SJiaqi Yan * Paired with smp_mb() in do_dentry_open() to
219512904d95SJiaqi Yan * ensure the update to nr_thps is visible.
219612904d95SJiaqi Yan */
219712904d95SJiaqi Yan smp_mb();
219812904d95SJiaqi Yan }
219912904d95SJiaqi Yan
220036249a0bSMatthew Wilcox (Oracle) new_folio->mapping = NULL;
2201042a3082SHugh Dickins
220236249a0bSMatthew Wilcox (Oracle) folio_unlock(new_folio);
220336249a0bSMatthew Wilcox (Oracle) folio_put(new_folio);
2204f3f0e1d2SKirill A. Shutemov out:
2205f3f0e1d2SKirill A. Shutemov VM_BUG_ON(!list_empty(&pagelist));
2206*5f029be6SYang Shi trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
220750ad2f24SZach O'Keefe return result;
2208f3f0e1d2SKirill A. Shutemov }
2209f3f0e1d2SKirill A. Shutemov
221034488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
221134488399SZach O'Keefe struct file *file, pgoff_t start,
221234488399SZach O'Keefe struct collapse_control *cc)
2213f3f0e1d2SKirill A. Shutemov {
2214f3f0e1d2SKirill A. Shutemov struct page *page = NULL;
2215579c571eSSong Liu struct address_space *mapping = file->f_mapping;
221685b392dbSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start);
2217f3f0e1d2SKirill A. Shutemov int present, swap;
2218f3f0e1d2SKirill A. Shutemov int node = NUMA_NO_NODE;
2219f3f0e1d2SKirill A. Shutemov int result = SCAN_SUCCEED;
2220f3f0e1d2SKirill A. Shutemov
2221f3f0e1d2SKirill A. Shutemov present = 0;
2222f3f0e1d2SKirill A. Shutemov swap = 0;
222334d6b470SZach O'Keefe memset(cc->node_load, 0, sizeof(cc->node_load));
2224e031ff96SYang Shi nodes_clear(cc->alloc_nmask);
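	/*
	 * Walk the page cache entries for [start, start + HPAGE_PMD_NR)
	 * under RCU, tallying present pages and value (swap) entries to
	 * decide whether a collapse attempt is worthwhile.
	 */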
2225f3f0e1d2SKirill A. Shutemov rcu_read_lock();
222685b392dbSMatthew Wilcox xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
222785b392dbSMatthew Wilcox if (xas_retry(&xas, page))
2228f3f0e1d2SKirill A. Shutemov continue;
2229f3f0e1d2SKirill A. Shutemov
223085b392dbSMatthew Wilcox if (xa_is_value(page)) {
2231d8ea7cc8SZach O'Keefe ++swap;
2232d8ea7cc8SZach O'Keefe if (cc->is_khugepaged &&
2233d8ea7cc8SZach O'Keefe swap > khugepaged_max_ptes_swap) {
2234f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_SWAP_PTE;
2235e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2236f3f0e1d2SKirill A. Shutemov break;
2237f3f0e1d2SKirill A. Shutemov }
2238f3f0e1d2SKirill A. Shutemov continue;
2239f3f0e1d2SKirill A. Shutemov }
2240f3f0e1d2SKirill A. Shutemov
22416b24ca4aSMatthew Wilcox (Oracle) /*
224258ac9a89SZach O'Keefe * TODO: khugepaged should compact smaller compound pages
22436b24ca4aSMatthew Wilcox (Oracle)		 * into a PMD-sized page
22446b24ca4aSMatthew Wilcox (Oracle) */
2245f3f0e1d2SKirill A. Shutemov if (PageTransCompound(page)) {
224658ac9a89SZach O'Keefe struct page *head = compound_head(page);
224758ac9a89SZach O'Keefe
224858ac9a89SZach O'Keefe result = compound_order(head) == HPAGE_PMD_ORDER &&
224958ac9a89SZach O'Keefe head->index == start
225058ac9a89SZach O'Keefe /* Maybe PMD-mapped */
225158ac9a89SZach O'Keefe ? SCAN_PTE_MAPPED_HUGEPAGE
225258ac9a89SZach O'Keefe : SCAN_PAGE_COMPOUND;
225358ac9a89SZach O'Keefe /*
225458ac9a89SZach O'Keefe * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
225558ac9a89SZach O'Keefe * by the caller won't touch the page cache, and so
225658ac9a89SZach O'Keefe * it's safe to skip LRU and refcount checks before
225758ac9a89SZach O'Keefe * returning.
225858ac9a89SZach O'Keefe */
2259f3f0e1d2SKirill A. Shutemov break;
2260f3f0e1d2SKirill A. Shutemov }
2261f3f0e1d2SKirill A. Shutemov
2262f3f0e1d2SKirill A. Shutemov node = page_to_nid(page);
22637d2c4385SZach O'Keefe if (hpage_collapse_scan_abort(node, cc)) {
2264f3f0e1d2SKirill A. Shutemov result = SCAN_SCAN_ABORT;
2265f3f0e1d2SKirill A. Shutemov break;
2266f3f0e1d2SKirill A. Shutemov }
226734d6b470SZach O'Keefe cc->node_load[node]++;
2268f3f0e1d2SKirill A. Shutemov
2269f3f0e1d2SKirill A. Shutemov if (!PageLRU(page)) {
2270f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_LRU;
2271f3f0e1d2SKirill A. Shutemov break;
2272f3f0e1d2SKirill A. Shutemov }
2273f3f0e1d2SKirill A. Shutemov
227499cb0dbdSSong Liu if (page_count(page) !=
227599cb0dbdSSong Liu 1 + page_mapcount(page) + page_has_private(page)) {
2276f3f0e1d2SKirill A. Shutemov result = SCAN_PAGE_COUNT;
2277f3f0e1d2SKirill A. Shutemov break;
2278f3f0e1d2SKirill A. Shutemov }
2279f3f0e1d2SKirill A. Shutemov
2280f3f0e1d2SKirill A. Shutemov /*
2281f3f0e1d2SKirill A. Shutemov * We probably should check if the page is referenced here, but
2282f3f0e1d2SKirill A. Shutemov * nobody would transfer pte_young() to PageReferenced() for us.
2283f3f0e1d2SKirill A. Shutemov * And rmap walk here is just too costly...
2284f3f0e1d2SKirill A. Shutemov */
2285f3f0e1d2SKirill A. Shutemov
2286f3f0e1d2SKirill A. Shutemov present++;
2287f3f0e1d2SKirill A. Shutemov
2288f3f0e1d2SKirill A. Shutemov if (need_resched()) {
228985b392dbSMatthew Wilcox xas_pause(&xas);
2290f3f0e1d2SKirill A. Shutemov cond_resched_rcu();
2291f3f0e1d2SKirill A. Shutemov }
2292f3f0e1d2SKirill A. Shutemov }
2293f3f0e1d2SKirill A. Shutemov rcu_read_unlock();
2294f3f0e1d2SKirill A. Shutemov
2295f3f0e1d2SKirill A. Shutemov if (result == SCAN_SUCCEED) {
2296d8ea7cc8SZach O'Keefe if (cc->is_khugepaged &&
2297d8ea7cc8SZach O'Keefe present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2298f3f0e1d2SKirill A. Shutemov result = SCAN_EXCEED_NONE_PTE;
2299e9ea874aSYang Yang count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2300f3f0e1d2SKirill A. Shutemov } else {
230134488399SZach O'Keefe result = collapse_file(mm, addr, file, start, cc);
2302f3f0e1d2SKirill A. Shutemov }
2303f3f0e1d2SKirill A. Shutemov }
2304f3f0e1d2SKirill A. Shutemov
2305045634ffSGautam Menghani trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
230650ad2f24SZach O'Keefe return result;
2307f3f0e1d2SKirill A. Shutemov }
2308f3f0e1d2SKirill A. Shutemov #else
230934488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
231034488399SZach O'Keefe struct file *file, pgoff_t start,
231134488399SZach O'Keefe struct collapse_control *cc)
2312f3f0e1d2SKirill A. Shutemov {
2313f3f0e1d2SKirill A. Shutemov BUILD_BUG();
2314f3f0e1d2SKirill A. Shutemov }
2315f3f0e1d2SKirill A. Shutemov #endif
2316f3f0e1d2SKirill A. Shutemov
231750ad2f24SZach O'Keefe static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
231834d6b470SZach O'Keefe struct collapse_control *cc)
2319b46e756fSKirill A. Shutemov __releases(&khugepaged_mm_lock)
2320b46e756fSKirill A. Shutemov __acquires(&khugepaged_mm_lock)
2321b46e756fSKirill A. Shutemov {
232268540502SMatthew Wilcox (Oracle) struct vma_iterator vmi;
2323b26e2701SQi Zheng struct khugepaged_mm_slot *mm_slot;
2324b26e2701SQi Zheng struct mm_slot *slot;
2325b46e756fSKirill A. Shutemov struct mm_struct *mm;
2326b46e756fSKirill A. Shutemov struct vm_area_struct *vma;
2327b46e756fSKirill A. Shutemov int progress = 0;
2328b46e756fSKirill A. Shutemov
2329b46e756fSKirill A. Shutemov VM_BUG_ON(!pages);
233035f3aa39SLance Roy lockdep_assert_held(&khugepaged_mm_lock);
233150ad2f24SZach O'Keefe *result = SCAN_FAIL;
2332b46e756fSKirill A. Shutemov
2333b26e2701SQi Zheng if (khugepaged_scan.mm_slot) {
2334b46e756fSKirill A. Shutemov mm_slot = khugepaged_scan.mm_slot;
2335b26e2701SQi Zheng slot = &mm_slot->slot;
2336b26e2701SQi Zheng } else {
2337b26e2701SQi Zheng slot = list_entry(khugepaged_scan.mm_head.next,
2338b46e756fSKirill A. Shutemov struct mm_slot, mm_node);
2339b26e2701SQi Zheng mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2340b46e756fSKirill A. Shutemov khugepaged_scan.address = 0;
2341b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = mm_slot;
2342b46e756fSKirill A. Shutemov }
2343b46e756fSKirill A. Shutemov spin_unlock(&khugepaged_mm_lock);
2344b46e756fSKirill A. Shutemov
2345b26e2701SQi Zheng mm = slot->mm;
23463b454ad3SYang Shi /*
23473b454ad3SYang Shi	 * Don't wait for the semaphore (to avoid long wait times). Just move to
23483b454ad3SYang Shi * the next mm on the list.
23493b454ad3SYang Shi */
2350b46e756fSKirill A. Shutemov vma = NULL;
2351d8ed45c5SMichel Lespinasse if (unlikely(!mmap_read_trylock(mm)))
2352c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock;
2353b46e756fSKirill A. Shutemov
2354b46e756fSKirill A. Shutemov progress++;
235568540502SMatthew Wilcox (Oracle) if (unlikely(hpage_collapse_test_exit(mm)))
235668540502SMatthew Wilcox (Oracle) goto breakouterloop;
235768540502SMatthew Wilcox (Oracle)
235868540502SMatthew Wilcox (Oracle) vma_iter_init(&vmi, mm, khugepaged_scan.address);
235968540502SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) {
2360b46e756fSKirill A. Shutemov unsigned long hstart, hend;
2361b46e756fSKirill A. Shutemov
2362b46e756fSKirill A. Shutemov cond_resched();
23637d2c4385SZach O'Keefe if (unlikely(hpage_collapse_test_exit(mm))) {
2364b46e756fSKirill A. Shutemov progress++;
2365b46e756fSKirill A. Shutemov break;
2366b46e756fSKirill A. Shutemov }
2367a7f4e6e4SZach O'Keefe if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2368b46e756fSKirill A. Shutemov skip:
2369b46e756fSKirill A. Shutemov progress++;
2370b46e756fSKirill A. Shutemov continue;
2371b46e756fSKirill A. Shutemov }
23724fa6893fSYang Shi hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
23734fa6893fSYang Shi hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2374b46e756fSKirill A. Shutemov if (khugepaged_scan.address > hend)
2375b46e756fSKirill A. Shutemov goto skip;
2376b46e756fSKirill A. Shutemov if (khugepaged_scan.address < hstart)
2377b46e756fSKirill A. Shutemov khugepaged_scan.address = hstart;
2378b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2379b46e756fSKirill A. Shutemov
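		/*
		 * Scan the VMA in HPAGE_PMD_SIZE strides; each iteration
		 * of the loop below considers one PMD-aligned range for
		 * collapse, dropping mmap_lock where required.
		 */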
2380b46e756fSKirill A. Shutemov while (khugepaged_scan.address < hend) {
238150ad2f24SZach O'Keefe bool mmap_locked = true;
238250ad2f24SZach O'Keefe
2383b46e756fSKirill A. Shutemov cond_resched();
23847d2c4385SZach O'Keefe if (unlikely(hpage_collapse_test_exit(mm)))
2385b46e756fSKirill A. Shutemov goto breakouterloop;
2386b46e756fSKirill A. Shutemov
2387b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.address < hstart ||
2388b46e756fSKirill A. Shutemov khugepaged_scan.address + HPAGE_PMD_SIZE >
2389b46e756fSKirill A. Shutemov hend);
239099cb0dbdSSong Liu if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2391396bcc52SMatthew Wilcox (Oracle) struct file *file = get_file(vma->vm_file);
2392f3f0e1d2SKirill A. Shutemov pgoff_t pgoff = linear_page_index(vma,
2393f3f0e1d2SKirill A. Shutemov khugepaged_scan.address);
239499cb0dbdSSong Liu
2395d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
239650ad2f24SZach O'Keefe mmap_locked = false;
2397d50791c2SHugh Dickins *result = hpage_collapse_scan_file(mm,
2398d50791c2SHugh Dickins khugepaged_scan.address, file, pgoff, cc);
2399f3f0e1d2SKirill A. Shutemov fput(file);
2400d50791c2SHugh Dickins if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2401d50791c2SHugh Dickins mmap_read_lock(mm);
2402d50791c2SHugh Dickins if (hpage_collapse_test_exit(mm))
2403d50791c2SHugh Dickins goto breakouterloop;
2404d50791c2SHugh Dickins *result = collapse_pte_mapped_thp(mm,
2405d50791c2SHugh Dickins khugepaged_scan.address, false);
2406d50791c2SHugh Dickins if (*result == SCAN_PMD_MAPPED)
2407d50791c2SHugh Dickins *result = SCAN_SUCCEED;
2408d50791c2SHugh Dickins mmap_read_unlock(mm);
2409d50791c2SHugh Dickins }
2410f3f0e1d2SKirill A. Shutemov } else {
24117d2c4385SZach O'Keefe *result = hpage_collapse_scan_pmd(mm, vma,
2412d50791c2SHugh Dickins khugepaged_scan.address, &mmap_locked, cc);
2413f3f0e1d2SKirill A. Shutemov }
241458ac9a89SZach O'Keefe
2415d50791c2SHugh Dickins if (*result == SCAN_SUCCEED)
241650ad2f24SZach O'Keefe ++khugepaged_pages_collapsed;
241758ac9a89SZach O'Keefe
2418b46e756fSKirill A. Shutemov /* move to next address */
2419b46e756fSKirill A. Shutemov khugepaged_scan.address += HPAGE_PMD_SIZE;
2420b46e756fSKirill A. Shutemov progress += HPAGE_PMD_NR;
242150ad2f24SZach O'Keefe if (!mmap_locked)
242250ad2f24SZach O'Keefe /*
242350ad2f24SZach O'Keefe				 * We released mmap_lock, so break out of the loop. Note
242450ad2f24SZach O'Keefe * that we drop mmap_lock before all hugepage
242550ad2f24SZach O'Keefe * allocations, so if allocation fails, we are
242650ad2f24SZach O'Keefe * guaranteed to break here and report the
242750ad2f24SZach O'Keefe * correct result back to caller.
242850ad2f24SZach O'Keefe */
2429c1e8d7c6SMichel Lespinasse goto breakouterloop_mmap_lock;
2430b46e756fSKirill A. Shutemov if (progress >= pages)
2431b46e756fSKirill A. Shutemov goto breakouterloop;
2432b46e756fSKirill A. Shutemov }
2433b46e756fSKirill A. Shutemov }
2434b46e756fSKirill A. Shutemov breakouterloop:
2435d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2436c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2437b46e756fSKirill A. Shutemov
2438b46e756fSKirill A. Shutemov spin_lock(&khugepaged_mm_lock);
2439b46e756fSKirill A. Shutemov VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2440b46e756fSKirill A. Shutemov /*
2441b46e756fSKirill A. Shutemov * Release the current mm_slot if this mm is about to die, or
2442b46e756fSKirill A. Shutemov * if we scanned all vmas of this mm.
2443b46e756fSKirill A. Shutemov */
24447d2c4385SZach O'Keefe if (hpage_collapse_test_exit(mm) || !vma) {
2445b46e756fSKirill A. Shutemov /*
2446b46e756fSKirill A. Shutemov * Make sure that if mm_users is reaching zero while
2447b46e756fSKirill A. Shutemov * khugepaged runs here, khugepaged_exit will find
2448b46e756fSKirill A. Shutemov * mm_slot not pointing to the exiting mm.
2449b46e756fSKirill A. Shutemov */
2450b26e2701SQi Zheng if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2451b26e2701SQi Zheng slot = list_entry(slot->mm_node.next,
2452b46e756fSKirill A. Shutemov struct mm_slot, mm_node);
2453b26e2701SQi Zheng khugepaged_scan.mm_slot =
2454b26e2701SQi Zheng mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2455b46e756fSKirill A. Shutemov khugepaged_scan.address = 0;
2456b46e756fSKirill A. Shutemov } else {
2457b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = NULL;
2458b46e756fSKirill A. Shutemov khugepaged_full_scans++;
2459b46e756fSKirill A. Shutemov }
2460b46e756fSKirill A. Shutemov
2461b46e756fSKirill A. Shutemov collect_mm_slot(mm_slot);
2462b46e756fSKirill A. Shutemov }
2463b46e756fSKirill A. Shutemov
2464b46e756fSKirill A. Shutemov return progress;
2465b46e756fSKirill A. Shutemov }
2466b46e756fSKirill A. Shutemov
2467b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2468b46e756fSKirill A. Shutemov {
2469b46e756fSKirill A. Shutemov return !list_empty(&khugepaged_scan.mm_head) &&
24701064026bSYang Shi hugepage_flags_enabled();
2471b46e756fSKirill A. Shutemov }
2472b46e756fSKirill A. Shutemov
2473b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2474b46e756fSKirill A. Shutemov {
2475b46e756fSKirill A. Shutemov return !list_empty(&khugepaged_scan.mm_head) ||
2476b46e756fSKirill A. Shutemov kthread_should_stop();
2477b46e756fSKirill A. Shutemov }
2478b46e756fSKirill A. Shutemov
247934d6b470SZach O'Keefe static void khugepaged_do_scan(struct collapse_control *cc)
2480b46e756fSKirill A. Shutemov {
2481b46e756fSKirill A. Shutemov unsigned int progress = 0, pass_through_head = 0;
248289dc6a96SYanfei Xu unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2483b46e756fSKirill A. Shutemov bool wait = true;
248450ad2f24SZach O'Keefe int result = SCAN_SUCCEED;
2485b46e756fSKirill A. Shutemov
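	/*
	 * Drain the per-CPU LRU caches so recently-added pages reach
	 * the LRU lists and can be isolated for collapse.
	 */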
2486a980df33SKirill A. Shutemov lru_add_drain_all();
2487a980df33SKirill A. Shutemov
2488c6a7f445SYang Shi while (true) {
2489b46e756fSKirill A. Shutemov cond_resched();
2490b46e756fSKirill A. Shutemov
2491b46e756fSKirill A. Shutemov if (unlikely(kthread_should_stop() || try_to_freeze()))
2492b46e756fSKirill A. Shutemov break;
2493b46e756fSKirill A. Shutemov
2494b46e756fSKirill A. Shutemov spin_lock(&khugepaged_mm_lock);
2495b46e756fSKirill A. Shutemov if (!khugepaged_scan.mm_slot)
2496b46e756fSKirill A. Shutemov pass_through_head++;
2497b46e756fSKirill A. Shutemov if (khugepaged_has_work() &&
2498b46e756fSKirill A. Shutemov pass_through_head < 2)
2499b46e756fSKirill A. Shutemov progress += khugepaged_scan_mm_slot(pages - progress,
250050ad2f24SZach O'Keefe &result, cc);
2501b46e756fSKirill A. Shutemov else
2502b46e756fSKirill A. Shutemov progress = pages;
2503b46e756fSKirill A. Shutemov spin_unlock(&khugepaged_mm_lock);
2504b46e756fSKirill A. Shutemov
2505c6a7f445SYang Shi if (progress >= pages)
2506c6a7f445SYang Shi break;
2507c6a7f445SYang Shi
250850ad2f24SZach O'Keefe if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2509c6a7f445SYang Shi /*
2510c6a7f445SYang Shi			 * If allocation fails the first time, sleep for
2511c6a7f445SYang Shi			 * a while; if it fails again, cancel the scan.
2512c6a7f445SYang Shi */
2513c6a7f445SYang Shi if (!wait)
2514c6a7f445SYang Shi break;
2515c6a7f445SYang Shi wait = false;
2516c6a7f445SYang Shi khugepaged_alloc_sleep();
2517c6a7f445SYang Shi }
2518c6a7f445SYang Shi }
2519b46e756fSKirill A. Shutemov }
2520b46e756fSKirill A. Shutemov
2521b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2522b46e756fSKirill A. Shutemov {
2523b46e756fSKirill A. Shutemov return kthread_should_stop() ||
2524b46e756fSKirill A. Shutemov time_after_eq(jiffies, khugepaged_sleep_expire);
2525b46e756fSKirill A. Shutemov }
2526b46e756fSKirill A. Shutemov
2527b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2528b46e756fSKirill A. Shutemov {
2529b46e756fSKirill A. Shutemov if (khugepaged_has_work()) {
2530b46e756fSKirill A. Shutemov const unsigned long scan_sleep_jiffies =
2531b46e756fSKirill A. Shutemov msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2532b46e756fSKirill A. Shutemov
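		/* A zero scan_sleep_millisecs means rescan immediately, with no sleep. */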
2533b46e756fSKirill A. Shutemov if (!scan_sleep_jiffies)
2534b46e756fSKirill A. Shutemov return;
2535b46e756fSKirill A. Shutemov
2536b46e756fSKirill A. Shutemov khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2537b46e756fSKirill A. Shutemov wait_event_freezable_timeout(khugepaged_wait,
2538b46e756fSKirill A. Shutemov khugepaged_should_wakeup(),
2539b46e756fSKirill A. Shutemov scan_sleep_jiffies);
2540b46e756fSKirill A. Shutemov return;
2541b46e756fSKirill A. Shutemov }
2542b46e756fSKirill A. Shutemov
25431064026bSYang Shi if (hugepage_flags_enabled())
2544b46e756fSKirill A. Shutemov wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2545b46e756fSKirill A. Shutemov }
2546b46e756fSKirill A. Shutemov
2547b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2548b46e756fSKirill A. Shutemov {
2549b26e2701SQi Zheng struct khugepaged_mm_slot *mm_slot;
2550b46e756fSKirill A. Shutemov
2551b46e756fSKirill A. Shutemov set_freezable();
2552b46e756fSKirill A. Shutemov set_user_nice(current, MAX_NICE);
2553b46e756fSKirill A. Shutemov
2554b46e756fSKirill A. Shutemov while (!kthread_should_stop()) {
255534d6b470SZach O'Keefe khugepaged_do_scan(&khugepaged_collapse_control);
2556b46e756fSKirill A. Shutemov khugepaged_wait_work();
2557b46e756fSKirill A. Shutemov }
2558b46e756fSKirill A. Shutemov
2559b46e756fSKirill A. Shutemov spin_lock(&khugepaged_mm_lock);
2560b46e756fSKirill A. Shutemov mm_slot = khugepaged_scan.mm_slot;
2561b46e756fSKirill A. Shutemov khugepaged_scan.mm_slot = NULL;
2562b46e756fSKirill A. Shutemov if (mm_slot)
2563b46e756fSKirill A. Shutemov collect_mm_slot(mm_slot);
2564b46e756fSKirill A. Shutemov spin_unlock(&khugepaged_mm_lock);
2565b46e756fSKirill A. Shutemov return 0;
2566b46e756fSKirill A. Shutemov }
2567b46e756fSKirill A. Shutemov
2568b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2569b46e756fSKirill A. Shutemov {
2570b46e756fSKirill A. Shutemov struct zone *zone;
2571b46e756fSKirill A. Shutemov int nr_zones = 0;
2572b46e756fSKirill A. Shutemov unsigned long recommended_min;
2573b46e756fSKirill A. Shutemov
25741064026bSYang Shi if (!hugepage_flags_enabled()) {
2575bd3400eaSLiangcai Fan calculate_min_free_kbytes();
2576bd3400eaSLiangcai Fan goto update_wmarks;
2577bd3400eaSLiangcai Fan }
2578bd3400eaSLiangcai Fan
2579b7d349c7SJoonsoo Kim for_each_populated_zone(zone) {
2580b7d349c7SJoonsoo Kim /*
2581b7d349c7SJoonsoo Kim * We don't need to worry about fragmentation of
2582b7d349c7SJoonsoo Kim * ZONE_MOVABLE since it only has movable pages.
2583b7d349c7SJoonsoo Kim */
2584b7d349c7SJoonsoo Kim if (zone_idx(zone) > gfp_zone(GFP_USER))
2585b7d349c7SJoonsoo Kim continue;
2586b7d349c7SJoonsoo Kim
2587b46e756fSKirill A. Shutemov nr_zones++;
2588b7d349c7SJoonsoo Kim }
2589b46e756fSKirill A. Shutemov
2590b46e756fSKirill A. Shutemov /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2591b46e756fSKirill A. Shutemov recommended_min = pageblock_nr_pages * nr_zones * 2;
2592b46e756fSKirill A. Shutemov
2593b46e756fSKirill A. Shutemov /*
2594b46e756fSKirill A. Shutemov * Make sure that on average at least two pageblocks are almost free
2595b46e756fSKirill A. Shutemov * of another type, one for a migratetype to fall back to and a
2596b46e756fSKirill A. Shutemov	 * second to avoid subsequent fallbacks of other types. There are 3
2597b46e756fSKirill A. Shutemov * MIGRATE_TYPES we care about.
2598b46e756fSKirill A. Shutemov */
2599b46e756fSKirill A. Shutemov recommended_min += pageblock_nr_pages * nr_zones *
2600b46e756fSKirill A. Shutemov MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
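	/*
	 * Illustrative arithmetic (assuming a typical x86_64 config with
	 * 4KiB pages, 2MiB pageblocks of 512 pages, and
	 * MIGRATE_PCPTYPES == 3): for a single zone this comes to
	 * 2 * 512 + 3 * 3 * 512 = 5632 pages, roughly 22MiB reserved.
	 */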
2601b46e756fSKirill A. Shutemov
2602b46e756fSKirill A. Shutemov	/* don't ever allow reserving more than 5% of lowmem */
2603b46e756fSKirill A. Shutemov recommended_min = min(recommended_min,
2604b46e756fSKirill A. Shutemov (unsigned long) nr_free_buffer_pages() / 20);
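	/* convert from pages to kilobytes, as min_free_kbytes expects */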
2605b46e756fSKirill A. Shutemov recommended_min <<= (PAGE_SHIFT-10);
2606b46e756fSKirill A. Shutemov
2607b46e756fSKirill A. Shutemov if (recommended_min > min_free_kbytes) {
2608b46e756fSKirill A. Shutemov if (user_min_free_kbytes >= 0)
2609b46e756fSKirill A. Shutemov pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2610b46e756fSKirill A. Shutemov min_free_kbytes, recommended_min);
2611b46e756fSKirill A. Shutemov
2612b46e756fSKirill A. Shutemov min_free_kbytes = recommended_min;
2613b46e756fSKirill A. Shutemov }
2614bd3400eaSLiangcai Fan
2615bd3400eaSLiangcai Fan update_wmarks:
2616b46e756fSKirill A. Shutemov setup_per_zone_wmarks();
2617b46e756fSKirill A. Shutemov }
2618b46e756fSKirill A. Shutemov
2619b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2620b46e756fSKirill A. Shutemov {
2621b46e756fSKirill A. Shutemov int err = 0;
2622b46e756fSKirill A. Shutemov
2623b46e756fSKirill A. Shutemov mutex_lock(&khugepaged_mutex);
26241064026bSYang Shi if (hugepage_flags_enabled()) {
2625b46e756fSKirill A. Shutemov if (!khugepaged_thread)
2626b46e756fSKirill A. Shutemov khugepaged_thread = kthread_run(khugepaged, NULL,
2627b46e756fSKirill A. Shutemov "khugepaged");
2628b46e756fSKirill A. Shutemov if (IS_ERR(khugepaged_thread)) {
2629b46e756fSKirill A. Shutemov pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2630b46e756fSKirill A. Shutemov err = PTR_ERR(khugepaged_thread);
2631b46e756fSKirill A. Shutemov khugepaged_thread = NULL;
2632b46e756fSKirill A. Shutemov goto fail;
2633b46e756fSKirill A. Shutemov }
2634b46e756fSKirill A. Shutemov
2635b46e756fSKirill A. Shutemov if (!list_empty(&khugepaged_scan.mm_head))
2636b46e756fSKirill A. Shutemov wake_up_interruptible(&khugepaged_wait);
2637b46e756fSKirill A. Shutemov } else if (khugepaged_thread) {
2638b46e756fSKirill A. Shutemov kthread_stop(khugepaged_thread);
2639b46e756fSKirill A. Shutemov khugepaged_thread = NULL;
2640b46e756fSKirill A. Shutemov }
2641bd3400eaSLiangcai Fan set_recommended_min_free_kbytes();
2642b46e756fSKirill A. Shutemov fail:
2643b46e756fSKirill A. Shutemov mutex_unlock(&khugepaged_mutex);
2644b46e756fSKirill A. Shutemov return err;
2645b46e756fSKirill A. Shutemov }
26464aab2be0SVijay Balakrishna
26474aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
26484aab2be0SVijay Balakrishna {
26494aab2be0SVijay Balakrishna mutex_lock(&khugepaged_mutex);
26501064026bSYang Shi if (hugepage_flags_enabled() && khugepaged_thread)
26514aab2be0SVijay Balakrishna set_recommended_min_free_kbytes();
26524aab2be0SVijay Balakrishna mutex_unlock(&khugepaged_mutex);
26534aab2be0SVijay Balakrishna }
26547d8faaf1SZach O'Keefe
265557e9cc50SJohannes Weiner bool current_is_khugepaged(void)
265657e9cc50SJohannes Weiner {
265757e9cc50SJohannes Weiner return kthread_func(current) == khugepaged;
265857e9cc50SJohannes Weiner }
265957e9cc50SJohannes Weiner
26607d8faaf1SZach O'Keefe static int madvise_collapse_errno(enum scan_result r)
26617d8faaf1SZach O'Keefe {
26627d8faaf1SZach O'Keefe /*
26637d8faaf1SZach O'Keefe * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
26647d8faaf1SZach O'Keefe	 * actionable feedback to the caller, so it may take an appropriate
26657d8faaf1SZach O'Keefe * fallback measure depending on the nature of the failure.
26667d8faaf1SZach O'Keefe */
26677d8faaf1SZach O'Keefe switch (r) {
26687d8faaf1SZach O'Keefe case SCAN_ALLOC_HUGE_PAGE_FAIL:
26697d8faaf1SZach O'Keefe return -ENOMEM;
26707d8faaf1SZach O'Keefe case SCAN_CGROUP_CHARGE_FAIL:
2671ac492b9cSDavid Stevens case SCAN_EXCEED_NONE_PTE:
26727d8faaf1SZach O'Keefe return -EBUSY;
26737d8faaf1SZach O'Keefe	/* Resource temporarily unavailable - trying again might succeed */
2674ae63c898SZach O'Keefe case SCAN_PAGE_COUNT:
26757d8faaf1SZach O'Keefe case SCAN_PAGE_LOCK:
26767d8faaf1SZach O'Keefe case SCAN_PAGE_LRU:
26770f3e2a2cSZach O'Keefe case SCAN_DEL_PAGE_LRU:
2678ac492b9cSDavid Stevens case SCAN_PAGE_FILLED:
26797d8faaf1SZach O'Keefe return -EAGAIN;
26807d8faaf1SZach O'Keefe /*
26817d8faaf1SZach O'Keefe	 * Other: trying again is unlikely to succeed, or the error is
26827d8faaf1SZach O'Keefe	 * intrinsic to the specified memory range. khugepaged likely
26837d8faaf1SZach O'Keefe	 * won't be able to collapse it either.
26847d8faaf1SZach O'Keefe */
26857d8faaf1SZach O'Keefe default:
26867d8faaf1SZach O'Keefe return -EINVAL;
26877d8faaf1SZach O'Keefe }
26887d8faaf1SZach O'Keefe }
26897d8faaf1SZach O'Keefe
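/*
 * Entry point for MADV_COLLAPSE: synchronously attempt to collapse every
 * PMD-sized, PMD-aligned region in [start, end).  A minimal userspace
 * sketch (illustrative only, not part of this file), assuming a
 * THP-eligible anonymous mapping:
 *
 *	size_t len = 2UL << 20;
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(buf, 1, len);
 *	if (madvise(buf, len, MADV_COLLAPSE))
 *		perror("MADV_COLLAPSE");
 */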
26907d8faaf1SZach O'Keefe int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
26917d8faaf1SZach O'Keefe unsigned long start, unsigned long end)
26927d8faaf1SZach O'Keefe {
26937d8faaf1SZach O'Keefe struct collapse_control *cc;
26947d8faaf1SZach O'Keefe struct mm_struct *mm = vma->vm_mm;
26957d8faaf1SZach O'Keefe unsigned long hstart, hend, addr;
26967d8faaf1SZach O'Keefe int thps = 0, last_fail = SCAN_FAIL;
26977d8faaf1SZach O'Keefe bool mmap_locked = true;
26987d8faaf1SZach O'Keefe
26997d8faaf1SZach O'Keefe BUG_ON(vma->vm_start > start);
27007d8faaf1SZach O'Keefe BUG_ON(vma->vm_end < end);
27017d8faaf1SZach O'Keefe
27027d8faaf1SZach O'Keefe *prev = vma;
27037d8faaf1SZach O'Keefe
27047d8faaf1SZach O'Keefe if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
27057d8faaf1SZach O'Keefe return -EINVAL;
27067d8faaf1SZach O'Keefe
27077d8faaf1SZach O'Keefe cc = kmalloc(sizeof(*cc), GFP_KERNEL);
27087d8faaf1SZach O'Keefe if (!cc)
27097d8faaf1SZach O'Keefe return -ENOMEM;
27107d8faaf1SZach O'Keefe cc->is_khugepaged = false;
27117d8faaf1SZach O'Keefe
27127d8faaf1SZach O'Keefe mmgrab(mm);
27137d8faaf1SZach O'Keefe lru_add_drain_all();
27147d8faaf1SZach O'Keefe
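	/*
	 * Round start up and end down to PMD boundaries: only ranges
	 * that fully cover a PMD-sized region are collapse candidates.
	 */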
27157d8faaf1SZach O'Keefe hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
27167d8faaf1SZach O'Keefe hend = end & HPAGE_PMD_MASK;
27177d8faaf1SZach O'Keefe
27187d8faaf1SZach O'Keefe for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
27197d8faaf1SZach O'Keefe int result = SCAN_FAIL;
27207d8faaf1SZach O'Keefe
27217d8faaf1SZach O'Keefe if (!mmap_locked) {
27227d8faaf1SZach O'Keefe cond_resched();
27237d8faaf1SZach O'Keefe mmap_read_lock(mm);
27247d8faaf1SZach O'Keefe mmap_locked = true;
272534488399SZach O'Keefe result = hugepage_vma_revalidate(mm, addr, false, &vma,
272634488399SZach O'Keefe cc);
27277d8faaf1SZach O'Keefe if (result != SCAN_SUCCEED) {
27287d8faaf1SZach O'Keefe last_fail = result;
27297d8faaf1SZach O'Keefe goto out_nolock;
27307d8faaf1SZach O'Keefe }
27314d24de94SYang Shi
273252dc0310SZach O'Keefe hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
27337d8faaf1SZach O'Keefe }
27347d8faaf1SZach O'Keefe mmap_assert_locked(mm);
27357d8faaf1SZach O'Keefe memset(cc->node_load, 0, sizeof(cc->node_load));
2736e031ff96SYang Shi nodes_clear(cc->alloc_nmask);
273734488399SZach O'Keefe if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
273834488399SZach O'Keefe struct file *file = get_file(vma->vm_file);
273934488399SZach O'Keefe pgoff_t pgoff = linear_page_index(vma, addr);
274034488399SZach O'Keefe
274134488399SZach O'Keefe mmap_read_unlock(mm);
274234488399SZach O'Keefe mmap_locked = false;
274334488399SZach O'Keefe result = hpage_collapse_scan_file(mm, addr, file, pgoff,
27447d2c4385SZach O'Keefe cc);
274534488399SZach O'Keefe fput(file);
274634488399SZach O'Keefe } else {
274734488399SZach O'Keefe result = hpage_collapse_scan_pmd(mm, vma, addr,
274834488399SZach O'Keefe &mmap_locked, cc);
274934488399SZach O'Keefe }
27507d8faaf1SZach O'Keefe if (!mmap_locked)
27517d8faaf1SZach O'Keefe *prev = NULL; /* Tell caller we dropped mmap_lock */
27527d8faaf1SZach O'Keefe
275334488399SZach O'Keefe handle_result:
27547d8faaf1SZach O'Keefe switch (result) {
27557d8faaf1SZach O'Keefe case SCAN_SUCCEED:
27567d8faaf1SZach O'Keefe case SCAN_PMD_MAPPED:
27577d8faaf1SZach O'Keefe ++thps;
27587d8faaf1SZach O'Keefe break;
275934488399SZach O'Keefe case SCAN_PTE_MAPPED_HUGEPAGE:
276034488399SZach O'Keefe BUG_ON(mmap_locked);
276134488399SZach O'Keefe BUG_ON(*prev);
27621043173eSHugh Dickins mmap_read_lock(mm);
276334488399SZach O'Keefe result = collapse_pte_mapped_thp(mm, addr, true);
27641043173eSHugh Dickins mmap_read_unlock(mm);
276534488399SZach O'Keefe goto handle_result;
27667d8faaf1SZach O'Keefe		/* Whitelisted set of results where continuing is OK */
27677d8faaf1SZach O'Keefe case SCAN_PMD_NULL:
27687d8faaf1SZach O'Keefe case SCAN_PTE_NON_PRESENT:
27697d8faaf1SZach O'Keefe case SCAN_PTE_UFFD_WP:
27707d8faaf1SZach O'Keefe case SCAN_PAGE_RO:
27717d8faaf1SZach O'Keefe case SCAN_LACK_REFERENCED_PAGE:
27727d8faaf1SZach O'Keefe case SCAN_PAGE_NULL:
27737d8faaf1SZach O'Keefe case SCAN_PAGE_COUNT:
27747d8faaf1SZach O'Keefe case SCAN_PAGE_LOCK:
27757d8faaf1SZach O'Keefe case SCAN_PAGE_COMPOUND:
27767d8faaf1SZach O'Keefe case SCAN_PAGE_LRU:
27770f3e2a2cSZach O'Keefe case SCAN_DEL_PAGE_LRU:
27787d8faaf1SZach O'Keefe last_fail = result;
27797d8faaf1SZach O'Keefe break;
27807d8faaf1SZach O'Keefe default:
27817d8faaf1SZach O'Keefe last_fail = result;
27827d8faaf1SZach O'Keefe /* Other error, exit */
27837d8faaf1SZach O'Keefe goto out_maybelock;
27847d8faaf1SZach O'Keefe }
27857d8faaf1SZach O'Keefe }
27867d8faaf1SZach O'Keefe
27877d8faaf1SZach O'Keefe out_maybelock:
27887d8faaf1SZach O'Keefe /* Caller expects us to hold mmap_lock on return */
27897d8faaf1SZach O'Keefe if (!mmap_locked)
27907d8faaf1SZach O'Keefe mmap_read_lock(mm);
27917d8faaf1SZach O'Keefe out_nolock:
27927d8faaf1SZach O'Keefe mmap_assert_locked(mm);
27937d8faaf1SZach O'Keefe mmdrop(mm);
27947d8faaf1SZach O'Keefe kfree(cc);
27957d8faaf1SZach O'Keefe
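	/*
	 * Report success only if every PMD-sized range in [hstart, hend)
	 * now maps a huge page; otherwise translate the last failure
	 * into an errno.
	 */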
27967d8faaf1SZach O'Keefe return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
27977d8faaf1SZach O'Keefe : madvise_collapse_errno(last_fail);
27987d8faaf1SZach O'Keefe }
2799