/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
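
/*
 * Illustrative mapping (a sketch; mm/huge_memory.c is authoritative):
 * these bits back the sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/.  Writing "always" to "enabled"
 * sets TRANSPARENT_HUGEPAGE_FLAG, "madvise" sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, and "never" clears both; the
 * _DEFRAG_ bits back the "defrag" file in the same way.
 */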

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
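
/*
 * Worked example (assuming x86-64 with 4KiB base pages, where PMD_SHIFT
 * is 21 and PUD_SHIFT is 30): HPAGE_PMD_SIZE is 2MiB, HPAGE_PMD_ORDER is
 * 21 - 12 = 9, so HPAGE_PMD_NR is 512 base pages per PMD-sized hugepage;
 * HPAGE_PUD_SIZE is correspondingly 1GiB.
 */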

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))

/*
 * Do the following checks:
 *   - For a file vma, check whether the linear page offset of the vma is
 *     HPAGE_PMD_NR-aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check whether haddr falls within an aligned
 *     HPAGE_PMD_SIZE area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
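
/*
 * Usage sketch (illustrative only, not lifted from a particular caller):
 * an anonymous fault path would gate its PMD-sized fast path on this
 * helper before attempting a huge fault, e.g.
 *
 *	if (vma_is_anonymous(vmf->vma) &&
 *	    transhuge_vma_suitable(vmf->vma, vmf->address))
 *		ret = do_huge_pmd_anonymous_page(vmf);
 */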

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);
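
/*
 * Note on the boolean parameters above (a summary of the behaviour in
 * mm/huge_memory.c; hugepage_vma_check() there is authoritative): @smaps
 * is set when called from the smaps code to report THP eligibility,
 * @in_pf when called from the page fault path, and @enforce_sysfs
 * selects whether the sysfs "enabled" setting is honoured (MADV_COLLAPSE,
 * for one, passes false).
 */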

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

static inline bool vma_thp_disabled(struct vm_area_struct *vma,
		unsigned long vm_flags)
{
	/*
	 * Explicitly disabled through madvise or prctl; in addition, some
	 * architectures may disable THP for particular mappings (for
	 * example, s390 KVM).
	 */
	return (vm_flags & VM_NOHUGEPAGE) ||
	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}

static inline bool thp_disabled_by_hw(void)
{
	/* True if hardware/firmware has marked hugepage support disabled. */
	return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
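
/*
 * Usage note (a sketch of the typical calling convention; the definition
 * in mm/huge_memory.c is authoritative): the caller holds the page lock
 * across the call, and a nonzero return means the page was not split:
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 *	if (ret)
 *		handle_split_failure();	(page is still huge)
 */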
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
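
/*
 * Usage sketch (illustrative): because of the guard above, the macro is
 * safe to invoke unconditionally; __split_huge_pmd() only runs when *pmd
 * may genuinely be huge (transparent huge, devmap, or a PMD-level swap
 * entry):
 *
 *	split_huge_pmd(vma, pmd, address);
 */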

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

/*
 * A PMD that is neither none nor present holds a PMD-level swap entry,
 * such as a THP migration entry.
 */
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
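
/*
 * Locking sketch (an illustrative pattern mirroring callers in mm/): a
 * non-NULL return means *pmd (or *pud) was huge and its page-table lock
 * is now held, and the caller must drop it:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the now-stable huge pmd ...
 *		spin_unlock(ptl);
 *	} else {
 *		... not huge (or it raced away); take the PTE-level path ...
 *	}
 */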

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
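
/*
 * Refcounting sketch (illustrative; mm/huge_memory.c is authoritative):
 * the huge zero page is a lazily allocated global shared between mms.
 * mm_get_huge_zero_page() takes a reference on behalf of the mm on first
 * use and returns the page; mm_put_huge_zero_page() drops that reference
 * at mm teardown, letting the shrinker free the page once no mm uses it.
 */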

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void folio_prep_large_rmappable(struct folio *folio) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP
 * swap-out due to implementation limitations (such as arm64 with MTE)
 * can override this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
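
/*
 * For instance (as of this revision; the arch code is authoritative),
 * arm64 overrides this when MTE is in use, since tag metadata cannot be
 * preserved across THP swap-out:
 *
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 */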

#endif /* _LINUX_HUGE_MM_H */