xref: /openbmc/linux/mm/internal.h (revision 6aeadf78)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/rmap.h>
14 #include <linux/tracepoint-defs.h>
15 
16 struct folio_batch;
17 
18 /*
19  * The set of flags that only affect watermark checking and reclaim
20  * behaviour. This is used by the MM to obey the caller constraints
21  * about IO, FS and watermark checking while ignoring placement
22  * hints such as HIGHMEM usage.
23  */
24 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
25 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
26 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
27 			__GFP_NOLOCKDEP)
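
/*
 * Illustrative use (sketch, not part of this header): a nested allocation
 * typically passes only the reclaim-relevant bits of its caller's gfp down
 * to the page allocator, e.g.
 *
 *	page = alloc_pages(gfp & GFP_RECLAIM_MASK, order);
 */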
28 
29 /* The GFP flags allowed during early boot */
30 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
31 
32 /* Control allocation cpuset and node placement constraints */
33 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
34 
35 /* Do not use these with a slab allocator */
36 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
37 
38 /*
39  * Different from WARN_ON_ONCE(), no warning will be issued
40  * when we specify __GFP_NOWARN.
41  */
42 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
43 	static bool __section(".data.once") __warned;			\
44 	int __ret_warn_once = !!(cond);					\
45 									\
46 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
47 		__warned = true;					\
48 		WARN_ON(1);						\
49 	}								\
50 	unlikely(__ret_warn_once);					\
51 })
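
/*
 * Illustrative use (sketch, not part of this header): an allocator entry
 * point can refuse an oversized request while staying silent for callers
 * that passed __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
 *		return NULL;
 */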
52 
53 void page_writeback_init(void);
54 
55 /*
56  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
57  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
58  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
59  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
60  */
61 #define COMPOUND_MAPPED		0x800000
62 #define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
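
/*
 * Illustrative arithmetic: a 2MB THP mapped by PTEs of all 512 of its 4kB
 * pages raises _nr_pages_mapped to at most 512, far below COMPOUND_MAPPED
 * (0x800000); a PMD mapping adds the COMPOUND_MAPPED bit itself, so the
 * two contributions never collide.
 */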
63 
64 /*
65  * How many individual pages have an elevated _mapcount.  Excludes
66  * the folio's entire_mapcount.
67  */
68 static inline int folio_nr_pages_mapped(struct folio *folio)
69 {
70 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
71 }
72 
73 static inline void *folio_raw_mapping(struct folio *folio)
74 {
75 	unsigned long mapping = (unsigned long)folio->mapping;
76 
77 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
78 }
79 
80 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
81 						int nr_throttled);
82 static inline void acct_reclaim_writeback(struct folio *folio)
83 {
84 	pg_data_t *pgdat = folio_pgdat(folio);
85 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
86 
87 	if (nr_throttled)
88 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
89 }
90 
91 static inline void wake_throttle_isolated(pg_data_t *pgdat)
92 {
93 	wait_queue_head_t *wqh;
94 
95 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
96 	if (waitqueue_active(wqh))
97 		wake_up(wqh);
98 }
99 
100 vm_fault_t do_swap_page(struct vm_fault *vmf);
101 void folio_rotate_reclaimable(struct folio *folio);
102 bool __folio_end_writeback(struct folio *folio);
103 void deactivate_file_folio(struct folio *folio);
104 void folio_activate(struct folio *folio);
105 
106 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
107 		   struct vm_area_struct *start_vma, unsigned long floor,
108 		   unsigned long ceiling, bool mm_wr_locked);
109 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
110 
111 struct zap_details;
112 void unmap_page_range(struct mmu_gather *tlb,
113 			     struct vm_area_struct *vma,
114 			     unsigned long addr, unsigned long end,
115 			     struct zap_details *details);
116 
117 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
118 		unsigned int order);
119 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
120 static inline void force_page_cache_readahead(struct address_space *mapping,
121 		struct file *file, pgoff_t index, unsigned long nr_to_read)
122 {
123 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
124 	force_page_cache_ra(&ractl, nr_to_read);
125 }
126 
127 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
128 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
129 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
130 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
131 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
132 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
133 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
134 		loff_t end);
135 long invalidate_inode_page(struct page *page);
136 unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
137 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);
138 
139 /**
140  * folio_evictable - Test whether a folio is evictable.
141  * @folio: The folio to test.
142  *
143  * Test whether @folio is evictable -- i.e., should be placed on
144  * active/inactive lists vs unevictable list.
145  *
146  * Reasons folio might not be evictable:
147  * 1. folio's mapping marked unevictable
148  * 2. One of the pages in the folio is part of an mlocked VMA
149  */
150 static inline bool folio_evictable(struct folio *folio)
151 {
152 	bool ret;
153 
154 	/* Prevent address_space of inode and swap cache from being freed */
155 	rcu_read_lock();
156 	ret = !mapping_unevictable(folio_mapping(folio)) &&
157 			!folio_test_mlocked(folio);
158 	rcu_read_unlock();
159 	return ret;
160 }
161 
162 /*
163  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
164  * a count of one.
165  */
166 static inline void set_page_refcounted(struct page *page)
167 {
168 	VM_BUG_ON_PAGE(PageTail(page), page);
169 	VM_BUG_ON_PAGE(page_ref_count(page), page);
170 	set_page_count(page, 1);
171 }
172 
173 extern unsigned long highest_memmap_pfn;
174 
175 /*
176  * Maximum number of reclaim retries without progress before the OOM
177  * killer is considered the only way forward.
178  */
179 #define MAX_RECLAIM_RETRIES 16
180 
181 /*
182  * in mm/vmscan.c:
183  */
184 bool isolate_lru_page(struct page *page);
185 bool folio_isolate_lru(struct folio *folio);
186 void putback_lru_page(struct page *page);
187 void folio_putback_lru(struct folio *folio);
188 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
189 
190 /*
191  * in mm/rmap.c:
192  */
193 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
194 
195 /*
196  * in mm/page_alloc.c
197  */
198 #define K(x) ((x) << (PAGE_SHIFT-10))
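
/*
 * Illustrative arithmetic: with 4kB pages (PAGE_SHIFT == 12),
 * K(256) == 256 << 2 == 1024, i.e. 256 pages are reported as 1024 kB.
 */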
199 
200 extern char * const zone_names[MAX_NR_ZONES];
201 
202 /* perform sanity checks on struct pages being allocated or freed */
203 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
204 
205 static inline bool is_check_pages_enabled(void)
206 {
207 	return static_branch_unlikely(&check_pages_enabled);
208 }
209 
210 /*
211  * Structure for holding the mostly immutable allocation parameters passed
212  * between functions involved in allocations, including the alloc_pages*
213  * family of functions.
214  *
215  * nodemask, migratetype and highest_zoneidx are initialized only once in
216  * __alloc_pages() and then never change.
217  *
218  * zonelist, preferred_zone and highest_zoneidx are set first in
219  * __alloc_pages() for the fast path, and might be later changed
220  * in __alloc_pages_slowpath(). All other functions pass the whole structure
221  * by a const pointer.
222  */
223 struct alloc_context {
224 	struct zonelist *zonelist;
225 	nodemask_t *nodemask;
226 	struct zoneref *preferred_zoneref;
227 	int migratetype;
228 
229 	/*
230 	 * highest_zoneidx represents the highest usable zone index of
231 	 * the allocation request. Due to the nature of the zones,
232 	 * memory in zones lower than highest_zoneidx will be
233 	 * protected by lowmem_reserve[highest_zoneidx].
234 	 *
235 	 * highest_zoneidx is also used by reclaim/compaction to limit
236 	 * the target zone, since zones higher than this index cannot
237 	 * be used for this allocation request.
238 	 */
239 	enum zone_type highest_zoneidx;
240 	bool spread_dirty_pages;
241 };
242 
243 /*
244  * This function returns the order of a free page in the buddy system. In
245  * general, page_zone(page)->lock must be held by the caller to prevent the
246  * page from being allocated in parallel and returning garbage as the order.
247  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
248  * page cannot be allocated or merged in parallel. Alternatively, it must
249  * handle invalid values gracefully, and use buddy_order_unsafe() below.
250  */
251 static inline unsigned int buddy_order(struct page *page)
252 {
253 	/* PageBuddy() must be checked by the caller */
254 	return page_private(page);
255 }
256 
257 /*
258  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
259  * PageBuddy() should be checked first by the caller to minimize race window,
260  * and invalid values must be handled gracefully.
261  *
262  * READ_ONCE is used so that if the caller assigns the result into a local
263  * variable and e.g. tests it for valid range before using, the compiler cannot
264  * decide to remove the variable and inline the page_private(page) multiple
265  * times, potentially observing different values in the tests and the actual
266  * use of the result.
267  */
268 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
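
/*
 * Illustrative use (sketch): read the order once into a local variable and
 * range-check it before trusting it, e.g. to skip over a free page:
 *
 *	unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *	if (freepage_order <= MAX_ORDER)
 *		pfn += (1UL << freepage_order) - 1;
 */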
269 
270 /*
271  * This function checks whether a page is free && is the buddy
272  * we can coalesce. A page and its buddy can be coalesced if
273  * (a) the buddy is not in a hole (check before calling!) &&
274  * (b) the buddy is in the buddy system &&
275  * (c) a page and its buddy have the same order &&
276  * (d) a page and its buddy are in the same zone.
277  *
278  * For recording whether a page is in the buddy system, we set PageBuddy.
279  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
280  *
281  * For recording page's order, we use page_private(page).
282  */
283 static inline bool page_is_buddy(struct page *page, struct page *buddy,
284 				 unsigned int order)
285 {
286 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
287 		return false;
288 
289 	if (buddy_order(buddy) != order)
290 		return false;
291 
292 	/*
293 	 * zone check is done late to avoid uselessly calculating
294 	 * zone/node ids for pages that could never merge.
295 	 */
296 	if (page_zone_id(page) != page_zone_id(buddy))
297 		return false;
298 
299 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
300 
301 	return true;
302 }
303 
304 /*
305  * Locate the struct page for both the matching buddy in our
306  * pair (buddy1) and the combined order O+1 page they form (page).
307  *
308  * 1) Any buddy B1 will have an order O twin B2 which satisfies
309  * the following equation:
310  *     B2 = B1 ^ (1 << O)
311  * For example, if the starting buddy (B1) is #8, its order
312  * 1 buddy is #10:
313  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
314  *
315  * 2) Any buddy B will have an order O+1 parent P which
316  * satisfies the following equation:
317  *     P = B & ~(1 << O)
318  *
319  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
320  */
321 static inline unsigned long
322 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
323 {
324 	return page_pfn ^ (1 << order);
325 }
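
/*
 * Worked example (illustrative): for pfn 0x1000 at order 3,
 * __find_buddy_pfn() returns 0x1000 ^ (1 << 3) == 0x1008, and the merged
 * order-4 parent starts at 0x1000 & ~(1 << 3) == 0x1000.
 */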
326 
327 /*
328  * Find the buddy of @page and validate it.
329  * @page: The input page
330  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
331  *       function is used in the performance-critical __free_one_page().
332  * @order: The order of the page
333  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
334  *             page_to_pfn().
335  *
336  * The found buddy can be non-PageBuddy, outside @page's zone, or of a
337  * different order than @page.  It must be validated before being used.
338  *
339  * Return: the found buddy page or NULL if not found.
340  */
341 static inline struct page *find_buddy_page_pfn(struct page *page,
342 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
343 {
344 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
345 	struct page *buddy;
346 
347 	buddy = page + (__buddy_pfn - pfn);
348 	if (buddy_pfn)
349 		*buddy_pfn = __buddy_pfn;
350 
351 	if (page_is_buddy(page, buddy, order))
352 		return buddy;
353 	return NULL;
354 }
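
/*
 * Sketch of the expected calling pattern (illustrative), as in the buddy
 * merging loop of a free path:
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		goto done_merging;
 */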
355 
356 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
357 				unsigned long end_pfn, struct zone *zone);
358 
359 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
360 				unsigned long end_pfn, struct zone *zone)
361 {
362 	if (zone->contiguous)
363 		return pfn_to_page(start_pfn);
364 
365 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
366 }
367 
368 extern int __isolate_free_page(struct page *page, unsigned int order);
369 extern void __putback_isolated_page(struct page *page, unsigned int order,
370 				    int mt);
371 extern void memblock_free_pages(struct page *page, unsigned long pfn,
372 					unsigned int order);
373 extern void __free_pages_core(struct page *page, unsigned int order);
374 
375 static inline void prep_compound_head(struct page *page, unsigned int order)
376 {
377 	struct folio *folio = (struct folio *)page;
378 
379 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
380 	set_compound_order(page, order);
381 	atomic_set(&folio->_entire_mapcount, -1);
382 	atomic_set(&folio->_nr_pages_mapped, 0);
383 	atomic_set(&folio->_pincount, 0);
384 }
385 
386 static inline void prep_compound_tail(struct page *head, int tail_idx)
387 {
388 	struct page *p = head + tail_idx;
389 
390 	p->mapping = TAIL_MAPPING;
391 	set_compound_head(p, head);
392 	set_page_private(p, 0);
393 }
394 
395 extern void prep_compound_page(struct page *page, unsigned int order);
396 
397 extern void post_alloc_hook(struct page *page, unsigned int order,
398 					gfp_t gfp_flags);
399 extern int user_min_free_kbytes;
400 
401 extern void free_unref_page(struct page *page, unsigned int order);
402 extern void free_unref_page_list(struct list_head *list);
403 
404 extern void zone_pcp_reset(struct zone *zone);
405 extern void zone_pcp_disable(struct zone *zone);
406 extern void zone_pcp_enable(struct zone *zone);
407 extern void zone_pcp_init(struct zone *zone);
408 
409 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
410 			  phys_addr_t min_addr,
411 			  int nid, bool exact_nid);
412 
413 int split_free_page(struct page *free_page,
414 			unsigned int order, unsigned long split_pfn_offset);
415 
416 /*
417  * This will have no effect, other than possibly generating a warning, if the
418  * caller passes in a non-large folio.
419  */
420 static inline void folio_set_order(struct folio *folio, unsigned int order)
421 {
422 	if (WARN_ON_ONCE(!folio_test_large(folio)))
423 		return;
424 
425 	folio->_folio_order = order;
426 #ifdef CONFIG_64BIT
427 	/*
428 	 * When hugetlb dissolves a folio, we need to clear the tail
429 	 * page, rather than setting nr_pages to 1.
430 	 */
431 	folio->_folio_nr_pages = order ? 1U << order : 0;
432 #endif
433 }
434 
435 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
436 
437 /*
438  * in mm/compaction.c
439  */
440 /*
441  * compact_control is used to track pages being migrated and the free pages
442  * they are being migrated to during memory compaction. The free_pfn starts
443  * at the end of a zone and migrate_pfn begins at the start. Movable pages
444  * are moved to the end of a zone during a compaction run and the run
445  * completes when free_pfn <= migrate_pfn
446  */
447 struct compact_control {
448 	struct list_head freepages;	/* List of free pages to migrate to */
449 	struct list_head migratepages;	/* List of pages being migrated */
450 	unsigned int nr_freepages;	/* Number of isolated free pages */
451 	unsigned int nr_migratepages;	/* Number of pages to migrate */
452 	unsigned long free_pfn;		/* isolate_freepages search base */
453 	/*
454 	 * Acts as an in/out parameter to page isolation for migration.
455 	 * isolate_migratepages uses it as a search base.
456 	 * isolate_migratepages_block will update the value to the next pfn
457 	 * after the last isolated one.
458 	 */
459 	unsigned long migrate_pfn;
460 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
461 	struct zone *zone;
462 	unsigned long total_migrate_scanned;
463 	unsigned long total_free_scanned;
464 	unsigned short fast_search_fail;/* failures to use free list searches */
465 	short search_order;		/* order to start a fast search at */
466 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
467 	int order;			/* order a direct compactor needs */
468 	int migratetype;		/* migratetype of direct compactor */
469 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
470 	const int highest_zoneidx;	/* zone index of a direct compactor */
471 	enum migrate_mode mode;		/* Async or sync migration mode */
472 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
473 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
474 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
475 	bool direct_compaction;		/* False from kcompactd or /proc/... */
476 	bool proactive_compaction;	/* kcompactd proactive compaction */
477 	bool whole_zone;		/* Whole zone should/has been scanned */
478 	bool contended;			/* Signal lock contention */
479 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
480 					 * when there are potentially transient
481 					 * isolation or migration failures to
482 					 * ensure forward progress.
483 					 */
484 	bool alloc_contig;		/* alloc_contig_range allocation */
485 };
486 
487 /*
488  * Used in direct compaction when a page should be taken from the freelists
489  * immediately, as soon as one is created during the free path.
490  */
491 struct capture_control {
492 	struct compact_control *cc;
493 	struct page *page;
494 };
495 
496 unsigned long
497 isolate_freepages_range(struct compact_control *cc,
498 			unsigned long start_pfn, unsigned long end_pfn);
499 int
500 isolate_migratepages_range(struct compact_control *cc,
501 			   unsigned long low_pfn, unsigned long end_pfn);
502 
503 int __alloc_contig_migrate_range(struct compact_control *cc,
504 					unsigned long start, unsigned long end);
505 
506 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
507 void init_cma_reserved_pageblock(struct page *page);
508 
509 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
510 
511 int find_suitable_fallback(struct free_area *area, unsigned int order,
512 			int migratetype, bool only_stealable, bool *can_steal);
513 
514 static inline bool free_area_empty(struct free_area *area, int migratetype)
515 {
516 	return list_empty(&area->free_list[migratetype]);
517 }
518 
519 /*
520  * These three helpers classify VMAs for virtual memory accounting.
521  */
522 
523 /*
524  * Executable code area - executable, not writable, not stack
525  */
526 static inline bool is_exec_mapping(vm_flags_t flags)
527 {
528 	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
529 }
530 
531 /*
532  * Stack area - automatically grows in one direction
533  *
534  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
535  * do_mmap() forbids all other combinations.
536  */
537 static inline bool is_stack_mapping(vm_flags_t flags)
538 {
539 	return (flags & VM_STACK) == VM_STACK;
540 }
541 
542 /*
543  * Data area - private, writable, not stack
544  */
545 static inline bool is_data_mapping(vm_flags_t flags)
546 {
547 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
548 }
549 
550 /* mm/util.c */
551 struct anon_vma *folio_anon_vma(struct folio *folio);
552 
553 #ifdef CONFIG_MMU
554 void unmap_mapping_folio(struct folio *folio);
555 extern long populate_vma_page_range(struct vm_area_struct *vma,
556 		unsigned long start, unsigned long end, int *locked);
557 extern long faultin_vma_page_range(struct vm_area_struct *vma,
558 				   unsigned long start, unsigned long end,
559 				   bool write, int *locked);
560 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
561 			      unsigned long len);
562 /*
563  * mlock_vma_folio() and munlock_vma_folio():
564  * should be called with vma's mmap_lock held for read or write,
565  * under page table lock for the pte/pmd being added or removed.
566  *
567  * mlock is usually called at the end of page_add_*_rmap(), munlock at
568  * the end of page_remove_rmap(); but new anon folios are managed by
569  * folio_add_lru_vma() calling mlock_new_folio().
570  *
571  * @compound is used to include pmd mappings of THPs, but filter out
572  * pte mappings of THPs, which cannot be consistently counted: a pte
573  * mapping of the THP head cannot be distinguished by the page alone.
574  */
575 void mlock_folio(struct folio *folio);
576 static inline void mlock_vma_folio(struct folio *folio,
577 			struct vm_area_struct *vma, bool compound)
578 {
579 	/*
580 	 * The VM_SPECIAL check here serves two purposes.
581 	 * 1) VM_IO check prevents migration from double-counting during mlock.
582 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
583 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
584 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
585 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
586 	 */
587 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
588 	    (compound || !folio_test_large(folio)))
589 		mlock_folio(folio);
590 }
591 
592 void munlock_folio(struct folio *folio);
593 static inline void munlock_vma_folio(struct folio *folio,
594 			struct vm_area_struct *vma, bool compound)
595 {
596 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
597 	    (compound || !folio_test_large(folio)))
598 		munlock_folio(folio);
599 }
600 
601 void mlock_new_folio(struct folio *folio);
602 bool need_mlock_drain(int cpu);
603 void mlock_drain_local(void);
604 void mlock_drain_remote(int cpu);
605 
606 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
607 
608 /*
609  * Return the starting user virtual address at the specified offset within
610  * a vma.
611  */
612 static inline unsigned long
613 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
614 		  struct vm_area_struct *vma)
615 {
616 	unsigned long address;
617 
618 	if (pgoff >= vma->vm_pgoff) {
619 		address = vma->vm_start +
620 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
621 		/* Check for address beyond vma (or wrapped through 0?) */
622 		if (address < vma->vm_start || address >= vma->vm_end)
623 			address = -EFAULT;
624 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
625 		/* Test above avoids possibility of wrap to 0 on 32-bit */
626 		address = vma->vm_start;
627 	} else {
628 		address = -EFAULT;
629 	}
630 	return address;
631 }
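
/*
 * Worked example (illustrative, assuming 4kB pages): with
 * vma->vm_start == 0x7f0000000000 and vma->vm_pgoff == 0x10, a request for
 * pgoff 0x14 yields 0x7f0000000000 + ((0x14 - 0x10) << 12) ==
 * 0x7f0000004000; a range entirely outside the vma yields -EFAULT.
 */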
632 
633 /*
634  * Return the start of user virtual address of a page within a vma.
635  * Returns -EFAULT if all of the page is outside the range of vma.
636  * If page is a compound head, the entire compound page is considered.
637  */
638 static inline unsigned long
639 vma_address(struct page *page, struct vm_area_struct *vma)
640 {
641 	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
642 	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
643 }
644 
645 /*
646  * Then at what user virtual address will none of the range be found in vma?
647  * Assumes that vma_address() already returned a good starting address.
648  */
649 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
650 {
651 	struct vm_area_struct *vma = pvmw->vma;
652 	pgoff_t pgoff;
653 	unsigned long address;
654 
655 	/* Common case, plus ->pgoff is invalid for KSM */
656 	if (pvmw->nr_pages == 1)
657 		return pvmw->address + PAGE_SIZE;
658 
659 	pgoff = pvmw->pgoff + pvmw->nr_pages;
660 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
661 	/* Check for address beyond vma (or wrapped through 0?) */
662 	if (address < vma->vm_start || address > vma->vm_end)
663 		address = vma->vm_end;
664 	return address;
665 }
666 
667 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
668 						    struct file *fpin)
669 {
670 	int flags = vmf->flags;
671 
672 	if (fpin)
673 		return fpin;
674 
675 	/*
676 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
677 	 * anything, so we only pin the file and drop the mmap_lock when
678 	 * FAULT_FLAG_ALLOW_RETRY is set without it, and this is the first attempt.
679 	 */
680 	if (fault_flag_allow_retry_first(flags) &&
681 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
682 		fpin = get_file(vmf->vma->vm_file);
683 		mmap_read_unlock(vmf->vma->vm_mm);
684 	}
685 	return fpin;
686 }
687 #else /* !CONFIG_MMU */
688 static inline void unmap_mapping_folio(struct folio *folio) { }
689 static inline void mlock_new_folio(struct folio *folio) { }
690 static inline bool need_mlock_drain(int cpu) { return false; }
691 static inline void mlock_drain_local(void) { }
692 static inline void mlock_drain_remote(int cpu) { }
693 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
694 {
695 }
696 #endif /* !CONFIG_MMU */
697 
698 /* Memory initialisation debug and verification */
699 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
700 DECLARE_STATIC_KEY_TRUE(deferred_pages);
701 
702 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
703 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
704 
705 enum mminit_level {
706 	MMINIT_WARNING,
707 	MMINIT_VERIFY,
708 	MMINIT_TRACE
709 };
710 
711 #ifdef CONFIG_DEBUG_MEMORY_INIT
712 
713 extern int mminit_loglevel;
714 
715 #define mminit_dprintk(level, prefix, fmt, arg...) \
716 do { \
717 	if (level < mminit_loglevel) { \
718 		if (level <= MMINIT_WARNING) \
719 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
720 		else \
721 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
722 	} \
723 } while (0)
724 
725 extern void mminit_verify_pageflags_layout(void);
726 extern void mminit_verify_zonelist(void);
727 #else
728 
729 static inline void mminit_dprintk(enum mminit_level level,
730 				const char *prefix, const char *fmt, ...)
731 {
732 }
733 
734 static inline void mminit_verify_pageflags_layout(void)
735 {
736 }
737 
738 static inline void mminit_verify_zonelist(void)
739 {
740 }
741 #endif /* CONFIG_DEBUG_MEMORY_INIT */
742 
743 #define NODE_RECLAIM_NOSCAN	-2
744 #define NODE_RECLAIM_FULL	-1
745 #define NODE_RECLAIM_SOME	0
746 #define NODE_RECLAIM_SUCCESS	1
747 
748 #ifdef CONFIG_NUMA
749 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
750 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
751 #else
752 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
753 				unsigned int order)
754 {
755 	return NODE_RECLAIM_NOSCAN;
756 }
757 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
758 {
759 	return NUMA_NO_NODE;
760 }
761 #endif
762 
763 /*
764  * mm/memory-failure.c
765  */
766 extern int hwpoison_filter(struct page *p);
767 
768 extern u32 hwpoison_filter_dev_major;
769 extern u32 hwpoison_filter_dev_minor;
770 extern u64 hwpoison_filter_flags_mask;
771 extern u64 hwpoison_filter_flags_value;
772 extern u64 hwpoison_filter_memcg;
773 extern u32 hwpoison_filter_enable;
774 
775 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
776         unsigned long, unsigned long,
777         unsigned long, unsigned long);
778 
779 extern void set_pageblock_order(void);
780 unsigned long reclaim_pages(struct list_head *folio_list);
781 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
782 					    struct list_head *folio_list);
783 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
784 #define ALLOC_WMARK_MIN		WMARK_MIN
785 #define ALLOC_WMARK_LOW		WMARK_LOW
786 #define ALLOC_WMARK_HIGH	WMARK_HIGH
787 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
788 
789 /* Mask to get the watermark bits */
790 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
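
/*
 * Illustrative use (sketch): the low bits of alloc_flags index the zone
 * watermarks, e.g.
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */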
791 
792 /*
793  * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
794  * cannot assume a reduced access to memory reserves is sufficient for
795  * !MMU
796  */
797 #ifdef CONFIG_MMU
798 #define ALLOC_OOM		0x08
799 #else
800 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
801 #endif
802 
803 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
804 				       * to 25% of the min watermark or
805 				       * 62.5% if __GFP_HIGH is set.
806 				       */
807 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
808 				       * of the min watermark.
809 				       */
810 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
811 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
812 #ifdef CONFIG_ZONE_DMA32
813 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
814 #else
815 #define ALLOC_NOFRAGMENT	  0x0
816 #endif
817 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
818 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
819 
820 /* Flags that allow allocations below the min watermark. */
821 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
822 
823 enum ttu_flags;
824 struct tlbflush_unmap_batch;
825 
826 
827 /*
828  * only for MM internal work items which do not depend on
829  * any allocations or locks which might depend on allocations
830  */
831 extern struct workqueue_struct *mm_percpu_wq;
832 
833 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
834 void try_to_unmap_flush(void);
835 void try_to_unmap_flush_dirty(void);
836 void flush_tlb_batched_pending(struct mm_struct *mm);
837 #else
838 static inline void try_to_unmap_flush(void)
839 {
840 }
841 static inline void try_to_unmap_flush_dirty(void)
842 {
843 }
844 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
845 {
846 }
847 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
848 
849 extern const struct trace_print_flags pageflag_names[];
850 extern const struct trace_print_flags pagetype_names[];
851 extern const struct trace_print_flags vmaflag_names[];
852 extern const struct trace_print_flags gfpflag_names[];
853 
854 static inline bool is_migrate_highatomic(enum migratetype migratetype)
855 {
856 	return migratetype == MIGRATE_HIGHATOMIC;
857 }
858 
859 static inline bool is_migrate_highatomic_page(struct page *page)
860 {
861 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
862 }
863 
864 void setup_zone_pageset(struct zone *zone);
865 
866 struct migration_target_control {
867 	int nid;		/* preferred node id */
868 	nodemask_t *nmask;
869 	gfp_t gfp_mask;
870 };
871 
872 /*
873  * mm/filemap.c
874  */
875 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
876 			      struct folio *folio, loff_t fpos, size_t size);
877 
878 /*
879  * mm/vmalloc.c
880  */
881 #ifdef CONFIG_MMU
882 void __init vmalloc_init(void);
883 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
884                 pgprot_t prot, struct page **pages, unsigned int page_shift);
885 #else
886 static inline void vmalloc_init(void)
887 {
888 }
889 
890 static inline
891 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
892                 pgprot_t prot, struct page **pages, unsigned int page_shift)
893 {
894 	return -EINVAL;
895 }
896 #endif
897 
898 int __must_check __vmap_pages_range_noflush(unsigned long addr,
899 			       unsigned long end, pgprot_t prot,
900 			       struct page **pages, unsigned int page_shift);
901 
902 void vunmap_range_noflush(unsigned long start, unsigned long end);
903 
904 void __vunmap_range_noflush(unsigned long start, unsigned long end);
905 
906 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
907 		      unsigned long addr, int page_nid, int *flags);
908 
909 void free_zone_device_page(struct page *page);
910 int migrate_device_coherent_page(struct page *page);
911 
912 /*
913  * mm/gup.c
914  */
915 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
916 int __must_check try_grab_page(struct page *page, unsigned int flags);
917 
918 enum {
919 	/* mark page accessed */
920 	FOLL_TOUCH = 1 << 16,
921 	/* a retry, previous pass started an IO */
922 	FOLL_TRIED = 1 << 17,
923 	/* we are working on non-current tsk/mm */
924 	FOLL_REMOTE = 1 << 18,
925 	/* pages must be released via unpin_user_page */
926 	FOLL_PIN = 1 << 19,
927 	/* gup_fast: prevent fall-back to slow gup */
928 	FOLL_FAST_ONLY = 1 << 20,
929 	/* allow unlocking the mmap lock */
930 	FOLL_UNLOCKABLE = 1 << 21,
931 };
932 
933 /*
934  * Indicates whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE for
935  * pages that are write-protected in the page table, such that the
936  * GUP pin will remain consistent with the pages mapped into the page tables
937  * of the MM.
938  *
939  * Temporary unmapping of PageAnonExclusive() pages or clearing of
940  * PageAnonExclusive() has to protect against concurrent GUP:
941  * * Ordinary GUP: Using the PT lock
942  * * GUP-fast and fork(): mm->write_protect_seq
943  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
944  *    page_try_share_anon_rmap()
945  *
946  * Must be called with the (sub)page that's actually referenced via the
947  * page table entry, which might not necessarily be the head page for a
948  * PTE-mapped THP.
949  *
950  * If the vma is NULL, we're coming from the GUP-fast path and might have
951  * to fallback to the slow path just to lookup the vma.
952  */
953 static inline bool gup_must_unshare(struct vm_area_struct *vma,
954 				    unsigned int flags, struct page *page)
955 {
956 	/*
957 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
958 	 * has to be writable -- and if it references (part of) an anonymous
959 	 * folio, that part is required to be marked exclusive.
960 	 */
961 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
962 		return false;
963 	/*
964 	 * Note: PageAnon(page) is stable until the page is actually getting
965 	 * freed.
966 	 */
967 	if (!PageAnon(page)) {
968 		/*
969 		 * We only care about R/O long-term pinning: R/O short-term
970 		 * pinning does not have the semantics to observe successive
971 		 * changes through the process page tables.
972 		 */
973 		if (!(flags & FOLL_LONGTERM))
974 			return false;
975 
976 		/* We really need the vma ... */
977 		if (!vma)
978 			return true;
979 
980 		/*
981 		 * ... because we only care about writable private ("COW")
982 		 * mappings where we have to break COW early.
983 		 */
984 		return is_cow_mapping(vma->vm_flags);
985 	}
986 
987 	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
988 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
989 		smp_rmb();
990 
991 	/*
992 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
993 	 * cannot get pinned.
994 	 */
995 	return !PageAnonExclusive(page);
996 }
997 
998 extern bool mirrored_kernelcore;
999 
1000 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1001 {
1002 	/*
1003 	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1004 	 * enablement, because without soft-dirty being compiled in,
1005 	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1006 	 * would always be true.
1007 	 */
1008 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1009 		return false;
1010 
1011 	/*
1012 	 * Soft-dirty is kind of special: its tracking is enabled when the
1013 	 * vma flag is *not* set.
1014 	 */
1015 	return !(vma->vm_flags & VM_SOFTDIRTY);
1016 }
1017 
1018 /*
1019  * VMA Iterator functions shared between nommu and mmap
1020  */
1021 static inline int vma_iter_prealloc(struct vma_iterator *vmi)
1022 {
1023 	return mas_preallocate(&vmi->mas, GFP_KERNEL);
1024 }
1025 
1026 static inline void vma_iter_clear(struct vma_iterator *vmi,
1027 				  unsigned long start, unsigned long end)
1028 {
1029 	mas_set_range(&vmi->mas, start, end - 1);
1030 	mas_store_prealloc(&vmi->mas, NULL);
1031 }
1032 
1033 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1034 {
1035 	return mas_walk(&vmi->mas);
1036 }
1037 
1038 /* Store a VMA with preallocated memory */
1039 static inline void vma_iter_store(struct vma_iterator *vmi,
1040 				  struct vm_area_struct *vma)
1041 {
1042 
1043 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1044 	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
1045 		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
1046 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
1047 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
1048 		mt_dump(vmi->mas.tree);
1049 	}
1050 	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
1051 		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
1052 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
1053 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
1054 		mt_dump(vmi->mas.tree);
1055 	}
1056 #endif
1057 
1058 	if (vmi->mas.node != MAS_START &&
1059 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1060 		vma_iter_invalidate(vmi);
1061 
1062 	vmi->mas.index = vma->vm_start;
1063 	vmi->mas.last = vma->vm_end - 1;
1064 	mas_store_prealloc(&vmi->mas, vma);
1065 }
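
/*
 * Sketch of the expected calling pattern (illustrative):
 *
 *	if (vma_iter_prealloc(vmi))
 *		return -ENOMEM;
 *	...
 *	vma_iter_store(vmi, vma);
 */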
1066 
1067 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1068 			struct vm_area_struct *vma, gfp_t gfp)
1069 {
1070 	if (vmi->mas.node != MAS_START &&
1071 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1072 		vma_iter_invalidate(vmi);
1073 
1074 	vmi->mas.index = vma->vm_start;
1075 	vmi->mas.last = vma->vm_end - 1;
1076 	mas_store_gfp(&vmi->mas, vma, gfp);
1077 	if (unlikely(mas_is_err(&vmi->mas)))
1078 		return -ENOMEM;
1079 
1080 	return 0;
1081 }
1082 
1083 /*
1084  * VMA lock generalization
1085  */
1086 struct vma_prepare {
1087 	struct vm_area_struct *vma;
1088 	struct vm_area_struct *adj_next;
1089 	struct file *file;
1090 	struct address_space *mapping;
1091 	struct anon_vma *anon_vma;
1092 	struct vm_area_struct *insert;
1093 	struct vm_area_struct *remove;
1094 	struct vm_area_struct *remove2;
1095 };
1096 #endif	/* __MM_INTERNAL_H */
1097