xref: /openbmc/linux/mm/internal.h (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
21da177e4SLinus Torvalds /* internal.h: mm/ internal definitions
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
51da177e4SLinus Torvalds  * Written by David Howells (dhowells@redhat.com)
61da177e4SLinus Torvalds  */
70f8053a5SNick Piggin #ifndef __MM_INTERNAL_H
80f8053a5SNick Piggin #define __MM_INTERNAL_H
90f8053a5SNick Piggin 
1029f175d1SFabian Frederick #include <linux/fs.h>
110f8053a5SNick Piggin #include <linux/mm.h>
12e9b61f19SKirill A. Shutemov #include <linux/pagemap.h>
132aff7a47SMatthew Wilcox (Oracle) #include <linux/rmap.h>
14edf14cdbSVlastimil Babka #include <linux/tracepoint-defs.h>
151da177e4SLinus Torvalds 
160e499ed3SMatthew Wilcox (Oracle) struct folio_batch;
170e499ed3SMatthew Wilcox (Oracle) 
18dd56b046SMel Gorman /*
19dd56b046SMel Gorman  * The set of flags that only affect watermark checking and reclaim
20dd56b046SMel Gorman  * behaviour. This is used by the MM to obey the caller constraints
21dd56b046SMel Gorman  * about IO, FS and watermark checking while ignoring placement
22dd56b046SMel Gorman  * hints such as HIGHMEM usage.
23dd56b046SMel Gorman  */
24dd56b046SMel Gorman #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
25dcda9b04SMichal Hocko 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
26e838a45fSMel Gorman 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
272973d822SNeilBrown 			__GFP_NOLOCKDEP)
28dd56b046SMel Gorman 
29dd56b046SMel Gorman /* The GFP flags allowed during early boot */
30dd56b046SMel Gorman #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
31dd56b046SMel Gorman 
32dd56b046SMel Gorman /* Control allocation cpuset and node placement constraints */
33dd56b046SMel Gorman #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
34dd56b046SMel Gorman 
35dd56b046SMel Gorman /* Do not use these with a slab allocator */
36dd56b046SMel Gorman #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
37dd56b046SMel Gorman 
383f913fc5SQi Zheng /*
393f913fc5SQi Zheng  * Unlike WARN_ON_ONCE(), no warning will be issued
403f913fc5SQi Zheng  * when __GFP_NOWARN is specified.
413f913fc5SQi Zheng  */
423f913fc5SQi Zheng #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
43*0dd7a8b9SMasahiro Yamada 	static bool __section(".data..once") __warned;			\
443f913fc5SQi Zheng 	int __ret_warn_once = !!(cond);					\
453f913fc5SQi Zheng 									\
463f913fc5SQi Zheng 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
473f913fc5SQi Zheng 		__warned = true;					\
483f913fc5SQi Zheng 		WARN_ON(1);						\
493f913fc5SQi Zheng 	}								\
503f913fc5SQi Zheng 	unlikely(__ret_warn_once);					\
513f913fc5SQi Zheng })
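/*
 * A minimal usage sketch (editor's illustration, hypothetical caller): an
 * allocator entry point might reject an impossible request with a one-time
 * warning while staying silent for callers that pass __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
 *		return NULL;
 */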
523f913fc5SQi Zheng 
5362906027SNicholas Piggin void page_writeback_init(void);
5462906027SNicholas Piggin 
55eec20426SMatthew Wilcox (Oracle) /*
56eec20426SMatthew Wilcox (Oracle)  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
57eec20426SMatthew Wilcox (Oracle)  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
58eec20426SMatthew Wilcox (Oracle)  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
59eec20426SMatthew Wilcox (Oracle)  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
60eec20426SMatthew Wilcox (Oracle)  */
61eec20426SMatthew Wilcox (Oracle) #define COMPOUND_MAPPED		0x800000
62eec20426SMatthew Wilcox (Oracle) #define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
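/*
 * Worked example of the arithmetic above (editor's illustration): a 16GB
 * hugetlb folio of 4kB pages spans 16GB / 4kB = 2^22 = 0x400000 pages, so
 * per-page mappings can push _nr_pages_mapped up to 0x400000.  COMPOUND_MAPPED
 * is bit 23 (0x800000), safely above that range, and FOLIO_PAGES_MAPPED
 * (0x7fffff) masks it off to recover the per-page mapping count.
 */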
63eec20426SMatthew Wilcox (Oracle) 
64eec20426SMatthew Wilcox (Oracle) /*
651279aa06SKefeng Wang  * Flags passed to __show_mem() and show_free_areas() to suppress output in
661279aa06SKefeng Wang  * various contexts.
671279aa06SKefeng Wang  */
681279aa06SKefeng Wang #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
691279aa06SKefeng Wang 
701279aa06SKefeng Wang /*
71eec20426SMatthew Wilcox (Oracle)  * How many individual pages have an elevated _mapcount.  Excludes
72eec20426SMatthew Wilcox (Oracle)  * the folio's entire_mapcount.
73eec20426SMatthew Wilcox (Oracle)  */
74eec20426SMatthew Wilcox (Oracle) static inline int folio_nr_pages_mapped(struct folio *folio)
75eec20426SMatthew Wilcox (Oracle) {
76eec20426SMatthew Wilcox (Oracle) 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
77eec20426SMatthew Wilcox (Oracle) }
78eec20426SMatthew Wilcox (Oracle) 
7964601000SMatthew Wilcox (Oracle) static inline void *folio_raw_mapping(struct folio *folio)
8064601000SMatthew Wilcox (Oracle) {
8164601000SMatthew Wilcox (Oracle) 	unsigned long mapping = (unsigned long)folio->mapping;
8264601000SMatthew Wilcox (Oracle) 
8364601000SMatthew Wilcox (Oracle) 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
8464601000SMatthew Wilcox (Oracle) }
8564601000SMatthew Wilcox (Oracle) 
86cd3ed99fSLorenzo Stoakes /*
87cd3ed99fSLorenzo Stoakes  * This is a file-backed mapping, and is about to be memory mapped - invoke its
88cd3ed99fSLorenzo Stoakes  * mmap hook and safely handle error conditions. On error, VMA hooks will be
89cd3ed99fSLorenzo Stoakes  * mutated.
90cd3ed99fSLorenzo Stoakes  *
91cd3ed99fSLorenzo Stoakes  * @file: File which backs the mapping.
92cd3ed99fSLorenzo Stoakes  * @vma:  VMA which we are mapping.
93cd3ed99fSLorenzo Stoakes  *
94cd3ed99fSLorenzo Stoakes  * Returns: 0 if success, error otherwise.
95cd3ed99fSLorenzo Stoakes  */
96cd3ed99fSLorenzo Stoakes static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
97cd3ed99fSLorenzo Stoakes {
98cd3ed99fSLorenzo Stoakes 	int err = call_mmap(file, vma);
99cd3ed99fSLorenzo Stoakes 
100cd3ed99fSLorenzo Stoakes 	if (likely(!err))
101cd3ed99fSLorenzo Stoakes 		return 0;
102cd3ed99fSLorenzo Stoakes 
103cd3ed99fSLorenzo Stoakes 	/*
104cd3ed99fSLorenzo Stoakes 	 * OK, we tried to call the file hook for mmap(), but an error
105cd3ed99fSLorenzo Stoakes 	 * arose. The mapping is in an inconsistent state and we must not invoke
106cd3ed99fSLorenzo Stoakes 	 * any further hooks on it.
107cd3ed99fSLorenzo Stoakes 	 */
108cd3ed99fSLorenzo Stoakes 	vma->vm_ops = &vma_dummy_vm_ops;
109cd3ed99fSLorenzo Stoakes 
110cd3ed99fSLorenzo Stoakes 	return err;
111cd3ed99fSLorenzo Stoakes }
112cd3ed99fSLorenzo Stoakes 
113a97fe688SLorenzo Stoakes /*
114a97fe688SLorenzo Stoakes  * If the VMA has a close hook then close it, and since closing it might leave
115a97fe688SLorenzo Stoakes  * it in an inconsistent state which makes the use of any hooks suspect, clear
116a97fe688SLorenzo Stoakes  * them down by installing dummy empty hooks.
117a97fe688SLorenzo Stoakes  */
118a97fe688SLorenzo Stoakes static inline void vma_close(struct vm_area_struct *vma)
119a97fe688SLorenzo Stoakes {
120a97fe688SLorenzo Stoakes 	if (vma->vm_ops && vma->vm_ops->close) {
121a97fe688SLorenzo Stoakes 		vma->vm_ops->close(vma);
122a97fe688SLorenzo Stoakes 
123a97fe688SLorenzo Stoakes 		/*
124a97fe688SLorenzo Stoakes 		 * The mapping is in an inconsistent state, and no further hooks
125a97fe688SLorenzo Stoakes 		 * may be invoked upon it.
126a97fe688SLorenzo Stoakes 		 */
127a97fe688SLorenzo Stoakes 		vma->vm_ops = &vma_dummy_vm_ops;
128a97fe688SLorenzo Stoakes 	}
129a97fe688SLorenzo Stoakes }
130a97fe688SLorenzo Stoakes 
131512b7931SLinus Torvalds void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
132512b7931SLinus Torvalds 						int nr_throttled);
133512b7931SLinus Torvalds static inline void acct_reclaim_writeback(struct folio *folio)
134512b7931SLinus Torvalds {
135512b7931SLinus Torvalds 	pg_data_t *pgdat = folio_pgdat(folio);
1368cd7c588SMel Gorman 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
1378cd7c588SMel Gorman 
1388cd7c588SMel Gorman 	if (nr_throttled)
139512b7931SLinus Torvalds 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
1408cd7c588SMel Gorman }
1418cd7c588SMel Gorman 
142d818fca1SMel Gorman static inline void wake_throttle_isolated(pg_data_t *pgdat)
143d818fca1SMel Gorman {
144d818fca1SMel Gorman 	wait_queue_head_t *wqh;
145d818fca1SMel Gorman 
146d818fca1SMel Gorman 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
147d818fca1SMel Gorman 	if (waitqueue_active(wqh))
148d818fca1SMel Gorman 		wake_up(wqh);
149d818fca1SMel Gorman }
150d818fca1SMel Gorman 
1512b740303SSouptick Joarder vm_fault_t do_swap_page(struct vm_fault *vmf);
152575ced1cSMatthew Wilcox (Oracle) void folio_rotate_reclaimable(struct folio *folio);
153269ccca3SMatthew Wilcox (Oracle) bool __folio_end_writeback(struct folio *folio);
154261b6840SMatthew Wilcox (Oracle) void deactivate_file_folio(struct folio *folio);
155018ee47fSYu Zhao void folio_activate(struct folio *folio);
1568a966ed7SEbru Akagunduz 
157fd892593SLiam R. Howlett void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
158763ecb03SLiam R. Howlett 		   struct vm_area_struct *start_vma, unsigned long floor,
15998e51a22SSuren Baghdasaryan 		   unsigned long ceiling, bool mm_wr_locked);
16003c4f204SQi Zheng void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
16142b77728SJan Beulich 
1623506659eSMatthew Wilcox (Oracle) struct zap_details;
163aac45363SMichal Hocko void unmap_page_range(struct mmu_gather *tlb,
164aac45363SMichal Hocko 			     struct vm_area_struct *vma,
165aac45363SMichal Hocko 			     unsigned long addr, unsigned long end,
166aac45363SMichal Hocko 			     struct zap_details *details);
167aac45363SMichal Hocko 
16856a4d67cSMatthew Wilcox (Oracle) void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
16956a4d67cSMatthew Wilcox (Oracle) 		unsigned int order);
170fcd9ae4fSMatthew Wilcox (Oracle) void force_page_cache_ra(struct readahead_control *, unsigned long nr);
1717b3df3b9SDavid Howells static inline void force_page_cache_readahead(struct address_space *mapping,
1727b3df3b9SDavid Howells 		struct file *file, pgoff_t index, unsigned long nr_to_read)
1737b3df3b9SDavid Howells {
174fcd9ae4fSMatthew Wilcox (Oracle) 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
175fcd9ae4fSMatthew Wilcox (Oracle) 	force_page_cache_ra(&ractl, nr_to_read);
1767b3df3b9SDavid Howells }
17729f175d1SFabian Frederick 
1783392ca12SVishal Moola (Oracle) unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
17951dcbdacSMatthew Wilcox (Oracle) 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
1809fb6beeaSVishal Moola (Oracle) unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
1810e499ed3SMatthew Wilcox (Oracle) 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
18278f42660SMatthew Wilcox (Oracle) void filemap_free_folio(struct address_space *mapping, struct folio *folio);
1831e84a3d9SMatthew Wilcox (Oracle) int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
184b9a8a419SMatthew Wilcox (Oracle) bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
185b9a8a419SMatthew Wilcox (Oracle) 		loff_t end);
186d6c75dc2SMatthew Wilcox (Oracle) long invalidate_inode_page(struct page *page);
1871a0fc811SMatthew Wilcox (Oracle) unsigned long mapping_try_invalidate(struct address_space *mapping,
1881a0fc811SMatthew Wilcox (Oracle) 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
1895c211ba2SMatthew Wilcox (Oracle) 
1901eb6234eSYang Shi /**
1913eed3ef5SMatthew Wilcox (Oracle)  * folio_evictable - Test whether a folio is evictable.
1923eed3ef5SMatthew Wilcox (Oracle)  * @folio: The folio to test.
1931eb6234eSYang Shi  *
1943eed3ef5SMatthew Wilcox (Oracle)  * Test whether @folio is evictable -- i.e., should be placed on
1953eed3ef5SMatthew Wilcox (Oracle)  * active/inactive lists vs unevictable list.
1961eb6234eSYang Shi  *
1973eed3ef5SMatthew Wilcox (Oracle)  * Reasons folio might not be evictable:
1983eed3ef5SMatthew Wilcox (Oracle)  * 1. folio's mapping marked unevictable
1993eed3ef5SMatthew Wilcox (Oracle)  * 2. One of the pages in the folio is part of an mlocked VMA
2001eb6234eSYang Shi  */
2013eed3ef5SMatthew Wilcox (Oracle) static inline bool folio_evictable(struct folio *folio)
2023eed3ef5SMatthew Wilcox (Oracle) {
2033eed3ef5SMatthew Wilcox (Oracle) 	bool ret;
2043eed3ef5SMatthew Wilcox (Oracle) 
2053eed3ef5SMatthew Wilcox (Oracle) 	/* Prevent address_space of inode and swap cache from being freed */
2063eed3ef5SMatthew Wilcox (Oracle) 	rcu_read_lock();
2073eed3ef5SMatthew Wilcox (Oracle) 	ret = !mapping_unevictable(folio_mapping(folio)) &&
2083eed3ef5SMatthew Wilcox (Oracle) 			!folio_test_mlocked(folio);
2093eed3ef5SMatthew Wilcox (Oracle) 	rcu_read_unlock();
2103eed3ef5SMatthew Wilcox (Oracle) 	return ret;
2113eed3ef5SMatthew Wilcox (Oracle) }
2123eed3ef5SMatthew Wilcox (Oracle) 
2137835e98bSNick Piggin /*
2140139aa7bSJoonsoo Kim  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
2157835e98bSNick Piggin  * a count of one.
2167835e98bSNick Piggin  */
2177835e98bSNick Piggin static inline void set_page_refcounted(struct page *page)
2187835e98bSNick Piggin {
219309381feSSasha Levin 	VM_BUG_ON_PAGE(PageTail(page), page);
220fe896d18SJoonsoo Kim 	VM_BUG_ON_PAGE(page_ref_count(page), page);
22177a8a788SNick Piggin 	set_page_count(page, 1);
22277a8a788SNick Piggin }
22377a8a788SNick Piggin 
2240201ebf2SDavid Howells /*
2250201ebf2SDavid Howells  * Return true if a folio needs ->release_folio() calling upon it.
2260201ebf2SDavid Howells  */
2270201ebf2SDavid Howells static inline bool folio_needs_release(struct folio *folio)
2280201ebf2SDavid Howells {
229b4fa966fSDavid Howells 	struct address_space *mapping = folio_mapping(folio);
230b4fa966fSDavid Howells 
231b4fa966fSDavid Howells 	return folio_has_private(folio) ||
232b4fa966fSDavid Howells 		(mapping && mapping_release_always(mapping));
2330201ebf2SDavid Howells }
2340201ebf2SDavid Howells 
23503f6462aSHugh Dickins extern unsigned long highest_memmap_pfn;
23603f6462aSHugh Dickins 
237894bc310SLee Schermerhorn /*
238c73322d0SJohannes Weiner  * Maximum number of reclaim retries without progress before the OOM
239c73322d0SJohannes Weiner  * killer is considered the only way forward.
240c73322d0SJohannes Weiner  */
241c73322d0SJohannes Weiner #define MAX_RECLAIM_RETRIES 16
242c73322d0SJohannes Weiner 
243c73322d0SJohannes Weiner /*
244894bc310SLee Schermerhorn  * in mm/vmscan.c:
245894bc310SLee Schermerhorn  */
246f7f9c00dSBaolin Wang bool isolate_lru_page(struct page *page);
247be2d5756SBaolin Wang bool folio_isolate_lru(struct folio *folio);
248ca6d60f3SMatthew Wilcox (Oracle) void putback_lru_page(struct page *page);
249ca6d60f3SMatthew Wilcox (Oracle) void folio_putback_lru(struct folio *folio);
250c3f4a9a2SMel Gorman extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
25162695a84SNick Piggin 
252894bc310SLee Schermerhorn /*
2536219049aSBob Liu  * in mm/rmap.c:
2546219049aSBob Liu  */
25550722804SZach O'Keefe pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
2566219049aSBob Liu 
2576219049aSBob Liu /*
258894bc310SLee Schermerhorn  * in mm/page_alloc.c
259894bc310SLee Schermerhorn  */
260eb8589b4SMike Rapoport (IBM) #define K(x) ((x) << (PAGE_SHIFT-10))
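/*
 * Editor's note: with 4kB pages (PAGE_SHIFT == 12), K(x) == x << 2, i.e. it
 * converts a count of pages into kilobytes (x pages * 4kB = 4x kB).
 */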
2613c605096SJoonsoo Kim 
2629420f89dSMike Rapoport (IBM) extern char * const zone_names[MAX_NR_ZONES];
2639420f89dSMike Rapoport (IBM) 
264f2fc4b44SMike Rapoport (IBM) /* perform sanity checks on struct pages being allocated or freed */
265f2fc4b44SMike Rapoport (IBM) DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
266f2fc4b44SMike Rapoport (IBM) 
267e95d372cSKefeng Wang extern int min_free_kbytes;
268e95d372cSKefeng Wang 
269e95d372cSKefeng Wang void setup_per_zone_wmarks(void);
270e95d372cSKefeng Wang void calculate_min_free_kbytes(void);
271e95d372cSKefeng Wang int __meminit init_per_zone_wmark_min(void);
272e95d372cSKefeng Wang void page_alloc_sysctl_init(void);
273f2fc4b44SMike Rapoport (IBM) 
2743c605096SJoonsoo Kim /*
2751a6d53a1SVlastimil Babka  * Structure for holding the mostly immutable allocation parameters passed
2761a6d53a1SVlastimil Babka  * between functions involved in allocations, including the alloc_pages*
2771a6d53a1SVlastimil Babka  * family of functions.
2781a6d53a1SVlastimil Babka  *
27997a225e6SJoonsoo Kim  * nodemask, migratetype and highest_zoneidx are initialized only once in
28084172f4bSMatthew Wilcox (Oracle)  * __alloc_pages() and then never change.
2811a6d53a1SVlastimil Babka  *
28297a225e6SJoonsoo Kim  * zonelist, preferred_zone and highest_zoneidx are set first in
28384172f4bSMatthew Wilcox (Oracle)  * __alloc_pages() for the fast path, and might be later changed
28468956ccbSEthon Paul  * in __alloc_pages_slowpath(). All other functions pass the whole structure
2851a6d53a1SVlastimil Babka  * by a const pointer.
2861a6d53a1SVlastimil Babka  */
2871a6d53a1SVlastimil Babka struct alloc_context {
2881a6d53a1SVlastimil Babka 	struct zonelist *zonelist;
2891a6d53a1SVlastimil Babka 	nodemask_t *nodemask;
290c33d6c06SMel Gorman 	struct zoneref *preferred_zoneref;
2911a6d53a1SVlastimil Babka 	int migratetype;
29297a225e6SJoonsoo Kim 
29397a225e6SJoonsoo Kim 	/*
29497a225e6SJoonsoo Kim 	 * highest_zoneidx represents the highest usable zone index of
29597a225e6SJoonsoo Kim 	 * the allocation request. Due to the nature of the zone,
29697a225e6SJoonsoo Kim 	 * memory in a zone lower than highest_zoneidx will be
29797a225e6SJoonsoo Kim 	 * protected by lowmem_reserve[highest_zoneidx].
29897a225e6SJoonsoo Kim 	 *
29997a225e6SJoonsoo Kim 	 * highest_zoneidx is also used by reclaim/compaction to limit
30097a225e6SJoonsoo Kim 	 * the target zone, since zones higher than this index cannot
30197a225e6SJoonsoo Kim 	 * be used for this allocation request.
30297a225e6SJoonsoo Kim 	 */
30397a225e6SJoonsoo Kim 	enum zone_type highest_zoneidx;
304c9ab0c4fSMel Gorman 	bool spread_dirty_pages;
3051a6d53a1SVlastimil Babka };
3061a6d53a1SVlastimil Babka 
3071a6d53a1SVlastimil Babka /*
3088170ac47SZi Yan  * This function returns the order of a free page in the buddy system. In
3098170ac47SZi Yan  * general, page_zone(page)->lock must be held by the caller to prevent the
3108170ac47SZi Yan  * page from being allocated in parallel and returning garbage as the order.
3118170ac47SZi Yan  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
3128170ac47SZi Yan  * page cannot be allocated or merged in parallel. Alternatively, it must
3138170ac47SZi Yan  * handle invalid values gracefully, and use buddy_order_unsafe() below.
3148170ac47SZi Yan  */
3158170ac47SZi Yan static inline unsigned int buddy_order(struct page *page)
3168170ac47SZi Yan {
3178170ac47SZi Yan 	/* PageBuddy() must be checked by the caller */
3188170ac47SZi Yan 	return page_private(page);
3198170ac47SZi Yan }
3208170ac47SZi Yan 
3218170ac47SZi Yan /*
3228170ac47SZi Yan  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
3238170ac47SZi Yan  * PageBuddy() should be checked first by the caller to minimize race window,
3248170ac47SZi Yan  * and invalid values must be handled gracefully.
3258170ac47SZi Yan  *
3268170ac47SZi Yan  * READ_ONCE is used so that if the caller assigns the result into a local
3278170ac47SZi Yan  * variable and e.g. tests it for valid range before using, the compiler cannot
3288170ac47SZi Yan  * decide to remove the variable and inline the page_private(page) multiple
3298170ac47SZi Yan  * times, potentially observing different values in the tests and the actual
3308170ac47SZi Yan  * use of the result.
3318170ac47SZi Yan  */
3328170ac47SZi Yan #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
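/*
 * Sketch of the intended pattern (editor's illustration, hypothetical caller):
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order <= MAX_ORDER)
 *		... use "order", which was read exactly once ...
 *
 * Without READ_ONCE() the compiler could re-read page_private(page) at each
 * use, so the range check would not protect the later uses.
 */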
3338170ac47SZi Yan 
3348170ac47SZi Yan /*
3358170ac47SZi Yan  * This function checks whether a page is free && is the buddy of @page.
3368170ac47SZi Yan  * We can coalesce a page and its buddy if
3378170ac47SZi Yan  * (a) the buddy is not in a hole (check before calling!) &&
3388170ac47SZi Yan  * (b) the buddy is in the buddy system &&
3398170ac47SZi Yan  * (c) a page and its buddy have the same order &&
3408170ac47SZi Yan  * (d) a page and its buddy are in the same zone.
3418170ac47SZi Yan  *
3428170ac47SZi Yan  * For recording whether a page is in the buddy system, we set PageBuddy.
3438170ac47SZi Yan  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
3448170ac47SZi Yan  *
3458170ac47SZi Yan  * For recording page's order, we use page_private(page).
3468170ac47SZi Yan  */
3478170ac47SZi Yan static inline bool page_is_buddy(struct page *page, struct page *buddy,
3488170ac47SZi Yan 				 unsigned int order)
3498170ac47SZi Yan {
3508170ac47SZi Yan 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
3518170ac47SZi Yan 		return false;
3528170ac47SZi Yan 
3538170ac47SZi Yan 	if (buddy_order(buddy) != order)
3548170ac47SZi Yan 		return false;
3558170ac47SZi Yan 
3568170ac47SZi Yan 	/*
3578170ac47SZi Yan 	 * zone check is done late to avoid uselessly calculating
3588170ac47SZi Yan 	 * zone/node ids for pages that could never merge.
3598170ac47SZi Yan 	 */
3608170ac47SZi Yan 	if (page_zone_id(page) != page_zone_id(buddy))
3618170ac47SZi Yan 		return false;
3628170ac47SZi Yan 
3638170ac47SZi Yan 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
3648170ac47SZi Yan 
3658170ac47SZi Yan 	return true;
3668170ac47SZi Yan }
3678170ac47SZi Yan 
3688170ac47SZi Yan /*
3693c605096SJoonsoo Kim  * Locate the struct page for both the matching buddy in our
3703c605096SJoonsoo Kim  * pair (buddy1) and the combined O(n+1) page they form (page).
3713c605096SJoonsoo Kim  *
3723c605096SJoonsoo Kim  * 1) Any buddy B1 will have an order O twin B2 which satisfies
3733c605096SJoonsoo Kim  * the following equation:
3743c605096SJoonsoo Kim  *     B2 = B1 ^ (1 << O)
3753c605096SJoonsoo Kim  * For example, if the starting buddy (buddy2) is #8 its order
3763c605096SJoonsoo Kim  * 1 buddy is #10:
3773c605096SJoonsoo Kim  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
3783c605096SJoonsoo Kim  *
3793c605096SJoonsoo Kim  * 2) Any buddy B will have an order O+1 parent P which
3803c605096SJoonsoo Kim  * satisfies the following equation:
3813c605096SJoonsoo Kim  *     P = B & ~(1 << O)
3823c605096SJoonsoo Kim  *
3833c605096SJoonsoo Kim  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
3843c605096SJoonsoo Kim  */
3853c605096SJoonsoo Kim static inline unsigned long
38876741e77SVlastimil Babka __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
3873c605096SJoonsoo Kim {
38876741e77SVlastimil Babka 	return page_pfn ^ (1 << order);
3893c605096SJoonsoo Kim }
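/*
 * Worked example of both equations (editor's illustration): with O = 1,
 * pfn 8 and pfn 10 are buddies, since 8 ^ (1 << 1) = 10 and 10 ^ (1 << 1) = 8;
 * their combined order-2 parent starts at P = 8 & ~(1 << 1) = 10 & ~(1 << 1) = 8.
 */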
3903c605096SJoonsoo Kim 
3918170ac47SZi Yan /*
3928170ac47SZi Yan  * Find the buddy of @page and validate it.
3938170ac47SZi Yan  * @page: The input page
3948170ac47SZi Yan  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
3958170ac47SZi Yan  *       function is used in the performance-critical __free_one_page().
3968170ac47SZi Yan  * @order: The order of the page
3978170ac47SZi Yan  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
3988170ac47SZi Yan  *             page_to_pfn().
3998170ac47SZi Yan  *
4008170ac47SZi Yan  * The found buddy can be a non PageBuddy page, out of @page's zone, or of a
4018170ac47SZi Yan  * different order than @page. Validation is necessary before using it.
4028170ac47SZi Yan  *
4038170ac47SZi Yan  * Return: the found buddy page or NULL if not found.
4048170ac47SZi Yan  */
4058170ac47SZi Yan static inline struct page *find_buddy_page_pfn(struct page *page,
4068170ac47SZi Yan 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
4078170ac47SZi Yan {
4088170ac47SZi Yan 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
4098170ac47SZi Yan 	struct page *buddy;
4108170ac47SZi Yan 
4118170ac47SZi Yan 	buddy = page + (__buddy_pfn - pfn);
4128170ac47SZi Yan 	if (buddy_pfn)
4138170ac47SZi Yan 		*buddy_pfn = __buddy_pfn;
4148170ac47SZi Yan 
4158170ac47SZi Yan 	if (page_is_buddy(page, buddy, order))
4168170ac47SZi Yan 		return buddy;
4178170ac47SZi Yan 	return NULL;
4188170ac47SZi Yan }
4198170ac47SZi Yan 
4207cf91a98SJoonsoo Kim extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
4217cf91a98SJoonsoo Kim 				unsigned long end_pfn, struct zone *zone);
4227cf91a98SJoonsoo Kim 
4237cf91a98SJoonsoo Kim static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
4247cf91a98SJoonsoo Kim 				unsigned long end_pfn, struct zone *zone)
4257cf91a98SJoonsoo Kim {
4267cf91a98SJoonsoo Kim 	if (zone->contiguous)
4277cf91a98SJoonsoo Kim 		return pfn_to_page(start_pfn);
4287cf91a98SJoonsoo Kim 
4297cf91a98SJoonsoo Kim 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
4307cf91a98SJoonsoo Kim }
4317cf91a98SJoonsoo Kim 
432904d5857SKefeng Wang void set_zone_contiguous(struct zone *zone);
433904d5857SKefeng Wang 
434904d5857SKefeng Wang static inline void clear_zone_contiguous(struct zone *zone)
435904d5857SKefeng Wang {
436904d5857SKefeng Wang 	zone->contiguous = false;
437904d5857SKefeng Wang }
438904d5857SKefeng Wang 
4393c605096SJoonsoo Kim extern int __isolate_free_page(struct page *page, unsigned int order);
440624f58d8SAlexander Duyck extern void __putback_isolated_page(struct page *page, unsigned int order,
441624f58d8SAlexander Duyck 				    int mt);
4427c2ee349SMike Rapoport extern void memblock_free_pages(struct page *page, unsigned long pfn,
443d70ddd7aSMel Gorman 					unsigned int order);
444a9cd410aSArun KS extern void __free_pages_core(struct page *page, unsigned int order);
4459420f89dSMike Rapoport (IBM) 
4461e3be485STarun Sahu /*
4471e3be485STarun Sahu  * This will have no effect, other than possibly generating a warning, if the
4481e3be485STarun Sahu  * caller passes in a non-large folio.
4491e3be485STarun Sahu  */
4501e3be485STarun Sahu static inline void folio_set_order(struct folio *folio, unsigned int order)
4511e3be485STarun Sahu {
4521e3be485STarun Sahu 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
4531e3be485STarun Sahu 		return;
4541e3be485STarun Sahu 
455ebc1baf5SMatthew Wilcox (Oracle) 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
4561e3be485STarun Sahu #ifdef CONFIG_64BIT
4571e3be485STarun Sahu 	folio->_folio_nr_pages = 1U << order;
4581e3be485STarun Sahu #endif
4591e3be485STarun Sahu }
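/*
 * E.g. (editor's illustration): folio_set_order(folio, 9) stores 9 in the low
 * byte of _flags_1 and, on 64-bit, sets _folio_nr_pages to 1 << 9 = 512.
 */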
4601e3be485STarun Sahu 
461fc4951c3SHugh Dickins bool __folio_unqueue_deferred_split(struct folio *folio);
462fc4951c3SHugh Dickins static inline bool folio_unqueue_deferred_split(struct folio *folio)
463eb6b6d3eSKefeng Wang {
464eb6b6d3eSKefeng Wang 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
465fc4951c3SHugh Dickins 		return false;
466eb6b6d3eSKefeng Wang 
467eb6b6d3eSKefeng Wang 	/*
468eb6b6d3eSKefeng Wang 	 * At this point, there is no one trying to add the folio to
469eb6b6d3eSKefeng Wang 	 * deferred_list. If folio is not in deferred_list, it's safe
470eb6b6d3eSKefeng Wang 	 * to check without acquiring the split_queue_lock.
471eb6b6d3eSKefeng Wang 	 */
472eb6b6d3eSKefeng Wang 	if (data_race(list_empty(&folio->_deferred_list)))
473fc4951c3SHugh Dickins 		return false;
474eb6b6d3eSKefeng Wang 
475fc4951c3SHugh Dickins 	return __folio_unqueue_deferred_split(folio);
476eb6b6d3eSKefeng Wang }
4778dc4a8f1SMatthew Wilcox (Oracle) 
478bc899023SHugh Dickins static inline struct folio *page_rmappable_folio(struct page *page)
479bc899023SHugh Dickins {
480bc899023SHugh Dickins 	struct folio *folio = (struct folio *)page;
481bc899023SHugh Dickins 
482bc899023SHugh Dickins 	folio_prep_large_rmappable(folio);
483bc899023SHugh Dickins 	return folio;
484bc899023SHugh Dickins }
485bc899023SHugh Dickins 
4869420f89dSMike Rapoport (IBM) static inline void prep_compound_head(struct page *page, unsigned int order)
4879420f89dSMike Rapoport (IBM) {
4889420f89dSMike Rapoport (IBM) 	struct folio *folio = (struct folio *)page;
4899420f89dSMike Rapoport (IBM) 
4901e3be485STarun Sahu 	folio_set_order(folio, order);
4919420f89dSMike Rapoport (IBM) 	atomic_set(&folio->_entire_mapcount, -1);
4929420f89dSMike Rapoport (IBM) 	atomic_set(&folio->_nr_pages_mapped, 0);
4939420f89dSMike Rapoport (IBM) 	atomic_set(&folio->_pincount, 0);
4940275e402SMatthew Wilcox (Oracle) 	if (order > 1)
4950275e402SMatthew Wilcox (Oracle) 		INIT_LIST_HEAD(&folio->_deferred_list);
4969420f89dSMike Rapoport (IBM) }
4979420f89dSMike Rapoport (IBM) 
4989420f89dSMike Rapoport (IBM) static inline void prep_compound_tail(struct page *head, int tail_idx)
4999420f89dSMike Rapoport (IBM) {
5009420f89dSMike Rapoport (IBM) 	struct page *p = head + tail_idx;
5019420f89dSMike Rapoport (IBM) 
5029420f89dSMike Rapoport (IBM) 	p->mapping = TAIL_MAPPING;
5039420f89dSMike Rapoport (IBM) 	set_compound_head(p, head);
5049420f89dSMike Rapoport (IBM) 	set_page_private(p, 0);
5059420f89dSMike Rapoport (IBM) }
5069420f89dSMike Rapoport (IBM) 
507d00181b9SKirill A. Shutemov extern void prep_compound_page(struct page *page, unsigned int order);
5089420f89dSMike Rapoport (IBM) 
50946f24fd8SJoonsoo Kim extern void post_alloc_hook(struct page *page, unsigned int order,
51046f24fd8SJoonsoo Kim 					gfp_t gfp_flags);
51142aa83cbSHan Pingtian extern int user_min_free_kbytes;
51220a0307cSWu Fengguang 
51344042b44SMel Gorman extern void free_unref_page(struct page *page, unsigned int order);
5140966aeb4SMatthew Wilcox (Oracle) extern void free_unref_page_list(struct list_head *list);
5150966aeb4SMatthew Wilcox (Oracle) 
51668265390SMel Gorman extern void zone_pcp_reset(struct zone *zone);
517ec6e8c7eSVlastimil Babka extern void zone_pcp_disable(struct zone *zone);
518ec6e8c7eSVlastimil Babka extern void zone_pcp_enable(struct zone *zone);
5199420f89dSMike Rapoport (IBM) extern void zone_pcp_init(struct zone *zone);
52068265390SMel Gorman 
521c803b3c8SMike Rapoport extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
522c803b3c8SMike Rapoport 			  phys_addr_t min_addr,
523c803b3c8SMike Rapoport 			  int nid, bool exact_nid);
524c803b3c8SMike Rapoport 
525e95d372cSKefeng Wang void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
526e95d372cSKefeng Wang 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
527e95d372cSKefeng Wang 
528e95d372cSKefeng Wang 
52986d28b07SZi Yan int split_free_page(struct page *free_page,
53086d28b07SZi Yan 			unsigned int order, unsigned long split_pfn_offset);
531b2c9e2fbSZi Yan 
532ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA
533ff9543fdSMichal Nazarewicz 
534ff9543fdSMichal Nazarewicz /*
535ff9543fdSMichal Nazarewicz  * in mm/compaction.c
536ff9543fdSMichal Nazarewicz  */
537ff9543fdSMichal Nazarewicz /*
538ff9543fdSMichal Nazarewicz  * compact_control is used to track pages being migrated and the free pages
539ff9543fdSMichal Nazarewicz  * they are being migrated to during memory compaction. The free_pfn starts
540ff9543fdSMichal Nazarewicz  * at the end of a zone and migrate_pfn begins at the start. Movable pages
541ff9543fdSMichal Nazarewicz  * are moved to the end of a zone during a compaction run and the run
542ff9543fdSMichal Nazarewicz  * completes when free_pfn <= migrate_pfn
543ff9543fdSMichal Nazarewicz  */
544ff9543fdSMichal Nazarewicz struct compact_control {
545ff9543fdSMichal Nazarewicz 	struct list_head freepages;	/* List of free pages to migrate to */
546ff9543fdSMichal Nazarewicz 	struct list_head migratepages;	/* List of pages being migrated */
547c5fbd937SMel Gorman 	unsigned int nr_freepages;	/* Number of isolated free pages */
548c5fbd937SMel Gorman 	unsigned int nr_migratepages;	/* Number of pages to migrate */
549ff9543fdSMichal Nazarewicz 	unsigned long free_pfn;		/* isolate_freepages search base */
550c2ad7a1fSOscar Salvador 	/*
551c2ad7a1fSOscar Salvador 	 * Acts as an in/out parameter to page isolation for migration.
552c2ad7a1fSOscar Salvador 	 * isolate_migratepages uses it as a search base.
553c2ad7a1fSOscar Salvador 	 * isolate_migratepages_block will update the value to the next pfn
554c2ad7a1fSOscar Salvador 	 * after the last isolated one.
555c2ad7a1fSOscar Salvador 	 */
556c2ad7a1fSOscar Salvador 	unsigned long migrate_pfn;
55770b44595SMel Gorman 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
558c5943b9cSMel Gorman 	struct zone *zone;
559c5943b9cSMel Gorman 	unsigned long total_migrate_scanned;
560c5943b9cSMel Gorman 	unsigned long total_free_scanned;
561dbe2d4e4SMel Gorman 	unsigned short fast_search_fail;/* failures to use free list searches */
562dbe2d4e4SMel Gorman 	short search_order;		/* order to start a fast search at */
563f25ba6dcSVlastimil Babka 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
564f25ba6dcSVlastimil Babka 	int order;			/* order a direct compactor needs */
565d39773a0SVlastimil Babka 	int migratetype;		/* migratetype of direct compactor */
566f25ba6dcSVlastimil Babka 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
56797a225e6SJoonsoo Kim 	const int highest_zoneidx;	/* zone index of a direct compactor */
568e0b9daebSDavid Rientjes 	enum migrate_mode mode;		/* Async or sync migration mode */
569bb13ffebSMel Gorman 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
5702583d671SVlastimil Babka 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
5719f7e3387SVlastimil Babka 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
572accf6242SVlastimil Babka 	bool direct_compaction;		/* False from kcompactd or /proc/... */
573facdaa91SNitin Gupta 	bool proactive_compaction;	/* kcompactd proactive compaction */
57406ed2998SVlastimil Babka 	bool whole_zone;		/* Whole zone should/has been scanned */
575d56c1584SMiaohe Lin 	bool contended;			/* Signal lock contention */
57648731c84SMel Gorman 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
57748731c84SMel Gorman 					 * when there are potentially transient
57848731c84SMel Gorman 					 * isolation or migration failures to
57948731c84SMel Gorman 					 * ensure forward progress.
58048731c84SMel Gorman 					 */
581b06eda09SRik van Riel 	bool alloc_contig;		/* alloc_contig_range allocation */
582ff9543fdSMichal Nazarewicz };
583ff9543fdSMichal Nazarewicz 
5845e1f0f09SMel Gorman /*
5855e1f0f09SMel Gorman  * Used in direct compaction when a page should be taken from the freelists
5865e1f0f09SMel Gorman  * immediately when one is created during the free path.
5875e1f0f09SMel Gorman  */
5885e1f0f09SMel Gorman struct capture_control {
5895e1f0f09SMel Gorman 	struct compact_control *cc;
5905e1f0f09SMel Gorman 	struct page *page;
5915e1f0f09SMel Gorman };
5925e1f0f09SMel Gorman 
593ff9543fdSMichal Nazarewicz unsigned long
594bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
595bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn);
596c2ad7a1fSOscar Salvador int
597edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc,
598edc2ca61SVlastimil Babka 			   unsigned long low_pfn, unsigned long end_pfn);
599b2c9e2fbSZi Yan 
600b2c9e2fbSZi Yan int __alloc_contig_migrate_range(struct compact_control *cc,
601b2c9e2fbSZi Yan 					unsigned long start, unsigned long end);
6029420f89dSMike Rapoport (IBM) 
6039420f89dSMike Rapoport (IBM) /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
6049420f89dSMike Rapoport (IBM) void init_cma_reserved_pageblock(struct page *page);
6059420f89dSMike Rapoport (IBM) 
6069420f89dSMike Rapoport (IBM) #endif /* CONFIG_COMPACTION || CONFIG_CMA */
6079420f89dSMike Rapoport (IBM) 
6082149cdaeSJoonsoo Kim int find_suitable_fallback(struct free_area *area, unsigned int order,
6092149cdaeSJoonsoo Kim 			int migratetype, bool only_stealable, bool *can_steal);
610ff9543fdSMichal Nazarewicz 
61162f31bd4SMike Rapoport (IBM) static inline bool free_area_empty(struct free_area *area, int migratetype)
61262f31bd4SMike Rapoport (IBM) {
61362f31bd4SMike Rapoport (IBM) 	return list_empty(&area->free_list[migratetype]);
61462f31bd4SMike Rapoport (IBM) }
61562f31bd4SMike Rapoport (IBM) 
61648f13bf3SMel Gorman /*
61730bdbb78SKonstantin Khlebnikov  * These three helpers classify VMAs for virtual memory accounting.
61830bdbb78SKonstantin Khlebnikov  */
61930bdbb78SKonstantin Khlebnikov 
62030bdbb78SKonstantin Khlebnikov /*
62130bdbb78SKonstantin Khlebnikov  * Executable code area - executable, not writable, not stack
62230bdbb78SKonstantin Khlebnikov  */
623d977d56cSKonstantin Khlebnikov static inline bool is_exec_mapping(vm_flags_t flags)
624d977d56cSKonstantin Khlebnikov {
62530bdbb78SKonstantin Khlebnikov 	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
626d977d56cSKonstantin Khlebnikov }
627d977d56cSKonstantin Khlebnikov 
62830bdbb78SKonstantin Khlebnikov /*
62900547ef7SRick Edgecombe  * Stack area (including shadow stacks)
63030bdbb78SKonstantin Khlebnikov  *
63130bdbb78SKonstantin Khlebnikov  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
63230bdbb78SKonstantin Khlebnikov  * do_mmap() forbids all other combinations.
63330bdbb78SKonstantin Khlebnikov  */
634d977d56cSKonstantin Khlebnikov static inline bool is_stack_mapping(vm_flags_t flags)
635d977d56cSKonstantin Khlebnikov {
63600547ef7SRick Edgecombe 	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
637d977d56cSKonstantin Khlebnikov }
638d977d56cSKonstantin Khlebnikov 
63930bdbb78SKonstantin Khlebnikov /*
64030bdbb78SKonstantin Khlebnikov  * Data area - private, writable, not stack
64130bdbb78SKonstantin Khlebnikov  */
642d977d56cSKonstantin Khlebnikov static inline bool is_data_mapping(vm_flags_t flags)
643d977d56cSKonstantin Khlebnikov {
64430bdbb78SKonstantin Khlebnikov 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
645d977d56cSKonstantin Khlebnikov }
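/*
 * Quick illustration of the split above (editor's note): an r-x file mapping
 * (VM_EXEC, no VM_WRITE, no VM_STACK) counts as "exec"; a thread stack
 * (VM_STACK) or a shadow stack (VM_SHADOW_STACK) counts as "stack"; a private
 * writable mapping that is neither shared nor a stack counts as "data".
 */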
646d977d56cSKonstantin Khlebnikov 
6476038def0SNamhyung Kim /* mm/util.c */
648e05b3453SMatthew Wilcox (Oracle) struct anon_vma *folio_anon_vma(struct folio *folio);
6496038def0SNamhyung Kim 
650af8e3354SHugh Dickins #ifdef CONFIG_MMU
6513506659eSMatthew Wilcox (Oracle) void unmap_mapping_folio(struct folio *folio);
652fc05f566SKirill A. Shutemov extern long populate_vma_page_range(struct vm_area_struct *vma,
653a78f1ccdSDavid Hildenbrand 		unsigned long start, unsigned long end, int *locked);
6549e898211SDavid Hildenbrand extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
6559e898211SDavid Hildenbrand 		unsigned long end, bool write, int *locked);
656b0cc5e89SAndrew Morton extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
6573c54a298SLorenzo Stoakes 			       unsigned long bytes);
658b291f000SNick Piggin /*
6597efecffbSMatthew Wilcox (Oracle)  * mlock_vma_folio() and munlock_vma_folio():
660cea86fe2SHugh Dickins  * should be called with vma's mmap_lock held for read or write,
661cea86fe2SHugh Dickins  * under page table lock for the pte/pmd being added or removed.
662b291f000SNick Piggin  *
66396f97c43SLorenzo Stoakes  * mlock is usually called at the end of page_add_*_rmap(), munlock at
66496f97c43SLorenzo Stoakes  * the end of page_remove_rmap(); but new anon folios are managed by
66596f97c43SLorenzo Stoakes  * folio_add_lru_vma() calling mlock_new_folio().
666cea86fe2SHugh Dickins  *
667cea86fe2SHugh Dickins  * @compound is used to include pmd mappings of THPs, but filter out
668cea86fe2SHugh Dickins  * pte mappings of THPs, which cannot be consistently counted: a pte
669cea86fe2SHugh Dickins  * mapping of the THP head cannot be distinguished by the page alone.
670b291f000SNick Piggin  */
671dcc5d337SMatthew Wilcox (Oracle) void mlock_folio(struct folio *folio);
672dcc5d337SMatthew Wilcox (Oracle) static inline void mlock_vma_folio(struct folio *folio,
673cea86fe2SHugh Dickins 			struct vm_area_struct *vma, bool compound)
674cea86fe2SHugh Dickins {
675c8263bd6SHugh Dickins 	/*
676c8263bd6SHugh Dickins 	 * The VM_SPECIAL check here serves two purposes.
677c8263bd6SHugh Dickins 	 * 1) VM_IO check prevents migration from double-counting during mlock.
678c8263bd6SHugh Dickins 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
679c8263bd6SHugh Dickins 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
680c8263bd6SHugh Dickins 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
681c8263bd6SHugh Dickins 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
682c8263bd6SHugh Dickins 	 */
683c8263bd6SHugh Dickins 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
684dcc5d337SMatthew Wilcox (Oracle) 	    (compound || !folio_test_large(folio)))
685dcc5d337SMatthew Wilcox (Oracle) 		mlock_folio(folio);
686cea86fe2SHugh Dickins }
687dcc5d337SMatthew Wilcox (Oracle) 
68896f97c43SLorenzo Stoakes void munlock_folio(struct folio *folio);
68996f97c43SLorenzo Stoakes static inline void munlock_vma_folio(struct folio *folio,
690cea86fe2SHugh Dickins 			struct vm_area_struct *vma, bool compound)
691cea86fe2SHugh Dickins {
692cea86fe2SHugh Dickins 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
69396f97c43SLorenzo Stoakes 	    (compound || !folio_test_large(folio)))
69496f97c43SLorenzo Stoakes 		munlock_folio(folio);
695cea86fe2SHugh Dickins }
69696f97c43SLorenzo Stoakes 
69796f97c43SLorenzo Stoakes void mlock_new_folio(struct folio *folio);
69896f97c43SLorenzo Stoakes bool need_mlock_drain(int cpu);
69996f97c43SLorenzo Stoakes void mlock_drain_local(void);
70096f97c43SLorenzo Stoakes void mlock_drain_remote(int cpu);
701b291f000SNick Piggin 
702f55e1014SLinus Torvalds extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
703b32967ffSMel Gorman 
704e9b61f19SKirill A. Shutemov /*
7056a8e0596SMuchun Song  * Return the start of user virtual address at the specific offset within
7066a8e0596SMuchun Song  * a vma.
707e9b61f19SKirill A. Shutemov  */
708e9b61f19SKirill A. Shutemov static inline unsigned long
7096a8e0596SMuchun Song vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
7106a8e0596SMuchun Song 		  struct vm_area_struct *vma)
711e9b61f19SKirill A. Shutemov {
712494334e4SHugh Dickins 	unsigned long address;
713a8fa41adSKirill A. Shutemov 
714494334e4SHugh Dickins 	if (pgoff >= vma->vm_pgoff) {
715494334e4SHugh Dickins 		address = vma->vm_start +
716494334e4SHugh Dickins 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
717494334e4SHugh Dickins 		/* Check for address beyond vma (or wrapped through 0?) */
718494334e4SHugh Dickins 		if (address < vma->vm_start || address >= vma->vm_end)
719494334e4SHugh Dickins 			address = -EFAULT;
7206a8e0596SMuchun Song 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
721494334e4SHugh Dickins 		/* Test above avoids possibility of wrap to 0 on 32-bit */
722494334e4SHugh Dickins 		address = vma->vm_start;
723494334e4SHugh Dickins 	} else {
724494334e4SHugh Dickins 		address = -EFAULT;
725494334e4SHugh Dickins 	}
726494334e4SHugh Dickins 	return address;
727494334e4SHugh Dickins }
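/*
 * Worked example (editor's illustration): for a vma with vm_start == 0x100000
 * and vm_pgoff == 0x10, asking for pgoff == 0x12, nr_pages == 1 with 4kB pages
 * yields 0x100000 + ((0x12 - 0x10) << 12) = 0x102000; a range entirely below
 * vm_pgoff, or an address at or beyond vm_end, yields -EFAULT instead.
 */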
728e9b61f19SKirill A. Shutemov 
729494334e4SHugh Dickins /*
7306a8e0596SMuchun Song  * Return the start of user virtual address of a page within a vma.
7316a8e0596SMuchun Song  * Returns -EFAULT if all of the page is outside the range of vma.
7326a8e0596SMuchun Song  * If page is a compound head, the entire compound page is considered.
7336a8e0596SMuchun Song  */
7346a8e0596SMuchun Song static inline unsigned long
7356a8e0596SMuchun Song vma_address(struct page *page, struct vm_area_struct *vma)
7366a8e0596SMuchun Song {
7376a8e0596SMuchun Song 	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
7386a8e0596SMuchun Song 	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
7396a8e0596SMuchun Song }
7406a8e0596SMuchun Song 
7416a8e0596SMuchun Song /*
7422aff7a47SMatthew Wilcox (Oracle)  * Then at what user virtual address will none of the range be found in vma?
743494334e4SHugh Dickins  * Assumes that vma_address() already returned a good starting address.
744494334e4SHugh Dickins  */
7452aff7a47SMatthew Wilcox (Oracle) static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
746494334e4SHugh Dickins {
7472aff7a47SMatthew Wilcox (Oracle) 	struct vm_area_struct *vma = pvmw->vma;
748494334e4SHugh Dickins 	pgoff_t pgoff;
749494334e4SHugh Dickins 	unsigned long address;
750e9b61f19SKirill A. Shutemov 
7512aff7a47SMatthew Wilcox (Oracle) 	/* Common case, plus ->pgoff is invalid for KSM */
7522aff7a47SMatthew Wilcox (Oracle) 	if (pvmw->nr_pages == 1)
7532aff7a47SMatthew Wilcox (Oracle) 		return pvmw->address + PAGE_SIZE;
7542aff7a47SMatthew Wilcox (Oracle) 
7552aff7a47SMatthew Wilcox (Oracle) 	pgoff = pvmw->pgoff + pvmw->nr_pages;
756494334e4SHugh Dickins 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
757494334e4SHugh Dickins 	/* Check for address beyond vma (or wrapped through 0?) */
758494334e4SHugh Dickins 	if (address < vma->vm_start || address > vma->vm_end)
759494334e4SHugh Dickins 		address = vma->vm_end;
760494334e4SHugh Dickins 	return address;
761e9b61f19SKirill A. Shutemov }
762e9b61f19SKirill A. Shutemov 
76389b15332SJohannes Weiner static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
76489b15332SJohannes Weiner 						    struct file *fpin)
76589b15332SJohannes Weiner {
76689b15332SJohannes Weiner 	int flags = vmf->flags;
76789b15332SJohannes Weiner 
76889b15332SJohannes Weiner 	if (fpin)
76989b15332SJohannes Weiner 		return fpin;
77089b15332SJohannes Weiner 
77189b15332SJohannes Weiner 	/*
77289b15332SJohannes Weiner 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
773c1e8d7c6SMichel Lespinasse 	 * anything, so we only pin the file and drop the mmap_lock if only
7744064b982SPeter Xu 	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
77589b15332SJohannes Weiner 	 */
7764064b982SPeter Xu 	if (fault_flag_allow_retry_first(flags) &&
7774064b982SPeter Xu 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
77889b15332SJohannes Weiner 		fpin = get_file(vmf->vma->vm_file);
7790790e1e2SMatthew Wilcox (Oracle) 		release_fault_lock(vmf);
78089b15332SJohannes Weiner 	}
78189b15332SJohannes Weiner 	return fpin;
78289b15332SJohannes Weiner }
783af8e3354SHugh Dickins #else /* !CONFIG_MMU */
7843506659eSMatthew Wilcox (Oracle) static inline void unmap_mapping_folio(struct folio *folio) { }
78596f97c43SLorenzo Stoakes static inline void mlock_new_folio(struct folio *folio) { }
78696f97c43SLorenzo Stoakes static inline bool need_mlock_drain(int cpu) { return false; }
78796f97c43SLorenzo Stoakes static inline void mlock_drain_local(void) { }
78896f97c43SLorenzo Stoakes static inline void mlock_drain_remote(int cpu) { }
7894ad0ae8cSNicholas Piggin static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
7904ad0ae8cSNicholas Piggin {
7914ad0ae8cSNicholas Piggin }
792af8e3354SHugh Dickins #endif /* !CONFIG_MMU */
793894bc310SLee Schermerhorn 
7946b74ab97SMel Gorman /* Memory initialisation debug and verification */
7959420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7969420f89dSMike Rapoport (IBM) DECLARE_STATIC_KEY_TRUE(deferred_pages);
7979420f89dSMike Rapoport (IBM) 
7989420f89dSMike Rapoport (IBM) bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
7999420f89dSMike Rapoport (IBM) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
8009420f89dSMike Rapoport (IBM) 
8016b74ab97SMel Gorman enum mminit_level {
8026b74ab97SMel Gorman 	MMINIT_WARNING,
8036b74ab97SMel Gorman 	MMINIT_VERIFY,
8046b74ab97SMel Gorman 	MMINIT_TRACE
8056b74ab97SMel Gorman };
8066b74ab97SMel Gorman 
8076b74ab97SMel Gorman #ifdef CONFIG_DEBUG_MEMORY_INIT
8086b74ab97SMel Gorman 
8096b74ab97SMel Gorman extern int mminit_loglevel;
8106b74ab97SMel Gorman 
8116b74ab97SMel Gorman #define mminit_dprintk(level, prefix, fmt, arg...) \
8126b74ab97SMel Gorman do { \
8136b74ab97SMel Gorman 	if (level < mminit_loglevel) { \
814fc5199d1SRasmus Villemoes 		if (level <= MMINIT_WARNING) \
8151170532bSJoe Perches 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
816fc5199d1SRasmus Villemoes 		else \
817fc5199d1SRasmus Villemoes 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
8186b74ab97SMel Gorman 	} \
8196b74ab97SMel Gorman } while (0)
8206b74ab97SMel Gorman 
821708614e6SMel Gorman extern void mminit_verify_pageflags_layout(void);
82268ad8df4SMel Gorman extern void mminit_verify_zonelist(void);
8236b74ab97SMel Gorman #else
8246b74ab97SMel Gorman 
8256b74ab97SMel Gorman static inline void mminit_dprintk(enum mminit_level level,
8266b74ab97SMel Gorman 				const char *prefix, const char *fmt, ...)
8276b74ab97SMel Gorman {
8286b74ab97SMel Gorman }
8296b74ab97SMel Gorman 
830708614e6SMel Gorman static inline void mminit_verify_pageflags_layout(void)
831708614e6SMel Gorman {
832708614e6SMel Gorman }
833708614e6SMel Gorman 
83468ad8df4SMel Gorman static inline void mminit_verify_zonelist(void)
83568ad8df4SMel Gorman {
83668ad8df4SMel Gorman }
8376b74ab97SMel Gorman #endif /* CONFIG_DEBUG_MEMORY_INIT */
8382dbb51c4SMel Gorman 
839a5f5f91dSMel Gorman #define NODE_RECLAIM_NOSCAN	-2
840a5f5f91dSMel Gorman #define NODE_RECLAIM_FULL	-1
841a5f5f91dSMel Gorman #define NODE_RECLAIM_SOME	0
842a5f5f91dSMel Gorman #define NODE_RECLAIM_SUCCESS	1
8437c116f2bSWu Fengguang 
8448b09549cSWei Yang #ifdef CONFIG_NUMA
8458b09549cSWei Yang extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
84679c28a41SDave Hansen extern int find_next_best_node(int node, nodemask_t *used_node_mask);
8478b09549cSWei Yang #else
8488b09549cSWei Yang static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
8498b09549cSWei Yang 				unsigned int order)
8508b09549cSWei Yang {
8518b09549cSWei Yang 	return NODE_RECLAIM_NOSCAN;
8528b09549cSWei Yang }
85379c28a41SDave Hansen static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
85479c28a41SDave Hansen {
85579c28a41SDave Hansen 	return NUMA_NO_NODE;
85679c28a41SDave Hansen }
8578b09549cSWei Yang #endif
8588b09549cSWei Yang 
85960f272f6Szhenwei pi /*
86060f272f6Szhenwei pi  * mm/memory-failure.c
86160f272f6Szhenwei pi  */
86231d3d348SWu Fengguang extern int hwpoison_filter(struct page *p);
86331d3d348SWu Fengguang 
8647c116f2bSWu Fengguang extern u32 hwpoison_filter_dev_major;
8657c116f2bSWu Fengguang extern u32 hwpoison_filter_dev_minor;
866478c5ffcSWu Fengguang extern u64 hwpoison_filter_flags_mask;
867478c5ffcSWu Fengguang extern u64 hwpoison_filter_flags_value;
8684fd466ebSAndi Kleen extern u64 hwpoison_filter_memcg;
8691bfe5febSHaicheng Li extern u32 hwpoison_filter_enable;
870eb36c587SAl Viro 
871dc0ef0dfSMichal Hocko extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
872eb36c587SAl Viro         unsigned long, unsigned long,
8739fbeb5abSMichal Hocko         unsigned long, unsigned long);
874ca57df79SXishi Qiu 
875ca57df79SXishi Qiu extern void set_pageblock_order(void);
8764bf4f155SKefeng Wang unsigned long reclaim_pages(struct list_head *folio_list);
877730ec8c0SManinder Singh unsigned int reclaim_clean_pages_from_list(struct zone *zone,
8784bf4f155SKefeng Wang 					    struct list_head *folio_list);
879d95ea5d1SBartlomiej Zolnierkiewicz /* The ALLOC_WMARK bits are used as an index to zone->watermark */
880d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_WMARK_MIN		WMARK_MIN
881d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_WMARK_LOW		WMARK_LOW
882d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_WMARK_HIGH	WMARK_HIGH
883d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
884d95ea5d1SBartlomiej Zolnierkiewicz 
885d95ea5d1SBartlomiej Zolnierkiewicz /* Mask to get the watermark bits */
886d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
887d95ea5d1SBartlomiej Zolnierkiewicz 
888cd04ae1eSMichal Hocko /*
889cd04ae1eSMichal Hocko  * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so we
890cd04ae1eSMichal Hocko  * cannot assume that reduced access to memory reserves is sufficient for
891cd04ae1eSMichal Hocko  * !MMU.
892cd04ae1eSMichal Hocko  */
893cd04ae1eSMichal Hocko #ifdef CONFIG_MMU
894cd04ae1eSMichal Hocko #define ALLOC_OOM		0x08
895cd04ae1eSMichal Hocko #else
896cd04ae1eSMichal Hocko #define ALLOC_OOM		ALLOC_NO_WATERMARKS
897cd04ae1eSMichal Hocko #endif
898cd04ae1eSMichal Hocko 
8991ebbb218SMel Gorman #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
9001ebbb218SMel Gorman 				       * to 25% of the min watermark or
9011ebbb218SMel Gorman 				       * 62.5% if __GFP_HIGH is set.
9021ebbb218SMel Gorman 				       */
903524c4807SMel Gorman #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
904524c4807SMel Gorman 				       * of the min watermark.
905524c4807SMel Gorman 				       */
906d95ea5d1SBartlomiej Zolnierkiewicz #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
907d883c6cfSJoonsoo Kim #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
9086bb15450SMel Gorman #ifdef CONFIG_ZONE_DMA32
9096bb15450SMel Gorman #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
9106bb15450SMel Gorman #else
9116bb15450SMel Gorman #define ALLOC_NOFRAGMENT	  0x0
9126bb15450SMel Gorman #endif
913eb2e2b42SMel Gorman #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
914736838e9SMateusz Nosek #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
915d95ea5d1SBartlomiej Zolnierkiewicz 
916ab350885SMel Gorman /* Flags that allow allocations below the min watermark. */
9171ebbb218SMel Gorman #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
918ab350885SMel Gorman 
91972b252aeSMel Gorman enum ttu_flags;
92072b252aeSMel Gorman struct tlbflush_unmap_batch;
92172b252aeSMel Gorman 
922ce612879SMichal Hocko 
923ce612879SMichal Hocko /*
924ce612879SMichal Hocko  * only for MM internal work items which do not depend on
925ce612879SMichal Hocko  * any allocations or locks which might depend on allocations
926ce612879SMichal Hocko  */
927ce612879SMichal Hocko extern struct workqueue_struct *mm_percpu_wq;
928ce612879SMichal Hocko 
92972b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
93072b252aeSMel Gorman void try_to_unmap_flush(void);
931d950c947SMel Gorman void try_to_unmap_flush_dirty(void);
9323ea27719SMel Gorman void flush_tlb_batched_pending(struct mm_struct *mm);
93372b252aeSMel Gorman #else
93472b252aeSMel Gorman static inline void try_to_unmap_flush(void)
93572b252aeSMel Gorman {
93672b252aeSMel Gorman }
937d950c947SMel Gorman static inline void try_to_unmap_flush_dirty(void)
938d950c947SMel Gorman {
939d950c947SMel Gorman }
9403ea27719SMel Gorman static inline void flush_tlb_batched_pending(struct mm_struct *mm)
9413ea27719SMel Gorman {
9423ea27719SMel Gorman }
94372b252aeSMel Gorman #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
944edf14cdbSVlastimil Babka 
945edf14cdbSVlastimil Babka extern const struct trace_print_flags pageflag_names[];
9464c85c0beSHyeonggon Yoo extern const struct trace_print_flags pagetype_names[];
947edf14cdbSVlastimil Babka extern const struct trace_print_flags vmaflag_names[];
948edf14cdbSVlastimil Babka extern const struct trace_print_flags gfpflag_names[];
949edf14cdbSVlastimil Babka 
950a6ffdc07SXishi Qiu static inline bool is_migrate_highatomic(enum migratetype migratetype)
951a6ffdc07SXishi Qiu {
952a6ffdc07SXishi Qiu 	return migratetype == MIGRATE_HIGHATOMIC;
953a6ffdc07SXishi Qiu }
954a6ffdc07SXishi Qiu 
955a6ffdc07SXishi Qiu static inline bool is_migrate_highatomic_page(struct page *page)
956a6ffdc07SXishi Qiu {
957a6ffdc07SXishi Qiu 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
958a6ffdc07SXishi Qiu }
959a6ffdc07SXishi Qiu 
96072675e13SMichal Hocko void setup_zone_pageset(struct zone *zone);
96119fc7bedSJoonsoo Kim 
96219fc7bedSJoonsoo Kim struct migration_target_control {
96319fc7bedSJoonsoo Kim 	int nid;		/* preferred node id */
96419fc7bedSJoonsoo Kim 	nodemask_t *nmask;
96519fc7bedSJoonsoo Kim 	gfp_t gfp_mask;
96619fc7bedSJoonsoo Kim };
96719fc7bedSJoonsoo Kim 
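/*
 * A hedged usage sketch: callers fill a migration_target_control and pass
 * it as the opaque 'private' argument of the shared allocation callback.
 * The migrate_pages()/alloc_migration_target() calling convention shown
 * here follows recent kernels and may differ across versions; the helper
 * name is hypothetical.
 */
static inline int migrate_list_to_node(struct list_head *pages, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,			/* preferred target node */
		.nmask = NULL,			/* no nodemask restriction */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	return migrate_pages(pages, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC,
			     MR_MEMORY_HOTPLUG, NULL);
}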
968b67177ecSNicholas Piggin /*
96907073eb0SDavid Howells  * mm/filemap.c
97007073eb0SDavid Howells  */
97107073eb0SDavid Howells size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
97207073eb0SDavid Howells 			      struct folio *folio, loff_t fpos, size_t size);
97307073eb0SDavid Howells 
97407073eb0SDavid Howells /*
975b67177ecSNicholas Piggin  * mm/vmalloc.c
976b67177ecSNicholas Piggin  */
9774ad0ae8cSNicholas Piggin #ifdef CONFIG_MMU
978b6714911SMike Rapoport (IBM) void __init vmalloc_init(void);
979d905ae2bSAlexander Potapenko int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
980b67177ecSNicholas Piggin                 pgprot_t prot, struct page **pages, unsigned int page_shift);
9814ad0ae8cSNicholas Piggin #else
982b6714911SMike Rapoport (IBM) static inline void vmalloc_init(void)
983b6714911SMike Rapoport (IBM) {
984b6714911SMike Rapoport (IBM) }
985b6714911SMike Rapoport (IBM) 
9864ad0ae8cSNicholas Piggin static inline
987d905ae2bSAlexander Potapenko int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
9884ad0ae8cSNicholas Piggin                 pgprot_t prot, struct page **pages, unsigned int page_shift)
9894ad0ae8cSNicholas Piggin {
9904ad0ae8cSNicholas Piggin 	return -EINVAL;
9914ad0ae8cSNicholas Piggin }
9924ad0ae8cSNicholas Piggin #endif
9934ad0ae8cSNicholas Piggin 
994d905ae2bSAlexander Potapenko int __must_check __vmap_pages_range_noflush(unsigned long addr,
995d905ae2bSAlexander Potapenko 			       unsigned long end, pgprot_t prot,
996d905ae2bSAlexander Potapenko 			       struct page **pages, unsigned int page_shift);
997b073d7f8SAlexander Potapenko 
9984ad0ae8cSNicholas Piggin void vunmap_range_noflush(unsigned long start, unsigned long end);
999b67177ecSNicholas Piggin 
1000b073d7f8SAlexander Potapenko void __vunmap_range_noflush(unsigned long start, unsigned long end);
1001b073d7f8SAlexander Potapenko 
1002f4c0d836SYang Shi int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
1003f4c0d836SYang Shi 		      unsigned long addr, int page_nid, int *flags);
1004f4c0d836SYang Shi 
100527674ef6SChristoph Hellwig void free_zone_device_page(struct page *page);
1006b05a79d4SAlistair Popple int migrate_device_coherent_page(struct page *page);
100727674ef6SChristoph Hellwig 
1008ece1ed7bSMatthew Wilcox (Oracle) /*
1009ece1ed7bSMatthew Wilcox (Oracle)  * mm/gup.c
1010ece1ed7bSMatthew Wilcox (Oracle)  */
101126273f5fSYang Shi int __must_check try_grab_folio(struct folio *folio, int refs,
101226273f5fSYang Shi 				unsigned int flags);
1013ece1ed7bSMatthew Wilcox (Oracle) 
10148b9c1cc0SDavid Hildenbrand /*
10158b9c1cc0SDavid Hildenbrand  * mm/huge_memory.c
10168b9c1cc0SDavid Hildenbrand  */
10178b9c1cc0SDavid Hildenbrand struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
10188b9c1cc0SDavid Hildenbrand 				   unsigned long addr, pmd_t *pmd,
10198b9c1cc0SDavid Hildenbrand 				   unsigned int flags);
10208b9c1cc0SDavid Hildenbrand 
10212c224108SJason Gunthorpe enum {
10222c224108SJason Gunthorpe 	/* mark page accessed */
10232c224108SJason Gunthorpe 	FOLL_TOUCH = 1 << 16,
10242c224108SJason Gunthorpe 	/* a retry, previous pass started an IO */
10252c224108SJason Gunthorpe 	FOLL_TRIED = 1 << 17,
10262c224108SJason Gunthorpe 	/* we are working on non-current tsk/mm */
10272c224108SJason Gunthorpe 	FOLL_REMOTE = 1 << 18,
10282c224108SJason Gunthorpe 	/* pages must be released via unpin_user_page */
10292c224108SJason Gunthorpe 	FOLL_PIN = 1 << 19,
10302c224108SJason Gunthorpe 	/* gup_fast: prevent fall-back to slow gup */
10312c224108SJason Gunthorpe 	FOLL_FAST_ONLY = 1 << 20,
10322c224108SJason Gunthorpe 	/* allow unlocking the mmap lock */
10332c224108SJason Gunthorpe 	FOLL_UNLOCKABLE = 1 << 21,
10349e898211SDavid Hildenbrand 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
10359e898211SDavid Hildenbrand 	FOLL_MADV_POPULATE = 1 << 22,
10362c224108SJason Gunthorpe };
10372c224108SJason Gunthorpe 
103849db746dSLorenzo Stoakes #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
10399e898211SDavid Hildenbrand 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
10409e898211SDavid Hildenbrand 			    FOLL_MADV_POPULATE)
104149db746dSLorenzo Stoakes 
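/*
 * The internal FOLL_* bits above start at bit 16 so they can never collide
 * with the public FOLL_* flags.  A minimal sketch of the kind of sanity
 * check a GUP entry point can apply to caller-supplied flags (in the spirit
 * of is_valid_gup_args(), simplified; the helper name is hypothetical):
 */
static inline bool gup_flags_sane(unsigned int gup_flags)
{
	/* callers must never pass bits reserved for internal GUP use */
	return !WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS);
}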
104263b60512SJason Gunthorpe /*
104363b60512SJason Gunthorpe  * Indicates whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE
104463b60512SJason Gunthorpe  * for a page that is write-protected in the page table, so that the GUP
104563b60512SJason Gunthorpe  * pin remains consistent with the pages mapped into the page tables
104663b60512SJason Gunthorpe  * of the MM.
104763b60512SJason Gunthorpe  *
104863b60512SJason Gunthorpe  * Temporary unmapping of PageAnonExclusive() pages or clearing of
104963b60512SJason Gunthorpe  * PageAnonExclusive() has to protect against concurrent GUP:
105063b60512SJason Gunthorpe  * * Ordinary GUP: Using the PT lock
105163b60512SJason Gunthorpe  * * GUP-fast and fork(): mm->write_protect_seq
105263b60512SJason Gunthorpe  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
105363b60512SJason Gunthorpe  *    page_try_share_anon_rmap()
105463b60512SJason Gunthorpe  *
105563b60512SJason Gunthorpe  * Must be called with the (sub)page that's actually referenced via the
105663b60512SJason Gunthorpe  * page table entry, which might not necessarily be the head page for a
105763b60512SJason Gunthorpe  * PTE-mapped THP.
105863b60512SJason Gunthorpe  *
105963b60512SJason Gunthorpe  * If the vma is NULL, we're coming from the GUP-fast path and might have
106063b60512SJason Gunthorpe  * to fall back to the slow path just to look up the vma.
106163b60512SJason Gunthorpe  */
106263b60512SJason Gunthorpe static inline bool gup_must_unshare(struct vm_area_struct *vma,
106363b60512SJason Gunthorpe 				    unsigned int flags, struct page *page)
106463b60512SJason Gunthorpe {
106563b60512SJason Gunthorpe 	/*
106663b60512SJason Gunthorpe 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
106763b60512SJason Gunthorpe 	 * has to be writable -- and if it references (part of) an anonymous
106863b60512SJason Gunthorpe 	 * folio, that part is required to be marked exclusive.
106963b60512SJason Gunthorpe 	 */
107063b60512SJason Gunthorpe 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
107163b60512SJason Gunthorpe 		return false;
107263b60512SJason Gunthorpe 	/*
107363b60512SJason Gunthorpe 	 * Note: PageAnon(page) is stable until the page is actually getting
107463b60512SJason Gunthorpe 	 * Note: PageAnon(page) is stable until the page is actually
107563b60512SJason Gunthorpe 	 * freed.
107663b60512SJason Gunthorpe 	if (!PageAnon(page)) {
107763b60512SJason Gunthorpe 		/*
107863b60512SJason Gunthorpe 		 * We only care about R/O long-term pinning: R/O short-term
107963b60512SJason Gunthorpe 		 * pinning does not have the semantics to observe successive
108063b60512SJason Gunthorpe 		 * changes through the process page tables.
108163b60512SJason Gunthorpe 		 */
108263b60512SJason Gunthorpe 		if (!(flags & FOLL_LONGTERM))
108363b60512SJason Gunthorpe 			return false;
108463b60512SJason Gunthorpe 
108563b60512SJason Gunthorpe 		/* We really need the vma ... */
108663b60512SJason Gunthorpe 		if (!vma)
108763b60512SJason Gunthorpe 			return true;
108863b60512SJason Gunthorpe 
108963b60512SJason Gunthorpe 		/*
109063b60512SJason Gunthorpe 		 * ... because we only care about writable private ("COW")
109163b60512SJason Gunthorpe 		 * mappings where we have to break COW early.
109263b60512SJason Gunthorpe 		 */
109363b60512SJason Gunthorpe 		return is_cow_mapping(vma->vm_flags);
109463b60512SJason Gunthorpe 	}
109563b60512SJason Gunthorpe 
109663b60512SJason Gunthorpe 	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
109763b60512SJason Gunthorpe 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
109863b60512SJason Gunthorpe 		smp_rmb();
109963b60512SJason Gunthorpe 
110063b60512SJason Gunthorpe 	/*
11015805192cSDavid Hildenbrand 	 * During GUP-fast we might not get called on the head page for a
11025805192cSDavid Hildenbrand 	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
11035805192cSDavid Hildenbrand 	 * not work with the abstracted hugetlb PTEs that always point at the
11045805192cSDavid Hildenbrand 	 * head page. For hugetlb, PageAnonExclusive only applies on the head
11055805192cSDavid Hildenbrand 	 * page (as it cannot be partially COW-shared), so lookup the head page.
11065805192cSDavid Hildenbrand 	 */
11075805192cSDavid Hildenbrand 	if (unlikely(!PageHead(page) && PageHuge(page)))
11085805192cSDavid Hildenbrand 		page = compound_head(page);
11095805192cSDavid Hildenbrand 
11105805192cSDavid Hildenbrand 	/*
111163b60512SJason Gunthorpe 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
111263b60512SJason Gunthorpe 	 * cannot get pinned.
111363b60512SJason Gunthorpe 	 */
111463b60512SJason Gunthorpe 	return !PageAnonExclusive(page);
111563b60512SJason Gunthorpe }
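
/*
 * Sketch of the caller side: when a page-table walker hits a write-protected
 * PTE for a FOLL_PIN request, it bails out so the fault path can unshare the
 * page via FAULT_FLAG_UNSHARE.  The -EMLINK convention follows mm/gup.c; the
 * surrounding helper is illustrative only.
 */
static inline int pin_check_pte(struct vm_area_struct *vma, pte_t pte,
				struct page *page, unsigned int flags)
{
	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
		return -EMLINK;	/* caller retries through the fault path */
	return 0;
}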
11161da177e4SLinus Torvalds 
1117902c2d91SMa Wupeng extern bool mirrored_kernelcore;
11180db31d63SMa Wupeng extern bool memblock_has_mirror(void);
1119902c2d91SMa Wupeng 
112076aefad6SPeter Xu static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
112176aefad6SPeter Xu {
112276aefad6SPeter Xu 	/*
112376aefad6SPeter Xu 	 * NOTE: we must check this before checking VM_SOFTDIRTY, because
112476aefad6SPeter Xu 	 * when soft-dirty is not compiled in, VM_SOFTDIRTY is defined as
112576aefad6SPeter Xu 	 * 0x0, so !(vm_flags & VM_SOFTDIRTY) would always be true.
112776aefad6SPeter Xu 	 */
112876aefad6SPeter Xu 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
112976aefad6SPeter Xu 		return false;
113076aefad6SPeter Xu 
113176aefad6SPeter Xu 	/*
113276aefad6SPeter Xu 	 * Soft-dirty is kind of special: tracking is enabled when the
113376aefad6SPeter Xu 	 * VM_SOFTDIRTY vma flag is *not* set.
113476aefad6SPeter Xu 	 */
113576aefad6SPeter Xu 	return !(vma->vm_flags & VM_SOFTDIRTY);
113676aefad6SPeter Xu }
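
/*
 * Typical consumer, patterned on can_change_pte_writable(): do not map a PTE
 * writable early while soft-dirty tracking is live and the PTE has not yet
 * recorded a write, otherwise the next write would go unnoticed.  The helper
 * name is hypothetical; illustrative sketch only.
 */
static inline bool softdirty_allows_early_write(struct vm_area_struct *vma,
						pte_t pte)
{
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;	/* keep the write fault so soft-dirty is set */
	return true;
}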
113776aefad6SPeter Xu 
113953bee98dSLiam R. Howlett static inline void vma_iter_config(struct vma_iterator *vmi,
113953bee98dSLiam R. Howlett 		unsigned long index, unsigned long last)
114053bee98dSLiam R. Howlett {
114153bee98dSLiam R. Howlett 	MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
114253bee98dSLiam R. Howlett 		   (vmi->mas.index > index || vmi->mas.last < index));
114353bee98dSLiam R. Howlett 	__mas_set_range(&vmi->mas, index, last - 1);
114453bee98dSLiam R. Howlett }
114553bee98dSLiam R. Howlett 
1146b62b633eSLiam R. Howlett /*
1147b62b633eSLiam R. Howlett  * VMA Iterator functions shared between nommu and mmap
1148b62b633eSLiam R. Howlett  */
1149b5df0922SLiam R. Howlett static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1150b5df0922SLiam R. Howlett 		struct vm_area_struct *vma)
1151b62b633eSLiam R. Howlett {
1152b5df0922SLiam R. Howlett 	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1153b62b633eSLiam R. Howlett }
1154b62b633eSLiam R. Howlett 
1155b5df0922SLiam R. Howlett static inline void vma_iter_clear(struct vma_iterator *vmi)
1156b62b633eSLiam R. Howlett {
1157b62b633eSLiam R. Howlett 	mas_store_prealloc(&vmi->mas, NULL);
1158b62b633eSLiam R. Howlett }
1159b62b633eSLiam R. Howlett 
1160f72cf24aSLiam R. Howlett static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1161f72cf24aSLiam R. Howlett 			unsigned long start, unsigned long end, gfp_t gfp)
1162f72cf24aSLiam R. Howlett {
1163b5df0922SLiam R. Howlett 	__mas_set_range(&vmi->mas, start, end - 1);
1164f72cf24aSLiam R. Howlett 	mas_store_gfp(&vmi->mas, NULL, gfp);
1165f72cf24aSLiam R. Howlett 	if (unlikely(mas_is_err(&vmi->mas)))
1166f72cf24aSLiam R. Howlett 		return -ENOMEM;
1167f72cf24aSLiam R. Howlett 
1168f72cf24aSLiam R. Howlett 	return 0;
1169f72cf24aSLiam R. Howlett }
1170f72cf24aSLiam R. Howlett 
1171b62b633eSLiam R. Howlett static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1172b62b633eSLiam R. Howlett {
1173b62b633eSLiam R. Howlett 	return mas_walk(&vmi->mas);
1174b62b633eSLiam R. Howlett }
1175b62b633eSLiam R. Howlett 
1176b62b633eSLiam R. Howlett /* Store a VMA with preallocated memory */
1177b62b633eSLiam R. Howlett static inline void vma_iter_store(struct vma_iterator *vmi,
1178b62b633eSLiam R. Howlett 				  struct vm_area_struct *vma)
1179b62b633eSLiam R. Howlett {
1180b62b633eSLiam R. Howlett 
1181b62b633eSLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
118236bd9310SLiam R. Howlett 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
118336bd9310SLiam R. Howlett 			vmi->mas.index > vma->vm_start)) {
118436bd9310SLiam R. Howlett 		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
118536bd9310SLiam R. Howlett 			vmi->mas.index, vma->vm_start, vma->vm_start,
118636bd9310SLiam R. Howlett 			vma->vm_end, vmi->mas.index, vmi->mas.last);
1187b62b633eSLiam R. Howlett 	}
118836bd9310SLiam R. Howlett 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
118936bd9310SLiam R. Howlett 			vmi->mas.last <  vma->vm_start)) {
119036bd9310SLiam R. Howlett 		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
119136bd9310SLiam R. Howlett 		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
119236bd9310SLiam R. Howlett 		       vmi->mas.index, vmi->mas.last);
1193b62b633eSLiam R. Howlett 	}
1194b62b633eSLiam R. Howlett #endif
1195b62b633eSLiam R. Howlett 
1196b62b633eSLiam R. Howlett 	if (vmi->mas.node != MAS_START &&
1197b62b633eSLiam R. Howlett 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1198b62b633eSLiam R. Howlett 		vma_iter_invalidate(vmi);
1199b62b633eSLiam R. Howlett 
1200b5df0922SLiam R. Howlett 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1201b62b633eSLiam R. Howlett 	mas_store_prealloc(&vmi->mas, vma);
1202b62b633eSLiam R. Howlett }
1203b62b633eSLiam R. Howlett 
1204b62b633eSLiam R. Howlett static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1205b62b633eSLiam R. Howlett 			struct vm_area_struct *vma, gfp_t gfp)
1206b62b633eSLiam R. Howlett {
1207b62b633eSLiam R. Howlett 	if (vmi->mas.node != MAS_START &&
1208b62b633eSLiam R. Howlett 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1209b62b633eSLiam R. Howlett 		vma_iter_invalidate(vmi);
1210b62b633eSLiam R. Howlett 
1211b5df0922SLiam R. Howlett 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1212b62b633eSLiam R. Howlett 	mas_store_gfp(&vmi->mas, vma, gfp);
1213b62b633eSLiam R. Howlett 	if (unlikely(mas_is_err(&vmi->mas)))
1214b62b633eSLiam R. Howlett 		return -ENOMEM;
1215b62b633eSLiam R. Howlett 
1216b62b633eSLiam R. Howlett 	return 0;
1217b62b633eSLiam R. Howlett }
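
/*
 * The helpers above implement a preallocate-then-store pattern: maple tree
 * nodes are reserved while failure is still easy to unwind, and the store
 * itself then proceeds without allocating.  A minimal insertion sketch
 * (locking, file and anon_vma handling elided; the helper name is
 * hypothetical):
 */
static inline int vma_iter_insert_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_iter_config(vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;		/* nothing to undo yet */

	vma_iter_store(vmi, vma);	/* cannot fail: nodes are preallocated */
	return 0;
}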
1218440703e0SLiam R. Howlett 
1219440703e0SLiam R. Howlett /*
1220440703e0SLiam R. Howlett  * VMA lock generalization
1221440703e0SLiam R. Howlett  */
1222440703e0SLiam R. Howlett struct vma_prepare {
1223440703e0SLiam R. Howlett 	struct vm_area_struct *vma;
1224440703e0SLiam R. Howlett 	struct vm_area_struct *adj_next;
1225440703e0SLiam R. Howlett 	struct file *file;
1226440703e0SLiam R. Howlett 	struct address_space *mapping;
1227440703e0SLiam R. Howlett 	struct anon_vma *anon_vma;
1228440703e0SLiam R. Howlett 	struct vm_area_struct *insert;
1229440703e0SLiam R. Howlett 	struct vm_area_struct *remove;
1230440703e0SLiam R. Howlett 	struct vm_area_struct *remove2;
1231440703e0SLiam R. Howlett };
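
/*
 * A hedged sketch of how a simple single-VMA modification might fill in
 * struct vma_prepare before calling the common prepare/complete helpers in
 * mm/mmap.c; merges that touch several VMAs additionally set adj_next,
 * remove and remove2.  The helper name is hypothetical.
 */
static inline void vma_prep_single(struct vma_prepare *vp,
				   struct vm_area_struct *vma)
{
	memset(vp, 0, sizeof(*vp));
	vp->vma = vma;
	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
	vp->anon_vma = vma->anon_vma;
}
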
12321da177e4SLinus Torvalds #endif	/* __MM_INTERNAL_H */
1233