/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
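
/*
 * Illustrative check (a hypothetical caller-side sketch; the slab
 * allocators reject these bits before the flags reach the page
 * allocator):
 *
 *	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 *		BUG();
 */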

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
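
/*
 * Illustrative usage (a sketch; the condition is hypothetical). The
 * warning fires at most once, and never for __GFP_NOWARN callers:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
 *		return NULL;
 */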

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define COMPOUND_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 */
static inline int folio_nr_pages_mapped(struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}
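
/*
 * Illustrative caller pattern (a sketch; the real placement decision
 * is made in mm/swap.c and mm/vmscan.c):
 *
 *	if (folio_evictable(folio))
 *		lruvec_add_folio(lruvec, folio);
 *	else
 *		folio_set_unevictable(folio);
 */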

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
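
/*
 * Illustrative pattern (a sketch modelled on the compaction scanner):
 * snapshot the order once, then validate the snapshot before use.
 *
 *	unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *	if (freepage_order > 0 && freepage_order <= MAX_ORDER)
 *		pfn += (1UL << freepage_order) - 1;
 */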

/*
 * This function checks whether @buddy is free and is the buddy of @page.
 * We can coalesce a page and its buddy if:
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
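
/*
 * Worked example (illustrative): at order 1, pfn 8 and pfn 10 are
 * buddies, since 8 ^ 2 == 10 and 10 ^ 2 == 8; both share the order-2
 * parent starting at pfn 8, since 10 & ~2 == 8.
 */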

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The buddy located by the pfn arithmetic might not be a PageBuddy, might
 * lie outside @page's zone, or might have a different order than @page, so
 * it is validated before being returned.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
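
/*
 * Illustrative caller pattern (a simplified sketch of the merge step
 * in the buddy free path):
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		goto done_merging;
 */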

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void folio_undo_large_rmappable(struct folio *folio);

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);
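
/*
 * Illustrative sketch of how the two helpers above combine (the real
 * prep_compound_page() lives in mm/page_alloc.c):
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < (1 << order); i++)
 *		prep_compound_tail(page, i);
 *	prep_compound_head(page, order);
 */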

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
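
/*
 * Illustrative use (a sketch modelled on vm_stat_account() in mm/util.c);
 * the else-if ordering charges each VMA to at most one counter:
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */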

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);
/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(), munlock at
 * the end of page_remove_rmap(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
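
/*
 * Worked example (illustrative, assuming 4kB pages): for a vma with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x8, pgoff 0x10 maps at
 * 0x7f0000000000 + ((0x10 - 0x8) << 12) == 0x7f0000008000, provided
 * that address is still below vm_end.
 */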

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
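
/*
 * Illustrative caller pattern (a sketch of how a fault handler can use
 * the returned pin):
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start the I/O ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */
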
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
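
/*
 * Illustrative usage (a sketch; the prefix and message are hypothetical):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "zonelist for node %d built\n", nid);
 */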

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};
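
/*
 * Illustrative initialiser (a sketch; the actual gfp choices vary by
 * caller):
 *
 *	struct migration_target_control mtc = {
 *		.nid = NUMA_NO_NODE,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 */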
89719fc7bedSJoonsoo Kim
898b67177ecSNicholas Piggin /*
89907073eb0SDavid Howells * mm/filemap.c
90007073eb0SDavid Howells */
90107073eb0SDavid Howells size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
90207073eb0SDavid Howells struct folio *folio, loff_t fpos, size_t size);
90307073eb0SDavid Howells
90407073eb0SDavid Howells /*
905b67177ecSNicholas Piggin * mm/vmalloc.c
906b67177ecSNicholas Piggin */
9074ad0ae8cSNicholas Piggin #ifdef CONFIG_MMU
908b6714911SMike Rapoport (IBM) void __init vmalloc_init(void);
909d905ae2bSAlexander Potapenko int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
910b67177ecSNicholas Piggin pgprot_t prot, struct page **pages, unsigned int page_shift);
9114ad0ae8cSNicholas Piggin #else
vmalloc_init(void)912b6714911SMike Rapoport (IBM) static inline void vmalloc_init(void)
913b6714911SMike Rapoport (IBM) {
914b6714911SMike Rapoport (IBM) }
915b6714911SMike Rapoport (IBM)
9164ad0ae8cSNicholas Piggin static inline
vmap_pages_range_noflush(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,unsigned int page_shift)917d905ae2bSAlexander Potapenko int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
9184ad0ae8cSNicholas Piggin pgprot_t prot, struct page **pages, unsigned int page_shift)
9194ad0ae8cSNicholas Piggin {
9204ad0ae8cSNicholas Piggin return -EINVAL;
9214ad0ae8cSNicholas Piggin }
9224ad0ae8cSNicholas Piggin #endif
9234ad0ae8cSNicholas Piggin
924d905ae2bSAlexander Potapenko int __must_check __vmap_pages_range_noflush(unsigned long addr,
925d905ae2bSAlexander Potapenko unsigned long end, pgprot_t prot,
926d905ae2bSAlexander Potapenko struct page **pages, unsigned int page_shift);
927b073d7f8SAlexander Potapenko
9284ad0ae8cSNicholas Piggin void vunmap_range_noflush(unsigned long start, unsigned long end);
929b67177ecSNicholas Piggin
930b073d7f8SAlexander Potapenko void __vunmap_range_noflush(unsigned long start, unsigned long end);
931b073d7f8SAlexander Potapenko
932f4c0d836SYang Shi int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
933f4c0d836SYang Shi unsigned long addr, int page_nid, int *flags);
934f4c0d836SYang Shi
93527674ef6SChristoph Hellwig void free_zone_device_page(struct page *page);
936b05a79d4SAlistair Popple int migrate_device_coherent_page(struct page *page);
93727674ef6SChristoph Hellwig
938ece1ed7bSMatthew Wilcox (Oracle) /*
939ece1ed7bSMatthew Wilcox (Oracle) * mm/gup.c
940ece1ed7bSMatthew Wilcox (Oracle) */
941ece1ed7bSMatthew Wilcox (Oracle) struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
9427ce154feSJason Gunthorpe int __must_check try_grab_page(struct page *page, unsigned int flags);
943ece1ed7bSMatthew Wilcox (Oracle)
9448b9c1cc0SDavid Hildenbrand /*
9458b9c1cc0SDavid Hildenbrand * mm/huge_memory.c
9468b9c1cc0SDavid Hildenbrand */
9478b9c1cc0SDavid Hildenbrand struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
9488b9c1cc0SDavid Hildenbrand unsigned long addr, pmd_t *pmd,
9498b9c1cc0SDavid Hildenbrand unsigned int flags);
9508b9c1cc0SDavid Hildenbrand
9512c224108SJason Gunthorpe enum {
9522c224108SJason Gunthorpe /* mark page accessed */
9532c224108SJason Gunthorpe FOLL_TOUCH = 1 << 16,
9542c224108SJason Gunthorpe /* a retry, previous pass started an IO */
9552c224108SJason Gunthorpe FOLL_TRIED = 1 << 17,
9562c224108SJason Gunthorpe /* we are working on non-current tsk/mm */
9572c224108SJason Gunthorpe FOLL_REMOTE = 1 << 18,
9582c224108SJason Gunthorpe /* pages must be released via unpin_user_page */
9592c224108SJason Gunthorpe FOLL_PIN = 1 << 19,
9602c224108SJason Gunthorpe /* gup_fast: prevent fall-back to slow gup */
9612c224108SJason Gunthorpe FOLL_FAST_ONLY = 1 << 20,
9622c224108SJason Gunthorpe /* allow unlocking the mmap lock */
9632c224108SJason Gunthorpe FOLL_UNLOCKABLE = 1 << 21,
9649e898211SDavid Hildenbrand /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
9659e898211SDavid Hildenbrand FOLL_MADV_POPULATE = 1 << 22,
9662c224108SJason Gunthorpe };
9672c224108SJason Gunthorpe
96849db746dSLorenzo Stoakes #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
9699e898211SDavid Hildenbrand FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
9709e898211SDavid Hildenbrand FOLL_MADV_POPULATE)
97149db746dSLorenzo Stoakes
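/*
 * Sketch (for illustration; the argument checking in mm/gup.c looks
 * roughly like this): entry points taking a caller-supplied @gup_flags
 * are expected to reject the internal-only flags above up front:
 *
 *	if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
 *		return -EINVAL;
 */
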
97263b60512SJason Gunthorpe /*
97363b60512SJason Gunthorpe * Indicates whether, for pages that are write-protected in the page table,
97463b60512SJason Gunthorpe * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE so that the
97563b60512SJason Gunthorpe * GUP pin remains consistent with the pages mapped into the page tables
97663b60512SJason Gunthorpe * of the MM.
97763b60512SJason Gunthorpe *
97863b60512SJason Gunthorpe * Temporary unmapping of PageAnonExclusive() pages or clearing of
97963b60512SJason Gunthorpe * PageAnonExclusive() has to protect against concurrent GUP:
98063b60512SJason Gunthorpe * * Ordinary GUP: Using the PT lock
98163b60512SJason Gunthorpe * * GUP-fast and fork(): mm->write_protect_seq
98263b60512SJason Gunthorpe * * GUP-fast and KSM or temporary unmapping (swap, migration): see
98363b60512SJason Gunthorpe * page_try_share_anon_rmap()
98463b60512SJason Gunthorpe *
98563b60512SJason Gunthorpe * Must be called with the (sub)page that's actually referenced via the
98663b60512SJason Gunthorpe * page table entry, which might not necessarily be the head page for a
98763b60512SJason Gunthorpe * PTE-mapped THP.
98863b60512SJason Gunthorpe *
98963b60512SJason Gunthorpe * If the vma is NULL, we're coming from the GUP-fast path and might have
99063b60512SJason Gunthorpe * to fall back to the slow path just to look up the vma.
99163b60512SJason Gunthorpe */
99263b60512SJason Gunthorpe static inline bool gup_must_unshare(struct vm_area_struct *vma,
99363b60512SJason Gunthorpe unsigned int flags, struct page *page)
99463b60512SJason Gunthorpe {
99563b60512SJason Gunthorpe /*
99663b60512SJason Gunthorpe * FOLL_WRITE is implicitly handled correctly as the page table entry
99763b60512SJason Gunthorpe * has to be writable -- and if it references (part of) an anonymous
99863b60512SJason Gunthorpe * folio, that part is required to be marked exclusive.
99963b60512SJason Gunthorpe */
100063b60512SJason Gunthorpe if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
100163b60512SJason Gunthorpe return false;
100263b60512SJason Gunthorpe /*
100363b60512SJason Gunthorpe * Note: PageAnon(page) is stable until the page is actually getting
100463b60512SJason Gunthorpe * freed.
100563b60512SJason Gunthorpe */
100663b60512SJason Gunthorpe if (!PageAnon(page)) {
100763b60512SJason Gunthorpe /*
100863b60512SJason Gunthorpe * We only care about R/O long-term pinning: R/O short-term
100963b60512SJason Gunthorpe * pinning does not have the semantics to observe successive
101063b60512SJason Gunthorpe * changes through the process page tables.
101163b60512SJason Gunthorpe */
101263b60512SJason Gunthorpe if (!(flags & FOLL_LONGTERM))
101363b60512SJason Gunthorpe return false;
101463b60512SJason Gunthorpe
101563b60512SJason Gunthorpe /* We really need the vma ... */
101663b60512SJason Gunthorpe if (!vma)
101763b60512SJason Gunthorpe return true;
101863b60512SJason Gunthorpe
101963b60512SJason Gunthorpe /*
102063b60512SJason Gunthorpe * ... because we only care about writable private ("COW")
102163b60512SJason Gunthorpe * mappings where we have to break COW early.
102263b60512SJason Gunthorpe */
102363b60512SJason Gunthorpe return is_cow_mapping(vma->vm_flags);
102463b60512SJason Gunthorpe }
102563b60512SJason Gunthorpe
102663b60512SJason Gunthorpe /* Paired with a memory barrier in page_try_share_anon_rmap(). */
102763b60512SJason Gunthorpe if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
102863b60512SJason Gunthorpe smp_rmb();
102963b60512SJason Gunthorpe
103063b60512SJason Gunthorpe /*
10315805192cSDavid Hildenbrand * During GUP-fast we might not get called on the head page for a
10325805192cSDavid Hildenbrand * hugetlb page that is mapped using cont-PTE, because GUP-fast does
10335805192cSDavid Hildenbrand * not work with the abstracted hugetlb PTEs that always point at the
10345805192cSDavid Hildenbrand * head page. For hugetlb, PageAnonExclusive only applies on the head
10355805192cSDavid Hildenbrand * page (as it cannot be partially COW-shared), so look up the head page.
10365805192cSDavid Hildenbrand */
10375805192cSDavid Hildenbrand if (unlikely(!PageHead(page) && PageHuge(page)))
10385805192cSDavid Hildenbrand page = compound_head(page);
10395805192cSDavid Hildenbrand
10405805192cSDavid Hildenbrand /*
104163b60512SJason Gunthorpe * Note that PageKsm() pages cannot be exclusive, and consequently,
104263b60512SJason Gunthorpe * cannot get pinned.
104363b60512SJason Gunthorpe */
104463b60512SJason Gunthorpe return !PageAnonExclusive(page);
104563b60512SJason Gunthorpe }
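
/*
 * Usage sketch for gup_must_unshare() (illustrative): a page table
 * walker passes the exact (sub)page referenced by the PTE and bails
 * out so the fault path can unshare:
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 *
 * where -EMLINK tells the caller to retry the fault with
 * FAULT_FLAG_UNSHARE set.
 */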
10461da177e4SLinus Torvalds
1047902c2d91SMa Wupeng extern bool mirrored_kernelcore;
10480db31d63SMa Wupeng extern bool memblock_has_mirror(void);
1049902c2d91SMa Wupeng
105076aefad6SPeter Xu static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
105176aefad6SPeter Xu {
105276aefad6SPeter Xu /*
105376aefad6SPeter Xu * NOTE: we must check CONFIG_MEM_SOFT_DIRTY before testing
105476aefad6SPeter Xu * VM_SOFTDIRTY, because when soft-dirty is not compiled in,
105576aefad6SPeter Xu * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
105676aefad6SPeter Xu * would then always be true.
105776aefad6SPeter Xu */
105876aefad6SPeter Xu if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
105976aefad6SPeter Xu return false;
106076aefad6SPeter Xu
106176aefad6SPeter Xu /*
106276aefad6SPeter Xu * Soft-dirty is kind of special: its tracking is enabled when the
106376aefad6SPeter Xu * VM_SOFTDIRTY vma flag is *not* set.
106476aefad6SPeter Xu */
106576aefad6SPeter Xu return !(vma->vm_flags & VM_SOFTDIRTY);
106676aefad6SPeter Xu }
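
/*
 * Example (a sketch with a hypothetical caller, not code from this
 * file): code that must catch the next write to a page so it can be
 * marked soft-dirty keeps write-notification armed for such VMAs:
 *
 *	if (vma_soft_dirty_enabled(vma))
 *		wp_needed = true;
 */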
106776aefad6SPeter Xu
106853bee98dSLiam R. Howlett static inline void vma_iter_config(struct vma_iterator *vmi,
106953bee98dSLiam R. Howlett unsigned long index, unsigned long last)
107053bee98dSLiam R. Howlett {
107153bee98dSLiam R. Howlett MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
107253bee98dSLiam R. Howlett (vmi->mas.index > index || vmi->mas.last < index));
107353bee98dSLiam R. Howlett __mas_set_range(&vmi->mas, index, last - 1);
107453bee98dSLiam R. Howlett }
107553bee98dSLiam R. Howlett
1076b62b633eSLiam R. Howlett /*
1077b62b633eSLiam R. Howlett * VMA Iterator functions shared between nommu and mmap
1078b62b633eSLiam R. Howlett */
1079b5df0922SLiam R. Howlett static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1080b5df0922SLiam R. Howlett struct vm_area_struct *vma)
1081b62b633eSLiam R. Howlett {
1082b5df0922SLiam R. Howlett return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1083b62b633eSLiam R. Howlett }
1084b62b633eSLiam R. Howlett
1085b5df0922SLiam R. Howlett static inline void vma_iter_clear(struct vma_iterator *vmi)
1086b62b633eSLiam R. Howlett {
1087b62b633eSLiam R. Howlett mas_store_prealloc(&vmi->mas, NULL);
1088b62b633eSLiam R. Howlett }
1089b62b633eSLiam R. Howlett
1090f72cf24aSLiam R. Howlett static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1091f72cf24aSLiam R. Howlett unsigned long start, unsigned long end, gfp_t gfp)
1092f72cf24aSLiam R. Howlett {
1093b5df0922SLiam R. Howlett __mas_set_range(&vmi->mas, start, end - 1);
1094f72cf24aSLiam R. Howlett mas_store_gfp(&vmi->mas, NULL, gfp);
1095f72cf24aSLiam R. Howlett if (unlikely(mas_is_err(&vmi->mas)))
1096f72cf24aSLiam R. Howlett return -ENOMEM;
1097f72cf24aSLiam R. Howlett
1098f72cf24aSLiam R. Howlett return 0;
1099f72cf24aSLiam R. Howlett }
1100f72cf24aSLiam R. Howlett
1101b62b633eSLiam R. Howlett static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1102b62b633eSLiam R. Howlett {
1103b62b633eSLiam R. Howlett return mas_walk(&vmi->mas);
1104b62b633eSLiam R. Howlett }
1105b62b633eSLiam R. Howlett
1106b62b633eSLiam R. Howlett /* Store a VMA with preallocated memory */
1107b62b633eSLiam R. Howlett static inline void vma_iter_store(struct vma_iterator *vmi,
1108b62b633eSLiam R. Howlett struct vm_area_struct *vma)
1109b62b633eSLiam R. Howlett {
1110b62b633eSLiam R. Howlett
1111b62b633eSLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
111236bd9310SLiam R. Howlett if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
111336bd9310SLiam R. Howlett vmi->mas.index > vma->vm_start)) {
111436bd9310SLiam R. Howlett pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
111536bd9310SLiam R. Howlett vmi->mas.index, vma->vm_start, vma->vm_start,
111636bd9310SLiam R. Howlett vma->vm_end, vmi->mas.index, vmi->mas.last);
1117b62b633eSLiam R. Howlett }
111836bd9310SLiam R. Howlett if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
111936bd9310SLiam R. Howlett vmi->mas.last < vma->vm_start)) {
112036bd9310SLiam R. Howlett pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
112136bd9310SLiam R. Howlett vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
112236bd9310SLiam R. Howlett vmi->mas.index, vmi->mas.last);
1123b62b633eSLiam R. Howlett }
1124b62b633eSLiam R. Howlett #endif
1125b62b633eSLiam R. Howlett
1126b62b633eSLiam R. Howlett if (vmi->mas.node != MAS_START &&
1127b62b633eSLiam R. Howlett ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1128b62b633eSLiam R. Howlett vma_iter_invalidate(vmi);
1129b62b633eSLiam R. Howlett
1130b5df0922SLiam R. Howlett __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1131b62b633eSLiam R. Howlett mas_store_prealloc(&vmi->mas, vma);
1132b62b633eSLiam R. Howlett }
1133b62b633eSLiam R. Howlett
1134b62b633eSLiam R. Howlett static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1135b62b633eSLiam R. Howlett struct vm_area_struct *vma, gfp_t gfp)
1136b62b633eSLiam R. Howlett {
1137b62b633eSLiam R. Howlett if (vmi->mas.node != MAS_START &&
1138b62b633eSLiam R. Howlett ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1139b62b633eSLiam R. Howlett vma_iter_invalidate(vmi);
1140b62b633eSLiam R. Howlett
1141b5df0922SLiam R. Howlett __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1142b62b633eSLiam R. Howlett mas_store_gfp(&vmi->mas, vma, gfp);
1143b62b633eSLiam R. Howlett if (unlikely(mas_is_err(&vmi->mas)))
1144b62b633eSLiam R. Howlett return -ENOMEM;
1145b62b633eSLiam R. Howlett
1146b62b633eSLiam R. Howlett return 0;
1147b62b633eSLiam R. Howlett }
1148440703e0SLiam R. Howlett
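/*
 * Typical store pattern for the iterator helpers above (a sketch; real
 * callers live in mm/mmap.c and mm/nommu.c):
 *
 *	VMA_ITERATOR(vmi, mm, vma->vm_start);
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 *
 * vma_iter_store() consumes the preallocated maple tree nodes, so a
 * successful vma_iter_prealloc() must always be followed by a store
 * (or the preallocation must be released again).
 */
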
1149440703e0SLiam R. Howlett /*
1150440703e0SLiam R. Howlett * VMA lock generalization
1151440703e0SLiam R. Howlett */
1152440703e0SLiam R. Howlett struct vma_prepare {
1153440703e0SLiam R. Howlett struct vm_area_struct *vma;
1154440703e0SLiam R. Howlett struct vm_area_struct *adj_next;
1155440703e0SLiam R. Howlett struct file *file;
1156440703e0SLiam R. Howlett struct address_space *mapping;
1157440703e0SLiam R. Howlett struct anon_vma *anon_vma;
1158440703e0SLiam R. Howlett struct vm_area_struct *insert;
1159440703e0SLiam R. Howlett struct vm_area_struct *remove;
1160440703e0SLiam R. Howlett struct vm_area_struct *remove2;
1161440703e0SLiam R. Howlett };
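/*
 * Intended use (a sketch of the flow, not complete documentation): a
 * vma_prepare is filled in before a VMA modification and applied when
 * it completes, centralising the file mapping, anon_vma and rmap
 * locking; see the vma_prepare()/vma_complete() pair in mm/mmap.c.
 */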
11621da177e4SLinus Torvalds #endif /* __MM_INTERNAL_H */
1163