1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/rmap.h>
14 #include <linux/tracepoint-defs.h>
15 
16 struct folio_batch;
17 
18 /*
19  * The set of flags that only affect watermark checking and reclaim
20  * behaviour. This is used by the MM to obey the caller constraints
21  * about IO, FS and watermark checking while ignoring placement
22  * hints such as HIGHMEM usage.
23  */
24 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
25 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
26 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
27 			__GFP_NOLOCKDEP)
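/*
 * Editor's note (not in the upstream header): the typical use of this mask is
 * to propagate only the caller's reclaim-related constraints into an internal
 * allocation while choosing placement independently, e.g.:
 *
 *	gfp_t inner_gfp = (gfp & GFP_RECLAIM_MASK) | __GFP_ZERO;
 */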
28 
29 /* The GFP flags allowed during early boot */
30 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
31 
32 /* Control allocation cpuset and node placement constraints */
33 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
34 
35 /* Do not use these with a slab allocator */
36 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
37 
38 /*
39  * Unlike WARN_ON_ONCE(), no warning is issued when __GFP_NOWARN is
40  * specified in the gfp flags.
41  */
42 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
43 	static bool __section(".data.once") __warned;			\
44 	int __ret_warn_once = !!(cond);					\
45 									\
46 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
47 		__warned = true;					\
48 		WARN_ON(1);						\
49 	}								\
50 	unlikely(__ret_warn_once);					\
51 })
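/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a hypothetical caller that should stay silent when __GFP_NOWARN was passed
 * might use the macro like this.
 */
#if 0
static struct page *example_alloc(gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages(gfp, order);

	/* Warns at most once, and never when gfp contains __GFP_NOWARN. */
	if (WARN_ON_ONCE_GFP(!page, gfp))
		return NULL;
	return page;
}
#endif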
52 
53 void page_writeback_init(void);
54 
55 /*
56  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
57  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
58  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
59  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
60  */
61 #define COMPOUND_MAPPED		0x800000
62 #define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
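/*
 * Worked example (editor's note): 16GB / 4kB == 0x400000 pages, so the sum of
 * individual page mapcounts always fits below bit 23; COMPOUND_MAPPED is that
 * next-higher bit (0x800000), and FOLIO_PAGES_MAPPED masks everything below it.
 */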
63 
64 /*
65  * Flags passed to __show_mem() and show_free_areas() to suppress output in
66  * various contexts.
67  */
68 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
69 
70 /*
71  * How many individual pages have an elevated _mapcount.  Excludes
72  * the folio's entire_mapcount.
73  */
74 static inline int folio_nr_pages_mapped(struct folio *folio)
75 {
76 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
77 }
78 
79 static inline void *folio_raw_mapping(struct folio *folio)
80 {
81 	unsigned long mapping = (unsigned long)folio->mapping;
82 
83 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
84 }
85 
86 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
87 						int nr_throttled);
88 static inline void acct_reclaim_writeback(struct folio *folio)
89 {
90 	pg_data_t *pgdat = folio_pgdat(folio);
91 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
92 
93 	if (nr_throttled)
94 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
95 }
96 
97 static inline void wake_throttle_isolated(pg_data_t *pgdat)
98 {
99 	wait_queue_head_t *wqh;
100 
101 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
102 	if (waitqueue_active(wqh))
103 		wake_up(wqh);
104 }
105 
106 vm_fault_t do_swap_page(struct vm_fault *vmf);
107 void folio_rotate_reclaimable(struct folio *folio);
108 bool __folio_end_writeback(struct folio *folio);
109 void deactivate_file_folio(struct folio *folio);
110 void folio_activate(struct folio *folio);
111 
112 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
113 		   struct vm_area_struct *start_vma, unsigned long floor,
114 		   unsigned long ceiling, bool mm_wr_locked);
115 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
116 
117 struct zap_details;
118 void unmap_page_range(struct mmu_gather *tlb,
119 			     struct vm_area_struct *vma,
120 			     unsigned long addr, unsigned long end,
121 			     struct zap_details *details);
122 
123 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
124 		unsigned int order);
125 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
126 static inline void force_page_cache_readahead(struct address_space *mapping,
127 		struct file *file, pgoff_t index, unsigned long nr_to_read)
128 {
129 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
130 	force_page_cache_ra(&ractl, nr_to_read);
131 }
132 
133 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
134 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
135 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
136 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
137 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
138 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
139 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
140 		loff_t end);
141 long invalidate_inode_page(struct page *page);
142 unsigned long mapping_try_invalidate(struct address_space *mapping,
143 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
144 
145 /**
146  * folio_evictable - Test whether a folio is evictable.
147  * @folio: The folio to test.
148  *
149  * Test whether @folio is evictable -- i.e., should be placed on
150  * active/inactive lists vs unevictable list.
151  *
152  * Reasons folio might not be evictable:
153  * 1. folio's mapping marked unevictable
154  * 2. One of the pages in the folio is part of an mlocked VMA
155  */
156 static inline bool folio_evictable(struct folio *folio)
157 {
158 	bool ret;
159 
160 	/* Prevent address_space of inode and swap cache from being freed */
161 	rcu_read_lock();
162 	ret = !mapping_unevictable(folio_mapping(folio)) &&
163 			!folio_test_mlocked(folio);
164 	rcu_read_unlock();
165 	return ret;
166 }
167 
168 /*
169  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
170  * a count of one.
171  */
172 static inline void set_page_refcounted(struct page *page)
173 {
174 	VM_BUG_ON_PAGE(PageTail(page), page);
175 	VM_BUG_ON_PAGE(page_ref_count(page), page);
176 	set_page_count(page, 1);
177 }
178 
179 /*
180  * Return true if a folio needs ->release_folio() calling upon it.
181  */
182 static inline bool folio_needs_release(struct folio *folio)
183 {
184 	struct address_space *mapping = folio_mapping(folio);
185 
186 	return folio_has_private(folio) ||
187 		(mapping && mapping_release_always(mapping));
188 }
189 
190 extern unsigned long highest_memmap_pfn;
191 
192 /*
193  * Maximum number of reclaim retries without progress before the OOM
194  * killer is considered the only way forward.
195  */
196 #define MAX_RECLAIM_RETRIES 16
197 
198 /*
199  * in mm/vmscan.c:
200  */
201 bool isolate_lru_page(struct page *page);
202 bool folio_isolate_lru(struct folio *folio);
203 void putback_lru_page(struct page *page);
204 void folio_putback_lru(struct folio *folio);
205 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
206 
207 /*
208  * in mm/rmap.c:
209  */
210 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
211 
212 /*
213  * in mm/page_alloc.c
214  */
215 #define K(x) ((x) << (PAGE_SHIFT-10))
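/*
 * Editor's note: K() converts a page count to kilobytes; with 4kB pages
 * (PAGE_SHIFT == 12), K(256) == 256 << 2 == 1024kB.
 */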
216 
217 extern char * const zone_names[MAX_NR_ZONES];
218 
219 /* perform sanity checks on struct pages being allocated or freed */
220 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
221 
222 extern int min_free_kbytes;
223 
224 void setup_per_zone_wmarks(void);
225 void calculate_min_free_kbytes(void);
226 int __meminit init_per_zone_wmark_min(void);
227 void page_alloc_sysctl_init(void);
228 
229 /*
230  * Structure for holding the mostly immutable allocation parameters passed
231  * between functions involved in allocations, including the alloc_pages*
232  * family of functions.
233  *
234  * nodemask, migratetype and highest_zoneidx are initialized only once in
235  * __alloc_pages() and then never change.
236  *
237  * zonelist, preferred_zone and highest_zoneidx are set first in
238  * __alloc_pages() for the fast path, and might be later changed
239  * in __alloc_pages_slowpath(). All other functions pass the whole structure
240  * by a const pointer.
241  */
242 struct alloc_context {
243 	struct zonelist *zonelist;
244 	nodemask_t *nodemask;
245 	struct zoneref *preferred_zoneref;
246 	int migratetype;
247 
248 	/*
249 	 * highest_zoneidx represents the highest usable zone index of
250 	 * the allocation request. Memory in zones lower than
251 	 * highest_zoneidx is protected by
252 	 * lowmem_reserve[highest_zoneidx].
253 	 *
254 	 * highest_zoneidx is also used by reclaim/compaction to limit
255 	 * the target zone, since zones higher than this index cannot
256 	 * be used for this allocation request.
257 	 */
258 	enum zone_type highest_zoneidx;
259 	bool spread_dirty_pages;
260 };
261 
262 /*
263  * This function returns the order of a free page in the buddy system. In
264  * general, page_zone(page)->lock must be held by the caller to prevent the
265  * page from being allocated in parallel and returning garbage as the order.
266  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
267  * page cannot be allocated or merged in parallel. Alternatively, it must
268  * handle invalid values gracefully, and use buddy_order_unsafe() below.
269  */
270 static inline unsigned int buddy_order(struct page *page)
271 {
272 	/* PageBuddy() must be checked by the caller */
273 	return page_private(page);
274 }
275 
276 /*
277  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
278  * PageBuddy() should be checked first by the caller to minimize race window,
279  * and invalid values must be handled gracefully.
280  *
281  * READ_ONCE is used so that if the caller assigns the result into a local
282  * variable and e.g. tests it for valid range before using, the compiler cannot
283  * decide to remove the variable and inline the page_private(page) multiple
284  * times, potentially observing different values in the tests and the actual
285  * use of the result.
286  */
287 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
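/*
 * Illustrative pattern (editor's addition): read the order once into a local,
 * range-check it, and only then use it, accepting that it may be stale:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (PageBuddy(page) && order <= MAX_ORDER)
 *		... use order, revalidating under zone->lock if it matters ...
 */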
288 
289 /*
290  * This function checks whether a page is free && is the buddy of @page.
291  * We can coalesce a page and its buddy if
292  * (a) the buddy is not in a hole (check before calling!) &&
293  * (b) the buddy is in the buddy system &&
294  * (c) a page and its buddy have the same order &&
295  * (d) a page and its buddy are in the same zone.
296  *
297  * For recording whether a page is in the buddy system, we set PageBuddy.
298  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
299  *
300  * For recording page's order, we use page_private(page).
301  */
302 static inline bool page_is_buddy(struct page *page, struct page *buddy,
303 				 unsigned int order)
304 {
305 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
306 		return false;
307 
308 	if (buddy_order(buddy) != order)
309 		return false;
310 
311 	/*
312 	 * zone check is done late to avoid uselessly calculating
313 	 * zone/node ids for pages that could never merge.
314 	 */
315 	if (page_zone_id(page) != page_zone_id(buddy))
316 		return false;
317 
318 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
319 
320 	return true;
321 }
322 
323 /*
324  * Locate the struct page for both the matching buddy in our
325  * pair (buddy1) and the combined O(n+1) page they form (page).
326  *
327  * 1) Any buddy B1 will have an order O twin B2 which satisfies
328  * the following equation:
329  *     B2 = B1 ^ (1 << O)
330  * For example, if the starting buddy (B1) is #8, its order-1
331  * buddy (B2) is #10:
332  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
333  *
334  * 2) Any buddy B will have an order O+1 parent P which
335  * satisfies the following equation:
336  *     P = B & ~(1 << O)
337  *
338  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
339  */
340 static inline unsigned long
341 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
342 {
343 	return page_pfn ^ (1 << order);
344 }
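/*
 * Worked example (editor's note): at order 1, pfn 10's buddy is
 * 10 ^ (1 << 1) == 8, and the merged order-2 parent starts at
 * 10 & ~(1 << 1) == 8, i.e. both buddies combine into the block at pfn 8.
 */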
345 
346 /*
347  * Find the buddy of @page and validate it.
348  * @page: The input page
349  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
350  *       function is used in the performance-critical __free_one_page().
351  * @order: The order of the page
352  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
353  *             page_to_pfn().
354  *
355  * The found buddy can be a non-PageBuddy page, be outside @page's zone, or
356  * have a different order than @page. It must be validated before use.
357  *
358  * Return: the found buddy page or NULL if not found.
359  */
360 static inline struct page *find_buddy_page_pfn(struct page *page,
361 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
362 {
363 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
364 	struct page *buddy;
365 
366 	buddy = page + (__buddy_pfn - pfn);
367 	if (buddy_pfn)
368 		*buddy_pfn = __buddy_pfn;
369 
370 	if (page_is_buddy(page, buddy, order))
371 		return buddy;
372 	return NULL;
373 }
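/*
 * Illustrative sketch (editor's addition) of the validate-then-merge pattern
 * a free path would follow, stopping at the first order without a usable buddy:
 *
 *	buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *	if (!buddy)
 *		goto done_merging;
 */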
374 
375 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
376 				unsigned long end_pfn, struct zone *zone);
377 
378 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
379 				unsigned long end_pfn, struct zone *zone)
380 {
381 	if (zone->contiguous)
382 		return pfn_to_page(start_pfn);
383 
384 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
385 }
386 
387 void set_zone_contiguous(struct zone *zone);
388 
389 static inline void clear_zone_contiguous(struct zone *zone)
390 {
391 	zone->contiguous = false;
392 }
393 
394 extern int __isolate_free_page(struct page *page, unsigned int order);
395 extern void __putback_isolated_page(struct page *page, unsigned int order,
396 				    int mt);
397 extern void memblock_free_pages(struct page *page, unsigned long pfn,
398 					unsigned int order);
399 extern void __free_pages_core(struct page *page, unsigned int order);
400 
401 /*
402  * This will have no effect, other than possibly generating a warning, if the
403  * caller passes in a non-large folio.
404  */
405 static inline void folio_set_order(struct folio *folio, unsigned int order)
406 {
407 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
408 		return;
409 
410 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
411 #ifdef CONFIG_64BIT
412 	folio->_folio_nr_pages = 1U << order;
413 #endif
414 }
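/*
 * Editor's note: e.g. folio_set_order(folio, 9) stores 9 in the low byte of
 * _flags_1 and, on 64-bit, sets _folio_nr_pages to 1 << 9 == 512.
 */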
415 
416 void folio_undo_large_rmappable(struct folio *folio);
417 
418 static inline void prep_compound_head(struct page *page, unsigned int order)
419 {
420 	struct folio *folio = (struct folio *)page;
421 
422 	folio_set_order(folio, order);
423 	atomic_set(&folio->_entire_mapcount, -1);
424 	atomic_set(&folio->_nr_pages_mapped, 0);
425 	atomic_set(&folio->_pincount, 0);
426 }
427 
428 static inline void prep_compound_tail(struct page *head, int tail_idx)
429 {
430 	struct page *p = head + tail_idx;
431 
432 	p->mapping = TAIL_MAPPING;
433 	set_compound_head(p, head);
434 	set_page_private(p, 0);
435 }
436 
437 extern void prep_compound_page(struct page *page, unsigned int order);
438 
439 extern void post_alloc_hook(struct page *page, unsigned int order,
440 					gfp_t gfp_flags);
441 extern int user_min_free_kbytes;
442 
443 extern void free_unref_page(struct page *page, unsigned int order);
444 extern void free_unref_page_list(struct list_head *list);
445 
446 extern void zone_pcp_reset(struct zone *zone);
447 extern void zone_pcp_disable(struct zone *zone);
448 extern void zone_pcp_enable(struct zone *zone);
449 extern void zone_pcp_init(struct zone *zone);
450 
451 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
452 			  phys_addr_t min_addr,
453 			  int nid, bool exact_nid);
454 
455 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
456 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
457 
458 
459 int split_free_page(struct page *free_page,
460 			unsigned int order, unsigned long split_pfn_offset);
461 
462 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
463 
464 /*
465  * in mm/compaction.c
466  */
467 /*
468  * compact_control is used to track pages being migrated and the free pages
469  * they are being migrated to during memory compaction. The free_pfn starts
470  * at the end of a zone and migrate_pfn begins at the start. Movable pages
471  * are moved to the end of a zone during a compaction run and the run
472  * completes when free_pfn <= migrate_pfn
473  */
474 struct compact_control {
475 	struct list_head freepages;	/* List of free pages to migrate to */
476 	struct list_head migratepages;	/* List of pages being migrated */
477 	unsigned int nr_freepages;	/* Number of isolated free pages */
478 	unsigned int nr_migratepages;	/* Number of pages to migrate */
479 	unsigned long free_pfn;		/* isolate_freepages search base */
480 	/*
481 	 * Acts as an in/out parameter to page isolation for migration.
482 	 * isolate_migratepages uses it as a search base.
483 	 * isolate_migratepages_block will update the value to the next pfn
484 	 * after the last isolated one.
485 	 */
486 	unsigned long migrate_pfn;
487 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
488 	struct zone *zone;
489 	unsigned long total_migrate_scanned;
490 	unsigned long total_free_scanned;
491 	unsigned short fast_search_fail;/* failures to use free list searches */
492 	short search_order;		/* order to start a fast search at */
493 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
494 	int order;			/* order a direct compactor needs */
495 	int migratetype;		/* migratetype of direct compactor */
496 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
497 	const int highest_zoneidx;	/* zone index of a direct compactor */
498 	enum migrate_mode mode;		/* Async or sync migration mode */
499 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
500 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
501 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
502 	bool direct_compaction;		/* False from kcompactd or /proc/... */
503 	bool proactive_compaction;	/* kcompactd proactive compaction */
504 	bool whole_zone;		/* Whole zone should/has been scanned */
505 	bool contended;			/* Signal lock contention */
506 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
507 					 * when there are potentially transient
508 					 * isolation or migration failures to
509 					 * ensure forward progress.
510 					 */
511 	bool alloc_contig;		/* alloc_contig_range allocation */
512 };
513 
514 /*
515  * Used in direct compaction when a page should be taken from the freelists
516  * immediately when one is created during the free path.
517  */
518 struct capture_control {
519 	struct compact_control *cc;
520 	struct page *page;
521 };
522 
523 unsigned long
524 isolate_freepages_range(struct compact_control *cc,
525 			unsigned long start_pfn, unsigned long end_pfn);
526 int
527 isolate_migratepages_range(struct compact_control *cc,
528 			   unsigned long low_pfn, unsigned long end_pfn);
529 
530 int __alloc_contig_migrate_range(struct compact_control *cc,
531 					unsigned long start, unsigned long end);
532 
533 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
534 void init_cma_reserved_pageblock(struct page *page);
535 
536 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
537 
538 int find_suitable_fallback(struct free_area *area, unsigned int order,
539 			int migratetype, bool only_stealable, bool *can_steal);
540 
541 static inline bool free_area_empty(struct free_area *area, int migratetype)
542 {
543 	return list_empty(&area->free_list[migratetype]);
544 }
545 
546 /*
547  * These three helpers classify VMAs for virtual memory accounting.
548  */
549 
550 /*
551  * Executable code area - executable, not writable, not stack
552  */
553 static inline bool is_exec_mapping(vm_flags_t flags)
554 {
555 	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
556 }
557 
558 /*
559  * Stack area (including shadow stacks)
560  *
561  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
562  * do_mmap() forbids all other combinations.
563  */
564 static inline bool is_stack_mapping(vm_flags_t flags)
565 {
566 	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
567 }
568 
569 /*
570  * Data area - private, writable, not stack
571  */
572 static inline bool is_data_mapping(vm_flags_t flags)
573 {
574 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
575 }
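/*
 * Editor's note: for example, a VM_READ | VM_EXEC mapping is counted by
 * is_exec_mapping(), a private VM_READ | VM_WRITE mapping by is_data_mapping(),
 * and a VM_GROWSDOWN stack (plus shadow stacks) by is_stack_mapping().
 */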
576 
577 /* mm/util.c */
578 struct anon_vma *folio_anon_vma(struct folio *folio);
579 
580 #ifdef CONFIG_MMU
581 void unmap_mapping_folio(struct folio *folio);
582 extern long populate_vma_page_range(struct vm_area_struct *vma,
583 		unsigned long start, unsigned long end, int *locked);
584 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
585 		unsigned long end, bool write, int *locked);
586 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
587 			       unsigned long bytes);
588 /*
589  * mlock_vma_folio() and munlock_vma_folio():
590  * should be called with vma's mmap_lock held for read or write,
591  * under page table lock for the pte/pmd being added or removed.
592  *
593  * mlock is usually called at the end of page_add_*_rmap(), munlock at
594  * the end of page_remove_rmap(); but new anon folios are managed by
595  * folio_add_lru_vma() calling mlock_new_folio().
596  *
597  * @compound is used to include pmd mappings of THPs, but filter out
598  * pte mappings of THPs, which cannot be consistently counted: a pte
599  * mapping of the THP head cannot be distinguished by the page alone.
600  */
601 void mlock_folio(struct folio *folio);
602 static inline void mlock_vma_folio(struct folio *folio,
603 			struct vm_area_struct *vma, bool compound)
604 {
605 	/*
606 	 * The VM_SPECIAL check here serves two purposes.
607 	 * 1) VM_IO check prevents migration from double-counting during mlock.
608 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
609 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
610 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
611 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
612 	 */
613 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
614 	    (compound || !folio_test_large(folio)))
615 		mlock_folio(folio);
616 }
617 
618 void munlock_folio(struct folio *folio);
619 static inline void munlock_vma_folio(struct folio *folio,
620 			struct vm_area_struct *vma, bool compound)
621 {
622 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
623 	    (compound || !folio_test_large(folio)))
624 		munlock_folio(folio);
625 }
626 
627 void mlock_new_folio(struct folio *folio);
628 bool need_mlock_drain(int cpu);
629 void mlock_drain_local(void);
630 void mlock_drain_remote(int cpu);
631 
632 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
633 
634 /*
635  * Return the start of the user virtual address at the specified page offset
636  * within a vma.
637  */
638 static inline unsigned long
639 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
640 		  struct vm_area_struct *vma)
641 {
642 	unsigned long address;
643 
644 	if (pgoff >= vma->vm_pgoff) {
645 		address = vma->vm_start +
646 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
647 		/* Check for address beyond vma (or wrapped through 0?) */
648 		if (address < vma->vm_start || address >= vma->vm_end)
649 			address = -EFAULT;
650 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
651 		/* Test above avoids possibility of wrap to 0 on 32-bit */
652 		address = vma->vm_start;
653 	} else {
654 		address = -EFAULT;
655 	}
656 	return address;
657 }
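/*
 * Worked example (editor's note): with vm_start == 0x7f0000000000 and
 * vm_pgoff == 0x10, pgoff 0x18 maps to vm_start + ((0x18 - 0x10) << PAGE_SHIFT),
 * i.e. eight pages into the vma, provided that address is still below vm_end.
 */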
658 
659 /*
660  * Return the start of user virtual address of a page within a vma.
661  * Returns -EFAULT if all of the page is outside the range of vma.
662  * If page is a compound head, the entire compound page is considered.
663  */
664 static inline unsigned long
665 vma_address(struct page *page, struct vm_area_struct *vma)
666 {
667 	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
668 	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
669 }
670 
671 /*
672  * Then at what user virtual address will none of the range be found in vma?
673  * Assumes that vma_address() already returned a good starting address.
674  */
675 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
676 {
677 	struct vm_area_struct *vma = pvmw->vma;
678 	pgoff_t pgoff;
679 	unsigned long address;
680 
681 	/* Common case, plus ->pgoff is invalid for KSM */
682 	if (pvmw->nr_pages == 1)
683 		return pvmw->address + PAGE_SIZE;
684 
685 	pgoff = pvmw->pgoff + pvmw->nr_pages;
686 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
687 	/* Check for address beyond vma (or wrapped through 0?) */
688 	if (address < vma->vm_start || address > vma->vm_end)
689 		address = vma->vm_end;
690 	return address;
691 }
692 
693 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
694 						    struct file *fpin)
695 {
696 	int flags = vmf->flags;
697 
698 	if (fpin)
699 		return fpin;
700 
701 	/*
702 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
703 	 * anything, so we only pin the file and drop the mmap_lock if only
704 	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
705 	 */
706 	if (fault_flag_allow_retry_first(flags) &&
707 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
708 		fpin = get_file(vmf->vma->vm_file);
709 		release_fault_lock(vmf);
710 	}
711 	return fpin;
712 }
713 #else /* !CONFIG_MMU */
714 static inline void unmap_mapping_folio(struct folio *folio) { }
715 static inline void mlock_new_folio(struct folio *folio) { }
716 static inline bool need_mlock_drain(int cpu) { return false; }
717 static inline void mlock_drain_local(void) { }
718 static inline void mlock_drain_remote(int cpu) { }
719 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
720 {
721 }
722 #endif /* !CONFIG_MMU */
723 
724 /* Memory initialisation debug and verification */
725 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
726 DECLARE_STATIC_KEY_TRUE(deferred_pages);
727 
728 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
729 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
730 
731 enum mminit_level {
732 	MMINIT_WARNING,
733 	MMINIT_VERIFY,
734 	MMINIT_TRACE
735 };
736 
737 #ifdef CONFIG_DEBUG_MEMORY_INIT
738 
739 extern int mminit_loglevel;
740 
741 #define mminit_dprintk(level, prefix, fmt, arg...) \
742 do { \
743 	if (level < mminit_loglevel) { \
744 		if (level <= MMINIT_WARNING) \
745 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
746 		else \
747 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
748 	} \
749 } while (0)
750 
751 extern void mminit_verify_pageflags_layout(void);
752 extern void mminit_verify_zonelist(void);
753 #else
754 
755 static inline void mminit_dprintk(enum mminit_level level,
756 				const char *prefix, const char *fmt, ...)
757 {
758 }
759 
760 static inline void mminit_verify_pageflags_layout(void)
761 {
762 }
763 
764 static inline void mminit_verify_zonelist(void)
765 {
766 }
767 #endif /* CONFIG_DEBUG_MEMORY_INIT */
768 
769 #define NODE_RECLAIM_NOSCAN	-2
770 #define NODE_RECLAIM_FULL	-1
771 #define NODE_RECLAIM_SOME	0
772 #define NODE_RECLAIM_SUCCESS	1
773 
774 #ifdef CONFIG_NUMA
775 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
776 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
777 #else
778 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
779 				unsigned int order)
780 {
781 	return NODE_RECLAIM_NOSCAN;
782 }
783 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
784 {
785 	return NUMA_NO_NODE;
786 }
787 #endif
788 
789 /*
790  * mm/memory-failure.c
791  */
792 extern int hwpoison_filter(struct page *p);
793 
794 extern u32 hwpoison_filter_dev_major;
795 extern u32 hwpoison_filter_dev_minor;
796 extern u64 hwpoison_filter_flags_mask;
797 extern u64 hwpoison_filter_flags_value;
798 extern u64 hwpoison_filter_memcg;
799 extern u32 hwpoison_filter_enable;
800 
801 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
802         unsigned long, unsigned long,
803         unsigned long, unsigned long);
804 
805 extern void set_pageblock_order(void);
806 unsigned long reclaim_pages(struct list_head *folio_list);
807 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
808 					    struct list_head *folio_list);
809 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
810 #define ALLOC_WMARK_MIN		WMARK_MIN
811 #define ALLOC_WMARK_LOW		WMARK_LOW
812 #define ALLOC_WMARK_HIGH	WMARK_HIGH
813 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
814 
815 /* Mask to get the watermark bits */
816 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
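/*
 * Editor's note: ALLOC_NO_WATERMARKS is 0x04, so ALLOC_WMARK_MASK is 0x03 and
 * (alloc_flags & ALLOC_WMARK_MASK) indexes WMARK_MIN/WMARK_LOW/WMARK_HIGH
 * directly.
 */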
817 
818 /*
819  * Only MMU archs have async OOM victim reclaim (aka the oom_reaper), so we
820  * cannot assume that a reduced access to memory reserves is sufficient
821  * for !MMU.
822  */
823 #ifdef CONFIG_MMU
824 #define ALLOC_OOM		0x08
825 #else
826 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
827 #endif
828 
829 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
830 				       * to 25% of the min watermark or
831 				       * 62.5% if __GFP_HIGH is set.
832 				       */
833 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
834 				       * of the min watermark.
835 				       */
836 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
837 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
838 #ifdef CONFIG_ZONE_DMA32
839 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
840 #else
841 #define ALLOC_NOFRAGMENT	  0x0
842 #endif
843 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
844 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
845 
846 /* Flags that allow allocations below the min watermark. */
847 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
848 
849 enum ttu_flags;
850 struct tlbflush_unmap_batch;
851 
852 
853 /*
854  * only for MM internal work items which do not depend on
855  * any allocations or locks which might depend on allocations
856  */
857 extern struct workqueue_struct *mm_percpu_wq;
858 
859 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
860 void try_to_unmap_flush(void);
861 void try_to_unmap_flush_dirty(void);
862 void flush_tlb_batched_pending(struct mm_struct *mm);
863 #else
864 static inline void try_to_unmap_flush(void)
865 {
866 }
867 static inline void try_to_unmap_flush_dirty(void)
868 {
869 }
870 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
871 {
872 }
873 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
874 
875 extern const struct trace_print_flags pageflag_names[];
876 extern const struct trace_print_flags pagetype_names[];
877 extern const struct trace_print_flags vmaflag_names[];
878 extern const struct trace_print_flags gfpflag_names[];
879 
880 static inline bool is_migrate_highatomic(enum migratetype migratetype)
881 {
882 	return migratetype == MIGRATE_HIGHATOMIC;
883 }
884 
885 static inline bool is_migrate_highatomic_page(struct page *page)
886 {
887 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
888 }
889 
890 void setup_zone_pageset(struct zone *zone);
891 
892 struct migration_target_control {
893 	int nid;		/* preferred node id */
894 	nodemask_t *nmask;
895 	gfp_t gfp_mask;
896 };
897 
898 /*
899  * mm/filemap.c
900  */
901 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
902 			      struct folio *folio, loff_t fpos, size_t size);
903 
904 /*
905  * mm/vmalloc.c
906  */
907 #ifdef CONFIG_MMU
908 void __init vmalloc_init(void);
909 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
910                 pgprot_t prot, struct page **pages, unsigned int page_shift);
911 #else
912 static inline void vmalloc_init(void)
913 {
914 }
915 
916 static inline
917 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
918                 pgprot_t prot, struct page **pages, unsigned int page_shift)
919 {
920 	return -EINVAL;
921 }
922 #endif
923 
924 int __must_check __vmap_pages_range_noflush(unsigned long addr,
925 			       unsigned long end, pgprot_t prot,
926 			       struct page **pages, unsigned int page_shift);
927 
928 void vunmap_range_noflush(unsigned long start, unsigned long end);
929 
930 void __vunmap_range_noflush(unsigned long start, unsigned long end);
931 
932 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
933 		      unsigned long addr, int page_nid, int *flags);
934 
935 void free_zone_device_page(struct page *page);
936 int migrate_device_coherent_page(struct page *page);
937 
938 /*
939  * mm/gup.c
940  */
941 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
942 int __must_check try_grab_page(struct page *page, unsigned int flags);
943 
944 /*
945  * mm/huge_memory.c
946  */
947 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
948 				   unsigned long addr, pmd_t *pmd,
949 				   unsigned int flags);
950 
951 enum {
952 	/* mark page accessed */
953 	FOLL_TOUCH = 1 << 16,
954 	/* a retry, previous pass started an IO */
955 	FOLL_TRIED = 1 << 17,
956 	/* we are working on non-current tsk/mm */
957 	FOLL_REMOTE = 1 << 18,
958 	/* pages must be released via unpin_user_page */
959 	FOLL_PIN = 1 << 19,
960 	/* gup_fast: prevent fall-back to slow gup */
961 	FOLL_FAST_ONLY = 1 << 20,
962 	/* allow unlocking the mmap lock */
963 	FOLL_UNLOCKABLE = 1 << 21,
964 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
965 	FOLL_MADV_POPULATE = 1 << 22,
966 };
967 
968 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
969 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
970 			    FOLL_MADV_POPULATE)
971 
972 /*
973  * Indicates whether, for pages that are write-protected in the page table,
974  * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
975  * GUP pin will remain consistent with the pages mapped into the page tables
976  * of the MM.
977  *
978  * Temporary unmapping of PageAnonExclusive() pages or clearing of
979  * PageAnonExclusive() has to protect against concurrent GUP:
980  * * Ordinary GUP: Using the PT lock
981  * * GUP-fast and fork(): mm->write_protect_seq
982  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
983  *    page_try_share_anon_rmap()
984  *
985  * Must be called with the (sub)page that's actually referenced via the
986  * page table entry, which might not necessarily be the head page for a
987  * PTE-mapped THP.
988  *
989  * If the vma is NULL, we're coming from the GUP-fast path and might have
990  * to fall back to the slow path just to look up the vma.
991  */
992 static inline bool gup_must_unshare(struct vm_area_struct *vma,
993 				    unsigned int flags, struct page *page)
994 {
995 	/*
996 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
997 	 * has to be writable -- and if it references (part of) an anonymous
998 	 * folio, that part is required to be marked exclusive.
999 	 */
1000 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1001 		return false;
1002 	/*
1003 	 * Note: PageAnon(page) is stable until the page is actually getting
1004 	 * freed.
1005 	 */
1006 	if (!PageAnon(page)) {
1007 		/*
1008 		 * We only care about R/O long-term pinning: R/O short-term
1009 		 * pinning does not have the semantics to observe successive
1010 		 * changes through the process page tables.
1011 		 */
1012 		if (!(flags & FOLL_LONGTERM))
1013 			return false;
1014 
1015 		/* We really need the vma ... */
1016 		if (!vma)
1017 			return true;
1018 
1019 		/*
1020 		 * ... because we only care about writable private ("COW")
1021 		 * mappings where we have to break COW early.
1022 		 */
1023 		return is_cow_mapping(vma->vm_flags);
1024 	}
1025 
1026 	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
1027 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
1028 		smp_rmb();
1029 
1030 	/*
1031 	 * During GUP-fast we might not get called on the head page for a
1032 	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
1033 	 * not work with the abstracted hugetlb PTEs that always point at the
1034 	 * head page. For hugetlb, PageAnonExclusive only applies on the head
1035 	 * page (as it cannot be partially COW-shared), so lookup the head page.
1036 	 */
1037 	if (unlikely(!PageHead(page) && PageHuge(page)))
1038 		page = compound_head(page);
1039 
1040 	/*
1041 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
1042 	 * cannot get pinned.
1043 	 */
1044 	return !PageAnonExclusive(page);
1045 }
1046 
1047 extern bool mirrored_kernelcore;
1048 extern bool memblock_has_mirror(void);
1049 
1050 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1051 {
1052 	/*
1053 	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1054 	 * enablements, because when without soft-dirty being compiled in,
1055 	 * enablements, because when soft-dirty is not compiled in,
1056 	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1057 	 * will always be true.
1058 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1059 		return false;
1060 
1061 	/*
1062 	 * Soft-dirty is kind of special: its tracking is enabled when the
1063 	 * VM_SOFTDIRTY vma flag is *not* set.
1064 	 */
1065 	return !(vma->vm_flags & VM_SOFTDIRTY);
1066 }
1067 
1068 static inline void vma_iter_config(struct vma_iterator *vmi,
1069 		unsigned long index, unsigned long last)
1070 {
1071 	MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
1072 		   (vmi->mas.index > index || vmi->mas.last < index));
1073 	__mas_set_range(&vmi->mas, index, last - 1);
1074 }
1075 
1076 /*
1077  * VMA Iterator functions shared between nommu and mmap
1078  */
1079 static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1080 		struct vm_area_struct *vma)
1081 {
1082 	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1083 }
1084 
1085 static inline void vma_iter_clear(struct vma_iterator *vmi)
1086 {
1087 	mas_store_prealloc(&vmi->mas, NULL);
1088 }
1089 
1090 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1091 			unsigned long start, unsigned long end, gfp_t gfp)
1092 {
1093 	__mas_set_range(&vmi->mas, start, end - 1);
1094 	mas_store_gfp(&vmi->mas, NULL, gfp);
1095 	if (unlikely(mas_is_err(&vmi->mas)))
1096 		return -ENOMEM;
1097 
1098 	return 0;
1099 }
1100 
1101 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1102 {
1103 	return mas_walk(&vmi->mas);
1104 }
1105 
1106 /* Store a VMA with preallocated memory */
1107 static inline void vma_iter_store(struct vma_iterator *vmi,
1108 				  struct vm_area_struct *vma)
1109 {
1110 
1111 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1112 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
1113 			vmi->mas.index > vma->vm_start)) {
1114 		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
1115 			vmi->mas.index, vma->vm_start, vma->vm_start,
1116 			vma->vm_end, vmi->mas.index, vmi->mas.last);
1117 	}
1118 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
1119 			vmi->mas.last <  vma->vm_start)) {
1120 		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
1121 		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
1122 		       vmi->mas.index, vmi->mas.last);
1123 	}
1124 #endif
1125 
1126 	if (vmi->mas.node != MAS_START &&
1127 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1128 		vma_iter_invalidate(vmi);
1129 
1130 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1131 	mas_store_prealloc(&vmi->mas, vma);
1132 }
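/*
 * Illustrative sketch (editor's addition, hypothetical helper): a typical
 * caller configures the iterator over the VMA's range, preallocates maple
 * tree nodes, and only then stores, so the store itself cannot fail.
 */
#if 0
static int example_insert_vma(struct vma_iterator *vmi, struct vm_area_struct *vma)
{
	vma_iter_config(vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;
	vma_iter_store(vmi, vma);
	return 0;
}
#endif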
1133 
1134 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1135 			struct vm_area_struct *vma, gfp_t gfp)
1136 {
1137 	if (vmi->mas.node != MAS_START &&
1138 	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1139 		vma_iter_invalidate(vmi);
1140 
1141 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1142 	mas_store_gfp(&vmi->mas, vma, gfp);
1143 	if (unlikely(mas_is_err(&vmi->mas)))
1144 		return -ENOMEM;
1145 
1146 	return 0;
1147 }
1148 
1149 /*
1150  * VMA lock generalization
1151  */
1152 struct vma_prepare {
1153 	struct vm_area_struct *vma;
1154 	struct vm_area_struct *adj_next;
1155 	struct file *file;
1156 	struct address_space *mapping;
1157 	struct anon_vma *anon_vma;
1158 	struct vm_area_struct *insert;
1159 	struct vm_area_struct *remove;
1160 	struct vm_area_struct *remove2;
1161 };
1162 #endif	/* __MM_INTERNAL_H */
1163