xref: /openbmc/linux/mm/internal.h (revision fc4951c3)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)
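/*
 * For example, a pagecache or slab helper might pass (gfp & GFP_RECLAIM_MASK)
 * down to a nested allocation so that only the caller's reclaim/IO/FS
 * constraints propagate, not its placement hints (illustrative sketch, not a
 * specific call site).
 */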

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Unlike WARN_ON_ONCE(), no warning is issued when the caller
 * specifies __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
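/*
 * Sketch of the intended use (hypothetical caller, not part of this header):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
 *		return NULL;
 *
 * warns once for an invalid order, but stays silent when the caller passed
 * __GFP_NOWARN.
 */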

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define COMPOUND_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
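/*
 * Roughly speaking: a PMD-mapped THP adds COMPOUND_MAPPED (0x800000) to
 * _nr_pages_mapped, while each individually PTE-mapped 4kB page adds 1;
 * folio_nr_pages_mapped() below masks off the COMPOUND_MAPPED bit so only
 * the per-page count remains.
 */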

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 */
static inline int folio_nr_pages_mapped(struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))
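/*
 * K() converts a page count to kilobytes: with 4kB pages (PAGE_SHIFT == 12),
 * K(256) == 256 << 2 == 1024, i.e. 256 pages are reported as 1024 kB.
 */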

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zones,
	 * memory in zones lower than highest_zoneidx is
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
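/*
 * Typical pattern (sketch of an assumed caller, not a definitive one): read
 * the order once into a local, range-check that local, and only then use it:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order <= MAX_ORDER)
 *		pfn += 1UL << order;	// e.g. skip over the free block
 */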

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
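/*
 * Worked example of the formulas above: pfn 8 at order 1 has buddy
 * 8 ^ (1 << 1) = 10, and the combined order-2 parent of either one starts
 * at 10 & ~(1 << 1) = 8.
 */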

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy may not be PageBuddy, may lie outside @page's zone, or may
 * have an order different from @page. Validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
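/*
 * Sketch of the expected calling pattern (assumed caller, for illustration):
 *
 *	unsigned long buddy_pfn;
 *	struct page *buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *
 *	if (!buddy)
 *		return;		// no valid buddy at this order
 *	// @page and @buddy may now be merged into an order+1 block
 */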

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}
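/*
 * For example, an order-9 folio (a 2MB THP with 4kB pages) stores 9 in the
 * low byte of _flags_1 and, on 64-bit, sets _folio_nr_pages to 1 << 9 = 512.
 */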

bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return false;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return false;

	return __folio_unqueue_deferred_split(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	folio_prep_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);


int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
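/*
 * For example (per the flag tests above): a PROT_READ|PROT_EXEC file mapping
 * (VM_READ|VM_EXEC) counts as exec, a private PROT_READ|PROT_WRITE anonymous
 * mapping (VM_READ|VM_WRITE) counts as data, and a VM_STACK vma (or a shadow
 * stack) counts as stack.
 */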

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);
/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(), munlock at
 * the end of page_remove_rmap(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
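/*
 * Worked example (assuming 4kB pages, PAGE_SHIFT == 12): for a vma with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x10, pgoff 0x18 maps to
 * 0x7f0000000000 + ((0x18 - 0x10) << 12) == 0x7f0000008000, provided that
 * address still lies below vm_end.
 */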

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
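/*
 * ALLOC_NO_WATERMARKS is 0x04, so ALLOC_WMARK_MASK is 0x03 and the low two
 * bits of alloc_flags select WMARK_MIN/LOW/HIGH; roughly, the allocator fast
 * path does something like
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 * to pick which zone watermark to test against.
 */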

/*
 * Only MMU archs have async oom victim reclaim (the oom_reaper), so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};
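/*
 * Sketch of a typical (assumed) initialisation by a migration caller:
 *
 *	struct migration_target_control mtc = {
 *		.nid = NUMA_NO_NODE,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *
 * passed as the private argument of an allocation callback such as
 * alloc_migration_target().
 */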

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *    page_try_share_anon_rmap()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
		smp_rmb();

	/*
	 * During GUP-fast we might not get called on the head page for a
	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
	 * not work with the abstracted hugetlb PTEs that always point at the
	 * head page. For hugetlb, PageAnonExclusive only applies on the head
	 * page (as it cannot be partially COW-shared), so lookup the head page.
	 */
	if (unlikely(!PageHead(page) && PageHuge(page)))
		page = compound_head(page);

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before checking VM_SOFTDIRTY, because when
	 * soft-dirty is not compiled in, VM_SOFTDIRTY is defined as 0x0 and
	 * !(vm_flags & VM_SOFTDIRTY) would then always be true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * VM_SOFTDIRTY vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
		   (vmi->mas.index > index || vmi->mas.last < index));
	__mas_set_range(&vmi->mas, index, last - 1);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.node != MAS_START &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}
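/*
 * Sketch of the expected calling pattern (illustrative, not a specific call
 * site): reserve maple tree nodes first, then do the store that consumes the
 * preallocation:
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */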

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.node != MAS_START &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};
#endif	/* __MM_INTERNAL_H */