/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)

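/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a path that wants to honour its caller's reclaim constraints while
 * choosing placement on its own would typically mask the incoming flags,
 * e.g.:
 *
 *	gfp_t gfp = (caller_gfp & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 *
 * Constraints such as __GFP_NOFAIL, __GFP_NORETRY and __GFP_IO survive the
 * mask, while the caller's own placement hints are dropped.
 */
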
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

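/*
 * Illustrative note (editor's sketch): the low PAGE_MAPPING_FLAGS bits of
 * folio->mapping encode what kind of object the pointer refers to, so the
 * raw value is typically cast according to the folio type, e.g. (KSM folios
 * aside):
 *
 *	if (folio_test_anon(folio))
 *		anon_vma = (struct anon_vma *)folio_raw_mapping(folio);
 *	else
 *		mapping = (struct address_space *)folio_raw_mapping(folio);
 */
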
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. The folio's mapping is marked unevictable.
 * 2. One of the folio's pages is part of an mlocked VMA.
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/early_ioremap.c
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index for
	 * the allocation request. Due to the nature of the zones,
	 * memory in zones lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

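/*
 * Illustrative sketch (editor's addition): prepare_alloc_pages() in
 * mm/page_alloc.c fills the context roughly along these lines before the
 * zonelist walk starts:
 *
 *	ac->highest_zoneidx = gfp_zone(gfp_mask);
 *	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
 *	ac->nodemask = nodemask;
 *	ac->migratetype = gfp_migratetype(gfp_mask);
 *	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 *
 * Everything after that treats the structure as (mostly) read-only.
 */
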
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order-1
 * buddy (B2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

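/*
 * Illustrative sketch (editor's addition): the buddy-merge loop in
 * __free_one_page() uses the helper roughly like this to walk up one order
 * while merging:
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	buddy = page + (buddy_pfn - pfn);
 *	if (page_is_buddy(page, buddy, order)) {
 *		combined_pfn = buddy_pfn & pfn;	(pfn of the merged O+1 page)
 *		page = page + (combined_pfn - pfn);
 *		pfn = combined_pfn;
 *		order++;
 *	}
 */
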
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

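/*
 * Illustrative sketch (editor's addition, heavily simplified): compact_zone()
 * drives the two scanners towards each other roughly like this:
 *
 *	while (compact_finished(cc) == COMPACT_CONTINUE) {
 *		isolate_migratepages(cc);
 *		migrate_pages(&cc->migratepages, compaction_alloc,
 *			      compaction_free, (unsigned long)cc,
 *			      cc->mode, MR_COMPACTION, &nr_succeeded);
 *	}
 *
 * compaction_alloc() hands out pages from cc->freepages, which is refilled
 * by isolate_freepages() scanning downwards from cc->free_pfn.
 */
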
/*
 * Used in direct compaction: when a suitable free page is created during the
 * free path, it is captured immediately instead of going back onto the freelists.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

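/*
 * Illustrative sketch (editor's addition): a typical lockless user is the
 * compaction scanner, which skips over free pages roughly like this:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order > 0 && freepage_order < MAX_ORDER)
 *			low_pfn += (1UL << freepage_order) - 1;
 *		continue;
 *	}
 */
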
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

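/*
 * Illustrative sketch (editor's addition): vm_stat_account() in mm/util.c
 * applies these helpers roughly as follows when a VMA is added or removed:
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */
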
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);
/*
 * mlock_vma_page() and munlock_vma_page():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(),
 * munlock at the end of page_remove_rmap(); but new anon
 * pages are managed by lru_cache_add_inactive_or_unevictable()
 * calling mlock_new_page().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

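/*
 * Worked example (editor's addition, hypothetical numbers): for a VMA with
 * vm_start = 0x7f0000000000 and vm_pgoff = 0x100, a page whose pgoff is
 * 0x180 is expected at:
 *
 *	address = 0x7f0000000000 + ((0x180 - 0x100) << PAGE_SHIFT)
 *		= 0x7f0000000000 + 0x80000	(with 4K pages)
 *
 * provided that address still falls inside [vm_start, vm_end).
 */
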
/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if
	 * FAULT_FLAG_ALLOW_RETRY is set (and this is the first attempt) while
	 * FAULT_FLAG_RETRY_NOWAIT is not.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
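
/*
 * Illustrative sketch (editor's addition): the filemap fault path uses this
 * in its readahead helpers roughly as:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	page_cache_sync_ra(&ractl, ra->ra_pages);
 *	return fpin;
 *
 * filemap_fault() later fput()s the returned file and reports
 * VM_FAULT_RETRY when the mmap_lock was dropped.
 */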
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

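/*
 * Illustrative usage (editor's addition, hypothetical message): callers in
 * the memory-init path log through the macro like
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_id, start_pfn, end_pfn);
 *
 * When CONFIG_DEBUG_MEMORY_INIT is not set, the empty stub below makes
 * this a no-op.
 */
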
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

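/*
 * Illustrative sketch (editor's addition): the allocator selects the
 * watermark to check against via the low ALLOC_WMARK bits, roughly as in
 * get_page_from_freelist():
 *
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *	if (!zone_watermark_fast(zone, order, mark, highest_zoneidx,
 *				 alloc_flags, gfp_mask))
 *		continue;
 */
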
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume that a reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

#endif	/* __MM_INTERNAL_H */