xref: /openbmc/linux/mm/internal.h (revision 827beb77)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

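/*
 * Illustrative sketch (not part of this header): internal allocation
 * paths are expected to keep only the reclaim-relevant bits of a
 * caller's mask and add their own placement constraints, roughly as
 * the slab allocator does when allocating a new slab:
 *
 *	alloc_gfp = flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
 *
 * "flags" and "alloc_gfp" are hypothetical locals shown for
 * illustration only.
 */
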
void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/memcontrol.c:
 */
extern bool cgroup_memory_nokmem;

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on a zone lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since a zone higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

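/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * equation 2) above gives the pfn of the combined order O+1 page; the
 * page allocator open-codes this when merging buddies. Shown here only
 * to complement the worked example for __find_buddy_pfn().
 */
static inline unsigned long
__find_buddy_parent_pfn_sketch(unsigned long page_pfn, unsigned int order)
{
	/* e.g. pfn 10 at order 1: 10 & ~2 = 8, the start of the merged pair */
	return page_pfn & ~(1UL << order);
}
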
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

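/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a compaction run is complete once the migrate and free scanners have
 * met; mm/compaction.c performs a pageblock-aligned version of this check.
 */
static inline bool compact_scanners_met_sketch(const struct compact_control *cc)
{
	return cc->free_pfn <= cc->migrate_pfn;
}
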
/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

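/*
 * Illustrative sketch (not part of this header): a lockless user is
 * expected to check PageBuddy() first, take a single snapshot of the
 * order and validate it before use, accepting that it may be stale:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order < MAX_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 *
 * The pfn adjustment is only one example of acting on the snapshot; the
 * compaction scanner uses a very similar pattern.
 */
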
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

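/*
 * Illustrative sketch (simplified from vm_stat_account() in mm/mmap.c):
 * the three predicates above decide which per-mm counter a mapping is
 * charged to.
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 *
 * "flags", "mm" and "npages" stand in for the usual vm_stat_account()
 * arguments.
 */
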
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

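/*
 * Worked example (illustrative figures only): with PAGE_SHIFT == 12,
 * vma->vm_start == 0x7f0000000000, vma->vm_pgoff == 0x10 and a page at
 * pgoff 0x12, vma_address() returns
 *
 *	0x7f0000000000 + ((0x12 - 0x10) << 12) == 0x7f0000002000
 *
 * i.e. the page is expected two pages into the mapping.
 */
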
/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page) + compound_nr(page);
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}

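/*
 * Illustrative sketch (condensed from the mm/filemap.c fault path): a
 * fault handler that used this helper must fput() the pinned file and
 * report that the mmap_lock has been dropped:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start the I/O ...
 *	if (fpin) {
 *		fput(fpin);
 *		return ret | VM_FAULT_RETRY;
 *	}
 */
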
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

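/*
 * Illustrative usage sketch (the real callers live in the memory-init
 * code); any printf-style arguments may follow the prefix and format:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "zone %s checked\n", zone->name);
 *
 * The "zone->name" argument here is hypothetical, shown only to
 * demonstrate the calling convention.
 */
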
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

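/*
 * Illustrative sketch (mirroring how get_page_from_freelist() consumes
 * these bits): the watermark a zone is checked against is selected by
 * the low bits of alloc_flags.
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 * With ALLOC_NO_WATERMARKS set, the watermark check is skipped entirely.
 */
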
/*
 * Only MMU archs have async OOM victim reclaim (aka the oom_reaper), so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

#endif	/* __MM_INTERNAL_H */