/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
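
/*
 * Example (illustrative sketch, not part of the original header): an
 * allocator-style path could give a freshly prepared, non-refcounted page
 * its first reference before handing it to the caller.
 *
 *	static struct page *hand_out_page(struct page *page)
 *	{
 *		set_page_refcounted(page);
 *		return page;
 *	}
 *
 * set_page_refcounted() takes page->_count from 0 to 1; hand_out_page() is
 * a made-up name used purely for illustration.
 */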

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}
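
/*
 * Example (illustrative sketch, not part of the original header):
 * __put_page() only decrements page->_count and never frees the page, so
 * it is only safe when the caller knows another reference keeps the count
 * above zero; the general-purpose helper is put_page(), which handles the
 * final free.  drop_extra_ref() below is a hypothetical name.
 *
 *	static void drop_extra_ref(struct page *page)
 *	{
 *		VM_BUG_ON(page_count(page) < 2);
 *		__put_page(page);
 *	}
 */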

extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
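
/*
 * Example (illustrative sketch, not part of the original header):
 * page_order() is only meaningful while the page sits on the buddy free
 * lists, so a caller holds zone->lock to keep the page from being
 * allocated underneath it.  "zone", "page" and "flags" are assumed to be
 * in scope in the caller.
 *
 *	unsigned long order = 0;
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */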

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
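
/*
 * Example (illustrative sketch, not part of the original header): a helper
 * on the paging_init() path would be annotated with __paginginit, so it is
 * discarded after boot on FLATMEM/DISCONTIGMEM but, via __meminit, kept
 * around on SPARSEMEM kernels that support memory hotplug.
 * init_one_zone() is a hypothetical name; the real users are the zone and
 * node setup functions in mm/page_alloc.c.
 *
 *	static void __paginginit init_one_zone(struct zone *zone)
 *	{
 *		...
 *	}
 */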

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
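
/*
 * Example (illustrative sketch, not part of the original header): callers
 * pass a verbosity level, a short prefix naming the check, and a
 * printf-style message; output appears only when mminit_loglevel (normally
 * raised via a boot parameter) is greater than the message level.  The
 * prefix and arguments below are made up for illustration.
 *
 *	mminit_dprintk(MMINIT_VERIFY, "pageflags_layout",
 *			"Section %d Node %d Zone %d\n", section, node, zone);
 */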

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */
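
/*
 * Example (illustrative sketch, not part of the original header): a caller
 * registering a pfn range with SPARSEMEM could clamp it to what the memory
 * model can represent before handing it to memory_present().  "base",
 * "size" and "nid" are assumed to come from the caller.
 *
 *	unsigned long start_pfn = base >> PAGE_SHIFT;
 *	unsigned long end_pfn = (base + size) >> PAGE_SHIFT;
 *
 *	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 *	if (start_pfn < end_pfn)
 *		memory_present(nid, start_pfn, end_pfn);
 */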

#endif