// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

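/*
 * SECTIONS_SHIFT comes from linux/page-flags-layout.h and is only
 * defined on CONFIG_SPARSEMEM kernels, so provide a zero fallback
 * for the layout report below.
 */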
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
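/*
 * Hypothetical sample of the output with mminit_loglevel >= MMINIT_VERIFY,
 * for one NUMA node whose DMA, DMA32 and Normal zones are all populated
 * (one line is printed per populated zone in each zonelist):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   mminit::zonelist general 0:DMA32 = 0:DMA32 0:DMA
 *   mminit::zonelist general 0:DMA = 0:DMA
 *   mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   ...
 */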
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

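/*
 * page->flags packs these fields downward from the most significant
 * bit, mirroring the checks below; a field that is configured out has
 * zero width here and is tracked outside of page->flags:
 *
 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | ... | FLAGS |
 */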
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags\n");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_lastcpupidflags",
		"Last cpupid not in page flags\n");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
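	/*
	 * If any two of the shifted masks shared a set bit, the addition
	 * would carry and add_mask would differ from or_mask, so equality
	 * proves the fields are disjoint.
	 */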
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

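/*
 * Set with the "mminit_loglevel=" boot parameter; the accepted values
 * are the mminit_level enum in mm/internal.h, e.g. "mminit_loglevel=2"
 * (MMINIT_TRACE) enables all of the output above.
 */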
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

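/*
 * Worked example with hypothetical numbers: 16GiB of 4KiB pages on 4
 * present CPUs gives totalram_pages() = 4194304, so memsized_batch =
 * (4194304 / 4) / 256 = 4096, which beats the CPU-based floor of
 * max_t(s32, 4 * 2, 32) = 32.
 */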
static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr * 2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
	memsized_batch = min_t(u64, (totalram_pages() / nr) / 256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

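/*
 * vm_committed_as_batch scales with totalram_pages(), so it must be
 * recomputed whenever memory is onlined or offlined.
 */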
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif /* CONFIG_SMP */

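/*
 * Create /sys/kernel/mm, which other mm subsystems (hugepages, ksm,
 * transparent hugepages, ...) use as the parent for their sysfs
 * directories.
 */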
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);