/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		/*
		 * The decoding below only copes with the general and
		 * thisnode zonelists.
		 */
		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				pr_cont("%d:%s ", zone->node, zone->name);
#else
				pr_cont("0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags\n");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags\n");
#endif

	/*
	 * Walk the fields down from the top bit and verify that each
	 * one starts at the shift the rest of mm expects.
	 */
	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/*
	 * Check for bitmask overlaps: if the zone, node and section
	 * fields occupy disjoint bit ranges, OR-ing the shifted masks
	 * gives the same value as adding them, because no carries can
	 * be generated. Any difference means two fields share a bit.
	 */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
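
/*
 * For reference (a sketch, not code used in this file): the layout
 * verified above is what the page_zonenum()/page_to_nid() style
 * helpers in <linux/mm.h> rely on, each field being extracted as,
 * e.g.:
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *
 * The BUG_ON()s above ensure those shift/mask pairs address
 * disjoint, correctly placed bit ranges within page->flags.
 */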

/* mminit_loglevel is set on the kernel command line, e.g. mminit_loglevel=2 */
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/*
	 * Batch size is set to 0.4% of (total memory / #cpus):
	 * dividing by 256 approximates 0.4% (1/256 ~= 0.39%), and the
	 * result is capped at max int32 so it fits in the s32 batch.
	 */
	memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/* Recompute the batch whenever memory is hot-added or removed */
		mm_compute_batch();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI,	/* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
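
/*
 * Usage sketch (illustrative only; the "example" names below are
 * hypothetical): mm subsystems publish tunables under /sys/kernel/mm
 * by attaching attribute groups to mm_kobj, e.g.:
 *
 *	static struct attribute *example_attrs[] = { ..., NULL };
 *	static struct attribute_group example_attr_group = {
 *		.name	= "example",
 *		.attrs	= example_attrs,
 *	};
 *	err = sysfs_create_group(mm_kobj, &example_attr_group);
 *
 * which, once mm_sysfs_init() has run at postcore time, creates
 * /sys/kernel/mm/example/.
 */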