// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

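/*
 * A rough sketch of the page->flags layout that is verified below; the
 * fields are consumed from the most significant bit down, and any of the
 * widths may be zero depending on the config (see page-flags-layout.h):
 *
 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | LRU_GEN/REFS | ... | FLAGS |
 *   63                                                                            0
 */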
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = BITS_PER_LONG;
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

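/*
 * Worked example of the sizing below, assuming 4KiB pages on a 16-CPU
 * machine with 64GiB (16M pages) of RAM: the CPU-based floor is
 * max(16 * 2, 32) = 32, while OVERCOMMIT_NEVER uses 16M/16/256 = 4096
 * pages (1/256 ~= 0.4% of the per-CPU share) and the other policies use
 * 16M/16/4 = 256K pages (25%), each clamped to INT_MAX.
 */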
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
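
/*
 * For example (sizes take the usual memparse() suffixes):
 *   kernelcore=512M  ->  *core = 512M >> PAGE_SHIFT, *percent = 0
 *   kernelcore=30%   ->  *percent = 30
 * The same parsing is used for the movablecore= parameter below.
 */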

bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		if (!memblock_has_mirror()) {
			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
			goto out;
		}

		if (is_kdump_kernel()) {
			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
			goto out;
		}

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;
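	/* (totalpages * 100 * percent) / 10000 is simply percent% of totalpages */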

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_NUMA
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}

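/*
 * hashdist selects whether the large boot-time system hash tables (see
 * alloc_large_system_hash()) are allocated from vmalloc space so their
 * pages can be spread across NUMA nodes, rather than taken from a single
 * node's direct-mapped memory. It is pointless on single-node systems,
 * hence the fixup below.
 */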
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{
	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * prev_end_pfn is a static that contains the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}

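/*
 * Make sure the struct page for a bootmem-reserved pfn is initialised even
 * when it lies in the node's deferred range, so that
 * reserve_bootmem_region() below can safely mark it PageReserved before
 * deferred init runs.
 */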
static void __meminit init_reserved_page(unsigned long pfn, int nid)
{
	pg_data_t *pgdat;
	int zid;

	if (early_page_initialised(pfn, nid))
		return;

	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn, int nid)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void init_reserved_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start,
				      phys_addr_t end, int nid)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn, nid);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
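	/*
	 * r is static so the region found on a previous call is reused
	 * while *pfn still falls below its end; since early memmap init
	 * walks pfns in increasing order, this avoids rescanning the whole
	 * memblock list for every page.
	 */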
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}

/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
			node, zone_names[zone], pgcnt);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function.  They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}

memmap_init_zone_range(struct zone * zone,unsigned long start_pfn,unsigned long end_pfn,unsigned long * hole_pfn)9099420f89dSMike Rapoport (IBM) static void __init memmap_init_zone_range(struct zone *zone,
9109420f89dSMike Rapoport (IBM) 					  unsigned long start_pfn,
9119420f89dSMike Rapoport (IBM) 					  unsigned long end_pfn,
9129420f89dSMike Rapoport (IBM) 					  unsigned long *hole_pfn)
9139420f89dSMike Rapoport (IBM) {
9149420f89dSMike Rapoport (IBM) 	unsigned long zone_start_pfn = zone->zone_start_pfn;
9159420f89dSMike Rapoport (IBM) 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
9169420f89dSMike Rapoport (IBM) 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
9179420f89dSMike Rapoport (IBM) 
9189420f89dSMike Rapoport (IBM) 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
9199420f89dSMike Rapoport (IBM) 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
9209420f89dSMike Rapoport (IBM) 
9219420f89dSMike Rapoport (IBM) 	if (start_pfn >= end_pfn)
9229420f89dSMike Rapoport (IBM) 		return;
9239420f89dSMike Rapoport (IBM) 
9249420f89dSMike Rapoport (IBM) 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
9259420f89dSMike Rapoport (IBM) 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
9269420f89dSMike Rapoport (IBM) 
9279420f89dSMike Rapoport (IBM) 	if (*hole_pfn < start_pfn)
9289420f89dSMike Rapoport (IBM) 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
9299420f89dSMike Rapoport (IBM) 
9309420f89dSMike Rapoport (IBM) 	*hole_pfn = end_pfn;
9319420f89dSMike Rapoport (IBM) }
9329420f89dSMike Rapoport (IBM) 
memmap_init(void)9339420f89dSMike Rapoport (IBM) static void __init memmap_init(void)
9349420f89dSMike Rapoport (IBM) {
9359420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
9369420f89dSMike Rapoport (IBM) 	unsigned long hole_pfn = 0;
9379420f89dSMike Rapoport (IBM) 	int i, j, zone_id = 0, nid;
9389420f89dSMike Rapoport (IBM) 
9399420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
9409420f89dSMike Rapoport (IBM) 		struct pglist_data *node = NODE_DATA(nid);
9419420f89dSMike Rapoport (IBM) 
9429420f89dSMike Rapoport (IBM) 		for (j = 0; j < MAX_NR_ZONES; j++) {
9439420f89dSMike Rapoport (IBM) 			struct zone *zone = node->node_zones + j;
9449420f89dSMike Rapoport (IBM) 
9459420f89dSMike Rapoport (IBM) 			if (!populated_zone(zone))
9469420f89dSMike Rapoport (IBM) 				continue;
9479420f89dSMike Rapoport (IBM) 
9489420f89dSMike Rapoport (IBM) 			memmap_init_zone_range(zone, start_pfn, end_pfn,
9499420f89dSMike Rapoport (IBM) 					       &hole_pfn);
9509420f89dSMike Rapoport (IBM) 			zone_id = j;
9519420f89dSMike Rapoport (IBM) 		}
9529420f89dSMike Rapoport (IBM) 	}
9539420f89dSMike Rapoport (IBM) 
9549420f89dSMike Rapoport (IBM) #ifdef CONFIG_SPARSEMEM
9559420f89dSMike Rapoport (IBM) 	/*
9569420f89dSMike Rapoport (IBM) 	 * Initialize the memory map for hole in the range [memory_end,
9579420f89dSMike Rapoport (IBM) 	 * section_end].
9589420f89dSMike Rapoport (IBM) 	 * Append the pages in this hole to the highest zone in the last
9599420f89dSMike Rapoport (IBM) 	 * node.
9609420f89dSMike Rapoport (IBM) 	 * The call to init_unavailable_range() is outside the ifdef to
9619420f89dSMike Rapoport (IBM) 	 * silence the compiler warining about zone_id set but not used;
9629420f89dSMike Rapoport (IBM) 	 * for FLATMEM it is a nop anyway
9639420f89dSMike Rapoport (IBM) 	 */
9649420f89dSMike Rapoport (IBM) 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
9659420f89dSMike Rapoport (IBM) 	if (hole_pfn < end_pfn)
9669420f89dSMike Rapoport (IBM) #endif
9679420f89dSMike Rapoport (IBM) 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
9689420f89dSMike Rapoport (IBM) }
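
/*
 * Numeric sketch of the SPARSEMEM tail handling above (assumed values):
 * with PAGES_PER_SECTION == 0x8000 and memory ending at pfn 0x7e00, the
 * round_up() yields 0x8000, so the pfns in [0x7e00, 0x8000), which have
 * a memmap but no backing memory, are initialised as an unavailable
 * range appended to the highest zone of the last node.
 */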
9699420f89dSMike Rapoport (IBM) 
9709420f89dSMike Rapoport (IBM) #ifdef CONFIG_ZONE_DEVICE
9719420f89dSMike Rapoport (IBM) static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
9729420f89dSMike Rapoport (IBM) 					  unsigned long zone_idx, int nid,
9739420f89dSMike Rapoport (IBM) 					  struct dev_pagemap *pgmap)
9749420f89dSMike Rapoport (IBM) {
9759420f89dSMike Rapoport (IBM) 
9769420f89dSMike Rapoport (IBM) 	__init_single_page(page, pfn, zone_idx, nid);
9779420f89dSMike Rapoport (IBM) 
9789420f89dSMike Rapoport (IBM) 	/*
9799420f89dSMike Rapoport (IBM) 	 * Mark the page reserved as it will need to wait for the onlining
9809420f89dSMike Rapoport (IBM) 	 * phase before it is fully associated with a zone.
9819420f89dSMike Rapoport (IBM) 	 *
9829420f89dSMike Rapoport (IBM) 	 * We can use the non-atomic __set_bit operation for setting
9839420f89dSMike Rapoport (IBM) 	 * the flag as we are still initializing the pages.
9849420f89dSMike Rapoport (IBM) 	 */
9859420f89dSMike Rapoport (IBM) 	__SetPageReserved(page);
9869420f89dSMike Rapoport (IBM) 
9879420f89dSMike Rapoport (IBM) 	/*
9889420f89dSMike Rapoport (IBM) 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
9899420f89dSMike Rapoport (IBM) 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
9909420f89dSMike Rapoport (IBM) 	 * ever freed or placed on a driver-private list.
9919420f89dSMike Rapoport (IBM) 	 */
9929420f89dSMike Rapoport (IBM) 	page->pgmap = pgmap;
9939420f89dSMike Rapoport (IBM) 	page->zone_device_data = NULL;
9949420f89dSMike Rapoport (IBM) 
9959420f89dSMike Rapoport (IBM) 	/*
9969420f89dSMike Rapoport (IBM) 	 * Mark the block movable so that blocks are reserved for
9979420f89dSMike Rapoport (IBM) 	 * movable at startup. This will force kernel allocations
9989420f89dSMike Rapoport (IBM) 	 * to reserve their blocks rather than leaking throughout
9999420f89dSMike Rapoport (IBM) 	 * the address space during boot when many long-lived
10009420f89dSMike Rapoport (IBM) 	 * kernel allocations are made.
10019420f89dSMike Rapoport (IBM) 	 *
10029420f89dSMike Rapoport (IBM) 	 * Please note that the MEMINIT_HOTPLUG path doesn't clear the memmap
10039420f89dSMike Rapoport (IBM) 	 * because this is done early in section_activate()
10049420f89dSMike Rapoport (IBM) 	 */
10059420f89dSMike Rapoport (IBM) 	if (pageblock_aligned(pfn)) {
10069420f89dSMike Rapoport (IBM) 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
10079420f89dSMike Rapoport (IBM) 		cond_resched();
10089420f89dSMike Rapoport (IBM) 	}
10099420f89dSMike Rapoport (IBM) 
10109420f89dSMike Rapoport (IBM) 	/*
10119420f89dSMike Rapoport (IBM) 	 * ZONE_DEVICE pages are released directly to the driver page allocator
10129420f89dSMike Rapoport (IBM) 	 * which will set the page count to 1 when allocating the page.
10139420f89dSMike Rapoport (IBM) 	 */
10149420f89dSMike Rapoport (IBM) 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
10159420f89dSMike Rapoport (IBM) 	    pgmap->type == MEMORY_DEVICE_COHERENT)
10169420f89dSMike Rapoport (IBM) 		set_page_count(page, 0);
10179420f89dSMike Rapoport (IBM) }
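
/*
 * Net refcount convention established above: __init_single_page() leaves
 * the page with a count of 1, which device-private and device-coherent
 * memory then drop to 0 so that the driver page allocator can set it to
 * 1 itself when handing the page out.  Other pgmap types keep the
 * initial count of 1.
 */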
10189420f89dSMike Rapoport (IBM) 
10199420f89dSMike Rapoport (IBM) /*
10209420f89dSMike Rapoport (IBM)  * With compound page geometry and when struct pages are stored in RAM,
10219420f89dSMike Rapoport (IBM)  * most tail pages are reused. Consequently, the number of unique struct
10229420f89dSMike Rapoport (IBM)  * pages to initialize is a lot smaller than the total number of struct
10239420f89dSMike Rapoport (IBM)  * pages being mapped. This is a paired / mild layering violation with
10249420f89dSMike Rapoport (IBM)  * explicit knowledge of how the sparse_vmemmap internals handle compound
10259420f89dSMike Rapoport (IBM)  * pages in the absence of an altmap. See vmemmap_populate_compound_pages().
10269420f89dSMike Rapoport (IBM)  */
10279420f89dSMike Rapoport (IBM) static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
102887a7ae75SAneesh Kumar K.V 					      struct dev_pagemap *pgmap)
10299420f89dSMike Rapoport (IBM) {
103087a7ae75SAneesh Kumar K.V 	if (!vmemmap_can_optimize(altmap, pgmap))
103187a7ae75SAneesh Kumar K.V 		return pgmap_vmemmap_nr(pgmap);
103287a7ae75SAneesh Kumar K.V 
1033c1a6c536SAneesh Kumar K.V 	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
10349420f89dSMike Rapoport (IBM) }
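
/*
 * Worked example with assumed sizes: on a kernel with 4 KiB pages and a
 * 64-byte struct page, PAGE_SIZE / sizeof(struct page) == 64.  Assuming
 * VMEMMAP_RESERVE_NR == 2, an optimized compound device page needs only
 * 2 * 64 == 128 unique struct pages initialised, however large
 * pgmap_vmemmap_nr(pgmap) is (e.g. 0x40000 for a 1 GiB compound page).
 */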
10359420f89dSMike Rapoport (IBM) 
10369420f89dSMike Rapoport (IBM) static void __ref memmap_init_compound(struct page *head,
10379420f89dSMike Rapoport (IBM) 				       unsigned long head_pfn,
10389420f89dSMike Rapoport (IBM) 				       unsigned long zone_idx, int nid,
10399420f89dSMike Rapoport (IBM) 				       struct dev_pagemap *pgmap,
10409420f89dSMike Rapoport (IBM) 				       unsigned long nr_pages)
10419420f89dSMike Rapoport (IBM) {
10429420f89dSMike Rapoport (IBM) 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
10439420f89dSMike Rapoport (IBM) 	unsigned int order = pgmap->vmemmap_shift;
10449420f89dSMike Rapoport (IBM) 
10459420f89dSMike Rapoport (IBM) 	__SetPageHead(head);
10469420f89dSMike Rapoport (IBM) 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
10479420f89dSMike Rapoport (IBM) 		struct page *page = pfn_to_page(pfn);
10489420f89dSMike Rapoport (IBM) 
10499420f89dSMike Rapoport (IBM) 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
10509420f89dSMike Rapoport (IBM) 		prep_compound_tail(head, pfn - head_pfn);
10519420f89dSMike Rapoport (IBM) 		set_page_count(page, 0);
10529420f89dSMike Rapoport (IBM) 
10539420f89dSMike Rapoport (IBM) 		/*
10549420f89dSMike Rapoport (IBM) 		 * The first tail page stores important compound page info.
10559420f89dSMike Rapoport (IBM) 		 * Call prep_compound_head() after the first tail page has
10569420f89dSMike Rapoport (IBM) 		 * been initialized, to not have the data overwritten.
10579420f89dSMike Rapoport (IBM) 		 */
10589420f89dSMike Rapoport (IBM) 		if (pfn == head_pfn + 1)
10599420f89dSMike Rapoport (IBM) 			prep_compound_head(head, order);
10609420f89dSMike Rapoport (IBM) 	}
10619420f89dSMike Rapoport (IBM) }
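
/*
 * Illustrative sequence for one compound page (assumed head_pfn 0x1000,
 * nr_pages 4): the head is marked, tail 0x1001 is initialised and
 * prepped, then prep_compound_head() stores the compound metadata that
 * lives in that first tail, and tails 0x1002 and 0x1003 follow.  Calling
 * prep_compound_head() any earlier would let the first tail's own
 * initialisation overwrite that metadata.
 */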
10629420f89dSMike Rapoport (IBM) 
10639420f89dSMike Rapoport (IBM) void __ref memmap_init_zone_device(struct zone *zone,
10649420f89dSMike Rapoport (IBM) 				   unsigned long start_pfn,
10659420f89dSMike Rapoport (IBM) 				   unsigned long nr_pages,
10669420f89dSMike Rapoport (IBM) 				   struct dev_pagemap *pgmap)
10679420f89dSMike Rapoport (IBM) {
10689420f89dSMike Rapoport (IBM) 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
10699420f89dSMike Rapoport (IBM) 	struct pglist_data *pgdat = zone->zone_pgdat;
10709420f89dSMike Rapoport (IBM) 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
10719420f89dSMike Rapoport (IBM) 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
10729420f89dSMike Rapoport (IBM) 	unsigned long zone_idx = zone_idx(zone);
10739420f89dSMike Rapoport (IBM) 	unsigned long start = jiffies;
10749420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
10759420f89dSMike Rapoport (IBM) 
10769420f89dSMike Rapoport (IBM) 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
10779420f89dSMike Rapoport (IBM) 		return;
10789420f89dSMike Rapoport (IBM) 
10799420f89dSMike Rapoport (IBM) 	/*
10809420f89dSMike Rapoport (IBM) 	 * The call to memmap_init should have already taken care
10819420f89dSMike Rapoport (IBM) 	 * of the pages reserved for the memmap, so we can just jump to
10829420f89dSMike Rapoport (IBM) 	 * the end of that region and start processing the device pages.
10839420f89dSMike Rapoport (IBM) 	 */
10849420f89dSMike Rapoport (IBM) 	if (altmap) {
10859420f89dSMike Rapoport (IBM) 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
10869420f89dSMike Rapoport (IBM) 		nr_pages = end_pfn - start_pfn;
10879420f89dSMike Rapoport (IBM) 	}
10889420f89dSMike Rapoport (IBM) 
10899420f89dSMike Rapoport (IBM) 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
10909420f89dSMike Rapoport (IBM) 		struct page *page = pfn_to_page(pfn);
10919420f89dSMike Rapoport (IBM) 
10929420f89dSMike Rapoport (IBM) 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
10939420f89dSMike Rapoport (IBM) 
10949420f89dSMike Rapoport (IBM) 		if (pfns_per_compound == 1)
10959420f89dSMike Rapoport (IBM) 			continue;
10969420f89dSMike Rapoport (IBM) 
10979420f89dSMike Rapoport (IBM) 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
109887a7ae75SAneesh Kumar K.V 				     compound_nr_pages(altmap, pgmap));
10999420f89dSMike Rapoport (IBM) 	}
11009420f89dSMike Rapoport (IBM) 
1101dd31bad2STomas Krcka 	pr_debug("%s initialised %lu pages in %ums\n", __func__,
11029420f89dSMike Rapoport (IBM) 		nr_pages, jiffies_to_msecs(jiffies - start));
11039420f89dSMike Rapoport (IBM) }
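
/*
 * Sketch of the altmap fast-forward above (assumed values): if the
 * device spans pfns [0x10000, 0x50000) and the first pfns back the
 * memmap itself (base_pfn == 0x10000, vmem_altmap_offset() == 0x400),
 * the loop starts at pfn 0x10400 and nr_pages shrinks to 0x3fc00; the
 * altmap-backed struct pages were already set up by memmap_init.
 */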
11049420f89dSMike Rapoport (IBM) #endif
11059420f89dSMike Rapoport (IBM) 
11069420f89dSMike Rapoport (IBM) /*
11079420f89dSMike Rapoport (IBM)  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
11089420f89dSMike Rapoport (IBM)  * because it is sized independently of the architecture. Unlike the other zones,
11099420f89dSMike Rapoport (IBM)  * the starting point for ZONE_MOVABLE is not fixed. It may be different
11109420f89dSMike Rapoport (IBM)  * in each node depending on the size of each node and how evenly kernelcore
11119420f89dSMike Rapoport (IBM)  * is distributed. This helper function adjusts the zone ranges
11129420f89dSMike Rapoport (IBM)  * provided by the architecture for a given node by using the end of the
11139420f89dSMike Rapoport (IBM)  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
11149420f89dSMike Rapoport (IBM)  * zones within a node are in order of monotonically increasing memory addresses.
11159420f89dSMike Rapoport (IBM)  */
11169420f89dSMike Rapoport (IBM) static void __init adjust_zone_range_for_zone_movable(int nid,
11179420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
11189420f89dSMike Rapoport (IBM) 					unsigned long node_end_pfn,
11199420f89dSMike Rapoport (IBM) 					unsigned long *zone_start_pfn,
11209420f89dSMike Rapoport (IBM) 					unsigned long *zone_end_pfn)
11219420f89dSMike Rapoport (IBM) {
11229420f89dSMike Rapoport (IBM) 	/* Only adjust if ZONE_MOVABLE is on this node */
11239420f89dSMike Rapoport (IBM) 	if (zone_movable_pfn[nid]) {
11249420f89dSMike Rapoport (IBM) 		/* Size ZONE_MOVABLE */
11259420f89dSMike Rapoport (IBM) 		if (zone_type == ZONE_MOVABLE) {
11269420f89dSMike Rapoport (IBM) 			*zone_start_pfn = zone_movable_pfn[nid];
11279420f89dSMike Rapoport (IBM) 			*zone_end_pfn = min(node_end_pfn,
11289420f89dSMike Rapoport (IBM) 				arch_zone_highest_possible_pfn[movable_zone]);
11299420f89dSMike Rapoport (IBM) 
11309420f89dSMike Rapoport (IBM) 		/* Adjust for ZONE_MOVABLE starting within this range */
11319420f89dSMike Rapoport (IBM) 		} else if (!mirrored_kernelcore &&
11329420f89dSMike Rapoport (IBM) 			*zone_start_pfn < zone_movable_pfn[nid] &&
11339420f89dSMike Rapoport (IBM) 			*zone_end_pfn > zone_movable_pfn[nid]) {
11349420f89dSMike Rapoport (IBM) 			*zone_end_pfn = zone_movable_pfn[nid];
11359420f89dSMike Rapoport (IBM) 
11369420f89dSMike Rapoport (IBM) 		/* Check if this whole range is within ZONE_MOVABLE */
11379420f89dSMike Rapoport (IBM) 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
11389420f89dSMike Rapoport (IBM) 			*zone_start_pfn = *zone_end_pfn;
11399420f89dSMike Rapoport (IBM) 	}
11409420f89dSMike Rapoport (IBM) }
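
/*
 * Worked example (assumed pfns): take a node spanning [0, 0x1000) with
 * zone_movable_pfn[nid] == 0x600.  ZONE_MOVABLE itself becomes
 * [0x600, min(node end, highest usable pfn)).  A kernel zone straddling
 * the boundary, e.g. [0x200, 0x1000), has its end pulled back to 0x600.
 * A kernel zone starting at or above 0x600 gets start == end, i.e. it
 * becomes empty.
 */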
11419420f89dSMike Rapoport (IBM) 
11429420f89dSMike Rapoport (IBM) /*
11439420f89dSMike Rapoport (IBM)  * Return the number of page frames in holes in a range on a node. If nid is
11449420f89dSMike Rapoport (IBM)  * MAX_NUMNODES, then all holes in the requested range will be accounted for.
11459420f89dSMike Rapoport (IBM)  */
11469420f89dSMike Rapoport (IBM) unsigned long __init __absent_pages_in_range(int nid,
11479420f89dSMike Rapoport (IBM) 				unsigned long range_start_pfn,
11489420f89dSMike Rapoport (IBM) 				unsigned long range_end_pfn)
11499420f89dSMike Rapoport (IBM) {
11509420f89dSMike Rapoport (IBM) 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
11519420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
11529420f89dSMike Rapoport (IBM) 	int i;
11539420f89dSMike Rapoport (IBM) 
11549420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
11559420f89dSMike Rapoport (IBM) 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
11569420f89dSMike Rapoport (IBM) 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
11579420f89dSMike Rapoport (IBM) 		nr_absent -= end_pfn - start_pfn;
11589420f89dSMike Rapoport (IBM) 	}
11599420f89dSMike Rapoport (IBM) 	return nr_absent;
11609420f89dSMike Rapoport (IBM) }
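
/*
 * Worked example (assumed ranges): for a request of [0, 0x1000) on a
 * node whose memblock regions are [0x100, 0x300) and [0x500, 0x900),
 * nr_absent starts at 0x1000 and loses 0x200 and 0x400 for the present
 * ranges, so 0xa00 page frames of holes are reported.
 */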
11619420f89dSMike Rapoport (IBM) 
11629420f89dSMike Rapoport (IBM) /**
11639420f89dSMike Rapoport (IBM)  * absent_pages_in_range - Return number of page frames in holes within a range
11649420f89dSMike Rapoport (IBM)  * @start_pfn: The start PFN to start searching for holes
11659420f89dSMike Rapoport (IBM)  * @end_pfn: The end PFN to stop searching for holes
11669420f89dSMike Rapoport (IBM)  *
11679420f89dSMike Rapoport (IBM)  * Return: the number of page frames in memory holes within a range.
11689420f89dSMike Rapoport (IBM)  */
11699420f89dSMike Rapoport (IBM) unsigned long __init absent_pages_in_range(unsigned long start_pfn,
11709420f89dSMike Rapoport (IBM) 							unsigned long end_pfn)
11719420f89dSMike Rapoport (IBM) {
11729420f89dSMike Rapoport (IBM) 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
11739420f89dSMike Rapoport (IBM) }
11749420f89dSMike Rapoport (IBM) 
11759420f89dSMike Rapoport (IBM) /* Return the number of page frames in holes in a zone on a node */
11769420f89dSMike Rapoport (IBM) static unsigned long __init zone_absent_pages_in_node(int nid,
11779420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
11781c2d252fSHaifeng Xu 					unsigned long zone_start_pfn,
11791c2d252fSHaifeng Xu 					unsigned long zone_end_pfn)
11809420f89dSMike Rapoport (IBM) {
11819420f89dSMike Rapoport (IBM) 	unsigned long nr_absent;
11829420f89dSMike Rapoport (IBM) 
11831c2d252fSHaifeng Xu 	/* zone is empty, we don't have any absent pages */
11841c2d252fSHaifeng Xu 	if (zone_start_pfn == zone_end_pfn)
11859420f89dSMike Rapoport (IBM) 		return 0;
11869420f89dSMike Rapoport (IBM) 
11879420f89dSMike Rapoport (IBM) 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
11889420f89dSMike Rapoport (IBM) 
11899420f89dSMike Rapoport (IBM) 	/*
11909420f89dSMike Rapoport (IBM) 	 * ZONE_MOVABLE handling.
11919420f89dSMike Rapoport (IBM) 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
11929420f89dSMike Rapoport (IBM) 	 * and vice versa.
11939420f89dSMike Rapoport (IBM) 	 */
11949420f89dSMike Rapoport (IBM) 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
11959420f89dSMike Rapoport (IBM) 		unsigned long start_pfn, end_pfn;
11969420f89dSMike Rapoport (IBM) 		struct memblock_region *r;
11979420f89dSMike Rapoport (IBM) 
11989420f89dSMike Rapoport (IBM) 		for_each_mem_region(r) {
11999420f89dSMike Rapoport (IBM) 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
12009420f89dSMike Rapoport (IBM) 					  zone_start_pfn, zone_end_pfn);
12019420f89dSMike Rapoport (IBM) 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
12029420f89dSMike Rapoport (IBM) 					zone_start_pfn, zone_end_pfn);
12039420f89dSMike Rapoport (IBM) 
12049420f89dSMike Rapoport (IBM) 			if (zone_type == ZONE_MOVABLE &&
12059420f89dSMike Rapoport (IBM) 			    memblock_is_mirror(r))
12069420f89dSMike Rapoport (IBM) 				nr_absent += end_pfn - start_pfn;
12079420f89dSMike Rapoport (IBM) 
12089420f89dSMike Rapoport (IBM) 			if (zone_type == ZONE_NORMAL &&
12099420f89dSMike Rapoport (IBM) 			    !memblock_is_mirror(r))
12109420f89dSMike Rapoport (IBM) 				nr_absent += end_pfn - start_pfn;
12119420f89dSMike Rapoport (IBM) 		}
12129420f89dSMike Rapoport (IBM) 	}
12139420f89dSMike Rapoport (IBM) 
12149420f89dSMike Rapoport (IBM) 	return nr_absent;
12159420f89dSMike Rapoport (IBM) }
12169420f89dSMike Rapoport (IBM) 
12179420f89dSMike Rapoport (IBM) /*
12189420f89dSMike Rapoport (IBM)  * Return the number of pages a zone spans in a node, including holes
12199420f89dSMike Rapoport (IBM)  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
12209420f89dSMike Rapoport (IBM)  */
12219420f89dSMike Rapoport (IBM) static unsigned long __init zone_spanned_pages_in_node(int nid,
12229420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
12239420f89dSMike Rapoport (IBM) 					unsigned long node_start_pfn,
12249420f89dSMike Rapoport (IBM) 					unsigned long node_end_pfn,
12259420f89dSMike Rapoport (IBM) 					unsigned long *zone_start_pfn,
12269420f89dSMike Rapoport (IBM) 					unsigned long *zone_end_pfn)
12279420f89dSMike Rapoport (IBM) {
12289420f89dSMike Rapoport (IBM) 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
12299420f89dSMike Rapoport (IBM) 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
12309420f89dSMike Rapoport (IBM) 
12319420f89dSMike Rapoport (IBM) 	/* Get the start and end of the zone */
12329420f89dSMike Rapoport (IBM) 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
12339420f89dSMike Rapoport (IBM) 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
12340792e47dSHaifeng Xu 	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
12359420f89dSMike Rapoport (IBM) 					   zone_start_pfn, zone_end_pfn);
12369420f89dSMike Rapoport (IBM) 
12379420f89dSMike Rapoport (IBM) 	/* Check that this node has pages within the zone's required range */
12389420f89dSMike Rapoport (IBM) 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
12399420f89dSMike Rapoport (IBM) 		return 0;
12409420f89dSMike Rapoport (IBM) 
12419420f89dSMike Rapoport (IBM) 	/* Move the zone boundaries inside the node if necessary */
12429420f89dSMike Rapoport (IBM) 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
12439420f89dSMike Rapoport (IBM) 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
12449420f89dSMike Rapoport (IBM) 
12459420f89dSMike Rapoport (IBM) 	/* Return the spanned pages */
12469420f89dSMike Rapoport (IBM) 	return *zone_end_pfn - *zone_start_pfn;
12479420f89dSMike Rapoport (IBM) }
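
/*
 * Worked example (assumed pfns): with zone limits [0x1000, 0x5000) and a
 * node spanning [0x4000, 0x8000), the clamps give a zone of
 * [0x4000, 0x5000), i.e. 0x1000 spanned pages.  A node spanning
 * [0x6000, 0x8000) lies entirely above the zone and returns 0.
 */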
12489420f89dSMike Rapoport (IBM) 
1249ba1b67c7SHaifeng Xu static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1250ba1b67c7SHaifeng Xu {
1251ba1b67c7SHaifeng Xu 	struct zone *z;
1252ba1b67c7SHaifeng Xu 
1253ba1b67c7SHaifeng Xu 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1254ba1b67c7SHaifeng Xu 		z->zone_start_pfn = 0;
1255ba1b67c7SHaifeng Xu 		z->spanned_pages = 0;
1256ba1b67c7SHaifeng Xu 		z->present_pages = 0;
1257ba1b67c7SHaifeng Xu #if defined(CONFIG_MEMORY_HOTPLUG)
1258ba1b67c7SHaifeng Xu 		z->present_early_pages = 0;
1259ba1b67c7SHaifeng Xu #endif
1260ba1b67c7SHaifeng Xu 	}
1261ba1b67c7SHaifeng Xu 
1262ba1b67c7SHaifeng Xu 	pgdat->node_spanned_pages = 0;
1263ba1b67c7SHaifeng Xu 	pgdat->node_present_pages = 0;
1264ba1b67c7SHaifeng Xu 	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1265ba1b67c7SHaifeng Xu }
1266ba1b67c7SHaifeng Xu 
12679420f89dSMike Rapoport (IBM) static void __init calculate_node_totalpages(struct pglist_data *pgdat,
12689420f89dSMike Rapoport (IBM) 						unsigned long node_start_pfn,
12699420f89dSMike Rapoport (IBM) 						unsigned long node_end_pfn)
12709420f89dSMike Rapoport (IBM) {
12719420f89dSMike Rapoport (IBM) 	unsigned long realtotalpages = 0, totalpages = 0;
12729420f89dSMike Rapoport (IBM) 	enum zone_type i;
12739420f89dSMike Rapoport (IBM) 
12749420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
12759420f89dSMike Rapoport (IBM) 		struct zone *zone = pgdat->node_zones + i;
12769420f89dSMike Rapoport (IBM) 		unsigned long zone_start_pfn, zone_end_pfn;
12779420f89dSMike Rapoport (IBM) 		unsigned long spanned, absent;
12781c2d252fSHaifeng Xu 		unsigned long real_size;
12799420f89dSMike Rapoport (IBM) 
12809420f89dSMike Rapoport (IBM) 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
12819420f89dSMike Rapoport (IBM) 						     node_start_pfn,
12829420f89dSMike Rapoport (IBM) 						     node_end_pfn,
12839420f89dSMike Rapoport (IBM) 						     &zone_start_pfn,
12849420f89dSMike Rapoport (IBM) 						     &zone_end_pfn);
12859420f89dSMike Rapoport (IBM) 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
12861c2d252fSHaifeng Xu 						   zone_start_pfn,
12871c2d252fSHaifeng Xu 						   zone_end_pfn);
12889420f89dSMike Rapoport (IBM) 
12891c2d252fSHaifeng Xu 		real_size = spanned - absent;
12909420f89dSMike Rapoport (IBM) 
12911c2d252fSHaifeng Xu 		if (spanned)
12929420f89dSMike Rapoport (IBM) 			zone->zone_start_pfn = zone_start_pfn;
12939420f89dSMike Rapoport (IBM) 		else
12949420f89dSMike Rapoport (IBM) 			zone->zone_start_pfn = 0;
12951c2d252fSHaifeng Xu 		zone->spanned_pages = spanned;
12969420f89dSMike Rapoport (IBM) 		zone->present_pages = real_size;
12979420f89dSMike Rapoport (IBM) #if defined(CONFIG_MEMORY_HOTPLUG)
12989420f89dSMike Rapoport (IBM) 		zone->present_early_pages = real_size;
12999420f89dSMike Rapoport (IBM) #endif
13009420f89dSMike Rapoport (IBM) 
13011c2d252fSHaifeng Xu 		totalpages += spanned;
13029420f89dSMike Rapoport (IBM) 		realtotalpages += real_size;
13039420f89dSMike Rapoport (IBM) 	}
13049420f89dSMike Rapoport (IBM) 
13059420f89dSMike Rapoport (IBM) 	pgdat->node_spanned_pages = totalpages;
13069420f89dSMike Rapoport (IBM) 	pgdat->node_present_pages = realtotalpages;
13079420f89dSMike Rapoport (IBM) 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
13089420f89dSMike Rapoport (IBM) }
13099420f89dSMike Rapoport (IBM) 
13109420f89dSMike Rapoport (IBM) static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
13119420f89dSMike Rapoport (IBM) 						unsigned long present_pages)
13129420f89dSMike Rapoport (IBM) {
13139420f89dSMike Rapoport (IBM) 	unsigned long pages = spanned_pages;
13149420f89dSMike Rapoport (IBM) 
13159420f89dSMike Rapoport (IBM) 	/*
13169420f89dSMike Rapoport (IBM) 	 * Provide a more accurate estimation if there are holes within
13179420f89dSMike Rapoport (IBM) 	 * the zone and SPARSEMEM is in use. If there are holes within the
13189420f89dSMike Rapoport (IBM) 	 * zone, each populated memory region may cost us one or two extra
13199420f89dSMike Rapoport (IBM) 	 * memmap pages due to alignment because memmap pages for each
13209420f89dSMike Rapoport (IBM) 	 * memmap pages due to alignment because memmap pages for each
13219420f89dSMike Rapoport (IBM) 	 * populated region may not be naturally aligned on a page boundary.
13229420f89dSMike Rapoport (IBM) 	 */
13239420f89dSMike Rapoport (IBM) 	if (spanned_pages > present_pages + (present_pages >> 4) &&
13249420f89dSMike Rapoport (IBM) 	    IS_ENABLED(CONFIG_SPARSEMEM))
13259420f89dSMike Rapoport (IBM) 		pages = present_pages;
13269420f89dSMike Rapoport (IBM) 
13279420f89dSMike Rapoport (IBM) 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
13289420f89dSMike Rapoport (IBM) }
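
/*
 * Worked example with assumed sizes: a zone spanning 0x100000 pages
 * (4 GiB with 4 KiB pages) and a 64-byte struct page costs
 * PAGE_ALIGN(0x100000 * 64) >> PAGE_SHIFT == 0x4000 memmap pages
 * (64 MiB, about 1.6% of the zone).  The heuristic falls back to
 * present_pages only when the holes exceed roughly 1/16 of the
 * present pages.
 */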
13299420f89dSMike Rapoport (IBM) 
13309420f89dSMike Rapoport (IBM) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
13319420f89dSMike Rapoport (IBM) static void pgdat_init_split_queue(struct pglist_data *pgdat)
13329420f89dSMike Rapoport (IBM) {
13339420f89dSMike Rapoport (IBM) 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
13349420f89dSMike Rapoport (IBM) 
13359420f89dSMike Rapoport (IBM) 	spin_lock_init(&ds_queue->split_queue_lock);
13369420f89dSMike Rapoport (IBM) 	INIT_LIST_HEAD(&ds_queue->split_queue);
13379420f89dSMike Rapoport (IBM) 	ds_queue->split_queue_len = 0;
13389420f89dSMike Rapoport (IBM) }
13399420f89dSMike Rapoport (IBM) #else
13409420f89dSMike Rapoport (IBM) static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
13419420f89dSMike Rapoport (IBM) #endif
13429420f89dSMike Rapoport (IBM) 
13439420f89dSMike Rapoport (IBM) #ifdef CONFIG_COMPACTION
13449420f89dSMike Rapoport (IBM) static void pgdat_init_kcompactd(struct pglist_data *pgdat)
13459420f89dSMike Rapoport (IBM) {
13469420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->kcompactd_wait);
13479420f89dSMike Rapoport (IBM) }
13489420f89dSMike Rapoport (IBM) #else
13499420f89dSMike Rapoport (IBM) static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
13509420f89dSMike Rapoport (IBM) #endif
13519420f89dSMike Rapoport (IBM) 
13529420f89dSMike Rapoport (IBM) static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
13539420f89dSMike Rapoport (IBM) {
13549420f89dSMike Rapoport (IBM) 	int i;
13559420f89dSMike Rapoport (IBM) 
13569420f89dSMike Rapoport (IBM) 	pgdat_resize_init(pgdat);
13579420f89dSMike Rapoport (IBM) 	pgdat_kswapd_lock_init(pgdat);
13589420f89dSMike Rapoport (IBM) 
13599420f89dSMike Rapoport (IBM) 	pgdat_init_split_queue(pgdat);
13609420f89dSMike Rapoport (IBM) 	pgdat_init_kcompactd(pgdat);
13619420f89dSMike Rapoport (IBM) 
13629420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->kswapd_wait);
13639420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
13649420f89dSMike Rapoport (IBM) 
13659420f89dSMike Rapoport (IBM) 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
13669420f89dSMike Rapoport (IBM) 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
13679420f89dSMike Rapoport (IBM) 
13689420f89dSMike Rapoport (IBM) 	pgdat_page_ext_init(pgdat);
13699420f89dSMike Rapoport (IBM) 	lruvec_init(&pgdat->__lruvec);
13709420f89dSMike Rapoport (IBM) }
13719420f89dSMike Rapoport (IBM) 
13729420f89dSMike Rapoport (IBM) static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
13739420f89dSMike Rapoport (IBM) 							unsigned long remaining_pages)
13749420f89dSMike Rapoport (IBM) {
13759420f89dSMike Rapoport (IBM) 	atomic_long_set(&zone->managed_pages, remaining_pages);
13769420f89dSMike Rapoport (IBM) 	zone_set_nid(zone, nid);
13779420f89dSMike Rapoport (IBM) 	zone->name = zone_names[idx];
13789420f89dSMike Rapoport (IBM) 	zone->zone_pgdat = NODE_DATA(nid);
13799420f89dSMike Rapoport (IBM) 	spin_lock_init(&zone->lock);
13809420f89dSMike Rapoport (IBM) 	zone_seqlock_init(zone);
13819420f89dSMike Rapoport (IBM) 	zone_pcp_init(zone);
13829420f89dSMike Rapoport (IBM) }
13839420f89dSMike Rapoport (IBM) 
13849420f89dSMike Rapoport (IBM) static void __meminit zone_init_free_lists(struct zone *zone)
13859420f89dSMike Rapoport (IBM) {
13869420f89dSMike Rapoport (IBM) 	unsigned int order, t;
13879420f89dSMike Rapoport (IBM) 	for_each_migratetype_order(order, t) {
13889420f89dSMike Rapoport (IBM) 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
13899420f89dSMike Rapoport (IBM) 		zone->free_area[order].nr_free = 0;
13909420f89dSMike Rapoport (IBM) 	}
1391dcdfdd40SKirill A. Shutemov 
1392dcdfdd40SKirill A. Shutemov #ifdef CONFIG_UNACCEPTED_MEMORY
1393dcdfdd40SKirill A. Shutemov 	INIT_LIST_HEAD(&zone->unaccepted_pages);
1394dcdfdd40SKirill A. Shutemov #endif
13959420f89dSMike Rapoport (IBM) }
13969420f89dSMike Rapoport (IBM) 
13979420f89dSMike Rapoport (IBM) void __meminit init_currently_empty_zone(struct zone *zone,
13989420f89dSMike Rapoport (IBM) 					unsigned long zone_start_pfn,
13999420f89dSMike Rapoport (IBM) 					unsigned long size)
14009420f89dSMike Rapoport (IBM) {
14019420f89dSMike Rapoport (IBM) 	struct pglist_data *pgdat = zone->zone_pgdat;
14029420f89dSMike Rapoport (IBM) 	int zone_idx = zone_idx(zone) + 1;
14039420f89dSMike Rapoport (IBM) 
14049420f89dSMike Rapoport (IBM) 	if (zone_idx > pgdat->nr_zones)
14059420f89dSMike Rapoport (IBM) 		pgdat->nr_zones = zone_idx;
14069420f89dSMike Rapoport (IBM) 
14079420f89dSMike Rapoport (IBM) 	zone->zone_start_pfn = zone_start_pfn;
14089420f89dSMike Rapoport (IBM) 
14099420f89dSMike Rapoport (IBM) 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
14109420f89dSMike Rapoport (IBM) 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
14119420f89dSMike Rapoport (IBM) 			pgdat->node_id,
14129420f89dSMike Rapoport (IBM) 			(unsigned long)zone_idx(zone),
14139420f89dSMike Rapoport (IBM) 			zone_start_pfn, (zone_start_pfn + size));
14149420f89dSMike Rapoport (IBM) 
14159420f89dSMike Rapoport (IBM) 	zone_init_free_lists(zone);
14169420f89dSMike Rapoport (IBM) 	zone->initialized = 1;
14179420f89dSMike Rapoport (IBM) }
14189420f89dSMike Rapoport (IBM) 
14199420f89dSMike Rapoport (IBM) #ifndef CONFIG_SPARSEMEM
14209420f89dSMike Rapoport (IBM) /*
14219420f89dSMike Rapoport (IBM)  * Calculate the size of the zone->blockflags rounded to an unsigned long.
14229420f89dSMike Rapoport (IBM)  * Start by making sure zonesize is a multiple of pageblock_nr_pages by
14239420f89dSMike Rapoport (IBM)  * rounding up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock,
14249420f89dSMike Rapoport (IBM)  * round what is now in bits up to the nearest long, and finally return the
14259420f89dSMike Rapoport (IBM)  * result in bytes.
14269420f89dSMike Rapoport (IBM)  */
14279420f89dSMike Rapoport (IBM) static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
14289420f89dSMike Rapoport (IBM) {
14299420f89dSMike Rapoport (IBM) 	unsigned long usemapsize;
14309420f89dSMike Rapoport (IBM) 
14319420f89dSMike Rapoport (IBM) 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
14329420f89dSMike Rapoport (IBM) 	usemapsize = roundup(zonesize, pageblock_nr_pages);
14339420f89dSMike Rapoport (IBM) 	usemapsize = usemapsize >> pageblock_order;
14349420f89dSMike Rapoport (IBM) 	usemapsize *= NR_PAGEBLOCK_BITS;
1435daee07bfSMiaohe Lin 	usemapsize = roundup(usemapsize, BITS_PER_LONG);
14369420f89dSMike Rapoport (IBM) 
1437daee07bfSMiaohe Lin 	return usemapsize / BITS_PER_BYTE;
14389420f89dSMike Rapoport (IBM) }
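
/*
 * Worked example with assumed values: for an aligned zone of 0x100000
 * pages with pageblock_order == 9 (pageblock_nr_pages == 512) and
 * NR_PAGEBLOCK_BITS == 4, there are 2048 pageblocks, hence 8192 bits;
 * that is already a multiple of BITS_PER_LONG on 64-bit and works out
 * to 1024 bytes of pageblock flags.
 */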
14399420f89dSMike Rapoport (IBM) 
14409420f89dSMike Rapoport (IBM) static void __ref setup_usemap(struct zone *zone)
14419420f89dSMike Rapoport (IBM) {
14429420f89dSMike Rapoport (IBM) 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
14439420f89dSMike Rapoport (IBM) 					       zone->spanned_pages);
14449420f89dSMike Rapoport (IBM) 	zone->pageblock_flags = NULL;
14459420f89dSMike Rapoport (IBM) 	if (usemapsize) {
14469420f89dSMike Rapoport (IBM) 		zone->pageblock_flags =
14479420f89dSMike Rapoport (IBM) 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
14489420f89dSMike Rapoport (IBM) 					    zone_to_nid(zone));
14499420f89dSMike Rapoport (IBM) 		if (!zone->pageblock_flags)
14509420f89dSMike Rapoport (IBM) 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
14519420f89dSMike Rapoport (IBM) 			      usemapsize, zone->name, zone_to_nid(zone));
14529420f89dSMike Rapoport (IBM) 	}
14539420f89dSMike Rapoport (IBM) }
14549420f89dSMike Rapoport (IBM) #else
14559420f89dSMike Rapoport (IBM) static inline void setup_usemap(struct zone *zone) {}
14569420f89dSMike Rapoport (IBM) #endif /* CONFIG_SPARSEMEM */
14579420f89dSMike Rapoport (IBM) 
14589420f89dSMike Rapoport (IBM) #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
14599420f89dSMike Rapoport (IBM) 
14609420f89dSMike Rapoport (IBM) /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
14619420f89dSMike Rapoport (IBM) void __init set_pageblock_order(void)
14629420f89dSMike Rapoport (IBM) {
14639420f89dSMike Rapoport (IBM) 	unsigned int order = MAX_ORDER;
14649420f89dSMike Rapoport (IBM) 
14659420f89dSMike Rapoport (IBM) 	/* Check that pageblock_nr_pages has not already been setup */
14669420f89dSMike Rapoport (IBM) 	if (pageblock_order)
14679420f89dSMike Rapoport (IBM) 		return;
14689420f89dSMike Rapoport (IBM) 
14699420f89dSMike Rapoport (IBM) 	/* Don't let pageblocks exceed the maximum allocation granularity. */
14709420f89dSMike Rapoport (IBM) 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
14719420f89dSMike Rapoport (IBM) 		order = HUGETLB_PAGE_ORDER;
14729420f89dSMike Rapoport (IBM) 
14739420f89dSMike Rapoport (IBM) 	/*
14749420f89dSMike Rapoport (IBM) 	 * Assume the largest contiguous order of interest is a huge page.
14759420f89dSMike Rapoport (IBM) 	 * This value may be variable depending on boot parameters on IA64 and
14769420f89dSMike Rapoport (IBM) 	 * powerpc.
14779420f89dSMike Rapoport (IBM) 	 */
14789420f89dSMike Rapoport (IBM) 	pageblock_order = order;
14799420f89dSMike Rapoport (IBM) }
14809420f89dSMike Rapoport (IBM) #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
14819420f89dSMike Rapoport (IBM) 
14829420f89dSMike Rapoport (IBM) /*
14839420f89dSMike Rapoport (IBM)  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
14849420f89dSMike Rapoport (IBM)  * is unused as pageblock_order is set at compile-time. See
14859420f89dSMike Rapoport (IBM)  * include/linux/pageblock-flags.h for the values of pageblock_order based on
14869420f89dSMike Rapoport (IBM)  * the kernel config
14879420f89dSMike Rapoport (IBM)  */
14889420f89dSMike Rapoport (IBM) void __init set_pageblock_order(void)
14899420f89dSMike Rapoport (IBM) {
14909420f89dSMike Rapoport (IBM) }
14919420f89dSMike Rapoport (IBM) 
14929420f89dSMike Rapoport (IBM) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
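
/*
 * Numeric sketch for the variable branch above (assumed config): with
 * MAX_ORDER == 10 and a boot-selected huge page order of 5, the check
 * picks pageblock_order == 5; a huge page order of MAX_ORDER or more
 * leaves it at MAX_ORDER, so pageblocks never exceed the maximum buddy
 * allocation granularity.
 */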
14939420f89dSMike Rapoport (IBM) 
14949420f89dSMike Rapoport (IBM) /*
14959420f89dSMike Rapoport (IBM)  * Set up the zone data structures
14969420f89dSMike Rapoport (IBM)  * - init pgdat internals
14979420f89dSMike Rapoport (IBM)  * - init all zones belonging to this node
14989420f89dSMike Rapoport (IBM)  *
14999420f89dSMike Rapoport (IBM)  * NOTE: this function is only called during memory hotplug
15009420f89dSMike Rapoport (IBM)  */
15019420f89dSMike Rapoport (IBM) #ifdef CONFIG_MEMORY_HOTPLUG
15029420f89dSMike Rapoport (IBM) void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
15039420f89dSMike Rapoport (IBM) {
15049420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
15059420f89dSMike Rapoport (IBM) 	enum zone_type z;
15069420f89dSMike Rapoport (IBM) 	int cpu;
15079420f89dSMike Rapoport (IBM) 
15089420f89dSMike Rapoport (IBM) 	pgdat_init_internals(pgdat);
15099420f89dSMike Rapoport (IBM) 
15109420f89dSMike Rapoport (IBM) 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
15119420f89dSMike Rapoport (IBM) 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
15129420f89dSMike Rapoport (IBM) 
15139420f89dSMike Rapoport (IBM) 	/*
15149420f89dSMike Rapoport (IBM) 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
15159420f89dSMike Rapoport (IBM) 	 * Note that kswapd will init kswapd_highest_zoneidx properly
15169420f89dSMike Rapoport (IBM) 	 * when it starts in the near future.
15179420f89dSMike Rapoport (IBM) 	 */
15189420f89dSMike Rapoport (IBM) 	pgdat->nr_zones = 0;
15199420f89dSMike Rapoport (IBM) 	pgdat->kswapd_order = 0;
15209420f89dSMike Rapoport (IBM) 	pgdat->kswapd_highest_zoneidx = 0;
15219420f89dSMike Rapoport (IBM) 	pgdat->node_start_pfn = 0;
152232b6a4a1SHaifeng Xu 	pgdat->node_present_pages = 0;
152332b6a4a1SHaifeng Xu 
15249420f89dSMike Rapoport (IBM) 	for_each_online_cpu(cpu) {
15259420f89dSMike Rapoport (IBM) 		struct per_cpu_nodestat *p;
15269420f89dSMike Rapoport (IBM) 
15279420f89dSMike Rapoport (IBM) 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
15289420f89dSMike Rapoport (IBM) 		memset(p, 0, sizeof(*p));
15299420f89dSMike Rapoport (IBM) 	}
15309420f89dSMike Rapoport (IBM) 
153132b6a4a1SHaifeng Xu 	/*
153232b6a4a1SHaifeng Xu 	 * When memory is hot-added, all the memory is in the offline state. So
153332b6a4a1SHaifeng Xu 	 * clear all zones' present_pages and managed_pages because they will
153432b6a4a1SHaifeng Xu 	 * be updated in online_pages() and offline_pages().
153532b6a4a1SHaifeng Xu 	 */
153632b6a4a1SHaifeng Xu 	for (z = 0; z < MAX_NR_ZONES; z++) {
153732b6a4a1SHaifeng Xu 		struct zone *zone = pgdat->node_zones + z;
153832b6a4a1SHaifeng Xu 
153932b6a4a1SHaifeng Xu 		zone->present_pages = 0;
154032b6a4a1SHaifeng Xu 		zone_init_internals(zone, z, nid, 0);
154132b6a4a1SHaifeng Xu 	}
15429420f89dSMike Rapoport (IBM) }
15439420f89dSMike Rapoport (IBM) #endif
15449420f89dSMike Rapoport (IBM) 
15459420f89dSMike Rapoport (IBM) /*
15469420f89dSMike Rapoport (IBM)  * Set up the zone data structures:
15479420f89dSMike Rapoport (IBM)  *   - mark all pages reserved
15489420f89dSMike Rapoport (IBM)  *   - mark all memory queues empty
15499420f89dSMike Rapoport (IBM)  *   - clear the memory bitmaps
15509420f89dSMike Rapoport (IBM)  *
15519420f89dSMike Rapoport (IBM)  * NOTE: pgdat should get zeroed by caller.
15529420f89dSMike Rapoport (IBM)  * NOTE: this function is only called during early init.
15539420f89dSMike Rapoport (IBM)  */
15549420f89dSMike Rapoport (IBM) static void __init free_area_init_core(struct pglist_data *pgdat)
15559420f89dSMike Rapoport (IBM) {
15569420f89dSMike Rapoport (IBM) 	enum zone_type j;
15579420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
15589420f89dSMike Rapoport (IBM) 
15599420f89dSMike Rapoport (IBM) 	pgdat_init_internals(pgdat);
15609420f89dSMike Rapoport (IBM) 	pgdat->per_cpu_nodestats = &boot_nodestats;
15619420f89dSMike Rapoport (IBM) 
15629420f89dSMike Rapoport (IBM) 	for (j = 0; j < MAX_NR_ZONES; j++) {
15639420f89dSMike Rapoport (IBM) 		struct zone *zone = pgdat->node_zones + j;
15649420f89dSMike Rapoport (IBM) 		unsigned long size, freesize, memmap_pages;
15659420f89dSMike Rapoport (IBM) 
15669420f89dSMike Rapoport (IBM) 		size = zone->spanned_pages;
15679420f89dSMike Rapoport (IBM) 		freesize = zone->present_pages;
15689420f89dSMike Rapoport (IBM) 
15699420f89dSMike Rapoport (IBM) 		/*
15709420f89dSMike Rapoport (IBM) 		 * Adjust freesize so that it accounts for how much memory
15719420f89dSMike Rapoport (IBM) 		 * is used by this zone for memmap. This affects the watermark
15729420f89dSMike Rapoport (IBM) 		 * and per-cpu initialisations
15739420f89dSMike Rapoport (IBM) 		 */
15749420f89dSMike Rapoport (IBM) 		memmap_pages = calc_memmap_size(size, freesize);
15759420f89dSMike Rapoport (IBM) 		if (!is_highmem_idx(j)) {
15769420f89dSMike Rapoport (IBM) 			if (freesize >= memmap_pages) {
15779420f89dSMike Rapoport (IBM) 				freesize -= memmap_pages;
15789420f89dSMike Rapoport (IBM) 				if (memmap_pages)
15799420f89dSMike Rapoport (IBM) 					pr_debug("  %s zone: %lu pages used for memmap\n",
15809420f89dSMike Rapoport (IBM) 						 zone_names[j], memmap_pages);
15819420f89dSMike Rapoport (IBM) 			} else
15829420f89dSMike Rapoport (IBM) 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
15839420f89dSMike Rapoport (IBM) 					zone_names[j], memmap_pages, freesize);
15849420f89dSMike Rapoport (IBM) 		}
15859420f89dSMike Rapoport (IBM) 
15869420f89dSMike Rapoport (IBM) 		/* Account for reserved pages */
15879420f89dSMike Rapoport (IBM) 		if (j == 0 && freesize > dma_reserve) {
15889420f89dSMike Rapoport (IBM) 			freesize -= dma_reserve;
15899420f89dSMike Rapoport (IBM) 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
15909420f89dSMike Rapoport (IBM) 		}
15919420f89dSMike Rapoport (IBM) 
15929420f89dSMike Rapoport (IBM) 		if (!is_highmem_idx(j))
15939420f89dSMike Rapoport (IBM) 			nr_kernel_pages += freesize;
15949420f89dSMike Rapoport (IBM) 		/* Charge for highmem memmap if there are enough kernel pages */
15959420f89dSMike Rapoport (IBM) 		else if (nr_kernel_pages > memmap_pages * 2)
15969420f89dSMike Rapoport (IBM) 			nr_kernel_pages -= memmap_pages;
15979420f89dSMike Rapoport (IBM) 		nr_all_pages += freesize;
15989420f89dSMike Rapoport (IBM) 
15999420f89dSMike Rapoport (IBM) 		/*
16009420f89dSMike Rapoport (IBM) 		 * Set an approximate value for lowmem here; it will be adjusted
16019420f89dSMike Rapoport (IBM) 		 * when the bootmem allocator frees pages into the buddy system.
16029420f89dSMike Rapoport (IBM) 		 * And all highmem pages will be managed by the buddy system.
16039420f89dSMike Rapoport (IBM) 		 */
16049420f89dSMike Rapoport (IBM) 		zone_init_internals(zone, j, nid, freesize);
16059420f89dSMike Rapoport (IBM) 
16069420f89dSMike Rapoport (IBM) 		if (!size)
16079420f89dSMike Rapoport (IBM) 			continue;
16089420f89dSMike Rapoport (IBM) 
16099420f89dSMike Rapoport (IBM) 		setup_usemap(zone);
16109420f89dSMike Rapoport (IBM) 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
16119420f89dSMike Rapoport (IBM) 	}
16129420f89dSMike Rapoport (IBM) }
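
/*
 * Worked accounting example (assumed sizes): a non-highmem zone with
 * 0x100000 present pages and a memmap cost of 0x4000 pages contributes
 * freesize == 0xfc000 to nr_kernel_pages (less dma_reserve for zone 0),
 * and that same freesize is the approximate managed_pages value handed
 * to zone_init_internals() until the bootmem allocator releases the
 * real pages to the buddy system.
 */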
16139420f89dSMike Rapoport (IBM) 
16149420f89dSMike Rapoport (IBM) void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
16159420f89dSMike Rapoport (IBM) 			  phys_addr_t min_addr, int nid, bool exact_nid)
16169420f89dSMike Rapoport (IBM) {
16179420f89dSMike Rapoport (IBM) 	void *ptr;
16189420f89dSMike Rapoport (IBM) 
16199420f89dSMike Rapoport (IBM) 	if (exact_nid)
16209420f89dSMike Rapoport (IBM) 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
16219420f89dSMike Rapoport (IBM) 						   MEMBLOCK_ALLOC_ACCESSIBLE,
16229420f89dSMike Rapoport (IBM) 						   nid);
16239420f89dSMike Rapoport (IBM) 	else
16249420f89dSMike Rapoport (IBM) 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
16259420f89dSMike Rapoport (IBM) 						 MEMBLOCK_ALLOC_ACCESSIBLE,
16269420f89dSMike Rapoport (IBM) 						 nid);
16279420f89dSMike Rapoport (IBM) 
16289420f89dSMike Rapoport (IBM) 	if (ptr && size > 0)
16299420f89dSMike Rapoport (IBM) 		page_init_poison(ptr, size);
16309420f89dSMike Rapoport (IBM) 
16319420f89dSMike Rapoport (IBM) 	return ptr;
16329420f89dSMike Rapoport (IBM) }
16339420f89dSMike Rapoport (IBM) 
16349420f89dSMike Rapoport (IBM) #ifdef CONFIG_FLATMEM
16359420f89dSMike Rapoport (IBM) static void __init alloc_node_mem_map(struct pglist_data *pgdat)
16369420f89dSMike Rapoport (IBM) {
16379420f89dSMike Rapoport (IBM) 	unsigned long __maybe_unused start = 0;
16389420f89dSMike Rapoport (IBM) 	unsigned long __maybe_unused offset = 0;
16399420f89dSMike Rapoport (IBM) 
16409420f89dSMike Rapoport (IBM) 	/* Skip empty nodes */
16419420f89dSMike Rapoport (IBM) 	if (!pgdat->node_spanned_pages)
16429420f89dSMike Rapoport (IBM) 		return;
16439420f89dSMike Rapoport (IBM) 
16449420f89dSMike Rapoport (IBM) 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
16459420f89dSMike Rapoport (IBM) 	offset = pgdat->node_start_pfn - start;
16469420f89dSMike Rapoport (IBM) 	/* ia64 gets its own node_mem_map, before this, without bootmem */
16479420f89dSMike Rapoport (IBM) 	if (!pgdat->node_mem_map) {
16489420f89dSMike Rapoport (IBM) 		unsigned long size, end;
16499420f89dSMike Rapoport (IBM) 		struct page *map;
16509420f89dSMike Rapoport (IBM) 
16519420f89dSMike Rapoport (IBM) 		/*
16529420f89dSMike Rapoport (IBM) 		 * The zone's endpoints aren't required to be MAX_ORDER
16539420f89dSMike Rapoport (IBM) 		 * aligned, but the node_mem_map endpoints must be, in order
16549420f89dSMike Rapoport (IBM) 		 * for the buddy allocator to function correctly.
16559420f89dSMike Rapoport (IBM) 		 */
16569420f89dSMike Rapoport (IBM) 		end = pgdat_end_pfn(pgdat);
16579420f89dSMike Rapoport (IBM) 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
16589420f89dSMike Rapoport (IBM) 		size =  (end - start) * sizeof(struct page);
16599420f89dSMike Rapoport (IBM) 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
16609420f89dSMike Rapoport (IBM) 				   pgdat->node_id, false);
16619420f89dSMike Rapoport (IBM) 		if (!map)
16629420f89dSMike Rapoport (IBM) 			panic("Failed to allocate %ld bytes for node %d memory map\n",
16639420f89dSMike Rapoport (IBM) 			      size, pgdat->node_id);
16649420f89dSMike Rapoport (IBM) 		pgdat->node_mem_map = map + offset;
16659420f89dSMike Rapoport (IBM) 	}
16669420f89dSMike Rapoport (IBM) 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
16679420f89dSMike Rapoport (IBM) 				__func__, pgdat->node_id, (unsigned long)pgdat,
16689420f89dSMike Rapoport (IBM) 				(unsigned long)pgdat->node_mem_map);
16699420f89dSMike Rapoport (IBM) #ifndef CONFIG_NUMA
16709420f89dSMike Rapoport (IBM) 	/*
16719420f89dSMike Rapoport (IBM) 	 * Without NUMA, the global mem_map is just set to node 0's node_mem_map
16729420f89dSMike Rapoport (IBM) 	 */
16739420f89dSMike Rapoport (IBM) 	if (pgdat == NODE_DATA(0)) {
16749420f89dSMike Rapoport (IBM) 		mem_map = NODE_DATA(0)->node_mem_map;
16759420f89dSMike Rapoport (IBM) 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
16769420f89dSMike Rapoport (IBM) 			mem_map -= offset;
16779420f89dSMike Rapoport (IBM) 	}
16789420f89dSMike Rapoport (IBM) #endif
16799420f89dSMike Rapoport (IBM) }
16809420f89dSMike Rapoport (IBM) #else
16819420f89dSMike Rapoport (IBM) static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
16829420f89dSMike Rapoport (IBM) #endif /* CONFIG_FLATMEM */
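
/*
 * Worked example for the FLATMEM map above (assumed values): with
 * MAX_ORDER_NR_PAGES == 0x400 and node_start_pfn == 0x1234, start is
 * rounded down to 0x1000 and offset becomes 0x234.  The map is sized
 * for the aligned range and node_mem_map points offset entries in, so
 * pfn-to-page arithmetic stays correct while the map's endpoints remain
 * MAX_ORDER aligned for the buddy allocator.
 */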
16839420f89dSMike Rapoport (IBM) 
16849420f89dSMike Rapoport (IBM) /**
16859420f89dSMike Rapoport (IBM)  * get_pfn_range_for_nid - Return the start and end page frames for a node
16869420f89dSMike Rapoport (IBM)  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
16879420f89dSMike Rapoport (IBM)  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
16889420f89dSMike Rapoport (IBM)  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
16899420f89dSMike Rapoport (IBM)  *
16909420f89dSMike Rapoport (IBM)  * It returns the start and end page frame of a node based on information
16919420f89dSMike Rapoport (IBM)  * provided by memblock_set_node(). If called for a node
16923a29280aSMiaohe Lin  * with no available memory, the start and end PFNs will be 0.
16939420f89dSMike Rapoport (IBM)  */
16949420f89dSMike Rapoport (IBM) void __init get_pfn_range_for_nid(unsigned int nid,
16959420f89dSMike Rapoport (IBM) 			unsigned long *start_pfn, unsigned long *end_pfn)
16969420f89dSMike Rapoport (IBM) {
16979420f89dSMike Rapoport (IBM) 	unsigned long this_start_pfn, this_end_pfn;
16989420f89dSMike Rapoport (IBM) 	int i;
16999420f89dSMike Rapoport (IBM) 
17009420f89dSMike Rapoport (IBM) 	*start_pfn = -1UL;
17019420f89dSMike Rapoport (IBM) 	*end_pfn = 0;
17029420f89dSMike Rapoport (IBM) 
17039420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
17049420f89dSMike Rapoport (IBM) 		*start_pfn = min(*start_pfn, this_start_pfn);
17059420f89dSMike Rapoport (IBM) 		*end_pfn = max(*end_pfn, this_end_pfn);
17069420f89dSMike Rapoport (IBM) 	}
17079420f89dSMike Rapoport (IBM) 
17089420f89dSMike Rapoport (IBM) 	if (*start_pfn == -1UL)
17099420f89dSMike Rapoport (IBM) 		*start_pfn = 0;
17109420f89dSMike Rapoport (IBM) }
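
/*
 * Worked example (assumed ranges): a node with memblock ranges
 * [0x100, 0x200) and [0x800, 0xa00) yields *start_pfn == 0x100 and
 * *end_pfn == 0xa00.  A memoryless node leaves *start_pfn at -1UL,
 * which is then folded to 0, so both bounds read as 0.
 */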
17119420f89dSMike Rapoport (IBM) 
17129420f89dSMike Rapoport (IBM) static void __init free_area_init_node(int nid)
17139420f89dSMike Rapoport (IBM) {
17149420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = NODE_DATA(nid);
17159420f89dSMike Rapoport (IBM) 	unsigned long start_pfn = 0;
17169420f89dSMike Rapoport (IBM) 	unsigned long end_pfn = 0;
17179420f89dSMike Rapoport (IBM) 
17189420f89dSMike Rapoport (IBM) 	/* pg_data_t should be reset to zero when it's allocated */
17199420f89dSMike Rapoport (IBM) 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
17209420f89dSMike Rapoport (IBM) 
17219420f89dSMike Rapoport (IBM) 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
17229420f89dSMike Rapoport (IBM) 
17239420f89dSMike Rapoport (IBM) 	pgdat->node_id = nid;
17249420f89dSMike Rapoport (IBM) 	pgdat->node_start_pfn = start_pfn;
17259420f89dSMike Rapoport (IBM) 	pgdat->per_cpu_nodestats = NULL;
17269420f89dSMike Rapoport (IBM) 
17279420f89dSMike Rapoport (IBM) 	if (start_pfn != end_pfn) {
17289420f89dSMike Rapoport (IBM) 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
17299420f89dSMike Rapoport (IBM) 			(u64)start_pfn << PAGE_SHIFT,
17309420f89dSMike Rapoport (IBM) 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
17319420f89dSMike Rapoport (IBM) 
17329420f89dSMike Rapoport (IBM) 		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1733ba1b67c7SHaifeng Xu 	} else {
1734ba1b67c7SHaifeng Xu 		pr_info("Initmem setup node %d as memoryless\n", nid);
1735ba1b67c7SHaifeng Xu 
1736ba1b67c7SHaifeng Xu 		reset_memoryless_node_totalpages(pgdat);
1737ba1b67c7SHaifeng Xu 	}
17389420f89dSMike Rapoport (IBM) 
17399420f89dSMike Rapoport (IBM) 	alloc_node_mem_map(pgdat);
17409420f89dSMike Rapoport (IBM) 	pgdat_set_deferred_range(pgdat);
17419420f89dSMike Rapoport (IBM) 
17429420f89dSMike Rapoport (IBM) 	free_area_init_core(pgdat);
17439420f89dSMike Rapoport (IBM) 	lru_gen_init_pgdat(pgdat);
17449420f89dSMike Rapoport (IBM) }
17459420f89dSMike Rapoport (IBM) 
17469420f89dSMike Rapoport (IBM) /* Any regular or high memory on that node ? */
1747b894da04SHaifeng Xu static void __init check_for_memory(pg_data_t *pgdat)
17489420f89dSMike Rapoport (IBM) {
17499420f89dSMike Rapoport (IBM) 	enum zone_type zone_type;
17509420f89dSMike Rapoport (IBM) 
17519420f89dSMike Rapoport (IBM) 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
17529420f89dSMike Rapoport (IBM) 		struct zone *zone = &pgdat->node_zones[zone_type];
17539420f89dSMike Rapoport (IBM) 		if (populated_zone(zone)) {
17549420f89dSMike Rapoport (IBM) 			if (IS_ENABLED(CONFIG_HIGHMEM))
175591ff4d75SHaifeng Xu 				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
17569420f89dSMike Rapoport (IBM) 			if (zone_type <= ZONE_NORMAL)
175791ff4d75SHaifeng Xu 				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
17589420f89dSMike Rapoport (IBM) 			break;
17599420f89dSMike Rapoport (IBM) 		}
17609420f89dSMike Rapoport (IBM) 	}
17619420f89dSMike Rapoport (IBM) }
17629420f89dSMike Rapoport (IBM) 
17639420f89dSMike Rapoport (IBM) #if MAX_NUMNODES > 1
17649420f89dSMike Rapoport (IBM) /*
17659420f89dSMike Rapoport (IBM)  * Figure out the number of possible node ids.
17669420f89dSMike Rapoport (IBM)  */
17679420f89dSMike Rapoport (IBM) void __init setup_nr_node_ids(void)
17689420f89dSMike Rapoport (IBM) {
17699420f89dSMike Rapoport (IBM) 	unsigned int highest;
17709420f89dSMike Rapoport (IBM) 
17719420f89dSMike Rapoport (IBM) 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
17729420f89dSMike Rapoport (IBM) 	nr_node_ids = highest + 1;
17739420f89dSMike Rapoport (IBM) }
17749420f89dSMike Rapoport (IBM) #endif
17759420f89dSMike Rapoport (IBM) 
17769420f89dSMike Rapoport (IBM) /*
17779420f89dSMike Rapoport (IBM)  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
17789420f89dSMike Rapoport (IBM)  * such cases we allow max_zone_pfn to be sorted in descending order
17799420f89dSMike Rapoport (IBM)  */
17805f300fd5SArnd Bergmann static bool arch_has_descending_max_zone_pfns(void)
17819420f89dSMike Rapoport (IBM) {
17825f300fd5SArnd Bergmann 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
17839420f89dSMike Rapoport (IBM) }
17849420f89dSMike Rapoport (IBM) 
17859420f89dSMike Rapoport (IBM) /**
17869420f89dSMike Rapoport (IBM)  * free_area_init - Initialise all pg_data_t and zone data
17879420f89dSMike Rapoport (IBM)  * @max_zone_pfn: an array of max PFNs for each zone
17889420f89dSMike Rapoport (IBM)  *
17899420f89dSMike Rapoport (IBM)  * This will call free_area_init_node() for each active node in the system.
17909420f89dSMike Rapoport (IBM)  * Using the page ranges provided by memblock_set_node(), the size of each
17919420f89dSMike Rapoport (IBM)  * zone in each node and their holes is calculated. If the maximum PFN
17929420f89dSMike Rapoport (IBM)  * zone in each node and their holes is calculated. If the maximum PFNs
17939420f89dSMike Rapoport (IBM)  * of two adjacent zones match, it is assumed that the zone is empty.
17949420f89dSMike Rapoport (IBM)  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
17959420f89dSMike Rapoport (IBM)  * starts where the previous one ended. For example, ZONE_DMA32 starts
17969420f89dSMike Rapoport (IBM)  * at arch_max_dma_pfn.
17979420f89dSMike Rapoport (IBM)  */
17989420f89dSMike Rapoport (IBM) void __init free_area_init(unsigned long *max_zone_pfn)
17999420f89dSMike Rapoport (IBM) {
18009420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
18019420f89dSMike Rapoport (IBM) 	int i, nid, zone;
18029420f89dSMike Rapoport (IBM) 	bool descending;
18039420f89dSMike Rapoport (IBM) 
18049420f89dSMike Rapoport (IBM) 	/* Record where the zone boundaries are */
18059420f89dSMike Rapoport (IBM) 	memset(arch_zone_lowest_possible_pfn, 0,
18069420f89dSMike Rapoport (IBM) 				sizeof(arch_zone_lowest_possible_pfn));
18079420f89dSMike Rapoport (IBM) 	memset(arch_zone_highest_possible_pfn, 0,
18089420f89dSMike Rapoport (IBM) 				sizeof(arch_zone_highest_possible_pfn));
18099420f89dSMike Rapoport (IBM) 
18109420f89dSMike Rapoport (IBM) 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
18119420f89dSMike Rapoport (IBM) 	descending = arch_has_descending_max_zone_pfns();
18129420f89dSMike Rapoport (IBM) 
18139420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
18149420f89dSMike Rapoport (IBM) 		if (descending)
18159420f89dSMike Rapoport (IBM) 			zone = MAX_NR_ZONES - i - 1;
18169420f89dSMike Rapoport (IBM) 		else
18179420f89dSMike Rapoport (IBM) 			zone = i;
18189420f89dSMike Rapoport (IBM) 
18199420f89dSMike Rapoport (IBM) 		if (zone == ZONE_MOVABLE)
18209420f89dSMike Rapoport (IBM) 			continue;
18219420f89dSMike Rapoport (IBM) 
18229420f89dSMike Rapoport (IBM) 		end_pfn = max(max_zone_pfn[zone], start_pfn);
18239420f89dSMike Rapoport (IBM) 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
18249420f89dSMike Rapoport (IBM) 		arch_zone_highest_possible_pfn[zone] = end_pfn;
18259420f89dSMike Rapoport (IBM) 
18269420f89dSMike Rapoport (IBM) 		start_pfn = end_pfn;
18279420f89dSMike Rapoport (IBM) 	}
18289420f89dSMike Rapoport (IBM) 
18299420f89dSMike Rapoport (IBM) 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
18309420f89dSMike Rapoport (IBM) 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
18319420f89dSMike Rapoport (IBM) 	find_zone_movable_pfns_for_nodes();
18329420f89dSMike Rapoport (IBM) 
18339420f89dSMike Rapoport (IBM) 	/* Print out the zone ranges */
18349420f89dSMike Rapoport (IBM) 	pr_info("Zone ranges:\n");
18359420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
18369420f89dSMike Rapoport (IBM) 		if (i == ZONE_MOVABLE)
18379420f89dSMike Rapoport (IBM) 			continue;
18389420f89dSMike Rapoport (IBM) 		pr_info("  %-8s ", zone_names[i]);
18399420f89dSMike Rapoport (IBM) 		if (arch_zone_lowest_possible_pfn[i] ==
18409420f89dSMike Rapoport (IBM) 				arch_zone_highest_possible_pfn[i])
18419420f89dSMike Rapoport (IBM) 			pr_cont("empty\n");
18429420f89dSMike Rapoport (IBM) 		else
18439420f89dSMike Rapoport (IBM) 			pr_cont("[mem %#018Lx-%#018Lx]\n",
18449420f89dSMike Rapoport (IBM) 				(u64)arch_zone_lowest_possible_pfn[i]
18459420f89dSMike Rapoport (IBM) 					<< PAGE_SHIFT,
18469420f89dSMike Rapoport (IBM) 				((u64)arch_zone_highest_possible_pfn[i]
18479420f89dSMike Rapoport (IBM) 					<< PAGE_SHIFT) - 1);
18489420f89dSMike Rapoport (IBM) 	}
18499420f89dSMike Rapoport (IBM) 
18509420f89dSMike Rapoport (IBM) 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
18519420f89dSMike Rapoport (IBM) 	pr_info("Movable zone start for each node\n");
18529420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NUMNODES; i++) {
18539420f89dSMike Rapoport (IBM) 		if (zone_movable_pfn[i])
18549420f89dSMike Rapoport (IBM) 			pr_info("  Node %d: %#018Lx\n", i,
18559420f89dSMike Rapoport (IBM) 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
18569420f89dSMike Rapoport (IBM) 	}
18579420f89dSMike Rapoport (IBM) 
18589420f89dSMike Rapoport (IBM) 	/*
18599420f89dSMike Rapoport (IBM) 	 * Print out the early node map, and initialize the
18609420f89dSMike Rapoport (IBM) 	 * subsection-map relative to active online memory ranges to
18619420f89dSMike Rapoport (IBM) 	 * enable future "sub-section" extensions of the memory map.
18629420f89dSMike Rapoport (IBM) 	 */
18639420f89dSMike Rapoport (IBM) 	pr_info("Early memory node ranges\n");
18649420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
18659420f89dSMike Rapoport (IBM) 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
18669420f89dSMike Rapoport (IBM) 			(u64)start_pfn << PAGE_SHIFT,
18679420f89dSMike Rapoport (IBM) 			((u64)end_pfn << PAGE_SHIFT) - 1);
18689420f89dSMike Rapoport (IBM) 		subsection_map_init(start_pfn, end_pfn - start_pfn);
18699420f89dSMike Rapoport (IBM) 	}
18709420f89dSMike Rapoport (IBM) 
18719420f89dSMike Rapoport (IBM) 	/* Initialise every node */
18729420f89dSMike Rapoport (IBM) 	mminit_verify_pageflags_layout();
18739420f89dSMike Rapoport (IBM) 	setup_nr_node_ids();
1874e3d9b45fSHaifeng Xu 	set_pageblock_order();
1875e3d9b45fSHaifeng Xu 
18769420f89dSMike Rapoport (IBM) 	for_each_node(nid) {
18779420f89dSMike Rapoport (IBM) 		pg_data_t *pgdat;
18789420f89dSMike Rapoport (IBM) 
18799420f89dSMike Rapoport (IBM) 		if (!node_online(nid)) {
18809420f89dSMike Rapoport (IBM) 			pr_info("Initializing node %d as memoryless\n", nid);
18819420f89dSMike Rapoport (IBM) 
18829420f89dSMike Rapoport (IBM) 			/* Allocator not initialized yet */
18839420f89dSMike Rapoport (IBM) 			pgdat = arch_alloc_nodedata(nid);
18849420f89dSMike Rapoport (IBM) 			if (!pgdat)
18859420f89dSMike Rapoport (IBM) 				panic("Cannot allocate %zuB for node %d.\n",
18869420f89dSMike Rapoport (IBM) 				       sizeof(*pgdat), nid);
18879420f89dSMike Rapoport (IBM) 			arch_refresh_nodedata(nid, pgdat);
1888837c2ba5SHaifeng Xu 			free_area_init_node(nid);
18899420f89dSMike Rapoport (IBM) 
18909420f89dSMike Rapoport (IBM) 			/*
18919420f89dSMike Rapoport (IBM) 			 * We do not want to confuse userspace with sysfs
18929420f89dSMike Rapoport (IBM) 			 * files/directories for a node without any memory
18939420f89dSMike Rapoport (IBM) 			 * attached to it, so this node is not marked as
18949420f89dSMike Rapoport (IBM) 			 * N_MEMORY and not marked online so that no sysfs
18959420f89dSMike Rapoport (IBM) 			 * hierarchy will be created via register_one_node for
18969420f89dSMike Rapoport (IBM) 			 * it. The pgdat will get fully initialized by
18979420f89dSMike Rapoport (IBM) 			 * hotadd_init_pgdat() when memory is hotplugged into
18989420f89dSMike Rapoport (IBM) 			 * this node.
18999420f89dSMike Rapoport (IBM) 			 */
19009420f89dSMike Rapoport (IBM) 			continue;
19019420f89dSMike Rapoport (IBM) 		}
19029420f89dSMike Rapoport (IBM) 
19039420f89dSMike Rapoport (IBM) 		pgdat = NODE_DATA(nid);
19049420f89dSMike Rapoport (IBM) 		free_area_init_node(nid);
19059420f89dSMike Rapoport (IBM) 
19069420f89dSMike Rapoport (IBM) 		/* Does the node have any memory? */
19079420f89dSMike Rapoport (IBM) 		if (pgdat->node_present_pages)
19089420f89dSMike Rapoport (IBM) 			node_set_state(nid, N_MEMORY);
190991ff4d75SHaifeng Xu 		check_for_memory(pgdat);
19109420f89dSMike Rapoport (IBM) 	}
19119420f89dSMike Rapoport (IBM) 
19129420f89dSMike Rapoport (IBM) 	memmap_init();
1913534ef4e1SMike Rapoport (IBM) 
1914534ef4e1SMike Rapoport (IBM) 	/* disable hash distribution for systems with a single node */
1915534ef4e1SMike Rapoport (IBM) 	fixup_hashdist();
19169420f89dSMike Rapoport (IBM) }
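
/*
 * Usage sketch (illustrative, not part of this file): an architecture
 * typically fills a max_zone_pfn array in its paging_init() and hands it
 * to free_area_init(). The zones populated below are an assumed example;
 * which ones exist is arch- and config-specific.
 *
 *	static void __init zone_sizes_init(void)
 *	{
 *		unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	#ifdef CONFIG_ZONE_DMA32
 *		max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn);
 *	#endif
 *		max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *
 *		free_area_init(max_zone_pfns);
 *	}
 */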
19179420f89dSMike Rapoport (IBM) 
19189420f89dSMike Rapoport (IBM) /**
19199420f89dSMike Rapoport (IBM)  * node_map_pfn_alignment - determine the maximum internode alignment
19209420f89dSMike Rapoport (IBM)  *
19219420f89dSMike Rapoport (IBM)  * This function should be called after node map is populated and sorted.
19229420f89dSMike Rapoport (IBM)  * It calculates the maximum power of two alignment which can distinguish
19239420f89dSMike Rapoport (IBM)  * all the nodes.
19249420f89dSMike Rapoport (IBM)  *
19259420f89dSMike Rapoport (IBM)  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
19269420f89dSMike Rapoport (IBM)  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
19279420f89dSMike Rapoport (IBM)  * nodes are shifted by 256MiB, the result is 256MiB.  Note that if only
19289420f89dSMike Rapoport (IBM)  * the last node is shifted, 1GiB is enough and this function will indicate so.
19299420f89dSMike Rapoport (IBM)  *
19309420f89dSMike Rapoport (IBM)  * This is used to test whether pfn -> nid mapping of the chosen memory
19319420f89dSMike Rapoport (IBM)  * model has fine enough granularity to avoid incorrect mapping for the
19329420f89dSMike Rapoport (IBM)  * populated node map.
19339420f89dSMike Rapoport (IBM)  *
19349420f89dSMike Rapoport (IBM)  * Return: the determined alignment in pfn's.  0 if there is no alignment
19359420f89dSMike Rapoport (IBM)  * requirement (single node).
19369420f89dSMike Rapoport (IBM)  */
19379420f89dSMike Rapoport (IBM) unsigned long __init node_map_pfn_alignment(void)
19389420f89dSMike Rapoport (IBM) {
19399420f89dSMike Rapoport (IBM) 	unsigned long accl_mask = 0, last_end = 0;
19409420f89dSMike Rapoport (IBM) 	unsigned long start, end, mask;
19419420f89dSMike Rapoport (IBM) 	int last_nid = NUMA_NO_NODE;
19429420f89dSMike Rapoport (IBM) 	int i, nid;
19439420f89dSMike Rapoport (IBM) 
19449420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
19459420f89dSMike Rapoport (IBM) 		if (!start || last_nid < 0 || last_nid == nid) {
19469420f89dSMike Rapoport (IBM) 			last_nid = nid;
19479420f89dSMike Rapoport (IBM) 			last_end = end;
19489420f89dSMike Rapoport (IBM) 			continue;
19499420f89dSMike Rapoport (IBM) 		}
19509420f89dSMike Rapoport (IBM) 
19519420f89dSMike Rapoport (IBM) 		/*
19529420f89dSMike Rapoport (IBM) 		 * Start with a mask granular enough to pin-point to the
19539420f89dSMike Rapoport (IBM) 		 * start pfn and tick off bits one-by-one until it becomes
19549420f89dSMike Rapoport (IBM) 		 * too coarse to separate the current node from the last.
19559420f89dSMike Rapoport (IBM) 		 */
19569420f89dSMike Rapoport (IBM) 		mask = ~((1 << __ffs(start)) - 1);
19579420f89dSMike Rapoport (IBM) 		while (mask && last_end <= (start & (mask << 1)))
19589420f89dSMike Rapoport (IBM) 			mask <<= 1;
19599420f89dSMike Rapoport (IBM) 
19609420f89dSMike Rapoport (IBM) 		/* accumulate all internode masks */
19619420f89dSMike Rapoport (IBM) 		accl_mask |= mask;
19629420f89dSMike Rapoport (IBM) 	}
19639420f89dSMike Rapoport (IBM) 
19649420f89dSMike Rapoport (IBM) 	/* convert mask to number of pages */
19659420f89dSMike Rapoport (IBM) 	return ~accl_mask + 1;
19669420f89dSMike Rapoport (IBM) }
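
/*
 * Worked example (assuming 4KiB pages, so PAGE_SHIFT == 12): say node 0
 * covers PFNs [0, 0x40000) and node 1 starts at PFN 0x40000, i.e. both
 * boundaries are 1GiB aligned. At the nid change, start == 0x40000 and
 * mask = ~((1 << __ffs(0x40000)) - 1) = ~0x3ffff. Widening the mask once
 * more would round node 1's start down below last_end, so the loop stops
 * and the function returns ~mask + 1 = 0x40000 PFNs, i.e. 1GiB.
 */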
19679420f89dSMike Rapoport (IBM) 
19689420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
19699420f89dSMike Rapoport (IBM) static void __init deferred_free_range(unsigned long pfn,
19709420f89dSMike Rapoport (IBM) 				       unsigned long nr_pages)
19719420f89dSMike Rapoport (IBM) {
19729420f89dSMike Rapoport (IBM) 	struct page *page;
19739420f89dSMike Rapoport (IBM) 	unsigned long i;
19749420f89dSMike Rapoport (IBM) 
19759420f89dSMike Rapoport (IBM) 	if (!nr_pages)
19769420f89dSMike Rapoport (IBM) 		return;
19779420f89dSMike Rapoport (IBM) 
19789420f89dSMike Rapoport (IBM) 	page = pfn_to_page(pfn);
19799420f89dSMike Rapoport (IBM) 
19809420f89dSMike Rapoport (IBM) 	/* Free a large naturally-aligned chunk if possible */
19813f6dac0fSKirill A. Shutemov 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
19823f6dac0fSKirill A. Shutemov 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
19833f6dac0fSKirill A. Shutemov 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
19843f6dac0fSKirill A. Shutemov 		__free_pages_core(page, MAX_ORDER);
19859420f89dSMike Rapoport (IBM) 		return;
19869420f89dSMike Rapoport (IBM) 	}
19879420f89dSMike Rapoport (IBM) 
1988dcdfdd40SKirill A. Shutemov 	/* Accept chunks smaller than MAX_ORDER upfront */
1989dcdfdd40SKirill A. Shutemov 	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
1990dcdfdd40SKirill A. Shutemov 
19919420f89dSMike Rapoport (IBM) 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
19929420f89dSMike Rapoport (IBM) 		if (pageblock_aligned(pfn))
19939420f89dSMike Rapoport (IBM) 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
19949420f89dSMike Rapoport (IBM) 		__free_pages_core(page, 0);
19959420f89dSMike Rapoport (IBM) 	}
19969420f89dSMike Rapoport (IBM) }
19979420f89dSMike Rapoport (IBM) 
19989420f89dSMike Rapoport (IBM) /* Completion tracking for deferred_init_memmap() threads */
19999420f89dSMike Rapoport (IBM) static atomic_t pgdat_init_n_undone __initdata;
20009420f89dSMike Rapoport (IBM) static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
20019420f89dSMike Rapoport (IBM) 
20029420f89dSMike Rapoport (IBM) static inline void __init pgdat_init_report_one_done(void)
20039420f89dSMike Rapoport (IBM) {
20049420f89dSMike Rapoport (IBM) 	if (atomic_dec_and_test(&pgdat_init_n_undone))
20059420f89dSMike Rapoport (IBM) 		complete(&pgdat_init_all_done_comp);
20069420f89dSMike Rapoport (IBM) }
20079420f89dSMike Rapoport (IBM) 
20089420f89dSMike Rapoport (IBM) /*
20099420f89dSMike Rapoport (IBM)  * Returns true if page needs to be initialized or freed to buddy allocator.
20109420f89dSMike Rapoport (IBM)  *
20113f6dac0fSKirill A. Shutemov  * We check whether the current MAX_ORDER block is valid by checking only the validity
20129420f89dSMike Rapoport (IBM)  * of the head pfn.
20139420f89dSMike Rapoport (IBM)  */
20149420f89dSMike Rapoport (IBM) static inline bool __init deferred_pfn_valid(unsigned long pfn)
20159420f89dSMike Rapoport (IBM) {
20163f6dac0fSKirill A. Shutemov 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
20179420f89dSMike Rapoport (IBM) 		return false;
20189420f89dSMike Rapoport (IBM) 	return true;
20199420f89dSMike Rapoport (IBM) }
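
/*
 * Concrete numbers (assuming 4KiB pages and MAX_ORDER == 10, so
 * MAX_ORDER_NR_PAGES == 1024): pfn_valid() is consulted only for pfns
 * that are multiples of 1024, i.e. once per 4MiB block; every other pfn
 * in the block is considered valid along with its head.
 */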
20209420f89dSMike Rapoport (IBM) 
20219420f89dSMike Rapoport (IBM) /*
20229420f89dSMike Rapoport (IBM)  * Free pages to buddy allocator. Try to free aligned pages in
20233f6dac0fSKirill A. Shutemov  * MAX_ORDER_NR_PAGES sizes.
20249420f89dSMike Rapoport (IBM)  */
20259420f89dSMike Rapoport (IBM) static void __init deferred_free_pages(unsigned long pfn,
20269420f89dSMike Rapoport (IBM) 				       unsigned long end_pfn)
20279420f89dSMike Rapoport (IBM) {
20289420f89dSMike Rapoport (IBM) 	unsigned long nr_free = 0;
20299420f89dSMike Rapoport (IBM) 
20309420f89dSMike Rapoport (IBM) 	for (; pfn < end_pfn; pfn++) {
20319420f89dSMike Rapoport (IBM) 		if (!deferred_pfn_valid(pfn)) {
20329420f89dSMike Rapoport (IBM) 			deferred_free_range(pfn - nr_free, nr_free);
20339420f89dSMike Rapoport (IBM) 			nr_free = 0;
20343f6dac0fSKirill A. Shutemov 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
20359420f89dSMike Rapoport (IBM) 			deferred_free_range(pfn - nr_free, nr_free);
20369420f89dSMike Rapoport (IBM) 			nr_free = 1;
20379420f89dSMike Rapoport (IBM) 		} else {
20389420f89dSMike Rapoport (IBM) 			nr_free++;
20399420f89dSMike Rapoport (IBM) 		}
20409420f89dSMike Rapoport (IBM) 	}
20419420f89dSMike Rapoport (IBM) 	/* Free the last block of pages to the allocator */
20429420f89dSMike Rapoport (IBM) 	deferred_free_range(pfn - nr_free, nr_free);
20439420f89dSMike Rapoport (IBM) }
20449420f89dSMike Rapoport (IBM) 
20459420f89dSMike Rapoport (IBM) /*
20469420f89dSMike Rapoport (IBM)  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
20473f6dac0fSKirill A. Shutemov  * by performing them only once every MAX_ORDER_NR_PAGES.
20489420f89dSMike Rapoport (IBM)  * Return number of pages initialized.
20499420f89dSMike Rapoport (IBM)  */
20509420f89dSMike Rapoport (IBM) static unsigned long  __init deferred_init_pages(struct zone *zone,
20519420f89dSMike Rapoport (IBM) 						 unsigned long pfn,
20529420f89dSMike Rapoport (IBM) 						 unsigned long end_pfn)
20539420f89dSMike Rapoport (IBM) {
20549420f89dSMike Rapoport (IBM) 	int nid = zone_to_nid(zone);
20559420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
20569420f89dSMike Rapoport (IBM) 	int zid = zone_idx(zone);
20579420f89dSMike Rapoport (IBM) 	struct page *page = NULL;
20589420f89dSMike Rapoport (IBM) 
20599420f89dSMike Rapoport (IBM) 	for (; pfn < end_pfn; pfn++) {
20609420f89dSMike Rapoport (IBM) 		if (!deferred_pfn_valid(pfn)) {
20619420f89dSMike Rapoport (IBM) 			page = NULL;
20629420f89dSMike Rapoport (IBM) 			continue;
20633f6dac0fSKirill A. Shutemov 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
20649420f89dSMike Rapoport (IBM) 			page = pfn_to_page(pfn);
20659420f89dSMike Rapoport (IBM) 		} else {
20669420f89dSMike Rapoport (IBM) 			page++;
20679420f89dSMike Rapoport (IBM) 		}
20689420f89dSMike Rapoport (IBM) 		__init_single_page(page, pfn, zid, nid);
20699420f89dSMike Rapoport (IBM) 		nr_pages++;
20709420f89dSMike Rapoport (IBM) 	}
20719420f89dSMike Rapoport (IBM) 	return nr_pages;
20729420f89dSMike Rapoport (IBM) }
20739420f89dSMike Rapoport (IBM) 
20749420f89dSMike Rapoport (IBM) /*
20759420f89dSMike Rapoport (IBM)  * This function is meant to pre-load the iterator for the zone init.
20769420f89dSMike Rapoport (IBM)  * Specifically, it walks through the ranges until we are caught up to the
20779420f89dSMike Rapoport (IBM)  * first_init_pfn value and exits there. If we never encounter the value we
20789420f89dSMike Rapoport (IBM)  * return false indicating there are no valid ranges left.
20799420f89dSMike Rapoport (IBM)  */
20809420f89dSMike Rapoport (IBM) static bool __init
20819420f89dSMike Rapoport (IBM) deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
20829420f89dSMike Rapoport (IBM) 				    unsigned long *spfn, unsigned long *epfn,
20839420f89dSMike Rapoport (IBM) 				    unsigned long first_init_pfn)
20849420f89dSMike Rapoport (IBM) {
20859420f89dSMike Rapoport (IBM) 	u64 j;
20869420f89dSMike Rapoport (IBM) 
20879420f89dSMike Rapoport (IBM) 	/*
20889420f89dSMike Rapoport (IBM) 	 * Start out by walking through the ranges in this zone that have
20899420f89dSMike Rapoport (IBM) 	 * already been initialized. We don't need to do anything with them
20909420f89dSMike Rapoport (IBM) 	 * so we just need to flush them out of the system.
20919420f89dSMike Rapoport (IBM) 	 */
20929420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
20939420f89dSMike Rapoport (IBM) 		if (*epfn <= first_init_pfn)
20949420f89dSMike Rapoport (IBM) 			continue;
20959420f89dSMike Rapoport (IBM) 		if (*spfn < first_init_pfn)
20969420f89dSMike Rapoport (IBM) 			*spfn = first_init_pfn;
20979420f89dSMike Rapoport (IBM) 		*i = j;
20989420f89dSMike Rapoport (IBM) 		return true;
20999420f89dSMike Rapoport (IBM) 	}
21009420f89dSMike Rapoport (IBM) 
21019420f89dSMike Rapoport (IBM) 	return false;
21029420f89dSMike Rapoport (IBM) }
21039420f89dSMike Rapoport (IBM) 
21049420f89dSMike Rapoport (IBM) /*
21059420f89dSMike Rapoport (IBM)  * Initialize and free pages. We do it in two loops: first we initialize
21069420f89dSMike Rapoport (IBM)  * struct page, then free to buddy allocator, because while we are
21079420f89dSMike Rapoport (IBM)  * freeing pages we can access pages that are ahead (computing buddy
21089420f89dSMike Rapoport (IBM)  * page in __free_one_page()).
21099420f89dSMike Rapoport (IBM)  *
21109420f89dSMike Rapoport (IBM)  * To try to keep some memory in the cache, the loop is broken along
21119420f89dSMike Rapoport (IBM)  * max page order boundaries. This way we will not cause
21129420f89dSMike Rapoport (IBM)  * any issues with the buddy page computation.
21139420f89dSMike Rapoport (IBM)  */
21149420f89dSMike Rapoport (IBM) static unsigned long __init
21159420f89dSMike Rapoport (IBM) deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
21169420f89dSMike Rapoport (IBM) 		       unsigned long *end_pfn)
21179420f89dSMike Rapoport (IBM) {
21189420f89dSMike Rapoport (IBM) 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
21199420f89dSMike Rapoport (IBM) 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
21209420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
21219420f89dSMike Rapoport (IBM) 	u64 j = *i;
21229420f89dSMike Rapoport (IBM) 
21239420f89dSMike Rapoport (IBM) 	/* First we loop through and initialize the page values */
21249420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
21259420f89dSMike Rapoport (IBM) 		unsigned long t;
21269420f89dSMike Rapoport (IBM) 
21279420f89dSMike Rapoport (IBM) 		if (mo_pfn <= *start_pfn)
21289420f89dSMike Rapoport (IBM) 			break;
21299420f89dSMike Rapoport (IBM) 
21309420f89dSMike Rapoport (IBM) 		t = min(mo_pfn, *end_pfn);
21319420f89dSMike Rapoport (IBM) 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
21329420f89dSMike Rapoport (IBM) 
21339420f89dSMike Rapoport (IBM) 		if (mo_pfn < *end_pfn) {
21349420f89dSMike Rapoport (IBM) 			*start_pfn = mo_pfn;
21359420f89dSMike Rapoport (IBM) 			break;
21369420f89dSMike Rapoport (IBM) 		}
21379420f89dSMike Rapoport (IBM) 	}
21389420f89dSMike Rapoport (IBM) 
21399420f89dSMike Rapoport (IBM) 	/* Reset values and now loop through freeing pages as needed */
21409420f89dSMike Rapoport (IBM) 	swap(j, *i);
21419420f89dSMike Rapoport (IBM) 
21429420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
21439420f89dSMike Rapoport (IBM) 		unsigned long t;
21449420f89dSMike Rapoport (IBM) 
21459420f89dSMike Rapoport (IBM) 		if (mo_pfn <= spfn)
21469420f89dSMike Rapoport (IBM) 			break;
21479420f89dSMike Rapoport (IBM) 
21489420f89dSMike Rapoport (IBM) 		t = min(mo_pfn, epfn);
21499420f89dSMike Rapoport (IBM) 		deferred_free_pages(spfn, t);
21509420f89dSMike Rapoport (IBM) 
21519420f89dSMike Rapoport (IBM) 		if (mo_pfn <= epfn)
21529420f89dSMike Rapoport (IBM) 			break;
21539420f89dSMike Rapoport (IBM) 	}
21549420f89dSMike Rapoport (IBM) 
21559420f89dSMike Rapoport (IBM) 	return nr_pages;
21569420f89dSMike Rapoport (IBM) }
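
/*
 * Worked example (assuming MAX_ORDER_NR_PAGES == 1024 and a free range
 * extending past pfn 1024): entering with *start_pfn == 1 gives
 * mo_pfn = ALIGN(2, 1024) = 1024, so the first loop initializes struct
 * pages for pfns [1, 1024), the second loop frees the same range, and
 * *start_pfn is left at 1024 for the caller's next iteration.
 */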
21579420f89dSMike Rapoport (IBM) 
21589420f89dSMike Rapoport (IBM) static void __init
21599420f89dSMike Rapoport (IBM) deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
21609420f89dSMike Rapoport (IBM) 			   void *arg)
21619420f89dSMike Rapoport (IBM) {
21629420f89dSMike Rapoport (IBM) 	unsigned long spfn, epfn;
21639420f89dSMike Rapoport (IBM) 	struct zone *zone = arg;
21649420f89dSMike Rapoport (IBM) 	u64 i;
21659420f89dSMike Rapoport (IBM) 
21669420f89dSMike Rapoport (IBM) 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
21679420f89dSMike Rapoport (IBM) 
21689420f89dSMike Rapoport (IBM) 	/*
21699420f89dSMike Rapoport (IBM) 	 * Initialize and free pages in MAX_ORDER sized increments so that we
21709420f89dSMike Rapoport (IBM) 	 * can avoid introducing any issues with the buddy allocator.
21719420f89dSMike Rapoport (IBM) 	 */
21729420f89dSMike Rapoport (IBM) 	while (spfn < end_pfn) {
21739420f89dSMike Rapoport (IBM) 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
21749420f89dSMike Rapoport (IBM) 		cond_resched();
21759420f89dSMike Rapoport (IBM) 	}
21769420f89dSMike Rapoport (IBM) }
21779420f89dSMike Rapoport (IBM) 
21789420f89dSMike Rapoport (IBM) /* An arch may override for more concurrency. */
21799420f89dSMike Rapoport (IBM) __weak int __init
21809420f89dSMike Rapoport (IBM) deferred_page_init_max_threads(const struct cpumask *node_cpumask)
21819420f89dSMike Rapoport (IBM) {
21829420f89dSMike Rapoport (IBM) 	return 1;
21839420f89dSMike Rapoport (IBM) }
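
/*
 * Sketch of an arch override, modelled on the x86 version in
 * arch/x86/mm/init_64.c: use one thread per CPU local to the node,
 * falling back to a single thread for CPU-less nodes.
 *
 *	int __init
 *	deferred_page_init_max_threads(const struct cpumask *node_cpumask)
 *	{
 *		return max_t(int, cpumask_weight(node_cpumask), 1);
 *	}
 */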
21849420f89dSMike Rapoport (IBM) 
21859420f89dSMike Rapoport (IBM) /* Initialise remaining memory on a node */
21869420f89dSMike Rapoport (IBM) static int __init deferred_init_memmap(void *data)
21879420f89dSMike Rapoport (IBM) {
21889420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = data;
21899420f89dSMike Rapoport (IBM) 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
21909420f89dSMike Rapoport (IBM) 	unsigned long spfn = 0, epfn = 0;
21919420f89dSMike Rapoport (IBM) 	unsigned long first_init_pfn, flags;
21929420f89dSMike Rapoport (IBM) 	unsigned long start = jiffies;
21939420f89dSMike Rapoport (IBM) 	struct zone *zone;
21949420f89dSMike Rapoport (IBM) 	int zid, max_threads;
21959420f89dSMike Rapoport (IBM) 	u64 i;
21969420f89dSMike Rapoport (IBM) 
21979420f89dSMike Rapoport (IBM) 	/* Bind memory initialisation thread to a local node if possible */
21989420f89dSMike Rapoport (IBM) 	if (!cpumask_empty(cpumask))
21999420f89dSMike Rapoport (IBM) 		set_cpus_allowed_ptr(current, cpumask);
22009420f89dSMike Rapoport (IBM) 
22019420f89dSMike Rapoport (IBM) 	pgdat_resize_lock(pgdat, &flags);
22029420f89dSMike Rapoport (IBM) 	first_init_pfn = pgdat->first_deferred_pfn;
22039420f89dSMike Rapoport (IBM) 	if (first_init_pfn == ULONG_MAX) {
22049420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
22059420f89dSMike Rapoport (IBM) 		pgdat_init_report_one_done();
22069420f89dSMike Rapoport (IBM) 		return 0;
22079420f89dSMike Rapoport (IBM) 	}
22089420f89dSMike Rapoport (IBM) 
22099420f89dSMike Rapoport (IBM) 	/* Sanity check boundaries */
22109420f89dSMike Rapoport (IBM) 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
22119420f89dSMike Rapoport (IBM) 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
22129420f89dSMike Rapoport (IBM) 	pgdat->first_deferred_pfn = ULONG_MAX;
22139420f89dSMike Rapoport (IBM) 
22149420f89dSMike Rapoport (IBM) 	/*
22159420f89dSMike Rapoport (IBM) 	 * Once we unlock here, the zone cannot be grown any more. Thus, if an
22169420f89dSMike Rapoport (IBM) 	 * interrupt thread must allocate this early in boot, the zone must be
22179420f89dSMike Rapoport (IBM) 	 * pre-grown before deferred page initialization starts.
22189420f89dSMike Rapoport (IBM) 	 */
22199420f89dSMike Rapoport (IBM) 	pgdat_resize_unlock(pgdat, &flags);
22209420f89dSMike Rapoport (IBM) 
22219420f89dSMike Rapoport (IBM) 	/* Only the highest zone is deferred so find it */
22229420f89dSMike Rapoport (IBM) 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
22239420f89dSMike Rapoport (IBM) 		zone = pgdat->node_zones + zid;
22249420f89dSMike Rapoport (IBM) 		if (first_init_pfn < zone_end_pfn(zone))
22259420f89dSMike Rapoport (IBM) 			break;
22269420f89dSMike Rapoport (IBM) 	}
22279420f89dSMike Rapoport (IBM) 
22289420f89dSMike Rapoport (IBM) 	/* If the zone is empty, somebody else may have cleared it out already */
22299420f89dSMike Rapoport (IBM) 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
22309420f89dSMike Rapoport (IBM) 						 first_init_pfn))
22319420f89dSMike Rapoport (IBM) 		goto zone_empty;
22329420f89dSMike Rapoport (IBM) 
22339420f89dSMike Rapoport (IBM) 	max_threads = deferred_page_init_max_threads(cpumask);
22349420f89dSMike Rapoport (IBM) 
22359420f89dSMike Rapoport (IBM) 	while (spfn < epfn) {
22369420f89dSMike Rapoport (IBM) 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
22379420f89dSMike Rapoport (IBM) 		struct padata_mt_job job = {
22389420f89dSMike Rapoport (IBM) 			.thread_fn   = deferred_init_memmap_chunk,
22399420f89dSMike Rapoport (IBM) 			.fn_arg      = zone,
22409420f89dSMike Rapoport (IBM) 			.start       = spfn,
22419420f89dSMike Rapoport (IBM) 			.size        = epfn_align - spfn,
22429420f89dSMike Rapoport (IBM) 			.align       = PAGES_PER_SECTION,
22439420f89dSMike Rapoport (IBM) 			.min_chunk   = PAGES_PER_SECTION,
22449420f89dSMike Rapoport (IBM) 			.max_threads = max_threads,
22459420f89dSMike Rapoport (IBM) 		};
22469420f89dSMike Rapoport (IBM) 
22479420f89dSMike Rapoport (IBM) 		padata_do_multithreaded(&job);
22489420f89dSMike Rapoport (IBM) 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
22499420f89dSMike Rapoport (IBM) 						    epfn_align);
22509420f89dSMike Rapoport (IBM) 	}
22519420f89dSMike Rapoport (IBM) zone_empty:
22529420f89dSMike Rapoport (IBM) 	/* Sanity check that the next zone really is unpopulated */
22539420f89dSMike Rapoport (IBM) 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
22549420f89dSMike Rapoport (IBM) 
22559420f89dSMike Rapoport (IBM) 	pr_info("node %d deferred pages initialised in %ums\n",
22569420f89dSMike Rapoport (IBM) 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
22579420f89dSMike Rapoport (IBM) 
22589420f89dSMike Rapoport (IBM) 	pgdat_init_report_one_done();
22599420f89dSMike Rapoport (IBM) 	return 0;
22609420f89dSMike Rapoport (IBM) }
22619420f89dSMike Rapoport (IBM) 
22629420f89dSMike Rapoport (IBM) /*
22639420f89dSMike Rapoport (IBM)  * If this zone has deferred pages, try to grow it by initializing enough
22649420f89dSMike Rapoport (IBM)  * deferred pages to satisfy the allocation specified by order, rounded up to
22659420f89dSMike Rapoport (IBM)  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
22669420f89dSMike Rapoport (IBM)  * of SECTION_SIZE bytes by initializing struct pages in increments of
22679420f89dSMike Rapoport (IBM)  * PAGES_PER_SECTION * sizeof(struct page) bytes.
22689420f89dSMike Rapoport (IBM)  *
22699420f89dSMike Rapoport (IBM)  * Return true when zone was grown, otherwise return false. We return true even
22709420f89dSMike Rapoport (IBM)  * when we grow less than requested, to let the caller decide if there are
22719420f89dSMike Rapoport (IBM)  * enough pages to satisfy the allocation.
22729420f89dSMike Rapoport (IBM)  *
22739420f89dSMike Rapoport (IBM)  * Note: We use noinline because this function is needed only during boot, and
22749420f89dSMike Rapoport (IBM)  * it is called from a __ref function _deferred_grow_zone. This way we are
22759420f89dSMike Rapoport (IBM)  * making sure that it is not inlined into permanent text section.
22769420f89dSMike Rapoport (IBM)  */
22779420f89dSMike Rapoport (IBM) bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
22789420f89dSMike Rapoport (IBM) {
22799420f89dSMike Rapoport (IBM) 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
22809420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = zone->zone_pgdat;
22819420f89dSMike Rapoport (IBM) 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
22829420f89dSMike Rapoport (IBM) 	unsigned long spfn, epfn, flags;
22839420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
22849420f89dSMike Rapoport (IBM) 	u64 i;
22859420f89dSMike Rapoport (IBM) 
22869420f89dSMike Rapoport (IBM) 	/* Only the last zone may have deferred pages */
22879420f89dSMike Rapoport (IBM) 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
22889420f89dSMike Rapoport (IBM) 		return false;
22899420f89dSMike Rapoport (IBM) 
22909420f89dSMike Rapoport (IBM) 	pgdat_resize_lock(pgdat, &flags);
22919420f89dSMike Rapoport (IBM) 
22929420f89dSMike Rapoport (IBM) 	/*
22939420f89dSMike Rapoport (IBM) 	 * If someone grew this zone while we were waiting for the spinlock, return
22949420f89dSMike Rapoport (IBM) 	 * true, as there might be enough pages already.
22959420f89dSMike Rapoport (IBM) 	 */
22969420f89dSMike Rapoport (IBM) 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
22979420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
22989420f89dSMike Rapoport (IBM) 		return true;
22999420f89dSMike Rapoport (IBM) 	}
23009420f89dSMike Rapoport (IBM) 
23019420f89dSMike Rapoport (IBM) 	/* If the zone is empty, somebody else may have cleared it out already */
23029420f89dSMike Rapoport (IBM) 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
23039420f89dSMike Rapoport (IBM) 						 first_deferred_pfn)) {
23049420f89dSMike Rapoport (IBM) 		pgdat->first_deferred_pfn = ULONG_MAX;
23059420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
23069420f89dSMike Rapoport (IBM) 		/* Retry only once. */
23079420f89dSMike Rapoport (IBM) 		return first_deferred_pfn != ULONG_MAX;
23089420f89dSMike Rapoport (IBM) 	}
23099420f89dSMike Rapoport (IBM) 
23109420f89dSMike Rapoport (IBM) 	/*
23119420f89dSMike Rapoport (IBM) 	 * Initialize and free pages in MAX_ORDER sized increments so
23129420f89dSMike Rapoport (IBM) 	 * that we can avoid introducing any issues with the buddy
23139420f89dSMike Rapoport (IBM) 	 * allocator.
23149420f89dSMike Rapoport (IBM) 	 */
23159420f89dSMike Rapoport (IBM) 	while (spfn < epfn) {
23169420f89dSMike Rapoport (IBM) 		/* update our first deferred PFN for this section */
23179420f89dSMike Rapoport (IBM) 		first_deferred_pfn = spfn;
23189420f89dSMike Rapoport (IBM) 
23199420f89dSMike Rapoport (IBM) 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
23209420f89dSMike Rapoport (IBM) 		touch_nmi_watchdog();
23219420f89dSMike Rapoport (IBM) 
23229420f89dSMike Rapoport (IBM) 		/* We should only stop along section boundaries */
23239420f89dSMike Rapoport (IBM) 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
23249420f89dSMike Rapoport (IBM) 			continue;
23259420f89dSMike Rapoport (IBM) 
23269420f89dSMike Rapoport (IBM) 		/* If our quota has been met we can stop here */
23279420f89dSMike Rapoport (IBM) 		if (nr_pages >= nr_pages_needed)
23289420f89dSMike Rapoport (IBM) 			break;
23299420f89dSMike Rapoport (IBM) 	}
23309420f89dSMike Rapoport (IBM) 
23319420f89dSMike Rapoport (IBM) 	pgdat->first_deferred_pfn = spfn;
23329420f89dSMike Rapoport (IBM) 	pgdat_resize_unlock(pgdat, &flags);
23339420f89dSMike Rapoport (IBM) 
23349420f89dSMike Rapoport (IBM) 	return nr_pages > 0;
23359420f89dSMike Rapoport (IBM) }
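
/*
 * Sizing example (assuming 4KiB pages and 128MiB sections as on x86-64,
 * so PAGES_PER_SECTION == 32768): an order-3 allocation rounds up to
 * nr_pages_needed = ALIGN(8, 32768) = 32768 pages, so even a small
 * request grows the zone by at least one full 128MiB section.
 */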
23369420f89dSMike Rapoport (IBM) 
23379420f89dSMike Rapoport (IBM) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
23389420f89dSMike Rapoport (IBM) 
23399420f89dSMike Rapoport (IBM) #ifdef CONFIG_CMA
23409420f89dSMike Rapoport (IBM) void __init init_cma_reserved_pageblock(struct page *page)
23419420f89dSMike Rapoport (IBM) {
23429420f89dSMike Rapoport (IBM) 	unsigned i = pageblock_nr_pages;
23439420f89dSMike Rapoport (IBM) 	struct page *p = page;
23449420f89dSMike Rapoport (IBM) 
23459420f89dSMike Rapoport (IBM) 	do {
23469420f89dSMike Rapoport (IBM) 		__ClearPageReserved(p);
23479420f89dSMike Rapoport (IBM) 		set_page_count(p, 0);
23489420f89dSMike Rapoport (IBM) 	} while (++p, --i);
23499420f89dSMike Rapoport (IBM) 
23509420f89dSMike Rapoport (IBM) 	set_pageblock_migratetype(page, MIGRATE_CMA);
23519420f89dSMike Rapoport (IBM) 	set_page_refcounted(page);
23529420f89dSMike Rapoport (IBM) 	__free_pages(page, pageblock_order);
23539420f89dSMike Rapoport (IBM) 
23549420f89dSMike Rapoport (IBM) 	adjust_managed_page_count(page, pageblock_nr_pages);
23559420f89dSMike Rapoport (IBM) 	page_zone(page)->cma_pages += pageblock_nr_pages;
23569420f89dSMike Rapoport (IBM) }
23579420f89dSMike Rapoport (IBM) #endif
23589420f89dSMike Rapoport (IBM) 
2359904d5857SKefeng Wang void set_zone_contiguous(struct zone *zone)
2360904d5857SKefeng Wang {
2361904d5857SKefeng Wang 	unsigned long block_start_pfn = zone->zone_start_pfn;
2362904d5857SKefeng Wang 	unsigned long block_end_pfn;
2363904d5857SKefeng Wang 
2364904d5857SKefeng Wang 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2365904d5857SKefeng Wang 	for (; block_start_pfn < zone_end_pfn(zone);
2366904d5857SKefeng Wang 			block_start_pfn = block_end_pfn,
2367904d5857SKefeng Wang 			 block_end_pfn += pageblock_nr_pages) {
2368904d5857SKefeng Wang 
2369904d5857SKefeng Wang 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2370904d5857SKefeng Wang 
2371904d5857SKefeng Wang 		if (!__pageblock_pfn_to_page(block_start_pfn,
2372904d5857SKefeng Wang 					     block_end_pfn, zone))
2373904d5857SKefeng Wang 			return;
2374904d5857SKefeng Wang 		cond_resched();
2375904d5857SKefeng Wang 	}
2376904d5857SKefeng Wang 
2377904d5857SKefeng Wang 	/* No hole was found: the whole zone is contiguous */
2378904d5857SKefeng Wang 	zone->contiguous = true;
2379904d5857SKefeng Wang }
2380904d5857SKefeng Wang 
23819420f89dSMike Rapoport (IBM) void __init page_alloc_init_late(void)
23829420f89dSMike Rapoport (IBM) {
23839420f89dSMike Rapoport (IBM) 	struct zone *zone;
23849420f89dSMike Rapoport (IBM) 	int nid;
23859420f89dSMike Rapoport (IBM) 
23869420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
23879420f89dSMike Rapoport (IBM) 
23889420f89dSMike Rapoport (IBM) 	/* There will be num_node_state(N_MEMORY) threads */
23899420f89dSMike Rapoport (IBM) 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
23909420f89dSMike Rapoport (IBM) 	for_each_node_state(nid, N_MEMORY) {
23919420f89dSMike Rapoport (IBM) 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
23929420f89dSMike Rapoport (IBM) 	}
23939420f89dSMike Rapoport (IBM) 
23949420f89dSMike Rapoport (IBM) 	/* Block until all are initialised */
23959420f89dSMike Rapoport (IBM) 	wait_for_completion(&pgdat_init_all_done_comp);
23969420f89dSMike Rapoport (IBM) 
23979420f89dSMike Rapoport (IBM) 	/*
23989420f89dSMike Rapoport (IBM) 	 * We initialized the rest of the deferred pages.  Permanently disable
23999420f89dSMike Rapoport (IBM) 	 * on-demand struct page initialization.
24009420f89dSMike Rapoport (IBM) 	 */
24019420f89dSMike Rapoport (IBM) 	static_branch_disable(&deferred_pages);
24029420f89dSMike Rapoport (IBM) 
24039420f89dSMike Rapoport (IBM) 	/* Reinit limits that are based on free pages after the kernel is up */
24049420f89dSMike Rapoport (IBM) 	files_maxfiles_init();
24059420f89dSMike Rapoport (IBM) #endif
24069420f89dSMike Rapoport (IBM) 
24079420f89dSMike Rapoport (IBM) 	buffer_init();
24089420f89dSMike Rapoport (IBM) 
24099420f89dSMike Rapoport (IBM) 	/* Discard memblock private memory */
24109420f89dSMike Rapoport (IBM) 	memblock_discard();
24119420f89dSMike Rapoport (IBM) 
24129420f89dSMike Rapoport (IBM) 	for_each_node_state(nid, N_MEMORY)
24139420f89dSMike Rapoport (IBM) 		shuffle_free_memory(NODE_DATA(nid));
24149420f89dSMike Rapoport (IBM) 
24159420f89dSMike Rapoport (IBM) 	for_each_populated_zone(zone)
24169420f89dSMike Rapoport (IBM) 		set_zone_contiguous(zone);
2417de57807eSMike Rapoport (IBM) 
2418de57807eSMike Rapoport (IBM) 	/* Initialize page ext after all struct pages are initialized. */
2419de57807eSMike Rapoport (IBM) 	if (deferred_struct_pages)
2420de57807eSMike Rapoport (IBM) 		page_ext_init();
2421e95d372cSKefeng Wang 
2422e95d372cSKefeng Wang 	page_alloc_sysctl_init();
24239420f89dSMike Rapoport (IBM) }
24249420f89dSMike Rapoport (IBM) 
24259420f89dSMike Rapoport (IBM) #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
24269420f89dSMike Rapoport (IBM) /*
24279420f89dSMike Rapoport (IBM)  * Returns the number of pages that the arch has reserved but
24289420f89dSMike Rapoport (IBM)  * that are not known to alloc_large_system_hash().
24299420f89dSMike Rapoport (IBM)  */
24309420f89dSMike Rapoport (IBM) static unsigned long __init arch_reserved_kernel_pages(void)
24319420f89dSMike Rapoport (IBM) {
24329420f89dSMike Rapoport (IBM) 	return 0;
24339420f89dSMike Rapoport (IBM) }
24349420f89dSMike Rapoport (IBM) #endif
24359420f89dSMike Rapoport (IBM) 
24369420f89dSMike Rapoport (IBM) /*
24379420f89dSMike Rapoport (IBM)  * Adaptive scale is meant to reduce sizes of hash tables on large memory
24389420f89dSMike Rapoport (IBM)  * machines. As memory size is increased the scale is also increased but at
24399420f89dSMike Rapoport (IBM)  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
24409420f89dSMike Rapoport (IBM)  * quadruples the scale is increased by one, which means the size of hash table
24419420f89dSMike Rapoport (IBM)  * only doubles, instead of quadrupling as well.
24429420f89dSMike Rapoport (IBM)  * Because 32-bit systems cannot have the large physical memory where this
24439420f89dSMike Rapoport (IBM)  * scaling makes sense, it is disabled on such platforms.
24449420f89dSMike Rapoport (IBM)  */
24459420f89dSMike Rapoport (IBM) #if __BITS_PER_LONG > 32
24469420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_BASE	(64ul << 30)
24479420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_SHIFT	2
24489420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
24499420f89dSMike Rapoport (IBM) #endif
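
/*
 * Scaling example (illustrative, 4KiB pages): with 256GiB of memory,
 * numentries starts around 64M pages. The loop in
 * alloc_large_system_hash() below starts adapt at ADAPT_SCALE_NPAGES
 * (16M pages, i.e. 64GiB) and quadruples it once, at which point adapt
 * reaches numentries and the loop stops, having bumped scale by one.
 * Relative to a 64GiB machine the table therefore only doubles instead
 * of quadrupling.
 */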
24509420f89dSMike Rapoport (IBM) 
24519420f89dSMike Rapoport (IBM) /*
24529420f89dSMike Rapoport (IBM)  * allocate a large system hash table from bootmem
24539420f89dSMike Rapoport (IBM)  * - it is assumed that the hash table must contain an exact power-of-2
24549420f89dSMike Rapoport (IBM)  *   quantity of entries
24559420f89dSMike Rapoport (IBM)  * - limit is the number of hash buckets, not the total allocation size
24569420f89dSMike Rapoport (IBM)  */
24579420f89dSMike Rapoport (IBM) void *__init alloc_large_system_hash(const char *tablename,
24589420f89dSMike Rapoport (IBM) 				     unsigned long bucketsize,
24599420f89dSMike Rapoport (IBM) 				     unsigned long numentries,
24609420f89dSMike Rapoport (IBM) 				     int scale,
24619420f89dSMike Rapoport (IBM) 				     int flags,
24629420f89dSMike Rapoport (IBM) 				     unsigned int *_hash_shift,
24639420f89dSMike Rapoport (IBM) 				     unsigned int *_hash_mask,
24649420f89dSMike Rapoport (IBM) 				     unsigned long low_limit,
24659420f89dSMike Rapoport (IBM) 				     unsigned long high_limit)
24669420f89dSMike Rapoport (IBM) {
24679420f89dSMike Rapoport (IBM) 	unsigned long long max = high_limit;
24689420f89dSMike Rapoport (IBM) 	unsigned long log2qty, size;
24699420f89dSMike Rapoport (IBM) 	void *table;
24709420f89dSMike Rapoport (IBM) 	gfp_t gfp_flags;
24719420f89dSMike Rapoport (IBM) 	bool virt;
24729420f89dSMike Rapoport (IBM) 	bool huge;
24739420f89dSMike Rapoport (IBM) 
24749420f89dSMike Rapoport (IBM) 	/* allow the kernel cmdline to have a say */
24759420f89dSMike Rapoport (IBM) 	if (!numentries) {
24769420f89dSMike Rapoport (IBM) 		/* round applicable memory size up to nearest megabyte */
24779420f89dSMike Rapoport (IBM) 		numentries = nr_kernel_pages;
24789420f89dSMike Rapoport (IBM) 		numentries -= arch_reserved_kernel_pages();
24799420f89dSMike Rapoport (IBM) 
24809420f89dSMike Rapoport (IBM) 		/* The round-up isn't necessary when PAGE_SIZE >= 1MB */
24819420f89dSMike Rapoport (IBM) 		if (PAGE_SIZE < SZ_1M)
24829420f89dSMike Rapoport (IBM) 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
24839420f89dSMike Rapoport (IBM) 
24849420f89dSMike Rapoport (IBM) #if __BITS_PER_LONG > 32
24859420f89dSMike Rapoport (IBM) 		if (!high_limit) {
24869420f89dSMike Rapoport (IBM) 			unsigned long adapt;
24879420f89dSMike Rapoport (IBM) 
24889420f89dSMike Rapoport (IBM) 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
24899420f89dSMike Rapoport (IBM) 			     adapt <<= ADAPT_SCALE_SHIFT)
24909420f89dSMike Rapoport (IBM) 				scale++;
24919420f89dSMike Rapoport (IBM) 		}
24929420f89dSMike Rapoport (IBM) #endif
24939420f89dSMike Rapoport (IBM) 
24949420f89dSMike Rapoport (IBM) 		/* limit to 1 bucket per 2^scale bytes of low memory */
24959420f89dSMike Rapoport (IBM) 		if (scale > PAGE_SHIFT)
24969420f89dSMike Rapoport (IBM) 			numentries >>= (scale - PAGE_SHIFT);
24979420f89dSMike Rapoport (IBM) 		else
24989420f89dSMike Rapoport (IBM) 			numentries <<= (PAGE_SHIFT - scale);
24999420f89dSMike Rapoport (IBM) 
25003fade62bSMiaohe Lin 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
25019420f89dSMike Rapoport (IBM) 			numentries = PAGE_SIZE / bucketsize;
25029420f89dSMike Rapoport (IBM) 	}
25039420f89dSMike Rapoport (IBM) 	numentries = roundup_pow_of_two(numentries);
25049420f89dSMike Rapoport (IBM) 
25059420f89dSMike Rapoport (IBM) 	/* limit allocation size to 1/16 total memory by default */
25069420f89dSMike Rapoport (IBM) 	if (max == 0) {
25079420f89dSMike Rapoport (IBM) 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
25089420f89dSMike Rapoport (IBM) 		do_div(max, bucketsize);
25099420f89dSMike Rapoport (IBM) 	}
25109420f89dSMike Rapoport (IBM) 	max = min(max, 0x80000000ULL);
25119420f89dSMike Rapoport (IBM) 
25129420f89dSMike Rapoport (IBM) 	if (numentries < low_limit)
25139420f89dSMike Rapoport (IBM) 		numentries = low_limit;
25149420f89dSMike Rapoport (IBM) 	if (numentries > max)
25159420f89dSMike Rapoport (IBM) 		numentries = max;
25169420f89dSMike Rapoport (IBM) 
25179420f89dSMike Rapoport (IBM) 	log2qty = ilog2(numentries);
25189420f89dSMike Rapoport (IBM) 
25199420f89dSMike Rapoport (IBM) 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
25209420f89dSMike Rapoport (IBM) 	do {
25219420f89dSMike Rapoport (IBM) 		virt = false;
25229420f89dSMike Rapoport (IBM) 		size = bucketsize << log2qty;
25239420f89dSMike Rapoport (IBM) 		if (flags & HASH_EARLY) {
25249420f89dSMike Rapoport (IBM) 			if (flags & HASH_ZERO)
25259420f89dSMike Rapoport (IBM) 				table = memblock_alloc(size, SMP_CACHE_BYTES);
25269420f89dSMike Rapoport (IBM) 			else
25279420f89dSMike Rapoport (IBM) 				table = memblock_alloc_raw(size,
25289420f89dSMike Rapoport (IBM) 							   SMP_CACHE_BYTES);
25299420f89dSMike Rapoport (IBM) 		} else if (get_order(size) > MAX_ORDER || hashdist) {
25309420f89dSMike Rapoport (IBM) 			table = vmalloc_huge(size, gfp_flags);
25319420f89dSMike Rapoport (IBM) 			virt = true;
25329420f89dSMike Rapoport (IBM) 			if (table)
25339420f89dSMike Rapoport (IBM) 				huge = is_vm_area_hugepages(table);
25349420f89dSMike Rapoport (IBM) 		} else {
25359420f89dSMike Rapoport (IBM) 			/*
25369420f89dSMike Rapoport (IBM) 			 * If bucketsize is not a power of two, some pages at
25379420f89dSMike Rapoport (IBM) 			 * the end of the hash table can be freed;
25389420f89dSMike Rapoport (IBM) 			 * alloc_pages_exact() does this automatically.
25399420f89dSMike Rapoport (IBM) 			 */
25409420f89dSMike Rapoport (IBM) 			table = alloc_pages_exact(size, gfp_flags);
25419420f89dSMike Rapoport (IBM) 			kmemleak_alloc(table, size, 1, gfp_flags);
25429420f89dSMike Rapoport (IBM) 		}
25439420f89dSMike Rapoport (IBM) 	} while (!table && size > PAGE_SIZE && --log2qty);
25449420f89dSMike Rapoport (IBM) 
25459420f89dSMike Rapoport (IBM) 	if (!table)
25469420f89dSMike Rapoport (IBM) 		panic("Failed to allocate %s hash table\n", tablename);
25479420f89dSMike Rapoport (IBM) 
25489420f89dSMike Rapoport (IBM) 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
25499420f89dSMike Rapoport (IBM) 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
25509420f89dSMike Rapoport (IBM) 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
25519420f89dSMike Rapoport (IBM) 
25529420f89dSMike Rapoport (IBM) 	if (_hash_shift)
25539420f89dSMike Rapoport (IBM) 		*_hash_shift = log2qty;
25549420f89dSMike Rapoport (IBM) 	if (_hash_mask)
25559420f89dSMike Rapoport (IBM) 		*_hash_mask = (1 << log2qty) - 1;
25569420f89dSMike Rapoport (IBM) 
25579420f89dSMike Rapoport (IBM) 	return table;
25589420f89dSMike Rapoport (IBM) }
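
/*
 * Illustrative caller, modelled on the inode cache setup in fs/inode.c
 * (the exact parameters vary between kernel versions and are shown here
 * only as a usage sketch):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *						  sizeof(struct hlist_head),
 *						  ihash_entries,
 *						  14,
 *						  HASH_ZERO,
 *						  &i_hash_shift,
 *						  &i_hash_mask,
 *						  0,
 *						  0);
 */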
25599420f89dSMike Rapoport (IBM) 
25609420f89dSMike Rapoport (IBM) /**
25619420f89dSMike Rapoport (IBM)  * set_dma_reserve - set the specified number of pages reserved in the first zone
25629420f89dSMike Rapoport (IBM)  * @new_dma_reserve: The number of pages to mark reserved
25639420f89dSMike Rapoport (IBM)  *
25649420f89dSMike Rapoport (IBM)  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
25659420f89dSMike Rapoport (IBM)  * In the DMA zone, a significant percentage may be consumed by kernel image
25669420f89dSMike Rapoport (IBM)  * and other unfreeable allocations which can skew the watermarks badly. This
25679420f89dSMike Rapoport (IBM)  * function may optionally be used to account for unfreeable pages in the
25689420f89dSMike Rapoport (IBM)  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
25699420f89dSMike Rapoport (IBM)  * smaller per-cpu batchsize.
25709420f89dSMike Rapoport (IBM)  */
25719420f89dSMike Rapoport (IBM) void __init set_dma_reserve(unsigned long new_dma_reserve)
25729420f89dSMike Rapoport (IBM) {
25739420f89dSMike Rapoport (IBM) 	dma_reserve = new_dma_reserve;
25749420f89dSMike Rapoport (IBM) }
25759420f89dSMike Rapoport (IBM) 
25769420f89dSMike Rapoport (IBM) void __init memblock_free_pages(struct page *page, unsigned long pfn,
25779420f89dSMike Rapoport (IBM) 							unsigned int order)
25789420f89dSMike Rapoport (IBM) {
258061167ad5SYajun Deng 	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
258161167ad5SYajun Deng 		int nid = early_pfn_to_nid(pfn);
258261167ad5SYajun Deng 
258361167ad5SYajun Deng 		if (!early_page_initialised(pfn, nid))
25849420f89dSMike Rapoport (IBM) 			return;
258561167ad5SYajun Deng 	}
258661167ad5SYajun Deng 
25879420f89dSMike Rapoport (IBM) 	if (!kmsan_memblock_free_pages(page, order)) {
25889420f89dSMike Rapoport (IBM) 		/* KMSAN will take care of these pages. */
25899420f89dSMike Rapoport (IBM) 		return;
25909420f89dSMike Rapoport (IBM) 	}
25919420f89dSMike Rapoport (IBM) 	__free_pages_core(page, order);
25929420f89dSMike Rapoport (IBM) }
2593b7ec1bf3SMike Rapoport (IBM) 
25945e7d5da2SKefeng Wang DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
25955e7d5da2SKefeng Wang EXPORT_SYMBOL(init_on_alloc);
25965e7d5da2SKefeng Wang 
25975e7d5da2SKefeng Wang DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
25985e7d5da2SKefeng Wang EXPORT_SYMBOL(init_on_free);
25995e7d5da2SKefeng Wang 
2600f2fc4b44SMike Rapoport (IBM) static bool _init_on_alloc_enabled_early __read_mostly
2601f2fc4b44SMike Rapoport (IBM) 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2602f2fc4b44SMike Rapoport (IBM) static int __init early_init_on_alloc(char *buf)
2603f2fc4b44SMike Rapoport (IBM) {
2605f2fc4b44SMike Rapoport (IBM) 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2606f2fc4b44SMike Rapoport (IBM) }
2607f2fc4b44SMike Rapoport (IBM) early_param("init_on_alloc", early_init_on_alloc);
2608f2fc4b44SMike Rapoport (IBM) 
2609f2fc4b44SMike Rapoport (IBM) static bool _init_on_free_enabled_early __read_mostly
2610f2fc4b44SMike Rapoport (IBM) 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2611f2fc4b44SMike Rapoport (IBM) static int __init early_init_on_free(char *buf)
2612f2fc4b44SMike Rapoport (IBM) {
2613f2fc4b44SMike Rapoport (IBM) 	return kstrtobool(buf, &_init_on_free_enabled_early);
2614f2fc4b44SMike Rapoport (IBM) }
2615f2fc4b44SMike Rapoport (IBM) early_param("init_on_free", early_init_on_free);
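
/*
 * Usage sketch: both knobs are plain booleans on the kernel command
 * line, e.g.
 *
 *	init_on_alloc=1 init_on_free=1
 *
 * which zero-fills pages both when they are handed out and when they
 * are returned, trading some performance for hardening.
 */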
2616f2fc4b44SMike Rapoport (IBM) 
2617f2fc4b44SMike Rapoport (IBM) DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2618f2fc4b44SMike Rapoport (IBM) 
2619f2fc4b44SMike Rapoport (IBM) /*
2620f2fc4b44SMike Rapoport (IBM)  * Enable static keys related to various memory debugging and hardening options.
2621f2fc4b44SMike Rapoport (IBM)  * Some override others, and depend on early params that are evaluated in the
2622f2fc4b44SMike Rapoport (IBM)  * order of appearance. So we need to first gather the full picture of what was
2623f2fc4b44SMike Rapoport (IBM)  * enabled, and then make decisions.
2624f2fc4b44SMike Rapoport (IBM)  */
2625f2fc4b44SMike Rapoport (IBM) static void __init mem_debugging_and_hardening_init(void)
2626f2fc4b44SMike Rapoport (IBM) {
2627f2fc4b44SMike Rapoport (IBM) 	bool page_poisoning_requested = false;
2628f2fc4b44SMike Rapoport (IBM) 	bool want_check_pages = false;
2629f2fc4b44SMike Rapoport (IBM) 
2630f2fc4b44SMike Rapoport (IBM) #ifdef CONFIG_PAGE_POISONING
2631f2fc4b44SMike Rapoport (IBM) 	/*
2632f2fc4b44SMike Rapoport (IBM) 	 * Page poisoning stands in for debug_pagealloc on arches that
2633f2fc4b44SMike Rapoport (IBM) 	 * do not support the latter. If either option is enabled,
2634f2fc4b44SMike Rapoport (IBM) 	 * enable poisoning.
2634f2fc4b44SMike Rapoport (IBM) 	 */
2635f2fc4b44SMike Rapoport (IBM) 	if (page_poisoning_enabled() ||
2636f2fc4b44SMike Rapoport (IBM) 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2637f2fc4b44SMike Rapoport (IBM) 	      debug_pagealloc_enabled())) {
2638f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&_page_poisoning_enabled);
2639f2fc4b44SMike Rapoport (IBM) 		page_poisoning_requested = true;
2640f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2641f2fc4b44SMike Rapoport (IBM) 	}
2642f2fc4b44SMike Rapoport (IBM) #endif
2643f2fc4b44SMike Rapoport (IBM) 
2644f2fc4b44SMike Rapoport (IBM) 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2645f2fc4b44SMike Rapoport (IBM) 	    page_poisoning_requested) {
2646f2fc4b44SMike Rapoport (IBM) 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2647f2fc4b44SMike Rapoport (IBM) 			"will take precedence over init_on_alloc and init_on_free\n");
2648f2fc4b44SMike Rapoport (IBM) 		_init_on_alloc_enabled_early = false;
2649f2fc4b44SMike Rapoport (IBM) 		_init_on_free_enabled_early = false;
2650f2fc4b44SMike Rapoport (IBM) 	}
2651f2fc4b44SMike Rapoport (IBM) 
2652f2fc4b44SMike Rapoport (IBM) 	if (_init_on_alloc_enabled_early) {
2653f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2654f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&init_on_alloc);
2655f2fc4b44SMike Rapoport (IBM) 	} else {
2656f2fc4b44SMike Rapoport (IBM) 		static_branch_disable(&init_on_alloc);
2657f2fc4b44SMike Rapoport (IBM) 	}
2658f2fc4b44SMike Rapoport (IBM) 
2659f2fc4b44SMike Rapoport (IBM) 	if (_init_on_free_enabled_early) {
2660f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2661f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&init_on_free);
2662f2fc4b44SMike Rapoport (IBM) 	} else {
2663f2fc4b44SMike Rapoport (IBM) 		static_branch_disable(&init_on_free);
2664f2fc4b44SMike Rapoport (IBM) 	}
2665f2fc4b44SMike Rapoport (IBM) 
2666f2fc4b44SMike Rapoport (IBM) 	if (IS_ENABLED(CONFIG_KMSAN) &&
2667f2fc4b44SMike Rapoport (IBM) 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2668f2fc4b44SMike Rapoport (IBM) 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2669f2fc4b44SMike Rapoport (IBM) 
2670f2fc4b44SMike Rapoport (IBM) #ifdef CONFIG_DEBUG_PAGEALLOC
2671f2fc4b44SMike Rapoport (IBM) 	if (debug_pagealloc_enabled()) {
2672f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2673f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&_debug_pagealloc_enabled);
2674f2fc4b44SMike Rapoport (IBM) 
2675f2fc4b44SMike Rapoport (IBM) 		if (debug_guardpage_minorder())
2676f2fc4b44SMike Rapoport (IBM) 			static_branch_enable(&_debug_guardpage_enabled);
2677f2fc4b44SMike Rapoport (IBM) 	}
2678f2fc4b44SMike Rapoport (IBM) #endif
2679f2fc4b44SMike Rapoport (IBM) 
2680f2fc4b44SMike Rapoport (IBM) 	/*
2681f2fc4b44SMike Rapoport (IBM) 	 * Any page debugging or hardening option also enables sanity checking
2682f2fc4b44SMike Rapoport (IBM) 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2683f2fc4b44SMike Rapoport (IBM) 	 * enabled already.
2684f2fc4b44SMike Rapoport (IBM) 	 */
2685f2fc4b44SMike Rapoport (IBM) 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2686f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&check_pages_enabled);
2687f2fc4b44SMike Rapoport (IBM) }
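
/*
 * Interaction example, following the precedence logic above: on a
 * CONFIG_PAGE_POISONING kernel, booting with
 *
 *	page_poison=1 init_on_alloc=1
 *
 * keeps poisoning enabled, prints the "will take precedence" notice and
 * clears the init_on_alloc request, since zero-initialisation would
 * defeat the poison pattern checks.
 */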
2688f2fc4b44SMike Rapoport (IBM) 
2689b7ec1bf3SMike Rapoport (IBM) /* Report memory auto-initialization states for this boot. */
2690b7ec1bf3SMike Rapoport (IBM) static void __init report_meminit(void)
2691b7ec1bf3SMike Rapoport (IBM) {
2692b7ec1bf3SMike Rapoport (IBM) 	const char *stack;
2693b7ec1bf3SMike Rapoport (IBM) 
2694b7ec1bf3SMike Rapoport (IBM) 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2695b7ec1bf3SMike Rapoport (IBM) 		stack = "all(pattern)";
2696b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2697b7ec1bf3SMike Rapoport (IBM) 		stack = "all(zero)";
2698b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2699b7ec1bf3SMike Rapoport (IBM) 		stack = "byref_all(zero)";
2700b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2701b7ec1bf3SMike Rapoport (IBM) 		stack = "byref(zero)";
2702b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2703b7ec1bf3SMike Rapoport (IBM) 		stack = "__user(zero)";
2704b7ec1bf3SMike Rapoport (IBM) 	else
2705b7ec1bf3SMike Rapoport (IBM) 		stack = "off";
2706b7ec1bf3SMike Rapoport (IBM) 
2707b7ec1bf3SMike Rapoport (IBM) 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2708b7ec1bf3SMike Rapoport (IBM) 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2709b7ec1bf3SMike Rapoport (IBM) 		want_init_on_free() ? "on" : "off");
2710b7ec1bf3SMike Rapoport (IBM) 	if (want_init_on_free())
2711b7ec1bf3SMike Rapoport (IBM) 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2712b7ec1bf3SMike Rapoport (IBM) }
2713b7ec1bf3SMike Rapoport (IBM) 
2714eb8589b4SMike Rapoport (IBM) static void __init mem_init_print_info(void)
2715eb8589b4SMike Rapoport (IBM) {
2716eb8589b4SMike Rapoport (IBM) 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2717eb8589b4SMike Rapoport (IBM) 	unsigned long init_code_size, init_data_size;
2718eb8589b4SMike Rapoport (IBM) 
2719eb8589b4SMike Rapoport (IBM) 	physpages = get_num_physpages();
2720eb8589b4SMike Rapoport (IBM) 	codesize = _etext - _stext;
2721eb8589b4SMike Rapoport (IBM) 	datasize = _edata - _sdata;
2722eb8589b4SMike Rapoport (IBM) 	rosize = __end_rodata - __start_rodata;
2723eb8589b4SMike Rapoport (IBM) 	bss_size = __bss_stop - __bss_start;
2724eb8589b4SMike Rapoport (IBM) 	init_data_size = __init_end - __init_begin;
2725eb8589b4SMike Rapoport (IBM) 	init_code_size = _einittext - _sinittext;
2726eb8589b4SMike Rapoport (IBM) 
2727eb8589b4SMike Rapoport (IBM) 	/*
2728eb8589b4SMike Rapoport (IBM) 	 * Detect special cases and adjust section sizes accordingly:
2729eb8589b4SMike Rapoport (IBM) 	 * 1) .init.* may be embedded into .data sections
2730eb8589b4SMike Rapoport (IBM) 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2731eb8589b4SMike Rapoport (IBM) 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2732eb8589b4SMike Rapoport (IBM) 	 * 3) .rodata.* may be embedded into .text or .data sections.
2733eb8589b4SMike Rapoport (IBM) 	 */
2734eb8589b4SMike Rapoport (IBM) #define adj_init_size(start, end, size, pos, adj) \
2735eb8589b4SMike Rapoport (IBM) 	do { \
2736eb8589b4SMike Rapoport (IBM) 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2737eb8589b4SMike Rapoport (IBM) 			size -= adj; \
2738eb8589b4SMike Rapoport (IBM) 	} while (0)
2739eb8589b4SMike Rapoport (IBM) 
2740eb8589b4SMike Rapoport (IBM) 	adj_init_size(__init_begin, __init_end, init_data_size,
2741eb8589b4SMike Rapoport (IBM) 		     _sinittext, init_code_size);
2742eb8589b4SMike Rapoport (IBM) 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2743eb8589b4SMike Rapoport (IBM) 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2744eb8589b4SMike Rapoport (IBM) 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2745eb8589b4SMike Rapoport (IBM) 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2746eb8589b4SMike Rapoport (IBM) 
2747eb8589b4SMike Rapoport (IBM) #undef	adj_init_size
2748eb8589b4SMike Rapoport (IBM) 
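	/*
	 * "reserved" below is everything not (yet) handed to the buddy
	 * allocator: all physical pages minus managed RAM, with the CMA
	 * reservation broken out separately.
	 */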
2749eb8589b4SMike Rapoport (IBM) 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2750eb8589b4SMike Rapoport (IBM) #ifdef	CONFIG_HIGHMEM
2751eb8589b4SMike Rapoport (IBM) 		", %luK highmem"
2752eb8589b4SMike Rapoport (IBM) #endif
2753eb8589b4SMike Rapoport (IBM) 		")\n",
2754eb8589b4SMike Rapoport (IBM) 		K(nr_free_pages()), K(physpages),
2755eb8589b4SMike Rapoport (IBM) 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2756eb8589b4SMike Rapoport (IBM) 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2757eb8589b4SMike Rapoport (IBM) 		K(physpages - totalram_pages() - totalcma_pages),
2758eb8589b4SMike Rapoport (IBM) 		K(totalcma_pages)
2759eb8589b4SMike Rapoport (IBM) #ifdef	CONFIG_HIGHMEM
2760eb8589b4SMike Rapoport (IBM) 		, K(totalhigh_pages())
2761eb8589b4SMike Rapoport (IBM) #endif
2762eb8589b4SMike Rapoport (IBM) 		);
2763eb8589b4SMike Rapoport (IBM) }
2764eb8589b4SMike Rapoport (IBM) 
2765b7ec1bf3SMike Rapoport (IBM) /*
2766b7ec1bf3SMike Rapoport (IBM)  * Set up kernel memory allocators
2767b7ec1bf3SMike Rapoport (IBM)  */
2768b7ec1bf3SMike Rapoport (IBM) void __init mm_core_init(void)
2769b7ec1bf3SMike Rapoport (IBM) {
2770b7ec1bf3SMike Rapoport (IBM) 	/* Initializations relying on SMP setup */
2771b7ec1bf3SMike Rapoport (IBM) 	build_all_zonelists(NULL);
2772b7ec1bf3SMike Rapoport (IBM) 	page_alloc_init_cpuhp();
2773b7ec1bf3SMike Rapoport (IBM) 
2774b7ec1bf3SMike Rapoport (IBM) 	/*
2775b7ec1bf3SMike Rapoport (IBM) 	 * page_ext requires contiguous pages bigger than
2776b7ec1bf3SMike Rapoport (IBM) 	 * MAX_ORDER unless SPARSEMEM is enabled.
2777b7ec1bf3SMike Rapoport (IBM) 	 */
2778b7ec1bf3SMike Rapoport (IBM) 	page_ext_init_flatmem();
2779f2fc4b44SMike Rapoport (IBM) 	mem_debugging_and_hardening_init();
2780cabdf74eSPeng Zhang 	kfence_alloc_pool_and_metadata();
2781b7ec1bf3SMike Rapoport (IBM) 	report_meminit();
2782b7ec1bf3SMike Rapoport (IBM) 	kmsan_init_shadow();
2783b7ec1bf3SMike Rapoport (IBM) 	stack_depot_early_init();
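	/* Arch hook; typically releases free memblock memory to the buddy allocator */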
2784b7ec1bf3SMike Rapoport (IBM) 	mem_init();
2785b7ec1bf3SMike Rapoport (IBM) 	mem_init_print_info();
2786b7ec1bf3SMike Rapoport (IBM) 	kmem_cache_init();
2787b7ec1bf3SMike Rapoport (IBM) 	/*
2788b7ec1bf3SMike Rapoport (IBM) 	 * page_owner must be initialized after buddy is ready, and also after
2789b7ec1bf3SMike Rapoport (IBM) 	 * slab is ready so that stack_depot_init() works properly
2790b7ec1bf3SMike Rapoport (IBM) 	 */
2791b7ec1bf3SMike Rapoport (IBM) 	page_ext_init_flatmem_late();
2792b7ec1bf3SMike Rapoport (IBM) 	kmemleak_init();
27934cd1e9edSMike Rapoport (IBM) 	ptlock_cache_init();
27944cd1e9edSMike Rapoport (IBM) 	pgtable_cache_init();
2795b7ec1bf3SMike Rapoport (IBM) 	debug_objects_mem_init();
2796b7ec1bf3SMike Rapoport (IBM) 	vmalloc_init();
2797b7ec1bf3SMike Rapoport (IBM) 	/* If there are no deferred struct pages, init page_ext now, as vmap is fully initialized */
2798b7ec1bf3SMike Rapoport (IBM) 	if (!deferred_struct_pages)
2799b7ec1bf3SMike Rapoport (IBM) 		page_ext_init();
2800b7ec1bf3SMike Rapoport (IBM) 	/* Should be run before the first non-init thread is created */
2801b7ec1bf3SMike Rapoport (IBM) 	init_espfix_bsp();
2802b7ec1bf3SMike Rapoport (IBM) 	/* Should be run after espfix64 is set up. */
2803b7ec1bf3SMike Rapoport (IBM) 	pti_init();
2804b7ec1bf3SMike Rapoport (IBM) 	kmsan_init_runtime();
2805b7ec1bf3SMike Rapoport (IBM) 	mm_cache_init();
2806b7ec1bf3SMike Rapoport (IBM) }
2807