xref: /openbmc/linux/mm/mm_init.c (revision 726ccdba)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm_init.c - Memory initialisation verification and debugging
4  *
5  * Copyright 2008 IBM Corporation, 2008
6  * Author Mel Gorman <mel@csn.ul.ie>
7  *
8  */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/kobject.h>
12 #include <linux/export.h>
13 #include <linux/memory.h>
14 #include <linux/notifier.h>
15 #include <linux/sched.h>
16 #include <linux/mman.h>
17 #include <linux/memblock.h>
18 #include <linux/page-isolation.h>
19 #include <linux/padata.h>
20 #include <linux/nmi.h>
21 #include <linux/buffer_head.h>
22 #include <linux/kmemleak.h>
23 #include <linux/kfence.h>
24 #include <linux/page_ext.h>
25 #include <linux/pti.h>
26 #include <linux/pgtable.h>
27 #include <linux/swap.h>
28 #include <linux/cma.h>
29 #include "internal.h"
30 #include "slab.h"
31 #include "shuffle.h"
32 
33 #include <asm/setup.h>
34 
35 #ifdef CONFIG_DEBUG_MEMORY_INIT
36 int __meminitdata mminit_loglevel;
37 
38 /* The zonelists are simply reported, validation is manual. */
39 void __init mminit_verify_zonelist(void)
40 {
41 	int nid;
42 
43 	if (mminit_loglevel < MMINIT_VERIFY)
44 		return;
45 
46 	for_each_online_node(nid) {
47 		pg_data_t *pgdat = NODE_DATA(nid);
48 		struct zone *zone;
49 		struct zoneref *z;
50 		struct zonelist *zonelist;
51 		int i, listid, zoneid;
52 
53 		BUILD_BUG_ON(MAX_ZONELISTS > 2);
54 		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
55 
56 			/* Identify the zone and nodelist */
57 			zoneid = i % MAX_NR_ZONES;
58 			listid = i / MAX_NR_ZONES;
59 			zonelist = &pgdat->node_zonelists[listid];
60 			zone = &pgdat->node_zones[zoneid];
61 			if (!populated_zone(zone))
62 				continue;
63 
64 			/* Print information about the zonelist */
65 			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
66 				listid > 0 ? "thisnode" : "general", nid,
67 				zone->name);
68 
69 			/* Iterate the zonelist */
70 			for_each_zone_zonelist(zone, z, zonelist, zoneid)
71 				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
72 			pr_cont("\n");
73 		}
74 	}
75 }
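
/*
 * As an illustration (assuming a single-node x86-64 box with DMA, DMA32 and
 * Normal populated), the loop above would emit lines of the form:
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   mminit::zonelist general 0:DMA32 = 0:DMA32 0:DMA
 *   mminit::zonelist general 0:DMA = 0:DMA
 *
 * i.e. for each populated zone, the fallback order the allocator will walk,
 * printed as node:zone pairs.
 */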
76 
77 void __init mminit_verify_pageflags_layout(void)
78 {
79 	int shift, width;
80 	unsigned long or_mask, add_mask;
81 
82 	shift = 8 * sizeof(unsigned long);
83 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
84 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
85 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
86 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
87 		SECTIONS_WIDTH,
88 		NODES_WIDTH,
89 		ZONES_WIDTH,
90 		LAST_CPUPID_WIDTH,
91 		KASAN_TAG_WIDTH,
92 		LRU_GEN_WIDTH,
93 		LRU_REFS_WIDTH,
94 		NR_PAGEFLAGS);
95 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
96 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
97 		SECTIONS_SHIFT,
98 		NODES_SHIFT,
99 		ZONES_SHIFT,
100 		LAST_CPUPID_SHIFT,
101 		KASAN_TAG_WIDTH);
102 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
103 		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
104 		(unsigned long)SECTIONS_PGSHIFT,
105 		(unsigned long)NODES_PGSHIFT,
106 		(unsigned long)ZONES_PGSHIFT,
107 		(unsigned long)LAST_CPUPID_PGSHIFT,
108 		(unsigned long)KASAN_TAG_PGSHIFT);
109 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
110 		"Node/Zone ID: %lu -> %lu\n",
111 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
112 		(unsigned long)ZONEID_PGOFF);
113 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
114 		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
115 		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
116 #ifdef NODE_NOT_IN_PAGE_FLAGS
117 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
118 		"Node not in page flags");
119 #endif
120 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
121 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
122 		"Last cpupid not in page flags");
123 #endif
124 
125 	if (SECTIONS_WIDTH) {
126 		shift -= SECTIONS_WIDTH;
127 		BUG_ON(shift != SECTIONS_PGSHIFT);
128 	}
129 	if (NODES_WIDTH) {
130 		shift -= NODES_WIDTH;
131 		BUG_ON(shift != NODES_PGSHIFT);
132 	}
133 	if (ZONES_WIDTH) {
134 		shift -= ZONES_WIDTH;
135 		BUG_ON(shift != ZONES_PGSHIFT);
136 	}
137 
138 	/* Check for bitmask overlaps */
139 	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
140 			(NODES_MASK << NODES_PGSHIFT) |
141 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
142 	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
143 			(NODES_MASK << NODES_PGSHIFT) +
144 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
145 	BUG_ON(or_mask != add_mask);
146 }
147 
148 static __init int set_mminit_loglevel(char *str)
149 {
150 	get_option(&str, &mminit_loglevel);
151 	return 0;
152 }
153 early_param("mminit_loglevel", set_mminit_loglevel);
154 #endif /* CONFIG_DEBUG_MEMORY_INIT */
155 
156 struct kobject *mm_kobj;
157 EXPORT_SYMBOL_GPL(mm_kobj);
158 
159 #ifdef CONFIG_SMP
160 s32 vm_committed_as_batch = 32;
161 
162 void mm_compute_batch(int overcommit_policy)
163 {
164 	u64 memsized_batch;
165 	s32 nr = num_present_cpus();
166 	s32 batch = max_t(s32, nr*2, 32);
167 	unsigned long ram_pages = totalram_pages();
168 
169 	/*
170 	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
171 	 * (total memory/#cpus), and lift it to 25% for other policies
172 	 * to ease the possible lock contention for percpu_counter
173 	 * vm_committed_as, while the max limit is INT_MAX.
174 	 */
175 	if (overcommit_policy == OVERCOMMIT_NEVER)
176 		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
177 	else
178 		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
179 
180 	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
181 }
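
/*
 * Worked example (assuming 8 present CPUs and 16 GiB of RAM with 4 KiB
 * pages, i.e. totalram_pages() == 4194304):
 *
 *   batch             = max(8 * 2, 32)     = 32
 *   OVERCOMMIT_NEVER:   4194304 / 8 / 256  = 2048   (~0.4% of RAM per CPU)
 *   other policies:     4194304 / 8 / 4    = 131072 (25% of RAM per CPU)
 *
 * so vm_committed_as_batch ends up as 2048 or 131072 respectively; the
 * CPU-based floor of 32 only wins on very small machines.
 */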
182 
183 static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
184 					unsigned long action, void *arg)
185 {
186 	switch (action) {
187 	case MEM_ONLINE:
188 	case MEM_OFFLINE:
189 		mm_compute_batch(sysctl_overcommit_memory);
190 		break;
191 	default:
192 		break;
193 	}
194 	return NOTIFY_OK;
195 }
196 
197 static int __init mm_compute_batch_init(void)
198 {
199 	mm_compute_batch(sysctl_overcommit_memory);
200 	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
201 	return 0;
202 }
203 
204 __initcall(mm_compute_batch_init);
205 
206 #endif
207 
208 static int __init mm_sysfs_init(void)
209 {
210 	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
211 	if (!mm_kobj)
212 		return -ENOMEM;
213 
214 	return 0;
215 }
216 postcore_initcall(mm_sysfs_init);
217 
218 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
219 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
220 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
221 
222 static unsigned long required_kernelcore __initdata;
223 static unsigned long required_kernelcore_percent __initdata;
224 static unsigned long required_movablecore __initdata;
225 static unsigned long required_movablecore_percent __initdata;
226 
227 static unsigned long nr_kernel_pages __initdata;
228 static unsigned long nr_all_pages __initdata;
229 static unsigned long dma_reserve __initdata;
230 
231 static bool deferred_struct_pages __meminitdata;
232 
233 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
234 
235 static int __init cmdline_parse_core(char *p, unsigned long *core,
236 				     unsigned long *percent)
237 {
238 	unsigned long long coremem;
239 	char *endptr;
240 
241 	if (!p)
242 		return -EINVAL;
243 
244 	/* Value may be a percentage of total memory, otherwise bytes */
245 	coremem = simple_strtoull(p, &endptr, 0);
246 	if (*endptr == '%') {
247 		/* Paranoid check for percent values greater than 100 */
248 		WARN_ON(coremem > 100);
249 
250 		*percent = coremem;
251 	} else {
252 		coremem = memparse(p, &p);
253 		/* Paranoid check that UL is enough for the coremem value */
254 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
255 
256 		*core = coremem >> PAGE_SHIFT;
257 		*percent = 0UL;
258 	}
259 	return 0;
260 }
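
/*
 * For example (assuming 4 KiB pages, PAGE_SHIFT == 12):
 *
 *   kernelcore=512M  ->  *core = 536870912 >> 12 = 131072 pages, *percent = 0
 *   kernelcore=30%   ->  *percent = 30, *core is left untouched
 */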
261 
262 bool mirrored_kernelcore __initdata_memblock;
263 
264 /*
265  * kernelcore=size sets the amount of memory for use by allocations that
266  * cannot be reclaimed or migrated.
267  */
268 static int __init cmdline_parse_kernelcore(char *p)
269 {
270 	/* parse kernelcore=mirror */
271 	if (parse_option_str(p, "mirror")) {
272 		mirrored_kernelcore = true;
273 		return 0;
274 	}
275 
276 	return cmdline_parse_core(p, &required_kernelcore,
277 				  &required_kernelcore_percent);
278 }
279 early_param("kernelcore", cmdline_parse_kernelcore);
280 
281 /*
282  * movablecore=size sets the amount of memory for use by allocations that
283  * can be reclaimed or migrated.
284  */
285 static int __init cmdline_parse_movablecore(char *p)
286 {
287 	return cmdline_parse_core(p, &required_movablecore,
288 				  &required_movablecore_percent);
289 }
290 early_param("movablecore", cmdline_parse_movablecore);
291 
292 /*
293  * early_calculate_totalpages()
294  * Sum pages in active regions for movable zone.
295  * Populate N_MEMORY for calculating usable_nodes.
296  */
297 static unsigned long __init early_calculate_totalpages(void)
298 {
299 	unsigned long totalpages = 0;
300 	unsigned long start_pfn, end_pfn;
301 	int i, nid;
302 
303 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
304 		unsigned long pages = end_pfn - start_pfn;
305 
306 		totalpages += pages;
307 		if (pages)
308 			node_set_state(nid, N_MEMORY);
309 	}
310 	return totalpages;
311 }
312 
313 /*
314  * This finds a zone that can be used for ZONE_MOVABLE pages. The
315  * assumption is made that zones within a node are ordered in monotonic
316  * assumption is made that zones within a node are ordered in monotonically
317  * increasing memory addresses, so that the "highest" populated zone is used.
318 static void __init find_usable_zone_for_movable(void)
319 {
320 	int zone_index;
321 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
322 		if (zone_index == ZONE_MOVABLE)
323 			continue;
324 
325 		if (arch_zone_highest_possible_pfn[zone_index] >
326 				arch_zone_lowest_possible_pfn[zone_index])
327 			break;
328 	}
329 
330 	VM_BUG_ON(zone_index == -1);
331 	movable_zone = zone_index;
332 }
333 
334 /*
335  * Find the PFN the Movable zone begins in each node. Kernel memory
336  * is spread evenly between nodes as long as the nodes have enough
337  * memory. When they don't, some nodes will have more kernelcore than
338  * others
339  */
340 static void __init find_zone_movable_pfns_for_nodes(void)
341 {
342 	int i, nid;
343 	unsigned long usable_startpfn;
344 	unsigned long kernelcore_node, kernelcore_remaining;
345 	/* save the state before borrowing the nodemask */
346 	nodemask_t saved_node_state = node_states[N_MEMORY];
347 	unsigned long totalpages = early_calculate_totalpages();
348 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
349 	struct memblock_region *r;
350 
351 	/* Need to find movable_zone earlier when movable_node is specified. */
352 	find_usable_zone_for_movable();
353 
354 	/*
355 	 * If movable_node is specified, ignore kernelcore and movablecore
356 	 * options.
357 	 */
358 	if (movable_node_is_enabled()) {
359 		for_each_mem_region(r) {
360 			if (!memblock_is_hotpluggable(r))
361 				continue;
362 
363 			nid = memblock_get_region_node(r);
364 
365 			usable_startpfn = PFN_DOWN(r->base);
366 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
367 				min(usable_startpfn, zone_movable_pfn[nid]) :
368 				usable_startpfn;
369 		}
370 
371 		goto out2;
372 	}
373 
374 	/*
375 	 * If kernelcore=mirror is specified, ignore movablecore option
376 	 */
377 	if (mirrored_kernelcore) {
378 		bool mem_below_4gb_not_mirrored = false;
379 
380 		for_each_mem_region(r) {
381 			if (memblock_is_mirror(r))
382 				continue;
383 
384 			nid = memblock_get_region_node(r);
385 
386 			usable_startpfn = memblock_region_memory_base_pfn(r);
387 
388 			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
389 				mem_below_4gb_not_mirrored = true;
390 				continue;
391 			}
392 
393 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
394 				min(usable_startpfn, zone_movable_pfn[nid]) :
395 				usable_startpfn;
396 		}
397 
398 		if (mem_below_4gb_not_mirrored)
399 			pr_warn("This configuration results in unmirrored kernel memory.\n");
400 
401 		goto out2;
402 	}
403 
404 	/*
405 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
406 	 * amount of necessary memory.
407 	 */
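	/*
	 * E.g. with totalpages == 4194304 (16 GiB of 4 KiB pages) and
	 * kernelcore=25%: (4194304 * 100 * 25) / 10000 = 1048576 pages.
	 */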
408 	if (required_kernelcore_percent)
409 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
410 				       10000UL;
411 	if (required_movablecore_percent)
412 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
413 					10000UL;
414 
415 	/*
416 	 * If movablecore= was specified, calculate the size of
417 	 * kernelcore that corresponds to it, so that memory usable for
418 	 * any allocation type is evenly spread. If both kernelcore
419 	 * and movablecore are specified, then the value of kernelcore
420 	 * will be used for required_kernelcore if it's greater than
421 	 * what movablecore would have allowed.
422 	 */
423 	if (required_movablecore) {
424 		unsigned long corepages;
425 
426 		/*
427 		 * Round-up so that ZONE_MOVABLE is at least as large as what
428 		 * was requested by the user
429 		 */
430 		required_movablecore =
431 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
432 		required_movablecore = min(totalpages, required_movablecore);
433 		corepages = totalpages - required_movablecore;
434 
435 		required_kernelcore = max(required_kernelcore, corepages);
436 	}
437 
438 	/*
439 	 * If kernelcore was not specified or kernelcore size is larger
440 	 * than totalpages, there is no ZONE_MOVABLE.
441 	 */
442 	if (!required_kernelcore || required_kernelcore >= totalpages)
443 		goto out;
444 
445 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
446 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
447 
448 restart:
449 	/* Spread kernelcore memory as evenly as possible throughout nodes */
450 	kernelcore_node = required_kernelcore / usable_nodes;
451 	for_each_node_state(nid, N_MEMORY) {
452 		unsigned long start_pfn, end_pfn;
453 
454 		/*
455 		 * Recalculate kernelcore_node if the division per node
456 		 * now exceeds what is necessary to satisfy the requested
457 		 * amount of memory for the kernel
458 		 */
459 		if (required_kernelcore < kernelcore_node)
460 			kernelcore_node = required_kernelcore / usable_nodes;
461 
462 		/*
463 		 * As the map is walked, we track how much memory is usable
464 		 * by the kernel using kernelcore_remaining. When it is
465 		 * 0, the rest of the node is usable by ZONE_MOVABLE
466 		 */
467 		kernelcore_remaining = kernelcore_node;
468 
469 		/* Go through each range of PFNs within this node */
470 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
471 			unsigned long size_pages;
472 
473 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
474 			if (start_pfn >= end_pfn)
475 				continue;
476 
477 			/* Account for what is only usable for kernelcore */
478 			if (start_pfn < usable_startpfn) {
479 				unsigned long kernel_pages;
480 				kernel_pages = min(end_pfn, usable_startpfn)
481 								- start_pfn;
482 
483 				kernelcore_remaining -= min(kernel_pages,
484 							kernelcore_remaining);
485 				required_kernelcore -= min(kernel_pages,
486 							required_kernelcore);
487 
488 				/* Continue if range is now fully accounted */
489 				if (end_pfn <= usable_startpfn) {
490 
491 					/*
492 					 * Push zone_movable_pfn to the end so
493 					 * that if we have to rebalance
494 					 * kernelcore across nodes, we will
495 					 * not double account here
496 					 */
497 					zone_movable_pfn[nid] = end_pfn;
498 					continue;
499 				}
500 				start_pfn = usable_startpfn;
501 			}
502 
503 			/*
504 			 * The usable PFN range for ZONE_MOVABLE is from
505 			 * start_pfn->end_pfn. Calculate size_pages as the
506 			 * number of pages used as kernelcore
507 			 */
508 			size_pages = end_pfn - start_pfn;
509 			if (size_pages > kernelcore_remaining)
510 				size_pages = kernelcore_remaining;
511 			zone_movable_pfn[nid] = start_pfn + size_pages;
512 
513 			/*
514 			 * Some kernelcore has been met, update counts and
515 			 * break if the kernelcore for this node has been
516 			 * satisfied
517 			 */
518 			required_kernelcore -= min(required_kernelcore,
519 								size_pages);
520 			kernelcore_remaining -= size_pages;
521 			if (!kernelcore_remaining)
522 				break;
523 		}
524 	}
525 
526 	/*
527 	 * If there is still required_kernelcore, we do another pass with one
528 	 * less node in the count. This will push zone_movable_pfn[nid] further
529 	 * along on the nodes that still have memory until kernelcore is
530 	 * satisfied
531 	 */
532 	usable_nodes--;
533 	if (usable_nodes && required_kernelcore > usable_nodes)
534 		goto restart;
535 
536 out2:
537 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
538 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
539 		unsigned long start_pfn, end_pfn;
540 
541 		zone_movable_pfn[nid] =
542 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
543 
544 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
545 		if (zone_movable_pfn[nid] >= end_pfn)
546 			zone_movable_pfn[nid] = 0;
547 	}
548 
549 out:
550 	/* restore the node_state */
551 	node_states[N_MEMORY] = saved_node_state;
552 }
553 
554 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
555 				unsigned long zone, int nid)
556 {
557 	mm_zero_struct_page(page);
558 	set_page_links(page, zone, nid, pfn);
559 	init_page_count(page);
560 	page_mapcount_reset(page);
561 	page_cpupid_reset_last(page);
562 	page_kasan_tag_reset(page);
563 
564 	INIT_LIST_HEAD(&page->lru);
565 #ifdef WANT_PAGE_VIRTUAL
566 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
567 	if (!is_highmem_idx(zone))
568 		set_page_address(page, __va(pfn << PAGE_SHIFT));
569 #endif
570 }
571 
572 #ifdef CONFIG_NUMA
573 /*
574  * During memory init, memblock maps PFNs to nids. The search is expensive and
575  * this caches recent lookups. The implementation of __early_pfn_to_nid
576  * treats start/end as pfns.
577  */
578 struct mminit_pfnnid_cache {
579 	unsigned long last_start;
580 	unsigned long last_end;
581 	int last_nid;
582 };
583 
584 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
585 
586 /*
587  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
588  */
589 static int __meminit __early_pfn_to_nid(unsigned long pfn,
590 					struct mminit_pfnnid_cache *state)
591 {
592 	unsigned long start_pfn, end_pfn;
593 	int nid;
594 
595 	if (state->last_start <= pfn && pfn < state->last_end)
596 		return state->last_nid;
597 
598 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
599 	if (nid != NUMA_NO_NODE) {
600 		state->last_start = start_pfn;
601 		state->last_end = end_pfn;
602 		state->last_nid = nid;
603 	}
604 
605 	return nid;
606 }
607 
608 int __meminit early_pfn_to_nid(unsigned long pfn)
609 {
610 	static DEFINE_SPINLOCK(early_pfn_lock);
611 	int nid;
612 
613 	spin_lock(&early_pfn_lock);
614 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
615 	if (nid < 0)
616 		nid = first_online_node;
617 	spin_unlock(&early_pfn_lock);
618 
619 	return nid;
620 }
621 
622 int hashdist = HASHDIST_DEFAULT;
623 
624 static int __init set_hashdist(char *str)
625 {
626 	if (!str)
627 		return 0;
628 	hashdist = simple_strtoul(str, &str, 0);
629 	return 1;
630 }
631 __setup("hashdist=", set_hashdist);
632 
633 static inline void fixup_hashdist(void)
634 {
635 	if (num_node_state(N_MEMORY) == 1)
636 		hashdist = 0;
637 }
638 #else
639 static inline void fixup_hashdist(void) {}
640 #endif /* CONFIG_NUMA */
641 
642 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
643 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
644 {
645 	pgdat->first_deferred_pfn = ULONG_MAX;
646 }
647 
648 /* Returns true if the struct page for the pfn is initialised */
649 static inline bool __meminit early_page_initialised(unsigned long pfn)
650 {
651 	int nid = early_pfn_to_nid(pfn);
652 
653 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
654 		return false;
655 
656 	return true;
657 }
658 
659 /*
660  * Returns true when the remaining initialisation should be deferred until
661  * later in the boot cycle when it can be parallelised.
662  */
663 static bool __meminit
664 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
665 {
666 	static unsigned long prev_end_pfn, nr_initialised;
667 
668 	if (early_page_ext_enabled())
669 		return false;
670 	/*
671 	 * The static prev_end_pfn contains the end of the previous zone. No need
672 	 * to protect it because this is called very early in boot, before smp_init.
673 	 */
674 	if (prev_end_pfn != end_pfn) {
675 		prev_end_pfn = end_pfn;
676 		nr_initialised = 0;
677 	}
678 
679 	/* Always populate low zones for address-constrained allocations */
680 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
681 		return false;
682 
683 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
684 		return true;
685 	/*
686 	 * We start with only one section of pages; more pages are added as
687 	 * needed until the rest of the deferred pages are initialized.
688 	 */
689 	nr_initialised++;
690 	if ((nr_initialised > PAGES_PER_SECTION) &&
691 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
692 		NODE_DATA(nid)->first_deferred_pfn = pfn;
693 		return true;
694 	}
695 	return false;
696 }
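
/*
 * Net effect (assuming x86-64 defaults of 4 KiB pages and 128 MiB sections,
 * so PAGES_PER_SECTION == 32768): zones that end below the node's last PFN
 * are always initialised eagerly; for the zone reaching the node end, at
 * least one section's worth of struct pages is initialised before
 * first_deferred_pfn is recorded, and the remainder is left for the deferred
 * page init later in boot.
 */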
697 
698 static void __meminit init_reserved_page(unsigned long pfn)
699 {
700 	pg_data_t *pgdat;
701 	int nid, zid;
702 
703 	if (early_page_initialised(pfn))
704 		return;
705 
706 	nid = early_pfn_to_nid(pfn);
707 	pgdat = NODE_DATA(nid);
708 
709 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
710 		struct zone *zone = &pgdat->node_zones[zid];
711 
712 		if (zone_spans_pfn(zone, pfn))
713 			break;
714 	}
715 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
716 }
717 #else
718 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
719 
720 static inline bool early_page_initialised(unsigned long pfn)
721 {
722 	return true;
723 }
724 
725 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
726 {
727 	return false;
728 }
729 
730 static inline void init_reserved_page(unsigned long pfn)
731 {
732 }
733 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
734 
735 /*
736  * Initialised pages do not have PageReserved set. This function is
737  * called for each range allocated by the bootmem allocator and
738  * marks the pages PageReserved. The remaining valid pages are later
739  * sent to the buddy page allocator.
740  */
741 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
742 {
743 	unsigned long start_pfn = PFN_DOWN(start);
744 	unsigned long end_pfn = PFN_UP(end);
745 
746 	for (; start_pfn < end_pfn; start_pfn++) {
747 		if (pfn_valid(start_pfn)) {
748 			struct page *page = pfn_to_page(start_pfn);
749 
750 			init_reserved_page(start_pfn);
751 
752 			/* Avoid false-positive PageTail() */
753 			INIT_LIST_HEAD(&page->lru);
754 
755 			/*
756 			 * no need for atomic set_bit because the struct
757 			 * page is not visible yet so nobody should
758 			 * access it yet.
759 			 */
760 			__SetPageReserved(page);
761 		}
762 	}
763 }
764 
765 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
766 static bool __meminit
767 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
768 {
769 	static struct memblock_region *r;
770 
771 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
772 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
773 			for_each_mem_region(r) {
774 				if (*pfn < memblock_region_memory_end_pfn(r))
775 					break;
776 			}
777 		}
778 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
779 		    memblock_is_mirror(r)) {
780 			*pfn = memblock_region_memory_end_pfn(r);
781 			return true;
782 		}
783 	}
784 	return false;
785 }
786 
787 /*
788  * Only struct pages that correspond to ranges defined by memblock.memory
789  * are zeroed and initialized by going through __init_single_page() during
790  * memmap_init_zone_range().
791  *
792  * But, there could be struct pages that correspond to holes in
793  * memblock.memory. This can happen because of the following reasons:
794  * - physical memory bank size is not necessarily the exact multiple of the
795  *   arbitrary section size
796  * - early reserved memory may not be listed in memblock.memory
797  * - memory layouts defined with memmap= kernel parameter may not align
798  *   nicely with memmap sections
799  *
800  * Explicitly initialize those struct pages so that:
801  * - PG_Reserved is set
802  * - zone and node links point to zone and node that span the page if the
803  *   hole is in the middle of a zone
804  * - zone and node links point to adjacent zone/node if the hole falls on
805  *   the zone boundary; the pages in such holes will be prepended to the
806  *   zone/node above the hole except for the trailing pages in the last
807  *   section that will be appended to the zone/node below.
808  */
809 static void __init init_unavailable_range(unsigned long spfn,
810 					  unsigned long epfn,
811 					  int zone, int node)
812 {
813 	unsigned long pfn;
814 	u64 pgcnt = 0;
815 
816 	for (pfn = spfn; pfn < epfn; pfn++) {
817 		if (!pfn_valid(pageblock_start_pfn(pfn))) {
818 			pfn = pageblock_end_pfn(pfn) - 1;
819 			continue;
820 		}
821 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
822 		__SetPageReserved(pfn_to_page(pfn));
823 		pgcnt++;
824 	}
825 
826 	if (pgcnt)
827 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
828 			node, zone_names[zone], pgcnt);
829 }
830 
831 /*
832  * Initially all pages are reserved - free ones are freed
833  * up by memblock_free_all() once the early boot process is
834  * done. Non-atomic initialization, single-pass.
835  *
836  * All aligned pageblocks are initialized to the specified migratetype
837  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
838  * zone stats (e.g., nr_isolate_pageblock) are touched.
839  */
840 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
841 		unsigned long start_pfn, unsigned long zone_end_pfn,
842 		enum meminit_context context,
843 		struct vmem_altmap *altmap, int migratetype)
844 {
845 	unsigned long pfn, end_pfn = start_pfn + size;
846 	struct page *page;
847 
848 	if (highest_memmap_pfn < end_pfn - 1)
849 		highest_memmap_pfn = end_pfn - 1;
850 
851 #ifdef CONFIG_ZONE_DEVICE
852 	/*
853 	 * Honor reservation requested by the driver for this ZONE_DEVICE
854 	 * memory. We limit the total number of pages to initialize to just
855 	 * those that might contain the memory mapping. We will defer the
856 	 * ZONE_DEVICE page initialization until after we have released
857 	 * the hotplug lock.
858 	 */
859 	if (zone == ZONE_DEVICE) {
860 		if (!altmap)
861 			return;
862 
863 		if (start_pfn == altmap->base_pfn)
864 			start_pfn += altmap->reserve;
865 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
866 	}
867 #endif
868 
869 	for (pfn = start_pfn; pfn < end_pfn; ) {
870 		/*
871 		 * There can be holes in boot-time mem_map[]s handed to this
872 		 * function.  They do not exist on hotplugged memory.
873 		 */
874 		if (context == MEMINIT_EARLY) {
875 			if (overlap_memmap_init(zone, &pfn))
876 				continue;
877 			if (defer_init(nid, pfn, zone_end_pfn)) {
878 				deferred_struct_pages = true;
879 				break;
880 			}
881 		}
882 
883 		page = pfn_to_page(pfn);
884 		__init_single_page(page, pfn, zone, nid);
885 		if (context == MEMINIT_HOTPLUG)
886 			__SetPageReserved(page);
887 
888 		/*
889 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
890 		 * such that unmovable allocations won't be scattered all
891 		 * over the place during system boot.
892 		 */
893 		if (pageblock_aligned(pfn)) {
894 			set_pageblock_migratetype(page, migratetype);
895 			cond_resched();
896 		}
897 		pfn++;
898 	}
899 }
900 
901 static void __init memmap_init_zone_range(struct zone *zone,
902 					  unsigned long start_pfn,
903 					  unsigned long end_pfn,
904 					  unsigned long *hole_pfn)
905 {
906 	unsigned long zone_start_pfn = zone->zone_start_pfn;
907 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
908 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
909 
910 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
911 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
912 
913 	if (start_pfn >= end_pfn)
914 		return;
915 
916 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
917 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
918 
919 	if (*hole_pfn < start_pfn)
920 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
921 
922 	*hole_pfn = end_pfn;
923 }
924 
925 static void __init memmap_init(void)
926 {
927 	unsigned long start_pfn, end_pfn;
928 	unsigned long hole_pfn = 0;
929 	int i, j, zone_id = 0, nid;
930 
931 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
932 		struct pglist_data *node = NODE_DATA(nid);
933 
934 		for (j = 0; j < MAX_NR_ZONES; j++) {
935 			struct zone *zone = node->node_zones + j;
936 
937 			if (!populated_zone(zone))
938 				continue;
939 
940 			memmap_init_zone_range(zone, start_pfn, end_pfn,
941 					       &hole_pfn);
942 			zone_id = j;
943 		}
944 	}
945 
946 #ifdef CONFIG_SPARSEMEM
947 	/*
948 	 * Initialize the memory map for the hole in the range [memory_end,
949 	 * section_end].
950 	 * Append the pages in this hole to the highest zone in the last
951 	 * node.
952 	 * The call to init_unavailable_range() is outside the ifdef to
953 	 * silence the compiler warning about zone_id set but not used;
954 	 * for FLATMEM it is a nop anyway.
955 	 */
956 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
957 	if (hole_pfn < end_pfn)
958 #endif
959 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
960 }
961 
962 #ifdef CONFIG_ZONE_DEVICE
963 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
964 					  unsigned long zone_idx, int nid,
965 					  struct dev_pagemap *pgmap)
966 {
967 
968 	__init_single_page(page, pfn, zone_idx, nid);
969 
970 	/*
971 	 * Mark page reserved as it will need to wait for onlining
972 	 * phase for it to be fully associated with a zone.
973 	 *
974 	 * We can use the non-atomic __set_bit operation for setting
975 	 * the flag as we are still initializing the pages.
976 	 */
977 	__SetPageReserved(page);
978 
979 	/*
980 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
981 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
982 	 * ever freed or placed on a driver-private list.
983 	 */
984 	page->pgmap = pgmap;
985 	page->zone_device_data = NULL;
986 
987 	/*
988 	 * Mark the block movable so that blocks are reserved for
989 	 * movable at startup. This will force kernel allocations
990 	 * to reserve their blocks rather than leaking throughout
991 	 * the address space during boot when many long-lived
992 	 * kernel allocations are made.
993 	 *
994 	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
995 	 * because this is done early in section_activate()
996 	 */
997 	if (pageblock_aligned(pfn)) {
998 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
999 		cond_resched();
1000 	}
1001 
1002 	/*
1003 	 * ZONE_DEVICE pages are released directly to the driver page allocator
1004 	 * which will set the page count to 1 when allocating the page.
1005 	 */
1006 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1007 	    pgmap->type == MEMORY_DEVICE_COHERENT)
1008 		set_page_count(page, 0);
1009 }
1010 
1011 /*
1012  * With compound page geometry and when struct pages are stored in RAM,
1013  * most tail pages are reused. Consequently, the amount of unique struct
1014  * pages to initialize is a lot smaller than the total amount of struct
1015  * pages being mapped. This is a paired / mild layering violation with
1016  * explicit knowledge of how the sparse_vmemmap internals handle compound
1017  * pages in the absence of an altmap. See vmemmap_populate_compound_pages().
1018  */
1019 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
1020 					      struct dev_pagemap *pgmap)
1021 {
1022 	if (!vmemmap_can_optimize(altmap, pgmap))
1023 		return pgmap_vmemmap_nr(pgmap);
1024 
1025 	return 2 * (PAGE_SIZE / sizeof(struct page));
1026 }
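
/*
 * For instance, with 4 KiB pages and a 64-byte struct page (an assumption,
 * not guaranteed by every config), the optimized case above returns
 * 2 * (4096 / 64) = 128: only the head page's and the first tail page's
 * worth of struct pages are unique; all further vmemmap pages of the
 * compound page reuse the same data (see vmemmap_populate_compound_pages()).
 */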
1027 
1028 static void __ref memmap_init_compound(struct page *head,
1029 				       unsigned long head_pfn,
1030 				       unsigned long zone_idx, int nid,
1031 				       struct dev_pagemap *pgmap,
1032 				       unsigned long nr_pages)
1033 {
1034 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
1035 	unsigned int order = pgmap->vmemmap_shift;
1036 
1037 	__SetPageHead(head);
1038 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1039 		struct page *page = pfn_to_page(pfn);
1040 
1041 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1042 		prep_compound_tail(head, pfn - head_pfn);
1043 		set_page_count(page, 0);
1044 
1045 		/*
1046 		 * The first tail page stores important compound page info.
1047 		 * Call prep_compound_head() after the first tail page has
1048 		 * been initialized, to not have the data overwritten.
1049 		 */
1050 		if (pfn == head_pfn + 1)
1051 			prep_compound_head(head, order);
1052 	}
1053 }
1054 
1055 void __ref memmap_init_zone_device(struct zone *zone,
1056 				   unsigned long start_pfn,
1057 				   unsigned long nr_pages,
1058 				   struct dev_pagemap *pgmap)
1059 {
1060 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
1061 	struct pglist_data *pgdat = zone->zone_pgdat;
1062 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1063 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1064 	unsigned long zone_idx = zone_idx(zone);
1065 	unsigned long start = jiffies;
1066 	int nid = pgdat->node_id;
1067 
1068 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1069 		return;
1070 
1071 	/*
1072 	 * The call to memmap_init should have already taken care
1073 	 * of the pages reserved for the memmap, so we can just jump to
1074 	 * the end of that region and start processing the device pages.
1075 	 */
1076 	if (altmap) {
1077 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1078 		nr_pages = end_pfn - start_pfn;
1079 	}
1080 
1081 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1082 		struct page *page = pfn_to_page(pfn);
1083 
1084 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1085 
1086 		if (pfns_per_compound == 1)
1087 			continue;
1088 
1089 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1090 				     compound_nr_pages(altmap, pgmap));
1091 	}
1092 
1093 	pr_debug("%s initialised %lu pages in %ums\n", __func__,
1094 		nr_pages, jiffies_to_msecs(jiffies - start));
1095 }
1096 #endif
1097 
1098 /*
1099  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1100  * because it is sized independently of the architecture. Unlike the other zones,
1101  * the starting point for ZONE_MOVABLE is not fixed. It may be different
1102  * in each node depending on the size of each node and how evenly kernelcore
1103  * is distributed. This helper function adjusts the zone ranges
1104  * provided by the architecture for a given node by using the end of the
1105  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1106  * zones within a node are ordered in monotonically increasing memory addresses.
1107  */
1108 static void __init adjust_zone_range_for_zone_movable(int nid,
1109 					unsigned long zone_type,
1110 					unsigned long node_start_pfn,
1111 					unsigned long node_end_pfn,
1112 					unsigned long *zone_start_pfn,
1113 					unsigned long *zone_end_pfn)
1114 {
1115 	/* Only adjust if ZONE_MOVABLE is on this node */
1116 	if (zone_movable_pfn[nid]) {
1117 		/* Size ZONE_MOVABLE */
1118 		if (zone_type == ZONE_MOVABLE) {
1119 			*zone_start_pfn = zone_movable_pfn[nid];
1120 			*zone_end_pfn = min(node_end_pfn,
1121 				arch_zone_highest_possible_pfn[movable_zone]);
1122 
1123 		/* Adjust for ZONE_MOVABLE starting within this range */
1124 		} else if (!mirrored_kernelcore &&
1125 			*zone_start_pfn < zone_movable_pfn[nid] &&
1126 			*zone_end_pfn > zone_movable_pfn[nid]) {
1127 			*zone_end_pfn = zone_movable_pfn[nid];
1128 
1129 		/* Check if this whole range is within ZONE_MOVABLE */
1130 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
1131 			*zone_start_pfn = *zone_end_pfn;
1132 	}
1133 }
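
/*
 * Example (hypothetical PFNs, assuming the node's memory lies within the
 * highest kernel zone): a node spanning [1048576, 2097152) with
 * zone_movable_pfn[nid] == 1835008 gets its ZONE_NORMAL clipped to
 * [1048576, 1835008) by the second branch, while the first branch sizes
 * ZONE_MOVABLE as [1835008, 2097152); a zone starting entirely at or above
 * 1835008 is emptied by the last branch.
 */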
1134 
1135 /*
1136  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1137  * then all holes in the requested range will be accounted for.
1138  */
1139 unsigned long __init __absent_pages_in_range(int nid,
1140 				unsigned long range_start_pfn,
1141 				unsigned long range_end_pfn)
1142 {
1143 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
1144 	unsigned long start_pfn, end_pfn;
1145 	int i;
1146 
1147 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1148 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1149 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1150 		nr_absent -= end_pfn - start_pfn;
1151 	}
1152 	return nr_absent;
1153 }
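
/*
 * For example, for a request of [0, 1000) on a node whose memblock ranges
 * cover [0, 600) and [800, 1000): nr_absent starts at 1000, the two
 * intersections subtract 600 and 200 pages, and 200 absent pages remain for
 * the hole at [600, 800).
 */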
1154 
1155 /**
1156  * absent_pages_in_range - Return number of page frames in holes within a range
1157  * @start_pfn: The start PFN to start searching for holes
1158  * @end_pfn: The end PFN to stop searching for holes
1159  *
1160  * Return: the number of page frames in memory holes within a range.
1161  */
1162 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
1163 							unsigned long end_pfn)
1164 {
1165 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1166 }
1167 
1168 /* Return the number of page frames in holes in a zone on a node */
1169 static unsigned long __init zone_absent_pages_in_node(int nid,
1170 					unsigned long zone_type,
1171 					unsigned long zone_start_pfn,
1172 					unsigned long zone_end_pfn)
1173 {
1174 	unsigned long nr_absent;
1175 
1176 	/* zone is empty, we don't have any absent pages */
1177 	if (zone_start_pfn == zone_end_pfn)
1178 		return 0;
1179 
1180 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1181 
1182 	/*
1183 	 * ZONE_MOVABLE handling.
1184 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1185 	 * and vice versa.
1186 	 */
1187 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1188 		unsigned long start_pfn, end_pfn;
1189 		struct memblock_region *r;
1190 
1191 		for_each_mem_region(r) {
1192 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
1193 					  zone_start_pfn, zone_end_pfn);
1194 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
1195 					zone_start_pfn, zone_end_pfn);
1196 
1197 			if (zone_type == ZONE_MOVABLE &&
1198 			    memblock_is_mirror(r))
1199 				nr_absent += end_pfn - start_pfn;
1200 
1201 			if (zone_type == ZONE_NORMAL &&
1202 			    !memblock_is_mirror(r))
1203 				nr_absent += end_pfn - start_pfn;
1204 		}
1205 	}
1206 
1207 	return nr_absent;
1208 }
1209 
1210 /*
1211  * Return the number of pages a zone spans in a node, including holes
1212  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1213  */
1214 static unsigned long __init zone_spanned_pages_in_node(int nid,
1215 					unsigned long zone_type,
1216 					unsigned long node_start_pfn,
1217 					unsigned long node_end_pfn,
1218 					unsigned long *zone_start_pfn,
1219 					unsigned long *zone_end_pfn)
1220 {
1221 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1222 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1223 
1224 	/* Get the start and end of the zone */
1225 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1226 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1227 	adjust_zone_range_for_zone_movable(nid, zone_type,
1228 				node_start_pfn, node_end_pfn,
1229 				zone_start_pfn, zone_end_pfn);
1230 
1231 	/* Check that this node has pages within the zone's required range */
1232 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1233 		return 0;
1234 
1235 	/* Move the zone boundaries inside the node if necessary */
1236 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1237 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1238 
1239 	/* Return the spanned pages */
1240 	return *zone_end_pfn - *zone_start_pfn;
1241 }
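
/*
 * For example, with arch_zone_*_possible_pfn[ZONE_DMA32] == [4096, 1048576)
 * and a node spanning [524288, 2097152) (hypothetical values), the clamps
 * above yield a DMA32 span of [524288, 1048576), i.e. 524288 pages;
 * ZONE_NORMAL on the same node would then span [1048576, 2097152).
 */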
1242 
1243 static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1244 {
1245 	struct zone *z;
1246 
1247 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1248 		z->zone_start_pfn = 0;
1249 		z->spanned_pages = 0;
1250 		z->present_pages = 0;
1251 #if defined(CONFIG_MEMORY_HOTPLUG)
1252 		z->present_early_pages = 0;
1253 #endif
1254 	}
1255 
1256 	pgdat->node_spanned_pages = 0;
1257 	pgdat->node_present_pages = 0;
1258 	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1259 }
1260 
1261 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
1262 						unsigned long node_start_pfn,
1263 						unsigned long node_end_pfn)
1264 {
1265 	unsigned long realtotalpages = 0, totalpages = 0;
1266 	enum zone_type i;
1267 
1268 	for (i = 0; i < MAX_NR_ZONES; i++) {
1269 		struct zone *zone = pgdat->node_zones + i;
1270 		unsigned long zone_start_pfn, zone_end_pfn;
1271 		unsigned long spanned, absent;
1272 		unsigned long real_size;
1273 
1274 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1275 						     node_start_pfn,
1276 						     node_end_pfn,
1277 						     &zone_start_pfn,
1278 						     &zone_end_pfn);
1279 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
1280 						   zone_start_pfn,
1281 						   zone_end_pfn);
1282 
1283 		real_size = spanned - absent;
1284 
1285 		if (spanned)
1286 			zone->zone_start_pfn = zone_start_pfn;
1287 		else
1288 			zone->zone_start_pfn = 0;
1289 		zone->spanned_pages = spanned;
1290 		zone->present_pages = real_size;
1291 #if defined(CONFIG_MEMORY_HOTPLUG)
1292 		zone->present_early_pages = real_size;
1293 #endif
1294 
1295 		totalpages += spanned;
1296 		realtotalpages += real_size;
1297 	}
1298 
1299 	pgdat->node_spanned_pages = totalpages;
1300 	pgdat->node_present_pages = realtotalpages;
1301 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1302 }
1303 
1304 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
1305 						unsigned long present_pages)
1306 {
1307 	unsigned long pages = spanned_pages;
1308 
1309 	/*
1310 	 * Provide a more accurate estimation if there are holes within
1311 	 * the zone and SPARSEMEM is in use. If there are holes within the
1312 	 * zone, each populated memory region may cost us one or two extra
1313 	 * memmap pages due to alignment because memmap pages for each
1314 	 * populated region may not be naturally aligned on a page boundary.
1315 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
1316 	 */
1317 	if (spanned_pages > present_pages + (present_pages >> 4) &&
1318 	    IS_ENABLED(CONFIG_SPARSEMEM))
1319 		pages = present_pages;
1320 
1321 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
1322 }
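
/*
 * Example (assuming sizeof(struct page) == 64): a zone spanning 1048576
 * pages (4 GiB of 4 KiB pages) needs PAGE_ALIGN(1048576 * 64) >> PAGE_SHIFT
 * = 16384 pages of memmap. With SPARSEMEM, if the holes exceed 1/16 of the
 * present pages, the estimate is based on present_pages instead.
 */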
1323 
1324 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1325 static void pgdat_init_split_queue(struct pglist_data *pgdat)
1326 {
1327 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1328 
1329 	spin_lock_init(&ds_queue->split_queue_lock);
1330 	INIT_LIST_HEAD(&ds_queue->split_queue);
1331 	ds_queue->split_queue_len = 0;
1332 }
1333 #else
1334 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1335 #endif
1336 
1337 #ifdef CONFIG_COMPACTION
1338 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1339 {
1340 	init_waitqueue_head(&pgdat->kcompactd_wait);
1341 }
1342 #else
1343 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1344 #endif
1345 
1346 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1347 {
1348 	int i;
1349 
1350 	pgdat_resize_init(pgdat);
1351 	pgdat_kswapd_lock_init(pgdat);
1352 
1353 	pgdat_init_split_queue(pgdat);
1354 	pgdat_init_kcompactd(pgdat);
1355 
1356 	init_waitqueue_head(&pgdat->kswapd_wait);
1357 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
1358 
1359 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1360 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
1361 
1362 	pgdat_page_ext_init(pgdat);
1363 	lruvec_init(&pgdat->__lruvec);
1364 }
1365 
1366 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1367 							unsigned long remaining_pages)
1368 {
1369 	atomic_long_set(&zone->managed_pages, remaining_pages);
1370 	zone_set_nid(zone, nid);
1371 	zone->name = zone_names[idx];
1372 	zone->zone_pgdat = NODE_DATA(nid);
1373 	spin_lock_init(&zone->lock);
1374 	zone_seqlock_init(zone);
1375 	zone_pcp_init(zone);
1376 }
1377 
1378 static void __meminit zone_init_free_lists(struct zone *zone)
1379 {
1380 	unsigned int order, t;
1381 	for_each_migratetype_order(order, t) {
1382 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1383 		zone->free_area[order].nr_free = 0;
1384 	}
1385 }
1386 
1387 void __meminit init_currently_empty_zone(struct zone *zone,
1388 					unsigned long zone_start_pfn,
1389 					unsigned long size)
1390 {
1391 	struct pglist_data *pgdat = zone->zone_pgdat;
1392 	int zone_idx = zone_idx(zone) + 1;
1393 
1394 	if (zone_idx > pgdat->nr_zones)
1395 		pgdat->nr_zones = zone_idx;
1396 
1397 	zone->zone_start_pfn = zone_start_pfn;
1398 
1399 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
1400 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
1401 			pgdat->node_id,
1402 			(unsigned long)zone_idx(zone),
1403 			zone_start_pfn, (zone_start_pfn + size));
1404 
1405 	zone_init_free_lists(zone);
1406 	zone->initialized = 1;
1407 }
1408 
1409 #ifndef CONFIG_SPARSEMEM
1410 /*
1411  * Calculate the size of the zone->blockflags rounded to an unsigned long.
1412  * Start by making sure zonesize is a multiple of pageblock_order by rounding
1413  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
1414  * round what is now in bits up to the nearest long in bits, then return it
1415  * in bytes.
1416  */
1417 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1418 {
1419 	unsigned long usemapsize;
1420 
1421 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1422 	usemapsize = roundup(zonesize, pageblock_nr_pages);
1423 	usemapsize = usemapsize >> pageblock_order;
1424 	usemapsize *= NR_PAGEBLOCK_BITS;
1425 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
1426 
1427 	return usemapsize / 8;
1428 }
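
/*
 * Worked example (assuming pageblock_order == 9, i.e. 2 MiB pageblocks with
 * 4 KiB pages, and NR_PAGEBLOCK_BITS == 4): a 1 GiB zone of 262144 pages has
 * 512 pageblocks, needing 512 * 4 = 2048 bits; rounded up to whole longs and
 * converted to bytes, that is 2048 / 8 = 256 bytes of pageblock_flags.
 */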
1429 
1430 static void __ref setup_usemap(struct zone *zone)
1431 {
1432 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1433 					       zone->spanned_pages);
1434 	zone->pageblock_flags = NULL;
1435 	if (usemapsize) {
1436 		zone->pageblock_flags =
1437 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1438 					    zone_to_nid(zone));
1439 		if (!zone->pageblock_flags)
1440 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1441 			      usemapsize, zone->name, zone_to_nid(zone));
1442 	}
1443 }
1444 #else
1445 static inline void setup_usemap(struct zone *zone) {}
1446 #endif /* CONFIG_SPARSEMEM */
1447 
1448 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1449 
1450 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1451 void __init set_pageblock_order(void)
1452 {
1453 	unsigned int order = MAX_ORDER;
1454 
1455 	/* Check that pageblock_nr_pages has not already been setup */
1456 	if (pageblock_order)
1457 		return;
1458 
1459 	/* Don't let pageblocks exceed the maximum allocation granularity. */
1460 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1461 		order = HUGETLB_PAGE_ORDER;
1462 
1463 	/*
1464 	 * Assume the largest contiguous order of interest is a huge page.
1465 	 * This value may be variable depending on boot parameters on IA64 and
1466 	 * powerpc.
1467 	 */
1468 	pageblock_order = order;
1469 }
1470 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1471 
1472 /*
1473  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1474  * is unused as pageblock_order is set at compile-time. See
1475  * include/linux/pageblock-flags.h for the values of pageblock_order based on
1476  * the kernel config
1477  */
1478 void __init set_pageblock_order(void)
1479 {
1480 }
1481 
1482 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1483 
1484 /*
1485  * Set up the zone data structures
1486  * - init pgdat internals
1487  * - init all zones belonging to this node
1488  *
1489  * NOTE: this function is only called during memory hotplug
1490  */
1491 #ifdef CONFIG_MEMORY_HOTPLUG
1492 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1493 {
1494 	int nid = pgdat->node_id;
1495 	enum zone_type z;
1496 	int cpu;
1497 
1498 	pgdat_init_internals(pgdat);
1499 
1500 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
1501 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1502 
1503 	/*
1504 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
1505 	 * Note that kswapd will init kswapd_highest_zoneidx properly
1506 	 * when it starts in the near future.
1507 	 */
1508 	pgdat->nr_zones = 0;
1509 	pgdat->kswapd_order = 0;
1510 	pgdat->kswapd_highest_zoneidx = 0;
1511 	pgdat->node_start_pfn = 0;
1512 	pgdat->node_present_pages = 0;
1513 
1514 	for_each_online_cpu(cpu) {
1515 		struct per_cpu_nodestat *p;
1516 
1517 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1518 		memset(p, 0, sizeof(*p));
1519 	}
1520 
1521 	/*
1522 	 * When memory is hot-added, all the memory is in offline state. So
1523 	 * clear all zones' present_pages and managed_pages because they will
1524 	 * be updated in online_pages() and offline_pages().
1525 	 */
1526 	for (z = 0; z < MAX_NR_ZONES; z++) {
1527 		struct zone *zone = pgdat->node_zones + z;
1528 
1529 		zone->present_pages = 0;
1530 		zone_init_internals(zone, z, nid, 0);
1531 	}
1532 }
1533 #endif
1534 
1535 /*
1536  * Set up the zone data structures:
1537  *   - mark all pages reserved
1538  *   - mark all memory queues empty
1539  *   - clear the memory bitmaps
1540  *
1541  * NOTE: pgdat should get zeroed by caller.
1542  * NOTE: this function is only called during early init.
1543  */
1544 static void __init free_area_init_core(struct pglist_data *pgdat)
1545 {
1546 	enum zone_type j;
1547 	int nid = pgdat->node_id;
1548 
1549 	pgdat_init_internals(pgdat);
1550 	pgdat->per_cpu_nodestats = &boot_nodestats;
1551 
1552 	for (j = 0; j < MAX_NR_ZONES; j++) {
1553 		struct zone *zone = pgdat->node_zones + j;
1554 		unsigned long size, freesize, memmap_pages;
1555 
1556 		size = zone->spanned_pages;
1557 		freesize = zone->present_pages;
1558 
1559 		/*
1560 		 * Adjust freesize so that it accounts for how much memory
1561 		 * is used by this zone for memmap. This affects the watermark
1562 		 * and per-cpu initialisations
1563 		 */
1564 		memmap_pages = calc_memmap_size(size, freesize);
1565 		if (!is_highmem_idx(j)) {
1566 			if (freesize >= memmap_pages) {
1567 				freesize -= memmap_pages;
1568 				if (memmap_pages)
1569 					pr_debug("  %s zone: %lu pages used for memmap\n",
1570 						 zone_names[j], memmap_pages);
1571 			} else
1572 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
1573 					zone_names[j], memmap_pages, freesize);
1574 		}
1575 
1576 		/* Account for reserved pages */
1577 		if (j == 0 && freesize > dma_reserve) {
1578 			freesize -= dma_reserve;
1579 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
1580 		}
1581 
1582 		if (!is_highmem_idx(j))
1583 			nr_kernel_pages += freesize;
1584 		/* Charge for highmem memmap if there are enough kernel pages */
1585 		else if (nr_kernel_pages > memmap_pages * 2)
1586 			nr_kernel_pages -= memmap_pages;
1587 		nr_all_pages += freesize;
1588 
1589 		/*
1590 		 * Set an approximate value for lowmem here, it will be adjusted
1591 		 * when the bootmem allocator frees pages into the buddy system.
1592 		 * And all highmem pages will be managed by the buddy system.
1593 		 */
1594 		zone_init_internals(zone, j, nid, freesize);
1595 
1596 		if (!size)
1597 			continue;
1598 
1599 		setup_usemap(zone);
1600 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1601 	}
1602 }
1603 
1604 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
1605 			  phys_addr_t min_addr, int nid, bool exact_nid)
1606 {
1607 	void *ptr;
1608 
1609 	if (exact_nid)
1610 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1611 						   MEMBLOCK_ALLOC_ACCESSIBLE,
1612 						   nid);
1613 	else
1614 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1615 						 MEMBLOCK_ALLOC_ACCESSIBLE,
1616 						 nid);
1617 
1618 	if (ptr && size > 0)
1619 		page_init_poison(ptr, size);
1620 
1621 	return ptr;
1622 }
1623 
1624 #ifdef CONFIG_FLATMEM
1625 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1626 {
1627 	unsigned long __maybe_unused start = 0;
1628 	unsigned long __maybe_unused offset = 0;
1629 
1630 	/* Skip empty nodes */
1631 	if (!pgdat->node_spanned_pages)
1632 		return;
1633 
1634 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1635 	offset = pgdat->node_start_pfn - start;
1636 	/* ia64 gets its own node_mem_map, before this, without bootmem */
1637 	if (!pgdat->node_mem_map) {
1638 		unsigned long size, end;
1639 		struct page *map;
1640 
1641 		/*
1642 		 * The zone's endpoints aren't required to be MAX_ORDER
1643 		 * aligned, but the node_mem_map endpoints must be MAX_ORDER
1644 		 * aligned in order for the buddy allocator to function correctly.
1645 		 */
1646 		end = pgdat_end_pfn(pgdat);
1647 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
1648 		size =  (end - start) * sizeof(struct page);
1649 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1650 				   pgdat->node_id, false);
1651 		if (!map)
1652 			panic("Failed to allocate %ld bytes for node %d memory map\n",
1653 			      size, pgdat->node_id);
1654 		pgdat->node_mem_map = map + offset;
1655 	}
1656 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1657 				__func__, pgdat->node_id, (unsigned long)pgdat,
1658 				(unsigned long)pgdat->node_mem_map);
1659 #ifndef CONFIG_NUMA
1660 	/*
1661 	 * With no DISCONTIG, the global mem_map is just set as node 0's
1662 	 */
1663 	if (pgdat == NODE_DATA(0)) {
1664 		mem_map = NODE_DATA(0)->node_mem_map;
1665 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1666 			mem_map -= offset;
1667 	}
1668 #endif
1669 }
1670 #else
1671 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1672 #endif /* CONFIG_FLATMEM */
1673 
1674 /**
1675  * get_pfn_range_for_nid - Return the start and end page frames for a node
1676  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1677  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1678  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1679  *
1680  * It returns the start and end page frame of a node based on information
1681  * provided by memblock_set_node(). If called for a node
1682  * with no available memory, a warning is printed and the start and end
1683  * PFNs will be 0.
1684  */
1685 void __init get_pfn_range_for_nid(unsigned int nid,
1686 			unsigned long *start_pfn, unsigned long *end_pfn)
1687 {
1688 	unsigned long this_start_pfn, this_end_pfn;
1689 	int i;
1690 
1691 	*start_pfn = -1UL;
1692 	*end_pfn = 0;
1693 
1694 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1695 		*start_pfn = min(*start_pfn, this_start_pfn);
1696 		*end_pfn = max(*end_pfn, this_end_pfn);
1697 	}
1698 
1699 	if (*start_pfn == -1UL)
1700 		*start_pfn = 0;
1701 }
1702 
1703 static void __init free_area_init_node(int nid)
1704 {
1705 	pg_data_t *pgdat = NODE_DATA(nid);
1706 	unsigned long start_pfn = 0;
1707 	unsigned long end_pfn = 0;
1708 
1709 	/* pg_data_t should be reset to zero when it's allocated */
1710 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1711 
1712 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1713 
1714 	pgdat->node_id = nid;
1715 	pgdat->node_start_pfn = start_pfn;
1716 	pgdat->per_cpu_nodestats = NULL;
1717 
1718 	if (start_pfn != end_pfn) {
1719 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1720 			(u64)start_pfn << PAGE_SHIFT,
1721 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1722 
1723 		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1724 	} else {
1725 		pr_info("Initmem setup node %d as memoryless\n", nid);
1726 
1727 		reset_memoryless_node_totalpages(pgdat);
1728 	}
1729 
1730 	alloc_node_mem_map(pgdat);
1731 	pgdat_set_deferred_range(pgdat);
1732 
1733 	free_area_init_core(pgdat);
1734 	lru_gen_init_pgdat(pgdat);
1735 }
1736 
1737 /* Any regular or high memory on that node? */
1738 static void check_for_memory(pg_data_t *pgdat)
1739 {
1740 	enum zone_type zone_type;
1741 
1742 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1743 		struct zone *zone = &pgdat->node_zones[zone_type];
1744 		if (populated_zone(zone)) {
1745 			if (IS_ENABLED(CONFIG_HIGHMEM))
1746 				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1747 			if (zone_type <= ZONE_NORMAL)
1748 				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1749 			break;
1750 		}
1751 	}
1752 }
1753 
1754 #if MAX_NUMNODES > 1
1755 /*
1756  * Figure out the number of possible node ids.
1757  */
1758 void __init setup_nr_node_ids(void)
1759 {
1760 	unsigned int highest;
1761 
1762 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1763 	nr_node_ids = highest + 1;
1764 }
1765 #endif
1766 
1767 /*
1768  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1769  * such cases we allow max_zone_pfn to be sorted in descending order.
1770  */
1771 static bool arch_has_descending_max_zone_pfns(void)
1772 {
1773 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1774 }
1775 
1776 /**
1777  * free_area_init - Initialise all pg_data_t and zone data
1778  * @max_zone_pfn: an array of max PFNs for each zone
1779  *
1780  * This will call free_area_init_node() for each active node in the system.
1781  * Using the page ranges provided by memblock_set_node(), the size of each
1782  * zone in each node and their holes is calculated. If the maximum PFNs
1783  * of two adjacent zones match, the higher zone is assumed to be empty.
1784  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1785  * that ZONE_DMA32 has no pages. It is also assumed that a zone
1786  * starts where the previous one ended. For example, ZONE_DMA32 starts
1787  * at arch_max_dma_pfn.
1788  */
1789 void __init free_area_init(unsigned long *max_zone_pfn)
1790 {
1791 	unsigned long start_pfn, end_pfn;
1792 	int i, nid, zone;
1793 	bool descending;
1794 
1795 	/* Record where the zone boundaries are */
1796 	memset(arch_zone_lowest_possible_pfn, 0,
1797 				sizeof(arch_zone_lowest_possible_pfn));
1798 	memset(arch_zone_highest_possible_pfn, 0,
1799 				sizeof(arch_zone_highest_possible_pfn));
1800 
1801 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1802 	descending = arch_has_descending_max_zone_pfns();
1803 
1804 	for (i = 0; i < MAX_NR_ZONES; i++) {
1805 		if (descending)
1806 			zone = MAX_NR_ZONES - i - 1;
1807 		else
1808 			zone = i;
1809 
1810 		if (zone == ZONE_MOVABLE)
1811 			continue;
1812 
1813 		end_pfn = max(max_zone_pfn[zone], start_pfn);
1814 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1815 		arch_zone_highest_possible_pfn[zone] = end_pfn;
1816 
1817 		start_pfn = end_pfn;
1818 	}
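	/*
	 * Illustration (hypothetical values, config with ascending zones):
	 * with memblock_start_of_DRAM() at pfn 0 and max_zone_pfn =
	 * { 0x1000, 0x100000, 0x400000 } for DMA/DMA32/NORMAL, the loop
	 * above records DMA as [0, 0x1000), DMA32 as [0x1000, 0x100000)
	 * and NORMAL as [0x100000, 0x400000).
	 */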
1819 
1820 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1821 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1822 	find_zone_movable_pfns_for_nodes();
1823 
1824 	/* Print out the zone ranges */
1825 	pr_info("Zone ranges:\n");
1826 	for (i = 0; i < MAX_NR_ZONES; i++) {
1827 		if (i == ZONE_MOVABLE)
1828 			continue;
1829 		pr_info("  %-8s ", zone_names[i]);
1830 		if (arch_zone_lowest_possible_pfn[i] ==
1831 				arch_zone_highest_possible_pfn[i])
1832 			pr_cont("empty\n");
1833 		else
1834 			pr_cont("[mem %#018Lx-%#018Lx]\n",
1835 				(u64)arch_zone_lowest_possible_pfn[i]
1836 					<< PAGE_SHIFT,
1837 				((u64)arch_zone_highest_possible_pfn[i]
1838 					<< PAGE_SHIFT) - 1);
1839 	}
1840 
1841 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1842 	pr_info("Movable zone start for each node\n");
1843 	for (i = 0; i < MAX_NUMNODES; i++) {
1844 		if (zone_movable_pfn[i])
1845 			pr_info("  Node %d: %#018Lx\n", i,
1846 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1847 	}
1848 
1849 	/*
1850 	 * Print out the early node map, and initialize the
1851 	 * subsection-map relative to active online memory ranges to
1852 	 * enable future "sub-section" extensions of the memory map.
1853 	 */
1854 	pr_info("Early memory node ranges\n");
1855 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1856 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1857 			(u64)start_pfn << PAGE_SHIFT,
1858 			((u64)end_pfn << PAGE_SHIFT) - 1);
1859 		subsection_map_init(start_pfn, end_pfn - start_pfn);
1860 	}
1861 
1862 	/* Initialise every node */
1863 	mminit_verify_pageflags_layout();
1864 	setup_nr_node_ids();
1865 	set_pageblock_order();
1866 
1867 	for_each_node(nid) {
1868 		pg_data_t *pgdat;
1869 
1870 		if (!node_online(nid)) {
1871 			pr_info("Initializing node %d as memoryless\n", nid);
1872 
1873 			/* Allocator not initialized yet */
1874 			pgdat = arch_alloc_nodedata(nid);
1875 			if (!pgdat)
1876 				panic("Cannot allocate %zuB for node %d.\n",
1877 				       sizeof(*pgdat), nid);
1878 			arch_refresh_nodedata(nid, pgdat);
1879 			free_area_init_node(nid);
1880 
1881 			/*
1882 			 * We do not want to confuse userspace with sysfs
1883 			 * files/directories for a node without any memory
1884 			 * attached to it, so this node is not marked as
1885 			 * N_MEMORY and not marked online so that no sysfs
1886 			 * hierarchy will be created via register_one_node for
1887 			 * it. The pgdat will get fully initialized by
1888 			 * hotadd_init_pgdat() when memory is hotplugged into
1889 			 * this node.
1890 			 */
1891 			continue;
1892 		}
1893 
1894 		pgdat = NODE_DATA(nid);
1895 		free_area_init_node(nid);
1896 
1897 		/* Any memory on that node */
1898 		if (pgdat->node_present_pages)
1899 			node_set_state(nid, N_MEMORY);
1900 		check_for_memory(pgdat);
1901 	}
1902 
1903 	memmap_init();
1904 
1905 	/* disable hash distribution for systems with a single node */
1906 	fixup_hashdist();
1907 }
1908 
1909 /**
1910  * node_map_pfn_alignment - determine the maximum internode alignment
1911  *
1912  * This function should be called after node map is populated and sorted.
1913  * It calculates the maximum power of two alignment which can distinguish
1914  * all the nodes.
1915  *
1916  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1917  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1918  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
1919  * shifted, 1GiB is enough and this function will indicate so.
1920  *
1921  * This is used to test whether pfn -> nid mapping of the chosen memory
1922  * model has fine enough granularity to avoid incorrect mapping for the
1923  * populated node map.
1924  *
1925  * Return: the determined alignment in pfn's.  0 if there is no alignment
1926  * requirement (single node).
1927  */
1928 unsigned long __init node_map_pfn_alignment(void)
1929 {
1930 	unsigned long accl_mask = 0, last_end = 0;
1931 	unsigned long start, end, mask;
1932 	int last_nid = NUMA_NO_NODE;
1933 	int i, nid;
1934 
1935 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1936 		if (!start || last_nid < 0 || last_nid == nid) {
1937 			last_nid = nid;
1938 			last_end = end;
1939 			continue;
1940 		}
1941 
1942 		/*
1943 		 * Start with a mask granular enough to pin-point to the
1944 		 * start pfn and tick off bits one-by-one until it becomes
1945 		 * too coarse to separate the current node from the last.
1946 		 */
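		/*
		 * Illustration (assuming 4 KiB pages): two 1 GiB nodes offset
		 * by 256 MiB meet at pfn 0x50000; its lowest set bit gives a
		 * 0x10000-pfn (256 MiB) mask, which already separates the two
		 * nodes, so the loop below leaves it unchanged.
		 */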
1947 		mask = ~((1 << __ffs(start)) - 1);
1948 		while (mask && last_end <= (start & (mask << 1)))
1949 			mask <<= 1;
1950 
1951 		/* accumulate all internode masks */
1952 		accl_mask |= mask;
1953 	}
1954 
1955 	/* convert mask to number of pages */
1956 	return ~accl_mask + 1;
1957 }
1958 
1959 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1960 static void __init deferred_free_range(unsigned long pfn,
1961 				       unsigned long nr_pages)
1962 {
1963 	struct page *page;
1964 	unsigned long i;
1965 
1966 	if (!nr_pages)
1967 		return;
1968 
1969 	page = pfn_to_page(pfn);
1970 
1971 	/* Free a large naturally-aligned chunk if possible */
1972 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1973 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1974 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1975 		__free_pages_core(page, MAX_ORDER);
1976 		return;
1977 	}
1978 
1979 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1980 		if (pageblock_aligned(pfn))
1981 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1982 		__free_pages_core(page, 0);
1983 	}
1984 }
1985 
1986 /* Completion tracking for deferred_init_memmap() threads */
1987 static atomic_t pgdat_init_n_undone __initdata;
1988 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1989 
1990 static inline void __init pgdat_init_report_one_done(void)
1991 {
1992 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1993 		complete(&pgdat_init_all_done_comp);
1994 }
1995 
1996 /*
1997  * Returns true if page needs to be initialized or freed to buddy allocator.
1998  *
1999  * We check if a current MAX_ORDER block is valid by only checking the validity
2000  * We check whether the current MAX_ORDER block is valid by checking only
2001  * the validity of the head pfn.
2002 static inline bool __init deferred_pfn_valid(unsigned long pfn)
2003 {
2004 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
2005 		return false;
2006 	return true;
2007 }
2008 
2009 /*
2010  * Free pages to buddy allocator. Try to free aligned pages in
2011  * MAX_ORDER_NR_PAGES sizes.
2012  */
2013 static void __init deferred_free_pages(unsigned long pfn,
2014 				       unsigned long end_pfn)
2015 {
2016 	unsigned long nr_free = 0;
2017 
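	/*
	 * nr_free tracks the length of the current run of contiguous free
	 * pfns; the run is flushed to the allocator whenever an invalid pfn
	 * or a MAX_ORDER boundary is reached.
	 */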
2018 	for (; pfn < end_pfn; pfn++) {
2019 		if (!deferred_pfn_valid(pfn)) {
2020 			deferred_free_range(pfn - nr_free, nr_free);
2021 			nr_free = 0;
2022 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2023 			deferred_free_range(pfn - nr_free, nr_free);
2024 			nr_free = 1;
2025 		} else {
2026 			nr_free++;
2027 		}
2028 	}
2029 	/* Free the last block of pages to the allocator */
2030 	deferred_free_range(pfn - nr_free, nr_free);
2031 }
2032 
2033 /*
2034  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
2035  * by performing them only once every MAX_ORDER_NR_PAGES.
2036  * Return number of pages initialized.
2037  */
2038 static unsigned long  __init deferred_init_pages(struct zone *zone,
2039 						 unsigned long pfn,
2040 						 unsigned long end_pfn)
2041 {
2042 	int nid = zone_to_nid(zone);
2043 	unsigned long nr_pages = 0;
2044 	int zid = zone_idx(zone);
2045 	struct page *page = NULL;
2046 
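	/*
	 * "page" is carried across loop iterations so that pfn_to_page() is
	 * only called again after an invalid pfn or at a MAX_ORDER boundary,
	 * rather than once per pfn.
	 */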
2047 	for (; pfn < end_pfn; pfn++) {
2048 		if (!deferred_pfn_valid(pfn)) {
2049 			page = NULL;
2050 			continue;
2051 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2052 			page = pfn_to_page(pfn);
2053 		} else {
2054 			page++;
2055 		}
2056 		__init_single_page(page, pfn, zid, nid);
2057 		nr_pages++;
2058 	}
2059 	return nr_pages;
2060 }
2061 
2062 /*
2063  * This function is meant to pre-load the iterator for the zone init.
2064  * Specifically, it walks through the ranges until we are caught up to the
2065  * first_init_pfn value and exits there. If we never encounter the value,
2066  * we return false, indicating there are no valid ranges left.
2067  */
2068 static bool __init
2069 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2070 				    unsigned long *spfn, unsigned long *epfn,
2071 				    unsigned long first_init_pfn)
2072 {
2073 	u64 j;
2074 
2075 	/*
2076 	 * Start out by walking through the ranges in this zone that have
2077 	 * already been initialized. We don't need to do anything with them,
2078 	 * so we simply skip past them here.
2079 	 */
2080 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2081 		if (*epfn <= first_init_pfn)
2082 			continue;
2083 		if (*spfn < first_init_pfn)
2084 			*spfn = first_init_pfn;
2085 		*i = j;
2086 		return true;
2087 	}
2088 
2089 	return false;
2090 }
2091 
2092 /*
2093  * Initialize and free pages. We do it in two loops: first we initialize
2094  * struct page, then free to buddy allocator, because while we are
2095  * freeing pages we can access pages that are ahead (computing buddy
2096  * page in __free_one_page()).
2097  *
2098  * To try to keep some memory in the cache, the loop is broken along
2099  * max page order boundaries. This way we will not cause
2100  * any issues with the buddy page computation.
2101  */
2102 static unsigned long __init
2103 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2104 		       unsigned long *end_pfn)
2105 {
2106 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
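	/*
	 * mo_pfn is the next MAX_ORDER-aligned boundary strictly above
	 * *start_pfn; both loops below stop there, so a single call covers
	 * at most MAX_ORDER_NR_PAGES pages.
	 */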
2107 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2108 	unsigned long nr_pages = 0;
2109 	u64 j = *i;
2110 
2111 	/* First we loop through and initialize the page values */
2112 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2113 		unsigned long t;
2114 
2115 		if (mo_pfn <= *start_pfn)
2116 			break;
2117 
2118 		t = min(mo_pfn, *end_pfn);
2119 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2120 
2121 		if (mo_pfn < *end_pfn) {
2122 			*start_pfn = mo_pfn;
2123 			break;
2124 		}
2125 	}
2126 
2127 	/* Reset values and now loop through freeing pages as needed */
2128 	swap(j, *i);
2129 
2130 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2131 		unsigned long t;
2132 
2133 		if (mo_pfn <= spfn)
2134 			break;
2135 
2136 		t = min(mo_pfn, epfn);
2137 		deferred_free_pages(spfn, t);
2138 
2139 		if (mo_pfn <= epfn)
2140 			break;
2141 	}
2142 
2143 	return nr_pages;
2144 }
2145 
2146 static void __init
2147 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2148 			   void *arg)
2149 {
2150 	unsigned long spfn, epfn;
2151 	struct zone *zone = arg;
2152 	u64 i;
2153 
2154 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2155 
2156 	/*
2157 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2158 	 * can avoid introducing any issues with the buddy allocator.
2159 	 */
2160 	while (spfn < end_pfn) {
2161 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2162 		cond_resched();
2163 	}
2164 }
2165 
2166 /* An arch may override for more concurrency. */
2167 __weak int __init
2168 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2169 {
2170 	return 1;
2171 }
2172 
2173 /* Initialise remaining memory on a node */
2174 static int __init deferred_init_memmap(void *data)
2175 {
2176 	pg_data_t *pgdat = data;
2177 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2178 	unsigned long spfn = 0, epfn = 0;
2179 	unsigned long first_init_pfn, flags;
2180 	unsigned long start = jiffies;
2181 	struct zone *zone;
2182 	int zid, max_threads;
2183 	u64 i;
2184 
2185 	/* Bind memory initialisation thread to a local node if possible */
2186 	if (!cpumask_empty(cpumask))
2187 		set_cpus_allowed_ptr(current, cpumask);
2188 
2189 	pgdat_resize_lock(pgdat, &flags);
2190 	first_init_pfn = pgdat->first_deferred_pfn;
2191 	if (first_init_pfn == ULONG_MAX) {
2192 		pgdat_resize_unlock(pgdat, &flags);
2193 		pgdat_init_report_one_done();
2194 		return 0;
2195 	}
2196 
2197 	/* Sanity check boundaries */
2198 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2199 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2200 	pgdat->first_deferred_pfn = ULONG_MAX;
2201 
2202 	/*
2203 	 * Once we unlock here, the zone cannot be grown any more. Thus, if an
2204 	 * interrupt thread must allocate this early in boot, the zone must be
2205 	 * pre-grown before deferred page initialization starts.
2206 	 */
2207 	pgdat_resize_unlock(pgdat, &flags);
2208 
2209 	/* Only the highest zone is deferred so find it */
2210 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2211 		zone = pgdat->node_zones + zid;
2212 		if (first_init_pfn < zone_end_pfn(zone))
2213 			break;
2214 	}
2215 
2216 	/* If the zone is empty, somebody else may have cleared out the zone */
2217 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2218 						 first_init_pfn))
2219 		goto zone_empty;
2220 
2221 	max_threads = deferred_page_init_max_threads(cpumask);
2222 
2223 	while (spfn < epfn) {
2224 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2225 		struct padata_mt_job job = {
2226 			.thread_fn   = deferred_init_memmap_chunk,
2227 			.fn_arg      = zone,
2228 			.start       = spfn,
2229 			.size        = epfn_align - spfn,
2230 			.align       = PAGES_PER_SECTION,
2231 			.min_chunk   = PAGES_PER_SECTION,
2232 			.max_threads = max_threads,
2233 		};
2234 
2235 		padata_do_multithreaded(&job);
2236 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2237 						    epfn_align);
2238 	}
2239 zone_empty:
2240 	/* Sanity check that the next zone really is unpopulated */
2241 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2242 
2243 	pr_info("node %d deferred pages initialised in %ums\n",
2244 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2245 
2246 	pgdat_init_report_one_done();
2247 	return 0;
2248 }
2249 
2250 /*
2251  * If this zone has deferred pages, try to grow it by initializing enough
2252  * deferred pages to satisfy the allocation specified by order, rounded up to
2253  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2254  * of SECTION_SIZE bytes by initializing struct pages in increments of
2255  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2256  *
2257  * Return true when zone was grown, otherwise return false. We return true even
2258  * when we grow less than requested, to let the caller decide if there are
2259  * enough pages to satisfy the allocation.
2260  *
2261  * Note: We use noinline because this function is needed only during boot, and
2262  * it is called from a __ref function _deferred_grow_zone. This way we are
2263  * making sure that it is not inlined into the permanent text section.
2264  */
2265 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2266 {
2267 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2268 	pg_data_t *pgdat = zone->zone_pgdat;
2269 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2270 	unsigned long spfn, epfn, flags;
2271 	unsigned long nr_pages = 0;
2272 	u64 i;
2273 
2274 	/* Only the last zone may have deferred pages */
2275 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2276 		return false;
2277 
2278 	pgdat_resize_lock(pgdat, &flags);
2279 
2280 	/*
2281 	 * If someone grew this zone while we were waiting for the spinlock, return
2282 	 * true, as there might be enough pages already.
2283 	 */
2284 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2285 		pgdat_resize_unlock(pgdat, &flags);
2286 		return true;
2287 	}
2288 
2289 	/* If the zone is empty, somebody else may have cleared out the zone */
2290 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2291 						 first_deferred_pfn)) {
2292 		pgdat->first_deferred_pfn = ULONG_MAX;
2293 		pgdat_resize_unlock(pgdat, &flags);
2294 		/* Retry only once. */
2295 		return first_deferred_pfn != ULONG_MAX;
2296 	}
2297 
2298 	/*
2299 	 * Initialize and free pages in MAX_ORDER sized increments so
2300 	 * that we can avoid introducing any issues with the buddy
2301 	 * allocator.
2302 	 */
2303 	while (spfn < epfn) {
2304 		/* update our first deferred PFN for this section */
2305 		first_deferred_pfn = spfn;
2306 
2307 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2308 		touch_nmi_watchdog();
2309 
2310 		/* We should only stop along section boundaries */
2311 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2312 			continue;
2313 
2314 		/* If our quota has been met we can stop here */
2315 		if (nr_pages >= nr_pages_needed)
2316 			break;
2317 	}
2318 
2319 	pgdat->first_deferred_pfn = spfn;
2320 	pgdat_resize_unlock(pgdat, &flags);
2321 
2322 	return nr_pages > 0;
2323 }
2324 
2325 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2326 
2327 #ifdef CONFIG_CMA
2328 void __init init_cma_reserved_pageblock(struct page *page)
2329 {
2330 	unsigned i = pageblock_nr_pages;
2331 	struct page *p = page;
2332 
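	/* Clear PG_reserved and reset the refcount on every page in the block */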
2333 	do {
2334 		__ClearPageReserved(p);
2335 		set_page_count(p, 0);
2336 	} while (++p, --i);
2337 
2338 	set_pageblock_migratetype(page, MIGRATE_CMA);
2339 	set_page_refcounted(page);
2340 	__free_pages(page, pageblock_order);
2341 
2342 	adjust_managed_page_count(page, pageblock_nr_pages);
2343 	page_zone(page)->cma_pages += pageblock_nr_pages;
2344 }
2345 #endif
2346 
2347 void set_zone_contiguous(struct zone *zone)
2348 {
2349 	unsigned long block_start_pfn = zone->zone_start_pfn;
2350 	unsigned long block_end_pfn;
2351 
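	/*
	 * Walk the zone one pageblock at a time; if any pageblock cannot be
	 * mapped back to a valid page range within this zone, the zone has a
	 * hole and is left marked non-contiguous.
	 */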
2352 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2353 	for (; block_start_pfn < zone_end_pfn(zone);
2354 			block_start_pfn = block_end_pfn,
2355 			 block_end_pfn += pageblock_nr_pages) {
2356 
2357 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2358 
2359 		if (!__pageblock_pfn_to_page(block_start_pfn,
2360 					     block_end_pfn, zone))
2361 			return;
2362 		cond_resched();
2363 	}
2364 
2365 	/* No holes were found, so the zone is contiguous */
2366 	zone->contiguous = true;
2367 }
2368 
2369 void __init page_alloc_init_late(void)
2370 {
2371 	struct zone *zone;
2372 	int nid;
2373 
2374 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2375 
2376 	/* There will be num_node_state(N_MEMORY) threads */
2377 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2378 	for_each_node_state(nid, N_MEMORY) {
2379 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2380 	}
2381 
2382 	/* Block until all are initialised */
2383 	wait_for_completion(&pgdat_init_all_done_comp);
2384 
2385 	/*
2386 	 * We initialized the rest of the deferred pages.  Permanently disable
2387 	 * on-demand struct page initialization.
2388 	 */
2389 	static_branch_disable(&deferred_pages);
2390 
2391 	/* Reinit limits that are based on free pages after the kernel is up */
2392 	files_maxfiles_init();
2393 #endif
2394 
2395 	buffer_init();
2396 
2397 	/* Discard memblock private memory */
2398 	memblock_discard();
2399 
2400 	for_each_node_state(nid, N_MEMORY)
2401 		shuffle_free_memory(NODE_DATA(nid));
2402 
2403 	for_each_populated_zone(zone)
2404 		set_zone_contiguous(zone);
2405 
2406 	/* Initialize page ext after all struct pages are initialized. */
2407 	if (deferred_struct_pages)
2408 		page_ext_init();
2409 
2410 	page_alloc_sysctl_init();
2411 }
2412 
2413 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2414 /*
2415  * Returns the number of pages that the arch has reserved but
2416  * that are not known to alloc_large_system_hash().
2417  */
2418 static unsigned long __init arch_reserved_kernel_pages(void)
2419 {
2420 	return 0;
2421 }
2422 #endif
2423 
2424 /*
2425  * Adaptive scale is meant to reduce sizes of hash tables on large memory
2426  * machines. As memory size is increased, the scale is also increased, but
2427  * at a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2428  * quadruples the scale is increased by one, which means the size of hash table
2429  * only doubles, instead of quadrupling as well.
2430  * Because 32-bit systems cannot have large physical memory, where this scaling
2431  * makes sense, it is disabled on such platforms.
2432  */
2433 #if __BITS_PER_LONG > 32
2434 #define ADAPT_SCALE_BASE	(64ul << 30)
2435 #define ADAPT_SCALE_SHIFT	2
2436 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2437 #endif
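/*
 * With 4 KiB pages ADAPT_SCALE_NPAGES works out to 16M pages; e.g. a 256G
 * machine gets one extra scale step, so its tables come out roughly twice,
 * not four times, the size of those on a 64G machine.
 */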
2438 
2439 /*
2440  * allocate a large system hash table from memblock
2441  * - it is assumed that the hash table must contain an exact power-of-2
2442  *   quantity of entries
2443  * - limit is the number of hash buckets, not the total allocation size
2444  */
2445 void *__init alloc_large_system_hash(const char *tablename,
2446 				     unsigned long bucketsize,
2447 				     unsigned long numentries,
2448 				     int scale,
2449 				     int flags,
2450 				     unsigned int *_hash_shift,
2451 				     unsigned int *_hash_mask,
2452 				     unsigned long low_limit,
2453 				     unsigned long high_limit)
2454 {
2455 	unsigned long long max = high_limit;
2456 	unsigned long log2qty, size;
2457 	void *table;
2458 	gfp_t gfp_flags;
2459 	bool virt;
2460 	bool huge;
2461 
2462 	/* allow the kernel cmdline to have a say */
2463 	if (!numentries) {
2464 		/* round applicable memory size up to nearest megabyte */
2465 		numentries = nr_kernel_pages;
2466 		numentries -= arch_reserved_kernel_pages();
2467 
2468 		/* The rounding isn't necessary when PAGE_SIZE >= 1MB */
2469 		if (PAGE_SIZE < SZ_1M)
2470 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2471 
2472 #if __BITS_PER_LONG > 32
2473 		if (!high_limit) {
2474 			unsigned long adapt;
2475 
2476 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2477 			     adapt <<= ADAPT_SCALE_SHIFT)
2478 				scale++;
2479 		}
2480 #endif
2481 
2482 		/* limit to 1 bucket per 2^scale bytes of low memory */
2483 		if (scale > PAGE_SHIFT)
2484 			numentries >>= (scale - PAGE_SHIFT);
2485 		else
2486 			numentries <<= (PAGE_SHIFT - scale);
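		/*
		 * Illustration (assumed values): with about 4G of low memory
		 * (0x100000 4 KiB pages) and scale == 17, this works out to
		 * 0x100000 >> 5 == 32768 buckets before the rounding below.
		 */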
2487 
2488 		/* Make sure we've got at least a 0-order allocation. */
2489 		if (unlikely(flags & HASH_SMALL)) {
2490 			/* Makes no sense without HASH_EARLY */
2491 			WARN_ON(!(flags & HASH_EARLY));
2492 			if (!(numentries >> *_hash_shift)) {
2493 				numentries = 1UL << *_hash_shift;
2494 				BUG_ON(!numentries);
2495 			}
2496 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2497 			numentries = PAGE_SIZE / bucketsize;
2498 	}
2499 	numentries = roundup_pow_of_two(numentries);
2500 
2501 	/* limit allocation size to 1/16 total memory by default */
2502 	if (max == 0) {
2503 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2504 		do_div(max, bucketsize);
2505 	}
2506 	max = min(max, 0x80000000ULL);
2507 
2508 	if (numentries < low_limit)
2509 		numentries = low_limit;
2510 	if (numentries > max)
2511 		numentries = max;
2512 
2513 	log2qty = ilog2(numentries);
2514 
2515 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
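	/*
	 * Try the allocation at the requested size; on failure, retry with
	 * half as many entries until it succeeds or the table would no
	 * longer span more than a page.
	 */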
2516 	do {
2517 		virt = false;
2518 		size = bucketsize << log2qty;
2519 		if (flags & HASH_EARLY) {
2520 			if (flags & HASH_ZERO)
2521 				table = memblock_alloc(size, SMP_CACHE_BYTES);
2522 			else
2523 				table = memblock_alloc_raw(size,
2524 							   SMP_CACHE_BYTES);
2525 		} else if (get_order(size) > MAX_ORDER || hashdist) {
2526 			table = vmalloc_huge(size, gfp_flags);
2527 			virt = true;
2528 			if (table)
2529 				huge = is_vm_area_hugepages(table);
2530 		} else {
2531 			/*
2532 			 * If bucketsize is not a power of two, we may need to
2533 			 * free some pages at the end of the hash table, which
2534 			 * alloc_pages_exact() does automatically.
2535 			 */
2536 			table = alloc_pages_exact(size, gfp_flags);
2537 			kmemleak_alloc(table, size, 1, gfp_flags);
2538 		}
2539 	} while (!table && size > PAGE_SIZE && --log2qty);
2540 
2541 	if (!table)
2542 		panic("Failed to allocate %s hash table\n", tablename);
2543 
2544 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2545 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2546 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2547 
2548 	if (_hash_shift)
2549 		*_hash_shift = log2qty;
2550 	if (_hash_mask)
2551 		*_hash_mask = (1 << log2qty) - 1;
2552 
2553 	return table;
2554 }
2555 
2556 /**
2557  * set_dma_reserve - set the specified number of pages reserved in the first zone
2558  * @new_dma_reserve: The number of pages to mark reserved
2559  *
2560  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2561  * In the DMA zone, a significant percentage may be consumed by the kernel image
2562  * and other unfreeable allocations, which can skew the watermarks badly. This
2563  * function may optionally be used to account for unfreeable pages in the
2564  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2565  * smaller per-cpu batchsize.
2566  */
2567 void __init set_dma_reserve(unsigned long new_dma_reserve)
2568 {
2569 	dma_reserve = new_dma_reserve;
2570 }
2571 
2572 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2573 							unsigned int order)
2574 {
2575 	if (!early_page_initialised(pfn))
2576 		return;
2577 	if (!kmsan_memblock_free_pages(page, order)) {
2578 		/* KMSAN will take care of these pages. */
2579 		return;
2580 	}
2581 	__free_pages_core(page, order);
2582 }
2583 
2584 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2585 EXPORT_SYMBOL(init_on_alloc);
2586 
2587 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2588 EXPORT_SYMBOL(init_on_free);
2589 
2590 static bool _init_on_alloc_enabled_early __read_mostly
2591 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2592 static int __init early_init_on_alloc(char *buf)
2593 {
2594 
2595 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2596 }
2597 early_param("init_on_alloc", early_init_on_alloc);
2598 
2599 static bool _init_on_free_enabled_early __read_mostly
2600 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2601 static int __init early_init_on_free(char *buf)
2602 {
2603 	return kstrtobool(buf, &_init_on_free_enabled_early);
2604 }
2605 early_param("init_on_free", early_init_on_free);
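/*
 * For example, booting with "init_on_alloc=1 init_on_free=0" requests
 * zero-initialisation at allocation time only; the corresponding static
 * keys are flipped later in mem_debugging_and_hardening_init().
 */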
2606 
2607 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2608 
2609 /*
2610  * Enable static keys related to various memory debugging and hardening options.
2611  * Some override others, and depend on early params that are evaluated in the
2612  * order of appearance. So we need to first gather the full picture of what was
2613  * enabled, and then make decisions.
2614  */
2615 static void __init mem_debugging_and_hardening_init(void)
2616 {
2617 	bool page_poisoning_requested = false;
2618 	bool want_check_pages = false;
2619 
2620 #ifdef CONFIG_PAGE_POISONING
2621 	/*
2622 	 * Page poisoning acts as debug page alloc for arches that don't
2623 	 * support it. If either of those options is enabled, enable poisoning.
2624 	 */
2625 	if (page_poisoning_enabled() ||
2626 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2627 	      debug_pagealloc_enabled())) {
2628 		static_branch_enable(&_page_poisoning_enabled);
2629 		page_poisoning_requested = true;
2630 		want_check_pages = true;
2631 	}
2632 #endif
2633 
2634 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2635 	    page_poisoning_requested) {
2636 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2637 			"will take precedence over init_on_alloc and init_on_free\n");
2638 		_init_on_alloc_enabled_early = false;
2639 		_init_on_free_enabled_early = false;
2640 	}
2641 
2642 	if (_init_on_alloc_enabled_early) {
2643 		want_check_pages = true;
2644 		static_branch_enable(&init_on_alloc);
2645 	} else {
2646 		static_branch_disable(&init_on_alloc);
2647 	}
2648 
2649 	if (_init_on_free_enabled_early) {
2650 		want_check_pages = true;
2651 		static_branch_enable(&init_on_free);
2652 	} else {
2653 		static_branch_disable(&init_on_free);
2654 	}
2655 
2656 	if (IS_ENABLED(CONFIG_KMSAN) &&
2657 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2658 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2659 
2660 #ifdef CONFIG_DEBUG_PAGEALLOC
2661 	if (debug_pagealloc_enabled()) {
2662 		want_check_pages = true;
2663 		static_branch_enable(&_debug_pagealloc_enabled);
2664 
2665 		if (debug_guardpage_minorder())
2666 			static_branch_enable(&_debug_guardpage_enabled);
2667 	}
2668 #endif
2669 
2670 	/*
2671 	 * Any page debugging or hardening option also enables sanity checking
2672 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2673 	 * enabled already.
2674 	 */
2675 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2676 		static_branch_enable(&check_pages_enabled);
2677 }
2678 
2679 /* Report memory auto-initialization states for this boot. */
2680 static void __init report_meminit(void)
2681 {
2682 	const char *stack;
2683 
2684 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2685 		stack = "all(pattern)";
2686 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2687 		stack = "all(zero)";
2688 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2689 		stack = "byref_all(zero)";
2690 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2691 		stack = "byref(zero)";
2692 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2693 		stack = "__user(zero)";
2694 	else
2695 		stack = "off";
2696 
2697 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2698 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2699 		want_init_on_free() ? "on" : "off");
2700 	if (want_init_on_free())
2701 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2702 }
2703 
2704 static void __init mem_init_print_info(void)
2705 {
2706 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2707 	unsigned long init_code_size, init_data_size;
2708 
2709 	physpages = get_num_physpages();
2710 	codesize = _etext - _stext;
2711 	datasize = _edata - _sdata;
2712 	rosize = __end_rodata - __start_rodata;
2713 	bss_size = __bss_stop - __bss_start;
2714 	init_data_size = __init_end - __init_begin;
2715 	init_code_size = _einittext - _sinittext;
2716 
2717 	/*
2718 	 * Detect special cases and adjust section sizes accordingly:
2719 	 * 1) .init.* may be embedded into .data sections
2720 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2721 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2722 	 * 3) .rodata.* may be embedded into .text or .data sections.
2723 	 */
2724 #define adj_init_size(start, end, size, pos, adj) \
2725 	do { \
2726 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2727 			size -= adj; \
2728 	} while (0)
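	/*
	 * adj_init_size(): when @pos falls inside [@start, @end), the region
	 * measured by @size also contains the @adj bytes counted separately,
	 * so subtract them to avoid double accounting.
	 */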
2729 
2730 	adj_init_size(__init_begin, __init_end, init_data_size,
2731 		     _sinittext, init_code_size);
2732 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2733 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2734 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2735 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2736 
2737 #undef	adj_init_size
2738 
2739 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2740 #ifdef	CONFIG_HIGHMEM
2741 		", %luK highmem"
2742 #endif
2743 		")\n",
2744 		K(nr_free_pages()), K(physpages),
2745 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2746 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2747 		K(physpages - totalram_pages() - totalcma_pages),
2748 		K(totalcma_pages)
2749 #ifdef	CONFIG_HIGHMEM
2750 		, K(totalhigh_pages())
2751 #endif
2752 		);
2753 }
2754 
2755 /*
2756  * Set up kernel memory allocators
2757  */
2758 void __init mm_core_init(void)
2759 {
2760 	/* Initializations relying on SMP setup */
2761 	build_all_zonelists(NULL);
2762 	page_alloc_init_cpuhp();
2763 
2764 	/*
2765 	 * page_ext requires contiguous pages
2766 	 * bigger than MAX_ORDER, unless SPARSEMEM is used.
2767 	 */
2768 	page_ext_init_flatmem();
2769 	mem_debugging_and_hardening_init();
2770 	kfence_alloc_pool();
2771 	report_meminit();
2772 	kmsan_init_shadow();
2773 	stack_depot_early_init();
2774 	mem_init();
2775 	mem_init_print_info();
2776 	kmem_cache_init();
2777 	/*
2778 	 * page_owner must be initialized after buddy is ready, and also after
2779 	 * slab is ready so that stack_depot_init() works properly
2780 	 */
2781 	page_ext_init_flatmem_late();
2782 	kmemleak_init();
2783 	ptlock_cache_init();
2784 	pgtable_cache_init();
2785 	debug_objects_mem_init();
2786 	vmalloc_init();
2787 	/* If struct page init was not deferred, init page_ext now, as vmap is fully initialized */
2788 	if (!deferred_struct_pages)
2789 		page_ext_init();
2790 	/* Should be run before the first non-init thread is created */
2791 	init_espfix_bsp();
2792 	/* Should be run after espfix64 is set up. */
2793 	pti_init();
2794 	kmsan_init_runtime();
2795 	mm_cache_init();
2796 }
2797