xref: /openbmc/linux/mm/mm_init.c (revision e961cc56)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm_init.c - Memory initialisation verification and debugging
4  *
5  * Copyright 2008 IBM Corporation, 2008
6  * Author Mel Gorman <mel@csn.ul.ie>
7  *
8  */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/kobject.h>
12 #include <linux/export.h>
13 #include <linux/memory.h>
14 #include <linux/notifier.h>
15 #include <linux/sched.h>
16 #include <linux/mman.h>
17 #include <linux/memblock.h>
18 #include <linux/page-isolation.h>
19 #include <linux/padata.h>
20 #include <linux/nmi.h>
21 #include <linux/buffer_head.h>
22 #include <linux/kmemleak.h>
23 #include <linux/kfence.h>
24 #include <linux/page_ext.h>
25 #include <linux/pti.h>
26 #include <linux/pgtable.h>
27 #include <linux/swap.h>
28 #include <linux/cma.h>
29 #include "internal.h"
30 #include "slab.h"
31 #include "shuffle.h"
32 
33 #include <asm/setup.h>
34 
35 #ifdef CONFIG_DEBUG_MEMORY_INIT
36 int __meminitdata mminit_loglevel;
37 
38 /* The zonelists are simply reported, validation is manual. */
39 void __init mminit_verify_zonelist(void)
40 {
41 	int nid;
42 
43 	if (mminit_loglevel < MMINIT_VERIFY)
44 		return;
45 
46 	for_each_online_node(nid) {
47 		pg_data_t *pgdat = NODE_DATA(nid);
48 		struct zone *zone;
49 		struct zoneref *z;
50 		struct zonelist *zonelist;
51 		int i, listid, zoneid;
52 
53 		BUILD_BUG_ON(MAX_ZONELISTS > 2);
54 		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
55 
56 			/* Identify the zone and nodelist */
57 			zoneid = i % MAX_NR_ZONES;
58 			listid = i / MAX_NR_ZONES;
59 			zonelist = &pgdat->node_zonelists[listid];
60 			zone = &pgdat->node_zones[zoneid];
61 			if (!populated_zone(zone))
62 				continue;
63 
64 			/* Print information about the zonelist */
65 			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
66 				listid > 0 ? "thisnode" : "general", nid,
67 				zone->name);
68 
69 			/* Iterate the zonelist */
70 			for_each_zone_zonelist(zone, z, zonelist, zoneid)
71 				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
72 			pr_cont("\n");
73 		}
74 	}
75 }
76 
77 void __init mminit_verify_pageflags_layout(void)
78 {
79 	int shift, width;
80 	unsigned long or_mask, add_mask;
81 
82 	shift = 8 * sizeof(unsigned long);
83 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
84 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
85 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
86 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
87 		SECTIONS_WIDTH,
88 		NODES_WIDTH,
89 		ZONES_WIDTH,
90 		LAST_CPUPID_WIDTH,
91 		KASAN_TAG_WIDTH,
92 		LRU_GEN_WIDTH,
93 		LRU_REFS_WIDTH,
94 		NR_PAGEFLAGS);
95 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
96 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
97 		SECTIONS_SHIFT,
98 		NODES_SHIFT,
99 		ZONES_SHIFT,
100 		LAST_CPUPID_SHIFT,
101 		KASAN_TAG_WIDTH);
102 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
103 		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
104 		(unsigned long)SECTIONS_PGSHIFT,
105 		(unsigned long)NODES_PGSHIFT,
106 		(unsigned long)ZONES_PGSHIFT,
107 		(unsigned long)LAST_CPUPID_PGSHIFT,
108 		(unsigned long)KASAN_TAG_PGSHIFT);
109 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
110 		"Node/Zone ID: %lu -> %lu\n",
111 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
112 		(unsigned long)ZONEID_PGOFF);
113 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
114 		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
115 		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
116 #ifdef NODE_NOT_IN_PAGE_FLAGS
117 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
118 		"Node not in page flags");
119 #endif
120 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
121 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
122 		"Last cpupid not in page flags");
123 #endif
124 
125 	if (SECTIONS_WIDTH) {
126 		shift -= SECTIONS_WIDTH;
127 		BUG_ON(shift != SECTIONS_PGSHIFT);
128 	}
129 	if (NODES_WIDTH) {
130 		shift -= NODES_WIDTH;
131 		BUG_ON(shift != NODES_PGSHIFT);
132 	}
133 	if (ZONES_WIDTH) {
134 		shift -= ZONES_WIDTH;
135 		BUG_ON(shift != ZONES_PGSHIFT);
136 	}
137 
138 	/* Check for bitmask overlaps */
139 	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
140 			(NODES_MASK << NODES_PGSHIFT) |
141 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
142 	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
143 			(NODES_MASK << NODES_PGSHIFT) +
144 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
145 	BUG_ON(or_mask != add_mask);
146 }
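/*
 * For reference, the layout being verified is, from the most significant
 * bit of page->flags downwards (zero-width fields collapse):
 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | LRU_GEN | LRU_REFS | ... | FLAGS |
 * On a typical x86-64 SPARSEMEM_VMEMMAP build SECTIONS_WIDTH is 0, so the
 * node and zone fields sit at the top of the word.
 */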
147 
148 static __init int set_mminit_loglevel(char *str)
149 {
150 	get_option(&str, &mminit_loglevel);
151 	return 0;
152 }
153 early_param("mminit_loglevel", set_mminit_loglevel);
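/*
 * Example usage (boot command line), assuming CONFIG_DEBUG_MEMORY_INIT=y:
 * booting with "mminit_loglevel=2" selects MMINIT_TRACE and prints the
 * zonelist and pageflags-layout reports above, "mminit_loglevel=1" enables
 * only the MMINIT_VERIFY checks, and the default of 0 limits output to
 * warnings.
 */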
154 #endif /* CONFIG_DEBUG_MEMORY_INIT */
155 
156 struct kobject *mm_kobj;
157 EXPORT_SYMBOL_GPL(mm_kobj);
158 
159 #ifdef CONFIG_SMP
160 s32 vm_committed_as_batch = 32;
161 
162 void mm_compute_batch(int overcommit_policy)
163 {
164 	u64 memsized_batch;
165 	s32 nr = num_present_cpus();
166 	s32 batch = max_t(s32, nr*2, 32);
167 	unsigned long ram_pages = totalram_pages();
168 
169 	/*
170 	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
171 	 * (total memory/#cpus), and lift it to 25% for other policies
172 	 * to ease the possible lock contention for percpu_counter
173 	 * vm_committed_as, while the max limit is INT_MAX.
174 	 */
175 	if (overcommit_policy == OVERCOMMIT_NEVER)
176 		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
177 	else
178 		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
179 
180 	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
181 }
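/*
 * Illustration with example numbers (not taken from this file): with 16
 * present CPUs and ~64 GiB of RAM (16M pages of 4 KiB), batch is
 * max(16 * 2, 32) = 32; OVERCOMMIT_NEVER yields memsized_batch =
 * 16M / 16 / 256 = 4096 pages, other policies yield 16M / 16 / 4 = 262144,
 * so vm_committed_as_batch becomes 4096 or 262144 respectively.
 */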
182 
183 static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
184 					unsigned long action, void *arg)
185 {
186 	switch (action) {
187 	case MEM_ONLINE:
188 	case MEM_OFFLINE:
189 		mm_compute_batch(sysctl_overcommit_memory);
190 		break;
191 	default:
192 		break;
193 	}
194 	return NOTIFY_OK;
195 }
196 
197 static int __init mm_compute_batch_init(void)
198 {
199 	mm_compute_batch(sysctl_overcommit_memory);
200 	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
201 	return 0;
202 }
203 
204 __initcall(mm_compute_batch_init);
205 
206 #endif
207 
208 static int __init mm_sysfs_init(void)
209 {
210 	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
211 	if (!mm_kobj)
212 		return -ENOMEM;
213 
214 	return 0;
215 }
216 postcore_initcall(mm_sysfs_init);
217 
218 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
219 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
220 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
221 
222 static unsigned long required_kernelcore __initdata;
223 static unsigned long required_kernelcore_percent __initdata;
224 static unsigned long required_movablecore __initdata;
225 static unsigned long required_movablecore_percent __initdata;
226 
227 static unsigned long nr_kernel_pages __initdata;
228 static unsigned long nr_all_pages __initdata;
229 static unsigned long dma_reserve __initdata;
230 
231 static bool deferred_struct_pages __meminitdata;
232 
233 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
234 
235 static int __init cmdline_parse_core(char *p, unsigned long *core,
236 				     unsigned long *percent)
237 {
238 	unsigned long long coremem;
239 	char *endptr;
240 
241 	if (!p)
242 		return -EINVAL;
243 
244 	/* Value may be a percentage of total memory, otherwise bytes */
245 	coremem = simple_strtoull(p, &endptr, 0);
246 	if (*endptr == '%') {
247 		/* Paranoid check for percent values greater than 100 */
248 		WARN_ON(coremem > 100);
249 
250 		*percent = coremem;
251 	} else {
252 		coremem = memparse(p, &p);
253 		/* Paranoid check that UL is enough for the coremem value */
254 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
255 
256 		*core = coremem >> PAGE_SHIFT;
257 		*percent = 0UL;
258 	}
259 	return 0;
260 }
261 
262 /*
263  * kernelcore=size sets the amount of memory to use for allocations that
264  * cannot be reclaimed or migrated.
265  */
266 static int __init cmdline_parse_kernelcore(char *p)
267 {
268 	/* parse kernelcore=mirror */
269 	if (parse_option_str(p, "mirror")) {
270 		mirrored_kernelcore = true;
271 		return 0;
272 	}
273 
274 	return cmdline_parse_core(p, &required_kernelcore,
275 				  &required_kernelcore_percent);
276 }
277 early_param("kernelcore", cmdline_parse_kernelcore);
278 
279 /*
280  * movablecore=size sets the amount of memory to use for allocations that
281  * can be reclaimed or migrated.
282  */
283 static int __init cmdline_parse_movablecore(char *p)
284 {
285 	return cmdline_parse_core(p, &required_movablecore,
286 				  &required_movablecore_percent);
287 }
288 early_param("movablecore", cmdline_parse_movablecore);
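/*
 * Example command-line usage: "kernelcore=512M", "kernelcore=25%" or
 * "kernelcore=mirror", and "movablecore=2G" or "movablecore=50%". Plain
 * sizes are parsed by memparse(), while percentages are taken relative to
 * total memory in find_zone_movable_pfns_for_nodes() below.
 */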
289 
290 /*
291  * early_calculate_totalpages()
292  * Sum pages in active regions for movable zone.
293  * Populate N_MEMORY for calculating usable_nodes.
294  */
295 static unsigned long __init early_calculate_totalpages(void)
296 {
297 	unsigned long totalpages = 0;
298 	unsigned long start_pfn, end_pfn;
299 	int i, nid;
300 
301 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
302 		unsigned long pages = end_pfn - start_pfn;
303 
304 		totalpages += pages;
305 		if (pages)
306 			node_set_state(nid, N_MEMORY);
307 	}
308 	return totalpages;
309 }
310 
311 /*
312  * This finds a zone that can be used for ZONE_MOVABLE pages. The
313  * assumption is made that zones within a node are ordered in monotonically
314  * increasing memory addresses so that the "highest" populated zone is used.
315  */
316 static void __init find_usable_zone_for_movable(void)
317 {
318 	int zone_index;
319 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
320 		if (zone_index == ZONE_MOVABLE)
321 			continue;
322 
323 		if (arch_zone_highest_possible_pfn[zone_index] >
324 				arch_zone_lowest_possible_pfn[zone_index])
325 			break;
326 	}
327 
328 	VM_BUG_ON(zone_index == -1);
329 	movable_zone = zone_index;
330 }
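/*
 * In practice movable_zone ends up as the highest non-empty zone below
 * ZONE_MOVABLE: typically ZONE_NORMAL on 64-bit configurations, or
 * ZONE_HIGHMEM on 32-bit systems with highmem.
 */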
331 
332 /*
333  * Find the PFN at which the Movable zone begins in each node. Kernel memory
334  * is spread evenly between nodes as long as the nodes have enough
335  * memory. When they don't, some nodes will have more kernelcore than
336  * others
337  */
338 static void __init find_zone_movable_pfns_for_nodes(void)
339 {
340 	int i, nid;
341 	unsigned long usable_startpfn;
342 	unsigned long kernelcore_node, kernelcore_remaining;
343 	/* save the state before borrowing the nodemask */
344 	nodemask_t saved_node_state = node_states[N_MEMORY];
345 	unsigned long totalpages = early_calculate_totalpages();
346 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
347 	struct memblock_region *r;
348 
349 	/* Need to find movable_zone earlier when movable_node is specified. */
350 	find_usable_zone_for_movable();
351 
352 	/*
353 	 * If movable_node is specified, ignore kernelcore and movablecore
354 	 * options.
355 	 */
356 	if (movable_node_is_enabled()) {
357 		for_each_mem_region(r) {
358 			if (!memblock_is_hotpluggable(r))
359 				continue;
360 
361 			nid = memblock_get_region_node(r);
362 
363 			usable_startpfn = PFN_DOWN(r->base);
364 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
365 				min(usable_startpfn, zone_movable_pfn[nid]) :
366 				usable_startpfn;
367 		}
368 
369 		goto out2;
370 	}
371 
372 	/*
373 	 * If kernelcore=mirror is specified, ignore movablecore option
374 	 */
375 	if (mirrored_kernelcore) {
376 		bool mem_below_4gb_not_mirrored = false;
377 
378 		for_each_mem_region(r) {
379 			if (memblock_is_mirror(r))
380 				continue;
381 
382 			nid = memblock_get_region_node(r);
383 
384 			usable_startpfn = memblock_region_memory_base_pfn(r);
385 
386 			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
387 				mem_below_4gb_not_mirrored = true;
388 				continue;
389 			}
390 
391 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
392 				min(usable_startpfn, zone_movable_pfn[nid]) :
393 				usable_startpfn;
394 		}
395 
396 		if (mem_below_4gb_not_mirrored)
397 			pr_warn("This configuration results in unmirrored kernel memory.\n");
398 
399 		goto out2;
400 	}
401 
402 	/*
403 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
404 	 * amount of necessary memory.
405 	 */
406 	if (required_kernelcore_percent)
407 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
408 				       10000UL;
409 	if (required_movablecore_percent)
410 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
411 					10000UL;
412 
413 	/*
414 	 * If movablecore= was specified, calculate the size of
415 	 * kernelcore it corresponds to so that memory usable for
416 	 * any allocation type is evenly spread. If both kernelcore
417 	 * and movablecore are specified, then the value of kernelcore
418 	 * will be used for required_kernelcore if it's greater than
419 	 * what movablecore would have allowed.
420 	 */
421 	if (required_movablecore) {
422 		unsigned long corepages;
423 
424 		/*
425 		 * Round-up so that ZONE_MOVABLE is at least as large as what
426 		 * was requested by the user
427 		 */
428 		required_movablecore =
429 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
430 		required_movablecore = min(totalpages, required_movablecore);
431 		corepages = totalpages - required_movablecore;
432 
433 		required_kernelcore = max(required_kernelcore, corepages);
434 	}
435 
436 	/*
437 	 * If kernelcore was not specified or kernelcore size is larger
438 	 * than totalpages, there is no ZONE_MOVABLE.
439 	 */
440 	if (!required_kernelcore || required_kernelcore >= totalpages)
441 		goto out;
442 
443 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
444 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
445 
446 restart:
447 	/* Spread kernelcore memory as evenly as possible throughout nodes */
448 	kernelcore_node = required_kernelcore / usable_nodes;
449 	for_each_node_state(nid, N_MEMORY) {
450 		unsigned long start_pfn, end_pfn;
451 
452 		/*
453 		 * Recalculate kernelcore_node if the division per node
454 		 * now exceeds what is necessary to satisfy the requested
455 		 * amount of memory for the kernel
456 		 */
457 		if (required_kernelcore < kernelcore_node)
458 			kernelcore_node = required_kernelcore / usable_nodes;
459 
460 		/*
461 		 * As the map is walked, we track how much memory is usable
462 		 * by the kernel using kernelcore_remaining. When it is
463 		 * 0, the rest of the node is usable by ZONE_MOVABLE
464 		 */
465 		kernelcore_remaining = kernelcore_node;
466 
467 		/* Go through each range of PFNs within this node */
468 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
469 			unsigned long size_pages;
470 
471 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
472 			if (start_pfn >= end_pfn)
473 				continue;
474 
475 			/* Account for what is only usable for kernelcore */
476 			if (start_pfn < usable_startpfn) {
477 				unsigned long kernel_pages;
478 				kernel_pages = min(end_pfn, usable_startpfn)
479 								- start_pfn;
480 
481 				kernelcore_remaining -= min(kernel_pages,
482 							kernelcore_remaining);
483 				required_kernelcore -= min(kernel_pages,
484 							required_kernelcore);
485 
486 				/* Continue if range is now fully accounted */
487 				if (end_pfn <= usable_startpfn) {
488 
489 					/*
490 					 * Push zone_movable_pfn to the end so
491 					 * that if we have to rebalance
492 					 * kernelcore across nodes, we will
493 					 * not double account here
494 					 */
495 					zone_movable_pfn[nid] = end_pfn;
496 					continue;
497 				}
498 				start_pfn = usable_startpfn;
499 			}
500 
501 			/*
502 			 * The usable PFN range for ZONE_MOVABLE is from
503 			 * start_pfn->end_pfn. Calculate size_pages as the
504 			 * number of pages used as kernelcore
505 			 */
506 			size_pages = end_pfn - start_pfn;
507 			if (size_pages > kernelcore_remaining)
508 				size_pages = kernelcore_remaining;
509 			zone_movable_pfn[nid] = start_pfn + size_pages;
510 
511 			/*
512 			 * Some kernelcore has been met, update counts and
513 			 * break if the kernelcore for this node has been
514 			 * satisfied
515 			 */
516 			required_kernelcore -= min(required_kernelcore,
517 								size_pages);
518 			kernelcore_remaining -= size_pages;
519 			if (!kernelcore_remaining)
520 				break;
521 		}
522 	}
523 
524 	/*
525 	 * If there is still required_kernelcore, we do another pass with one
526 	 * less node in the count. This will push zone_movable_pfn[nid] further
527 	 * along on the nodes that still have memory until kernelcore is
528 	 * satisfied
529 	 */
530 	usable_nodes--;
531 	if (usable_nodes && required_kernelcore > usable_nodes)
532 		goto restart;
533 
534 out2:
535 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
536 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
537 		unsigned long start_pfn, end_pfn;
538 
539 		zone_movable_pfn[nid] =
540 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
541 
542 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
543 		if (zone_movable_pfn[nid] >= end_pfn)
544 			zone_movable_pfn[nid] = 0;
545 	}
546 
547 out:
548 	/* restore the node_state */
549 	node_states[N_MEMORY] = saved_node_state;
550 }
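/*
 * Worked example (illustrative numbers): with two nodes of 4 GiB each and
 * "kernelcore=2G", kernelcore_node is 1 GiB per node, so, roundups to
 * MAX_ORDER_NR_PAGES aside, zone_movable_pfn[] lands 1 GiB into each node
 * and the remaining 3 GiB per node becomes ZONE_MOVABLE.
 */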
551 
552 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
553 				unsigned long zone, int nid)
554 {
555 	mm_zero_struct_page(page);
556 	set_page_links(page, zone, nid, pfn);
557 	init_page_count(page);
558 	page_mapcount_reset(page);
559 	page_cpupid_reset_last(page);
560 	page_kasan_tag_reset(page);
561 
562 	INIT_LIST_HEAD(&page->lru);
563 #ifdef WANT_PAGE_VIRTUAL
564 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
565 	if (!is_highmem_idx(zone))
566 		set_page_address(page, __va(pfn << PAGE_SHIFT));
567 #endif
568 }
569 
570 #ifdef CONFIG_NUMA
571 /*
572  * During memory init memblocks map pfns to nids. The search is expensive and
573  * this caches recent lookups. The implementation of __early_pfn_to_nid
574  * treats start/end as pfns.
575  */
576 struct mminit_pfnnid_cache {
577 	unsigned long last_start;
578 	unsigned long last_end;
579 	int last_nid;
580 };
581 
582 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
583 
584 /*
585  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
586  */
587 static int __meminit __early_pfn_to_nid(unsigned long pfn,
588 					struct mminit_pfnnid_cache *state)
589 {
590 	unsigned long start_pfn, end_pfn;
591 	int nid;
592 
593 	if (state->last_start <= pfn && pfn < state->last_end)
594 		return state->last_nid;
595 
596 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
597 	if (nid != NUMA_NO_NODE) {
598 		state->last_start = start_pfn;
599 		state->last_end = end_pfn;
600 		state->last_nid = nid;
601 	}
602 
603 	return nid;
604 }
605 
606 int __meminit early_pfn_to_nid(unsigned long pfn)
607 {
608 	static DEFINE_SPINLOCK(early_pfn_lock);
609 	int nid;
610 
611 	spin_lock(&early_pfn_lock);
612 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
613 	if (nid < 0)
614 		nid = first_online_node;
615 	spin_unlock(&early_pfn_lock);
616 
617 	return nid;
618 }
619 
620 int hashdist = HASHDIST_DEFAULT;
621 
622 static int __init set_hashdist(char *str)
623 {
624 	if (!str)
625 		return 0;
626 	hashdist = simple_strtoul(str, &str, 0);
627 	return 1;
628 }
629 __setup("hashdist=", set_hashdist);
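/*
 * Example: booting with "hashdist=0" keeps the large system hashes
 * node-local even on NUMA machines; fixup_hashdist() below does the same
 * automatically when only a single node has memory.
 */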
630 
631 static inline void fixup_hashdist(void)
632 {
633 	if (num_node_state(N_MEMORY) == 1)
634 		hashdist = 0;
635 }
636 #else
637 static inline void fixup_hashdist(void) {}
638 #endif /* CONFIG_NUMA */
639 
640 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
641 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
642 {
643 	pgdat->first_deferred_pfn = ULONG_MAX;
644 }
645 
646 /* Returns true if the struct page for the pfn is initialised */
647 static inline bool __meminit early_page_initialised(unsigned long pfn)
648 {
649 	int nid = early_pfn_to_nid(pfn);
650 
651 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
652 		return false;
653 
654 	return true;
655 }
656 
657 /*
658  * Returns true when the remaining initialisation should be deferred until
659  * later in the boot cycle when it can be parallelised.
660  */
661 static bool __meminit
662 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
663 {
664 	static unsigned long prev_end_pfn, nr_initialised;
665 
666 	if (early_page_ext_enabled())
667 		return false;
668 	/*
669 	 * The static prev_end_pfn contains the end of the previous zone.
670 	 * No need to protect it because this is called very early in boot, before smp_init.
671 	 */
672 	if (prev_end_pfn != end_pfn) {
673 		prev_end_pfn = end_pfn;
674 		nr_initialised = 0;
675 	}
676 
677 	/* Always populate low zones for address-constrained allocations */
678 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
679 		return false;
680 
681 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
682 		return true;
683 	/*
684 	 * We start with only one section of pages; more pages are added as
685 	 * needed until the rest of the deferred pages are initialized.
686 	 */
687 	nr_initialised++;
688 	if ((nr_initialised > PAGES_PER_SECTION) &&
689 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
690 		NODE_DATA(nid)->first_deferred_pfn = pfn;
691 		return true;
692 	}
693 	return false;
694 }
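/*
 * The effect is that, on large nodes, only the low zones plus roughly one
 * section's worth of struct pages are initialised synchronously at boot;
 * everything from first_deferred_pfn onwards is left for the deferred
 * initialisation threads (see deferred_init_memmap()) later in boot.
 */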
695 
696 static void __meminit init_reserved_page(unsigned long pfn)
697 {
698 	pg_data_t *pgdat;
699 	int nid, zid;
700 
701 	if (early_page_initialised(pfn))
702 		return;
703 
704 	nid = early_pfn_to_nid(pfn);
705 	pgdat = NODE_DATA(nid);
706 
707 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
708 		struct zone *zone = &pgdat->node_zones[zid];
709 
710 		if (zone_spans_pfn(zone, pfn))
711 			break;
712 	}
713 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
714 }
715 #else
716 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
717 
718 static inline bool early_page_initialised(unsigned long pfn)
719 {
720 	return true;
721 }
722 
723 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
724 {
725 	return false;
726 }
727 
728 static inline void init_reserved_page(unsigned long pfn)
729 {
730 }
731 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
732 
733 /*
734  * Initialised pages do not have PageReserved set. This function is
735  * called for each range allocated by the bootmem allocator and
736  * marks the pages PageReserved. The remaining valid pages are later
737  * sent to the buddy page allocator.
738  */
739 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
740 {
741 	unsigned long start_pfn = PFN_DOWN(start);
742 	unsigned long end_pfn = PFN_UP(end);
743 
744 	for (; start_pfn < end_pfn; start_pfn++) {
745 		if (pfn_valid(start_pfn)) {
746 			struct page *page = pfn_to_page(start_pfn);
747 
748 			init_reserved_page(start_pfn);
749 
750 			/* Avoid false-positive PageTail() */
751 			INIT_LIST_HEAD(&page->lru);
752 
753 			/*
754 			 * no need for atomic set_bit because the struct
755 			 * page is not visible yet so nobody should
756 			 * access it yet.
757 			 */
758 			__SetPageReserved(page);
759 		}
760 	}
761 }
762 
763 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
764 static bool __meminit
765 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
766 {
767 	static struct memblock_region *r;
768 
769 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
770 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
771 			for_each_mem_region(r) {
772 				if (*pfn < memblock_region_memory_end_pfn(r))
773 					break;
774 			}
775 		}
776 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
777 		    memblock_is_mirror(r)) {
778 			*pfn = memblock_region_memory_end_pfn(r);
779 			return true;
780 		}
781 	}
782 	return false;
783 }
784 
785 /*
786  * Only struct pages that correspond to ranges defined by memblock.memory
787  * are zeroed and initialized by going through __init_single_page() during
788  * memmap_init_zone_range().
789  *
790  * But, there could be struct pages that correspond to holes in
791  * memblock.memory. This can happen because of the following reasons:
792  * - physical memory bank size is not necessarily an exact multiple of the
793  *   arbitrary section size
794  * - early reserved memory may not be listed in memblock.memory
795  * - memory layouts defined with memmap= kernel parameter may not align
796  *   nicely with memmap sections
797  *
798  * Explicitly initialize those struct pages so that:
799  * - PG_reserved is set
800  * - zone and node links point to zone and node that span the page if the
801  *   hole is in the middle of a zone
802  * - zone and node links point to adjacent zone/node if the hole falls on
803  *   the zone boundary; the pages in such holes will be prepended to the
804  *   zone/node above the hole except for the trailing pages in the last
805  *   section that will be appended to the zone/node below.
806  */
807 static void __init init_unavailable_range(unsigned long spfn,
808 					  unsigned long epfn,
809 					  int zone, int node)
810 {
811 	unsigned long pfn;
812 	u64 pgcnt = 0;
813 
814 	for (pfn = spfn; pfn < epfn; pfn++) {
815 		if (!pfn_valid(pageblock_start_pfn(pfn))) {
816 			pfn = pageblock_end_pfn(pfn) - 1;
817 			continue;
818 		}
819 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
820 		__SetPageReserved(pfn_to_page(pfn));
821 		pgcnt++;
822 	}
823 
824 	if (pgcnt)
825 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
826 			node, zone_names[zone], pgcnt);
827 }
828 
829 /*
830  * Initially all pages are reserved - free ones are freed
831  * up by memblock_free_all() once the early boot process is
832  * done. Non-atomic initialization, single-pass.
833  *
834  * All aligned pageblocks are initialized to the specified migratetype
835  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
836  * zone stats (e.g., nr_isolate_pageblock) are touched.
837  */
838 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
839 		unsigned long start_pfn, unsigned long zone_end_pfn,
840 		enum meminit_context context,
841 		struct vmem_altmap *altmap, int migratetype)
842 {
843 	unsigned long pfn, end_pfn = start_pfn + size;
844 	struct page *page;
845 
846 	if (highest_memmap_pfn < end_pfn - 1)
847 		highest_memmap_pfn = end_pfn - 1;
848 
849 #ifdef CONFIG_ZONE_DEVICE
850 	/*
851 	 * Honor reservation requested by the driver for this ZONE_DEVICE
852 	 * memory. We limit the total number of pages to initialize to just
853 	 * those that might contain the memory mapping. We will defer the
854 	 * ZONE_DEVICE page initialization until after we have released
855 	 * the hotplug lock.
856 	 */
857 	if (zone == ZONE_DEVICE) {
858 		if (!altmap)
859 			return;
860 
861 		if (start_pfn == altmap->base_pfn)
862 			start_pfn += altmap->reserve;
863 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
864 	}
865 #endif
866 
867 	for (pfn = start_pfn; pfn < end_pfn; ) {
868 		/*
869 		 * There can be holes in boot-time mem_map[]s handed to this
870 		 * function.  They do not exist on hotplugged memory.
871 		 */
872 		if (context == MEMINIT_EARLY) {
873 			if (overlap_memmap_init(zone, &pfn))
874 				continue;
875 			if (defer_init(nid, pfn, zone_end_pfn)) {
876 				deferred_struct_pages = true;
877 				break;
878 			}
879 		}
880 
881 		page = pfn_to_page(pfn);
882 		__init_single_page(page, pfn, zone, nid);
883 		if (context == MEMINIT_HOTPLUG)
884 			__SetPageReserved(page);
885 
886 		/*
887 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
888 		 * such that unmovable allocations won't be scattered all
889 		 * over the place during system boot.
890 		 */
891 		if (pageblock_aligned(pfn)) {
892 			set_pageblock_migratetype(page, migratetype);
893 			cond_resched();
894 		}
895 		pfn++;
896 	}
897 }
898 
899 static void __init memmap_init_zone_range(struct zone *zone,
900 					  unsigned long start_pfn,
901 					  unsigned long end_pfn,
902 					  unsigned long *hole_pfn)
903 {
904 	unsigned long zone_start_pfn = zone->zone_start_pfn;
905 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
906 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
907 
908 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
909 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
910 
911 	if (start_pfn >= end_pfn)
912 		return;
913 
914 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
915 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
916 
917 	if (*hole_pfn < start_pfn)
918 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
919 
920 	*hole_pfn = end_pfn;
921 }
922 
923 static void __init memmap_init(void)
924 {
925 	unsigned long start_pfn, end_pfn;
926 	unsigned long hole_pfn = 0;
927 	int i, j, zone_id = 0, nid;
928 
929 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
930 		struct pglist_data *node = NODE_DATA(nid);
931 
932 		for (j = 0; j < MAX_NR_ZONES; j++) {
933 			struct zone *zone = node->node_zones + j;
934 
935 			if (!populated_zone(zone))
936 				continue;
937 
938 			memmap_init_zone_range(zone, start_pfn, end_pfn,
939 					       &hole_pfn);
940 			zone_id = j;
941 		}
942 	}
943 
944 #ifdef CONFIG_SPARSEMEM
945 	/*
946 	 * Initialize the memory map for the hole in the range [memory_end,
947 	 * section_end].
948 	 * Append the pages in this hole to the highest zone in the last
949 	 * node.
950 	 * The call to init_unavailable_range() is outside the ifdef to
951 	 * silence the compiler warning about zone_id set but not used;
952 	 * for FLATMEM it is a nop anyway.
953 	 */
954 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
955 	if (hole_pfn < end_pfn)
956 #endif
957 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
958 }
959 
960 #ifdef CONFIG_ZONE_DEVICE
961 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
962 					  unsigned long zone_idx, int nid,
963 					  struct dev_pagemap *pgmap)
964 {
965 
966 	__init_single_page(page, pfn, zone_idx, nid);
967 
968 	/*
969 	 * Mark page reserved as it will need to wait for onlining
970 	 * phase for it to be fully associated with a zone.
971 	 *
972 	 * We can use the non-atomic __set_bit operation for setting
973 	 * the flag as we are still initializing the pages.
974 	 */
975 	__SetPageReserved(page);
976 
977 	/*
978 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
979 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
980 	 * ever freed or placed on a driver-private list.
981 	 */
982 	page->pgmap = pgmap;
983 	page->zone_device_data = NULL;
984 
985 	/*
986 	 * Mark the block movable so that blocks are reserved for
987 	 * movable at startup. This will force kernel allocations
988 	 * to reserve their blocks rather than leaking throughout
989 	 * the address space during boot when many long-lived
990 	 * kernel allocations are made.
991 	 *
992 	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
993 	 * because this is done early in section_activate()
994 	 */
995 	if (pageblock_aligned(pfn)) {
996 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
997 		cond_resched();
998 	}
999 
1000 	/*
1001 	 * ZONE_DEVICE pages are released directly to the driver page allocator
1002 	 * which will set the page count to 1 when allocating the page.
1003 	 */
1004 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1005 	    pgmap->type == MEMORY_DEVICE_COHERENT)
1006 		set_page_count(page, 0);
1007 }
1008 
1009 /*
1010  * With compound page geometry and when struct pages are stored in RAM, most
1011  * tail pages are reused. Consequently, the number of unique struct pages to
1012  * initialize is a lot smaller than the total number of struct pages being
1013  * mapped. This is a paired / mild layering violation with explicit knowledge
1014  * of how the sparse_vmemmap internals handle compound pages in the absence
1015  * of an altmap. See vmemmap_populate_compound_pages().
1016  */
1017 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
1018 					      unsigned long nr_pages)
1019 {
1020 	return is_power_of_2(sizeof(struct page)) &&
1021 		!altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
1022 }
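/*
 * Example: assuming 4 KiB pages and a 64-byte struct page (a power of
 * two), the no-altmap case initialises only 2 * (4096 / 64) = 128 unique
 * struct pages per compound mapping; with an altmap, or a non-power-of-two
 * struct page size, all nr_pages are initialised.
 */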
1023 
1024 static void __ref memmap_init_compound(struct page *head,
1025 				       unsigned long head_pfn,
1026 				       unsigned long zone_idx, int nid,
1027 				       struct dev_pagemap *pgmap,
1028 				       unsigned long nr_pages)
1029 {
1030 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
1031 	unsigned int order = pgmap->vmemmap_shift;
1032 
1033 	__SetPageHead(head);
1034 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1035 		struct page *page = pfn_to_page(pfn);
1036 
1037 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1038 		prep_compound_tail(head, pfn - head_pfn);
1039 		set_page_count(page, 0);
1040 
1041 		/*
1042 		 * The first tail page stores important compound page info.
1043 		 * Call prep_compound_head() after the first tail page has
1044 		 * been initialized, to not have the data overwritten.
1045 		 */
1046 		if (pfn == head_pfn + 1)
1047 			prep_compound_head(head, order);
1048 	}
1049 }
1050 
1051 void __ref memmap_init_zone_device(struct zone *zone,
1052 				   unsigned long start_pfn,
1053 				   unsigned long nr_pages,
1054 				   struct dev_pagemap *pgmap)
1055 {
1056 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
1057 	struct pglist_data *pgdat = zone->zone_pgdat;
1058 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1059 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1060 	unsigned long zone_idx = zone_idx(zone);
1061 	unsigned long start = jiffies;
1062 	int nid = pgdat->node_id;
1063 
1064 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1065 		return;
1066 
1067 	/*
1068 	 * The call to memmap_init should have already taken care
1069 	 * of the pages reserved for the memmap, so we can just jump to
1070 	 * the end of that region and start processing the device pages.
1071 	 */
1072 	if (altmap) {
1073 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1074 		nr_pages = end_pfn - start_pfn;
1075 	}
1076 
1077 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1078 		struct page *page = pfn_to_page(pfn);
1079 
1080 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1081 
1082 		if (pfns_per_compound == 1)
1083 			continue;
1084 
1085 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1086 				     compound_nr_pages(altmap, pfns_per_compound));
1087 	}
1088 
1089 	pr_debug("%s initialised %lu pages in %ums\n", __func__,
1090 		nr_pages, jiffies_to_msecs(jiffies - start));
1091 }
1092 #endif
1093 
1094 /*
1095  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1096  * because it is sized independently of the architecture. Unlike the other zones,
1097  * the starting point for ZONE_MOVABLE is not fixed. It may be different
1098  * in each node depending on the size of each node and how evenly kernelcore
1099  * is distributed. This helper function adjusts the zone ranges
1100  * provided by the architecture for a given node by using the end of the
1101  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1102  * zones within a node are ordered in monotonically increasing memory addresses.
1103  */
1104 static void __init adjust_zone_range_for_zone_movable(int nid,
1105 					unsigned long zone_type,
1106 					unsigned long node_start_pfn,
1107 					unsigned long node_end_pfn,
1108 					unsigned long *zone_start_pfn,
1109 					unsigned long *zone_end_pfn)
1110 {
1111 	/* Only adjust if ZONE_MOVABLE is on this node */
1112 	if (zone_movable_pfn[nid]) {
1113 		/* Size ZONE_MOVABLE */
1114 		if (zone_type == ZONE_MOVABLE) {
1115 			*zone_start_pfn = zone_movable_pfn[nid];
1116 			*zone_end_pfn = min(node_end_pfn,
1117 				arch_zone_highest_possible_pfn[movable_zone]);
1118 
1119 		/* Adjust for ZONE_MOVABLE starting within this range */
1120 		} else if (!mirrored_kernelcore &&
1121 			*zone_start_pfn < zone_movable_pfn[nid] &&
1122 			*zone_end_pfn > zone_movable_pfn[nid]) {
1123 			*zone_end_pfn = zone_movable_pfn[nid];
1124 
1125 		/* Check if this whole range is within ZONE_MOVABLE */
1126 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
1127 			*zone_start_pfn = *zone_end_pfn;
1128 	}
1129 }
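/*
 * Example (illustrative PFNs): if a node spans PFNs 0x100000-0x500000 and
 * zone_movable_pfn[nid] is 0x400000, a ZONE_NORMAL range covering the node
 * is clipped to end at 0x400000, while ZONE_MOVABLE is reported from
 * 0x400000 up to the node end (bounded by the highest usable zone).
 */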
1130 
1131 /*
1132  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1133  * then all holes in the requested range will be accounted for.
1134  */
1135 unsigned long __init __absent_pages_in_range(int nid,
1136 				unsigned long range_start_pfn,
1137 				unsigned long range_end_pfn)
1138 {
1139 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
1140 	unsigned long start_pfn, end_pfn;
1141 	int i;
1142 
1143 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1144 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1145 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1146 		nr_absent -= end_pfn - start_pfn;
1147 	}
1148 	return nr_absent;
1149 }
1150 
1151 /**
1152  * absent_pages_in_range - Return number of page frames in holes within a range
1153  * @start_pfn: The start PFN to start searching for holes
1154  * @end_pfn: The end PFN to stop searching for holes
1155  *
1156  * Return: the number of page frames in memory holes within a range.
1157  */
1158 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
1159 							unsigned long end_pfn)
1160 {
1161 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1162 }
1163 
1164 /* Return the number of page frames in holes in a zone on a node */
1165 static unsigned long __init zone_absent_pages_in_node(int nid,
1166 					unsigned long zone_type,
1167 					unsigned long node_start_pfn,
1168 					unsigned long node_end_pfn)
1169 {
1170 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1171 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1172 	unsigned long zone_start_pfn, zone_end_pfn;
1173 	unsigned long nr_absent;
1174 
1175 	/* When hotadding a new node from cpu_up(), the node should be empty */
1176 	if (!node_start_pfn && !node_end_pfn)
1177 		return 0;
1178 
1179 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1180 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1181 
1182 	adjust_zone_range_for_zone_movable(nid, zone_type,
1183 			node_start_pfn, node_end_pfn,
1184 			&zone_start_pfn, &zone_end_pfn);
1185 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1186 
1187 	/*
1188 	 * ZONE_MOVABLE handling.
1189 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1190 	 * and vice versa.
1191 	 */
1192 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1193 		unsigned long start_pfn, end_pfn;
1194 		struct memblock_region *r;
1195 
1196 		for_each_mem_region(r) {
1197 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
1198 					  zone_start_pfn, zone_end_pfn);
1199 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
1200 					zone_start_pfn, zone_end_pfn);
1201 
1202 			if (zone_type == ZONE_MOVABLE &&
1203 			    memblock_is_mirror(r))
1204 				nr_absent += end_pfn - start_pfn;
1205 
1206 			if (zone_type == ZONE_NORMAL &&
1207 			    !memblock_is_mirror(r))
1208 				nr_absent += end_pfn - start_pfn;
1209 		}
1210 	}
1211 
1212 	return nr_absent;
1213 }
1214 
1215 /*
1216  * Return the number of pages a zone spans in a node, including holes
1217  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1218  */
1219 static unsigned long __init zone_spanned_pages_in_node(int nid,
1220 					unsigned long zone_type,
1221 					unsigned long node_start_pfn,
1222 					unsigned long node_end_pfn,
1223 					unsigned long *zone_start_pfn,
1224 					unsigned long *zone_end_pfn)
1225 {
1226 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1227 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1228 	/* When hotadding a new node from cpu_up(), the node should be empty */
1229 	if (!node_start_pfn && !node_end_pfn)
1230 		return 0;
1231 
1232 	/* Get the start and end of the zone */
1233 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1234 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1235 	adjust_zone_range_for_zone_movable(nid, zone_type,
1236 				node_start_pfn, node_end_pfn,
1237 				zone_start_pfn, zone_end_pfn);
1238 
1239 	/* Check that this node has pages within the zone's required range */
1240 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1241 		return 0;
1242 
1243 	/* Move the zone boundaries inside the node if necessary */
1244 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1245 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1246 
1247 	/* Return the spanned pages */
1248 	return *zone_end_pfn - *zone_start_pfn;
1249 }
1250 
1251 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
1252 						unsigned long node_start_pfn,
1253 						unsigned long node_end_pfn)
1254 {
1255 	unsigned long realtotalpages = 0, totalpages = 0;
1256 	enum zone_type i;
1257 
1258 	for (i = 0; i < MAX_NR_ZONES; i++) {
1259 		struct zone *zone = pgdat->node_zones + i;
1260 		unsigned long zone_start_pfn, zone_end_pfn;
1261 		unsigned long spanned, absent;
1262 		unsigned long size, real_size;
1263 
1264 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1265 						     node_start_pfn,
1266 						     node_end_pfn,
1267 						     &zone_start_pfn,
1268 						     &zone_end_pfn);
1269 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
1270 						   node_start_pfn,
1271 						   node_end_pfn);
1272 
1273 		size = spanned;
1274 		real_size = size - absent;
1275 
1276 		if (size)
1277 			zone->zone_start_pfn = zone_start_pfn;
1278 		else
1279 			zone->zone_start_pfn = 0;
1280 		zone->spanned_pages = size;
1281 		zone->present_pages = real_size;
1282 #if defined(CONFIG_MEMORY_HOTPLUG)
1283 		zone->present_early_pages = real_size;
1284 #endif
1285 
1286 		totalpages += size;
1287 		realtotalpages += real_size;
1288 	}
1289 
1290 	pgdat->node_spanned_pages = totalpages;
1291 	pgdat->node_present_pages = realtotalpages;
1292 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1293 }
1294 
1295 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
1296 						unsigned long present_pages)
1297 {
1298 	unsigned long pages = spanned_pages;
1299 
1300 	/*
1301 	 * Provide a more accurate estimation if there are holes within
1302 	 * the zone and SPARSEMEM is in use. If there are holes within the
1303 	 * zone, each populated memory region may cost us one or two extra
1304 	 * memmap pages due to alignment because memmap pages for each
1305 	 * populated region may not be naturally aligned on a page boundary.
1306 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
1307 	 */
1308 	if (spanned_pages > present_pages + (present_pages >> 4) &&
1309 	    IS_ENABLED(CONFIG_SPARSEMEM))
1310 		pages = present_pages;
1311 
1312 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
1313 }
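/*
 * Example: a zone spanning 1M pages (4 GiB with 4 KiB pages) and 64-byte
 * struct pages needs PAGE_ALIGN(1M * 64 bytes) >> PAGE_SHIFT = 16384
 * pages, i.e. 64 MiB of memmap.
 */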
1314 
1315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1316 static void pgdat_init_split_queue(struct pglist_data *pgdat)
1317 {
1318 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1319 
1320 	spin_lock_init(&ds_queue->split_queue_lock);
1321 	INIT_LIST_HEAD(&ds_queue->split_queue);
1322 	ds_queue->split_queue_len = 0;
1323 }
1324 #else
1325 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1326 #endif
1327 
1328 #ifdef CONFIG_COMPACTION
1329 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1330 {
1331 	init_waitqueue_head(&pgdat->kcompactd_wait);
1332 }
1333 #else
1334 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1335 #endif
1336 
1337 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1338 {
1339 	int i;
1340 
1341 	pgdat_resize_init(pgdat);
1342 	pgdat_kswapd_lock_init(pgdat);
1343 
1344 	pgdat_init_split_queue(pgdat);
1345 	pgdat_init_kcompactd(pgdat);
1346 
1347 	init_waitqueue_head(&pgdat->kswapd_wait);
1348 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
1349 
1350 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1351 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
1352 
1353 	pgdat_page_ext_init(pgdat);
1354 	lruvec_init(&pgdat->__lruvec);
1355 }
1356 
1357 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1358 							unsigned long remaining_pages)
1359 {
1360 	atomic_long_set(&zone->managed_pages, remaining_pages);
1361 	zone_set_nid(zone, nid);
1362 	zone->name = zone_names[idx];
1363 	zone->zone_pgdat = NODE_DATA(nid);
1364 	spin_lock_init(&zone->lock);
1365 	zone_seqlock_init(zone);
1366 	zone_pcp_init(zone);
1367 }
1368 
1369 static void __meminit zone_init_free_lists(struct zone *zone)
1370 {
1371 	unsigned int order, t;
1372 	for_each_migratetype_order(order, t) {
1373 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1374 		zone->free_area[order].nr_free = 0;
1375 	}
1376 }
1377 
1378 void __meminit init_currently_empty_zone(struct zone *zone,
1379 					unsigned long zone_start_pfn,
1380 					unsigned long size)
1381 {
1382 	struct pglist_data *pgdat = zone->zone_pgdat;
1383 	int zone_idx = zone_idx(zone) + 1;
1384 
1385 	if (zone_idx > pgdat->nr_zones)
1386 		pgdat->nr_zones = zone_idx;
1387 
1388 	zone->zone_start_pfn = zone_start_pfn;
1389 
1390 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
1391 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
1392 			pgdat->node_id,
1393 			(unsigned long)zone_idx(zone),
1394 			zone_start_pfn, (zone_start_pfn + size));
1395 
1396 	zone_init_free_lists(zone);
1397 	zone->initialized = 1;
1398 }
1399 
1400 #ifndef CONFIG_SPARSEMEM
1401 /*
1402  * Calculate the size of the zone->blockflags rounded to an unsigned long.
1403  * Start by making sure zonesize is a multiple of pageblock_order by rounding
1404  * up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
1405  * round what is now in bits to the nearest long in bits, then return it in
1406  * bytes.
1407  */
1408 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1409 {
1410 	unsigned long usemapsize;
1411 
1412 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1413 	usemapsize = roundup(zonesize, pageblock_nr_pages);
1414 	usemapsize = usemapsize >> pageblock_order;
1415 	usemapsize *= NR_PAGEBLOCK_BITS;
1416 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
1417 
1418 	return usemapsize / 8;
1419 }
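/*
 * Example: a 4 GiB zone (1M pages of 4 KiB) with pageblock_order 9
 * (512 pages per pageblock) has 2048 pageblocks; assuming the usual
 * NR_PAGEBLOCK_BITS of 4 that is 8192 bits, already a multiple of a long,
 * so 1024 bytes are returned.
 */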
1420 
1421 static void __ref setup_usemap(struct zone *zone)
1422 {
1423 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1424 					       zone->spanned_pages);
1425 	zone->pageblock_flags = NULL;
1426 	if (usemapsize) {
1427 		zone->pageblock_flags =
1428 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1429 					    zone_to_nid(zone));
1430 		if (!zone->pageblock_flags)
1431 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1432 			      usemapsize, zone->name, zone_to_nid(zone));
1433 	}
1434 }
1435 #else
1436 static inline void setup_usemap(struct zone *zone) {}
1437 #endif /* CONFIG_SPARSEMEM */
1438 
1439 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1440 
1441 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1442 void __init set_pageblock_order(void)
1443 {
1444 	unsigned int order = MAX_ORDER;
1445 
1446 	/* Check that pageblock_nr_pages has not already been setup */
1447 	if (pageblock_order)
1448 		return;
1449 
1450 	/* Don't let pageblocks exceed the maximum allocation granularity. */
1451 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1452 		order = HUGETLB_PAGE_ORDER;
1453 
1454 	/*
1455 	 * Assume the largest contiguous order of interest is a huge page.
1456 	 * This value may be variable depending on boot parameters on IA64 and
1457 	 * powerpc.
1458 	 */
1459 	pageblock_order = order;
1460 }
1461 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1462 
1463 /*
1464  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1465  * is unused as pageblock_order is set at compile-time. See
1466  * include/linux/pageblock-flags.h for the values of pageblock_order based on
1467  * the kernel config
1468  */
1469 void __init set_pageblock_order(void)
1470 {
1471 }
1472 
1473 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1474 
1475 /*
1476  * Set up the zone data structures
1477  * - init pgdat internals
1478  * - init all zones belonging to this node
1479  *
1480  * NOTE: this function is only called during memory hotplug
1481  */
1482 #ifdef CONFIG_MEMORY_HOTPLUG
1483 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1484 {
1485 	int nid = pgdat->node_id;
1486 	enum zone_type z;
1487 	int cpu;
1488 
1489 	pgdat_init_internals(pgdat);
1490 
1491 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
1492 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1493 
1494 	/*
1495 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
1496 	 * Note that kswapd will init kswapd_highest_zoneidx properly
1497 	 * when it starts in the near future.
1498 	 */
1499 	pgdat->nr_zones = 0;
1500 	pgdat->kswapd_order = 0;
1501 	pgdat->kswapd_highest_zoneidx = 0;
1502 	pgdat->node_start_pfn = 0;
1503 	for_each_online_cpu(cpu) {
1504 		struct per_cpu_nodestat *p;
1505 
1506 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1507 		memset(p, 0, sizeof(*p));
1508 	}
1509 
1510 	for (z = 0; z < MAX_NR_ZONES; z++)
1511 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
1512 }
1513 #endif
1514 
1515 /*
1516  * Set up the zone data structures:
1517  *   - mark all pages reserved
1518  *   - mark all memory queues empty
1519  *   - clear the memory bitmaps
1520  *
1521  * NOTE: pgdat should get zeroed by caller.
1522  * NOTE: this function is only called during early init.
1523  */
1524 static void __init free_area_init_core(struct pglist_data *pgdat)
1525 {
1526 	enum zone_type j;
1527 	int nid = pgdat->node_id;
1528 
1529 	pgdat_init_internals(pgdat);
1530 	pgdat->per_cpu_nodestats = &boot_nodestats;
1531 
1532 	for (j = 0; j < MAX_NR_ZONES; j++) {
1533 		struct zone *zone = pgdat->node_zones + j;
1534 		unsigned long size, freesize, memmap_pages;
1535 
1536 		size = zone->spanned_pages;
1537 		freesize = zone->present_pages;
1538 
1539 		/*
1540 		 * Adjust freesize so that it accounts for how much memory
1541 		 * is used by this zone for memmap. This affects the watermark
1542 		 * and per-cpu initialisations
1543 		 */
1544 		memmap_pages = calc_memmap_size(size, freesize);
1545 		if (!is_highmem_idx(j)) {
1546 			if (freesize >= memmap_pages) {
1547 				freesize -= memmap_pages;
1548 				if (memmap_pages)
1549 					pr_debug("  %s zone: %lu pages used for memmap\n",
1550 						 zone_names[j], memmap_pages);
1551 			} else
1552 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
1553 					zone_names[j], memmap_pages, freesize);
1554 		}
1555 
1556 		/* Account for reserved pages */
1557 		if (j == 0 && freesize > dma_reserve) {
1558 			freesize -= dma_reserve;
1559 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
1560 		}
1561 
1562 		if (!is_highmem_idx(j))
1563 			nr_kernel_pages += freesize;
1564 		/* Charge for highmem memmap if there are enough kernel pages */
1565 		else if (nr_kernel_pages > memmap_pages * 2)
1566 			nr_kernel_pages -= memmap_pages;
1567 		nr_all_pages += freesize;
1568 
1569 		/*
1570 		 * Set an approximate value for lowmem here; it will be adjusted
1571 		 * when the bootmem allocator frees pages into the buddy system.
1572 		 * And all highmem pages will be managed by the buddy system.
1573 		 */
1574 		zone_init_internals(zone, j, nid, freesize);
1575 
1576 		if (!size)
1577 			continue;
1578 
1579 		set_pageblock_order();
1580 		setup_usemap(zone);
1581 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1582 	}
1583 }
1584 
1585 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
1586 			  phys_addr_t min_addr, int nid, bool exact_nid)
1587 {
1588 	void *ptr;
1589 
1590 	if (exact_nid)
1591 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1592 						   MEMBLOCK_ALLOC_ACCESSIBLE,
1593 						   nid);
1594 	else
1595 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1596 						 MEMBLOCK_ALLOC_ACCESSIBLE,
1597 						 nid);
1598 
1599 	if (ptr && size > 0)
1600 		page_init_poison(ptr, size);
1601 
1602 	return ptr;
1603 }
1604 
1605 #ifdef CONFIG_FLATMEM
1606 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1607 {
1608 	unsigned long __maybe_unused start = 0;
1609 	unsigned long __maybe_unused offset = 0;
1610 
1611 	/* Skip empty nodes */
1612 	if (!pgdat->node_spanned_pages)
1613 		return;
1614 
1615 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1616 	offset = pgdat->node_start_pfn - start;
1617 	/* ia64 gets its own node_mem_map, before this, without bootmem */
1618 	if (!pgdat->node_mem_map) {
1619 		unsigned long size, end;
1620 		struct page *map;
1621 
1622 		/*
1623 		 * The zone's endpoints aren't required to be MAX_ORDER
1624 		 * aligned but the node_mem_map endpoints must be in order
1625 		 * for the buddy allocator to function correctly.
1626 		 */
1627 		end = pgdat_end_pfn(pgdat);
1628 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
1629 		size =  (end - start) * sizeof(struct page);
1630 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1631 				   pgdat->node_id, false);
1632 		if (!map)
1633 			panic("Failed to allocate %ld bytes for node %d memory map\n",
1634 			      size, pgdat->node_id);
1635 		pgdat->node_mem_map = map + offset;
1636 	}
1637 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1638 				__func__, pgdat->node_id, (unsigned long)pgdat,
1639 				(unsigned long)pgdat->node_mem_map);
1640 #ifndef CONFIG_NUMA
1641 	/*
1642 	 * With no DISCONTIG, the global mem_map is just set as node 0's
1643 	 */
1644 	if (pgdat == NODE_DATA(0)) {
1645 		mem_map = NODE_DATA(0)->node_mem_map;
1646 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1647 			mem_map -= offset;
1648 	}
1649 #endif
1650 }
1651 #else
1652 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1653 #endif /* CONFIG_FLATMEM */
1654 
1655 /**
1656  * get_pfn_range_for_nid - Return the start and end page frames for a node
1657  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1658  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1659  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1660  *
1661  * It returns the start and end page frame of a node based on information
1662  * provided by memblock_set_node(). If called for a node
1663  * with no available memory, a warning is printed and the start and end
1664  * PFNs will be 0.
1665  */
1666 void __init get_pfn_range_for_nid(unsigned int nid,
1667 			unsigned long *start_pfn, unsigned long *end_pfn)
1668 {
1669 	unsigned long this_start_pfn, this_end_pfn;
1670 	int i;
1671 
1672 	*start_pfn = -1UL;
1673 	*end_pfn = 0;
1674 
1675 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1676 		*start_pfn = min(*start_pfn, this_start_pfn);
1677 		*end_pfn = max(*end_pfn, this_end_pfn);
1678 	}
1679 
1680 	if (*start_pfn == -1UL)
1681 		*start_pfn = 0;
1682 }
1683 
1684 static void __init free_area_init_node(int nid)
1685 {
1686 	pg_data_t *pgdat = NODE_DATA(nid);
1687 	unsigned long start_pfn = 0;
1688 	unsigned long end_pfn = 0;
1689 
1690 	/* pg_data_t should be reset to zero when it's allocated */
1691 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1692 
1693 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1694 
1695 	pgdat->node_id = nid;
1696 	pgdat->node_start_pfn = start_pfn;
1697 	pgdat->per_cpu_nodestats = NULL;
1698 
1699 	if (start_pfn != end_pfn) {
1700 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1701 			(u64)start_pfn << PAGE_SHIFT,
1702 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1703 	} else {
1704 		pr_info("Initmem setup node %d as memoryless\n", nid);
1705 	}
1706 
1707 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1708 
1709 	alloc_node_mem_map(pgdat);
1710 	pgdat_set_deferred_range(pgdat);
1711 
1712 	free_area_init_core(pgdat);
1713 	lru_gen_init_pgdat(pgdat);
1714 }
1715 
1716 /* Any regular or high memory on that node ? */
1717 static void check_for_memory(pg_data_t *pgdat, int nid)
1718 {
1719 	enum zone_type zone_type;
1720 
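	/*
	 * The first populated zone below ZONE_MOVABLE decides the states:
	 * with HIGHMEM enabled it marks the node N_HIGH_MEMORY, and zones up
	 * to ZONE_NORMAL additionally mark it N_NORMAL_MEMORY.
	 */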
1721 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1722 		struct zone *zone = &pgdat->node_zones[zone_type];
1723 		if (populated_zone(zone)) {
1724 			if (IS_ENABLED(CONFIG_HIGHMEM))
1725 				node_set_state(nid, N_HIGH_MEMORY);
1726 			if (zone_type <= ZONE_NORMAL)
1727 				node_set_state(nid, N_NORMAL_MEMORY);
1728 			break;
1729 		}
1730 	}
1731 }
1732 
1733 #if MAX_NUMNODES > 1
1734 /*
1735  * Figure out the number of possible node ids.
1736  */
1737 void __init setup_nr_node_ids(void)
1738 {
1739 	unsigned int highest;
1740 
1741 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1742 	nr_node_ids = highest + 1;
1743 }
1744 #endif
1745 
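/*
 * Memoryless nodes currently go through the same init path as nodes with
 * memory; this wrapper only makes that intent explicit at the call site.
 */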
1746 static void __init free_area_init_memoryless_node(int nid)
1747 {
1748 	free_area_init_node(nid);
1749 }
1750 
1751 /*
1752  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1753  * such cases we allow max_zone_pfn to be sorted in descending order.
1754  */
1755 static bool arch_has_descending_max_zone_pfns(void)
1756 {
1757 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1758 }
1759 
1760 /**
1761  * free_area_init - Initialise all pg_data_t and zone data
1762  * @max_zone_pfn: an array of max PFNs for each zone
1763  *
1764  * This will call free_area_init_node() for each active node in the system.
1765  * Using the page ranges provided by memblock_set_node(), the size of each
1766  * zone in each node and their holes are calculated. If the maximum PFNs
1767  * of two adjacent zones match, it is assumed that the higher zone is empty.
1768  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1769  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1770  * starts where the previous one ended. For example, ZONE_DMA32 starts
1771  * at arch_max_dma_pfn.
1772  */
1773 void __init free_area_init(unsigned long *max_zone_pfn)
1774 {
1775 	unsigned long start_pfn, end_pfn;
1776 	int i, nid, zone;
1777 	bool descending;
1778 
1779 	/* Record where the zone boundaries are */
1780 	memset(arch_zone_lowest_possible_pfn, 0,
1781 				sizeof(arch_zone_lowest_possible_pfn));
1782 	memset(arch_zone_highest_possible_pfn, 0,
1783 				sizeof(arch_zone_highest_possible_pfn));
1784 
1785 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1786 	descending = arch_has_descending_max_zone_pfns();
1787 
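	/*
	 * Walk the zones in layout order (reversed when the architecture has
	 * descending max_zone_pfn) and give each zone the range from where
	 * the previous zone ended up to its max_zone_pfn; ZONE_MOVABLE is
	 * skipped here and sized separately below.
	 */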
1788 	for (i = 0; i < MAX_NR_ZONES; i++) {
1789 		if (descending)
1790 			zone = MAX_NR_ZONES - i - 1;
1791 		else
1792 			zone = i;
1793 
1794 		if (zone == ZONE_MOVABLE)
1795 			continue;
1796 
1797 		end_pfn = max(max_zone_pfn[zone], start_pfn);
1798 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1799 		arch_zone_highest_possible_pfn[zone] = end_pfn;
1800 
1801 		start_pfn = end_pfn;
1802 	}
1803 
1804 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1805 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1806 	find_zone_movable_pfns_for_nodes();
1807 
1808 	/* Print out the zone ranges */
1809 	pr_info("Zone ranges:\n");
1810 	for (i = 0; i < MAX_NR_ZONES; i++) {
1811 		if (i == ZONE_MOVABLE)
1812 			continue;
1813 		pr_info("  %-8s ", zone_names[i]);
1814 		if (arch_zone_lowest_possible_pfn[i] ==
1815 				arch_zone_highest_possible_pfn[i])
1816 			pr_cont("empty\n");
1817 		else
1818 			pr_cont("[mem %#018Lx-%#018Lx]\n",
1819 				(u64)arch_zone_lowest_possible_pfn[i]
1820 					<< PAGE_SHIFT,
1821 				((u64)arch_zone_highest_possible_pfn[i]
1822 					<< PAGE_SHIFT) - 1);
1823 	}
1824 
1825 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1826 	pr_info("Movable zone start for each node\n");
1827 	for (i = 0; i < MAX_NUMNODES; i++) {
1828 		if (zone_movable_pfn[i])
1829 			pr_info("  Node %d: %#018Lx\n", i,
1830 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1831 	}
1832 
1833 	/*
1834 	 * Print out the early node map, and initialize the
1835 	 * subsection-map relative to active online memory ranges to
1836 	 * enable future "sub-section" extensions of the memory map.
1837 	 */
1838 	pr_info("Early memory node ranges\n");
1839 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1840 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1841 			(u64)start_pfn << PAGE_SHIFT,
1842 			((u64)end_pfn << PAGE_SHIFT) - 1);
1843 		subsection_map_init(start_pfn, end_pfn - start_pfn);
1844 	}
1845 
1846 	/* Initialise every node */
1847 	mminit_verify_pageflags_layout();
1848 	setup_nr_node_ids();
1849 	for_each_node(nid) {
1850 		pg_data_t *pgdat;
1851 
1852 		if (!node_online(nid)) {
1853 			pr_info("Initializing node %d as memoryless\n", nid);
1854 
1855 			/* Allocator not initialized yet */
1856 			pgdat = arch_alloc_nodedata(nid);
1857 			if (!pgdat)
1858 				panic("Cannot allocate %zuB for node %d.\n",
1859 				       sizeof(*pgdat), nid);
1860 			arch_refresh_nodedata(nid, pgdat);
1861 			free_area_init_memoryless_node(nid);
1862 
1863 			/*
1864 			 * We do not want to confuse userspace with sysfs
1865 			 * files/directories for a node without any memory
1866 			 * attached to it. Such a node is therefore neither
1867 			 * marked as N_MEMORY nor marked online, so that no sysfs
1868 			 * hierarchy will be created via register_one_node for
1869 			 * it. The pgdat will get fully initialized by
1870 			 * hotadd_init_pgdat() when memory is hotplugged into
1871 			 * this node.
1872 			 */
1873 			continue;
1874 		}
1875 
1876 		pgdat = NODE_DATA(nid);
1877 		free_area_init_node(nid);
1878 
1879 		/* Any memory on that node */
1880 		if (pgdat->node_present_pages)
1881 			node_set_state(nid, N_MEMORY);
1882 		check_for_memory(pgdat, nid);
1883 	}
1884 
1885 	memmap_init();
1886 
1887 	/* disable hash distribution for systems with a single node */
1888 	fixup_hashdist();
1889 }
1890 
1891 /**
1892  * node_map_pfn_alignment - determine the maximum internode alignment
1893  *
1894  * This function should be called after node map is populated and sorted.
1895  * It calculates the maximum power of two alignment which can distinguish
1896  * all the nodes.
1897  *
1898  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1899  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1900  * nodes are shifted by 256MiB, 256MiB is returned.  Note that if only the last node is
1901  * shifted, 1GiB is enough and this function will indicate so.
1902  *
1903  * This is used to test whether pfn -> nid mapping of the chosen memory
1904  * model has fine enough granularity to avoid incorrect mapping for the
1905  * populated node map.
1906  *
1907  * Return: the determined alignment in pfn's.  0 if there is no alignment
1908  * requirement (single node).
1909  */
1910 unsigned long __init node_map_pfn_alignment(void)
1911 {
1912 	unsigned long accl_mask = 0, last_end = 0;
1913 	unsigned long start, end, mask;
1914 	int last_nid = NUMA_NO_NODE;
1915 	int i, nid;
1916 
1917 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1918 		if (!start || last_nid < 0 || last_nid == nid) {
1919 			last_nid = nid;
1920 			last_end = end;
1921 			continue;
1922 		}
1923 
1924 		/*
1925 		 * Start with a mask granular enough to pinpoint the
1926 		 * start pfn and tick off bits one-by-one until it becomes
1927 		 * too coarse to separate the current node from the last.
1928 		 */
1929 		mask = ~((1 << __ffs(start)) - 1);
1930 		while (mask && last_end <= (start & (mask << 1)))
1931 			mask <<= 1;
1932 
1933 		/* accumulate all internode masks */
1934 		accl_mask |= mask;
1935 	}
1936 
1937 	/* convert mask to number of pages */
1938 	return ~accl_mask + 1;
1939 }
1940 
1941 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1942 static void __init deferred_free_range(unsigned long pfn,
1943 				       unsigned long nr_pages)
1944 {
1945 	struct page *page;
1946 	unsigned long i;
1947 
1948 	if (!nr_pages)
1949 		return;
1950 
1951 	page = pfn_to_page(pfn);
1952 
1953 	/* Free a large naturally-aligned chunk if possible */
1954 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1955 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1956 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1957 		__free_pages_core(page, MAX_ORDER);
1958 		return;
1959 	}
1960 
1961 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1962 		if (pageblock_aligned(pfn))
1963 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1964 		__free_pages_core(page, 0);
1965 	}
1966 }
1967 
1968 /* Completion tracking for deferred_init_memmap() threads */
1969 static atomic_t pgdat_init_n_undone __initdata;
1970 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1971 
1972 static inline void __init pgdat_init_report_one_done(void)
1973 {
1974 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1975 		complete(&pgdat_init_all_done_comp);
1976 }
1977 
1978 /*
1979  * Returns true if the page needs to be initialized or freed to the buddy allocator.
1980  *
1981  * We check whether the current MAX_ORDER block is valid by checking only
1982  * the validity of the head pfn.
1983  */
1984 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1985 {
1986 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
1987 		return false;
1988 	return true;
1989 }
1990 
1991 /*
1992  * Free pages to buddy allocator. Try to free aligned pages in
1993  * MAX_ORDER_NR_PAGES sizes.
1994  */
1995 static void __init deferred_free_pages(unsigned long pfn,
1996 				       unsigned long end_pfn)
1997 {
1998 	unsigned long nr_free = 0;
1999 
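	/*
	 * nr_free tracks the current run of contiguous, not-yet-freed pfns:
	 * an invalid pfn frees the run and resets the count, a MAX_ORDER
	 * boundary frees the run and starts a new one at the boundary pfn,
	 * and the final run is freed after the loop.
	 */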
2000 	for (; pfn < end_pfn; pfn++) {
2001 		if (!deferred_pfn_valid(pfn)) {
2002 			deferred_free_range(pfn - nr_free, nr_free);
2003 			nr_free = 0;
2004 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2005 			deferred_free_range(pfn - nr_free, nr_free);
2006 			nr_free = 1;
2007 		} else {
2008 			nr_free++;
2009 		}
2010 	}
2011 	/* Free the last block of pages to the allocator */
2012 	deferred_free_range(pfn - nr_free, nr_free);
2013 }
2014 
2015 /*
2016  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
2017  * by performing them only once every MAX_ORDER_NR_PAGES.
2018  * Return the number of pages initialized.
2019  */
2020 static unsigned long  __init deferred_init_pages(struct zone *zone,
2021 						 unsigned long pfn,
2022 						 unsigned long end_pfn)
2023 {
2024 	int nid = zone_to_nid(zone);
2025 	unsigned long nr_pages = 0;
2026 	int zid = zone_idx(zone);
2027 	struct page *page = NULL;
2028 
2029 	for (; pfn < end_pfn; pfn++) {
2030 		if (!deferred_pfn_valid(pfn)) {
2031 			page = NULL;
2032 			continue;
2033 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2034 			page = pfn_to_page(pfn);
2035 		} else {
2036 			page++;
2037 		}
2038 		__init_single_page(page, pfn, zid, nid);
2039 		nr_pages++;
2040 	}
2041 	return nr_pages;
2042 }
2043 
2044 /*
2045  * This function is meant to pre-load the iterator for the zone init.
2046  * Specifically, it walks through the ranges until we are caught up to the
2047  * first_init_pfn value and exits there. If we never encounter the value, we
2048  * return false, indicating there are no valid ranges left.
2049  */
2050 static bool __init
2051 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2052 				    unsigned long *spfn, unsigned long *epfn,
2053 				    unsigned long first_init_pfn)
2054 {
2055 	u64 j;
2056 
2057 	/*
2058 	 * Start out by walking through the ranges in this zone that have
2059 	 * already been initialized. We don't need to do anything with them
2060 	 * so we just need to flush them out of the system.
2061 	 */
2062 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2063 		if (*epfn <= first_init_pfn)
2064 			continue;
2065 		if (*spfn < first_init_pfn)
2066 			*spfn = first_init_pfn;
2067 		*i = j;
2068 		return true;
2069 	}
2070 
2071 	return false;
2072 }
2073 
2074 /*
2075  * Initialize and free pages. We do it in two loops: first we initialize
2076  * struct page, then free to buddy allocator, because while we are
2077  * freeing pages we can access pages that are ahead (computing buddy
2078  * page in __free_one_page()).
2079  *
2080  * In order to try and keep some memory in the cache we have the loop
2081  * broken along max page order boundaries. This way we will not cause
2082  * any issues with the buddy page computation.
2083  */
2084 static unsigned long __init
2085 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2086 		       unsigned long *end_pfn)
2087 {
2088 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2089 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2090 	unsigned long nr_pages = 0;
2091 	u64 j = *i;
2092 
2093 	/* First we loop through and initialize the page values */
2094 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2095 		unsigned long t;
2096 
2097 		if (mo_pfn <= *start_pfn)
2098 			break;
2099 
2100 		t = min(mo_pfn, *end_pfn);
2101 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2102 
2103 		if (mo_pfn < *end_pfn) {
2104 			*start_pfn = mo_pfn;
2105 			break;
2106 		}
2107 	}
2108 
2109 	/* Reset values and now loop through freeing pages as needed */
2110 	swap(j, *i);
2111 
2112 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2113 		unsigned long t;
2114 
2115 		if (mo_pfn <= spfn)
2116 			break;
2117 
2118 		t = min(mo_pfn, epfn);
2119 		deferred_free_pages(spfn, t);
2120 
2121 		if (mo_pfn <= epfn)
2122 			break;
2123 	}
2124 
2125 	return nr_pages;
2126 }
2127 
2128 static void __init
2129 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2130 			   void *arg)
2131 {
2132 	unsigned long spfn, epfn;
2133 	struct zone *zone = arg;
2134 	u64 i;
2135 
2136 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2137 
2138 	/*
2139 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2140 	 * can avoid introducing any issues with the buddy allocator.
2141 	 */
2142 	while (spfn < end_pfn) {
2143 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2144 		cond_resched();
2145 	}
2146 }
2147 
2148 /* An arch may override for more concurrency. */
2149 __weak int __init
2150 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2151 {
2152 	return 1;
2153 }
2154 
2155 /* Initialise remaining memory on a node */
2156 static int __init deferred_init_memmap(void *data)
2157 {
2158 	pg_data_t *pgdat = data;
2159 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2160 	unsigned long spfn = 0, epfn = 0;
2161 	unsigned long first_init_pfn, flags;
2162 	unsigned long start = jiffies;
2163 	struct zone *zone;
2164 	int zid, max_threads;
2165 	u64 i;
2166 
2167 	/* Bind memory initialisation thread to a local node if possible */
2168 	if (!cpumask_empty(cpumask))
2169 		set_cpus_allowed_ptr(current, cpumask);
2170 
2171 	pgdat_resize_lock(pgdat, &flags);
2172 	first_init_pfn = pgdat->first_deferred_pfn;
2173 	if (first_init_pfn == ULONG_MAX) {
2174 		pgdat_resize_unlock(pgdat, &flags);
2175 		pgdat_init_report_one_done();
2176 		return 0;
2177 	}
2178 
2179 	/* Sanity check boundaries */
2180 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2181 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2182 	pgdat->first_deferred_pfn = ULONG_MAX;
2183 
2184 	/*
2185 	 * Once we unlock here, the zone cannot be grown anymore. Thus, if an
2186 	 * interrupt thread must allocate this early in boot, the zone must be
2187 	 * pre-grown prior to the start of deferred page initialization.
2188 	 */
2189 	pgdat_resize_unlock(pgdat, &flags);
2190 
2191 	/* Only the highest zone is deferred so find it */
2192 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2193 		zone = pgdat->node_zones + zid;
2194 		if (first_init_pfn < zone_end_pfn(zone))
2195 			break;
2196 	}
2197 
2198 	/* If the zone is empty, somebody else may have already cleared it out */
2199 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2200 						 first_init_pfn))
2201 		goto zone_empty;
2202 
2203 	max_threads = deferred_page_init_max_threads(cpumask);
2204 
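	/*
	 * Hand the remaining ranges to padata in section-aligned chunks; each
	 * chunk is initialised by deferred_init_memmap_chunk(), spread over
	 * up to max_threads threads.
	 */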
2205 	while (spfn < epfn) {
2206 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2207 		struct padata_mt_job job = {
2208 			.thread_fn   = deferred_init_memmap_chunk,
2209 			.fn_arg      = zone,
2210 			.start       = spfn,
2211 			.size        = epfn_align - spfn,
2212 			.align       = PAGES_PER_SECTION,
2213 			.min_chunk   = PAGES_PER_SECTION,
2214 			.max_threads = max_threads,
2215 		};
2216 
2217 		padata_do_multithreaded(&job);
2218 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2219 						    epfn_align);
2220 	}
2221 zone_empty:
2222 	/* Sanity check that the next zone really is unpopulated */
2223 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2224 
2225 	pr_info("node %d deferred pages initialised in %ums\n",
2226 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2227 
2228 	pgdat_init_report_one_done();
2229 	return 0;
2230 }
2231 
2232 /*
2233  * If this zone has deferred pages, try to grow it by initializing enough
2234  * deferred pages to satisfy the allocation specified by order, rounded up to
2235  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2236  * of SECTION_SIZE bytes by initializing struct pages in increments of
2237  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2238  *
2239  * Return true when zone was grown, otherwise return false. We return true even
2240  * when we grow less than requested, to let the caller decide if there are
2241  * enough pages to satisfy the allocation.
2242  *
2243  * Note: We use noinline because this function is needed only during boot, and
2244  * it is called from a __ref function _deferred_grow_zone. This way we are
2245  * making sure that it is not inlined into permanent text section.
2246  */
2247 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2248 {
2249 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2250 	pg_data_t *pgdat = zone->zone_pgdat;
2251 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2252 	unsigned long spfn, epfn, flags;
2253 	unsigned long nr_pages = 0;
2254 	u64 i;
2255 
2256 	/* Only the last zone may have deferred pages */
2257 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2258 		return false;
2259 
2260 	pgdat_resize_lock(pgdat, &flags);
2261 
2262 	/*
2263 	 * If someone grew this zone while we were waiting for the spinlock, return
2264 	 * true, as there might be enough pages already.
2265 	 */
2266 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2267 		pgdat_resize_unlock(pgdat, &flags);
2268 		return true;
2269 	}
2270 
2271 	/* If the zone is empty, somebody else may have already cleared it out */
2272 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2273 						 first_deferred_pfn)) {
2274 		pgdat->first_deferred_pfn = ULONG_MAX;
2275 		pgdat_resize_unlock(pgdat, &flags);
2276 		/* Retry only once. */
2277 		return first_deferred_pfn != ULONG_MAX;
2278 	}
2279 
2280 	/*
2281 	 * Initialize and free pages in MAX_ORDER sized increments so
2282 	 * that we can avoid introducing any issues with the buddy
2283 	 * allocator.
2284 	 */
2285 	while (spfn < epfn) {
2286 		/* update our first deferred PFN for this section */
2287 		first_deferred_pfn = spfn;
2288 
2289 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2290 		touch_nmi_watchdog();
2291 
2292 		/* We should only stop along section boundaries */
2293 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2294 			continue;
2295 
2296 		/* If our quota has been met we can stop here */
2297 		if (nr_pages >= nr_pages_needed)
2298 			break;
2299 	}
2300 
2301 	pgdat->first_deferred_pfn = spfn;
2302 	pgdat_resize_unlock(pgdat, &flags);
2303 
2304 	return nr_pages > 0;
2305 }
2306 
2307 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2308 
2309 #ifdef CONFIG_CMA
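/*
 * Hand one pageblock of the CMA reserved area over to the buddy allocator:
 * clear the PageReserved state left by memblock, reset the page counts,
 * mark the block MIGRATE_CMA and free it, accounting the pages as managed
 * and as CMA pages of their zone.
 */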
2310 void __init init_cma_reserved_pageblock(struct page *page)
2311 {
2312 	unsigned i = pageblock_nr_pages;
2313 	struct page *p = page;
2314 
2315 	do {
2316 		__ClearPageReserved(p);
2317 		set_page_count(p, 0);
2318 	} while (++p, --i);
2319 
2320 	set_pageblock_migratetype(page, MIGRATE_CMA);
2321 	set_page_refcounted(page);
2322 	__free_pages(page, pageblock_order);
2323 
2324 	adjust_managed_page_count(page, pageblock_nr_pages);
2325 	page_zone(page)->cma_pages += pageblock_nr_pages;
2326 }
2327 #endif
2328 
2329 void __init page_alloc_init_late(void)
2330 {
2331 	struct zone *zone;
2332 	int nid;
2333 
2334 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2335 
2336 	/* There will be num_node_state(N_MEMORY) threads */
2337 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2338 	for_each_node_state(nid, N_MEMORY) {
2339 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2340 	}
2341 
2342 	/* Block until all are initialised */
2343 	wait_for_completion(&pgdat_init_all_done_comp);
2344 
2345 	/*
2346 	 * We initialized the rest of the deferred pages.  Permanently disable
2347 	 * on-demand struct page initialization.
2348 	 */
2349 	static_branch_disable(&deferred_pages);
2350 
2351 	/* Reinit limits that are based on free pages after the kernel is up */
2352 	files_maxfiles_init();
2353 #endif
2354 
2355 	buffer_init();
2356 
2357 	/* Discard memblock private memory */
2358 	memblock_discard();
2359 
2360 	for_each_node_state(nid, N_MEMORY)
2361 		shuffle_free_memory(NODE_DATA(nid));
2362 
2363 	for_each_populated_zone(zone)
2364 		set_zone_contiguous(zone);
2365 
2366 	/* Initialize page ext after all struct pages are initialized. */
2367 	if (deferred_struct_pages)
2368 		page_ext_init();
2369 }
2370 
2371 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2372 /*
2373  * Returns the number of pages that the arch has reserved but
2374  * that are not known to alloc_large_system_hash().
2375  */
2376 static unsigned long __init arch_reserved_kernel_pages(void)
2377 {
2378 	return 0;
2379 }
2380 #endif
2381 
2382 /*
2383  * Adaptive scale is meant to reduce sizes of hash tables on large memory
2384  * machines. As memory size is increased the scale is also increased, but at
2385  * a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2386  * quadruples the scale is increased by one, which means the size of the hash
2387  * table only doubles, instead of quadrupling as well.
2388  * Because 32-bit systems cannot have large physical memory, where this scaling
2389  * makes sense, it is disabled on such platforms.
2390  */
2391 #if __BITS_PER_LONG > 32
2392 #define ADAPT_SCALE_BASE	(64ul << 30)
2393 #define ADAPT_SCALE_SHIFT	2
2394 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2395 #endif
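/*
 * A rough worked example of the adaptive scaling (illustrative figures
 * only): with 64GiB of memory the scale is left unchanged, at 256GiB it is
 * bumped by one and at 1TiB by two, so each quadrupling of memory beyond
 * ADAPT_SCALE_BASE only doubles the resulting hash table instead of
 * quadrupling it.
 */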
2396 
2397 /*
2398  * allocate a large system hash table from bootmem
2399  * - it is assumed that the hash table must contain an exact power-of-2
2400  *   quantity of entries
2401  * - limit is the number of hash buckets, not the total allocation size
2402  */
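/*
 * A minimal usage sketch (the identifiers below are hypothetical, loosely
 * modelled on how boot-time caches such as the inode hash size their
 * tables):
 *
 *	example_table =
 *		alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0, 14, HASH_ZERO,
 *					&example_shift, &example_mask,
 *					0, 0);
 *
 * Passing numentries == 0 lets the table be sized from nr_kernel_pages at
 * roughly one bucket per 2^14 bytes of low memory, HASH_ZERO returns a
 * zeroed table, and the shift/mask outputs describe the bucket count that
 * was actually allocated.
 */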
2403 void *__init alloc_large_system_hash(const char *tablename,
2404 				     unsigned long bucketsize,
2405 				     unsigned long numentries,
2406 				     int scale,
2407 				     int flags,
2408 				     unsigned int *_hash_shift,
2409 				     unsigned int *_hash_mask,
2410 				     unsigned long low_limit,
2411 				     unsigned long high_limit)
2412 {
2413 	unsigned long long max = high_limit;
2414 	unsigned long log2qty, size;
2415 	void *table;
2416 	gfp_t gfp_flags;
2417 	bool virt;
2418 	bool huge;
2419 
2420 	/* allow the kernel cmdline to have a say */
2421 	if (!numentries) {
2422 		/* round applicable memory size up to nearest megabyte */
2423 		numentries = nr_kernel_pages;
2424 		numentries -= arch_reserved_kernel_pages();
2425 
2426 		/* It isn't necessary when PAGE_SIZE >= 1MB */
2427 		if (PAGE_SIZE < SZ_1M)
2428 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2429 
2430 #if __BITS_PER_LONG > 32
2431 		if (!high_limit) {
2432 			unsigned long adapt;
2433 
2434 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2435 			     adapt <<= ADAPT_SCALE_SHIFT)
2436 				scale++;
2437 		}
2438 #endif
2439 
2440 		/* limit to 1 bucket per 2^scale bytes of low memory */
2441 		if (scale > PAGE_SHIFT)
2442 			numentries >>= (scale - PAGE_SHIFT);
2443 		else
2444 			numentries <<= (PAGE_SHIFT - scale);
2445 
2446 		/* Make sure we've got at least a 0-order allocation. */
2447 		if (unlikely(flags & HASH_SMALL)) {
2448 			/* Makes no sense without HASH_EARLY */
2449 			WARN_ON(!(flags & HASH_EARLY));
2450 			if (!(numentries >> *_hash_shift)) {
2451 				numentries = 1UL << *_hash_shift;
2452 				BUG_ON(!numentries);
2453 			}
2454 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2455 			numentries = PAGE_SIZE / bucketsize;
2456 	}
2457 	numentries = roundup_pow_of_two(numentries);
2458 
2459 	/* limit allocation size to 1/16 total memory by default */
2460 	if (max == 0) {
2461 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2462 		do_div(max, bucketsize);
2463 	}
2464 	max = min(max, 0x80000000ULL);
2465 
2466 	if (numentries < low_limit)
2467 		numentries = low_limit;
2468 	if (numentries > max)
2469 		numentries = max;
2470 
2471 	log2qty = ilog2(numentries);
2472 
2473 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
2474 	do {
2475 		virt = false;
2476 		size = bucketsize << log2qty;
2477 		if (flags & HASH_EARLY) {
2478 			if (flags & HASH_ZERO)
2479 				table = memblock_alloc(size, SMP_CACHE_BYTES);
2480 			else
2481 				table = memblock_alloc_raw(size,
2482 							   SMP_CACHE_BYTES);
2483 		} else if (get_order(size) > MAX_ORDER || hashdist) {
2484 			table = vmalloc_huge(size, gfp_flags);
2485 			virt = true;
2486 			if (table)
2487 				huge = is_vm_area_hugepages(table);
2488 		} else {
2489 			/*
2490 			 * If bucketsize is not a power of two, we may free
2491 			 * some pages at the end of the hash table, which
2492 			 * alloc_pages_exact() does automatically.
2493 			 */
2494 			table = alloc_pages_exact(size, gfp_flags);
2495 			kmemleak_alloc(table, size, 1, gfp_flags);
2496 		}
2497 	} while (!table && size > PAGE_SIZE && --log2qty);
2498 
2499 	if (!table)
2500 		panic("Failed to allocate %s hash table\n", tablename);
2501 
2502 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2503 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2504 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2505 
2506 	if (_hash_shift)
2507 		*_hash_shift = log2qty;
2508 	if (_hash_mask)
2509 		*_hash_mask = (1 << log2qty) - 1;
2510 
2511 	return table;
2512 }
2513 
2514 /**
2515  * set_dma_reserve - set the specified number of pages reserved in the first zone
2516  * @new_dma_reserve: The number of pages to mark reserved
2517  *
2518  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2519  * In the DMA zone, a significant percentage may be consumed by kernel image
2520  * and other unfreeable allocations which can skew the watermarks badly. This
2521  * function may optionally be used to account for unfreeable pages in the
2522  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2523  * smaller per-cpu batchsize.
2524  */
2525 void __init set_dma_reserve(unsigned long new_dma_reserve)
2526 {
2527 	dma_reserve = new_dma_reserve;
2528 }
2529 
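/*
 * Release an early (memblock) range to the buddy allocator, unless the
 * page's initialisation has been deferred or KMSAN is keeping the pages
 * for its metadata.
 */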
2530 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2531 							unsigned int order)
2532 {
2533 	if (!early_page_initialised(pfn))
2534 		return;
2535 	if (!kmsan_memblock_free_pages(page, order)) {
2536 		/* KMSAN will take care of these pages. */
2537 		return;
2538 	}
2539 	__free_pages_core(page, order);
2540 }
2541 
2542 static bool _init_on_alloc_enabled_early __read_mostly
2543 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2544 static int __init early_init_on_alloc(char *buf)
2545 {
2546 
2547 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2548 }
2549 early_param("init_on_alloc", early_init_on_alloc);
2550 
2551 static bool _init_on_free_enabled_early __read_mostly
2552 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2553 static int __init early_init_on_free(char *buf)
2554 {
2555 	return kstrtobool(buf, &_init_on_free_enabled_early);
2556 }
2557 early_param("init_on_free", early_init_on_free);
2558 
2559 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2560 
2561 /*
2562  * Enable static keys related to various memory debugging and hardening options.
2563  * Some override others, and depend on early params that are evaluated in the
2564  * order of appearance. So we need to first gather the full picture of what was
2565  * enabled, and then make decisions.
2566  */
2567 static void __init mem_debugging_and_hardening_init(void)
2568 {
2569 	bool page_poisoning_requested = false;
2570 	bool want_check_pages = false;
2571 
2572 #ifdef CONFIG_PAGE_POISONING
2573 	/*
2574 	 * Page poisoning is debug page alloc for some arches. If
2575 	 * either of those options is enabled, enable poisoning.
2576 	 */
2577 	if (page_poisoning_enabled() ||
2578 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2579 	      debug_pagealloc_enabled())) {
2580 		static_branch_enable(&_page_poisoning_enabled);
2581 		page_poisoning_requested = true;
2582 		want_check_pages = true;
2583 	}
2584 #endif
2585 
2586 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2587 	    page_poisoning_requested) {
2588 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2589 			"will take precedence over init_on_alloc and init_on_free\n");
2590 		_init_on_alloc_enabled_early = false;
2591 		_init_on_free_enabled_early = false;
2592 	}
2593 
2594 	if (_init_on_alloc_enabled_early) {
2595 		want_check_pages = true;
2596 		static_branch_enable(&init_on_alloc);
2597 	} else {
2598 		static_branch_disable(&init_on_alloc);
2599 	}
2600 
2601 	if (_init_on_free_enabled_early) {
2602 		want_check_pages = true;
2603 		static_branch_enable(&init_on_free);
2604 	} else {
2605 		static_branch_disable(&init_on_free);
2606 	}
2607 
2608 	if (IS_ENABLED(CONFIG_KMSAN) &&
2609 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2610 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2611 
2612 #ifdef CONFIG_DEBUG_PAGEALLOC
2613 	if (debug_pagealloc_enabled()) {
2614 		want_check_pages = true;
2615 		static_branch_enable(&_debug_pagealloc_enabled);
2616 
2617 		if (debug_guardpage_minorder())
2618 			static_branch_enable(&_debug_guardpage_enabled);
2619 	}
2620 #endif
2621 
2622 	/*
2623 	 * Any page debugging or hardening option also enables sanity checking
2624 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2625 	 * enabled already.
2626 	 */
2627 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2628 		static_branch_enable(&check_pages_enabled);
2629 }
2630 
2631 /* Report memory auto-initialization states for this boot. */
2632 static void __init report_meminit(void)
2633 {
2634 	const char *stack;
2635 
2636 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2637 		stack = "all(pattern)";
2638 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2639 		stack = "all(zero)";
2640 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2641 		stack = "byref_all(zero)";
2642 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2643 		stack = "byref(zero)";
2644 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2645 		stack = "__user(zero)";
2646 	else
2647 		stack = "off";
2648 
2649 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2650 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2651 		want_init_on_free() ? "on" : "off");
2652 	if (want_init_on_free())
2653 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2654 }
2655 
2656 static void __init mem_init_print_info(void)
2657 {
2658 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2659 	unsigned long init_code_size, init_data_size;
2660 
2661 	physpages = get_num_physpages();
2662 	codesize = _etext - _stext;
2663 	datasize = _edata - _sdata;
2664 	rosize = __end_rodata - __start_rodata;
2665 	bss_size = __bss_stop - __bss_start;
2666 	init_data_size = __init_end - __init_begin;
2667 	init_code_size = _einittext - _sinittext;
2668 
2669 	/*
2670 	 * Detect special cases and adjust section sizes accordingly:
2671 	 * 1) .init.* may be embedded into .data sections
2672 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2673 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2674 	 * 3) .rodata.* may be embedded into .text or .data sections.
2675 	 */
2676 #define adj_init_size(start, end, size, pos, adj) \
2677 	do { \
2678 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2679 			size -= adj; \
2680 	} while (0)
2681 
2682 	adj_init_size(__init_begin, __init_end, init_data_size,
2683 		     _sinittext, init_code_size);
2684 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2685 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2686 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2687 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2688 
2689 #undef	adj_init_size
2690 
2691 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2692 #ifdef	CONFIG_HIGHMEM
2693 		", %luK highmem"
2694 #endif
2695 		")\n",
2696 		K(nr_free_pages()), K(physpages),
2697 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2698 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2699 		K(physpages - totalram_pages() - totalcma_pages),
2700 		K(totalcma_pages)
2701 #ifdef	CONFIG_HIGHMEM
2702 		, K(totalhigh_pages())
2703 #endif
2704 		);
2705 }
2706 
2707 /*
2708  * Set up kernel memory allocators
2709  */
2710 void __init mm_core_init(void)
2711 {
2712 	/* Initializations relying on SMP setup */
2713 	build_all_zonelists(NULL);
2714 	page_alloc_init_cpuhp();
2715 
2716 	/*
2717 	 * page_ext requires contiguous pages bigger
2718 	 * than MAX_ORDER, unless SPARSEMEM is used.
2719 	 */
2720 	page_ext_init_flatmem();
2721 	mem_debugging_and_hardening_init();
2722 	kfence_alloc_pool();
2723 	report_meminit();
2724 	kmsan_init_shadow();
2725 	stack_depot_early_init();
2726 	mem_init();
2727 	mem_init_print_info();
2728 	kmem_cache_init();
2729 	/*
2730 	 * page_owner must be initialized after buddy is ready, and also after
2731 	 * slab is ready so that stack_depot_init() works properly
2732 	 */
2733 	page_ext_init_flatmem_late();
2734 	kmemleak_init();
2735 	ptlock_cache_init();
2736 	pgtable_cache_init();
2737 	debug_objects_mem_init();
2738 	vmalloc_init();
2739 	/* If struct page init was not deferred, init page_ext now, as vmap is fully initialized */
2740 	if (!deferred_struct_pages)
2741 		page_ext_init();
2742 	/* Should be run before the first non-init thread is created */
2743 	init_espfix_bsp();
2744 	/* Should be run after espfix64 is set up. */
2745 	pti_init();
2746 	kmsan_init_runtime();
2747 	mm_cache_init();
2748 }
2749