// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
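
/*
 * Example output with mminit_loglevel >= MMINIT_VERIFY (values are
 * illustrative; a single-node x86-64 machine is assumed):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA
 */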

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
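
/*
 * For reference, the layout checked above packs fields into page->flags
 * from the most significant bit downwards (fields with zero width are
 * simply skipped), roughly:
 *
 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | ... | FLAGS |
 *
 * which is why the BUG_ON()s above walk "shift" down by each field's
 * width and compare it against the corresponding *_PGSHIFT.
 */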

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();
	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention on the percpu_counter
	 * vm_committed_as; the upper limit is INT_MAX in either case.
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
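
/*
 * Worked example (hypothetical machine, 4KiB pages assumed): with 16GiB
 * of RAM (4194304 pages) and 8 present CPUs, the floor is
 * max(8 * 2, 32) = 32 and the memory-sized term is
 * 4194304 / 8 / 256 = 2048 for OVERCOMMIT_NEVER or
 * 4194304 / 8 / 4 = 131072 otherwise, so vm_committed_as_batch becomes
 * 2048 or 131072 respectively.
 */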

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
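
/*
 * Illustrative parses (hypothetical command lines): "kernelcore=512M"
 * takes the memparse() branch and stores 512MiB >> PAGE_SHIFT pages in
 * *core (131072 with 4KiB pages), while "kernelcore=30%" stores 30 in
 * *percent and leaves *core untouched.
 */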

bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in
 * monotonically increasing memory addresses, so that the "highest"
 * populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;
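	/*
	 * E.g. (hypothetical numbers): totalpages = 1048576 and
	 * kernelcore=20% give (1048576 * 100 * 20) / 10000 = 209715
	 * pages, i.e. 20% of the total rounded down.
	 */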

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
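
/*
 * Worked example (hypothetical layout, 4KiB pages assumed): two nodes of
 * 4GiB (1048576 pages) each and kernelcore=2G (524288 pages) give
 * kernelcore_node = 524288 / 2 = 262144 pages, so zone_movable_pfn[]
 * lands roughly 1GiB into each node and the remaining 3GiB per node is
 * handed to ZONE_MOVABLE.
 */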

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_NUMA
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}

int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{
	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * prev_end_pfn is a static that holds the end of the previous zone.
	 * No locking is needed because this is called very early in boot,
	 * before smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
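
/*
 * As a rough illustration (x86-64 defaults assumed, where
 * PAGES_PER_SECTION is 32768 with 4KiB pages): only about the first
 * 128MiB of the node's highest zone is initialised here at boot;
 * everything from the next section-aligned pfn onwards is left to the
 * deferred init threads.
 */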

static void __meminit init_reserved_page(unsigned long pfn, int nid)
{
	pg_data_t *pgdat;
	int zid;

	if (early_page_initialised(pfn, nid))
		return;

	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn, int nid)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void init_reserved_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start,
				      phys_addr_t end, int nid)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn, nid);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}

/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
			node, zone_names[zone], pgcnt);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function.  They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}

static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}

static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for the hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway.
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}

#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{

	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (pageblock_aligned(pfn)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		cond_resched();
	}

	/*
	 * ZONE_DEVICE pages are released directly to the driver page allocator
	 * which will set the page count to 1 when allocating the page.
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_COHERENT)
		set_page_count(page, 0);
}

/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the absence
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
{
	if (!vmemmap_can_optimize(altmap, pgmap))
		return pgmap_vmemmap_nr(pgmap);

	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
}
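
/*
 * E.g. with 4KiB pages and a 64-byte struct page (typical, but config
 * dependent), PAGE_SIZE / sizeof(struct page) is 64, so an optimized
 * vmemmap only needs VMEMMAP_RESERVE_NR * 64 unique struct pages per
 * compound page instead of the full pgmap_vmemmap_nr(pgmap).
 */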

static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores important compound page info.
		 * Call prep_compound_head() after the first tail page has
		 * been initialized, to not have the data overwritten.
		 */
		if (pfn == head_pfn + 1)
			prep_compound_head(head, order);
	}
}

void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pgmap));
	}

	pr_debug("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}
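
/*
 * For instance (hypothetical pgmap, 4KiB pages assumed): with
 * pgmap->vmemmap_shift == 9, pfns_per_compound is 512 and the loop
 * above initialises the device memmap in 2MiB compound chunks rather
 * than page by page.
 */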
#endif

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
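
/*
 * Quick example (made-up ranges): for a request spanning pfns [0, 1000)
 * with memblock.memory holding [0, 200) and [500, 1000), nr_absent
 * starts at 1000 and drops to 1000 - 200 - 500 = 300, the size of the
 * hole [200, 500).
 */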

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long zone_start_pfn,
					unsigned long zone_end_pfn)
{
	unsigned long nr_absent;

	/* zone is empty, we don't have any absent pages */
	if (zone_start_pfn == zone_end_pfn)
		return 0;

	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
					   zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}

static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
		z->present_pages = 0;
#if defined(CONFIG_MEMORY_HOTPLUG)
		z->present_early_pages = 0;
#endif
	}

	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;
	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
}

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   zone_start_pfn,
						   zone_end_pfn);

		real_size = spanned - absent;

		if (spanned)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = spanned;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += spanned;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}

static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated region may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
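
/*
 * Rough sizing (64-byte struct page and 4KiB pages assumed): a zone
 * spanning 1048576 pages needs PAGE_ALIGN(1048576 * 64) >> PAGE_SHIFT =
 * 16384 memmap pages, i.e. about 1.6% of the zone.
 */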

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif

static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);
	pgdat_kswapd_lock_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
		init_waitqueue_head(&pgdat->reclaim_wait[i]);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}

#ifdef CONFIG_UNACCEPTED_MEMORY
	INIT_LIST_HEAD(&zone->unaccepted_pages);
#endif
}

void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up, then allot NR_PAGEBLOCK_BITS worth of bits per pageblock, round what
 * is now in bits up to the nearest unsigned long in bits, and return the
 * result in bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
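
/*
 * Example (x86-64-ish defaults assumed: pageblock_order 9, hence
 * pageblock_nr_pages 512, and NR_PAGEBLOCK_BITS 4): an aligned zone of
 * 1048576 pages has 2048 pageblocks, so usemap_size() returns
 * 2048 * 4 = 8192 bits = 1024 bytes.
 */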

static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order = MAX_ORDER;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/* Don't let pageblocks exceed the maximum allocation granularity. */
	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
		order = HUGETLB_PAGE_ORDER;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
{
	int nid = pgdat->node_id;
	enum zone_type z;
	int cpu;

	pgdat_init_internals(pgdat);

	if (pgdat->per_cpu_nodestats == &boot_nodestats)
		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * Reset the nr_zones, order and highest_zoneidx before reuse.
	 * Note that kswapd will init kswapd_highest_zoneidx properly
	 * when it starts in the near future.
	 */
	pgdat->nr_zones = 0;
	pgdat->kswapd_order = 0;
	pgdat->kswapd_highest_zoneidx = 0;
	pgdat->node_start_pfn = 0;
	pgdat->node_present_pages = 0;

	for_each_online_cpu(cpu) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
		memset(p, 0, sizeof(*p));
	}

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages and managed_pages because they will
	 * be updated in online_pages() and offline_pages().
	 */
	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		zone->present_pages = 0;
		zone_init_internals(zone, z, nid, 0);
	}
}
#endif

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					pr_debug("  %s zone: %lu pages used for memmap\n",
						 zone_names[j], memmap_pages);
			} else
				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone_init_internals(zone, j, nid, freesize);

		if (!size)
			continue;

		setup_usemap(zone);
		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
	}
}

void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
{
	void *ptr;

	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long __maybe_unused start = 0;
	unsigned long __maybe_unused offset = 0;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned, but the node_mem_map endpoints must be, in
		 * order for the buddy allocator to function correctly.
		 */
1645 		end = pgdat_end_pfn(pgdat);
1646 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
1647 		size =  (end - start) * sizeof(struct page);
1648 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1649 				   pgdat->node_id, false);
1650 		if (!map)
1651 			panic("Failed to allocate %ld bytes for node %d memory map\n",
1652 			      size, pgdat->node_id);
1653 		pgdat->node_mem_map = map + offset;
1654 	}
1655 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1656 				__func__, pgdat->node_id, (unsigned long)pgdat,
1657 				(unsigned long)pgdat->node_mem_map);
1658 #ifndef CONFIG_NUMA
1659 	/*
1660 	 * With no DISCONTIG, the global mem_map is just set as node 0's
1661 	 */
1662 	if (pgdat == NODE_DATA(0)) {
1663 		mem_map = NODE_DATA(0)->node_mem_map;
1664 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1665 			mem_map -= offset;
1666 	}
1667 #endif
1668 }
1669 #else
1670 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1671 #endif /* CONFIG_FLATMEM */
1672 
1673 /**
1674  * get_pfn_range_for_nid - Return the start and end page frames for a node
1675  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1676  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1677  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1678  *
1679  * It returns the start and end page frame of a node based on information
1680  * provided by memblock_set_node(). If called for a node
1681  * with no available memory, the start and end PFNs will be 0.
1682  */
1683 void __init get_pfn_range_for_nid(unsigned int nid,
1684 			unsigned long *start_pfn, unsigned long *end_pfn)
1685 {
1686 	unsigned long this_start_pfn, this_end_pfn;
1687 	int i;
1688 
1689 	*start_pfn = -1UL;
1690 	*end_pfn = 0;
1691 
1692 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1693 		*start_pfn = min(*start_pfn, this_start_pfn);
1694 		*end_pfn = max(*end_pfn, this_end_pfn);
1695 	}
1696 
1697 	if (*start_pfn == -1UL)
1698 		*start_pfn = 0;
1699 }
1700 
1701 static void __init free_area_init_node(int nid)
1702 {
1703 	pg_data_t *pgdat = NODE_DATA(nid);
1704 	unsigned long start_pfn = 0;
1705 	unsigned long end_pfn = 0;
1706 
1707 	/* pg_data_t should be reset to zero when it's allocated */
1708 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1709 
1710 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1711 
1712 	pgdat->node_id = nid;
1713 	pgdat->node_start_pfn = start_pfn;
1714 	pgdat->per_cpu_nodestats = NULL;
1715 
1716 	if (start_pfn != end_pfn) {
1717 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1718 			(u64)start_pfn << PAGE_SHIFT,
1719 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1720 
1721 		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1722 	} else {
1723 		pr_info("Initmem setup node %d as memoryless\n", nid);
1724 
1725 		reset_memoryless_node_totalpages(pgdat);
1726 	}
1727 
1728 	alloc_node_mem_map(pgdat);
1729 	pgdat_set_deferred_range(pgdat);
1730 
1731 	free_area_init_core(pgdat);
1732 	lru_gen_init_pgdat(pgdat);
1733 }
1734 
1735 /* Any regular or high memory on this node? */
1736 static void __init check_for_memory(pg_data_t *pgdat)
1737 {
1738 	enum zone_type zone_type;
1739 
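	/*
	 * Scan the zones below ZONE_MOVABLE; the first populated one decides
	 * which node memory states are set.
	 */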
1740 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1741 		struct zone *zone = &pgdat->node_zones[zone_type];
1742 		if (populated_zone(zone)) {
1743 			if (IS_ENABLED(CONFIG_HIGHMEM))
1744 				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1745 			if (zone_type <= ZONE_NORMAL)
1746 				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1747 			break;
1748 		}
1749 	}
1750 }
1751 
1752 #if MAX_NUMNODES > 1
1753 /*
1754  * Figure out the number of possible node ids.
1755  */
1756 void __init setup_nr_node_ids(void)
1757 {
1758 	unsigned int highest;
1759 
1760 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1761 	nr_node_ids = highest + 1;
1762 }
1763 #endif
1764 
1765 /*
1766  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1767  * such cases we allow max_zone_pfn to be sorted in descending order.
1768  */
1769 static bool arch_has_descending_max_zone_pfns(void)
1770 {
1771 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1772 }
1773 
1774 /**
1775  * free_area_init - Initialise all pg_data_t and zone data
1776  * @max_zone_pfn: an array of max PFNs for each zone
1777  *
1778  * This will call free_area_init_node() for each active node in the system.
1779  * Using the page ranges provided by memblock_set_node(), the size of each
1780  * zone in each node and their holes is calculated. If the maximum PFN
1781  * between two adjacent zones match, it is assumed that the zone is empty.
1782  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1783  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1784  * starts where the previous one ended. For example, ZONE_DMA32 starts
1785  * at arch_max_dma_pfn.
1786  */
1787 void __init free_area_init(unsigned long *max_zone_pfn)
1788 {
1789 	unsigned long start_pfn, end_pfn;
1790 	int i, nid, zone;
1791 	bool descending;
1792 
1793 	/* Record where the zone boundaries are */
1794 	memset(arch_zone_lowest_possible_pfn, 0,
1795 				sizeof(arch_zone_lowest_possible_pfn));
1796 	memset(arch_zone_highest_possible_pfn, 0,
1797 				sizeof(arch_zone_highest_possible_pfn));
1798 
1799 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1800 	descending = arch_has_descending_max_zone_pfns();
1801 
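	/*
	 * Convert the cumulative max_zone_pfn[] array into per-zone
	 * [lowest, highest] pfn ranges; each zone starts where the previous
	 * one ended.
	 */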
1802 	for (i = 0; i < MAX_NR_ZONES; i++) {
1803 		if (descending)
1804 			zone = MAX_NR_ZONES - i - 1;
1805 		else
1806 			zone = i;
1807 
1808 		if (zone == ZONE_MOVABLE)
1809 			continue;
1810 
1811 		end_pfn = max(max_zone_pfn[zone], start_pfn);
1812 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1813 		arch_zone_highest_possible_pfn[zone] = end_pfn;
1814 
1815 		start_pfn = end_pfn;
1816 	}
1817 
1818 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1819 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1820 	find_zone_movable_pfns_for_nodes();
1821 
1822 	/* Print out the zone ranges */
1823 	pr_info("Zone ranges:\n");
1824 	for (i = 0; i < MAX_NR_ZONES; i++) {
1825 		if (i == ZONE_MOVABLE)
1826 			continue;
1827 		pr_info("  %-8s ", zone_names[i]);
1828 		if (arch_zone_lowest_possible_pfn[i] ==
1829 				arch_zone_highest_possible_pfn[i])
1830 			pr_cont("empty\n");
1831 		else
1832 			pr_cont("[mem %#018Lx-%#018Lx]\n",
1833 				(u64)arch_zone_lowest_possible_pfn[i]
1834 					<< PAGE_SHIFT,
1835 				((u64)arch_zone_highest_possible_pfn[i]
1836 					<< PAGE_SHIFT) - 1);
1837 	}
1838 
1839 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1840 	pr_info("Movable zone start for each node\n");
1841 	for (i = 0; i < MAX_NUMNODES; i++) {
1842 		if (zone_movable_pfn[i])
1843 			pr_info("  Node %d: %#018Lx\n", i,
1844 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1845 	}
1846 
1847 	/*
1848 	 * Print out the early node map, and initialize the
1849 	 * subsection-map relative to active online memory ranges to
1850 	 * enable future "sub-section" extensions of the memory map.
1851 	 */
1852 	pr_info("Early memory node ranges\n");
1853 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1854 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1855 			(u64)start_pfn << PAGE_SHIFT,
1856 			((u64)end_pfn << PAGE_SHIFT) - 1);
1857 		subsection_map_init(start_pfn, end_pfn - start_pfn);
1858 	}
1859 
1860 	/* Initialise every node */
1861 	mminit_verify_pageflags_layout();
1862 	setup_nr_node_ids();
1863 	set_pageblock_order();
1864 
1865 	for_each_node(nid) {
1866 		pg_data_t *pgdat;
1867 
1868 		if (!node_online(nid)) {
1869 			pr_info("Initializing node %d as memoryless\n", nid);
1870 
1871 			/* Allocator not initialized yet */
1872 			pgdat = arch_alloc_nodedata(nid);
1873 			if (!pgdat)
1874 				panic("Cannot allocate %zuB for node %d.\n",
1875 				       sizeof(*pgdat), nid);
1876 			arch_refresh_nodedata(nid, pgdat);
1877 			free_area_init_node(nid);
1878 
1879 			/*
1880 			 * We do not want to confuse userspace with sysfs
1881 			 * files/directories for a node without any memory
1882 			 * attached to it, so this node is neither marked as
1883 			 * N_MEMORY nor marked online, so that no sysfs
1884 			 * hierarchy will be created via register_one_node for
1885 			 * it. The pgdat will get fully initialized by
1886 			 * hotadd_init_pgdat() when memory is hotplugged into
1887 			 * this node.
1888 			 */
1889 			continue;
1890 		}
1891 
1892 		pgdat = NODE_DATA(nid);
1893 		free_area_init_node(nid);
1894 
1895 		/* Any memory on that node */
1896 		if (pgdat->node_present_pages)
1897 			node_set_state(nid, N_MEMORY);
1898 		check_for_memory(pgdat);
1899 	}
1900 
1901 	memmap_init();
1902 
1903 	/* disable hash distribution for systems with a single node */
1904 	fixup_hashdist();
1905 }
1906 
1907 /**
1908  * node_map_pfn_alignment - determine the maximum internode alignment
1909  *
1910  * This function should be called after node map is populated and sorted.
1911  * It calculates the maximum power of two alignment which can distinguish
1912  * all the nodes.
1913  *
1914  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1915  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1916  * nodes are shifted by 256MiB, it would indicate 256MiB.  Note that if only
1917  * the last node is shifted, 1GiB is enough and this function will indicate so.
1918  *
1919  * This is used to test whether pfn -> nid mapping of the chosen memory
1920  * model has fine enough granularity to avoid incorrect mapping for the
1921  * populated node map.
1922  *
1923  * Return: the determined alignment in pfn's.  0 if there is no alignment
1924  * requirement (single node).
1925  */
1926 unsigned long __init node_map_pfn_alignment(void)
1927 {
1928 	unsigned long accl_mask = 0, last_end = 0;
1929 	unsigned long start, end, mask;
1930 	int last_nid = NUMA_NO_NODE;
1931 	int i, nid;
1932 
1933 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1934 		if (!start || last_nid < 0 || last_nid == nid) {
1935 			last_nid = nid;
1936 			last_end = end;
1937 			continue;
1938 		}
1939 
1940 		/*
1941 		 * Start with a mask granular enough to pin-point to the
1942 		 * start pfn and tick off bits one-by-one until it becomes
1943 		 * too coarse to separate the current node from the last.
1944 		 */
1945 		mask = ~((1UL << __ffs(start)) - 1);
1946 		while (mask && last_end <= (start & (mask << 1)))
1947 			mask <<= 1;
1948 
1949 		/* accumulate all internode masks */
1950 		accl_mask |= mask;
1951 	}
1952 
1953 	/* convert mask to number of pages */
1954 	return ~accl_mask + 1;
1955 }
1956 
1957 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1958 static void __init deferred_free_range(unsigned long pfn,
1959 				       unsigned long nr_pages)
1960 {
1961 	struct page *page;
1962 	unsigned long i;
1963 
1964 	if (!nr_pages)
1965 		return;
1966 
1967 	page = pfn_to_page(pfn);
1968 
1969 	/* Free a large naturally-aligned chunk if possible */
1970 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1971 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1972 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1973 		__free_pages_core(page, MAX_ORDER);
1974 		return;
1975 	}
1976 
1977 	/* Accept chunks smaller than MAX_ORDER upfront */
1978 	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
1979 
1980 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1981 		if (pageblock_aligned(pfn))
1982 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1983 		__free_pages_core(page, 0);
1984 	}
1985 }
1986 
1987 /* Completion tracking for deferred_init_memmap() threads */
1988 static atomic_t pgdat_init_n_undone __initdata;
1989 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1990 
1991 static inline void __init pgdat_init_report_one_done(void)
1992 {
1993 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1994 		complete(&pgdat_init_all_done_comp);
1995 }
1996 
1997 /*
1998  * Returns true if the page needs to be initialized or freed to the buddy allocator.
1999  *
2000  * We check whether the current MAX_ORDER block is valid by checking only
2001  * the validity of its head pfn.
2002  */
2003 static inline bool __init deferred_pfn_valid(unsigned long pfn)
2004 {
2005 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
2006 		return false;
2007 	return true;
2008 }
2009 
2010 /*
2011  * Free pages to the buddy allocator. Try to free aligned chunks of
2012  * MAX_ORDER_NR_PAGES pages.
2013  */
2014 static void __init deferred_free_pages(unsigned long pfn,
2015 				       unsigned long end_pfn)
2016 {
2017 	unsigned long nr_free = 0;
2018 
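	/*
	 * Accumulate runs of valid pfns in nr_free and flush each run to the
	 * buddy allocator when an invalid pfn or a MAX_ORDER boundary is hit.
	 */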
2019 	for (; pfn < end_pfn; pfn++) {
2020 		if (!deferred_pfn_valid(pfn)) {
2021 			deferred_free_range(pfn - nr_free, nr_free);
2022 			nr_free = 0;
2023 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2024 			deferred_free_range(pfn - nr_free, nr_free);
2025 			nr_free = 1;
2026 		} else {
2027 			nr_free++;
2028 		}
2029 	}
2030 	/* Free the last block of pages to allocator */
2031 	deferred_free_range(pfn - nr_free, nr_free);
2032 }
2033 
2034 /*
2035  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
2036  * by performing them only once every MAX_ORDER_NR_PAGES pages.
2037  * Return number of pages initialized.
2038  */
2039 static unsigned long  __init deferred_init_pages(struct zone *zone,
2040 						 unsigned long pfn,
2041 						 unsigned long end_pfn)
2042 {
2043 	int nid = zone_to_nid(zone);
2044 	unsigned long nr_pages = 0;
2045 	int zid = zone_idx(zone);
2046 	struct page *page = NULL;
2047 
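	/*
	 * Walk the range, caching the struct page pointer and recomputing it
	 * via pfn_to_page() only across MAX_ORDER boundaries or after a hole.
	 */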
2048 	for (; pfn < end_pfn; pfn++) {
2049 		if (!deferred_pfn_valid(pfn)) {
2050 			page = NULL;
2051 			continue;
2052 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2053 			page = pfn_to_page(pfn);
2054 		} else {
2055 			page++;
2056 		}
2057 		__init_single_page(page, pfn, zid, nid);
2058 		nr_pages++;
2059 	}
2060 	return nr_pages;
2061 }
2062 
2063 /*
2064  * This function is meant to pre-load the iterator for the zone init.
2065  * Specifically, it walks through the ranges until we are caught up to the
2066  * first_init_pfn value and exits there. If we never encounter the value, we
2067  * return false, indicating there are no valid ranges left.
2068  */
2069 static bool __init
2070 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2071 				    unsigned long *spfn, unsigned long *epfn,
2072 				    unsigned long first_init_pfn)
2073 {
2074 	u64 j;
2075 
2076 	/*
2077 	 * Start out by walking through the ranges in this zone that have
2078 	 * already been initialized. We don't need to do anything with them
2079 	 * so we just need to flush them out of the system.
2080 	 */
2081 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2082 		if (*epfn <= first_init_pfn)
2083 			continue;
2084 		if (*spfn < first_init_pfn)
2085 			*spfn = first_init_pfn;
2086 		*i = j;
2087 		return true;
2088 	}
2089 
2090 	return false;
2091 }
2092 
2093 /*
2094  * Initialize and free pages. We do it in two loops: first we initialize
2095  * the struct pages, then free them to the buddy allocator, because while we
2096  * are freeing pages we can access pages that are ahead (computing the buddy
2097  * page in __free_one_page()).
2098  *
2099  * To try to keep some memory in the cache, the loop is broken along
2100  * max page order boundaries. This way we will not cause
2101  * any issues with the buddy page computation.
2102  */
2103 static unsigned long __init
2104 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2105 		       unsigned long *end_pfn)
2106 {
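	/* mo_pfn is the first MAX_ORDER-aligned pfn strictly above *start_pfn */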
2107 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2108 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2109 	unsigned long nr_pages = 0;
2110 	u64 j = *i;
2111 
2112 	/* First we loop through and initialize the page values */
2113 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2114 		unsigned long t;
2115 
2116 		if (mo_pfn <= *start_pfn)
2117 			break;
2118 
2119 		t = min(mo_pfn, *end_pfn);
2120 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2121 
2122 		if (mo_pfn < *end_pfn) {
2123 			*start_pfn = mo_pfn;
2124 			break;
2125 		}
2126 	}
2127 
2128 	/* Reset values and now loop through freeing pages as needed */
2129 	swap(j, *i);
2130 
2131 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2132 		unsigned long t;
2133 
2134 		if (mo_pfn <= spfn)
2135 			break;
2136 
2137 		t = min(mo_pfn, epfn);
2138 		deferred_free_pages(spfn, t);
2139 
2140 		if (mo_pfn <= epfn)
2141 			break;
2142 	}
2143 
2144 	return nr_pages;
2145 }
2146 
2147 static void __init
2148 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2149 			   void *arg)
2150 {
2151 	unsigned long spfn, epfn;
2152 	struct zone *zone = arg;
2153 	u64 i;
2154 
2155 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2156 
2157 	/*
2158 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2159 	 * can avoid introducing any issues with the buddy allocator.
2160 	 */
2161 	while (spfn < end_pfn) {
2162 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2163 		cond_resched();
2164 	}
2165 }
2166 
2167 /* An arch may override for more concurrency. */
2168 __weak int __init
2169 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2170 {
2171 	return 1;
2172 }
2173 
2174 /* Initialise remaining memory on a node */
2175 static int __init deferred_init_memmap(void *data)
2176 {
2177 	pg_data_t *pgdat = data;
2178 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2179 	unsigned long spfn = 0, epfn = 0;
2180 	unsigned long first_init_pfn, flags;
2181 	unsigned long start = jiffies;
2182 	struct zone *zone;
2183 	int zid, max_threads;
2184 	u64 i;
2185 
2186 	/* Bind memory initialisation thread to a local node if possible */
2187 	if (!cpumask_empty(cpumask))
2188 		set_cpus_allowed_ptr(current, cpumask);
2189 
2190 	pgdat_resize_lock(pgdat, &flags);
2191 	first_init_pfn = pgdat->first_deferred_pfn;
2192 	if (first_init_pfn == ULONG_MAX) {
2193 		pgdat_resize_unlock(pgdat, &flags);
2194 		pgdat_init_report_one_done();
2195 		return 0;
2196 	}
2197 
2198 	/* Sanity check boundaries */
2199 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2200 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2201 	pgdat->first_deferred_pfn = ULONG_MAX;
2202 
2203 	/*
2204 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
2205 	 * interrupt thread must allocate this early in boot, the zone must be
2206 	 * pre-grown prior to the start of deferred page initialization.
2207 	 */
2208 	pgdat_resize_unlock(pgdat, &flags);
2209 
2210 	/* Only the highest zone is deferred, so find it */
2211 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2212 		zone = pgdat->node_zones + zid;
2213 		if (first_init_pfn < zone_end_pfn(zone))
2214 			break;
2215 	}
2216 
2217 	/* If the zone is empty, somebody else may have cleared it out already */
2218 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2219 						 first_init_pfn))
2220 		goto zone_empty;
2221 
2222 	max_threads = deferred_page_init_max_threads(cpumask);
2223 
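	/*
	 * Hand the remaining ranges to padata in section-aligned chunks so
	 * that up to max_threads threads can initialise them concurrently.
	 */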
2224 	while (spfn < epfn) {
2225 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2226 		struct padata_mt_job job = {
2227 			.thread_fn   = deferred_init_memmap_chunk,
2228 			.fn_arg      = zone,
2229 			.start       = spfn,
2230 			.size        = epfn_align - spfn,
2231 			.align       = PAGES_PER_SECTION,
2232 			.min_chunk   = PAGES_PER_SECTION,
2233 			.max_threads = max_threads,
2234 		};
2235 
2236 		padata_do_multithreaded(&job);
2237 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2238 						    epfn_align);
2239 	}
2240 zone_empty:
2241 	/* Sanity check that the next zone really is unpopulated */
2242 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2243 
2244 	pr_info("node %d deferred pages initialised in %ums\n",
2245 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2246 
2247 	pgdat_init_report_one_done();
2248 	return 0;
2249 }
2250 
2251 /*
2252  * If this zone has deferred pages, try to grow it by initializing enough
2253  * deferred pages to satisfy the allocation specified by order, rounded up to
2254  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2255  * of SECTION_SIZE bytes by initializing struct pages in increments of
2256  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2257  *
2258  * Return true when zone was grown, otherwise return false. We return true even
2259  * when we grow less than requested, to let the caller decide if there are
2260  * enough pages to satisfy the allocation.
2261  *
2262  * Note: We use noinline because this function is needed only during boot, and
2263  * it is called from the __ref function _deferred_grow_zone. This way we are
2264  * making sure that it is not inlined into the permanent text section.
2265  */
2266 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2267 {
2268 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2269 	pg_data_t *pgdat = zone->zone_pgdat;
2270 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2271 	unsigned long spfn, epfn, flags;
2272 	unsigned long nr_pages = 0;
2273 	u64 i;
2274 
2275 	/* Only the last zone may have deferred pages */
2276 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2277 		return false;
2278 
2279 	pgdat_resize_lock(pgdat, &flags);
2280 
2281 	/*
2282 	 * If someone grew this zone while we were waiting for the spinlock, return
2283 	 * true, as there might be enough pages already.
2284 	 */
2285 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2286 		pgdat_resize_unlock(pgdat, &flags);
2287 		return true;
2288 	}
2289 
2290 	/* If the zone is empty somebody else may have cleared out the zone */
2291 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2292 						 first_deferred_pfn)) {
2293 		pgdat->first_deferred_pfn = ULONG_MAX;
2294 		pgdat_resize_unlock(pgdat, &flags);
2295 		/* Retry only once. */
2296 		return first_deferred_pfn != ULONG_MAX;
2297 	}
2298 
2299 	/*
2300 	 * Initialize and free pages in MAX_ORDER sized increments so
2301 	 * that we can avoid introducing any issues with the buddy
2302 	 * allocator.
2303 	 */
2304 	while (spfn < epfn) {
2305 		/* update our first deferred PFN for this section */
2306 		first_deferred_pfn = spfn;
2307 
2308 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2309 		touch_nmi_watchdog();
2310 
2311 		/* We should only stop along section boundaries */
2312 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2313 			continue;
2314 
2315 		/* If our quota has been met we can stop here */
2316 		if (nr_pages >= nr_pages_needed)
2317 			break;
2318 	}
2319 
2320 	pgdat->first_deferred_pfn = spfn;
2321 	pgdat_resize_unlock(pgdat, &flags);
2322 
2323 	return nr_pages > 0;
2324 }
2325 
2326 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2327 
2328 #ifdef CONFIG_CMA
2329 void __init init_cma_reserved_pageblock(struct page *page)
2330 {
2331 	unsigned int i = pageblock_nr_pages;
2332 	struct page *p = page;
2333 
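	/*
	 * Clear the reserved flag and reset the refcount on each page, then
	 * hand the whole pageblock to the buddy allocator as MIGRATE_CMA.
	 */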
2334 	do {
2335 		__ClearPageReserved(p);
2336 		set_page_count(p, 0);
2337 	} while (++p, --i);
2338 
2339 	set_pageblock_migratetype(page, MIGRATE_CMA);
2340 	set_page_refcounted(page);
2341 	__free_pages(page, pageblock_order);
2342 
2343 	adjust_managed_page_count(page, pageblock_nr_pages);
2344 	page_zone(page)->cma_pages += pageblock_nr_pages;
2345 }
2346 #endif
2347 
2348 void set_zone_contiguous(struct zone *zone)
2349 {
2350 	unsigned long block_start_pfn = zone->zone_start_pfn;
2351 	unsigned long block_end_pfn;
2352 
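	/*
	 * Walk the zone one pageblock at a time and bail out on the first
	 * pageblock that does not map to a valid page range.
	 */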
2353 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2354 	for (; block_start_pfn < zone_end_pfn(zone);
2355 			block_start_pfn = block_end_pfn,
2356 			 block_end_pfn += pageblock_nr_pages) {
2357 
2358 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2359 
2360 		if (!__pageblock_pfn_to_page(block_start_pfn,
2361 					     block_end_pfn, zone))
2362 			return;
2363 		cond_resched();
2364 	}
2365 
2366 	/* No hole was found, so the zone is contiguous */
2367 	zone->contiguous = true;
2368 }
2369 
2370 void __init page_alloc_init_late(void)
2371 {
2372 	struct zone *zone;
2373 	int nid;
2374 
2375 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2376 
2377 	/* There will be num_node_state(N_MEMORY) threads */
2378 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2379 	for_each_node_state(nid, N_MEMORY) {
2380 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2381 	}
2382 
2383 	/* Block until all are initialised */
2384 	wait_for_completion(&pgdat_init_all_done_comp);
2385 
2386 	/*
2387 	 * We initialized the rest of the deferred pages.  Permanently disable
2388 	 * on-demand struct page initialization.
2389 	 */
2390 	static_branch_disable(&deferred_pages);
2391 
2392 	/* Reinit limits that are based on free pages after the kernel is up */
2393 	files_maxfiles_init();
2394 #endif
2395 
2396 	buffer_init();
2397 
2398 	/* Discard memblock private memory */
2399 	memblock_discard();
2400 
2401 	for_each_node_state(nid, N_MEMORY)
2402 		shuffle_free_memory(NODE_DATA(nid));
2403 
2404 	for_each_populated_zone(zone)
2405 		set_zone_contiguous(zone);
2406 
2407 	/* Initialize page ext after all struct pages are initialized. */
2408 	if (deferred_struct_pages)
2409 		page_ext_init();
2410 
2411 	page_alloc_sysctl_init();
2412 }
2413 
2414 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2415 /*
2416  * Returns the number of pages that the arch has reserved but
2417  * that are not known to alloc_large_system_hash().
2418  */
2419 static unsigned long __init arch_reserved_kernel_pages(void)
2420 {
2421 	return 0;
2422 }
2423 #endif
2424 
2425 /*
2426  * Adaptive scale is meant to reduce the sizes of hash tables on large-memory
2427  * machines. As memory size is increased the scale is also increased, but at a
2428  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2429  * quadruples the scale is increased by one, which means the size of the hash
2430  * table only doubles, instead of quadrupling as well.
2431  * Because 32-bit systems cannot have large physical memory, where this scaling
2432  * makes sense, it is disabled on such platforms.
2433  */
2434 #if __BITS_PER_LONG > 32
2435 #define ADAPT_SCALE_BASE	(64ul << 30)
2436 #define ADAPT_SCALE_SHIFT	2
2437 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2438 #endif
2439 
2440 /*
2441  * allocate a large system hash table from memblock
2442  * - it is assumed that the hash table must contain an exact power-of-2
2443  *   quantity of entries
2444  * - limit is the number of hash buckets, not the total allocation size
2445  */
2446 void *__init alloc_large_system_hash(const char *tablename,
2447 				     unsigned long bucketsize,
2448 				     unsigned long numentries,
2449 				     int scale,
2450 				     int flags,
2451 				     unsigned int *_hash_shift,
2452 				     unsigned int *_hash_mask,
2453 				     unsigned long low_limit,
2454 				     unsigned long high_limit)
2455 {
2456 	unsigned long long max = high_limit;
2457 	unsigned long log2qty, size;
2458 	void *table;
2459 	gfp_t gfp_flags;
2460 	bool virt;
2461 	bool huge;
2462 
2463 	/* allow the kernel cmdline to have a say */
2464 	if (!numentries) {
2465 		/* round applicable memory size up to nearest megabyte */
2466 		numentries = nr_kernel_pages;
2467 		numentries -= arch_reserved_kernel_pages();
2468 
2469 		/* Rounding up isn't necessary when PAGE_SIZE >= 1MB */
2470 		if (PAGE_SIZE < SZ_1M)
2471 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2472 
2473 #if __BITS_PER_LONG > 32
2474 		if (!high_limit) {
2475 			unsigned long adapt;
2476 
2477 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2478 			     adapt <<= ADAPT_SCALE_SHIFT)
2479 				scale++;
2480 		}
2481 #endif
2482 
2483 		/* limit to 1 bucket per 2^scale bytes of low memory */
2484 		if (scale > PAGE_SHIFT)
2485 			numentries >>= (scale - PAGE_SHIFT);
2486 		else
2487 			numentries <<= (PAGE_SHIFT - scale);
2488 
2489 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2490 			numentries = PAGE_SIZE / bucketsize;
2491 	}
2492 	numentries = roundup_pow_of_two(numentries);
2493 
2494 	/* limit allocation size to 1/16 total memory by default */
2495 	if (max == 0) {
2496 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2497 		do_div(max, bucketsize);
2498 	}
2499 	max = min(max, 0x80000000ULL);
2500 
2501 	if (numentries < low_limit)
2502 		numentries = low_limit;
2503 	if (numentries > max)
2504 		numentries = max;
2505 
2506 	log2qty = ilog2(numentries);
2507 
2508 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
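	/*
	 * Keep halving the number of entries until an allocation succeeds,
	 * but do not shrink the table below a single page.
	 */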
2509 	do {
2510 		virt = false;
2511 		size = bucketsize << log2qty;
2512 		if (flags & HASH_EARLY) {
2513 			if (flags & HASH_ZERO)
2514 				table = memblock_alloc(size, SMP_CACHE_BYTES);
2515 			else
2516 				table = memblock_alloc_raw(size,
2517 							   SMP_CACHE_BYTES);
2518 		} else if (get_order(size) > MAX_ORDER || hashdist) {
2519 			table = vmalloc_huge(size, gfp_flags);
2520 			virt = true;
2521 			if (table)
2522 				huge = is_vm_area_hugepages(table);
2523 		} else {
2524 			/*
2525 			 * If bucketsize is not a power of two, we may free
2526 			 * some pages at the end of the hash table, which
2527 			 * alloc_pages_exact() does automatically.
2528 			 */
2529 			table = alloc_pages_exact(size, gfp_flags);
2530 			kmemleak_alloc(table, size, 1, gfp_flags);
2531 		}
2532 	} while (!table && size > PAGE_SIZE && --log2qty);
2533 
2534 	if (!table)
2535 		panic("Failed to allocate %s hash table\n", tablename);
2536 
2537 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2538 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2539 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2540 
2541 	if (_hash_shift)
2542 		*_hash_shift = log2qty;
2543 	if (_hash_mask)
2544 		*_hash_mask = (1 << log2qty) - 1;
2545 
2546 	return table;
2547 }
2548 
2549 /**
2550  * set_dma_reserve - set the specified number of pages reserved in the first zone
2551  * @new_dma_reserve: The number of pages to mark reserved
2552  *
2553  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2554  * In the DMA zone, a significant percentage may be consumed by kernel image
2555  * and other unfreeable allocations which can skew the watermarks badly. This
2556  * function may optionally be used to account for unfreeable pages in the
2557  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2558  * smaller per-cpu batchsize.
2559  */
2560 void __init set_dma_reserve(unsigned long new_dma_reserve)
2561 {
2562 	dma_reserve = new_dma_reserve;
2563 }
2564 
2565 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2566 							unsigned int order)
2567 {
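	/*
	 * With deferred struct page init, pages past the first deferred pfn
	 * are initialised and freed later; skip them here.
	 */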
2569 	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2570 		int nid = early_pfn_to_nid(pfn);
2571 
2572 		if (!early_page_initialised(pfn, nid))
2573 			return;
2574 	}
2575 
2576 	if (!kmsan_memblock_free_pages(page, order)) {
2577 		/* KMSAN will take care of these pages. */
2578 		return;
2579 	}
2580 	__free_pages_core(page, order);
2581 }
2582 
2583 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2584 EXPORT_SYMBOL(init_on_alloc);
2585 
2586 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2587 EXPORT_SYMBOL(init_on_free);
2588 
2589 static bool _init_on_alloc_enabled_early __read_mostly
2590 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2591 static int __init early_init_on_alloc(char *buf)
2592 {
2594 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2595 }
2596 early_param("init_on_alloc", early_init_on_alloc);
2597 
2598 static bool _init_on_free_enabled_early __read_mostly
2599 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2600 static int __init early_init_on_free(char *buf)
2601 {
2602 	return kstrtobool(buf, &_init_on_free_enabled_early);
2603 }
2604 early_param("init_on_free", early_init_on_free);
2605 
2606 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2607 
2608 /*
2609  * Enable static keys related to various memory debugging and hardening options.
2610  * Some override others, and depend on early params that are evaluated in the
2611  * order of appearance. So we need to first gather the full picture of what was
2612  * enabled, and then make decisions.
2613  */
2614 static void __init mem_debugging_and_hardening_init(void)
2615 {
2616 	bool page_poisoning_requested = false;
2617 	bool want_check_pages = false;
2618 
2619 #ifdef CONFIG_PAGE_POISONING
2620 	/*
2621 	 * Page poisoning serves as debug page alloc for some arches. If
2622 	 * either of those options is enabled, enable poisoning.
2623 	 */
2624 	if (page_poisoning_enabled() ||
2625 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2626 	      debug_pagealloc_enabled())) {
2627 		static_branch_enable(&_page_poisoning_enabled);
2628 		page_poisoning_requested = true;
2629 		want_check_pages = true;
2630 	}
2631 #endif
2632 
2633 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2634 	    page_poisoning_requested) {
2635 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2636 			"will take precedence over init_on_alloc and init_on_free\n");
2637 		_init_on_alloc_enabled_early = false;
2638 		_init_on_free_enabled_early = false;
2639 	}
2640 
2641 	if (_init_on_alloc_enabled_early) {
2642 		want_check_pages = true;
2643 		static_branch_enable(&init_on_alloc);
2644 	} else {
2645 		static_branch_disable(&init_on_alloc);
2646 	}
2647 
2648 	if (_init_on_free_enabled_early) {
2649 		want_check_pages = true;
2650 		static_branch_enable(&init_on_free);
2651 	} else {
2652 		static_branch_disable(&init_on_free);
2653 	}
2654 
2655 	if (IS_ENABLED(CONFIG_KMSAN) &&
2656 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2657 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2658 
2659 #ifdef CONFIG_DEBUG_PAGEALLOC
2660 	if (debug_pagealloc_enabled()) {
2661 		want_check_pages = true;
2662 		static_branch_enable(&_debug_pagealloc_enabled);
2663 
2664 		if (debug_guardpage_minorder())
2665 			static_branch_enable(&_debug_guardpage_enabled);
2666 	}
2667 #endif
2668 
2669 	/*
2670 	 * Any page debugging or hardening option also enables sanity checking
2671 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2672 	 * enabled already.
2673 	 */
2674 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2675 		static_branch_enable(&check_pages_enabled);
2676 }
2677 
2678 /* Report memory auto-initialization states for this boot. */
2679 static void __init report_meminit(void)
2680 {
2681 	const char *stack;
2682 
2683 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2684 		stack = "all(pattern)";
2685 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2686 		stack = "all(zero)";
2687 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2688 		stack = "byref_all(zero)";
2689 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2690 		stack = "byref(zero)";
2691 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2692 		stack = "__user(zero)";
2693 	else
2694 		stack = "off";
2695 
2696 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2697 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2698 		want_init_on_free() ? "on" : "off");
2699 	if (want_init_on_free())
2700 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2701 }
2702 
2703 static void __init mem_init_print_info(void)
2704 {
2705 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2706 	unsigned long init_code_size, init_data_size;
2707 
2708 	physpages = get_num_physpages();
2709 	codesize = _etext - _stext;
2710 	datasize = _edata - _sdata;
2711 	rosize = __end_rodata - __start_rodata;
2712 	bss_size = __bss_stop - __bss_start;
2713 	init_data_size = __init_end - __init_begin;
2714 	init_code_size = _einittext - _sinittext;
2715 
2716 	/*
2717 	 * Detect special cases and adjust section sizes accordingly:
2718 	 * 1) .init.* may be embedded into .data sections
2719 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2720 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2721 	 * 3) .rodata.* may be embedded into .text or .data sections.
2722 	 */
2723 #define adj_init_size(start, end, size, pos, adj) \
2724 	do { \
2725 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2726 			size -= adj; \
2727 	} while (0)
2728 
2729 	adj_init_size(__init_begin, __init_end, init_data_size,
2730 		     _sinittext, init_code_size);
2731 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2732 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2733 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2734 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2735 
2736 #undef	adj_init_size
2737 
2738 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2739 #ifdef	CONFIG_HIGHMEM
2740 		", %luK highmem"
2741 #endif
2742 		")\n",
2743 		K(nr_free_pages()), K(physpages),
2744 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2745 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2746 		K(physpages - totalram_pages() - totalcma_pages),
2747 		K(totalcma_pages)
2748 #ifdef	CONFIG_HIGHMEM
2749 		, K(totalhigh_pages())
2750 #endif
2751 		);
2752 }
2753 
2754 /*
2755  * Set up kernel memory allocators
2756  */
2757 void __init mm_core_init(void)
2758 {
2759 	/* Initializations relying on SMP setup */
2760 	build_all_zonelists(NULL);
2761 	page_alloc_init_cpuhp();
2762 
2763 	/*
2764 	 * page_ext requires contiguous pages
2765 	 * bigger than MAX_ORDER, unless SPARSEMEM is used.
2766 	 */
2767 	page_ext_init_flatmem();
2768 	mem_debugging_and_hardening_init();
2769 	kfence_alloc_pool_and_metadata();
2770 	report_meminit();
2771 	kmsan_init_shadow();
2772 	stack_depot_early_init();
2773 	mem_init();
2774 	mem_init_print_info();
2775 	kmem_cache_init();
2776 	/*
2777 	 * page_owner must be initialized after buddy is ready, and also after
2778 	 * slab is ready so that stack_depot_init() works properly
2779 	 */
2780 	page_ext_init_flatmem_late();
2781 	kmemleak_init();
2782 	ptlock_cache_init();
2783 	pgtable_cache_init();
2784 	debug_objects_mem_init();
2785 	vmalloc_init();
2786 	/* If struct page init was not deferred, init page_ext now, as vmap is fully initialized */
2787 	if (!deferred_struct_pages)
2788 		page_ext_init();
2789 	/* Should be run before the first non-init thread is created */
2790 	init_espfix_bsp();
2791 	/* Should be run after espfix64 is set up. */
2792 	pti_init();
2793 	kmsan_init_runtime();
2794 	mm_cache_init();
2795 }
2796