xref: /openbmc/linux/mm/mm_init.c (revision 05bdb996)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm_init.c - Memory initialisation verification and debugging
4  *
5  * Copyright 2008 IBM Corporation, 2008
6  * Author Mel Gorman <mel@csn.ul.ie>
7  *
8  */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/kobject.h>
12 #include <linux/export.h>
13 #include <linux/memory.h>
14 #include <linux/notifier.h>
15 #include <linux/sched.h>
16 #include <linux/mman.h>
17 #include <linux/memblock.h>
18 #include <linux/page-isolation.h>
19 #include <linux/padata.h>
20 #include <linux/nmi.h>
21 #include <linux/buffer_head.h>
22 #include <linux/kmemleak.h>
23 #include <linux/kfence.h>
24 #include <linux/page_ext.h>
25 #include <linux/pti.h>
26 #include <linux/pgtable.h>
27 #include <linux/swap.h>
28 #include <linux/cma.h>
29 #include "internal.h"
30 #include "slab.h"
31 #include "shuffle.h"
32 
33 #include <asm/setup.h>
34 
35 #ifdef CONFIG_DEBUG_MEMORY_INIT
36 int __meminitdata mminit_loglevel;
37 
38 /* The zonelists are simply reported, validation is manual. */
39 void __init mminit_verify_zonelist(void)
40 {
41 	int nid;
42 
43 	if (mminit_loglevel < MMINIT_VERIFY)
44 		return;
45 
46 	for_each_online_node(nid) {
47 		pg_data_t *pgdat = NODE_DATA(nid);
48 		struct zone *zone;
49 		struct zoneref *z;
50 		struct zonelist *zonelist;
51 		int i, listid, zoneid;
52 
53 		BUILD_BUG_ON(MAX_ZONELISTS > 2);
54 		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
55 
56 			/* Identify the zone and nodelist */
57 			zoneid = i % MAX_NR_ZONES;
58 			listid = i / MAX_NR_ZONES;
59 			zonelist = &pgdat->node_zonelists[listid];
60 			zone = &pgdat->node_zones[zoneid];
61 			if (!populated_zone(zone))
62 				continue;
63 
64 			/* Print information about the zonelist */
65 			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
66 				listid > 0 ? "thisnode" : "general", nid,
67 				zone->name);
68 
69 			/* Iterate the zonelist */
70 			for_each_zone_zonelist(zone, z, zonelist, zoneid)
71 				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
72 			pr_cont("\n");
73 		}
74 	}
75 }
76 
77 void __init mminit_verify_pageflags_layout(void)
78 {
79 	int shift, width;
80 	unsigned long or_mask, add_mask;
81 
82 	shift = 8 * sizeof(unsigned long);
83 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
84 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
85 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
86 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
87 		SECTIONS_WIDTH,
88 		NODES_WIDTH,
89 		ZONES_WIDTH,
90 		LAST_CPUPID_WIDTH,
91 		KASAN_TAG_WIDTH,
92 		LRU_GEN_WIDTH,
93 		LRU_REFS_WIDTH,
94 		NR_PAGEFLAGS);
95 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
96 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
97 		SECTIONS_SHIFT,
98 		NODES_SHIFT,
99 		ZONES_SHIFT,
100 		LAST_CPUPID_SHIFT,
101 		KASAN_TAG_WIDTH);
102 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
103 		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
104 		(unsigned long)SECTIONS_PGSHIFT,
105 		(unsigned long)NODES_PGSHIFT,
106 		(unsigned long)ZONES_PGSHIFT,
107 		(unsigned long)LAST_CPUPID_PGSHIFT,
108 		(unsigned long)KASAN_TAG_PGSHIFT);
109 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
110 		"Node/Zone ID: %lu -> %lu\n",
111 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
112 		(unsigned long)ZONEID_PGOFF);
113 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
114 		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
115 		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
116 #ifdef NODE_NOT_IN_PAGE_FLAGS
117 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
118 		"Node not in page flags");
119 #endif
120 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
121 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
122 		"Last cpupid not in page flags");
123 #endif
124 
125 	if (SECTIONS_WIDTH) {
126 		shift -= SECTIONS_WIDTH;
127 		BUG_ON(shift != SECTIONS_PGSHIFT);
128 	}
129 	if (NODES_WIDTH) {
130 		shift -= NODES_WIDTH;
131 		BUG_ON(shift != NODES_PGSHIFT);
132 	}
133 	if (ZONES_WIDTH) {
134 		shift -= ZONES_WIDTH;
135 		BUG_ON(shift != ZONES_PGSHIFT);
136 	}
137 
138 	/* Check for bitmask overlaps */
139 	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
140 			(NODES_MASK << NODES_PGSHIFT) |
141 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
142 	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
143 			(NODES_MASK << NODES_PGSHIFT) +
144 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
145 	BUG_ON(or_mask != add_mask);
146 }
147 
148 static __init int set_mminit_loglevel(char *str)
149 {
150 	get_option(&str, &mminit_loglevel);
151 	return 0;
152 }
153 early_param("mminit_loglevel", set_mminit_loglevel);
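
/*
 * Illustrative note (not part of the original source): mminit_loglevel is
 * set from the kernel command line, e.g. booting with "mminit_loglevel=2".
 * Assuming the enum in mm/internal.h orders MMINIT_WARNING < MMINIT_VERIFY
 * < MMINIT_TRACE, a level of 1 or higher enables the verification paths
 * above, and a level of 2 or higher also emits the mminit_dprintk() trace
 * output.
 */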
154 #endif /* CONFIG_DEBUG_MEMORY_INIT */
155 
156 struct kobject *mm_kobj;
157 EXPORT_SYMBOL_GPL(mm_kobj);
158 
159 #ifdef CONFIG_SMP
160 s32 vm_committed_as_batch = 32;
161 
162 void mm_compute_batch(int overcommit_policy)
163 {
164 	u64 memsized_batch;
165 	s32 nr = num_present_cpus();
166 	s32 batch = max_t(s32, nr*2, 32);
167 	unsigned long ram_pages = totalram_pages();
168 
169 	/*
170 	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
171 	 * (total memory/#cpus), and lift it to 25% for other policies
172 	 * to ease the possible lock contention for percpu_counter
173 	 * vm_committed_as, while the max limit is INT_MAX
174 	 */
175 	if (overcommit_policy == OVERCOMMIT_NEVER)
176 		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
177 	else
178 		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
179 
180 	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
181 }
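
/*
 * Worked example (illustrative, not part of the original source), assuming
 * 4 KiB pages: on a machine with 16 GiB of RAM (4,194,304 pages) and 8
 * present CPUs, the CPU-based floor is max(8 * 2, 32) = 32.  With
 * OVERCOMMIT_NEVER the memory-based value is 4194304 / 8 / 256 = 2048,
 * while the other policies give 4194304 / 8 / 4 = 131072, so
 * vm_committed_as_batch becomes 2048 or 131072 respectively.
 */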
182 
183 static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
184 					unsigned long action, void *arg)
185 {
186 	switch (action) {
187 	case MEM_ONLINE:
188 	case MEM_OFFLINE:
189 		mm_compute_batch(sysctl_overcommit_memory);
190 		break;
191 	default:
192 		break;
193 	}
194 	return NOTIFY_OK;
195 }
196 
197 static int __init mm_compute_batch_init(void)
198 {
199 	mm_compute_batch(sysctl_overcommit_memory);
200 	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
201 	return 0;
202 }
203 
204 __initcall(mm_compute_batch_init);
205 
206 #endif
207 
208 static int __init mm_sysfs_init(void)
209 {
210 	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
211 	if (!mm_kobj)
212 		return -ENOMEM;
213 
214 	return 0;
215 }
216 postcore_initcall(mm_sysfs_init);
217 
218 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
219 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
220 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
221 
222 static unsigned long required_kernelcore __initdata;
223 static unsigned long required_kernelcore_percent __initdata;
224 static unsigned long required_movablecore __initdata;
225 static unsigned long required_movablecore_percent __initdata;
226 
227 static unsigned long nr_kernel_pages __initdata;
228 static unsigned long nr_all_pages __initdata;
229 static unsigned long dma_reserve __initdata;
230 
231 static bool deferred_struct_pages __meminitdata;
232 
233 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
234 
235 static int __init cmdline_parse_core(char *p, unsigned long *core,
236 				     unsigned long *percent)
237 {
238 	unsigned long long coremem;
239 	char *endptr;
240 
241 	if (!p)
242 		return -EINVAL;
243 
244 	/* Value may be a percentage of total memory, otherwise bytes */
245 	coremem = simple_strtoull(p, &endptr, 0);
246 	if (*endptr == '%') {
247 		/* Paranoid check for percent values greater than 100 */
248 		WARN_ON(coremem > 100);
249 
250 		*percent = coremem;
251 	} else {
252 		coremem = memparse(p, &p);
253 		/* Paranoid check that UL is enough for the coremem value */
254 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
255 
256 		*core = coremem >> PAGE_SHIFT;
257 		*percent = 0UL;
258 	}
259 	return 0;
260 }
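
/*
 * Illustrative example (not part of the original source), assuming 4 KiB
 * pages: "kernelcore=512M" is parsed by memparse() into 536,870,912 bytes,
 * so *core becomes 536870912 >> PAGE_SHIFT = 131072 pages and *percent is
 * cleared.  "kernelcore=30%" instead leaves *core untouched and stores 30
 * in *percent, to be resolved against totalpages later.
 */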
261 
262 /*
263  * kernelcore=size sets the amount of memory for use for allocations that
264  * cannot be reclaimed or migrated.
265  */
266 static int __init cmdline_parse_kernelcore(char *p)
267 {
268 	/* parse kernelcore=mirror */
269 	if (parse_option_str(p, "mirror")) {
270 		mirrored_kernelcore = true;
271 		return 0;
272 	}
273 
274 	return cmdline_parse_core(p, &required_kernelcore,
275 				  &required_kernelcore_percent);
276 }
277 early_param("kernelcore", cmdline_parse_kernelcore);
278 
279 /*
280  * movablecore=size sets the amount of memory for use for allocations that
281  * can be reclaimed or migrated.
282  */
283 static int __init cmdline_parse_movablecore(char *p)
284 {
285 	return cmdline_parse_core(p, &required_movablecore,
286 				  &required_movablecore_percent);
287 }
288 early_param("movablecore", cmdline_parse_movablecore);
289 
290 /*
291  * early_calculate_totalpages()
292  * Sum pages in active regions for movable zone.
293  * Populate N_MEMORY for calculating usable_nodes.
294  */
295 static unsigned long __init early_calculate_totalpages(void)
296 {
297 	unsigned long totalpages = 0;
298 	unsigned long start_pfn, end_pfn;
299 	int i, nid;
300 
301 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
302 		unsigned long pages = end_pfn - start_pfn;
303 
304 		totalpages += pages;
305 		if (pages)
306 			node_set_state(nid, N_MEMORY);
307 	}
308 	return totalpages;
309 }
310 
311 /*
312  * This finds a zone that can be used for ZONE_MOVABLE pages. The
313  * assumption is made that zones within a node are ordered in monotonically
314  * increasing memory addresses so that the "highest" populated zone is used.
315  */
316 static void __init find_usable_zone_for_movable(void)
317 {
318 	int zone_index;
319 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
320 		if (zone_index == ZONE_MOVABLE)
321 			continue;
322 
323 		if (arch_zone_highest_possible_pfn[zone_index] >
324 				arch_zone_lowest_possible_pfn[zone_index])
325 			break;
326 	}
327 
328 	VM_BUG_ON(zone_index == -1);
329 	movable_zone = zone_index;
330 }
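
/*
 * Illustrative note (not part of the original source): on a typical x86_64
 * system without highmem, the loop above walks down from the top zone index
 * and usually settles on ZONE_NORMAL as movable_zone, since it is the
 * highest zone with a non-empty architectural PFN range.
 */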
331 
332 /*
333  * Find the PFN at which the Movable zone begins in each node. Kernel memory
334  * is spread evenly between nodes as long as the nodes have enough
335  * memory. When they don't, some nodes will have more kernelcore than
336  * others
337  */
338 static void __init find_zone_movable_pfns_for_nodes(void)
339 {
340 	int i, nid;
341 	unsigned long usable_startpfn;
342 	unsigned long kernelcore_node, kernelcore_remaining;
343 	/* save the state before borrowing the nodemask */
344 	nodemask_t saved_node_state = node_states[N_MEMORY];
345 	unsigned long totalpages = early_calculate_totalpages();
346 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
347 	struct memblock_region *r;
348 
349 	/* Need to find movable_zone earlier when movable_node is specified. */
350 	find_usable_zone_for_movable();
351 
352 	/*
353 	 * If movable_node is specified, ignore kernelcore and movablecore
354 	 * options.
355 	 */
356 	if (movable_node_is_enabled()) {
357 		for_each_mem_region(r) {
358 			if (!memblock_is_hotpluggable(r))
359 				continue;
360 
361 			nid = memblock_get_region_node(r);
362 
363 			usable_startpfn = PFN_DOWN(r->base);
364 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
365 				min(usable_startpfn, zone_movable_pfn[nid]) :
366 				usable_startpfn;
367 		}
368 
369 		goto out2;
370 	}
371 
372 	/*
373 	 * If kernelcore=mirror is specified, ignore movablecore option
374 	 */
375 	if (mirrored_kernelcore) {
376 		bool mem_below_4gb_not_mirrored = false;
377 
378 		for_each_mem_region(r) {
379 			if (memblock_is_mirror(r))
380 				continue;
381 
382 			nid = memblock_get_region_node(r);
383 
384 			usable_startpfn = memblock_region_memory_base_pfn(r);
385 
386 			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
387 				mem_below_4gb_not_mirrored = true;
388 				continue;
389 			}
390 
391 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
392 				min(usable_startpfn, zone_movable_pfn[nid]) :
393 				usable_startpfn;
394 		}
395 
396 		if (mem_below_4gb_not_mirrored)
397 			pr_warn("This configuration results in unmirrored kernel memory.\n");
398 
399 		goto out2;
400 	}
401 
402 	/*
403 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
404 	 * amount of necessary memory.
405 	 */
406 	if (required_kernelcore_percent)
407 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
408 				       10000UL;
409 	if (required_movablecore_percent)
410 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
411 					10000UL;
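
	/*
	 * Illustrative example (not part of the original source): with
	 * totalpages = 4,194,304 (16 GiB of 4 KiB pages) and kernelcore=25%,
	 * the calculation above yields
	 * (4194304 * 100 * 25) / 10000 = 1,048,576 pages of kernelcore.
	 */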
412 
413 	/*
414 	 * If movablecore= was specified, calculate what size of
415 	 * kernelcore that corresponds so that memory usable for
416 	 * any allocation type is evenly spread. If both kernelcore
417 	 * and movablecore are specified, then the value of kernelcore
418 	 * will be used for required_kernelcore if it's greater than
419 	 * what movablecore would have allowed.
420 	 */
421 	if (required_movablecore) {
422 		unsigned long corepages;
423 
424 		/*
425 		 * Round-up so that ZONE_MOVABLE is at least as large as what
426 		 * was requested by the user
427 		 */
428 		required_movablecore =
429 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
430 		required_movablecore = min(totalpages, required_movablecore);
431 		corepages = totalpages - required_movablecore;
432 
433 		required_kernelcore = max(required_kernelcore, corepages);
434 	}
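
	/*
	 * Illustrative example (not part of the original source): with
	 * totalpages = 4,194,304 and movablecore asking for 1,048,576 pages
	 * (assumed here to already be a multiple of MAX_ORDER_NR_PAGES),
	 * corepages = 3,145,728, so at least three quarters of memory is kept
	 * as kernelcore and only the remainder can become ZONE_MOVABLE.
	 */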
435 
436 	/*
437 	 * If kernelcore was not specified or kernelcore size is larger
438 	 * than totalpages, there is no ZONE_MOVABLE.
439 	 */
440 	if (!required_kernelcore || required_kernelcore >= totalpages)
441 		goto out;
442 
443 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
444 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
445 
446 restart:
447 	/* Spread kernelcore memory as evenly as possible throughout nodes */
448 	kernelcore_node = required_kernelcore / usable_nodes;
449 	for_each_node_state(nid, N_MEMORY) {
450 		unsigned long start_pfn, end_pfn;
451 
452 		/*
453 		 * Recalculate kernelcore_node if the division per node
454 		 * now exceeds what is necessary to satisfy the requested
455 		 * amount of memory for the kernel
456 		 */
457 		if (required_kernelcore < kernelcore_node)
458 			kernelcore_node = required_kernelcore / usable_nodes;
459 
460 		/*
461 		 * As the map is walked, we track how much memory is usable
462 		 * by the kernel using kernelcore_remaining. When it is
463 		 * 0, the rest of the node is usable by ZONE_MOVABLE
464 		 */
465 		kernelcore_remaining = kernelcore_node;
466 
467 		/* Go through each range of PFNs within this node */
468 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
469 			unsigned long size_pages;
470 
471 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
472 			if (start_pfn >= end_pfn)
473 				continue;
474 
475 			/* Account for what is only usable for kernelcore */
476 			if (start_pfn < usable_startpfn) {
477 				unsigned long kernel_pages;
478 				kernel_pages = min(end_pfn, usable_startpfn)
479 								- start_pfn;
480 
481 				kernelcore_remaining -= min(kernel_pages,
482 							kernelcore_remaining);
483 				required_kernelcore -= min(kernel_pages,
484 							required_kernelcore);
485 
486 				/* Continue if range is now fully accounted */
487 				if (end_pfn <= usable_startpfn) {
488 
489 					/*
490 					 * Push zone_movable_pfn to the end so
491 					 * that if we have to rebalance
492 					 * kernelcore across nodes, we will
493 					 * not double account here
494 					 */
495 					zone_movable_pfn[nid] = end_pfn;
496 					continue;
497 				}
498 				start_pfn = usable_startpfn;
499 			}
500 
501 			/*
502 			 * The usable PFN range for ZONE_MOVABLE is from
503 			 * start_pfn->end_pfn. Calculate size_pages as the
504 			 * number of pages used as kernelcore
505 			 */
506 			size_pages = end_pfn - start_pfn;
507 			if (size_pages > kernelcore_remaining)
508 				size_pages = kernelcore_remaining;
509 			zone_movable_pfn[nid] = start_pfn + size_pages;
510 
511 			/*
512 			 * Some kernelcore has been met, update counts and
513 			 * break if the kernelcore for this node has been
514 			 * satisfied
515 			 */
516 			required_kernelcore -= min(required_kernelcore,
517 								size_pages);
518 			kernelcore_remaining -= size_pages;
519 			if (!kernelcore_remaining)
520 				break;
521 		}
522 	}
523 
524 	/*
525 	 * If there is still required_kernelcore, we do another pass with one
526 	 * less node in the count. This will push zone_movable_pfn[nid] further
527 	 * along on the nodes that still have memory until kernelcore is
528 	 * satisfied
529 	 */
530 	usable_nodes--;
531 	if (usable_nodes && required_kernelcore > usable_nodes)
532 		goto restart;
533 
534 out2:
535 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
536 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
537 		unsigned long start_pfn, end_pfn;
538 
539 		zone_movable_pfn[nid] =
540 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
541 
542 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
543 		if (zone_movable_pfn[nid] >= end_pfn)
544 			zone_movable_pfn[nid] = 0;
545 	}
546 
547 out:
548 	/* restore the node_state */
549 	node_states[N_MEMORY] = saved_node_state;
550 }
551 
552 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
553 				unsigned long zone, int nid)
554 {
555 	mm_zero_struct_page(page);
556 	set_page_links(page, zone, nid, pfn);
557 	init_page_count(page);
558 	page_mapcount_reset(page);
559 	page_cpupid_reset_last(page);
560 	page_kasan_tag_reset(page);
561 
562 	INIT_LIST_HEAD(&page->lru);
563 #ifdef WANT_PAGE_VIRTUAL
564 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
565 	if (!is_highmem_idx(zone))
566 		set_page_address(page, __va(pfn << PAGE_SHIFT));
567 #endif
568 }
569 
570 #ifdef CONFIG_NUMA
571 /*
572  * During memory init memblocks map pfns to nids. The search is expensive and
573  * this caches recent lookups. The implementation of __early_pfn_to_nid
574  * treats start/end as pfns.
575  */
576 struct mminit_pfnnid_cache {
577 	unsigned long last_start;
578 	unsigned long last_end;
579 	int last_nid;
580 };
581 
582 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
583 
584 /*
585  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
586  */
587 static int __meminit __early_pfn_to_nid(unsigned long pfn,
588 					struct mminit_pfnnid_cache *state)
589 {
590 	unsigned long start_pfn, end_pfn;
591 	int nid;
592 
593 	if (state->last_start <= pfn && pfn < state->last_end)
594 		return state->last_nid;
595 
596 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
597 	if (nid != NUMA_NO_NODE) {
598 		state->last_start = start_pfn;
599 		state->last_end = end_pfn;
600 		state->last_nid = nid;
601 	}
602 
603 	return nid;
604 }
605 
606 int __meminit early_pfn_to_nid(unsigned long pfn)
607 {
608 	static DEFINE_SPINLOCK(early_pfn_lock);
609 	int nid;
610 
611 	spin_lock(&early_pfn_lock);
612 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
613 	if (nid < 0)
614 		nid = first_online_node;
615 	spin_unlock(&early_pfn_lock);
616 
617 	return nid;
618 }
619 
620 int hashdist = HASHDIST_DEFAULT;
621 
622 static int __init set_hashdist(char *str)
623 {
624 	if (!str)
625 		return 0;
626 	hashdist = simple_strtoul(str, &str, 0);
627 	return 1;
628 }
629 __setup("hashdist=", set_hashdist);
630 
631 static inline void fixup_hashdist(void)
632 {
633 	if (num_node_state(N_MEMORY) == 1)
634 		hashdist = 0;
635 }
636 #else
637 static inline void fixup_hashdist(void) {}
638 #endif /* CONFIG_NUMA */
639 
640 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
641 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
642 {
643 	pgdat->first_deferred_pfn = ULONG_MAX;
644 }
645 
646 /* Returns true if the struct page for the pfn is initialised */
647 static inline bool __meminit early_page_initialised(unsigned long pfn)
648 {
649 	int nid = early_pfn_to_nid(pfn);
650 
651 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
652 		return false;
653 
654 	return true;
655 }
656 
657 /*
658  * Returns true when the remaining initialisation should be deferred until
659  * later in the boot cycle when it can be parallelised.
660  */
661 static bool __meminit
662 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
663 {
664 	static unsigned long prev_end_pfn, nr_initialised;
665 
666 	if (early_page_ext_enabled())
667 		return false;
668 	/*
669 	 * prev_end_pfn is a static that contains the end of the previous zone.
670 	 * No need for protection because this is called very early in boot, before smp_init.
671 	 */
672 	if (prev_end_pfn != end_pfn) {
673 		prev_end_pfn = end_pfn;
674 		nr_initialised = 0;
675 	}
676 
677 	/* Always populate low zones for address-constrained allocations */
678 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
679 		return false;
680 
681 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
682 		return true;
683 	/*
684 	 * We start only with one section of pages, more pages are added as
685 	 * needed until the rest of deferred pages are initialized.
686 	 */
687 	nr_initialised++;
688 	if ((nr_initialised > PAGES_PER_SECTION) &&
689 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
690 		NODE_DATA(nid)->first_deferred_pfn = pfn;
691 		return true;
692 	}
693 	return false;
694 }
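
/*
 * Illustrative note (not part of the original source): assuming an x86_64
 * configuration where PAGES_PER_SECTION is 32768 (a 128 MiB section of
 * 4 KiB pages), each node eagerly initialises at least one section's worth
 * of struct pages beyond its low zones; everything past the recorded
 * first_deferred_pfn is left for the deferred init threads started later
 * in boot.
 */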
695 
696 static void __meminit init_reserved_page(unsigned long pfn)
697 {
698 	pg_data_t *pgdat;
699 	int nid, zid;
700 
701 	if (early_page_initialised(pfn))
702 		return;
703 
704 	nid = early_pfn_to_nid(pfn);
705 	pgdat = NODE_DATA(nid);
706 
707 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
708 		struct zone *zone = &pgdat->node_zones[zid];
709 
710 		if (zone_spans_pfn(zone, pfn))
711 			break;
712 	}
713 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
714 }
715 #else
716 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
717 
718 static inline bool early_page_initialised(unsigned long pfn)
719 {
720 	return true;
721 }
722 
723 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
724 {
725 	return false;
726 }
727 
728 static inline void init_reserved_page(unsigned long pfn)
729 {
730 }
731 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
732 
733 /*
734  * Initialised pages do not have PageReserved set. This function is
735  * called for each range allocated by the bootmem allocator and
736  * marks the pages PageReserved. The remaining valid pages are later
737  * sent to the buddy page allocator.
738  */
739 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
740 {
741 	unsigned long start_pfn = PFN_DOWN(start);
742 	unsigned long end_pfn = PFN_UP(end);
743 
744 	for (; start_pfn < end_pfn; start_pfn++) {
745 		if (pfn_valid(start_pfn)) {
746 			struct page *page = pfn_to_page(start_pfn);
747 
748 			init_reserved_page(start_pfn);
749 
750 			/* Avoid false-positive PageTail() */
751 			INIT_LIST_HEAD(&page->lru);
752 
753 			/*
754 			 * no need for atomic set_bit because the struct
755 			 * page is not visible yet so nobody should
756 			 * access it yet.
757 			 */
758 			__SetPageReserved(page);
759 		}
760 	}
761 }
762 
763 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
764 static bool __meminit
765 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
766 {
767 	static struct memblock_region *r;
768 
769 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
770 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
771 			for_each_mem_region(r) {
772 				if (*pfn < memblock_region_memory_end_pfn(r))
773 					break;
774 			}
775 		}
776 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
777 		    memblock_is_mirror(r)) {
778 			*pfn = memblock_region_memory_end_pfn(r);
779 			return true;
780 		}
781 	}
782 	return false;
783 }
784 
785 /*
786  * Only struct pages that correspond to ranges defined by memblock.memory
787  * are zeroed and initialized by going through __init_single_page() during
788  * memmap_init_zone_range().
789  *
790  * But, there could be struct pages that correspond to holes in
791  * memblock.memory. This can happen because of the following reasons:
792  * - physical memory bank size is not necessarily an exact multiple of the
793  *   arbitrary section size
794  * - early reserved memory may not be listed in memblock.memory
795  * - memory layouts defined with memmap= kernel parameter may not align
796  *   nicely with memmap sections
797  *
798  * Explicitly initialize those struct pages so that:
799  * - PG_Reserved is set
800  * - zone and node links point to zone and node that span the page if the
801  *   hole is in the middle of a zone
802  * - zone and node links point to adjacent zone/node if the hole falls on
803  *   the zone boundary; the pages in such holes will be prepended to the
804  *   zone/node above the hole except for the trailing pages in the last
805  *   section that will be appended to the zone/node below.
806  */
807 static void __init init_unavailable_range(unsigned long spfn,
808 					  unsigned long epfn,
809 					  int zone, int node)
810 {
811 	unsigned long pfn;
812 	u64 pgcnt = 0;
813 
814 	for (pfn = spfn; pfn < epfn; pfn++) {
815 		if (!pfn_valid(pageblock_start_pfn(pfn))) {
816 			pfn = pageblock_end_pfn(pfn) - 1;
817 			continue;
818 		}
819 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
820 		__SetPageReserved(pfn_to_page(pfn));
821 		pgcnt++;
822 	}
823 
824 	if (pgcnt)
825 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
826 			node, zone_names[zone], pgcnt);
827 }
828 
829 /*
830  * Initially all pages are reserved - free ones are freed
831  * up by memblock_free_all() once the early boot process is
832  * done. Non-atomic initialization, single-pass.
833  *
834  * All aligned pageblocks are initialized to the specified migratetype
835  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
836  * zone stats (e.g., nr_isolate_pageblock) are touched.
837  */
838 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
839 		unsigned long start_pfn, unsigned long zone_end_pfn,
840 		enum meminit_context context,
841 		struct vmem_altmap *altmap, int migratetype)
842 {
843 	unsigned long pfn, end_pfn = start_pfn + size;
844 	struct page *page;
845 
846 	if (highest_memmap_pfn < end_pfn - 1)
847 		highest_memmap_pfn = end_pfn - 1;
848 
849 #ifdef CONFIG_ZONE_DEVICE
850 	/*
851 	 * Honor reservation requested by the driver for this ZONE_DEVICE
852 	 * memory. We limit the total number of pages to initialize to just
853 	 * those that might contain the memory mapping. We will defer the
854 	 * ZONE_DEVICE page initialization until after we have released
855 	 * the hotplug lock.
856 	 */
857 	if (zone == ZONE_DEVICE) {
858 		if (!altmap)
859 			return;
860 
861 		if (start_pfn == altmap->base_pfn)
862 			start_pfn += altmap->reserve;
863 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
864 	}
865 #endif
866 
867 	for (pfn = start_pfn; pfn < end_pfn; ) {
868 		/*
869 		 * There can be holes in boot-time mem_map[]s handed to this
870 		 * function.  They do not exist on hotplugged memory.
871 		 */
872 		if (context == MEMINIT_EARLY) {
873 			if (overlap_memmap_init(zone, &pfn))
874 				continue;
875 			if (defer_init(nid, pfn, zone_end_pfn)) {
876 				deferred_struct_pages = true;
877 				break;
878 			}
879 		}
880 
881 		page = pfn_to_page(pfn);
882 		__init_single_page(page, pfn, zone, nid);
883 		if (context == MEMINIT_HOTPLUG)
884 			__SetPageReserved(page);
885 
886 		/*
887 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
888 		 * such that unmovable allocations won't be scattered all
889 		 * over the place during system boot.
890 		 */
891 		if (pageblock_aligned(pfn)) {
892 			set_pageblock_migratetype(page, migratetype);
893 			cond_resched();
894 		}
895 		pfn++;
896 	}
897 }
898 
899 static void __init memmap_init_zone_range(struct zone *zone,
900 					  unsigned long start_pfn,
901 					  unsigned long end_pfn,
902 					  unsigned long *hole_pfn)
903 {
904 	unsigned long zone_start_pfn = zone->zone_start_pfn;
905 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
906 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
907 
908 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
909 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
910 
911 	if (start_pfn >= end_pfn)
912 		return;
913 
914 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
915 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
916 
917 	if (*hole_pfn < start_pfn)
918 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
919 
920 	*hole_pfn = end_pfn;
921 }
922 
923 static void __init memmap_init(void)
924 {
925 	unsigned long start_pfn, end_pfn;
926 	unsigned long hole_pfn = 0;
927 	int i, j, zone_id = 0, nid;
928 
929 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
930 		struct pglist_data *node = NODE_DATA(nid);
931 
932 		for (j = 0; j < MAX_NR_ZONES; j++) {
933 			struct zone *zone = node->node_zones + j;
934 
935 			if (!populated_zone(zone))
936 				continue;
937 
938 			memmap_init_zone_range(zone, start_pfn, end_pfn,
939 					       &hole_pfn);
940 			zone_id = j;
941 		}
942 	}
943 
944 #ifdef CONFIG_SPARSEMEM
945 	/*
946 	 * Initialize the memory map for hole in the range [memory_end,
947 	 * section_end].
948 	 * Append the pages in this hole to the highest zone in the last
949 	 * node.
950 	 * The call to init_unavailable_range() is outside the ifdef to
951 	 * silence the compiler warning about zone_id being set but not used;
952 	 * for FLATMEM it is a no-op anyway.
953 	 */
954 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
955 	if (hole_pfn < end_pfn)
956 #endif
957 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
958 }
959 
960 #ifdef CONFIG_ZONE_DEVICE
961 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
962 					  unsigned long zone_idx, int nid,
963 					  struct dev_pagemap *pgmap)
964 {
965 
966 	__init_single_page(page, pfn, zone_idx, nid);
967 
968 	/*
969 	 * Mark page reserved as it will need to wait for onlining
970 	 * phase for it to be fully associated with a zone.
971 	 *
972 	 * We can use the non-atomic __set_bit operation for setting
973 	 * the flag as we are still initializing the pages.
974 	 */
975 	__SetPageReserved(page);
976 
977 	/*
978 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
979 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
980 	 * ever freed or placed on a driver-private list.
981 	 */
982 	page->pgmap = pgmap;
983 	page->zone_device_data = NULL;
984 
985 	/*
986 	 * Mark the block movable so that blocks are reserved for
987 	 * movable at startup. This will force kernel allocations
988 	 * to reserve their blocks rather than leaking throughout
989 	 * the address space during boot when many long-lived
990 	 * kernel allocations are made.
991 	 *
992 	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
993 	 * because this is done early in section_activate()
994 	 */
995 	if (pageblock_aligned(pfn)) {
996 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
997 		cond_resched();
998 	}
999 
1000 	/*
1001 	 * ZONE_DEVICE pages are released directly to the driver page allocator
1002 	 * which will set the page count to 1 when allocating the page.
1003 	 */
1004 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1005 	    pgmap->type == MEMORY_DEVICE_COHERENT)
1006 		set_page_count(page, 0);
1007 }
1008 
1009 /*
1010  * With compound page geometry, and when struct pages are stored in RAM, most
1011  * tail pages are reused. Consequently, the number of unique struct pages to
1012  * initialize is a lot smaller than the total number of struct pages being
1013  * mapped. This is a paired / mild layering violation with explicit knowledge
1014  * of how the sparse_vmemmap internals handle compound pages in the absence
1015  * of an altmap. See vmemmap_populate_compound_pages().
1016  */
1017 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
1018 					      struct dev_pagemap *pgmap)
1019 {
1020 	if (!vmemmap_can_optimize(altmap, pgmap))
1021 		return pgmap_vmemmap_nr(pgmap);
1022 
1023 	return 2 * (PAGE_SIZE / sizeof(struct page));
1024 }
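
/*
 * Worked example (illustrative, not part of the original source): assuming
 * a 4 KiB PAGE_SIZE and a 64-byte struct page, an optimised vmemmap only
 * needs 2 * (4096 / 64) = 128 unique struct pages per compound mapping,
 * regardless of how large pgmap_vmemmap_nr(pgmap) is.
 */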
1025 
1026 static void __ref memmap_init_compound(struct page *head,
1027 				       unsigned long head_pfn,
1028 				       unsigned long zone_idx, int nid,
1029 				       struct dev_pagemap *pgmap,
1030 				       unsigned long nr_pages)
1031 {
1032 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
1033 	unsigned int order = pgmap->vmemmap_shift;
1034 
1035 	__SetPageHead(head);
1036 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1037 		struct page *page = pfn_to_page(pfn);
1038 
1039 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1040 		prep_compound_tail(head, pfn - head_pfn);
1041 		set_page_count(page, 0);
1042 
1043 		/*
1044 		 * The first tail page stores important compound page info.
1045 		 * Call prep_compound_head() after the first tail page has
1046 		 * been initialized, to not have the data overwritten.
1047 		 */
1048 		if (pfn == head_pfn + 1)
1049 			prep_compound_head(head, order);
1050 	}
1051 }
1052 
1053 void __ref memmap_init_zone_device(struct zone *zone,
1054 				   unsigned long start_pfn,
1055 				   unsigned long nr_pages,
1056 				   struct dev_pagemap *pgmap)
1057 {
1058 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
1059 	struct pglist_data *pgdat = zone->zone_pgdat;
1060 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1061 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1062 	unsigned long zone_idx = zone_idx(zone);
1063 	unsigned long start = jiffies;
1064 	int nid = pgdat->node_id;
1065 
1066 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1067 		return;
1068 
1069 	/*
1070 	 * The call to memmap_init should have already taken care
1071 	 * of the pages reserved for the memmap, so we can just jump to
1072 	 * the end of that region and start processing the device pages.
1073 	 */
1074 	if (altmap) {
1075 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1076 		nr_pages = end_pfn - start_pfn;
1077 	}
1078 
1079 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1080 		struct page *page = pfn_to_page(pfn);
1081 
1082 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1083 
1084 		if (pfns_per_compound == 1)
1085 			continue;
1086 
1087 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1088 				     compound_nr_pages(altmap, pgmap));
1089 	}
1090 
1091 	pr_debug("%s initialised %lu pages in %ums\n", __func__,
1092 		nr_pages, jiffies_to_msecs(jiffies - start));
1093 }
1094 #endif
1095 
1096 /*
1097  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1098  * because it is sized independent of architecture. Unlike the other zones,
1099  * the starting point for ZONE_MOVABLE is not fixed. It may be different
1100  * in each node depending on the size of each node and how evenly kernelcore
1101  * is distributed. This helper function adjusts the zone ranges
1102  * provided by the architecture for a given node by using the end of the
1103  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1104  * zones within a node are in order of monotonically increasing memory addresses.
1105  */
1106 static void __init adjust_zone_range_for_zone_movable(int nid,
1107 					unsigned long zone_type,
1108 					unsigned long node_start_pfn,
1109 					unsigned long node_end_pfn,
1110 					unsigned long *zone_start_pfn,
1111 					unsigned long *zone_end_pfn)
1112 {
1113 	/* Only adjust if ZONE_MOVABLE is on this node */
1114 	if (zone_movable_pfn[nid]) {
1115 		/* Size ZONE_MOVABLE */
1116 		if (zone_type == ZONE_MOVABLE) {
1117 			*zone_start_pfn = zone_movable_pfn[nid];
1118 			*zone_end_pfn = min(node_end_pfn,
1119 				arch_zone_highest_possible_pfn[movable_zone]);
1120 
1121 		/* Adjust for ZONE_MOVABLE starting within this range */
1122 		} else if (!mirrored_kernelcore &&
1123 			*zone_start_pfn < zone_movable_pfn[nid] &&
1124 			*zone_end_pfn > zone_movable_pfn[nid]) {
1125 			*zone_end_pfn = zone_movable_pfn[nid];
1126 
1127 		/* Check if this whole range is within ZONE_MOVABLE */
1128 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
1129 			*zone_start_pfn = *zone_end_pfn;
1130 	}
1131 }
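
/*
 * Illustrative example (not part of the original source), assuming
 * kernelcore=mirror is not in use: if a node spans PFNs 0x100000-0x200000
 * and zone_movable_pfn[nid] is 0x180000, a ZONE_NORMAL range that
 * previously ended at 0x200000 is clipped to end at 0x180000, ZONE_MOVABLE
 * is sized to run from 0x180000 to 0x200000, and any other zone starting at
 * or above 0x180000 collapses to an empty range.
 */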
1132 
1133 /*
1134  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1135  * then all holes in the requested range will be accounted for.
1136  */
1137 unsigned long __init __absent_pages_in_range(int nid,
1138 				unsigned long range_start_pfn,
1139 				unsigned long range_end_pfn)
1140 {
1141 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
1142 	unsigned long start_pfn, end_pfn;
1143 	int i;
1144 
1145 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1146 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1147 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1148 		nr_absent -= end_pfn - start_pfn;
1149 	}
1150 	return nr_absent;
1151 }
1152 
1153 /**
1154  * absent_pages_in_range - Return number of page frames in holes within a range
1155  * @start_pfn: The start PFN to start searching for holes
1156  * @end_pfn: The end PFN to stop searching for holes
1157  *
1158  * Return: the number of page frames in memory holes within a range.
1159  */
1160 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
1161 							unsigned long end_pfn)
1162 {
1163 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1164 }
1165 
1166 /* Return the number of page frames in holes in a zone on a node */
1167 static unsigned long __init zone_absent_pages_in_node(int nid,
1168 					unsigned long zone_type,
1169 					unsigned long node_start_pfn,
1170 					unsigned long node_end_pfn)
1171 {
1172 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1173 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1174 	unsigned long zone_start_pfn, zone_end_pfn;
1175 	unsigned long nr_absent;
1176 
1177 	/* When hot-adding a new node from cpu_up(), the node should be empty */
1178 	if (!node_start_pfn && !node_end_pfn)
1179 		return 0;
1180 
1181 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1182 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1183 
1184 	adjust_zone_range_for_zone_movable(nid, zone_type,
1185 			node_start_pfn, node_end_pfn,
1186 			&zone_start_pfn, &zone_end_pfn);
1187 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1188 
1189 	/*
1190 	 * ZONE_MOVABLE handling.
1191 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1192 	 * and vice versa.
1193 	 */
1194 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1195 		unsigned long start_pfn, end_pfn;
1196 		struct memblock_region *r;
1197 
1198 		for_each_mem_region(r) {
1199 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
1200 					  zone_start_pfn, zone_end_pfn);
1201 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
1202 					zone_start_pfn, zone_end_pfn);
1203 
1204 			if (zone_type == ZONE_MOVABLE &&
1205 			    memblock_is_mirror(r))
1206 				nr_absent += end_pfn - start_pfn;
1207 
1208 			if (zone_type == ZONE_NORMAL &&
1209 			    !memblock_is_mirror(r))
1210 				nr_absent += end_pfn - start_pfn;
1211 		}
1212 	}
1213 
1214 	return nr_absent;
1215 }
1216 
1217 /*
1218  * Return the number of pages a zone spans in a node, including holes
1219  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1220  */
1221 static unsigned long __init zone_spanned_pages_in_node(int nid,
1222 					unsigned long zone_type,
1223 					unsigned long node_start_pfn,
1224 					unsigned long node_end_pfn,
1225 					unsigned long *zone_start_pfn,
1226 					unsigned long *zone_end_pfn)
1227 {
1228 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1229 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1230 	/* When hotadd a new node from cpu_up(), the node should be empty */
1231 	if (!node_start_pfn && !node_end_pfn)
1232 		return 0;
1233 
1234 	/* Get the start and end of the zone */
1235 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1236 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1237 	adjust_zone_range_for_zone_movable(nid, zone_type,
1238 				node_start_pfn, node_end_pfn,
1239 				zone_start_pfn, zone_end_pfn);
1240 
1241 	/* Check that this node has pages within the zone's required range */
1242 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1243 		return 0;
1244 
1245 	/* Move the zone boundaries inside the node if necessary */
1246 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1247 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1248 
1249 	/* Return the spanned pages */
1250 	return *zone_end_pfn - *zone_start_pfn;
1251 }
1252 
1253 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
1254 						unsigned long node_start_pfn,
1255 						unsigned long node_end_pfn)
1256 {
1257 	unsigned long realtotalpages = 0, totalpages = 0;
1258 	enum zone_type i;
1259 
1260 	for (i = 0; i < MAX_NR_ZONES; i++) {
1261 		struct zone *zone = pgdat->node_zones + i;
1262 		unsigned long zone_start_pfn, zone_end_pfn;
1263 		unsigned long spanned, absent;
1264 		unsigned long size, real_size;
1265 
1266 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1267 						     node_start_pfn,
1268 						     node_end_pfn,
1269 						     &zone_start_pfn,
1270 						     &zone_end_pfn);
1271 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
1272 						   node_start_pfn,
1273 						   node_end_pfn);
1274 
1275 		size = spanned;
1276 		real_size = size - absent;
1277 
1278 		if (size)
1279 			zone->zone_start_pfn = zone_start_pfn;
1280 		else
1281 			zone->zone_start_pfn = 0;
1282 		zone->spanned_pages = size;
1283 		zone->present_pages = real_size;
1284 #if defined(CONFIG_MEMORY_HOTPLUG)
1285 		zone->present_early_pages = real_size;
1286 #endif
1287 
1288 		totalpages += size;
1289 		realtotalpages += real_size;
1290 	}
1291 
1292 	pgdat->node_spanned_pages = totalpages;
1293 	pgdat->node_present_pages = realtotalpages;
1294 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1295 }
1296 
1297 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
1298 						unsigned long present_pages)
1299 {
1300 	unsigned long pages = spanned_pages;
1301 
1302 	/*
1303 	 * Provide a more accurate estimation if there are holes within
1304 	 * the zone and SPARSEMEM is in use. If there are holes within the
1305 	 * zone, each populated memory region may cost us one or two extra
1306 	 * memmap pages due to alignment because memmap pages for each
1307 	 * memmap pages due to alignment because the memmap pages for each
1308 	 * populated region may not be naturally aligned on a page boundary.
1309 	 */
1310 	if (spanned_pages > present_pages + (present_pages >> 4) &&
1311 	    IS_ENABLED(CONFIG_SPARSEMEM))
1312 		pages = present_pages;
1313 
1314 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
1315 }
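
/*
 * Worked example (illustrative, not part of the original source): assuming
 * a 64-byte struct page and 4 KiB pages, a zone spanning 1,048,576 pages
 * (4 GiB) needs 1048576 * 64 bytes = 64 MiB of memmap, i.e. this helper
 * returns 16384 pages.  Under SPARSEMEM, when the holes exceed roughly
 * 1/16 of present_pages, present_pages is used instead of spanned_pages.
 */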
1316 
1317 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1318 static void pgdat_init_split_queue(struct pglist_data *pgdat)
1319 {
1320 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1321 
1322 	spin_lock_init(&ds_queue->split_queue_lock);
1323 	INIT_LIST_HEAD(&ds_queue->split_queue);
1324 	ds_queue->split_queue_len = 0;
1325 }
1326 #else
1327 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1328 #endif
1329 
1330 #ifdef CONFIG_COMPACTION
1331 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1332 {
1333 	init_waitqueue_head(&pgdat->kcompactd_wait);
1334 }
1335 #else
1336 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1337 #endif
1338 
1339 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1340 {
1341 	int i;
1342 
1343 	pgdat_resize_init(pgdat);
1344 	pgdat_kswapd_lock_init(pgdat);
1345 
1346 	pgdat_init_split_queue(pgdat);
1347 	pgdat_init_kcompactd(pgdat);
1348 
1349 	init_waitqueue_head(&pgdat->kswapd_wait);
1350 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
1351 
1352 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1353 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
1354 
1355 	pgdat_page_ext_init(pgdat);
1356 	lruvec_init(&pgdat->__lruvec);
1357 }
1358 
1359 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1360 							unsigned long remaining_pages)
1361 {
1362 	atomic_long_set(&zone->managed_pages, remaining_pages);
1363 	zone_set_nid(zone, nid);
1364 	zone->name = zone_names[idx];
1365 	zone->zone_pgdat = NODE_DATA(nid);
1366 	spin_lock_init(&zone->lock);
1367 	zone_seqlock_init(zone);
1368 	zone_pcp_init(zone);
1369 }
1370 
1371 static void __meminit zone_init_free_lists(struct zone *zone)
1372 {
1373 	unsigned int order, t;
1374 	for_each_migratetype_order(order, t) {
1375 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1376 		zone->free_area[order].nr_free = 0;
1377 	}
1378 }
1379 
1380 void __meminit init_currently_empty_zone(struct zone *zone,
1381 					unsigned long zone_start_pfn,
1382 					unsigned long size)
1383 {
1384 	struct pglist_data *pgdat = zone->zone_pgdat;
1385 	int zone_idx = zone_idx(zone) + 1;
1386 
1387 	if (zone_idx > pgdat->nr_zones)
1388 		pgdat->nr_zones = zone_idx;
1389 
1390 	zone->zone_start_pfn = zone_start_pfn;
1391 
1392 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
1393 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
1394 			pgdat->node_id,
1395 			(unsigned long)zone_idx(zone),
1396 			zone_start_pfn, (zone_start_pfn + size));
1397 
1398 	zone_init_free_lists(zone);
1399 	zone->initialized = 1;
1400 }
1401 
1402 #ifndef CONFIG_SPARSEMEM
1403 /*
1404  * Calculate the size of the zone->blockflags rounded to an unsigned long
1405  * Start by making sure zonesize is a multiple of pageblock_order by rounding
1406  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
1407  * round what is now in bits to nearest long in bits, then return it in
1408  * bytes.
1409  */
1410 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1411 {
1412 	unsigned long usemapsize;
1413 
1414 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1415 	usemapsize = roundup(zonesize, pageblock_nr_pages);
1416 	usemapsize = usemapsize >> pageblock_order;
1417 	usemapsize *= NR_PAGEBLOCK_BITS;
1418 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
1419 
1420 	return usemapsize / 8;
1421 }
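
/*
 * Worked example (illustrative, not part of the original source): for a
 * zone of 1,048,576 pages starting on a pageblock boundary, and assuming
 * pageblock_nr_pages = 512 and NR_PAGEBLOCK_BITS = 4, there are 2048
 * pageblocks needing 8192 bits of blockflags, which rounds to
 * 8192 / 8 = 1024 bytes.
 */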
1422 
1423 static void __ref setup_usemap(struct zone *zone)
1424 {
1425 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1426 					       zone->spanned_pages);
1427 	zone->pageblock_flags = NULL;
1428 	if (usemapsize) {
1429 		zone->pageblock_flags =
1430 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1431 					    zone_to_nid(zone));
1432 		if (!zone->pageblock_flags)
1433 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1434 			      usemapsize, zone->name, zone_to_nid(zone));
1435 	}
1436 }
1437 #else
1438 static inline void setup_usemap(struct zone *zone) {}
1439 #endif /* CONFIG_SPARSEMEM */
1440 
1441 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1442 
1443 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1444 void __init set_pageblock_order(void)
1445 {
1446 	unsigned int order = MAX_ORDER;
1447 
1448 	/* Check that pageblock_nr_pages has not already been setup */
1449 	if (pageblock_order)
1450 		return;
1451 
1452 	/* Don't let pageblocks exceed the maximum allocation granularity. */
1453 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1454 		order = HUGETLB_PAGE_ORDER;
1455 
1456 	/*
1457 	 * Assume the largest contiguous order of interest is a huge page.
1458 	 * This value may be variable depending on boot parameters on IA64 and
1459 	 * powerpc.
1460 	 */
1461 	pageblock_order = order;
1462 }
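
/*
 * Illustrative example (not part of the original source): on a powerpc64
 * configuration with a 64 KiB base page and 16 MiB huge pages,
 * HUGETLB_PAGE_ORDER is 8, which is below MAX_ORDER, so pageblock_order
 * is set to 8 here rather than to MAX_ORDER.
 */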
1463 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1464 
1465 /*
1466  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1467  * is unused as pageblock_order is set at compile-time. See
1468  * include/linux/pageblock-flags.h for the values of pageblock_order based on
1469  * the kernel config
1470  */
1471 void __init set_pageblock_order(void)
1472 {
1473 }
1474 
1475 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1476 
1477 /*
1478  * Set up the zone data structures
1479  * - init pgdat internals
1480  * - init all zones belonging to this node
1481  *
1482  * NOTE: this function is only called during memory hotplug
1483  */
1484 #ifdef CONFIG_MEMORY_HOTPLUG
1485 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1486 {
1487 	int nid = pgdat->node_id;
1488 	enum zone_type z;
1489 	int cpu;
1490 
1491 	pgdat_init_internals(pgdat);
1492 
1493 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
1494 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1495 
1496 	/*
1497 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
1498 	 * Note that kswapd will init kswapd_highest_zoneidx properly
1499 	 * when it starts in the near future.
1500 	 */
1501 	pgdat->nr_zones = 0;
1502 	pgdat->kswapd_order = 0;
1503 	pgdat->kswapd_highest_zoneidx = 0;
1504 	pgdat->node_start_pfn = 0;
1505 	for_each_online_cpu(cpu) {
1506 		struct per_cpu_nodestat *p;
1507 
1508 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1509 		memset(p, 0, sizeof(*p));
1510 	}
1511 
1512 	for (z = 0; z < MAX_NR_ZONES; z++)
1513 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
1514 }
1515 #endif
1516 
1517 /*
1518  * Set up the zone data structures:
1519  *   - mark all pages reserved
1520  *   - mark all memory queues empty
1521  *   - clear the memory bitmaps
1522  *
1523  * NOTE: pgdat should get zeroed by caller.
1524  * NOTE: this function is only called during early init.
1525  */
1526 static void __init free_area_init_core(struct pglist_data *pgdat)
1527 {
1528 	enum zone_type j;
1529 	int nid = pgdat->node_id;
1530 
1531 	pgdat_init_internals(pgdat);
1532 	pgdat->per_cpu_nodestats = &boot_nodestats;
1533 
1534 	for (j = 0; j < MAX_NR_ZONES; j++) {
1535 		struct zone *zone = pgdat->node_zones + j;
1536 		unsigned long size, freesize, memmap_pages;
1537 
1538 		size = zone->spanned_pages;
1539 		freesize = zone->present_pages;
1540 
1541 		/*
1542 		 * Adjust freesize so that it accounts for how much memory
1543 		 * is used by this zone for memmap. This affects the watermark
1544 		 * and per-cpu initialisations
1545 		 */
1546 		memmap_pages = calc_memmap_size(size, freesize);
1547 		if (!is_highmem_idx(j)) {
1548 			if (freesize >= memmap_pages) {
1549 				freesize -= memmap_pages;
1550 				if (memmap_pages)
1551 					pr_debug("  %s zone: %lu pages used for memmap\n",
1552 						 zone_names[j], memmap_pages);
1553 			} else
1554 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
1555 					zone_names[j], memmap_pages, freesize);
1556 		}
1557 
1558 		/* Account for reserved pages */
1559 		if (j == 0 && freesize > dma_reserve) {
1560 			freesize -= dma_reserve;
1561 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
1562 		}
1563 
1564 		if (!is_highmem_idx(j))
1565 			nr_kernel_pages += freesize;
1566 		/* Charge for highmem memmap if there are enough kernel pages */
1567 		else if (nr_kernel_pages > memmap_pages * 2)
1568 			nr_kernel_pages -= memmap_pages;
1569 		nr_all_pages += freesize;
1570 
1571 		/*
1572 		 * Set an approximate value for lowmem here, it will be adjusted
1573 		 * when the bootmem allocator frees pages into the buddy system.
1574 		 * And all highmem pages will be managed by the buddy system.
1575 		 */
1576 		zone_init_internals(zone, j, nid, freesize);
1577 
1578 		if (!size)
1579 			continue;
1580 
1581 		set_pageblock_order();
1582 		setup_usemap(zone);
1583 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1584 	}
1585 }
1586 
1587 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
1588 			  phys_addr_t min_addr, int nid, bool exact_nid)
1589 {
1590 	void *ptr;
1591 
1592 	if (exact_nid)
1593 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1594 						   MEMBLOCK_ALLOC_ACCESSIBLE,
1595 						   nid);
1596 	else
1597 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1598 						 MEMBLOCK_ALLOC_ACCESSIBLE,
1599 						 nid);
1600 
1601 	if (ptr && size > 0)
1602 		page_init_poison(ptr, size);
1603 
1604 	return ptr;
1605 }
1606 
1607 #ifdef CONFIG_FLATMEM
1608 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1609 {
1610 	unsigned long __maybe_unused start = 0;
1611 	unsigned long __maybe_unused offset = 0;
1612 
1613 	/* Skip empty nodes */
1614 	if (!pgdat->node_spanned_pages)
1615 		return;
1616 
1617 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1618 	offset = pgdat->node_start_pfn - start;
1619 	/* ia64 gets its own node_mem_map, before this, without bootmem */
1620 	if (!pgdat->node_mem_map) {
1621 		unsigned long size, end;
1622 		struct page *map;
1623 
1624 		/*
1625 		 * The zone's endpoints aren't required to be MAX_ORDER
1626 		 * aligned, but the node_mem_map endpoints must be, in order
1627 		 * for the buddy allocator to function correctly.
1628 		 */
1629 		end = pgdat_end_pfn(pgdat);
1630 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
1631 		size =  (end - start) * sizeof(struct page);
1632 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1633 				   pgdat->node_id, false);
1634 		if (!map)
1635 			panic("Failed to allocate %ld bytes for node %d memory map\n",
1636 			      size, pgdat->node_id);
1637 		pgdat->node_mem_map = map + offset;
1638 	}
1639 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1640 				__func__, pgdat->node_id, (unsigned long)pgdat,
1641 				(unsigned long)pgdat->node_mem_map);
1642 #ifndef CONFIG_NUMA
1643 	/*
1644 	 * With no DISCONTIG, the global mem_map is just set as node 0's
1645 	 */
1646 	if (pgdat == NODE_DATA(0)) {
1647 		mem_map = NODE_DATA(0)->node_mem_map;
1648 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1649 			mem_map -= offset;
1650 	}
1651 #endif
1652 }
1653 #else
1654 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1655 #endif /* CONFIG_FLATMEM */
1656 
1657 /**
1658  * get_pfn_range_for_nid - Return the start and end page frames for a node
1659  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1660  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1661  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1662  *
1663  * It returns the start and end page frame of a node based on information
1664  * provided by memblock_set_node(). If called for a node
1665  * with no available memory, a warning is printed and the start and end
1666  * PFNs will be 0.
1667  */
1668 void __init get_pfn_range_for_nid(unsigned int nid,
1669 			unsigned long *start_pfn, unsigned long *end_pfn)
1670 {
1671 	unsigned long this_start_pfn, this_end_pfn;
1672 	int i;
1673 
1674 	*start_pfn = -1UL;
1675 	*end_pfn = 0;
1676 
1677 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1678 		*start_pfn = min(*start_pfn, this_start_pfn);
1679 		*end_pfn = max(*end_pfn, this_end_pfn);
1680 	}
1681 
1682 	if (*start_pfn == -1UL)
1683 		*start_pfn = 0;
1684 }
1685 
1686 static void __init free_area_init_node(int nid)
1687 {
1688 	pg_data_t *pgdat = NODE_DATA(nid);
1689 	unsigned long start_pfn = 0;
1690 	unsigned long end_pfn = 0;
1691 
1692 	/* pg_data_t should be reset to zero when it's allocated */
1693 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1694 
1695 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1696 
1697 	pgdat->node_id = nid;
1698 	pgdat->node_start_pfn = start_pfn;
1699 	pgdat->per_cpu_nodestats = NULL;
1700 
1701 	if (start_pfn != end_pfn) {
1702 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1703 			(u64)start_pfn << PAGE_SHIFT,
1704 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1705 	} else {
1706 		pr_info("Initmem setup node %d as memoryless\n", nid);
1707 	}
1708 
1709 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1710 
1711 	alloc_node_mem_map(pgdat);
1712 	pgdat_set_deferred_range(pgdat);
1713 
1714 	free_area_init_core(pgdat);
1715 	lru_gen_init_pgdat(pgdat);
1716 }
1717 
1718 /* Any regular or high memory on that node ? */
1719 static void check_for_memory(pg_data_t *pgdat, int nid)
1720 {
1721 	enum zone_type zone_type;
1722 
1723 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1724 		struct zone *zone = &pgdat->node_zones[zone_type];
1725 		if (populated_zone(zone)) {
1726 			if (IS_ENABLED(CONFIG_HIGHMEM))
1727 				node_set_state(nid, N_HIGH_MEMORY);
1728 			if (zone_type <= ZONE_NORMAL)
1729 				node_set_state(nid, N_NORMAL_MEMORY);
1730 			break;
1731 		}
1732 	}
1733 }
1734 
1735 #if MAX_NUMNODES > 1
1736 /*
1737  * Figure out the number of possible node ids.
1738  */
1739 void __init setup_nr_node_ids(void)
1740 {
1741 	unsigned int highest;
1742 
1743 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1744 	nr_node_ids = highest + 1;
1745 }
1746 #endif
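/*
 * Worked example (hypothetical map): if node_possible_map has bits 0 and 2
 * set, find_last_bit() returns 2 and nr_node_ids becomes 3 even though node 1
 * is absent, so nr_node_ids is an upper bound on node IDs rather than a count
 * of present nodes.
 */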
1747 
1748 static void __init free_area_init_memoryless_node(int nid)
1749 {
1750 	free_area_init_node(nid);
1751 }
1752 
1753 /*
1754  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1755  * such cases we allow max_zone_pfn to be sorted in descending order.
1756  */
1757 static bool arch_has_descending_max_zone_pfns(void)
1758 {
1759 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1760 }
1761 
1762 /**
1763  * free_area_init - Initialise all pg_data_t and zone data
1764  * @max_zone_pfn: an array of max PFNs for each zone
1765  *
1766  * This will call free_area_init_node() for each active node in the system.
1767  * Using the page ranges provided by memblock_set_node(), the size of each
1768  * zone in each node, along with its holes, is calculated. If the maximum
1769  * PFNs of two adjacent zones match, it is assumed that the later zone is
1770  * empty. For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is
1771  * assumed that ZONE_DMA32 has no pages. It is also assumed that a zone
1772  * starts where the previous one ended. For example, ZONE_DMA32 starts
1773  * at arch_max_dma_pfn.
1774  */
1775 void __init free_area_init(unsigned long *max_zone_pfn)
1776 {
1777 	unsigned long start_pfn, end_pfn;
1778 	int i, nid, zone;
1779 	bool descending;
1780 
1781 	/* Record where the zone boundaries are */
1782 	memset(arch_zone_lowest_possible_pfn, 0,
1783 				sizeof(arch_zone_lowest_possible_pfn));
1784 	memset(arch_zone_highest_possible_pfn, 0,
1785 				sizeof(arch_zone_highest_possible_pfn));
1786 
1787 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1788 	descending = arch_has_descending_max_zone_pfns();
1789 
1790 	for (i = 0; i < MAX_NR_ZONES; i++) {
1791 		if (descending)
1792 			zone = MAX_NR_ZONES - i - 1;
1793 		else
1794 			zone = i;
1795 
1796 		if (zone == ZONE_MOVABLE)
1797 			continue;
1798 
1799 		end_pfn = max(max_zone_pfn[zone], start_pfn);
1800 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1801 		arch_zone_highest_possible_pfn[zone] = end_pfn;
1802 
1803 		start_pfn = end_pfn;
1804 	}
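	/*
	 * Illustrative values for the loop above (made up, not from any real
	 * machine): with max_zone_pfn = { [ZONE_DMA] = 0x1000,
	 * [ZONE_DMA32] = 0x100000, [ZONE_NORMAL] = 0x100000 } and DRAM
	 * starting at pfn 0x100, the boundaries become DMA [0x100, 0x1000),
	 * DMA32 [0x1000, 0x100000) and NORMAL [0x100000, 0x100000), i.e.
	 * ZONE_NORMAL ends up empty because its max PFN matches the zone
	 * below it.
	 */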
1805 
1806 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1807 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1808 	find_zone_movable_pfns_for_nodes();
1809 
1810 	/* Print out the zone ranges */
1811 	pr_info("Zone ranges:\n");
1812 	for (i = 0; i < MAX_NR_ZONES; i++) {
1813 		if (i == ZONE_MOVABLE)
1814 			continue;
1815 		pr_info("  %-8s ", zone_names[i]);
1816 		if (arch_zone_lowest_possible_pfn[i] ==
1817 				arch_zone_highest_possible_pfn[i])
1818 			pr_cont("empty\n");
1819 		else
1820 			pr_cont("[mem %#018Lx-%#018Lx]\n",
1821 				(u64)arch_zone_lowest_possible_pfn[i]
1822 					<< PAGE_SHIFT,
1823 				((u64)arch_zone_highest_possible_pfn[i]
1824 					<< PAGE_SHIFT) - 1);
1825 	}
1826 
1827 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1828 	pr_info("Movable zone start for each node\n");
1829 	for (i = 0; i < MAX_NUMNODES; i++) {
1830 		if (zone_movable_pfn[i])
1831 			pr_info("  Node %d: %#018Lx\n", i,
1832 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1833 	}
1834 
1835 	/*
1836 	 * Print out the early node map, and initialize the
1837 	 * subsection-map relative to active online memory ranges to
1838 	 * enable future "sub-section" extensions of the memory map.
1839 	 */
1840 	pr_info("Early memory node ranges\n");
1841 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1842 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1843 			(u64)start_pfn << PAGE_SHIFT,
1844 			((u64)end_pfn << PAGE_SHIFT) - 1);
1845 		subsection_map_init(start_pfn, end_pfn - start_pfn);
1846 	}
1847 
1848 	/* Initialise every node */
1849 	mminit_verify_pageflags_layout();
1850 	setup_nr_node_ids();
1851 	for_each_node(nid) {
1852 		pg_data_t *pgdat;
1853 
1854 		if (!node_online(nid)) {
1855 			pr_info("Initializing node %d as memoryless\n", nid);
1856 
1857 			/* Allocator not initialized yet */
1858 			pgdat = arch_alloc_nodedata(nid);
1859 			if (!pgdat)
1860 				panic("Cannot allocate %zuB for node %d.\n",
1861 				       sizeof(*pgdat), nid);
1862 			arch_refresh_nodedata(nid, pgdat);
1863 			free_area_init_memoryless_node(nid);
1864 
1865 			/*
1866 			 * We do not want to confuse userspace by sysfs
1867 			 * files/directories for node without any memory
1868 			 * attached to it, so this node is not marked as
1869 			 * N_MEMORY and not marked online so that no sysfs
1870 			 * hierarchy will be created via register_one_node for
1871 			 * it. The pgdat will get fully initialized by
1872 			 * hotadd_init_pgdat() when memory is hotplugged into
1873 			 * this node.
1874 			 */
1875 			continue;
1876 		}
1877 
1878 		pgdat = NODE_DATA(nid);
1879 		free_area_init_node(nid);
1880 
1881 		/* Any memory on that node */
1882 		if (pgdat->node_present_pages)
1883 			node_set_state(nid, N_MEMORY);
1884 		check_for_memory(pgdat, nid);
1885 	}
1886 
1887 	memmap_init();
1888 
1889 	/* disable hash distribution for systems with a single node */
1890 	fixup_hashdist();
1891 }
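/*
 * free_area_init() is expected to be called once during boot by the
 * architecture setup code (for instance from zone_sizes_init() on x86), which
 * fills max_zone_pfn[] from its view of physical memory.
 */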
1892 
1893 /**
1894  * node_map_pfn_alignment - determine the maximum internode alignment
1895  *
1896  * This function should be called after node map is populated and sorted.
1897  * It calculates the maximum power of two alignment which can distinguish
1898  * all the nodes.
1899  *
1900  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1901  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1902  * nodes are shifted by 256MiB, the result is 256MiB.  Note that if only
1903  * the last node is shifted, 1GiB is enough and this function will indicate so.
1904  *
1905  * This is used to test whether pfn -> nid mapping of the chosen memory
1906  * model has fine enough granularity to avoid incorrect mapping for the
1907  * populated node map.
1908  *
1909  * Return: the determined alignment in pfn's.  0 if there is no alignment
1910  * requirement (single node).
1911  */
1912 unsigned long __init node_map_pfn_alignment(void)
1913 {
1914 	unsigned long accl_mask = 0, last_end = 0;
1915 	unsigned long start, end, mask;
1916 	int last_nid = NUMA_NO_NODE;
1917 	int i, nid;
1918 
1919 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1920 		if (!start || last_nid < 0 || last_nid == nid) {
1921 			last_nid = nid;
1922 			last_end = end;
1923 			continue;
1924 		}
1925 
1926 		/*
1927 		 * Start with a mask granular enough to pin-point to the
1928 		 * start pfn and tick off bits one-by-one until it becomes
1929 		 * too coarse to separate the current node from the last.
1930 		 */
1931 		mask = ~((1 << __ffs(start)) - 1);
1932 		while (mask && last_end <= (start & (mask << 1)))
1933 			mask <<= 1;
1934 
1935 		/* accumulate all internode masks */
1936 		accl_mask |= mask;
1937 	}
1938 
1939 	/* convert mask to number of pages */
1940 	return ~accl_mask + 1;
1941 }
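/*
 * Worked example (hypothetical layout, assuming PAGE_SHIFT == 12): node 0
 * spans [0, 0x40000) (1GiB) and node 1 starts at pfn 0x50000 (1.25GiB).  For
 * node 1's first range, mask starts at ~0xffff (0x10000 pfns, i.e. 256MiB
 * granularity) and is coarsened while node 0 still fits below the coarser
 * block, ending at ~0x3ffff.  The function then returns 0x40000 pfns, i.e.
 * 1GiB alignment, matching the "only the last node is shifted" note above.
 */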
1942 
1943 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1944 static void __init deferred_free_range(unsigned long pfn,
1945 				       unsigned long nr_pages)
1946 {
1947 	struct page *page;
1948 	unsigned long i;
1949 
1950 	if (!nr_pages)
1951 		return;
1952 
1953 	page = pfn_to_page(pfn);
1954 
1955 	/* Free a large naturally-aligned chunk if possible */
1956 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1957 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1958 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1959 		__free_pages_core(page, MAX_ORDER);
1960 		return;
1961 	}
1962 
1963 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1964 		if (pageblock_aligned(pfn))
1965 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1966 		__free_pages_core(page, 0);
1967 	}
1968 }
1969 
1970 /* Completion tracking for deferred_init_memmap() threads */
1971 static atomic_t pgdat_init_n_undone __initdata;
1972 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1973 
1974 static inline void __init pgdat_init_report_one_done(void)
1975 {
1976 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1977 		complete(&pgdat_init_all_done_comp);
1978 }
1979 
1980 /*
1981  * Returns true if the page needs to be initialized or freed to the buddy allocator.
1982  *
1983  * We check whether the current MAX_ORDER block is valid by checking only the
1984  * validity of its head pfn.
1985  */
1986 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1987 {
1988 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
1989 		return false;
1990 	return true;
1991 }
1992 
1993 /*
1994  * Free pages to buddy allocator. Try to free aligned pages in
1995  * MAX_ORDER_NR_PAGES sizes.
1996  */
1997 static void __init deferred_free_pages(unsigned long pfn,
1998 				       unsigned long end_pfn)
1999 {
2000 	unsigned long nr_free = 0;
2001 
2002 	for (; pfn < end_pfn; pfn++) {
2003 		if (!deferred_pfn_valid(pfn)) {
2004 			deferred_free_range(pfn - nr_free, nr_free);
2005 			nr_free = 0;
2006 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2007 			deferred_free_range(pfn - nr_free, nr_free);
2008 			nr_free = 1;
2009 		} else {
2010 			nr_free++;
2011 		}
2012 	}
2013 	/* Free the last block of pages to allocator */
2014 	deferred_free_range(pfn - nr_free, nr_free);
2015 }
2016 
2017 /*
2018  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
2019  * by performing them only once every MAX_ORDER_NR_PAGES.
2020  * Return number of pages initialized.
2021  */
2022 static unsigned long  __init deferred_init_pages(struct zone *zone,
2023 						 unsigned long pfn,
2024 						 unsigned long end_pfn)
2025 {
2026 	int nid = zone_to_nid(zone);
2027 	unsigned long nr_pages = 0;
2028 	int zid = zone_idx(zone);
2029 	struct page *page = NULL;
2030 
2031 	for (; pfn < end_pfn; pfn++) {
2032 		if (!deferred_pfn_valid(pfn)) {
2033 			page = NULL;
2034 			continue;
2035 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2036 			page = pfn_to_page(pfn);
2037 		} else {
2038 			page++;
2039 		}
2040 		__init_single_page(page, pfn, zid, nid);
2041 		nr_pages++;
2042 	}
2043 	return (nr_pages);
2044 }
2045 
2046 /*
2047  * This function is meant to pre-load the iterator for the zone init.
2048  * Specifically it walks through the ranges until we are caught up to the
2049  * first_init_pfn value and exits there. If we never encounter the value we
2050  * return false indicating there are no valid ranges left.
2051  */
2052 static bool __init
2053 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2054 				    unsigned long *spfn, unsigned long *epfn,
2055 				    unsigned long first_init_pfn)
2056 {
2057 	u64 j;
2058 
2059 	/*
2060 	 * Start out by walking through the ranges in this zone that have
2061 	 * already been initialized. We don't need to do anything with them,
2062 	 * so we simply skip past them in the iterator.
2063 	 */
2064 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2065 		if (*epfn <= first_init_pfn)
2066 			continue;
2067 		if (*spfn < first_init_pfn)
2068 			*spfn = first_init_pfn;
2069 		*i = j;
2070 		return true;
2071 	}
2072 
2073 	return false;
2074 }
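/*
 * Typical usage (see deferred_init_memmap_chunk() below): this primes the
 * iterator state in *i/*spfn/*epfn once, after which deferred_init_maxorder()
 * is called repeatedly on the same state until the chunk is exhausted.
 */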
2075 
2076 /*
2077  * Initialize and free pages. We do it in two loops: first we initialize
2078  * struct page, then free to buddy allocator, because while we are
2079  * freeing pages we can access pages that are ahead (computing buddy
2080  * page in __free_one_page()).
2081  *
2082  * In order to try and keep some memory in the cache we have the loop
2083  * broken along max page order boundaries. This way we will not cause
2084  * any issues with the buddy page computation.
2085  */
2086 static unsigned long __init
2087 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2088 		       unsigned long *end_pfn)
2089 {
2090 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2091 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2092 	unsigned long nr_pages = 0;
2093 	u64 j = *i;
2094 
2095 	/* First we loop through and initialize the page values */
2096 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2097 		unsigned long t;
2098 
2099 		if (mo_pfn <= *start_pfn)
2100 			break;
2101 
2102 		t = min(mo_pfn, *end_pfn);
2103 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2104 
2105 		if (mo_pfn < *end_pfn) {
2106 			*start_pfn = mo_pfn;
2107 			break;
2108 		}
2109 	}
2110 
2111 	/* Reset values and now loop through freeing pages as needed */
2112 	swap(j, *i);
2113 
2114 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2115 		unsigned long t;
2116 
2117 		if (mo_pfn <= spfn)
2118 			break;
2119 
2120 		t = min(mo_pfn, epfn);
2121 		deferred_free_pages(spfn, t);
2122 
2123 		if (mo_pfn <= epfn)
2124 			break;
2125 	}
2126 
2127 	return nr_pages;
2128 }
2129 
2130 static void __init
2131 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2132 			   void *arg)
2133 {
2134 	unsigned long spfn, epfn;
2135 	struct zone *zone = arg;
2136 	u64 i;
2137 
2138 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2139 
2140 	/*
2141 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2142 	 * can avoid introducing any issues with the buddy allocator.
2143 	 */
2144 	while (spfn < end_pfn) {
2145 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2146 		cond_resched();
2147 	}
2148 }
2149 
2150 /* An arch may override for more concurrency. */
2151 __weak int __init
2152 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2153 {
2154 	return 1;
2155 }
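/*
 * The default above keeps deferred init single-threaded per node; an
 * architecture may override it (x86, for instance, returns the number of CPUs
 * local to the node) so that padata_do_multithreaded() below can spread the
 * work across several CPUs.
 */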
2156 
2157 /* Initialise remaining memory on a node */
2158 static int __init deferred_init_memmap(void *data)
2159 {
2160 	pg_data_t *pgdat = data;
2161 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2162 	unsigned long spfn = 0, epfn = 0;
2163 	unsigned long first_init_pfn, flags;
2164 	unsigned long start = jiffies;
2165 	struct zone *zone;
2166 	int zid, max_threads;
2167 	u64 i;
2168 
2169 	/* Bind memory initialisation thread to a local node if possible */
2170 	if (!cpumask_empty(cpumask))
2171 		set_cpus_allowed_ptr(current, cpumask);
2172 
2173 	pgdat_resize_lock(pgdat, &flags);
2174 	first_init_pfn = pgdat->first_deferred_pfn;
2175 	if (first_init_pfn == ULONG_MAX) {
2176 		pgdat_resize_unlock(pgdat, &flags);
2177 		pgdat_init_report_one_done();
2178 		return 0;
2179 	}
2180 
2181 	/* Sanity check boundaries */
2182 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2183 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2184 	pgdat->first_deferred_pfn = ULONG_MAX;
2185 
2186 	/*
2187 	 * Once we unlock here, the zone cannot be grown anymore. Thus, if an
2188 	 * interrupt thread must allocate memory this early in boot, the zone
2189 	 * must be pre-grown prior to the start of deferred page initialization.
2190 	 */
2191 	pgdat_resize_unlock(pgdat, &flags);
2192 
2193 	/* Only the highest zone is deferred so find it */
2194 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2195 		zone = pgdat->node_zones + zid;
2196 		if (first_init_pfn < zone_end_pfn(zone))
2197 			break;
2198 	}
2199 
2200 	/* If the zone is empty somebody else may have cleared out the zone */
2201 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2202 						 first_init_pfn))
2203 		goto zone_empty;
2204 
2205 	max_threads = deferred_page_init_max_threads(cpumask);
2206 
2207 	while (spfn < epfn) {
2208 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2209 		struct padata_mt_job job = {
2210 			.thread_fn   = deferred_init_memmap_chunk,
2211 			.fn_arg      = zone,
2212 			.start       = spfn,
2213 			.size        = epfn_align - spfn,
2214 			.align       = PAGES_PER_SECTION,
2215 			.min_chunk   = PAGES_PER_SECTION,
2216 			.max_threads = max_threads,
2217 		};
2218 
2219 		padata_do_multithreaded(&job);
2220 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2221 						    epfn_align);
2222 	}
2223 zone_empty:
2224 	/* Sanity check that the next zone really is unpopulated */
2225 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2226 
2227 	pr_info("node %d deferred pages initialised in %ums\n",
2228 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2229 
2230 	pgdat_init_report_one_done();
2231 	return 0;
2232 }
2233 
2234 /*
2235  * If this zone has deferred pages, try to grow it by initializing enough
2236  * deferred pages to satisfy the allocation specified by order, rounded up to
2237  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2238  * of SECTION_SIZE bytes by initializing struct pages in increments of
2239  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2240  *
2241  * Return true when zone was grown, otherwise return false. We return true even
2242  * when we grow less than requested, to let the caller decide if there are
2243  * enough pages to satisfy the allocation.
2244  *
2245  * Note: We use noinline because this function is needed only during boot, and
2246  * it is called from a __ref function, _deferred_grow_zone. This way we are
2247  * making sure that it is not inlined into the permanent text section.
2248  */
2249 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2250 {
2251 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2252 	pg_data_t *pgdat = zone->zone_pgdat;
2253 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2254 	unsigned long spfn, epfn, flags;
2255 	unsigned long nr_pages = 0;
2256 	u64 i;
2257 
2258 	/* Only the last zone may have deferred pages */
2259 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2260 		return false;
2261 
2262 	pgdat_resize_lock(pgdat, &flags);
2263 
2264 	/*
2265 	 * If someone grew this zone while we were waiting for spinlock, return
2266 	 * true, as there might be enough pages already.
2267 	 */
2268 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2269 		pgdat_resize_unlock(pgdat, &flags);
2270 		return true;
2271 	}
2272 
2273 	/* If the zone is empty somebody else may have cleared out the zone */
2274 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2275 						 first_deferred_pfn)) {
2276 		pgdat->first_deferred_pfn = ULONG_MAX;
2277 		pgdat_resize_unlock(pgdat, &flags);
2278 		/* Retry only once. */
2279 		return first_deferred_pfn != ULONG_MAX;
2280 	}
2281 
2282 	/*
2283 	 * Initialize and free pages in MAX_ORDER sized increments so
2284 	 * that we can avoid introducing any issues with the buddy
2285 	 * allocator.
2286 	 */
2287 	while (spfn < epfn) {
2288 		/* update our first deferred PFN for this section */
2289 		first_deferred_pfn = spfn;
2290 
2291 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2292 		touch_nmi_watchdog();
2293 
2294 		/* We should only stop along section boundaries */
2295 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2296 			continue;
2297 
2298 		/* If our quota has been met we can stop here */
2299 		if (nr_pages >= nr_pages_needed)
2300 			break;
2301 	}
2302 
2303 	pgdat->first_deferred_pfn = spfn;
2304 	pgdat_resize_unlock(pgdat, &flags);
2305 
2306 	return nr_pages > 0;
2307 }
2308 
2309 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2310 
2311 #ifdef CONFIG_CMA
2312 void __init init_cma_reserved_pageblock(struct page *page)
2313 {
2314 	unsigned i = pageblock_nr_pages;
2315 	struct page *p = page;
2316 
2317 	do {
2318 		__ClearPageReserved(p);
2319 		set_page_count(p, 0);
2320 	} while (++p, --i);
2321 
2322 	set_pageblock_migratetype(page, MIGRATE_CMA);
2323 	set_page_refcounted(page);
2324 	__free_pages(page, pageblock_order);
2325 
2326 	adjust_managed_page_count(page, pageblock_nr_pages);
2327 	page_zone(page)->cma_pages += pageblock_nr_pages;
2328 }
2329 #endif
2330 
2331 void __init page_alloc_init_late(void)
2332 {
2333 	struct zone *zone;
2334 	int nid;
2335 
2336 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2337 
2338 	/* There will be num_node_state(N_MEMORY) threads */
2339 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2340 	for_each_node_state(nid, N_MEMORY) {
2341 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2342 	}
2343 
2344 	/* Block until all are initialised */
2345 	wait_for_completion(&pgdat_init_all_done_comp);
2346 
2347 	/*
2348 	 * We initialized the rest of the deferred pages.  Permanently disable
2349 	 * on-demand struct page initialization.
2350 	 */
2351 	static_branch_disable(&deferred_pages);
2352 
2353 	/* Reinit limits that are based on free pages after the kernel is up */
2354 	files_maxfiles_init();
2355 #endif
2356 
2357 	buffer_init();
2358 
2359 	/* Discard memblock private memory */
2360 	memblock_discard();
2361 
2362 	for_each_node_state(nid, N_MEMORY)
2363 		shuffle_free_memory(NODE_DATA(nid));
2364 
2365 	for_each_populated_zone(zone)
2366 		set_zone_contiguous(zone);
2367 
2368 	/* Initialize page ext after all struct pages are initialized. */
2369 	if (deferred_struct_pages)
2370 		page_ext_init();
2371 }
2372 
2373 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2374 /*
2375  * Returns the number of pages that the arch has reserved but
2376  * that are not known to alloc_large_system_hash().
2377  */
2378 static unsigned long __init arch_reserved_kernel_pages(void)
2379 {
2380 	return 0;
2381 }
2382 #endif
2383 
2384 /*
2385  * Adaptive scale is meant to reduce sizes of hash tables on large memory
2386  * machines. As memory size is increased, the scale is also increased, but at
2387  * a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2388  * quadruples the scale is increased by one, which means the size of hash table
2389  * only doubles, instead of quadrupling as well.
2390  * Because 32-bit systems cannot have large physical memory, where this scaling
2391  * makes sense, it is disabled on such platforms.
2392  */
2393 #if __BITS_PER_LONG > 32
2394 #define ADAPT_SCALE_BASE	(64ul << 30)
2395 #define ADAPT_SCALE_SHIFT	2
2396 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2397 #endif
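/*
 * Illustration (assuming 4KiB pages): a machine with roughly 256GiB of RAM
 * has about 64M kernel pages.  The loop in alloc_large_system_hash() starts
 * adapt at ADAPT_SCALE_NPAGES (16M pages, i.e. 64GiB) and shifts it left by
 * ADAPT_SCALE_SHIFT each round, so scale is bumped once here; every further
 * quadrupling of memory bumps it once more, and the resulting table merely
 * doubles instead of quadrupling.
 */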
2398 
2399 /*
2400  * allocate a large system hash table from bootmem
2401  * - it is assumed that the hash table must contain an exact power-of-2
2402  *   quantity of entries
2403  * - limit is the number of hash buckets, not the total allocation size
2404  */
2405 void *__init alloc_large_system_hash(const char *tablename,
2406 				     unsigned long bucketsize,
2407 				     unsigned long numentries,
2408 				     int scale,
2409 				     int flags,
2410 				     unsigned int *_hash_shift,
2411 				     unsigned int *_hash_mask,
2412 				     unsigned long low_limit,
2413 				     unsigned long high_limit)
2414 {
2415 	unsigned long long max = high_limit;
2416 	unsigned long log2qty, size;
2417 	void *table;
2418 	gfp_t gfp_flags;
2419 	bool virt;
2420 	bool huge;
2421 
2422 	/* allow the kernel cmdline to have a say */
2423 	if (!numentries) {
2424 		/* round applicable memory size up to nearest megabyte */
2425 		numentries = nr_kernel_pages;
2426 		numentries -= arch_reserved_kernel_pages();
2427 
2428 		/* Rounding up isn't necessary when PAGE_SIZE >= 1MB */
2429 		if (PAGE_SIZE < SZ_1M)
2430 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2431 
2432 #if __BITS_PER_LONG > 32
2433 		if (!high_limit) {
2434 			unsigned long adapt;
2435 
2436 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2437 			     adapt <<= ADAPT_SCALE_SHIFT)
2438 				scale++;
2439 		}
2440 #endif
2441 
2442 		/* limit to 1 bucket per 2^scale bytes of low memory */
2443 		if (scale > PAGE_SHIFT)
2444 			numentries >>= (scale - PAGE_SHIFT);
2445 		else
2446 			numentries <<= (PAGE_SHIFT - scale);
2447 
2448 		/* Make sure we've got at least a 0-order allocation. */
2449 		if (unlikely(flags & HASH_SMALL)) {
2450 			/* Makes no sense without HASH_EARLY */
2451 			WARN_ON(!(flags & HASH_EARLY));
2452 			if (!(numentries >> *_hash_shift)) {
2453 				numentries = 1UL << *_hash_shift;
2454 				BUG_ON(!numentries);
2455 			}
2456 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2457 			numentries = PAGE_SIZE / bucketsize;
2458 	}
2459 	numentries = roundup_pow_of_two(numentries);
2460 
2461 	/* limit allocation size to 1/16 total memory by default */
2462 	if (max == 0) {
2463 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2464 		do_div(max, bucketsize);
2465 	}
2466 	max = min(max, 0x80000000ULL);
2467 
2468 	if (numentries < low_limit)
2469 		numentries = low_limit;
2470 	if (numentries > max)
2471 		numentries = max;
2472 
2473 	log2qty = ilog2(numentries);
2474 
2475 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
2476 	do {
2477 		virt = false;
2478 		size = bucketsize << log2qty;
2479 		if (flags & HASH_EARLY) {
2480 			if (flags & HASH_ZERO)
2481 				table = memblock_alloc(size, SMP_CACHE_BYTES);
2482 			else
2483 				table = memblock_alloc_raw(size,
2484 							   SMP_CACHE_BYTES);
2485 		} else if (get_order(size) > MAX_ORDER || hashdist) {
2486 			table = vmalloc_huge(size, gfp_flags);
2487 			virt = true;
2488 			if (table)
2489 				huge = is_vm_area_hugepages(table);
2490 		} else {
2491 			/*
2492 			 * If bucketsize is not a power of two, we may free
2493 			 * some pages at the end of the hash table, which
2494 			 * alloc_pages_exact() does automatically.
2495 			 */
2496 			table = alloc_pages_exact(size, gfp_flags);
2497 			kmemleak_alloc(table, size, 1, gfp_flags);
2498 		}
2499 	} while (!table && size > PAGE_SIZE && --log2qty);
2500 
2501 	if (!table)
2502 		panic("Failed to allocate %s hash table\n", tablename);
2503 
2504 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2505 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2506 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2507 
2508 	if (_hash_shift)
2509 		*_hash_shift = log2qty;
2510 	if (_hash_mask)
2511 		*_hash_mask = (1 << log2qty) - 1;
2512 
2513 	return table;
2514 }
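/*
 * Rough sizing example (hypothetical caller values): on an 8GiB machine
 * (nr_kernel_pages ~= 2^21 with 4KiB pages), a caller passing scale == 17 and
 * bucketsize == 8 ends up with numentries = 2^21 >> (17 - PAGE_SHIFT) = 2^16
 * buckets, i.e. log2qty == 16 and a 512KiB table, before the low/high limits
 * and the 1/16-of-memory cap are applied.
 */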
2515 
2516 /**
2517  * set_dma_reserve - set the specified number of pages reserved in the first zone
2518  * @new_dma_reserve: The number of pages to mark reserved
2519  *
2520  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2521  * In the DMA zone, a significant percentage may be consumed by kernel image
2522  * and other unfreeable allocations which can skew the watermarks badly. This
2523  * function may optionally be used to account for unfreeable pages in the
2524  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2525  * smaller per-cpu batchsize.
2526  */
2527 void __init set_dma_reserve(unsigned long new_dma_reserve)
2528 {
2529 	dma_reserve = new_dma_reserve;
2530 }
2531 
2532 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2533 							unsigned int order)
2534 {
2535 	if (!early_page_initialised(pfn))
2536 		return;
2537 	if (!kmsan_memblock_free_pages(page, order)) {
2538 		/* KMSAN will take care of these pages. */
2539 		return;
2540 	}
2541 	__free_pages_core(page, order);
2542 }
2543 
2544 static bool _init_on_alloc_enabled_early __read_mostly
2545 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2546 static int __init early_init_on_alloc(char *buf)
2547 {
2548 
2549 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2550 }
2551 early_param("init_on_alloc", early_init_on_alloc);
2552 
2553 static bool _init_on_free_enabled_early __read_mostly
2554 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2555 static int __init early_init_on_free(char *buf)
2556 {
2557 	return kstrtobool(buf, &_init_on_free_enabled_early);
2558 }
2559 early_param("init_on_free", early_init_on_free);
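/*
 * Both knobs are ordinary boot parameters, so the compile-time defaults can
 * be overridden from the kernel command line, e.g.:
 *
 *	init_on_alloc=1 init_on_free=0
 */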
2560 
2561 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2562 
2563 /*
2564  * Enable static keys related to various memory debugging and hardening options.
2565  * Some override others, and depend on early params that are evaluated in the
2566  * order of appearance. So we need to first gather the full picture of what was
2567  * enabled, and then make decisions.
2568  */
2569 static void __init mem_debugging_and_hardening_init(void)
2570 {
2571 	bool page_poisoning_requested = false;
2572 	bool want_check_pages = false;
2573 
2574 #ifdef CONFIG_PAGE_POISONING
2575 	/*
2576 	 * Page poisoning serves as debug page alloc for some arches. If
2577 	 * either of those options is enabled, enable poisoning.
2578 	 */
2579 	if (page_poisoning_enabled() ||
2580 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2581 	      debug_pagealloc_enabled())) {
2582 		static_branch_enable(&_page_poisoning_enabled);
2583 		page_poisoning_requested = true;
2584 		want_check_pages = true;
2585 	}
2586 #endif
2587 
2588 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2589 	    page_poisoning_requested) {
2590 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2591 			"will take precedence over init_on_alloc and init_on_free\n");
2592 		_init_on_alloc_enabled_early = false;
2593 		_init_on_free_enabled_early = false;
2594 	}
2595 
2596 	if (_init_on_alloc_enabled_early) {
2597 		want_check_pages = true;
2598 		static_branch_enable(&init_on_alloc);
2599 	} else {
2600 		static_branch_disable(&init_on_alloc);
2601 	}
2602 
2603 	if (_init_on_free_enabled_early) {
2604 		want_check_pages = true;
2605 		static_branch_enable(&init_on_free);
2606 	} else {
2607 		static_branch_disable(&init_on_free);
2608 	}
2609 
2610 	if (IS_ENABLED(CONFIG_KMSAN) &&
2611 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2612 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2613 
2614 #ifdef CONFIG_DEBUG_PAGEALLOC
2615 	if (debug_pagealloc_enabled()) {
2616 		want_check_pages = true;
2617 		static_branch_enable(&_debug_pagealloc_enabled);
2618 
2619 		if (debug_guardpage_minorder())
2620 			static_branch_enable(&_debug_guardpage_enabled);
2621 	}
2622 #endif
2623 
2624 	/*
2625 	 * Any page debugging or hardening option also enables sanity checking
2626 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2627 	 * enabled already.
2628 	 */
2629 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2630 		static_branch_enable(&check_pages_enabled);
2631 }
2632 
2633 /* Report memory auto-initialization states for this boot. */
2634 static void __init report_meminit(void)
2635 {
2636 	const char *stack;
2637 
2638 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2639 		stack = "all(pattern)";
2640 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2641 		stack = "all(zero)";
2642 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2643 		stack = "byref_all(zero)";
2644 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2645 		stack = "byref(zero)";
2646 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2647 		stack = "__user(zero)";
2648 	else
2649 		stack = "off";
2650 
2651 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2652 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2653 		want_init_on_free() ? "on" : "off");
2654 	if (want_init_on_free())
2655 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2656 }
2657 
2658 static void __init mem_init_print_info(void)
2659 {
2660 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2661 	unsigned long init_code_size, init_data_size;
2662 
2663 	physpages = get_num_physpages();
2664 	codesize = _etext - _stext;
2665 	datasize = _edata - _sdata;
2666 	rosize = __end_rodata - __start_rodata;
2667 	bss_size = __bss_stop - __bss_start;
2668 	init_data_size = __init_end - __init_begin;
2669 	init_code_size = _einittext - _sinittext;
2670 
2671 	/*
2672 	 * Detect special cases and adjust section sizes accordingly:
2673 	 * 1) .init.* may be embedded into .data sections
2674 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2675 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2676 	 * 3) .rodata.* may be embedded into .text or .data sections.
2677 	 */
2678 #define adj_init_size(start, end, size, pos, adj) \
2679 	do { \
2680 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2681 			size -= adj; \
2682 	} while (0)
2683 
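	/*
	 * Each adj_init_size() call below handles one of the special cases
	 * listed above: when "pos" falls inside [start, end) and "size" is
	 * large enough, "adj" bytes are treated as double-counted and
	 * subtracted.  For instance, init code that lies inside
	 * [__init_begin, __init_end) is subtracted from init_data_size so it
	 * is not reported twice.
	 */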
2684 	adj_init_size(__init_begin, __init_end, init_data_size,
2685 		     _sinittext, init_code_size);
2686 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2687 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2688 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2689 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2690 
2691 #undef	adj_init_size
2692 
2693 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2694 #ifdef	CONFIG_HIGHMEM
2695 		", %luK highmem"
2696 #endif
2697 		")\n",
2698 		K(nr_free_pages()), K(physpages),
2699 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2700 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2701 		K(physpages - totalram_pages() - totalcma_pages),
2702 		K(totalcma_pages)
2703 #ifdef	CONFIG_HIGHMEM
2704 		, K(totalhigh_pages())
2705 #endif
2706 		);
2707 }
2708 
2709 /*
2710  * Set up kernel memory allocators
2711  */
2712 void __init mm_core_init(void)
2713 {
2714 	/* Initializations relying on SMP setup */
2715 	build_all_zonelists(NULL);
2716 	page_alloc_init_cpuhp();
2717 
2718 	/*
2719 	 * page_ext requires contiguous pages
2720 	 * bigger than MAX_ORDER, unless SPARSEMEM is used.
2721 	 */
2722 	page_ext_init_flatmem();
2723 	mem_debugging_and_hardening_init();
2724 	kfence_alloc_pool();
2725 	report_meminit();
2726 	kmsan_init_shadow();
2727 	stack_depot_early_init();
2728 	mem_init();
2729 	mem_init_print_info();
2730 	kmem_cache_init();
2731 	/*
2732 	 * page_owner must be initialized after buddy is ready, and also after
2733 	 * slab is ready so that stack_depot_init() works properly
2734 	 */
2735 	page_ext_init_flatmem_late();
2736 	kmemleak_init();
2737 	ptlock_cache_init();
2738 	pgtable_cache_init();
2739 	debug_objects_mem_init();
2740 	vmalloc_init();
2741 	/* If struct page init was not deferred, init page_ext now, as vmap is fully initialized */
2742 	if (!deferred_struct_pages)
2743 		page_ext_init();
2744 	/* Should be run before the first non-init thread is created */
2745 	init_espfix_bsp();
2746 	/* Should be run after espfix64 is set up. */
2747 	pti_init();
2748 	kmsan_init_runtime();
2749 	mm_cache_init();
2750 }
2751