xref: /openbmc/linux/mm/page_alloc.c (revision 05911c5d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list, the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/kthread.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/lockdep.h>
70 #include <linux/nmi.h>
71 #include <linux/psi.h>
72 #include <linux/padata.h>
73 #include <linux/khugepaged.h>
74 #include <linux/buffer_head.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/div64.h>
78 #include "internal.h"
79 #include "shuffle.h"
80 #include "page_reporting.h"
81 
82 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
83 typedef int __bitwise fpi_t;
84 
85 /* No special request */
86 #define FPI_NONE		((__force fpi_t)0)
87 
88 /*
89  * Skip free page reporting notification for the (possibly merged) page.
90  * This does not hinder free page reporting from grabbing the page,
91  * reporting it and marking it "reported" -  it only skips notifying
92  * the free page reporting infrastructure about a newly freed page. For
93  * example, used when temporarily pulling a page from a freelist and
94  * putting it back unmodified.
95  */
96 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
97 
98 /*
99  * Place the (possibly merged) page to the tail of the freelist. Will ignore
100  * page shuffling (relevant code - e.g., memory onlining - is expected to
101  * shuffle the whole zone).
102  *
103  * Note: No code should rely on this flag for correctness - it's purely
104  *       to allow for optimizations when handing back either fresh pages
105  *       (memory onlining) or untouched pages (page isolation, free page
106  *       reporting).
107  */
108 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
109 
110 /*
111  * Don't poison memory with KASAN (only for the tag-based modes).
112  * During boot, all non-reserved memblock memory is exposed to page_alloc.
113  * Poisoning all that memory lengthens boot time, especially on systems with
114  * large amount of RAM. This flag is used to skip that poisoning.
115  * This is only done for the tag-based KASAN modes, as those are able to
116  * detect memory corruptions with the memory tags assigned by default.
117  * All memory allocated normally after boot gets poisoned as usual.
118  */
119 #define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
120 
121 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
122 static DEFINE_MUTEX(pcp_batch_high_lock);
123 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
124 
125 struct pagesets {
126 	local_lock_t lock;
127 #if defined(CONFIG_DEBUG_INFO_BTF) &&				\
128 	!defined(CONFIG_DEBUG_LOCK_ALLOC) &&			\
129 	!defined(CONFIG_PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT)
130 	/*
131 	 * pahole 1.21 and earlier gets confused by zero-sized per-CPU
132 	 * variables and produces invalid BTF. Ensure that
133 	 * sizeof(struct pagesets) != 0 for older versions of pahole.
134 	 */
135 	char __pahole_hack;
136 	#warning "pahole too old to support zero-sized struct pagesets"
137 #endif
138 };
139 static DEFINE_PER_CPU(struct pagesets, pagesets) = {
140 	.lock = INIT_LOCAL_LOCK(lock),
141 };
142 
143 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
144 DEFINE_PER_CPU(int, numa_node);
145 EXPORT_PER_CPU_SYMBOL(numa_node);
146 #endif
147 
148 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
149 
150 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
151 /*
152  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
153  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
154  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
155  * defined in <linux/topology.h>.
156  */
157 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
158 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
159 #endif
160 
161 /* work_structs for global per-cpu drains */
162 struct pcpu_drain {
163 	struct zone *zone;
164 	struct work_struct work;
165 };
166 static DEFINE_MUTEX(pcpu_drain_mutex);
167 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
168 
169 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
170 volatile unsigned long latent_entropy __latent_entropy;
171 EXPORT_SYMBOL(latent_entropy);
172 #endif
173 
174 /*
175  * Array of node states.
176  */
177 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
178 	[N_POSSIBLE] = NODE_MASK_ALL,
179 	[N_ONLINE] = { { [0] = 1UL } },
180 #ifndef CONFIG_NUMA
181 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
182 #ifdef CONFIG_HIGHMEM
183 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
184 #endif
185 	[N_MEMORY] = { { [0] = 1UL } },
186 	[N_CPU] = { { [0] = 1UL } },
187 #endif	/* NUMA */
188 };
189 EXPORT_SYMBOL(node_states);
190 
191 atomic_long_t _totalram_pages __read_mostly;
192 EXPORT_SYMBOL(_totalram_pages);
193 unsigned long totalreserve_pages __read_mostly;
194 unsigned long totalcma_pages __read_mostly;
195 
196 int percpu_pagelist_high_fraction;
197 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
198 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
199 EXPORT_SYMBOL(init_on_alloc);
200 
201 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
202 EXPORT_SYMBOL(init_on_free);
203 
204 static bool _init_on_alloc_enabled_early __read_mostly
205 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
206 static int __init early_init_on_alloc(char *buf)
207 {
208 
209 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
210 }
211 early_param("init_on_alloc", early_init_on_alloc);
212 
213 static bool _init_on_free_enabled_early __read_mostly
214 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
215 static int __init early_init_on_free(char *buf)
216 {
217 	return kstrtobool(buf, &_init_on_free_enabled_early);
218 }
219 early_param("init_on_free", early_init_on_free);
220 
221 /*
222  * A cached value of the page's pageblock's migratetype, used when the page is
223  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
224  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
225  * Also the migratetype set in the page does not necessarily match the pcplist
226  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
227  * other index - this ensures that it will be put on the correct CMA freelist.
228  */
229 static inline int get_pcppage_migratetype(struct page *page)
230 {
231 	return page->index;
232 }
233 
234 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
235 {
236 	page->index = migratetype;
237 }
238 
239 #ifdef CONFIG_PM_SLEEP
240 /*
241  * The following functions are used by the suspend/hibernate code to temporarily
242  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
243  * while devices are suspended.  To avoid races with the suspend/hibernate code,
244  * they should always be called with system_transition_mutex held
245  * (gfp_allowed_mask also should only be modified with system_transition_mutex
246  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
247  * with that modification).
248  */
249 
250 static gfp_t saved_gfp_mask;
251 
252 void pm_restore_gfp_mask(void)
253 {
254 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
255 	if (saved_gfp_mask) {
256 		gfp_allowed_mask = saved_gfp_mask;
257 		saved_gfp_mask = 0;
258 	}
259 }
260 
261 void pm_restrict_gfp_mask(void)
262 {
263 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
264 	WARN_ON(saved_gfp_mask);
265 	saved_gfp_mask = gfp_allowed_mask;
266 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
267 }
268 
269 bool pm_suspended_storage(void)
270 {
271 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
272 		return false;
273 	return true;
274 }
275 #endif /* CONFIG_PM_SLEEP */
276 
277 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
278 unsigned int pageblock_order __read_mostly;
279 #endif
280 
281 static void __free_pages_ok(struct page *page, unsigned int order,
282 			    fpi_t fpi_flags);
283 
284 /*
285  * results with 256, 32 in the lowmem_reserve sysctl:
286  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
287  *	1G machine -> (16M dma, 784M normal, 224M high)
288  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
289  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
290  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
291  *
292  * TBD: should special case ZONE_DMA32 machines here - in those we normally
293  * don't need any ZONE_NORMAL reservation
294  */
295 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
296 #ifdef CONFIG_ZONE_DMA
297 	[ZONE_DMA] = 256,
298 #endif
299 #ifdef CONFIG_ZONE_DMA32
300 	[ZONE_DMA32] = 256,
301 #endif
302 	[ZONE_NORMAL] = 32,
303 #ifdef CONFIG_HIGHMEM
304 	[ZONE_HIGHMEM] = 0,
305 #endif
306 	[ZONE_MOVABLE] = 0,
307 };
308 
309 static char * const zone_names[MAX_NR_ZONES] = {
310 #ifdef CONFIG_ZONE_DMA
311 	 "DMA",
312 #endif
313 #ifdef CONFIG_ZONE_DMA32
314 	 "DMA32",
315 #endif
316 	 "Normal",
317 #ifdef CONFIG_HIGHMEM
318 	 "HighMem",
319 #endif
320 	 "Movable",
321 #ifdef CONFIG_ZONE_DEVICE
322 	 "Device",
323 #endif
324 };
325 
326 const char * const migratetype_names[MIGRATE_TYPES] = {
327 	"Unmovable",
328 	"Movable",
329 	"Reclaimable",
330 	"HighAtomic",
331 #ifdef CONFIG_CMA
332 	"CMA",
333 #endif
334 #ifdef CONFIG_MEMORY_ISOLATION
335 	"Isolate",
336 #endif
337 };
338 
339 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
340 	[NULL_COMPOUND_DTOR] = NULL,
341 	[COMPOUND_PAGE_DTOR] = free_compound_page,
342 #ifdef CONFIG_HUGETLB_PAGE
343 	[HUGETLB_PAGE_DTOR] = free_huge_page,
344 #endif
345 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
346 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
347 #endif
348 };
349 
350 int min_free_kbytes = 1024;
351 int user_min_free_kbytes = -1;
352 int watermark_boost_factor __read_mostly = 15000;
353 int watermark_scale_factor = 10;
354 
355 static unsigned long nr_kernel_pages __initdata;
356 static unsigned long nr_all_pages __initdata;
357 static unsigned long dma_reserve __initdata;
358 
359 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
360 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
361 static unsigned long required_kernelcore __initdata;
362 static unsigned long required_kernelcore_percent __initdata;
363 static unsigned long required_movablecore __initdata;
364 static unsigned long required_movablecore_percent __initdata;
365 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
366 static bool mirrored_kernelcore __meminitdata;
367 
368 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
369 int movable_zone;
370 EXPORT_SYMBOL(movable_zone);
371 
372 #if MAX_NUMNODES > 1
373 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
374 unsigned int nr_online_nodes __read_mostly = 1;
375 EXPORT_SYMBOL(nr_node_ids);
376 EXPORT_SYMBOL(nr_online_nodes);
377 #endif
378 
379 int page_group_by_mobility_disabled __read_mostly;
380 
381 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
382 /*
383  * During boot we initialize deferred pages on-demand, as needed, but once
384  * page_alloc_init_late() has finished, the deferred pages are all initialized,
385  * and we can permanently disable that path.
386  */
387 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
388 
389 /*
390  * Calling kasan_free_pages() only after deferred memory initialization
391  * has completed. Poisoning pages during deferred memory init will greatly
392  * lengthen the process and cause problem in large memory systems as the
393  * deferred pages initialization is done with interrupt disabled.
394  *
395  * Assuming that there will be no reference to those newly initialized
396  * pages before they are ever allocated, this should have no effect on
397  * KASAN memory tracking as the poison will be properly inserted at page
398  * allocation time. The only corner case is when pages are allocated by
399  * on-demand allocation and then freed again before the deferred pages
400  * initialization is done, but this is not likely to happen.
401  */
402 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
403 						bool init, fpi_t fpi_flags)
404 {
405 	if (static_branch_unlikely(&deferred_pages))
406 		return;
407 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
408 			(fpi_flags & FPI_SKIP_KASAN_POISON))
409 		return;
410 	kasan_free_pages(page, order, init);
411 }
412 
413 /* Returns true if the struct page for the pfn is uninitialised */
414 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
415 {
416 	int nid = early_pfn_to_nid(pfn);
417 
418 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
419 		return true;
420 
421 	return false;
422 }
423 
424 /*
425  * Returns true when the remaining initialisation should be deferred until
426  * later in the boot cycle when it can be parallelised.
427  */
428 static bool __meminit
429 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
430 {
431 	static unsigned long prev_end_pfn, nr_initialised;
432 
433 	/*
434 	 * prev_end_pfn static that contains the end of previous zone
435 	 * No need to protect because called very early in boot before smp_init.
436 	 */
437 	if (prev_end_pfn != end_pfn) {
438 		prev_end_pfn = end_pfn;
439 		nr_initialised = 0;
440 	}
441 
442 	/* Always populate low zones for address-constrained allocations */
443 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
444 		return false;
445 
446 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
447 		return true;
448 	/*
449 	 * We start only with one section of pages, more pages are added as
450 	 * needed until the rest of deferred pages are initialized.
451 	 */
452 	nr_initialised++;
453 	if ((nr_initialised > PAGES_PER_SECTION) &&
454 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
455 		NODE_DATA(nid)->first_deferred_pfn = pfn;
456 		return true;
457 	}
458 	return false;
459 }
460 #else
461 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
462 						bool init, fpi_t fpi_flags)
463 {
464 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
465 			(fpi_flags & FPI_SKIP_KASAN_POISON))
466 		return;
467 	kasan_free_pages(page, order, init);
468 }
469 
470 static inline bool early_page_uninitialised(unsigned long pfn)
471 {
472 	return false;
473 }
474 
475 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
476 {
477 	return false;
478 }
479 #endif
480 
481 /* Return a pointer to the bitmap storing bits affecting a block of pages */
482 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
483 							unsigned long pfn)
484 {
485 #ifdef CONFIG_SPARSEMEM
486 	return section_to_usemap(__pfn_to_section(pfn));
487 #else
488 	return page_zone(page)->pageblock_flags;
489 #endif /* CONFIG_SPARSEMEM */
490 }
491 
492 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
493 {
494 #ifdef CONFIG_SPARSEMEM
495 	pfn &= (PAGES_PER_SECTION-1);
496 #else
497 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
498 #endif /* CONFIG_SPARSEMEM */
499 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
500 }
501 
502 static __always_inline
503 unsigned long __get_pfnblock_flags_mask(const struct page *page,
504 					unsigned long pfn,
505 					unsigned long mask)
506 {
507 	unsigned long *bitmap;
508 	unsigned long bitidx, word_bitidx;
509 	unsigned long word;
510 
511 	bitmap = get_pageblock_bitmap(page, pfn);
512 	bitidx = pfn_to_bitidx(page, pfn);
513 	word_bitidx = bitidx / BITS_PER_LONG;
514 	bitidx &= (BITS_PER_LONG-1);
515 
516 	word = bitmap[word_bitidx];
517 	return (word >> bitidx) & mask;
518 }
519 
520 /**
521  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
522  * @page: The page within the block of interest
523  * @pfn: The target page frame number
524  * @mask: mask of bits that the caller is interested in
525  *
526  * Return: pageblock_bits flags
527  */
528 unsigned long get_pfnblock_flags_mask(const struct page *page,
529 					unsigned long pfn, unsigned long mask)
530 {
531 	return __get_pfnblock_flags_mask(page, pfn, mask);
532 }
533 
534 static __always_inline int get_pfnblock_migratetype(const struct page *page,
535 					unsigned long pfn)
536 {
537 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
538 }
539 
540 /**
541  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
542  * @page: The page within the block of interest
543  * @flags: The flags to set
544  * @pfn: The target page frame number
545  * @mask: mask of bits that the caller is interested in
546  */
547 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
548 					unsigned long pfn,
549 					unsigned long mask)
550 {
551 	unsigned long *bitmap;
552 	unsigned long bitidx, word_bitidx;
553 	unsigned long old_word, word;
554 
555 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
556 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
557 
558 	bitmap = get_pageblock_bitmap(page, pfn);
559 	bitidx = pfn_to_bitidx(page, pfn);
560 	word_bitidx = bitidx / BITS_PER_LONG;
561 	bitidx &= (BITS_PER_LONG-1);
562 
563 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
564 
565 	mask <<= bitidx;
566 	flags <<= bitidx;
567 
568 	word = READ_ONCE(bitmap[word_bitidx]);
569 	for (;;) {
570 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
571 		if (word == old_word)
572 			break;
573 		word = old_word;
574 	}
575 }
576 
577 void set_pageblock_migratetype(struct page *page, int migratetype)
578 {
579 	if (unlikely(page_group_by_mobility_disabled &&
580 		     migratetype < MIGRATE_PCPTYPES))
581 		migratetype = MIGRATE_UNMOVABLE;
582 
583 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
584 				page_to_pfn(page), MIGRATETYPE_MASK);
585 }
586 
587 #ifdef CONFIG_DEBUG_VM
588 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
589 {
590 	int ret = 0;
591 	unsigned seq;
592 	unsigned long pfn = page_to_pfn(page);
593 	unsigned long sp, start_pfn;
594 
595 	do {
596 		seq = zone_span_seqbegin(zone);
597 		start_pfn = zone->zone_start_pfn;
598 		sp = zone->spanned_pages;
599 		if (!zone_spans_pfn(zone, pfn))
600 			ret = 1;
601 	} while (zone_span_seqretry(zone, seq));
602 
603 	if (ret)
604 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
605 			pfn, zone_to_nid(zone), zone->name,
606 			start_pfn, start_pfn + sp);
607 
608 	return ret;
609 }
610 
611 static int page_is_consistent(struct zone *zone, struct page *page)
612 {
613 	if (!pfn_valid_within(page_to_pfn(page)))
614 		return 0;
615 	if (zone != page_zone(page))
616 		return 0;
617 
618 	return 1;
619 }
620 /*
621  * Temporary debugging check for pages not lying within a given zone.
622  */
623 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
624 {
625 	if (page_outside_zone_boundaries(zone, page))
626 		return 1;
627 	if (!page_is_consistent(zone, page))
628 		return 1;
629 
630 	return 0;
631 }
632 #else
633 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
634 {
635 	return 0;
636 }
637 #endif
638 
639 static void bad_page(struct page *page, const char *reason)
640 {
641 	static unsigned long resume;
642 	static unsigned long nr_shown;
643 	static unsigned long nr_unshown;
644 
645 	/*
646 	 * Allow a burst of 60 reports, then keep quiet for that minute;
647 	 * or allow a steady drip of one report per second.
648 	 */
649 	if (nr_shown == 60) {
650 		if (time_before(jiffies, resume)) {
651 			nr_unshown++;
652 			goto out;
653 		}
654 		if (nr_unshown) {
655 			pr_alert(
656 			      "BUG: Bad page state: %lu messages suppressed\n",
657 				nr_unshown);
658 			nr_unshown = 0;
659 		}
660 		nr_shown = 0;
661 	}
662 	if (nr_shown++ == 0)
663 		resume = jiffies + 60 * HZ;
664 
665 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
666 		current->comm, page_to_pfn(page));
667 	dump_page(page, reason);
668 
669 	print_modules();
670 	dump_stack();
671 out:
672 	/* Leave bad fields for debug, except PageBuddy could make trouble */
673 	page_mapcount_reset(page); /* remove PageBuddy */
674 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
675 }
676 
677 static inline unsigned int order_to_pindex(int migratetype, int order)
678 {
679 	int base = order;
680 
681 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
682 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
683 		VM_BUG_ON(order != pageblock_order);
684 		base = PAGE_ALLOC_COSTLY_ORDER + 1;
685 	}
686 #else
687 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
688 #endif
689 
690 	return (MIGRATE_PCPTYPES * base) + migratetype;
691 }
692 
693 static inline int pindex_to_order(unsigned int pindex)
694 {
695 	int order = pindex / MIGRATE_PCPTYPES;
696 
697 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
698 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
699 		order = pageblock_order;
700 		VM_BUG_ON(order != pageblock_order);
701 	}
702 #else
703 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
704 #endif
705 
706 	return order;
707 }
708 
709 static inline bool pcp_allowed_order(unsigned int order)
710 {
711 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
712 		return true;
713 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
714 	if (order == pageblock_order)
715 		return true;
716 #endif
717 	return false;
718 }
719 
720 static inline void free_the_page(struct page *page, unsigned int order)
721 {
722 	if (pcp_allowed_order(order))		/* Via pcp? */
723 		free_unref_page(page, order);
724 	else
725 		__free_pages_ok(page, order, FPI_NONE);
726 }
727 
728 /*
729  * Higher-order pages are called "compound pages".  They are structured thusly:
730  *
731  * The first PAGE_SIZE page is called the "head page" and have PG_head set.
732  *
733  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
734  * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
735  *
736  * The first tail page's ->compound_dtor holds the offset in array of compound
737  * page destructors. See compound_page_dtors.
738  *
739  * The first tail page's ->compound_order holds the order of allocation.
740  * This usage means that zero-order pages may not be compound.
741  */
742 
743 void free_compound_page(struct page *page)
744 {
745 	mem_cgroup_uncharge(page);
746 	free_the_page(page, compound_order(page));
747 }
748 
749 void prep_compound_page(struct page *page, unsigned int order)
750 {
751 	int i;
752 	int nr_pages = 1 << order;
753 
754 	__SetPageHead(page);
755 	for (i = 1; i < nr_pages; i++) {
756 		struct page *p = page + i;
757 		p->mapping = TAIL_MAPPING;
758 		set_compound_head(p, page);
759 	}
760 
761 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
762 	set_compound_order(page, order);
763 	atomic_set(compound_mapcount_ptr(page), -1);
764 	if (hpage_pincount_available(page))
765 		atomic_set(compound_pincount_ptr(page), 0);
766 }
767 
768 #ifdef CONFIG_DEBUG_PAGEALLOC
769 unsigned int _debug_guardpage_minorder;
770 
771 bool _debug_pagealloc_enabled_early __read_mostly
772 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
773 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
774 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
775 EXPORT_SYMBOL(_debug_pagealloc_enabled);
776 
777 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
778 
779 static int __init early_debug_pagealloc(char *buf)
780 {
781 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
782 }
783 early_param("debug_pagealloc", early_debug_pagealloc);
784 
785 static int __init debug_guardpage_minorder_setup(char *buf)
786 {
787 	unsigned long res;
788 
789 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
790 		pr_err("Bad debug_guardpage_minorder value\n");
791 		return 0;
792 	}
793 	_debug_guardpage_minorder = res;
794 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
795 	return 0;
796 }
797 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
798 
799 static inline bool set_page_guard(struct zone *zone, struct page *page,
800 				unsigned int order, int migratetype)
801 {
802 	if (!debug_guardpage_enabled())
803 		return false;
804 
805 	if (order >= debug_guardpage_minorder())
806 		return false;
807 
808 	__SetPageGuard(page);
809 	INIT_LIST_HEAD(&page->lru);
810 	set_page_private(page, order);
811 	/* Guard pages are not available for any usage */
812 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
813 
814 	return true;
815 }
816 
817 static inline void clear_page_guard(struct zone *zone, struct page *page,
818 				unsigned int order, int migratetype)
819 {
820 	if (!debug_guardpage_enabled())
821 		return;
822 
823 	__ClearPageGuard(page);
824 
825 	set_page_private(page, 0);
826 	if (!is_migrate_isolate(migratetype))
827 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
828 }
829 #else
830 static inline bool set_page_guard(struct zone *zone, struct page *page,
831 			unsigned int order, int migratetype) { return false; }
832 static inline void clear_page_guard(struct zone *zone, struct page *page,
833 				unsigned int order, int migratetype) {}
834 #endif
835 
836 /*
837  * Enable static keys related to various memory debugging and hardening options.
838  * Some override others, and depend on early params that are evaluated in the
839  * order of appearance. So we need to first gather the full picture of what was
840  * enabled, and then make decisions.
841  */
842 void init_mem_debugging_and_hardening(void)
843 {
844 	bool page_poisoning_requested = false;
845 
846 #ifdef CONFIG_PAGE_POISONING
847 	/*
848 	 * Page poisoning is debug page alloc for some arches. If
849 	 * either of those options are enabled, enable poisoning.
850 	 */
851 	if (page_poisoning_enabled() ||
852 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
853 	      debug_pagealloc_enabled())) {
854 		static_branch_enable(&_page_poisoning_enabled);
855 		page_poisoning_requested = true;
856 	}
857 #endif
858 
859 	if (_init_on_alloc_enabled_early) {
860 		if (page_poisoning_requested)
861 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
862 				"will take precedence over init_on_alloc\n");
863 		else
864 			static_branch_enable(&init_on_alloc);
865 	}
866 	if (_init_on_free_enabled_early) {
867 		if (page_poisoning_requested)
868 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
869 				"will take precedence over init_on_free\n");
870 		else
871 			static_branch_enable(&init_on_free);
872 	}
873 
874 #ifdef CONFIG_DEBUG_PAGEALLOC
875 	if (!debug_pagealloc_enabled())
876 		return;
877 
878 	static_branch_enable(&_debug_pagealloc_enabled);
879 
880 	if (!debug_guardpage_minorder())
881 		return;
882 
883 	static_branch_enable(&_debug_guardpage_enabled);
884 #endif
885 }
886 
887 static inline void set_buddy_order(struct page *page, unsigned int order)
888 {
889 	set_page_private(page, order);
890 	__SetPageBuddy(page);
891 }
892 
893 /*
894  * This function checks whether a page is free && is the buddy
895  * we can coalesce a page and its buddy if
896  * (a) the buddy is not in a hole (check before calling!) &&
897  * (b) the buddy is in the buddy system &&
898  * (c) a page and its buddy have the same order &&
899  * (d) a page and its buddy are in the same zone.
900  *
901  * For recording whether a page is in the buddy system, we set PageBuddy.
902  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
903  *
904  * For recording page's order, we use page_private(page).
905  */
906 static inline bool page_is_buddy(struct page *page, struct page *buddy,
907 							unsigned int order)
908 {
909 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
910 		return false;
911 
912 	if (buddy_order(buddy) != order)
913 		return false;
914 
915 	/*
916 	 * zone check is done late to avoid uselessly calculating
917 	 * zone/node ids for pages that could never merge.
918 	 */
919 	if (page_zone_id(page) != page_zone_id(buddy))
920 		return false;
921 
922 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
923 
924 	return true;
925 }
926 
927 #ifdef CONFIG_COMPACTION
928 static inline struct capture_control *task_capc(struct zone *zone)
929 {
930 	struct capture_control *capc = current->capture_control;
931 
932 	return unlikely(capc) &&
933 		!(current->flags & PF_KTHREAD) &&
934 		!capc->page &&
935 		capc->cc->zone == zone ? capc : NULL;
936 }
937 
938 static inline bool
939 compaction_capture(struct capture_control *capc, struct page *page,
940 		   int order, int migratetype)
941 {
942 	if (!capc || order != capc->cc->order)
943 		return false;
944 
945 	/* Do not accidentally pollute CMA or isolated regions*/
946 	if (is_migrate_cma(migratetype) ||
947 	    is_migrate_isolate(migratetype))
948 		return false;
949 
950 	/*
951 	 * Do not let lower order allocations pollute a movable pageblock.
952 	 * This might let an unmovable request use a reclaimable pageblock
953 	 * and vice-versa but no more than normal fallback logic which can
954 	 * have trouble finding a high-order free page.
955 	 */
956 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
957 		return false;
958 
959 	capc->page = page;
960 	return true;
961 }
962 
963 #else
964 static inline struct capture_control *task_capc(struct zone *zone)
965 {
966 	return NULL;
967 }
968 
969 static inline bool
970 compaction_capture(struct capture_control *capc, struct page *page,
971 		   int order, int migratetype)
972 {
973 	return false;
974 }
975 #endif /* CONFIG_COMPACTION */
976 
977 /* Used for pages not on another list */
978 static inline void add_to_free_list(struct page *page, struct zone *zone,
979 				    unsigned int order, int migratetype)
980 {
981 	struct free_area *area = &zone->free_area[order];
982 
983 	list_add(&page->lru, &area->free_list[migratetype]);
984 	area->nr_free++;
985 }
986 
987 /* Used for pages not on another list */
988 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
989 					 unsigned int order, int migratetype)
990 {
991 	struct free_area *area = &zone->free_area[order];
992 
993 	list_add_tail(&page->lru, &area->free_list[migratetype]);
994 	area->nr_free++;
995 }
996 
997 /*
998  * Used for pages which are on another list. Move the pages to the tail
999  * of the list - so the moved pages won't immediately be considered for
1000  * allocation again (e.g., optimization for memory onlining).
1001  */
1002 static inline void move_to_free_list(struct page *page, struct zone *zone,
1003 				     unsigned int order, int migratetype)
1004 {
1005 	struct free_area *area = &zone->free_area[order];
1006 
1007 	list_move_tail(&page->lru, &area->free_list[migratetype]);
1008 }
1009 
1010 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
1011 					   unsigned int order)
1012 {
1013 	/* clear reported state and update reported page count */
1014 	if (page_reported(page))
1015 		__ClearPageReported(page);
1016 
1017 	list_del(&page->lru);
1018 	__ClearPageBuddy(page);
1019 	set_page_private(page, 0);
1020 	zone->free_area[order].nr_free--;
1021 }
1022 
1023 /*
1024  * If this is not the largest possible page, check if the buddy
1025  * of the next-highest order is free. If it is, it's possible
1026  * that pages are being freed that will coalesce soon. In case,
1027  * that is happening, add the free page to the tail of the list
1028  * so it's less likely to be used soon and more likely to be merged
1029  * as a higher order page
1030  */
1031 static inline bool
1032 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1033 		   struct page *page, unsigned int order)
1034 {
1035 	struct page *higher_page, *higher_buddy;
1036 	unsigned long combined_pfn;
1037 
1038 	if (order >= MAX_ORDER - 2)
1039 		return false;
1040 
1041 	if (!pfn_valid_within(buddy_pfn))
1042 		return false;
1043 
1044 	combined_pfn = buddy_pfn & pfn;
1045 	higher_page = page + (combined_pfn - pfn);
1046 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1047 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1048 
1049 	return pfn_valid_within(buddy_pfn) &&
1050 	       page_is_buddy(higher_page, higher_buddy, order + 1);
1051 }
1052 
1053 /*
1054  * Freeing function for a buddy system allocator.
1055  *
1056  * The concept of a buddy system is to maintain direct-mapped table
1057  * (containing bit values) for memory blocks of various "orders".
1058  * The bottom level table contains the map for the smallest allocatable
1059  * units of memory (here, pages), and each level above it describes
1060  * pairs of units from the levels below, hence, "buddies".
1061  * At a high level, all that happens here is marking the table entry
1062  * at the bottom level available, and propagating the changes upward
1063  * as necessary, plus some accounting needed to play nicely with other
1064  * parts of the VM system.
1065  * At each level, we keep a list of pages, which are heads of continuous
1066  * free pages of length of (1 << order) and marked with PageBuddy.
1067  * Page's order is recorded in page_private(page) field.
1068  * So when we are allocating or freeing one, we can derive the state of the
1069  * other.  That is, if we allocate a small block, and both were
1070  * free, the remainder of the region must be split into blocks.
1071  * If a block is freed, and its buddy is also free, then this
1072  * triggers coalescing into a block of larger size.
1073  *
1074  * -- nyc
1075  */
1076 
1077 static inline void __free_one_page(struct page *page,
1078 		unsigned long pfn,
1079 		struct zone *zone, unsigned int order,
1080 		int migratetype, fpi_t fpi_flags)
1081 {
1082 	struct capture_control *capc = task_capc(zone);
1083 	unsigned long buddy_pfn;
1084 	unsigned long combined_pfn;
1085 	unsigned int max_order;
1086 	struct page *buddy;
1087 	bool to_tail;
1088 
1089 	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1090 
1091 	VM_BUG_ON(!zone_is_initialized(zone));
1092 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1093 
1094 	VM_BUG_ON(migratetype == -1);
1095 	if (likely(!is_migrate_isolate(migratetype)))
1096 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1097 
1098 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1099 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1100 
1101 continue_merging:
1102 	while (order < max_order) {
1103 		if (compaction_capture(capc, page, order, migratetype)) {
1104 			__mod_zone_freepage_state(zone, -(1 << order),
1105 								migratetype);
1106 			return;
1107 		}
1108 		buddy_pfn = __find_buddy_pfn(pfn, order);
1109 		buddy = page + (buddy_pfn - pfn);
1110 
1111 		if (!pfn_valid_within(buddy_pfn))
1112 			goto done_merging;
1113 		if (!page_is_buddy(page, buddy, order))
1114 			goto done_merging;
1115 		/*
1116 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1117 		 * merge with it and move up one order.
1118 		 */
1119 		if (page_is_guard(buddy))
1120 			clear_page_guard(zone, buddy, order, migratetype);
1121 		else
1122 			del_page_from_free_list(buddy, zone, order);
1123 		combined_pfn = buddy_pfn & pfn;
1124 		page = page + (combined_pfn - pfn);
1125 		pfn = combined_pfn;
1126 		order++;
1127 	}
1128 	if (order < MAX_ORDER - 1) {
1129 		/* If we are here, it means order is >= pageblock_order.
1130 		 * We want to prevent merge between freepages on isolate
1131 		 * pageblock and normal pageblock. Without this, pageblock
1132 		 * isolation could cause incorrect freepage or CMA accounting.
1133 		 *
1134 		 * We don't want to hit this code for the more frequent
1135 		 * low-order merging.
1136 		 */
1137 		if (unlikely(has_isolate_pageblock(zone))) {
1138 			int buddy_mt;
1139 
1140 			buddy_pfn = __find_buddy_pfn(pfn, order);
1141 			buddy = page + (buddy_pfn - pfn);
1142 			buddy_mt = get_pageblock_migratetype(buddy);
1143 
1144 			if (migratetype != buddy_mt
1145 					&& (is_migrate_isolate(migratetype) ||
1146 						is_migrate_isolate(buddy_mt)))
1147 				goto done_merging;
1148 		}
1149 		max_order = order + 1;
1150 		goto continue_merging;
1151 	}
1152 
1153 done_merging:
1154 	set_buddy_order(page, order);
1155 
1156 	if (fpi_flags & FPI_TO_TAIL)
1157 		to_tail = true;
1158 	else if (is_shuffle_order(order))
1159 		to_tail = shuffle_pick_tail();
1160 	else
1161 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1162 
1163 	if (to_tail)
1164 		add_to_free_list_tail(page, zone, order, migratetype);
1165 	else
1166 		add_to_free_list(page, zone, order, migratetype);
1167 
1168 	/* Notify page reporting subsystem of freed page */
1169 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1170 		page_reporting_notify_free(order);
1171 }
1172 
1173 /*
1174  * A bad page could be due to a number of fields. Instead of multiple branches,
1175  * try and check multiple fields with one check. The caller must do a detailed
1176  * check if necessary.
1177  */
1178 static inline bool page_expected_state(struct page *page,
1179 					unsigned long check_flags)
1180 {
1181 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1182 		return false;
1183 
1184 	if (unlikely((unsigned long)page->mapping |
1185 			page_ref_count(page) |
1186 #ifdef CONFIG_MEMCG
1187 			page->memcg_data |
1188 #endif
1189 			(page->flags & check_flags)))
1190 		return false;
1191 
1192 	return true;
1193 }
1194 
1195 static const char *page_bad_reason(struct page *page, unsigned long flags)
1196 {
1197 	const char *bad_reason = NULL;
1198 
1199 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1200 		bad_reason = "nonzero mapcount";
1201 	if (unlikely(page->mapping != NULL))
1202 		bad_reason = "non-NULL mapping";
1203 	if (unlikely(page_ref_count(page) != 0))
1204 		bad_reason = "nonzero _refcount";
1205 	if (unlikely(page->flags & flags)) {
1206 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1207 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1208 		else
1209 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1210 	}
1211 #ifdef CONFIG_MEMCG
1212 	if (unlikely(page->memcg_data))
1213 		bad_reason = "page still charged to cgroup";
1214 #endif
1215 	return bad_reason;
1216 }
1217 
1218 static void check_free_page_bad(struct page *page)
1219 {
1220 	bad_page(page,
1221 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1222 }
1223 
1224 static inline int check_free_page(struct page *page)
1225 {
1226 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1227 		return 0;
1228 
1229 	/* Something has gone sideways, find it */
1230 	check_free_page_bad(page);
1231 	return 1;
1232 }
1233 
1234 static int free_tail_pages_check(struct page *head_page, struct page *page)
1235 {
1236 	int ret = 1;
1237 
1238 	/*
1239 	 * We rely page->lru.next never has bit 0 set, unless the page
1240 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1241 	 */
1242 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1243 
1244 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1245 		ret = 0;
1246 		goto out;
1247 	}
1248 	switch (page - head_page) {
1249 	case 1:
1250 		/* the first tail page: ->mapping may be compound_mapcount() */
1251 		if (unlikely(compound_mapcount(page))) {
1252 			bad_page(page, "nonzero compound_mapcount");
1253 			goto out;
1254 		}
1255 		break;
1256 	case 2:
1257 		/*
1258 		 * the second tail page: ->mapping is
1259 		 * deferred_list.next -- ignore value.
1260 		 */
1261 		break;
1262 	default:
1263 		if (page->mapping != TAIL_MAPPING) {
1264 			bad_page(page, "corrupted mapping in tail page");
1265 			goto out;
1266 		}
1267 		break;
1268 	}
1269 	if (unlikely(!PageTail(page))) {
1270 		bad_page(page, "PageTail not set");
1271 		goto out;
1272 	}
1273 	if (unlikely(compound_head(page) != head_page)) {
1274 		bad_page(page, "compound_head not consistent");
1275 		goto out;
1276 	}
1277 	ret = 0;
1278 out:
1279 	page->mapping = NULL;
1280 	clear_compound_head(page);
1281 	return ret;
1282 }
1283 
1284 static void kernel_init_free_pages(struct page *page, int numpages)
1285 {
1286 	int i;
1287 
1288 	/* s390's use of memset() could override KASAN redzones. */
1289 	kasan_disable_current();
1290 	for (i = 0; i < numpages; i++) {
1291 		u8 tag = page_kasan_tag(page + i);
1292 		page_kasan_tag_reset(page + i);
1293 		clear_highpage(page + i);
1294 		page_kasan_tag_set(page + i, tag);
1295 	}
1296 	kasan_enable_current();
1297 }
1298 
1299 static __always_inline bool free_pages_prepare(struct page *page,
1300 			unsigned int order, bool check_free, fpi_t fpi_flags)
1301 {
1302 	int bad = 0;
1303 	bool init;
1304 
1305 	VM_BUG_ON_PAGE(PageTail(page), page);
1306 
1307 	trace_mm_page_free(page, order);
1308 
1309 	if (unlikely(PageHWPoison(page)) && !order) {
1310 		/*
1311 		 * Do not let hwpoison pages hit pcplists/buddy
1312 		 * Untie memcg state and reset page's owner
1313 		 */
1314 		if (memcg_kmem_enabled() && PageMemcgKmem(page))
1315 			__memcg_kmem_uncharge_page(page, order);
1316 		reset_page_owner(page, order);
1317 		return false;
1318 	}
1319 
1320 	/*
1321 	 * Check tail pages before head page information is cleared to
1322 	 * avoid checking PageCompound for order-0 pages.
1323 	 */
1324 	if (unlikely(order)) {
1325 		bool compound = PageCompound(page);
1326 		int i;
1327 
1328 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1329 
1330 		if (compound)
1331 			ClearPageDoubleMap(page);
1332 		for (i = 1; i < (1 << order); i++) {
1333 			if (compound)
1334 				bad += free_tail_pages_check(page, page + i);
1335 			if (unlikely(check_free_page(page + i))) {
1336 				bad++;
1337 				continue;
1338 			}
1339 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1340 		}
1341 	}
1342 	if (PageMappingFlags(page))
1343 		page->mapping = NULL;
1344 	if (memcg_kmem_enabled() && PageMemcgKmem(page))
1345 		__memcg_kmem_uncharge_page(page, order);
1346 	if (check_free)
1347 		bad += check_free_page(page);
1348 	if (bad)
1349 		return false;
1350 
1351 	page_cpupid_reset_last(page);
1352 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1353 	reset_page_owner(page, order);
1354 
1355 	if (!PageHighMem(page)) {
1356 		debug_check_no_locks_freed(page_address(page),
1357 					   PAGE_SIZE << order);
1358 		debug_check_no_obj_freed(page_address(page),
1359 					   PAGE_SIZE << order);
1360 	}
1361 
1362 	kernel_poison_pages(page, 1 << order);
1363 
1364 	/*
1365 	 * As memory initialization might be integrated into KASAN,
1366 	 * kasan_free_pages and kernel_init_free_pages must be
1367 	 * kept together to avoid discrepancies in behavior.
1368 	 *
1369 	 * With hardware tag-based KASAN, memory tags must be set before the
1370 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
1371 	 */
1372 	init = want_init_on_free();
1373 	if (init && !kasan_has_integrated_init())
1374 		kernel_init_free_pages(page, 1 << order);
1375 	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
1376 
1377 	/*
1378 	 * arch_free_page() can make the page's contents inaccessible.  s390
1379 	 * does this.  So nothing which can access the page's contents should
1380 	 * happen after this.
1381 	 */
1382 	arch_free_page(page, order);
1383 
1384 	debug_pagealloc_unmap_pages(page, 1 << order);
1385 
1386 	return true;
1387 }
1388 
1389 #ifdef CONFIG_DEBUG_VM
1390 /*
1391  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1392  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1393  * moved from pcp lists to free lists.
1394  */
1395 static bool free_pcp_prepare(struct page *page, unsigned int order)
1396 {
1397 	return free_pages_prepare(page, order, true, FPI_NONE);
1398 }
1399 
1400 static bool bulkfree_pcp_prepare(struct page *page)
1401 {
1402 	if (debug_pagealloc_enabled_static())
1403 		return check_free_page(page);
1404 	else
1405 		return false;
1406 }
1407 #else
1408 /*
1409  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1410  * moving from pcp lists to free list in order to reduce overhead. With
1411  * debug_pagealloc enabled, they are checked also immediately when being freed
1412  * to the pcp lists.
1413  */
1414 static bool free_pcp_prepare(struct page *page, unsigned int order)
1415 {
1416 	if (debug_pagealloc_enabled_static())
1417 		return free_pages_prepare(page, order, true, FPI_NONE);
1418 	else
1419 		return free_pages_prepare(page, order, false, FPI_NONE);
1420 }
1421 
1422 static bool bulkfree_pcp_prepare(struct page *page)
1423 {
1424 	return check_free_page(page);
1425 }
1426 #endif /* CONFIG_DEBUG_VM */
1427 
1428 static inline void prefetch_buddy(struct page *page)
1429 {
1430 	unsigned long pfn = page_to_pfn(page);
1431 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1432 	struct page *buddy = page + (buddy_pfn - pfn);
1433 
1434 	prefetch(buddy);
1435 }
1436 
1437 /*
1438  * Frees a number of pages from the PCP lists
1439  * Assumes all pages on list are in same zone, and of same order.
1440  * count is the number of pages to free.
1441  *
1442  * If the zone was previously in an "all pages pinned" state then look to
1443  * see if this freeing clears that state.
1444  *
1445  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1446  * pinned" detection logic.
1447  */
1448 static void free_pcppages_bulk(struct zone *zone, int count,
1449 					struct per_cpu_pages *pcp)
1450 {
1451 	int pindex = 0;
1452 	int batch_free = 0;
1453 	int nr_freed = 0;
1454 	unsigned int order;
1455 	int prefetch_nr = READ_ONCE(pcp->batch);
1456 	bool isolated_pageblocks;
1457 	struct page *page, *tmp;
1458 	LIST_HEAD(head);
1459 
1460 	/*
1461 	 * Ensure proper count is passed which otherwise would stuck in the
1462 	 * below while (list_empty(list)) loop.
1463 	 */
1464 	count = min(pcp->count, count);
1465 	while (count > 0) {
1466 		struct list_head *list;
1467 
1468 		/*
1469 		 * Remove pages from lists in a round-robin fashion. A
1470 		 * batch_free count is maintained that is incremented when an
1471 		 * empty list is encountered.  This is so more pages are freed
1472 		 * off fuller lists instead of spinning excessively around empty
1473 		 * lists
1474 		 */
1475 		do {
1476 			batch_free++;
1477 			if (++pindex == NR_PCP_LISTS)
1478 				pindex = 0;
1479 			list = &pcp->lists[pindex];
1480 		} while (list_empty(list));
1481 
1482 		/* This is the only non-empty list. Free them all. */
1483 		if (batch_free == NR_PCP_LISTS)
1484 			batch_free = count;
1485 
1486 		order = pindex_to_order(pindex);
1487 		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1488 		do {
1489 			page = list_last_entry(list, struct page, lru);
1490 			/* must delete to avoid corrupting pcp list */
1491 			list_del(&page->lru);
1492 			nr_freed += 1 << order;
1493 			count -= 1 << order;
1494 
1495 			if (bulkfree_pcp_prepare(page))
1496 				continue;
1497 
1498 			/* Encode order with the migratetype */
1499 			page->index <<= NR_PCP_ORDER_WIDTH;
1500 			page->index |= order;
1501 
1502 			list_add_tail(&page->lru, &head);
1503 
1504 			/*
1505 			 * We are going to put the page back to the global
1506 			 * pool, prefetch its buddy to speed up later access
1507 			 * under zone->lock. It is believed the overhead of
1508 			 * an additional test and calculating buddy_pfn here
1509 			 * can be offset by reduced memory latency later. To
1510 			 * avoid excessive prefetching due to large count, only
1511 			 * prefetch buddy for the first pcp->batch nr of pages.
1512 			 */
1513 			if (prefetch_nr) {
1514 				prefetch_buddy(page);
1515 				prefetch_nr--;
1516 			}
1517 		} while (count > 0 && --batch_free && !list_empty(list));
1518 	}
1519 	pcp->count -= nr_freed;
1520 
1521 	/*
1522 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
1523 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
1524 	 */
1525 	spin_lock(&zone->lock);
1526 	isolated_pageblocks = has_isolate_pageblock(zone);
1527 
1528 	/*
1529 	 * Use safe version since after __free_one_page(),
1530 	 * page->lru.next will not point to original list.
1531 	 */
1532 	list_for_each_entry_safe(page, tmp, &head, lru) {
1533 		int mt = get_pcppage_migratetype(page);
1534 
1535 		/* mt has been encoded with the order (see above) */
1536 		order = mt & NR_PCP_ORDER_MASK;
1537 		mt >>= NR_PCP_ORDER_WIDTH;
1538 
1539 		/* MIGRATE_ISOLATE page should not go to pcplists */
1540 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1541 		/* Pageblock could have been isolated meanwhile */
1542 		if (unlikely(isolated_pageblocks))
1543 			mt = get_pageblock_migratetype(page);
1544 
1545 		__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1546 		trace_mm_page_pcpu_drain(page, order, mt);
1547 	}
1548 	spin_unlock(&zone->lock);
1549 }
1550 
1551 static void free_one_page(struct zone *zone,
1552 				struct page *page, unsigned long pfn,
1553 				unsigned int order,
1554 				int migratetype, fpi_t fpi_flags)
1555 {
1556 	unsigned long flags;
1557 
1558 	spin_lock_irqsave(&zone->lock, flags);
1559 	if (unlikely(has_isolate_pageblock(zone) ||
1560 		is_migrate_isolate(migratetype))) {
1561 		migratetype = get_pfnblock_migratetype(page, pfn);
1562 	}
1563 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1564 	spin_unlock_irqrestore(&zone->lock, flags);
1565 }
1566 
1567 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1568 				unsigned long zone, int nid)
1569 {
1570 	mm_zero_struct_page(page);
1571 	set_page_links(page, zone, nid, pfn);
1572 	init_page_count(page);
1573 	page_mapcount_reset(page);
1574 	page_cpupid_reset_last(page);
1575 	page_kasan_tag_reset(page);
1576 
1577 	INIT_LIST_HEAD(&page->lru);
1578 #ifdef WANT_PAGE_VIRTUAL
1579 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1580 	if (!is_highmem_idx(zone))
1581 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1582 #endif
1583 }
1584 
1585 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1586 static void __meminit init_reserved_page(unsigned long pfn)
1587 {
1588 	pg_data_t *pgdat;
1589 	int nid, zid;
1590 
1591 	if (!early_page_uninitialised(pfn))
1592 		return;
1593 
1594 	nid = early_pfn_to_nid(pfn);
1595 	pgdat = NODE_DATA(nid);
1596 
1597 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1598 		struct zone *zone = &pgdat->node_zones[zid];
1599 
1600 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1601 			break;
1602 	}
1603 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1604 }
1605 #else
1606 static inline void init_reserved_page(unsigned long pfn)
1607 {
1608 }
1609 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1610 
1611 /*
1612  * Initialised pages do not have PageReserved set. This function is
1613  * called for each range allocated by the bootmem allocator and
1614  * marks the pages PageReserved. The remaining valid pages are later
1615  * sent to the buddy page allocator.
1616  */
1617 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1618 {
1619 	unsigned long start_pfn = PFN_DOWN(start);
1620 	unsigned long end_pfn = PFN_UP(end);
1621 
1622 	for (; start_pfn < end_pfn; start_pfn++) {
1623 		if (pfn_valid(start_pfn)) {
1624 			struct page *page = pfn_to_page(start_pfn);
1625 
1626 			init_reserved_page(start_pfn);
1627 
1628 			/* Avoid false-positive PageTail() */
1629 			INIT_LIST_HEAD(&page->lru);
1630 
1631 			/*
1632 			 * no need for atomic set_bit because the struct
1633 			 * page is not visible yet so nobody should
1634 			 * access it yet.
1635 			 */
1636 			__SetPageReserved(page);
1637 		}
1638 	}
1639 }
1640 
1641 static void __free_pages_ok(struct page *page, unsigned int order,
1642 			    fpi_t fpi_flags)
1643 {
1644 	unsigned long flags;
1645 	int migratetype;
1646 	unsigned long pfn = page_to_pfn(page);
1647 	struct zone *zone = page_zone(page);
1648 
1649 	if (!free_pages_prepare(page, order, true, fpi_flags))
1650 		return;
1651 
1652 	migratetype = get_pfnblock_migratetype(page, pfn);
1653 
1654 	spin_lock_irqsave(&zone->lock, flags);
1655 	if (unlikely(has_isolate_pageblock(zone) ||
1656 		is_migrate_isolate(migratetype))) {
1657 		migratetype = get_pfnblock_migratetype(page, pfn);
1658 	}
1659 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1660 	spin_unlock_irqrestore(&zone->lock, flags);
1661 
1662 	__count_vm_events(PGFREE, 1 << order);
1663 }
1664 
1665 void __free_pages_core(struct page *page, unsigned int order)
1666 {
1667 	unsigned int nr_pages = 1 << order;
1668 	struct page *p = page;
1669 	unsigned int loop;
1670 
1671 	/*
1672 	 * When initializing the memmap, __init_single_page() sets the refcount
1673 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1674 	 * refcount of all involved pages to 0.
1675 	 */
1676 	prefetchw(p);
1677 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1678 		prefetchw(p + 1);
1679 		__ClearPageReserved(p);
1680 		set_page_count(p, 0);
1681 	}
1682 	__ClearPageReserved(p);
1683 	set_page_count(p, 0);
1684 
1685 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1686 
1687 	/*
1688 	 * Bypass PCP and place fresh pages right to the tail, primarily
1689 	 * relevant for memory onlining.
1690 	 */
1691 	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1692 }
1693 
1694 #ifdef CONFIG_NUMA
1695 
1696 /*
1697  * During memory init memblocks map pfns to nids. The search is expensive and
1698  * this caches recent lookups. The implementation of __early_pfn_to_nid
1699  * treats start/end as pfns.
1700  */
1701 struct mminit_pfnnid_cache {
1702 	unsigned long last_start;
1703 	unsigned long last_end;
1704 	int last_nid;
1705 };
1706 
1707 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1708 
1709 /*
1710  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1711  */
1712 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1713 					struct mminit_pfnnid_cache *state)
1714 {
1715 	unsigned long start_pfn, end_pfn;
1716 	int nid;
1717 
1718 	if (state->last_start <= pfn && pfn < state->last_end)
1719 		return state->last_nid;
1720 
1721 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1722 	if (nid != NUMA_NO_NODE) {
1723 		state->last_start = start_pfn;
1724 		state->last_end = end_pfn;
1725 		state->last_nid = nid;
1726 	}
1727 
1728 	return nid;
1729 }
1730 
1731 int __meminit early_pfn_to_nid(unsigned long pfn)
1732 {
1733 	static DEFINE_SPINLOCK(early_pfn_lock);
1734 	int nid;
1735 
1736 	spin_lock(&early_pfn_lock);
1737 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1738 	if (nid < 0)
1739 		nid = first_online_node;
1740 	spin_unlock(&early_pfn_lock);
1741 
1742 	return nid;
1743 }
1744 #endif /* CONFIG_NUMA */
1745 
1746 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1747 							unsigned int order)
1748 {
1749 	if (early_page_uninitialised(pfn))
1750 		return;
1751 	__free_pages_core(page, order);
1752 }
1753 
1754 /*
1755  * Check that the whole (or subset of) a pageblock given by the interval of
1756  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1757  * with the migration of free compaction scanner. The scanners then need to
1758  * use only pfn_valid_within() check for arches that allow holes within
1759  * pageblocks.
1760  *
1761  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1762  *
1763  * It's possible on some configurations to have a setup like node0 node1 node0
1764  * i.e. it's possible that all pages within a zones range of pages do not
1765  * belong to a single zone. We assume that a border between node0 and node1
1766  * can occur within a single pageblock, but not a node0 node1 node0
1767  * interleaving within a single pageblock. It is therefore sufficient to check
1768  * the first and last page of a pageblock and avoid checking each individual
1769  * page in a pageblock.
1770  */
1771 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1772 				     unsigned long end_pfn, struct zone *zone)
1773 {
1774 	struct page *start_page;
1775 	struct page *end_page;
1776 
1777 	/* end_pfn is one past the range we are checking */
1778 	end_pfn--;
1779 
1780 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1781 		return NULL;
1782 
1783 	start_page = pfn_to_online_page(start_pfn);
1784 	if (!start_page)
1785 		return NULL;
1786 
1787 	if (page_zone(start_page) != zone)
1788 		return NULL;
1789 
1790 	end_page = pfn_to_page(end_pfn);
1791 
1792 	/* This gives a shorter code than deriving page_zone(end_page) */
1793 	if (page_zone_id(start_page) != page_zone_id(end_page))
1794 		return NULL;
1795 
1796 	return start_page;
1797 }
1798 
1799 void set_zone_contiguous(struct zone *zone)
1800 {
1801 	unsigned long block_start_pfn = zone->zone_start_pfn;
1802 	unsigned long block_end_pfn;
1803 
1804 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1805 	for (; block_start_pfn < zone_end_pfn(zone);
1806 			block_start_pfn = block_end_pfn,
1807 			 block_end_pfn += pageblock_nr_pages) {
1808 
1809 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1810 
1811 		if (!__pageblock_pfn_to_page(block_start_pfn,
1812 					     block_end_pfn, zone))
1813 			return;
1814 		cond_resched();
1815 	}
1816 
1817 	/* We confirm that there is no hole */
1818 	zone->contiguous = true;
1819 }
1820 
1821 void clear_zone_contiguous(struct zone *zone)
1822 {
1823 	zone->contiguous = false;
1824 }
1825 
1826 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1827 static void __init deferred_free_range(unsigned long pfn,
1828 				       unsigned long nr_pages)
1829 {
1830 	struct page *page;
1831 	unsigned long i;
1832 
1833 	if (!nr_pages)
1834 		return;
1835 
1836 	page = pfn_to_page(pfn);
1837 
1838 	/* Free a large naturally-aligned chunk if possible */
1839 	if (nr_pages == pageblock_nr_pages &&
1840 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1841 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1842 		__free_pages_core(page, pageblock_order);
1843 		return;
1844 	}
1845 
1846 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1847 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1848 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1849 		__free_pages_core(page, 0);
1850 	}
1851 }
1852 
1853 /* Completion tracking for deferred_init_memmap() threads */
1854 static atomic_t pgdat_init_n_undone __initdata;
1855 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1856 
1857 static inline void __init pgdat_init_report_one_done(void)
1858 {
1859 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1860 		complete(&pgdat_init_all_done_comp);
1861 }
1862 
1863 /*
1864  * Returns true if page needs to be initialized or freed to buddy allocator.
1865  *
1866  * First we check if pfn is valid on architectures where it is possible to have
1867  * holes within pageblock_nr_pages. On systems where it is not possible, this
1868  * function is optimized out.
1869  *
1870  * Then, we check if a current large page is valid by only checking the validity
1871  * of the head pfn.
1872  */
1873 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1874 {
1875 	if (!pfn_valid_within(pfn))
1876 		return false;
1877 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1878 		return false;
1879 	return true;
1880 }
1881 
1882 /*
1883  * Free pages to buddy allocator. Try to free aligned pages in
1884  * pageblock_nr_pages sizes.
1885  */
1886 static void __init deferred_free_pages(unsigned long pfn,
1887 				       unsigned long end_pfn)
1888 {
1889 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1890 	unsigned long nr_free = 0;
1891 
1892 	for (; pfn < end_pfn; pfn++) {
1893 		if (!deferred_pfn_valid(pfn)) {
1894 			deferred_free_range(pfn - nr_free, nr_free);
1895 			nr_free = 0;
1896 		} else if (!(pfn & nr_pgmask)) {
1897 			deferred_free_range(pfn - nr_free, nr_free);
1898 			nr_free = 1;
1899 		} else {
1900 			nr_free++;
1901 		}
1902 	}
1903 	/* Free the last block of pages to allocator */
1904 	deferred_free_range(pfn - nr_free, nr_free);
1905 }
1906 
1907 /*
1908  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1909  * by performing it only once every pageblock_nr_pages.
1910  * Return number of pages initialized.
1911  */
1912 static unsigned long  __init deferred_init_pages(struct zone *zone,
1913 						 unsigned long pfn,
1914 						 unsigned long end_pfn)
1915 {
1916 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1917 	int nid = zone_to_nid(zone);
1918 	unsigned long nr_pages = 0;
1919 	int zid = zone_idx(zone);
1920 	struct page *page = NULL;
1921 
1922 	for (; pfn < end_pfn; pfn++) {
1923 		if (!deferred_pfn_valid(pfn)) {
1924 			page = NULL;
1925 			continue;
1926 		} else if (!page || !(pfn & nr_pgmask)) {
1927 			page = pfn_to_page(pfn);
1928 		} else {
1929 			page++;
1930 		}
1931 		__init_single_page(page, pfn, zid, nid);
1932 		nr_pages++;
1933 	}
1934 	return (nr_pages);
1935 }
1936 
1937 /*
1938  * This function is meant to pre-load the iterator for the zone init.
1939  * Specifically it walks through the ranges until we are caught up to the
1940  * first_init_pfn value and exits there. If we never encounter the value we
1941  * return false indicating there are no valid ranges left.
1942  */
1943 static bool __init
1944 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1945 				    unsigned long *spfn, unsigned long *epfn,
1946 				    unsigned long first_init_pfn)
1947 {
1948 	u64 j;
1949 
1950 	/*
1951 	 * Start out by walking through the ranges in this zone that have
1952 	 * already been initialized. We don't need to do anything with them
1953 	 * so we just need to flush them out of the system.
1954 	 */
1955 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1956 		if (*epfn <= first_init_pfn)
1957 			continue;
1958 		if (*spfn < first_init_pfn)
1959 			*spfn = first_init_pfn;
1960 		*i = j;
1961 		return true;
1962 	}
1963 
1964 	return false;
1965 }
1966 
1967 /*
1968  * Initialize and free pages. We do it in two loops: first we initialize
1969  * struct page, then free to buddy allocator, because while we are
1970  * freeing pages we can access pages that are ahead (computing buddy
1971  * page in __free_one_page()).
1972  *
1973  * In order to try and keep some memory in the cache we have the loop
1974  * broken along max page order boundaries. This way we will not cause
1975  * any issues with the buddy page computation.
1976  */
1977 static unsigned long __init
1978 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1979 		       unsigned long *end_pfn)
1980 {
1981 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1982 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1983 	unsigned long nr_pages = 0;
1984 	u64 j = *i;
1985 
1986 	/* First we loop through and initialize the page values */
1987 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1988 		unsigned long t;
1989 
1990 		if (mo_pfn <= *start_pfn)
1991 			break;
1992 
1993 		t = min(mo_pfn, *end_pfn);
1994 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1995 
1996 		if (mo_pfn < *end_pfn) {
1997 			*start_pfn = mo_pfn;
1998 			break;
1999 		}
2000 	}
2001 
2002 	/* Reset values and now loop through freeing pages as needed */
2003 	swap(j, *i);
2004 
2005 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2006 		unsigned long t;
2007 
2008 		if (mo_pfn <= spfn)
2009 			break;
2010 
2011 		t = min(mo_pfn, epfn);
2012 		deferred_free_pages(spfn, t);
2013 
2014 		if (mo_pfn <= epfn)
2015 			break;
2016 	}
2017 
2018 	return nr_pages;
2019 }
2020 
2021 static void __init
2022 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2023 			   void *arg)
2024 {
2025 	unsigned long spfn, epfn;
2026 	struct zone *zone = arg;
2027 	u64 i;
2028 
2029 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2030 
2031 	/*
2032 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2033 	 * can avoid introducing any issues with the buddy allocator.
2034 	 */
2035 	while (spfn < end_pfn) {
2036 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2037 		cond_resched();
2038 	}
2039 }
2040 
2041 /* An arch may override for more concurrency. */
2042 __weak int __init
2043 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2044 {
2045 	return 1;
2046 }
2047 
2048 /* Initialise remaining memory on a node */
2049 static int __init deferred_init_memmap(void *data)
2050 {
2051 	pg_data_t *pgdat = data;
2052 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2053 	unsigned long spfn = 0, epfn = 0;
2054 	unsigned long first_init_pfn, flags;
2055 	unsigned long start = jiffies;
2056 	struct zone *zone;
2057 	int zid, max_threads;
2058 	u64 i;
2059 
2060 	/* Bind memory initialisation thread to a local node if possible */
2061 	if (!cpumask_empty(cpumask))
2062 		set_cpus_allowed_ptr(current, cpumask);
2063 
2064 	pgdat_resize_lock(pgdat, &flags);
2065 	first_init_pfn = pgdat->first_deferred_pfn;
2066 	if (first_init_pfn == ULONG_MAX) {
2067 		pgdat_resize_unlock(pgdat, &flags);
2068 		pgdat_init_report_one_done();
2069 		return 0;
2070 	}
2071 
2072 	/* Sanity check boundaries */
2073 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2074 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2075 	pgdat->first_deferred_pfn = ULONG_MAX;
2076 
2077 	/*
2078 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
2079 	 * interrupt thread must allocate this early in boot, zone must be
2080 	 * pre-grown prior to start of deferred page initialization.
2081 	 */
2082 	pgdat_resize_unlock(pgdat, &flags);
2083 
2084 	/* Only the highest zone is deferred so find it */
2085 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2086 		zone = pgdat->node_zones + zid;
2087 		if (first_init_pfn < zone_end_pfn(zone))
2088 			break;
2089 	}
2090 
2091 	/* If the zone is empty somebody else may have cleared out the zone */
2092 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2093 						 first_init_pfn))
2094 		goto zone_empty;
2095 
2096 	max_threads = deferred_page_init_max_threads(cpumask);
2097 
2098 	while (spfn < epfn) {
2099 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2100 		struct padata_mt_job job = {
2101 			.thread_fn   = deferred_init_memmap_chunk,
2102 			.fn_arg      = zone,
2103 			.start       = spfn,
2104 			.size        = epfn_align - spfn,
2105 			.align       = PAGES_PER_SECTION,
2106 			.min_chunk   = PAGES_PER_SECTION,
2107 			.max_threads = max_threads,
2108 		};
2109 
2110 		padata_do_multithreaded(&job);
2111 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2112 						    epfn_align);
2113 	}
2114 zone_empty:
2115 	/* Sanity check that the next zone really is unpopulated */
2116 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2117 
2118 	pr_info("node %d deferred pages initialised in %ums\n",
2119 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2120 
2121 	pgdat_init_report_one_done();
2122 	return 0;
2123 }
2124 
2125 /*
2126  * If this zone has deferred pages, try to grow it by initializing enough
2127  * deferred pages to satisfy the allocation specified by order, rounded up to
2128  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2129  * of SECTION_SIZE bytes by initializing struct pages in increments of
2130  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2131  *
2132  * Return true when zone was grown, otherwise return false. We return true even
2133  * when we grow less than requested, to let the caller decide if there are
2134  * enough pages to satisfy the allocation.
2135  *
2136  * Note: We use noinline because this function is needed only during boot, and
2137  * it is called from a __ref function _deferred_grow_zone. This way we are
2138  * making sure that it is not inlined into permanent text section.
2139  */
2140 static noinline bool __init
2141 deferred_grow_zone(struct zone *zone, unsigned int order)
2142 {
2143 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2144 	pg_data_t *pgdat = zone->zone_pgdat;
2145 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2146 	unsigned long spfn, epfn, flags;
2147 	unsigned long nr_pages = 0;
2148 	u64 i;
2149 
2150 	/* Only the last zone may have deferred pages */
2151 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2152 		return false;
2153 
2154 	pgdat_resize_lock(pgdat, &flags);
2155 
2156 	/*
2157 	 * If someone grew this zone while we were waiting for spinlock, return
2158 	 * true, as there might be enough pages already.
2159 	 */
2160 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2161 		pgdat_resize_unlock(pgdat, &flags);
2162 		return true;
2163 	}
2164 
2165 	/* If the zone is empty somebody else may have cleared out the zone */
2166 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2167 						 first_deferred_pfn)) {
2168 		pgdat->first_deferred_pfn = ULONG_MAX;
2169 		pgdat_resize_unlock(pgdat, &flags);
2170 		/* Retry only once. */
2171 		return first_deferred_pfn != ULONG_MAX;
2172 	}
2173 
2174 	/*
2175 	 * Initialize and free pages in MAX_ORDER sized increments so
2176 	 * that we can avoid introducing any issues with the buddy
2177 	 * allocator.
2178 	 */
2179 	while (spfn < epfn) {
2180 		/* update our first deferred PFN for this section */
2181 		first_deferred_pfn = spfn;
2182 
2183 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2184 		touch_nmi_watchdog();
2185 
2186 		/* We should only stop along section boundaries */
2187 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2188 			continue;
2189 
2190 		/* If our quota has been met we can stop here */
2191 		if (nr_pages >= nr_pages_needed)
2192 			break;
2193 	}
2194 
2195 	pgdat->first_deferred_pfn = spfn;
2196 	pgdat_resize_unlock(pgdat, &flags);
2197 
2198 	return nr_pages > 0;
2199 }
2200 
2201 /*
2202  * deferred_grow_zone() is __init, but it is called from
2203  * get_page_from_freelist() during early boot until deferred_pages permanently
2204  * disables this call. This is why we have refdata wrapper to avoid warning,
2205  * and to ensure that the function body gets unloaded.
2206  */
2207 static bool __ref
2208 _deferred_grow_zone(struct zone *zone, unsigned int order)
2209 {
2210 	return deferred_grow_zone(zone, order);
2211 }
2212 
2213 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2214 
2215 void __init page_alloc_init_late(void)
2216 {
2217 	struct zone *zone;
2218 	int nid;
2219 
2220 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2221 
2222 	/* There will be num_node_state(N_MEMORY) threads */
2223 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2224 	for_each_node_state(nid, N_MEMORY) {
2225 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2226 	}
2227 
2228 	/* Block until all are initialised */
2229 	wait_for_completion(&pgdat_init_all_done_comp);
2230 
2231 	/*
2232 	 * We initialized the rest of the deferred pages.  Permanently disable
2233 	 * on-demand struct page initialization.
2234 	 */
2235 	static_branch_disable(&deferred_pages);
2236 
2237 	/* Reinit limits that are based on free pages after the kernel is up */
2238 	files_maxfiles_init();
2239 #endif
2240 
2241 	buffer_init();
2242 
2243 	/* Discard memblock private memory */
2244 	memblock_discard();
2245 
2246 	for_each_node_state(nid, N_MEMORY)
2247 		shuffle_free_memory(NODE_DATA(nid));
2248 
2249 	for_each_populated_zone(zone)
2250 		set_zone_contiguous(zone);
2251 }
2252 
2253 #ifdef CONFIG_CMA
2254 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2255 void __init init_cma_reserved_pageblock(struct page *page)
2256 {
2257 	unsigned i = pageblock_nr_pages;
2258 	struct page *p = page;
2259 
2260 	do {
2261 		__ClearPageReserved(p);
2262 		set_page_count(p, 0);
2263 	} while (++p, --i);
2264 
2265 	set_pageblock_migratetype(page, MIGRATE_CMA);
2266 
2267 	if (pageblock_order >= MAX_ORDER) {
2268 		i = pageblock_nr_pages;
2269 		p = page;
2270 		do {
2271 			set_page_refcounted(p);
2272 			__free_pages(p, MAX_ORDER - 1);
2273 			p += MAX_ORDER_NR_PAGES;
2274 		} while (i -= MAX_ORDER_NR_PAGES);
2275 	} else {
2276 		set_page_refcounted(page);
2277 		__free_pages(page, pageblock_order);
2278 	}
2279 
2280 	adjust_managed_page_count(page, pageblock_nr_pages);
2281 	page_zone(page)->cma_pages += pageblock_nr_pages;
2282 }
2283 #endif
2284 
2285 /*
2286  * The order of subdivision here is critical for the IO subsystem.
2287  * Please do not alter this order without good reasons and regression
2288  * testing. Specifically, as large blocks of memory are subdivided,
2289  * the order in which smaller blocks are delivered depends on the order
2290  * they're subdivided in this function. This is the primary factor
2291  * influencing the order in which pages are delivered to the IO
2292  * subsystem according to empirical testing, and this is also justified
2293  * by considering the behavior of a buddy system containing a single
2294  * large block of memory acted on by a series of small allocations.
2295  * This behavior is a critical factor in sglist merging's success.
2296  *
2297  * -- nyc
2298  */
2299 static inline void expand(struct zone *zone, struct page *page,
2300 	int low, int high, int migratetype)
2301 {
2302 	unsigned long size = 1 << high;
2303 
2304 	while (high > low) {
2305 		high--;
2306 		size >>= 1;
2307 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2308 
2309 		/*
2310 		 * Mark as guard pages (or page), that will allow to
2311 		 * merge back to allocator when buddy will be freed.
2312 		 * Corresponding page table entries will not be touched,
2313 		 * pages will stay not present in virtual address space
2314 		 */
2315 		if (set_page_guard(zone, &page[size], high, migratetype))
2316 			continue;
2317 
2318 		add_to_free_list(&page[size], zone, high, migratetype);
2319 		set_buddy_order(&page[size], high);
2320 	}
2321 }
2322 
2323 static void check_new_page_bad(struct page *page)
2324 {
2325 	if (unlikely(page->flags & __PG_HWPOISON)) {
2326 		/* Don't complain about hwpoisoned pages */
2327 		page_mapcount_reset(page); /* remove PageBuddy */
2328 		return;
2329 	}
2330 
2331 	bad_page(page,
2332 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2333 }
2334 
2335 /*
2336  * This page is about to be returned from the page allocator
2337  */
2338 static inline int check_new_page(struct page *page)
2339 {
2340 	if (likely(page_expected_state(page,
2341 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2342 		return 0;
2343 
2344 	check_new_page_bad(page);
2345 	return 1;
2346 }
2347 
2348 #ifdef CONFIG_DEBUG_VM
2349 /*
2350  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2351  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2352  * also checked when pcp lists are refilled from the free lists.
2353  */
2354 static inline bool check_pcp_refill(struct page *page)
2355 {
2356 	if (debug_pagealloc_enabled_static())
2357 		return check_new_page(page);
2358 	else
2359 		return false;
2360 }
2361 
2362 static inline bool check_new_pcp(struct page *page)
2363 {
2364 	return check_new_page(page);
2365 }
2366 #else
2367 /*
2368  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2369  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2370  * enabled, they are also checked when being allocated from the pcp lists.
2371  */
2372 static inline bool check_pcp_refill(struct page *page)
2373 {
2374 	return check_new_page(page);
2375 }
2376 static inline bool check_new_pcp(struct page *page)
2377 {
2378 	if (debug_pagealloc_enabled_static())
2379 		return check_new_page(page);
2380 	else
2381 		return false;
2382 }
2383 #endif /* CONFIG_DEBUG_VM */
2384 
2385 static bool check_new_pages(struct page *page, unsigned int order)
2386 {
2387 	int i;
2388 	for (i = 0; i < (1 << order); i++) {
2389 		struct page *p = page + i;
2390 
2391 		if (unlikely(check_new_page(p)))
2392 			return true;
2393 	}
2394 
2395 	return false;
2396 }
2397 
2398 inline void post_alloc_hook(struct page *page, unsigned int order,
2399 				gfp_t gfp_flags)
2400 {
2401 	bool init;
2402 
2403 	set_page_private(page, 0);
2404 	set_page_refcounted(page);
2405 
2406 	arch_alloc_page(page, order);
2407 	debug_pagealloc_map_pages(page, 1 << order);
2408 
2409 	/*
2410 	 * Page unpoisoning must happen before memory initialization.
2411 	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2412 	 * allocations and the page unpoisoning code will complain.
2413 	 */
2414 	kernel_unpoison_pages(page, 1 << order);
2415 
2416 	/*
2417 	 * As memory initialization might be integrated into KASAN,
2418 	 * kasan_alloc_pages and kernel_init_free_pages must be
2419 	 * kept together to avoid discrepancies in behavior.
2420 	 */
2421 	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2422 	kasan_alloc_pages(page, order, init);
2423 	if (init && !kasan_has_integrated_init())
2424 		kernel_init_free_pages(page, 1 << order);
2425 
2426 	set_page_owner(page, order, gfp_flags);
2427 }
2428 
2429 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2430 							unsigned int alloc_flags)
2431 {
2432 	post_alloc_hook(page, order, gfp_flags);
2433 
2434 	if (order && (gfp_flags & __GFP_COMP))
2435 		prep_compound_page(page, order);
2436 
2437 	/*
2438 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2439 	 * allocate the page. The expectation is that the caller is taking
2440 	 * steps that will free more memory. The caller should avoid the page
2441 	 * being used for !PFMEMALLOC purposes.
2442 	 */
2443 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2444 		set_page_pfmemalloc(page);
2445 	else
2446 		clear_page_pfmemalloc(page);
2447 }
2448 
2449 /*
2450  * Go through the free lists for the given migratetype and remove
2451  * the smallest available page from the freelists
2452  */
2453 static __always_inline
2454 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2455 						int migratetype)
2456 {
2457 	unsigned int current_order;
2458 	struct free_area *area;
2459 	struct page *page;
2460 
2461 	/* Find a page of the appropriate size in the preferred list */
2462 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2463 		area = &(zone->free_area[current_order]);
2464 		page = get_page_from_free_area(area, migratetype);
2465 		if (!page)
2466 			continue;
2467 		del_page_from_free_list(page, zone, current_order);
2468 		expand(zone, page, order, current_order, migratetype);
2469 		set_pcppage_migratetype(page, migratetype);
2470 		return page;
2471 	}
2472 
2473 	return NULL;
2474 }
2475 
2476 
2477 /*
2478  * This array describes the order lists are fallen back to when
2479  * the free lists for the desirable migrate type are depleted
2480  */
2481 static int fallbacks[MIGRATE_TYPES][3] = {
2482 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2483 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2484 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2485 #ifdef CONFIG_CMA
2486 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2487 #endif
2488 #ifdef CONFIG_MEMORY_ISOLATION
2489 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2490 #endif
2491 };
2492 
2493 #ifdef CONFIG_CMA
2494 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2495 					unsigned int order)
2496 {
2497 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2498 }
2499 #else
2500 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2501 					unsigned int order) { return NULL; }
2502 #endif
2503 
2504 /*
2505  * Move the free pages in a range to the freelist tail of the requested type.
2506  * Note that start_page and end_pages are not aligned on a pageblock
2507  * boundary. If alignment is required, use move_freepages_block()
2508  */
2509 static int move_freepages(struct zone *zone,
2510 			  unsigned long start_pfn, unsigned long end_pfn,
2511 			  int migratetype, int *num_movable)
2512 {
2513 	struct page *page;
2514 	unsigned long pfn;
2515 	unsigned int order;
2516 	int pages_moved = 0;
2517 
2518 	for (pfn = start_pfn; pfn <= end_pfn;) {
2519 		if (!pfn_valid_within(pfn)) {
2520 			pfn++;
2521 			continue;
2522 		}
2523 
2524 		page = pfn_to_page(pfn);
2525 		if (!PageBuddy(page)) {
2526 			/*
2527 			 * We assume that pages that could be isolated for
2528 			 * migration are movable. But we don't actually try
2529 			 * isolating, as that would be expensive.
2530 			 */
2531 			if (num_movable &&
2532 					(PageLRU(page) || __PageMovable(page)))
2533 				(*num_movable)++;
2534 			pfn++;
2535 			continue;
2536 		}
2537 
2538 		/* Make sure we are not inadvertently changing nodes */
2539 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2540 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2541 
2542 		order = buddy_order(page);
2543 		move_to_free_list(page, zone, order, migratetype);
2544 		pfn += 1 << order;
2545 		pages_moved += 1 << order;
2546 	}
2547 
2548 	return pages_moved;
2549 }
2550 
2551 int move_freepages_block(struct zone *zone, struct page *page,
2552 				int migratetype, int *num_movable)
2553 {
2554 	unsigned long start_pfn, end_pfn, pfn;
2555 
2556 	if (num_movable)
2557 		*num_movable = 0;
2558 
2559 	pfn = page_to_pfn(page);
2560 	start_pfn = pfn & ~(pageblock_nr_pages - 1);
2561 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2562 
2563 	/* Do not cross zone boundaries */
2564 	if (!zone_spans_pfn(zone, start_pfn))
2565 		start_pfn = pfn;
2566 	if (!zone_spans_pfn(zone, end_pfn))
2567 		return 0;
2568 
2569 	return move_freepages(zone, start_pfn, end_pfn, migratetype,
2570 								num_movable);
2571 }
2572 
2573 static void change_pageblock_range(struct page *pageblock_page,
2574 					int start_order, int migratetype)
2575 {
2576 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2577 
2578 	while (nr_pageblocks--) {
2579 		set_pageblock_migratetype(pageblock_page, migratetype);
2580 		pageblock_page += pageblock_nr_pages;
2581 	}
2582 }
2583 
2584 /*
2585  * When we are falling back to another migratetype during allocation, try to
2586  * steal extra free pages from the same pageblocks to satisfy further
2587  * allocations, instead of polluting multiple pageblocks.
2588  *
2589  * If we are stealing a relatively large buddy page, it is likely there will
2590  * be more free pages in the pageblock, so try to steal them all. For
2591  * reclaimable and unmovable allocations, we steal regardless of page size,
2592  * as fragmentation caused by those allocations polluting movable pageblocks
2593  * is worse than movable allocations stealing from unmovable and reclaimable
2594  * pageblocks.
2595  */
2596 static bool can_steal_fallback(unsigned int order, int start_mt)
2597 {
2598 	/*
2599 	 * Leaving this order check is intended, although there is
2600 	 * relaxed order check in next check. The reason is that
2601 	 * we can actually steal whole pageblock if this condition met,
2602 	 * but, below check doesn't guarantee it and that is just heuristic
2603 	 * so could be changed anytime.
2604 	 */
2605 	if (order >= pageblock_order)
2606 		return true;
2607 
2608 	if (order >= pageblock_order / 2 ||
2609 		start_mt == MIGRATE_RECLAIMABLE ||
2610 		start_mt == MIGRATE_UNMOVABLE ||
2611 		page_group_by_mobility_disabled)
2612 		return true;
2613 
2614 	return false;
2615 }
2616 
2617 static inline bool boost_watermark(struct zone *zone)
2618 {
2619 	unsigned long max_boost;
2620 
2621 	if (!watermark_boost_factor)
2622 		return false;
2623 	/*
2624 	 * Don't bother in zones that are unlikely to produce results.
2625 	 * On small machines, including kdump capture kernels running
2626 	 * in a small area, boosting the watermark can cause an out of
2627 	 * memory situation immediately.
2628 	 */
2629 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2630 		return false;
2631 
2632 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2633 			watermark_boost_factor, 10000);
2634 
2635 	/*
2636 	 * high watermark may be uninitialised if fragmentation occurs
2637 	 * very early in boot so do not boost. We do not fall
2638 	 * through and boost by pageblock_nr_pages as failing
2639 	 * allocations that early means that reclaim is not going
2640 	 * to help and it may even be impossible to reclaim the
2641 	 * boosted watermark resulting in a hang.
2642 	 */
2643 	if (!max_boost)
2644 		return false;
2645 
2646 	max_boost = max(pageblock_nr_pages, max_boost);
2647 
2648 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2649 		max_boost);
2650 
2651 	return true;
2652 }
2653 
2654 /*
2655  * This function implements actual steal behaviour. If order is large enough,
2656  * we can steal whole pageblock. If not, we first move freepages in this
2657  * pageblock to our migratetype and determine how many already-allocated pages
2658  * are there in the pageblock with a compatible migratetype. If at least half
2659  * of pages are free or compatible, we can change migratetype of the pageblock
2660  * itself, so pages freed in the future will be put on the correct free list.
2661  */
2662 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2663 		unsigned int alloc_flags, int start_type, bool whole_block)
2664 {
2665 	unsigned int current_order = buddy_order(page);
2666 	int free_pages, movable_pages, alike_pages;
2667 	int old_block_type;
2668 
2669 	old_block_type = get_pageblock_migratetype(page);
2670 
2671 	/*
2672 	 * This can happen due to races and we want to prevent broken
2673 	 * highatomic accounting.
2674 	 */
2675 	if (is_migrate_highatomic(old_block_type))
2676 		goto single_page;
2677 
2678 	/* Take ownership for orders >= pageblock_order */
2679 	if (current_order >= pageblock_order) {
2680 		change_pageblock_range(page, current_order, start_type);
2681 		goto single_page;
2682 	}
2683 
2684 	/*
2685 	 * Boost watermarks to increase reclaim pressure to reduce the
2686 	 * likelihood of future fallbacks. Wake kswapd now as the node
2687 	 * may be balanced overall and kswapd will not wake naturally.
2688 	 */
2689 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2690 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2691 
2692 	/* We are not allowed to try stealing from the whole block */
2693 	if (!whole_block)
2694 		goto single_page;
2695 
2696 	free_pages = move_freepages_block(zone, page, start_type,
2697 						&movable_pages);
2698 	/*
2699 	 * Determine how many pages are compatible with our allocation.
2700 	 * For movable allocation, it's the number of movable pages which
2701 	 * we just obtained. For other types it's a bit more tricky.
2702 	 */
2703 	if (start_type == MIGRATE_MOVABLE) {
2704 		alike_pages = movable_pages;
2705 	} else {
2706 		/*
2707 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2708 		 * to MOVABLE pageblock, consider all non-movable pages as
2709 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2710 		 * vice versa, be conservative since we can't distinguish the
2711 		 * exact migratetype of non-movable pages.
2712 		 */
2713 		if (old_block_type == MIGRATE_MOVABLE)
2714 			alike_pages = pageblock_nr_pages
2715 						- (free_pages + movable_pages);
2716 		else
2717 			alike_pages = 0;
2718 	}
2719 
2720 	/* moving whole block can fail due to zone boundary conditions */
2721 	if (!free_pages)
2722 		goto single_page;
2723 
2724 	/*
2725 	 * If a sufficient number of pages in the block are either free or of
2726 	 * comparable migratability as our allocation, claim the whole block.
2727 	 */
2728 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2729 			page_group_by_mobility_disabled)
2730 		set_pageblock_migratetype(page, start_type);
2731 
2732 	return;
2733 
2734 single_page:
2735 	move_to_free_list(page, zone, current_order, start_type);
2736 }
2737 
2738 /*
2739  * Check whether there is a suitable fallback freepage with requested order.
2740  * If only_stealable is true, this function returns fallback_mt only if
2741  * we can steal other freepages all together. This would help to reduce
2742  * fragmentation due to mixed migratetype pages in one pageblock.
2743  */
2744 int find_suitable_fallback(struct free_area *area, unsigned int order,
2745 			int migratetype, bool only_stealable, bool *can_steal)
2746 {
2747 	int i;
2748 	int fallback_mt;
2749 
2750 	if (area->nr_free == 0)
2751 		return -1;
2752 
2753 	*can_steal = false;
2754 	for (i = 0;; i++) {
2755 		fallback_mt = fallbacks[migratetype][i];
2756 		if (fallback_mt == MIGRATE_TYPES)
2757 			break;
2758 
2759 		if (free_area_empty(area, fallback_mt))
2760 			continue;
2761 
2762 		if (can_steal_fallback(order, migratetype))
2763 			*can_steal = true;
2764 
2765 		if (!only_stealable)
2766 			return fallback_mt;
2767 
2768 		if (*can_steal)
2769 			return fallback_mt;
2770 	}
2771 
2772 	return -1;
2773 }
2774 
2775 /*
2776  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2777  * there are no empty page blocks that contain a page with a suitable order
2778  */
2779 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2780 				unsigned int alloc_order)
2781 {
2782 	int mt;
2783 	unsigned long max_managed, flags;
2784 
2785 	/*
2786 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2787 	 * Check is race-prone but harmless.
2788 	 */
2789 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2790 	if (zone->nr_reserved_highatomic >= max_managed)
2791 		return;
2792 
2793 	spin_lock_irqsave(&zone->lock, flags);
2794 
2795 	/* Recheck the nr_reserved_highatomic limit under the lock */
2796 	if (zone->nr_reserved_highatomic >= max_managed)
2797 		goto out_unlock;
2798 
2799 	/* Yoink! */
2800 	mt = get_pageblock_migratetype(page);
2801 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2802 	    && !is_migrate_cma(mt)) {
2803 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2804 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2805 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2806 	}
2807 
2808 out_unlock:
2809 	spin_unlock_irqrestore(&zone->lock, flags);
2810 }
2811 
2812 /*
2813  * Used when an allocation is about to fail under memory pressure. This
2814  * potentially hurts the reliability of high-order allocations when under
2815  * intense memory pressure but failed atomic allocations should be easier
2816  * to recover from than an OOM.
2817  *
2818  * If @force is true, try to unreserve a pageblock even though highatomic
2819  * pageblock is exhausted.
2820  */
2821 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2822 						bool force)
2823 {
2824 	struct zonelist *zonelist = ac->zonelist;
2825 	unsigned long flags;
2826 	struct zoneref *z;
2827 	struct zone *zone;
2828 	struct page *page;
2829 	int order;
2830 	bool ret;
2831 
2832 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2833 								ac->nodemask) {
2834 		/*
2835 		 * Preserve at least one pageblock unless memory pressure
2836 		 * is really high.
2837 		 */
2838 		if (!force && zone->nr_reserved_highatomic <=
2839 					pageblock_nr_pages)
2840 			continue;
2841 
2842 		spin_lock_irqsave(&zone->lock, flags);
2843 		for (order = 0; order < MAX_ORDER; order++) {
2844 			struct free_area *area = &(zone->free_area[order]);
2845 
2846 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2847 			if (!page)
2848 				continue;
2849 
2850 			/*
2851 			 * In page freeing path, migratetype change is racy so
2852 			 * we can counter several free pages in a pageblock
2853 			 * in this loop although we changed the pageblock type
2854 			 * from highatomic to ac->migratetype. So we should
2855 			 * adjust the count once.
2856 			 */
2857 			if (is_migrate_highatomic_page(page)) {
2858 				/*
2859 				 * It should never happen but changes to
2860 				 * locking could inadvertently allow a per-cpu
2861 				 * drain to add pages to MIGRATE_HIGHATOMIC
2862 				 * while unreserving so be safe and watch for
2863 				 * underflows.
2864 				 */
2865 				zone->nr_reserved_highatomic -= min(
2866 						pageblock_nr_pages,
2867 						zone->nr_reserved_highatomic);
2868 			}
2869 
2870 			/*
2871 			 * Convert to ac->migratetype and avoid the normal
2872 			 * pageblock stealing heuristics. Minimally, the caller
2873 			 * is doing the work and needs the pages. More
2874 			 * importantly, if the block was always converted to
2875 			 * MIGRATE_UNMOVABLE or another type then the number
2876 			 * of pageblocks that cannot be completely freed
2877 			 * may increase.
2878 			 */
2879 			set_pageblock_migratetype(page, ac->migratetype);
2880 			ret = move_freepages_block(zone, page, ac->migratetype,
2881 									NULL);
2882 			if (ret) {
2883 				spin_unlock_irqrestore(&zone->lock, flags);
2884 				return ret;
2885 			}
2886 		}
2887 		spin_unlock_irqrestore(&zone->lock, flags);
2888 	}
2889 
2890 	return false;
2891 }
2892 
2893 /*
2894  * Try finding a free buddy page on the fallback list and put it on the free
2895  * list of requested migratetype, possibly along with other pages from the same
2896  * block, depending on fragmentation avoidance heuristics. Returns true if
2897  * fallback was found so that __rmqueue_smallest() can grab it.
2898  *
2899  * The use of signed ints for order and current_order is a deliberate
2900  * deviation from the rest of this file, to make the for loop
2901  * condition simpler.
2902  */
2903 static __always_inline bool
2904 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2905 						unsigned int alloc_flags)
2906 {
2907 	struct free_area *area;
2908 	int current_order;
2909 	int min_order = order;
2910 	struct page *page;
2911 	int fallback_mt;
2912 	bool can_steal;
2913 
2914 	/*
2915 	 * Do not steal pages from freelists belonging to other pageblocks
2916 	 * i.e. orders < pageblock_order. If there are no local zones free,
2917 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2918 	 */
2919 	if (alloc_flags & ALLOC_NOFRAGMENT)
2920 		min_order = pageblock_order;
2921 
2922 	/*
2923 	 * Find the largest available free page in the other list. This roughly
2924 	 * approximates finding the pageblock with the most free pages, which
2925 	 * would be too costly to do exactly.
2926 	 */
2927 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2928 				--current_order) {
2929 		area = &(zone->free_area[current_order]);
2930 		fallback_mt = find_suitable_fallback(area, current_order,
2931 				start_migratetype, false, &can_steal);
2932 		if (fallback_mt == -1)
2933 			continue;
2934 
2935 		/*
2936 		 * We cannot steal all free pages from the pageblock and the
2937 		 * requested migratetype is movable. In that case it's better to
2938 		 * steal and split the smallest available page instead of the
2939 		 * largest available page, because even if the next movable
2940 		 * allocation falls back into a different pageblock than this
2941 		 * one, it won't cause permanent fragmentation.
2942 		 */
2943 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2944 					&& current_order > order)
2945 			goto find_smallest;
2946 
2947 		goto do_steal;
2948 	}
2949 
2950 	return false;
2951 
2952 find_smallest:
2953 	for (current_order = order; current_order < MAX_ORDER;
2954 							current_order++) {
2955 		area = &(zone->free_area[current_order]);
2956 		fallback_mt = find_suitable_fallback(area, current_order,
2957 				start_migratetype, false, &can_steal);
2958 		if (fallback_mt != -1)
2959 			break;
2960 	}
2961 
2962 	/*
2963 	 * This should not happen - we already found a suitable fallback
2964 	 * when looking for the largest page.
2965 	 */
2966 	VM_BUG_ON(current_order == MAX_ORDER);
2967 
2968 do_steal:
2969 	page = get_page_from_free_area(area, fallback_mt);
2970 
2971 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2972 								can_steal);
2973 
2974 	trace_mm_page_alloc_extfrag(page, order, current_order,
2975 		start_migratetype, fallback_mt);
2976 
2977 	return true;
2978 
2979 }
2980 
2981 /*
2982  * Do the hard work of removing an element from the buddy allocator.
2983  * Call me with the zone->lock already held.
2984  */
2985 static __always_inline struct page *
2986 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2987 						unsigned int alloc_flags)
2988 {
2989 	struct page *page;
2990 
2991 	if (IS_ENABLED(CONFIG_CMA)) {
2992 		/*
2993 		 * Balance movable allocations between regular and CMA areas by
2994 		 * allocating from CMA when over half of the zone's free memory
2995 		 * is in the CMA area.
2996 		 */
2997 		if (alloc_flags & ALLOC_CMA &&
2998 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2999 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
3000 			page = __rmqueue_cma_fallback(zone, order);
3001 			if (page)
3002 				goto out;
3003 		}
3004 	}
3005 retry:
3006 	page = __rmqueue_smallest(zone, order, migratetype);
3007 	if (unlikely(!page)) {
3008 		if (alloc_flags & ALLOC_CMA)
3009 			page = __rmqueue_cma_fallback(zone, order);
3010 
3011 		if (!page && __rmqueue_fallback(zone, order, migratetype,
3012 								alloc_flags))
3013 			goto retry;
3014 	}
3015 out:
3016 	if (page)
3017 		trace_mm_page_alloc_zone_locked(page, order, migratetype);
3018 	return page;
3019 }
3020 
3021 /*
3022  * Obtain a specified number of elements from the buddy allocator, all under
3023  * a single hold of the lock, for efficiency.  Add them to the supplied list.
3024  * Returns the number of new pages which were placed at *list.
3025  */
3026 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3027 			unsigned long count, struct list_head *list,
3028 			int migratetype, unsigned int alloc_flags)
3029 {
3030 	int i, allocated = 0;
3031 
3032 	/*
3033 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
3034 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
3035 	 */
3036 	spin_lock(&zone->lock);
3037 	for (i = 0; i < count; ++i) {
3038 		struct page *page = __rmqueue(zone, order, migratetype,
3039 								alloc_flags);
3040 		if (unlikely(page == NULL))
3041 			break;
3042 
3043 		if (unlikely(check_pcp_refill(page)))
3044 			continue;
3045 
3046 		/*
3047 		 * Split buddy pages returned by expand() are received here in
3048 		 * physical page order. The page is added to the tail of
3049 		 * caller's list. From the callers perspective, the linked list
3050 		 * is ordered by page number under some conditions. This is
3051 		 * useful for IO devices that can forward direction from the
3052 		 * head, thus also in the physical page order. This is useful
3053 		 * for IO devices that can merge IO requests if the physical
3054 		 * pages are ordered properly.
3055 		 */
3056 		list_add_tail(&page->lru, list);
3057 		allocated++;
3058 		if (is_migrate_cma(get_pcppage_migratetype(page)))
3059 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3060 					      -(1 << order));
3061 	}
3062 
3063 	/*
3064 	 * i pages were removed from the buddy list even if some leak due
3065 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3066 	 * on i. Do not confuse with 'allocated' which is the number of
3067 	 * pages added to the pcp list.
3068 	 */
3069 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3070 	spin_unlock(&zone->lock);
3071 	return allocated;
3072 }
3073 
3074 #ifdef CONFIG_NUMA
3075 /*
3076  * Called from the vmstat counter updater to drain pagesets of this
3077  * currently executing processor on remote nodes after they have
3078  * expired.
3079  *
3080  * Note that this function must be called with the thread pinned to
3081  * a single processor.
3082  */
3083 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3084 {
3085 	unsigned long flags;
3086 	int to_drain, batch;
3087 
3088 	local_lock_irqsave(&pagesets.lock, flags);
3089 	batch = READ_ONCE(pcp->batch);
3090 	to_drain = min(pcp->count, batch);
3091 	if (to_drain > 0)
3092 		free_pcppages_bulk(zone, to_drain, pcp);
3093 	local_unlock_irqrestore(&pagesets.lock, flags);
3094 }
3095 #endif
3096 
3097 /*
3098  * Drain pcplists of the indicated processor and zone.
3099  *
3100  * The processor must either be the current processor and the
3101  * thread pinned to the current processor or a processor that
3102  * is not online.
3103  */
3104 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3105 {
3106 	unsigned long flags;
3107 	struct per_cpu_pages *pcp;
3108 
3109 	local_lock_irqsave(&pagesets.lock, flags);
3110 
3111 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3112 	if (pcp->count)
3113 		free_pcppages_bulk(zone, pcp->count, pcp);
3114 
3115 	local_unlock_irqrestore(&pagesets.lock, flags);
3116 }
3117 
3118 /*
3119  * Drain pcplists of all zones on the indicated processor.
3120  *
3121  * The processor must either be the current processor and the
3122  * thread pinned to the current processor or a processor that
3123  * is not online.
3124  */
3125 static void drain_pages(unsigned int cpu)
3126 {
3127 	struct zone *zone;
3128 
3129 	for_each_populated_zone(zone) {
3130 		drain_pages_zone(cpu, zone);
3131 	}
3132 }
3133 
3134 /*
3135  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3136  *
3137  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3138  * the single zone's pages.
3139  */
3140 void drain_local_pages(struct zone *zone)
3141 {
3142 	int cpu = smp_processor_id();
3143 
3144 	if (zone)
3145 		drain_pages_zone(cpu, zone);
3146 	else
3147 		drain_pages(cpu);
3148 }
3149 
3150 static void drain_local_pages_wq(struct work_struct *work)
3151 {
3152 	struct pcpu_drain *drain;
3153 
3154 	drain = container_of(work, struct pcpu_drain, work);
3155 
3156 	/*
3157 	 * drain_all_pages doesn't use proper cpu hotplug protection so
3158 	 * we can race with cpu offline when the WQ can move this from
3159 	 * a cpu pinned worker to an unbound one. We can operate on a different
3160 	 * cpu which is alright but we also have to make sure to not move to
3161 	 * a different one.
3162 	 */
3163 	preempt_disable();
3164 	drain_local_pages(drain->zone);
3165 	preempt_enable();
3166 }
3167 
3168 /*
3169  * The implementation of drain_all_pages(), exposing an extra parameter to
3170  * drain on all cpus.
3171  *
3172  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3173  * not empty. The check for non-emptiness can however race with a free to
3174  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3175  * that need the guarantee that every CPU has drained can disable the
3176  * optimizing racy check.
3177  */
3178 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3179 {
3180 	int cpu;
3181 
3182 	/*
3183 	 * Allocate in the BSS so we won't require allocation in
3184 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3185 	 */
3186 	static cpumask_t cpus_with_pcps;
3187 
3188 	/*
3189 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
3190 	 * initialized.
3191 	 */
3192 	if (WARN_ON_ONCE(!mm_percpu_wq))
3193 		return;
3194 
3195 	/*
3196 	 * Do not drain if one is already in progress unless it's specific to
3197 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3198 	 * the drain to be complete when the call returns.
3199 	 */
3200 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3201 		if (!zone)
3202 			return;
3203 		mutex_lock(&pcpu_drain_mutex);
3204 	}
3205 
3206 	/*
3207 	 * We don't care about racing with CPU hotplug event
3208 	 * as offline notification will cause the notified
3209 	 * cpu to drain that CPU pcps and on_each_cpu_mask
3210 	 * disables preemption as part of its processing
3211 	 */
3212 	for_each_online_cpu(cpu) {
3213 		struct per_cpu_pages *pcp;
3214 		struct zone *z;
3215 		bool has_pcps = false;
3216 
3217 		if (force_all_cpus) {
3218 			/*
3219 			 * The pcp.count check is racy, some callers need a
3220 			 * guarantee that no cpu is missed.
3221 			 */
3222 			has_pcps = true;
3223 		} else if (zone) {
3224 			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3225 			if (pcp->count)
3226 				has_pcps = true;
3227 		} else {
3228 			for_each_populated_zone(z) {
3229 				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3230 				if (pcp->count) {
3231 					has_pcps = true;
3232 					break;
3233 				}
3234 			}
3235 		}
3236 
3237 		if (has_pcps)
3238 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3239 		else
3240 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3241 	}
3242 
3243 	for_each_cpu(cpu, &cpus_with_pcps) {
3244 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3245 
3246 		drain->zone = zone;
3247 		INIT_WORK(&drain->work, drain_local_pages_wq);
3248 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3249 	}
3250 	for_each_cpu(cpu, &cpus_with_pcps)
3251 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3252 
3253 	mutex_unlock(&pcpu_drain_mutex);
3254 }
3255 
3256 /*
3257  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3258  *
3259  * When zone parameter is non-NULL, spill just the single zone's pages.
3260  *
3261  * Note that this can be extremely slow as the draining happens in a workqueue.
3262  */
3263 void drain_all_pages(struct zone *zone)
3264 {
3265 	__drain_all_pages(zone, false);
3266 }
3267 
3268 #ifdef CONFIG_HIBERNATION
3269 
3270 /*
3271  * Touch the watchdog for every WD_PAGE_COUNT pages.
3272  */
3273 #define WD_PAGE_COUNT	(128*1024)
3274 
3275 void mark_free_pages(struct zone *zone)
3276 {
3277 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3278 	unsigned long flags;
3279 	unsigned int order, t;
3280 	struct page *page;
3281 
3282 	if (zone_is_empty(zone))
3283 		return;
3284 
3285 	spin_lock_irqsave(&zone->lock, flags);
3286 
3287 	max_zone_pfn = zone_end_pfn(zone);
3288 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3289 		if (pfn_valid(pfn)) {
3290 			page = pfn_to_page(pfn);
3291 
3292 			if (!--page_count) {
3293 				touch_nmi_watchdog();
3294 				page_count = WD_PAGE_COUNT;
3295 			}
3296 
3297 			if (page_zone(page) != zone)
3298 				continue;
3299 
3300 			if (!swsusp_page_is_forbidden(page))
3301 				swsusp_unset_page_free(page);
3302 		}
3303 
3304 	for_each_migratetype_order(order, t) {
3305 		list_for_each_entry(page,
3306 				&zone->free_area[order].free_list[t], lru) {
3307 			unsigned long i;
3308 
3309 			pfn = page_to_pfn(page);
3310 			for (i = 0; i < (1UL << order); i++) {
3311 				if (!--page_count) {
3312 					touch_nmi_watchdog();
3313 					page_count = WD_PAGE_COUNT;
3314 				}
3315 				swsusp_set_page_free(pfn_to_page(pfn + i));
3316 			}
3317 		}
3318 	}
3319 	spin_unlock_irqrestore(&zone->lock, flags);
3320 }
3321 #endif /* CONFIG_PM */
3322 
3323 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3324 							unsigned int order)
3325 {
3326 	int migratetype;
3327 
3328 	if (!free_pcp_prepare(page, order))
3329 		return false;
3330 
3331 	migratetype = get_pfnblock_migratetype(page, pfn);
3332 	set_pcppage_migratetype(page, migratetype);
3333 	return true;
3334 }
3335 
3336 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
3337 {
3338 	int min_nr_free, max_nr_free;
3339 
3340 	/* Check for PCP disabled or boot pageset */
3341 	if (unlikely(high < batch))
3342 		return 1;
3343 
3344 	/* Leave at least pcp->batch pages on the list */
3345 	min_nr_free = batch;
3346 	max_nr_free = high - batch;
3347 
3348 	/*
3349 	 * Double the number of pages freed each time there is subsequent
3350 	 * freeing of pages without any allocation.
3351 	 */
3352 	batch <<= pcp->free_factor;
3353 	if (batch < max_nr_free)
3354 		pcp->free_factor++;
3355 	batch = clamp(batch, min_nr_free, max_nr_free);
3356 
3357 	return batch;
3358 }
3359 
3360 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
3361 {
3362 	int high = READ_ONCE(pcp->high);
3363 
3364 	if (unlikely(!high))
3365 		return 0;
3366 
3367 	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3368 		return high;
3369 
3370 	/*
3371 	 * If reclaim is active, limit the number of pages that can be
3372 	 * stored on pcp lists
3373 	 */
3374 	return min(READ_ONCE(pcp->batch) << 2, high);
3375 }
3376 
3377 static void free_unref_page_commit(struct page *page, unsigned long pfn,
3378 				   int migratetype, unsigned int order)
3379 {
3380 	struct zone *zone = page_zone(page);
3381 	struct per_cpu_pages *pcp;
3382 	int high;
3383 	int pindex;
3384 
3385 	__count_vm_event(PGFREE);
3386 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3387 	pindex = order_to_pindex(migratetype, order);
3388 	list_add(&page->lru, &pcp->lists[pindex]);
3389 	pcp->count += 1 << order;
3390 	high = nr_pcp_high(pcp, zone);
3391 	if (pcp->count >= high) {
3392 		int batch = READ_ONCE(pcp->batch);
3393 
3394 		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
3395 	}
3396 }
3397 
3398 /*
3399  * Free a pcp page
3400  */
3401 void free_unref_page(struct page *page, unsigned int order)
3402 {
3403 	unsigned long flags;
3404 	unsigned long pfn = page_to_pfn(page);
3405 	int migratetype;
3406 
3407 	if (!free_unref_page_prepare(page, pfn, order))
3408 		return;
3409 
3410 	/*
3411 	 * We only track unmovable, reclaimable and movable on pcp lists.
3412 	 * Place ISOLATE pages on the isolated list because they are being
3413 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
3414 	 * areas back if necessary. Otherwise, we may have to free
3415 	 * excessively into the page allocator
3416 	 */
3417 	migratetype = get_pcppage_migratetype(page);
3418 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3419 		if (unlikely(is_migrate_isolate(migratetype))) {
3420 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3421 			return;
3422 		}
3423 		migratetype = MIGRATE_MOVABLE;
3424 	}
3425 
3426 	local_lock_irqsave(&pagesets.lock, flags);
3427 	free_unref_page_commit(page, pfn, migratetype, order);
3428 	local_unlock_irqrestore(&pagesets.lock, flags);
3429 }
3430 
3431 /*
3432  * Free a list of 0-order pages
3433  */
3434 void free_unref_page_list(struct list_head *list)
3435 {
3436 	struct page *page, *next;
3437 	unsigned long flags, pfn;
3438 	int batch_count = 0;
3439 	int migratetype;
3440 
3441 	/* Prepare pages for freeing */
3442 	list_for_each_entry_safe(page, next, list, lru) {
3443 		pfn = page_to_pfn(page);
3444 		if (!free_unref_page_prepare(page, pfn, 0))
3445 			list_del(&page->lru);
3446 
3447 		/*
3448 		 * Free isolated pages directly to the allocator, see
3449 		 * comment in free_unref_page.
3450 		 */
3451 		migratetype = get_pcppage_migratetype(page);
3452 		if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3453 			if (unlikely(is_migrate_isolate(migratetype))) {
3454 				list_del(&page->lru);
3455 				free_one_page(page_zone(page), page, pfn, 0,
3456 							migratetype, FPI_NONE);
3457 				continue;
3458 			}
3459 
3460 			/*
3461 			 * Non-isolated types over MIGRATE_PCPTYPES get added
3462 			 * to the MIGRATE_MOVABLE pcp list.
3463 			 */
3464 			set_pcppage_migratetype(page, MIGRATE_MOVABLE);
3465 		}
3466 
3467 		set_page_private(page, pfn);
3468 	}
3469 
3470 	local_lock_irqsave(&pagesets.lock, flags);
3471 	list_for_each_entry_safe(page, next, list, lru) {
3472 		pfn = page_private(page);
3473 		set_page_private(page, 0);
3474 		migratetype = get_pcppage_migratetype(page);
3475 		trace_mm_page_free_batched(page);
3476 		free_unref_page_commit(page, pfn, migratetype, 0);
3477 
3478 		/*
3479 		 * Guard against excessive IRQ disabled times when we get
3480 		 * a large list of pages to free.
3481 		 */
3482 		if (++batch_count == SWAP_CLUSTER_MAX) {
3483 			local_unlock_irqrestore(&pagesets.lock, flags);
3484 			batch_count = 0;
3485 			local_lock_irqsave(&pagesets.lock, flags);
3486 		}
3487 	}
3488 	local_unlock_irqrestore(&pagesets.lock, flags);
3489 }
3490 
3491 /*
3492  * split_page takes a non-compound higher-order page, and splits it into
3493  * n (1<<order) sub-pages: page[0..n]
3494  * Each sub-page must be freed individually.
3495  *
3496  * Note: this is probably too low level an operation for use in drivers.
3497  * Please consult with lkml before using this in your driver.
3498  */
3499 void split_page(struct page *page, unsigned int order)
3500 {
3501 	int i;
3502 
3503 	VM_BUG_ON_PAGE(PageCompound(page), page);
3504 	VM_BUG_ON_PAGE(!page_count(page), page);
3505 
3506 	for (i = 1; i < (1 << order); i++)
3507 		set_page_refcounted(page + i);
3508 	split_page_owner(page, 1 << order);
3509 	split_page_memcg(page, 1 << order);
3510 }
3511 EXPORT_SYMBOL_GPL(split_page);
3512 
3513 int __isolate_free_page(struct page *page, unsigned int order)
3514 {
3515 	unsigned long watermark;
3516 	struct zone *zone;
3517 	int mt;
3518 
3519 	BUG_ON(!PageBuddy(page));
3520 
3521 	zone = page_zone(page);
3522 	mt = get_pageblock_migratetype(page);
3523 
3524 	if (!is_migrate_isolate(mt)) {
3525 		/*
3526 		 * Obey watermarks as if the page was being allocated. We can
3527 		 * emulate a high-order watermark check with a raised order-0
3528 		 * watermark, because we already know our high-order page
3529 		 * exists.
3530 		 */
3531 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3532 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3533 			return 0;
3534 
3535 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3536 	}
3537 
3538 	/* Remove page from free list */
3539 
3540 	del_page_from_free_list(page, zone, order);
3541 
3542 	/*
3543 	 * Set the pageblock if the isolated page is at least half of a
3544 	 * pageblock
3545 	 */
3546 	if (order >= pageblock_order - 1) {
3547 		struct page *endpage = page + (1 << order) - 1;
3548 		for (; page < endpage; page += pageblock_nr_pages) {
3549 			int mt = get_pageblock_migratetype(page);
3550 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3551 			    && !is_migrate_highatomic(mt))
3552 				set_pageblock_migratetype(page,
3553 							  MIGRATE_MOVABLE);
3554 		}
3555 	}
3556 
3557 
3558 	return 1UL << order;
3559 }
3560 
3561 /**
3562  * __putback_isolated_page - Return a now-isolated page back where we got it
3563  * @page: Page that was isolated
3564  * @order: Order of the isolated page
3565  * @mt: The page's pageblock's migratetype
3566  *
3567  * This function is meant to return a page pulled from the free lists via
3568  * __isolate_free_page back to the free lists they were pulled from.
3569  */
3570 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3571 {
3572 	struct zone *zone = page_zone(page);
3573 
3574 	/* zone lock should be held when this function is called */
3575 	lockdep_assert_held(&zone->lock);
3576 
3577 	/* Return isolated page to tail of freelist. */
3578 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3579 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3580 }
3581 
3582 /*
3583  * Update NUMA hit/miss statistics
3584  *
3585  * Must be called with interrupts disabled.
3586  */
3587 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3588 				   long nr_account)
3589 {
3590 #ifdef CONFIG_NUMA
3591 	enum numa_stat_item local_stat = NUMA_LOCAL;
3592 
3593 	/* skip numa counters update if numa stats is disabled */
3594 	if (!static_branch_likely(&vm_numa_stat_key))
3595 		return;
3596 
3597 	if (zone_to_nid(z) != numa_node_id())
3598 		local_stat = NUMA_OTHER;
3599 
3600 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3601 		__count_numa_events(z, NUMA_HIT, nr_account);
3602 	else {
3603 		__count_numa_events(z, NUMA_MISS, nr_account);
3604 		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3605 	}
3606 	__count_numa_events(z, local_stat, nr_account);
3607 #endif
3608 }
3609 
3610 /* Remove page from the per-cpu list, caller must protect the list */
3611 static inline
3612 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3613 			int migratetype,
3614 			unsigned int alloc_flags,
3615 			struct per_cpu_pages *pcp,
3616 			struct list_head *list)
3617 {
3618 	struct page *page;
3619 
3620 	do {
3621 		if (list_empty(list)) {
3622 			int batch = READ_ONCE(pcp->batch);
3623 			int alloced;
3624 
3625 			/*
3626 			 * Scale batch relative to order if batch implies
3627 			 * free pages can be stored on the PCP. Batch can
3628 			 * be 1 for small zones or for boot pagesets which
3629 			 * should never store free pages as the pages may
3630 			 * belong to arbitrary zones.
3631 			 */
3632 			if (batch > 1)
3633 				batch = max(batch >> order, 2);
3634 			alloced = rmqueue_bulk(zone, order,
3635 					batch, list,
3636 					migratetype, alloc_flags);
3637 
3638 			pcp->count += alloced << order;
3639 			if (unlikely(list_empty(list)))
3640 				return NULL;
3641 		}
3642 
3643 		page = list_first_entry(list, struct page, lru);
3644 		list_del(&page->lru);
3645 		pcp->count -= 1 << order;
3646 	} while (check_new_pcp(page));
3647 
3648 	return page;
3649 }
3650 
3651 /* Lock and remove page from the per-cpu list */
3652 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3653 			struct zone *zone, unsigned int order,
3654 			gfp_t gfp_flags, int migratetype,
3655 			unsigned int alloc_flags)
3656 {
3657 	struct per_cpu_pages *pcp;
3658 	struct list_head *list;
3659 	struct page *page;
3660 	unsigned long flags;
3661 
3662 	local_lock_irqsave(&pagesets.lock, flags);
3663 
3664 	/*
3665 	 * On allocation, reduce the number of pages that are batch freed.
3666 	 * See nr_pcp_free() where free_factor is increased for subsequent
3667 	 * frees.
3668 	 */
3669 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3670 	pcp->free_factor >>= 1;
3671 	list = &pcp->lists[order_to_pindex(migratetype, order)];
3672 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3673 	local_unlock_irqrestore(&pagesets.lock, flags);
3674 	if (page) {
3675 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3676 		zone_statistics(preferred_zone, zone, 1);
3677 	}
3678 	return page;
3679 }
3680 
3681 /*
3682  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3683  */
3684 static inline
3685 struct page *rmqueue(struct zone *preferred_zone,
3686 			struct zone *zone, unsigned int order,
3687 			gfp_t gfp_flags, unsigned int alloc_flags,
3688 			int migratetype)
3689 {
3690 	unsigned long flags;
3691 	struct page *page;
3692 
3693 	if (likely(pcp_allowed_order(order))) {
3694 		/*
3695 		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3696 		 * we need to skip it when CMA area isn't allowed.
3697 		 */
3698 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3699 				migratetype != MIGRATE_MOVABLE) {
3700 			page = rmqueue_pcplist(preferred_zone, zone, order,
3701 					gfp_flags, migratetype, alloc_flags);
3702 			goto out;
3703 		}
3704 	}
3705 
3706 	/*
3707 	 * We most definitely don't want callers attempting to
3708 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3709 	 */
3710 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3711 	spin_lock_irqsave(&zone->lock, flags);
3712 
3713 	do {
3714 		page = NULL;
3715 		/*
3716 		 * order-0 request can reach here when the pcplist is skipped
3717 		 * due to non-CMA allocation context. HIGHATOMIC area is
3718 		 * reserved for high-order atomic allocation, so order-0
3719 		 * request should skip it.
3720 		 */
3721 		if (order > 0 && alloc_flags & ALLOC_HARDER) {
3722 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3723 			if (page)
3724 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3725 		}
3726 		if (!page)
3727 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3728 	} while (page && check_new_pages(page, order));
3729 	if (!page)
3730 		goto failed;
3731 
3732 	__mod_zone_freepage_state(zone, -(1 << order),
3733 				  get_pcppage_migratetype(page));
3734 	spin_unlock_irqrestore(&zone->lock, flags);
3735 
3736 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3737 	zone_statistics(preferred_zone, zone, 1);
3738 
3739 out:
3740 	/* Separate test+clear to avoid unnecessary atomics */
3741 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3742 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3743 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3744 	}
3745 
3746 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3747 	return page;
3748 
3749 failed:
3750 	spin_unlock_irqrestore(&zone->lock, flags);
3751 	return NULL;
3752 }
3753 
3754 #ifdef CONFIG_FAIL_PAGE_ALLOC
3755 
3756 static struct {
3757 	struct fault_attr attr;
3758 
3759 	bool ignore_gfp_highmem;
3760 	bool ignore_gfp_reclaim;
3761 	u32 min_order;
3762 } fail_page_alloc = {
3763 	.attr = FAULT_ATTR_INITIALIZER,
3764 	.ignore_gfp_reclaim = true,
3765 	.ignore_gfp_highmem = true,
3766 	.min_order = 1,
3767 };
3768 
3769 static int __init setup_fail_page_alloc(char *str)
3770 {
3771 	return setup_fault_attr(&fail_page_alloc.attr, str);
3772 }
3773 __setup("fail_page_alloc=", setup_fail_page_alloc);
3774 
3775 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3776 {
3777 	if (order < fail_page_alloc.min_order)
3778 		return false;
3779 	if (gfp_mask & __GFP_NOFAIL)
3780 		return false;
3781 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3782 		return false;
3783 	if (fail_page_alloc.ignore_gfp_reclaim &&
3784 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3785 		return false;
3786 
3787 	return should_fail(&fail_page_alloc.attr, 1 << order);
3788 }
3789 
3790 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3791 
3792 static int __init fail_page_alloc_debugfs(void)
3793 {
3794 	umode_t mode = S_IFREG | 0600;
3795 	struct dentry *dir;
3796 
3797 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3798 					&fail_page_alloc.attr);
3799 
3800 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3801 			    &fail_page_alloc.ignore_gfp_reclaim);
3802 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3803 			    &fail_page_alloc.ignore_gfp_highmem);
3804 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3805 
3806 	return 0;
3807 }
3808 
3809 late_initcall(fail_page_alloc_debugfs);
3810 
3811 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3812 
3813 #else /* CONFIG_FAIL_PAGE_ALLOC */
3814 
3815 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3816 {
3817 	return false;
3818 }
3819 
3820 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3821 
3822 static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3823 {
3824 	return __should_fail_alloc_page(gfp_mask, order);
3825 }
3826 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3827 
3828 static inline long __zone_watermark_unusable_free(struct zone *z,
3829 				unsigned int order, unsigned int alloc_flags)
3830 {
3831 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3832 	long unusable_free = (1 << order) - 1;
3833 
3834 	/*
3835 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3836 	 * the high-atomic reserves. This will over-estimate the size of the
3837 	 * atomic reserve but it avoids a search.
3838 	 */
3839 	if (likely(!alloc_harder))
3840 		unusable_free += z->nr_reserved_highatomic;
3841 
3842 #ifdef CONFIG_CMA
3843 	/* If allocation can't use CMA areas don't use free CMA pages */
3844 	if (!(alloc_flags & ALLOC_CMA))
3845 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3846 #endif
3847 
3848 	return unusable_free;
3849 }
3850 
3851 /*
3852  * Return true if free base pages are above 'mark'. For high-order checks it
3853  * will return true of the order-0 watermark is reached and there is at least
3854  * one free page of a suitable size. Checking now avoids taking the zone lock
3855  * to check in the allocation paths if no pages are free.
3856  */
3857 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3858 			 int highest_zoneidx, unsigned int alloc_flags,
3859 			 long free_pages)
3860 {
3861 	long min = mark;
3862 	int o;
3863 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3864 
3865 	/* free_pages may go negative - that's OK */
3866 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3867 
3868 	if (alloc_flags & ALLOC_HIGH)
3869 		min -= min / 2;
3870 
3871 	if (unlikely(alloc_harder)) {
3872 		/*
3873 		 * OOM victims can try even harder than normal ALLOC_HARDER
3874 		 * users on the grounds that it's definitely going to be in
3875 		 * the exit path shortly and free memory. Any allocation it
3876 		 * makes during the free path will be small and short-lived.
3877 		 */
3878 		if (alloc_flags & ALLOC_OOM)
3879 			min -= min / 2;
3880 		else
3881 			min -= min / 4;
3882 	}
3883 
3884 	/*
3885 	 * Check watermarks for an order-0 allocation request. If these
3886 	 * are not met, then a high-order request also cannot go ahead
3887 	 * even if a suitable page happened to be free.
3888 	 */
3889 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3890 		return false;
3891 
3892 	/* If this is an order-0 request then the watermark is fine */
3893 	if (!order)
3894 		return true;
3895 
3896 	/* For a high-order request, check at least one suitable page is free */
3897 	for (o = order; o < MAX_ORDER; o++) {
3898 		struct free_area *area = &z->free_area[o];
3899 		int mt;
3900 
3901 		if (!area->nr_free)
3902 			continue;
3903 
3904 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3905 			if (!free_area_empty(area, mt))
3906 				return true;
3907 		}
3908 
3909 #ifdef CONFIG_CMA
3910 		if ((alloc_flags & ALLOC_CMA) &&
3911 		    !free_area_empty(area, MIGRATE_CMA)) {
3912 			return true;
3913 		}
3914 #endif
3915 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3916 			return true;
3917 	}
3918 	return false;
3919 }
3920 
3921 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3922 		      int highest_zoneidx, unsigned int alloc_flags)
3923 {
3924 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3925 					zone_page_state(z, NR_FREE_PAGES));
3926 }
3927 
3928 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3929 				unsigned long mark, int highest_zoneidx,
3930 				unsigned int alloc_flags, gfp_t gfp_mask)
3931 {
3932 	long free_pages;
3933 
3934 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3935 
3936 	/*
3937 	 * Fast check for order-0 only. If this fails then the reserves
3938 	 * need to be calculated.
3939 	 */
3940 	if (!order) {
3941 		long fast_free;
3942 
3943 		fast_free = free_pages;
3944 		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3945 		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3946 			return true;
3947 	}
3948 
3949 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3950 					free_pages))
3951 		return true;
3952 	/*
3953 	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3954 	 * when checking the min watermark. The min watermark is the
3955 	 * point where boosting is ignored so that kswapd is woken up
3956 	 * when below the low watermark.
3957 	 */
3958 	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3959 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3960 		mark = z->_watermark[WMARK_MIN];
3961 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3962 					alloc_flags, free_pages);
3963 	}
3964 
3965 	return false;
3966 }
3967 
3968 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3969 			unsigned long mark, int highest_zoneidx)
3970 {
3971 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3972 
3973 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3974 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3975 
3976 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3977 								free_pages);
3978 }
3979 
3980 #ifdef CONFIG_NUMA
3981 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3982 {
3983 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3984 				node_reclaim_distance;
3985 }
3986 #else	/* CONFIG_NUMA */
3987 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3988 {
3989 	return true;
3990 }
3991 #endif	/* CONFIG_NUMA */
3992 
3993 /*
3994  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3995  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3996  * premature use of a lower zone may cause lowmem pressure problems that
3997  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3998  * probably too small. It only makes sense to spread allocations to avoid
3999  * fragmentation between the Normal and DMA32 zones.
4000  */
4001 static inline unsigned int
4002 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4003 {
4004 	unsigned int alloc_flags;
4005 
4006 	/*
4007 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4008 	 * to save a branch.
4009 	 */
4010 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4011 
4012 #ifdef CONFIG_ZONE_DMA32
4013 	if (!zone)
4014 		return alloc_flags;
4015 
4016 	if (zone_idx(zone) != ZONE_NORMAL)
4017 		return alloc_flags;
4018 
4019 	/*
4020 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4021 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4022 	 * on UMA that if Normal is populated then so is DMA32.
4023 	 */
4024 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4025 	if (nr_online_nodes > 1 && !populated_zone(--zone))
4026 		return alloc_flags;
4027 
4028 	alloc_flags |= ALLOC_NOFRAGMENT;
4029 #endif /* CONFIG_ZONE_DMA32 */
4030 	return alloc_flags;
4031 }
4032 
4033 /* Must be called after current_gfp_context() which can change gfp_mask */
4034 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4035 						  unsigned int alloc_flags)
4036 {
4037 #ifdef CONFIG_CMA
4038 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4039 		alloc_flags |= ALLOC_CMA;
4040 #endif
4041 	return alloc_flags;
4042 }
4043 
4044 /*
4045  * get_page_from_freelist goes through the zonelist trying to allocate
4046  * a page.
4047  */
4048 static struct page *
4049 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4050 						const struct alloc_context *ac)
4051 {
4052 	struct zoneref *z;
4053 	struct zone *zone;
4054 	struct pglist_data *last_pgdat_dirty_limit = NULL;
4055 	bool no_fallback;
4056 
4057 retry:
4058 	/*
4059 	 * Scan zonelist, looking for a zone with enough free.
4060 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
4061 	 */
4062 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4063 	z = ac->preferred_zoneref;
4064 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4065 					ac->nodemask) {
4066 		struct page *page;
4067 		unsigned long mark;
4068 
4069 		if (cpusets_enabled() &&
4070 			(alloc_flags & ALLOC_CPUSET) &&
4071 			!__cpuset_zone_allowed(zone, gfp_mask))
4072 				continue;
4073 		/*
4074 		 * When allocating a page cache page for writing, we
4075 		 * want to get it from a node that is within its dirty
4076 		 * limit, such that no single node holds more than its
4077 		 * proportional share of globally allowed dirty pages.
4078 		 * The dirty limits take into account the node's
4079 		 * lowmem reserves and high watermark so that kswapd
4080 		 * should be able to balance it without having to
4081 		 * write pages from its LRU list.
4082 		 *
4083 		 * XXX: For now, allow allocations to potentially
4084 		 * exceed the per-node dirty limit in the slowpath
4085 		 * (spread_dirty_pages unset) before going into reclaim,
4086 		 * which is important when on a NUMA setup the allowed
4087 		 * nodes are together not big enough to reach the
4088 		 * global limit.  The proper fix for these situations
4089 		 * will require awareness of nodes in the
4090 		 * dirty-throttling and the flusher threads.
4091 		 */
4092 		if (ac->spread_dirty_pages) {
4093 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
4094 				continue;
4095 
4096 			if (!node_dirty_ok(zone->zone_pgdat)) {
4097 				last_pgdat_dirty_limit = zone->zone_pgdat;
4098 				continue;
4099 			}
4100 		}
4101 
4102 		if (no_fallback && nr_online_nodes > 1 &&
4103 		    zone != ac->preferred_zoneref->zone) {
4104 			int local_nid;
4105 
4106 			/*
4107 			 * If moving to a remote node, retry but allow
4108 			 * fragmenting fallbacks. Locality is more important
4109 			 * than fragmentation avoidance.
4110 			 */
4111 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4112 			if (zone_to_nid(zone) != local_nid) {
4113 				alloc_flags &= ~ALLOC_NOFRAGMENT;
4114 				goto retry;
4115 			}
4116 		}
4117 
4118 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4119 		if (!zone_watermark_fast(zone, order, mark,
4120 				       ac->highest_zoneidx, alloc_flags,
4121 				       gfp_mask)) {
4122 			int ret;
4123 
4124 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4125 			/*
4126 			 * Watermark failed for this zone, but see if we can
4127 			 * grow this zone if it contains deferred pages.
4128 			 */
4129 			if (static_branch_unlikely(&deferred_pages)) {
4130 				if (_deferred_grow_zone(zone, order))
4131 					goto try_this_zone;
4132 			}
4133 #endif
4134 			/* Checked here to keep the fast path fast */
4135 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4136 			if (alloc_flags & ALLOC_NO_WATERMARKS)
4137 				goto try_this_zone;
4138 
4139 			if (!node_reclaim_enabled() ||
4140 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4141 				continue;
4142 
4143 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4144 			switch (ret) {
4145 			case NODE_RECLAIM_NOSCAN:
4146 				/* did not scan */
4147 				continue;
4148 			case NODE_RECLAIM_FULL:
4149 				/* scanned but unreclaimable */
4150 				continue;
4151 			default:
4152 				/* did we reclaim enough */
4153 				if (zone_watermark_ok(zone, order, mark,
4154 					ac->highest_zoneidx, alloc_flags))
4155 					goto try_this_zone;
4156 
4157 				continue;
4158 			}
4159 		}
4160 
4161 try_this_zone:
4162 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4163 				gfp_mask, alloc_flags, ac->migratetype);
4164 		if (page) {
4165 			prep_new_page(page, order, gfp_mask, alloc_flags);
4166 
4167 			/*
4168 			 * If this is a high-order atomic allocation then check
4169 			 * if the pageblock should be reserved for the future
4170 			 */
4171 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4172 				reserve_highatomic_pageblock(page, zone, order);
4173 
4174 			return page;
4175 		} else {
4176 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4177 			/* Try again if zone has deferred pages */
4178 			if (static_branch_unlikely(&deferred_pages)) {
4179 				if (_deferred_grow_zone(zone, order))
4180 					goto try_this_zone;
4181 			}
4182 #endif
4183 		}
4184 	}
4185 
4186 	/*
4187 	 * It's possible on a UMA machine to get through all zones that are
4188 	 * fragmented. If avoiding fragmentation, reset and try again.
4189 	 */
4190 	if (no_fallback) {
4191 		alloc_flags &= ~ALLOC_NOFRAGMENT;
4192 		goto retry;
4193 	}
4194 
4195 	return NULL;
4196 }
4197 
4198 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4199 {
4200 	unsigned int filter = SHOW_MEM_FILTER_NODES;
4201 
4202 	/*
4203 	 * This documents exceptions given to allocations in certain
4204 	 * contexts that are allowed to allocate outside current's set
4205 	 * of allowed nodes.
4206 	 */
4207 	if (!(gfp_mask & __GFP_NOMEMALLOC))
4208 		if (tsk_is_oom_victim(current) ||
4209 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
4210 			filter &= ~SHOW_MEM_FILTER_NODES;
4211 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4212 		filter &= ~SHOW_MEM_FILTER_NODES;
4213 
4214 	show_mem(filter, nodemask);
4215 }
4216 
4217 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4218 {
4219 	struct va_format vaf;
4220 	va_list args;
4221 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4222 
4223 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
4224 		return;
4225 
4226 	va_start(args, fmt);
4227 	vaf.fmt = fmt;
4228 	vaf.va = &args;
4229 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4230 			current->comm, &vaf, gfp_mask, &gfp_mask,
4231 			nodemask_pr_args(nodemask));
4232 	va_end(args);
4233 
4234 	cpuset_print_current_mems_allowed();
4235 	pr_cont("\n");
4236 	dump_stack();
4237 	warn_alloc_show_mem(gfp_mask, nodemask);
4238 }
4239 
4240 static inline struct page *
4241 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4242 			      unsigned int alloc_flags,
4243 			      const struct alloc_context *ac)
4244 {
4245 	struct page *page;
4246 
4247 	page = get_page_from_freelist(gfp_mask, order,
4248 			alloc_flags|ALLOC_CPUSET, ac);
4249 	/*
4250 	 * fallback to ignore cpuset restriction if our nodes
4251 	 * are depleted
4252 	 */
4253 	if (!page)
4254 		page = get_page_from_freelist(gfp_mask, order,
4255 				alloc_flags, ac);
4256 
4257 	return page;
4258 }
4259 
4260 static inline struct page *
4261 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4262 	const struct alloc_context *ac, unsigned long *did_some_progress)
4263 {
4264 	struct oom_control oc = {
4265 		.zonelist = ac->zonelist,
4266 		.nodemask = ac->nodemask,
4267 		.memcg = NULL,
4268 		.gfp_mask = gfp_mask,
4269 		.order = order,
4270 	};
4271 	struct page *page;
4272 
4273 	*did_some_progress = 0;
4274 
4275 	/*
4276 	 * Acquire the oom lock.  If that fails, somebody else is
4277 	 * making progress for us.
4278 	 */
4279 	if (!mutex_trylock(&oom_lock)) {
4280 		*did_some_progress = 1;
4281 		schedule_timeout_uninterruptible(1);
4282 		return NULL;
4283 	}
4284 
4285 	/*
4286 	 * Go through the zonelist yet one more time, keep very high watermark
4287 	 * here, this is only to catch a parallel oom killing, we must fail if
4288 	 * we're still under heavy pressure. But make sure that this reclaim
4289 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4290 	 * allocation which will never fail due to oom_lock already held.
4291 	 */
4292 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4293 				      ~__GFP_DIRECT_RECLAIM, order,
4294 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4295 	if (page)
4296 		goto out;
4297 
4298 	/* Coredumps can quickly deplete all memory reserves */
4299 	if (current->flags & PF_DUMPCORE)
4300 		goto out;
4301 	/* The OOM killer will not help higher order allocs */
4302 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4303 		goto out;
4304 	/*
4305 	 * We have already exhausted all our reclaim opportunities without any
4306 	 * success so it is time to admit defeat. We will skip the OOM killer
4307 	 * because it is very likely that the caller has a more reasonable
4308 	 * fallback than shooting a random task.
4309 	 *
4310 	 * The OOM killer may not free memory on a specific node.
4311 	 */
4312 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4313 		goto out;
4314 	/* The OOM killer does not needlessly kill tasks for lowmem */
4315 	if (ac->highest_zoneidx < ZONE_NORMAL)
4316 		goto out;
4317 	if (pm_suspended_storage())
4318 		goto out;
4319 	/*
4320 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4321 	 * other request to make a forward progress.
4322 	 * We are in an unfortunate situation where out_of_memory cannot
4323 	 * do much for this context but let's try it to at least get
4324 	 * access to memory reserved if the current task is killed (see
4325 	 * out_of_memory). Once filesystems are ready to handle allocation
4326 	 * failures more gracefully we should just bail out here.
4327 	 */
4328 
4329 	/* Exhausted what can be done so it's blame time */
4330 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4331 		*did_some_progress = 1;
4332 
4333 		/*
4334 		 * Help non-failing allocations by giving them access to memory
4335 		 * reserves
4336 		 */
4337 		if (gfp_mask & __GFP_NOFAIL)
4338 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4339 					ALLOC_NO_WATERMARKS, ac);
4340 	}
4341 out:
4342 	mutex_unlock(&oom_lock);
4343 	return page;
4344 }
4345 
4346 /*
4347  * Maximum number of compaction retries with a progress before OOM
4348  * killer is consider as the only way to move forward.
4349  */
4350 #define MAX_COMPACT_RETRIES 16
4351 
4352 #ifdef CONFIG_COMPACTION
4353 /* Try memory compaction for high-order allocations before reclaim */
4354 static struct page *
4355 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4356 		unsigned int alloc_flags, const struct alloc_context *ac,
4357 		enum compact_priority prio, enum compact_result *compact_result)
4358 {
4359 	struct page *page = NULL;
4360 	unsigned long pflags;
4361 	unsigned int noreclaim_flag;
4362 
4363 	if (!order)
4364 		return NULL;
4365 
4366 	psi_memstall_enter(&pflags);
4367 	noreclaim_flag = memalloc_noreclaim_save();
4368 
4369 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4370 								prio, &page);
4371 
4372 	memalloc_noreclaim_restore(noreclaim_flag);
4373 	psi_memstall_leave(&pflags);
4374 
4375 	if (*compact_result == COMPACT_SKIPPED)
4376 		return NULL;
4377 	/*
4378 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4379 	 * count a compaction stall
4380 	 */
4381 	count_vm_event(COMPACTSTALL);
4382 
4383 	/* Prep a captured page if available */
4384 	if (page)
4385 		prep_new_page(page, order, gfp_mask, alloc_flags);
4386 
4387 	/* Try get a page from the freelist if available */
4388 	if (!page)
4389 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4390 
4391 	if (page) {
4392 		struct zone *zone = page_zone(page);
4393 
4394 		zone->compact_blockskip_flush = false;
4395 		compaction_defer_reset(zone, order, true);
4396 		count_vm_event(COMPACTSUCCESS);
4397 		return page;
4398 	}
4399 
4400 	/*
4401 	 * It's bad if compaction run occurs and fails. The most likely reason
4402 	 * is that pages exist, but not enough to satisfy watermarks.
4403 	 */
4404 	count_vm_event(COMPACTFAIL);
4405 
4406 	cond_resched();
4407 
4408 	return NULL;
4409 }
4410 
4411 static inline bool
4412 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4413 		     enum compact_result compact_result,
4414 		     enum compact_priority *compact_priority,
4415 		     int *compaction_retries)
4416 {
4417 	int max_retries = MAX_COMPACT_RETRIES;
4418 	int min_priority;
4419 	bool ret = false;
4420 	int retries = *compaction_retries;
4421 	enum compact_priority priority = *compact_priority;
4422 
4423 	if (!order)
4424 		return false;
4425 
4426 	if (fatal_signal_pending(current))
4427 		return false;
4428 
4429 	if (compaction_made_progress(compact_result))
4430 		(*compaction_retries)++;
4431 
4432 	/*
4433 	 * compaction considers all the zone as desperately out of memory
4434 	 * so it doesn't really make much sense to retry except when the
4435 	 * failure could be caused by insufficient priority
4436 	 */
4437 	if (compaction_failed(compact_result))
4438 		goto check_priority;
4439 
4440 	/*
4441 	 * compaction was skipped because there are not enough order-0 pages
4442 	 * to work with, so we retry only if it looks like reclaim can help.
4443 	 */
4444 	if (compaction_needs_reclaim(compact_result)) {
4445 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4446 		goto out;
4447 	}
4448 
4449 	/*
4450 	 * make sure the compaction wasn't deferred or didn't bail out early
4451 	 * due to locks contention before we declare that we should give up.
4452 	 * But the next retry should use a higher priority if allowed, so
4453 	 * we don't just keep bailing out endlessly.
4454 	 */
4455 	if (compaction_withdrawn(compact_result)) {
4456 		goto check_priority;
4457 	}
4458 
4459 	/*
4460 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4461 	 * costly ones because they are de facto nofail and invoke OOM
4462 	 * killer to move on while costly can fail and users are ready
4463 	 * to cope with that. 1/4 retries is rather arbitrary but we
4464 	 * would need much more detailed feedback from compaction to
4465 	 * make a better decision.
4466 	 */
4467 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4468 		max_retries /= 4;
4469 	if (*compaction_retries <= max_retries) {
4470 		ret = true;
4471 		goto out;
4472 	}
4473 
4474 	/*
4475 	 * Make sure there are attempts at the highest priority if we exhausted
4476 	 * all retries or failed at the lower priorities.
4477 	 */
4478 check_priority:
4479 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4480 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4481 
4482 	if (*compact_priority > min_priority) {
4483 		(*compact_priority)--;
4484 		*compaction_retries = 0;
4485 		ret = true;
4486 	}
4487 out:
4488 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4489 	return ret;
4490 }
4491 #else
4492 static inline struct page *
4493 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4494 		unsigned int alloc_flags, const struct alloc_context *ac,
4495 		enum compact_priority prio, enum compact_result *compact_result)
4496 {
4497 	*compact_result = COMPACT_SKIPPED;
4498 	return NULL;
4499 }
4500 
4501 static inline bool
4502 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4503 		     enum compact_result compact_result,
4504 		     enum compact_priority *compact_priority,
4505 		     int *compaction_retries)
4506 {
4507 	struct zone *zone;
4508 	struct zoneref *z;
4509 
4510 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4511 		return false;
4512 
4513 	/*
4514 	 * There are setups with compaction disabled which would prefer to loop
4515 	 * inside the allocator rather than hit the oom killer prematurely.
4516 	 * Let's give them a good hope and keep retrying while the order-0
4517 	 * watermarks are OK.
4518 	 */
4519 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4520 				ac->highest_zoneidx, ac->nodemask) {
4521 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4522 					ac->highest_zoneidx, alloc_flags))
4523 			return true;
4524 	}
4525 	return false;
4526 }
4527 #endif /* CONFIG_COMPACTION */
4528 
4529 #ifdef CONFIG_LOCKDEP
4530 static struct lockdep_map __fs_reclaim_map =
4531 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4532 
4533 static bool __need_reclaim(gfp_t gfp_mask)
4534 {
4535 	/* no reclaim without waiting on it */
4536 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4537 		return false;
4538 
4539 	/* this guy won't enter reclaim */
4540 	if (current->flags & PF_MEMALLOC)
4541 		return false;
4542 
4543 	if (gfp_mask & __GFP_NOLOCKDEP)
4544 		return false;
4545 
4546 	return true;
4547 }
4548 
4549 void __fs_reclaim_acquire(void)
4550 {
4551 	lock_map_acquire(&__fs_reclaim_map);
4552 }
4553 
4554 void __fs_reclaim_release(void)
4555 {
4556 	lock_map_release(&__fs_reclaim_map);
4557 }
4558 
4559 void fs_reclaim_acquire(gfp_t gfp_mask)
4560 {
4561 	gfp_mask = current_gfp_context(gfp_mask);
4562 
4563 	if (__need_reclaim(gfp_mask)) {
4564 		if (gfp_mask & __GFP_FS)
4565 			__fs_reclaim_acquire();
4566 
4567 #ifdef CONFIG_MMU_NOTIFIER
4568 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4569 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4570 #endif
4571 
4572 	}
4573 }
4574 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4575 
4576 void fs_reclaim_release(gfp_t gfp_mask)
4577 {
4578 	gfp_mask = current_gfp_context(gfp_mask);
4579 
4580 	if (__need_reclaim(gfp_mask)) {
4581 		if (gfp_mask & __GFP_FS)
4582 			__fs_reclaim_release();
4583 	}
4584 }
4585 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4586 #endif
4587 
4588 /* Perform direct synchronous page reclaim */
4589 static unsigned long
4590 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4591 					const struct alloc_context *ac)
4592 {
4593 	unsigned int noreclaim_flag;
4594 	unsigned long pflags, progress;
4595 
4596 	cond_resched();
4597 
4598 	/* We now go into synchronous reclaim */
4599 	cpuset_memory_pressure_bump();
4600 	psi_memstall_enter(&pflags);
4601 	fs_reclaim_acquire(gfp_mask);
4602 	noreclaim_flag = memalloc_noreclaim_save();
4603 
4604 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4605 								ac->nodemask);
4606 
4607 	memalloc_noreclaim_restore(noreclaim_flag);
4608 	fs_reclaim_release(gfp_mask);
4609 	psi_memstall_leave(&pflags);
4610 
4611 	cond_resched();
4612 
4613 	return progress;
4614 }
4615 
4616 /* The really slow allocator path where we enter direct reclaim */
4617 static inline struct page *
4618 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4619 		unsigned int alloc_flags, const struct alloc_context *ac,
4620 		unsigned long *did_some_progress)
4621 {
4622 	struct page *page = NULL;
4623 	bool drained = false;
4624 
4625 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4626 	if (unlikely(!(*did_some_progress)))
4627 		return NULL;
4628 
4629 retry:
4630 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4631 
4632 	/*
4633 	 * If an allocation failed after direct reclaim, it could be because
4634 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4635 	 * Shrink them and try again
4636 	 */
4637 	if (!page && !drained) {
4638 		unreserve_highatomic_pageblock(ac, false);
4639 		drain_all_pages(NULL);
4640 		drained = true;
4641 		goto retry;
4642 	}
4643 
4644 	return page;
4645 }
4646 
4647 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4648 			     const struct alloc_context *ac)
4649 {
4650 	struct zoneref *z;
4651 	struct zone *zone;
4652 	pg_data_t *last_pgdat = NULL;
4653 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4654 
4655 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4656 					ac->nodemask) {
4657 		if (last_pgdat != zone->zone_pgdat)
4658 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4659 		last_pgdat = zone->zone_pgdat;
4660 	}
4661 }
4662 
4663 static inline unsigned int
4664 gfp_to_alloc_flags(gfp_t gfp_mask)
4665 {
4666 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4667 
4668 	/*
4669 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4670 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4671 	 * to save two branches.
4672 	 */
4673 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4674 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4675 
4676 	/*
4677 	 * The caller may dip into page reserves a bit more if the caller
4678 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4679 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4680 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4681 	 */
4682 	alloc_flags |= (__force int)
4683 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4684 
4685 	if (gfp_mask & __GFP_ATOMIC) {
4686 		/*
4687 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4688 		 * if it can't schedule.
4689 		 */
4690 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4691 			alloc_flags |= ALLOC_HARDER;
4692 		/*
4693 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4694 		 * comment for __cpuset_node_allowed().
4695 		 */
4696 		alloc_flags &= ~ALLOC_CPUSET;
4697 	} else if (unlikely(rt_task(current)) && !in_interrupt())
4698 		alloc_flags |= ALLOC_HARDER;
4699 
4700 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4701 
4702 	return alloc_flags;
4703 }
4704 
4705 static bool oom_reserves_allowed(struct task_struct *tsk)
4706 {
4707 	if (!tsk_is_oom_victim(tsk))
4708 		return false;
4709 
4710 	/*
4711 	 * !MMU doesn't have oom reaper so give access to memory reserves
4712 	 * only to the thread with TIF_MEMDIE set
4713 	 */
4714 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4715 		return false;
4716 
4717 	return true;
4718 }
4719 
4720 /*
4721  * Distinguish requests which really need access to full memory
4722  * reserves from oom victims which can live with a portion of it
4723  */
4724 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4725 {
4726 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4727 		return 0;
4728 	if (gfp_mask & __GFP_MEMALLOC)
4729 		return ALLOC_NO_WATERMARKS;
4730 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4731 		return ALLOC_NO_WATERMARKS;
4732 	if (!in_interrupt()) {
4733 		if (current->flags & PF_MEMALLOC)
4734 			return ALLOC_NO_WATERMARKS;
4735 		else if (oom_reserves_allowed(current))
4736 			return ALLOC_OOM;
4737 	}
4738 
4739 	return 0;
4740 }
4741 
4742 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4743 {
4744 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4745 }
4746 
4747 /*
4748  * Checks whether it makes sense to retry the reclaim to make a forward progress
4749  * for the given allocation request.
4750  *
4751  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4752  * without success, or when we couldn't even meet the watermark if we
4753  * reclaimed all remaining pages on the LRU lists.
4754  *
4755  * Returns true if a retry is viable or false to enter the oom path.
4756  */
4757 static inline bool
4758 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4759 		     struct alloc_context *ac, int alloc_flags,
4760 		     bool did_some_progress, int *no_progress_loops)
4761 {
4762 	struct zone *zone;
4763 	struct zoneref *z;
4764 	bool ret = false;
4765 
4766 	/*
4767 	 * Costly allocations might have made a progress but this doesn't mean
4768 	 * their order will become available due to high fragmentation so
4769 	 * always increment the no progress counter for them
4770 	 */
4771 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4772 		*no_progress_loops = 0;
4773 	else
4774 		(*no_progress_loops)++;
4775 
4776 	/*
4777 	 * Make sure we converge to OOM if we cannot make any progress
4778 	 * several times in the row.
4779 	 */
4780 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4781 		/* Before OOM, exhaust highatomic_reserve */
4782 		return unreserve_highatomic_pageblock(ac, true);
4783 	}
4784 
4785 	/*
4786 	 * Keep reclaiming pages while there is a chance this will lead
4787 	 * somewhere.  If none of the target zones can satisfy our allocation
4788 	 * request even if all reclaimable pages are considered then we are
4789 	 * screwed and have to go OOM.
4790 	 */
4791 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4792 				ac->highest_zoneidx, ac->nodemask) {
4793 		unsigned long available;
4794 		unsigned long reclaimable;
4795 		unsigned long min_wmark = min_wmark_pages(zone);
4796 		bool wmark;
4797 
4798 		available = reclaimable = zone_reclaimable_pages(zone);
4799 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4800 
4801 		/*
4802 		 * Would the allocation succeed if we reclaimed all
4803 		 * reclaimable pages?
4804 		 */
4805 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4806 				ac->highest_zoneidx, alloc_flags, available);
4807 		trace_reclaim_retry_zone(z, order, reclaimable,
4808 				available, min_wmark, *no_progress_loops, wmark);
4809 		if (wmark) {
4810 			/*
4811 			 * If we didn't make any progress and have a lot of
4812 			 * dirty + writeback pages then we should wait for
4813 			 * an IO to complete to slow down the reclaim and
4814 			 * prevent from pre mature OOM
4815 			 */
4816 			if (!did_some_progress) {
4817 				unsigned long write_pending;
4818 
4819 				write_pending = zone_page_state_snapshot(zone,
4820 							NR_ZONE_WRITE_PENDING);
4821 
4822 				if (2 * write_pending > reclaimable) {
4823 					congestion_wait(BLK_RW_ASYNC, HZ/10);
4824 					return true;
4825 				}
4826 			}
4827 
4828 			ret = true;
4829 			goto out;
4830 		}
4831 	}
4832 
4833 out:
4834 	/*
4835 	 * Memory allocation/reclaim might be called from a WQ context and the
4836 	 * current implementation of the WQ concurrency control doesn't
4837 	 * recognize that a particular WQ is congested if the worker thread is
4838 	 * looping without ever sleeping. Therefore we have to do a short sleep
4839 	 * here rather than calling cond_resched().
4840 	 */
4841 	if (current->flags & PF_WQ_WORKER)
4842 		schedule_timeout_uninterruptible(1);
4843 	else
4844 		cond_resched();
4845 	return ret;
4846 }
4847 
4848 static inline bool
4849 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4850 {
4851 	/*
4852 	 * It's possible that cpuset's mems_allowed and the nodemask from
4853 	 * mempolicy don't intersect. This should be normally dealt with by
4854 	 * policy_nodemask(), but it's possible to race with cpuset update in
4855 	 * such a way the check therein was true, and then it became false
4856 	 * before we got our cpuset_mems_cookie here.
4857 	 * This assumes that for all allocations, ac->nodemask can come only
4858 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4859 	 * when it does not intersect with the cpuset restrictions) or the
4860 	 * caller can deal with a violated nodemask.
4861 	 */
4862 	if (cpusets_enabled() && ac->nodemask &&
4863 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4864 		ac->nodemask = NULL;
4865 		return true;
4866 	}
4867 
4868 	/*
4869 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4870 	 * possible to race with parallel threads in such a way that our
4871 	 * allocation can fail while the mask is being updated. If we are about
4872 	 * to fail, check if the cpuset changed during allocation and if so,
4873 	 * retry.
4874 	 */
4875 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4876 		return true;
4877 
4878 	return false;
4879 }
4880 
4881 static inline struct page *
4882 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4883 						struct alloc_context *ac)
4884 {
4885 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4886 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4887 	struct page *page = NULL;
4888 	unsigned int alloc_flags;
4889 	unsigned long did_some_progress;
4890 	enum compact_priority compact_priority;
4891 	enum compact_result compact_result;
4892 	int compaction_retries;
4893 	int no_progress_loops;
4894 	unsigned int cpuset_mems_cookie;
4895 	int reserve_flags;
4896 
4897 	/*
4898 	 * We also sanity check to catch abuse of atomic reserves being used by
4899 	 * callers that are not in atomic context.
4900 	 */
4901 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4902 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4903 		gfp_mask &= ~__GFP_ATOMIC;
4904 
4905 retry_cpuset:
4906 	compaction_retries = 0;
4907 	no_progress_loops = 0;
4908 	compact_priority = DEF_COMPACT_PRIORITY;
4909 	cpuset_mems_cookie = read_mems_allowed_begin();
4910 
4911 	/*
4912 	 * The fast path uses conservative alloc_flags to succeed only until
4913 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4914 	 * alloc_flags precisely. So we do that now.
4915 	 */
4916 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4917 
4918 	/*
4919 	 * We need to recalculate the starting point for the zonelist iterator
4920 	 * because we might have used different nodemask in the fast path, or
4921 	 * there was a cpuset modification and we are retrying - otherwise we
4922 	 * could end up iterating over non-eligible zones endlessly.
4923 	 */
4924 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4925 					ac->highest_zoneidx, ac->nodemask);
4926 	if (!ac->preferred_zoneref->zone)
4927 		goto nopage;
4928 
4929 	if (alloc_flags & ALLOC_KSWAPD)
4930 		wake_all_kswapds(order, gfp_mask, ac);
4931 
4932 	/*
4933 	 * The adjusted alloc_flags might result in immediate success, so try
4934 	 * that first
4935 	 */
4936 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4937 	if (page)
4938 		goto got_pg;
4939 
4940 	/*
4941 	 * For costly allocations, try direct compaction first, as it's likely
4942 	 * that we have enough base pages and don't need to reclaim. For non-
4943 	 * movable high-order allocations, do that as well, as compaction will
4944 	 * try prevent permanent fragmentation by migrating from blocks of the
4945 	 * same migratetype.
4946 	 * Don't try this for allocations that are allowed to ignore
4947 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4948 	 */
4949 	if (can_direct_reclaim &&
4950 			(costly_order ||
4951 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4952 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4953 		page = __alloc_pages_direct_compact(gfp_mask, order,
4954 						alloc_flags, ac,
4955 						INIT_COMPACT_PRIORITY,
4956 						&compact_result);
4957 		if (page)
4958 			goto got_pg;
4959 
4960 		/*
4961 		 * Checks for costly allocations with __GFP_NORETRY, which
4962 		 * includes some THP page fault allocations
4963 		 */
4964 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4965 			/*
4966 			 * If allocating entire pageblock(s) and compaction
4967 			 * failed because all zones are below low watermarks
4968 			 * or is prohibited because it recently failed at this
4969 			 * order, fail immediately unless the allocator has
4970 			 * requested compaction and reclaim retry.
4971 			 *
4972 			 * Reclaim is
4973 			 *  - potentially very expensive because zones are far
4974 			 *    below their low watermarks or this is part of very
4975 			 *    bursty high order allocations,
4976 			 *  - not guaranteed to help because isolate_freepages()
4977 			 *    may not iterate over freed pages as part of its
4978 			 *    linear scan, and
4979 			 *  - unlikely to make entire pageblocks free on its
4980 			 *    own.
4981 			 */
4982 			if (compact_result == COMPACT_SKIPPED ||
4983 			    compact_result == COMPACT_DEFERRED)
4984 				goto nopage;
4985 
4986 			/*
4987 			 * Looks like reclaim/compaction is worth trying, but
4988 			 * sync compaction could be very expensive, so keep
4989 			 * using async compaction.
4990 			 */
4991 			compact_priority = INIT_COMPACT_PRIORITY;
4992 		}
4993 	}
4994 
4995 retry:
4996 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4997 	if (alloc_flags & ALLOC_KSWAPD)
4998 		wake_all_kswapds(order, gfp_mask, ac);
4999 
5000 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5001 	if (reserve_flags)
5002 		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
5003 
5004 	/*
5005 	 * Reset the nodemask and zonelist iterators if memory policies can be
5006 	 * ignored. These allocations are high priority and system rather than
5007 	 * user oriented.
5008 	 */
5009 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5010 		ac->nodemask = NULL;
5011 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5012 					ac->highest_zoneidx, ac->nodemask);
5013 	}
5014 
5015 	/* Attempt with potentially adjusted zonelist and alloc_flags */
5016 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5017 	if (page)
5018 		goto got_pg;
5019 
5020 	/* Caller is not willing to reclaim, we can't balance anything */
5021 	if (!can_direct_reclaim)
5022 		goto nopage;
5023 
5024 	/* Avoid recursion of direct reclaim */
5025 	if (current->flags & PF_MEMALLOC)
5026 		goto nopage;
5027 
5028 	/* Try direct reclaim and then allocating */
5029 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5030 							&did_some_progress);
5031 	if (page)
5032 		goto got_pg;
5033 
5034 	/* Try direct compaction and then allocating */
5035 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5036 					compact_priority, &compact_result);
5037 	if (page)
5038 		goto got_pg;
5039 
5040 	/* Do not loop if specifically requested */
5041 	if (gfp_mask & __GFP_NORETRY)
5042 		goto nopage;
5043 
5044 	/*
5045 	 * Do not retry costly high order allocations unless they are
5046 	 * __GFP_RETRY_MAYFAIL
5047 	 */
5048 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5049 		goto nopage;
5050 
5051 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5052 				 did_some_progress > 0, &no_progress_loops))
5053 		goto retry;
5054 
5055 	/*
5056 	 * It doesn't make any sense to retry for the compaction if the order-0
5057 	 * reclaim is not able to make any progress because the current
5058 	 * implementation of the compaction depends on the sufficient amount
5059 	 * of free memory (see __compaction_suitable)
5060 	 */
5061 	if (did_some_progress > 0 &&
5062 			should_compact_retry(ac, order, alloc_flags,
5063 				compact_result, &compact_priority,
5064 				&compaction_retries))
5065 		goto retry;
5066 
5067 
5068 	/* Deal with possible cpuset update races before we start OOM killing */
5069 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5070 		goto retry_cpuset;
5071 
5072 	/* Reclaim has failed us, start killing things */
5073 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5074 	if (page)
5075 		goto got_pg;
5076 
5077 	/* Avoid allocations with no watermarks from looping endlessly */
5078 	if (tsk_is_oom_victim(current) &&
5079 	    (alloc_flags & ALLOC_OOM ||
5080 	     (gfp_mask & __GFP_NOMEMALLOC)))
5081 		goto nopage;
5082 
5083 	/* Retry as long as the OOM killer is making progress */
5084 	if (did_some_progress) {
5085 		no_progress_loops = 0;
5086 		goto retry;
5087 	}
5088 
5089 nopage:
5090 	/* Deal with possible cpuset update races before we fail */
5091 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5092 		goto retry_cpuset;
5093 
5094 	/*
5095 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5096 	 * we always retry
5097 	 */
5098 	if (gfp_mask & __GFP_NOFAIL) {
5099 		/*
5100 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
5101 		 * of any new users that actually require GFP_NOWAIT
5102 		 */
5103 		if (WARN_ON_ONCE(!can_direct_reclaim))
5104 			goto fail;
5105 
5106 		/*
5107 		 * PF_MEMALLOC request from this context is rather bizarre
5108 		 * because we cannot reclaim anything and only can loop waiting
5109 		 * for somebody to do a work for us
5110 		 */
5111 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
5112 
5113 		/*
5114 		 * non failing costly orders are a hard requirement which we
5115 		 * are not prepared for much so let's warn about these users
5116 		 * so that we can identify them and convert them to something
5117 		 * else.
5118 		 */
5119 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
5120 
5121 		/*
5122 		 * Help non-failing allocations by giving them access to memory
5123 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
5124 		 * could deplete whole memory reserves which would just make
5125 		 * the situation worse
5126 		 */
5127 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5128 		if (page)
5129 			goto got_pg;
5130 
5131 		cond_resched();
5132 		goto retry;
5133 	}
5134 fail:
5135 	warn_alloc(gfp_mask, ac->nodemask,
5136 			"page allocation failure: order:%u", order);
5137 got_pg:
5138 	return page;
5139 }
5140 
5141 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5142 		int preferred_nid, nodemask_t *nodemask,
5143 		struct alloc_context *ac, gfp_t *alloc_gfp,
5144 		unsigned int *alloc_flags)
5145 {
5146 	ac->highest_zoneidx = gfp_zone(gfp_mask);
5147 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5148 	ac->nodemask = nodemask;
5149 	ac->migratetype = gfp_migratetype(gfp_mask);
5150 
5151 	if (cpusets_enabled()) {
5152 		*alloc_gfp |= __GFP_HARDWALL;
5153 		/*
5154 		 * When we are in the interrupt context, it is irrelevant
5155 		 * to the current task context. It means that any node ok.
5156 		 */
5157 		if (!in_interrupt() && !ac->nodemask)
5158 			ac->nodemask = &cpuset_current_mems_allowed;
5159 		else
5160 			*alloc_flags |= ALLOC_CPUSET;
5161 	}
5162 
5163 	fs_reclaim_acquire(gfp_mask);
5164 	fs_reclaim_release(gfp_mask);
5165 
5166 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5167 
5168 	if (should_fail_alloc_page(gfp_mask, order))
5169 		return false;
5170 
5171 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5172 
5173 	/* Dirty zone balancing only done in the fast path */
5174 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5175 
5176 	/*
5177 	 * The preferred zone is used for statistics but crucially it is
5178 	 * also used as the starting point for the zonelist iterator. It
5179 	 * may get reset for allocations that ignore memory policies.
5180 	 */
5181 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5182 					ac->highest_zoneidx, ac->nodemask);
5183 
5184 	return true;
5185 }
5186 
5187 /*
5188  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5189  * @gfp: GFP flags for the allocation
5190  * @preferred_nid: The preferred NUMA node ID to allocate from
5191  * @nodemask: Set of nodes to allocate from, may be NULL
5192  * @nr_pages: The number of pages desired on the list or array
5193  * @page_list: Optional list to store the allocated pages
5194  * @page_array: Optional array to store the pages
5195  *
5196  * This is a batched version of the page allocator that attempts to
5197  * allocate nr_pages quickly. Pages are added to page_list if page_list
5198  * is not NULL, otherwise it is assumed that the page_array is valid.
5199  *
5200  * For lists, nr_pages is the number of pages that should be allocated.
5201  *
5202  * For arrays, only NULL elements are populated with pages and nr_pages
5203  * is the maximum number of pages that will be stored in the array.
5204  *
5205  * Returns the number of pages on the list or array.
5206  */
5207 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5208 			nodemask_t *nodemask, int nr_pages,
5209 			struct list_head *page_list,
5210 			struct page **page_array)
5211 {
5212 	struct page *page;
5213 	unsigned long flags;
5214 	struct zone *zone;
5215 	struct zoneref *z;
5216 	struct per_cpu_pages *pcp;
5217 	struct list_head *pcp_list;
5218 	struct alloc_context ac;
5219 	gfp_t alloc_gfp;
5220 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5221 	int nr_populated = 0, nr_account = 0;
5222 
5223 	if (unlikely(nr_pages <= 0))
5224 		return 0;
5225 
5226 	/*
5227 	 * Skip populated array elements to determine if any pages need
5228 	 * to be allocated before disabling IRQs.
5229 	 */
5230 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5231 		nr_populated++;
5232 
5233 	/* Already populated array? */
5234 	if (unlikely(page_array && nr_pages - nr_populated == 0))
5235 		return nr_populated;
5236 
5237 	/* Use the single page allocator for one page. */
5238 	if (nr_pages - nr_populated == 1)
5239 		goto failed;
5240 
5241 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5242 	gfp &= gfp_allowed_mask;
5243 	alloc_gfp = gfp;
5244 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5245 		return 0;
5246 	gfp = alloc_gfp;
5247 
5248 	/* Find an allowed local zone that meets the low watermark. */
5249 	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5250 		unsigned long mark;
5251 
5252 		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5253 		    !__cpuset_zone_allowed(zone, gfp)) {
5254 			continue;
5255 		}
5256 
5257 		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5258 		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5259 			goto failed;
5260 		}
5261 
5262 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5263 		if (zone_watermark_fast(zone, 0,  mark,
5264 				zonelist_zone_idx(ac.preferred_zoneref),
5265 				alloc_flags, gfp)) {
5266 			break;
5267 		}
5268 	}
5269 
5270 	/*
5271 	 * If there are no allowed local zones that meets the watermarks then
5272 	 * try to allocate a single page and reclaim if necessary.
5273 	 */
5274 	if (unlikely(!zone))
5275 		goto failed;
5276 
5277 	/* Attempt the batch allocation */
5278 	local_lock_irqsave(&pagesets.lock, flags);
5279 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
5280 	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5281 
5282 	while (nr_populated < nr_pages) {
5283 
5284 		/* Skip existing pages */
5285 		if (page_array && page_array[nr_populated]) {
5286 			nr_populated++;
5287 			continue;
5288 		}
5289 
5290 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5291 								pcp, pcp_list);
5292 		if (unlikely(!page)) {
5293 			/* Try and get at least one page */
5294 			if (!nr_populated)
5295 				goto failed_irq;
5296 			break;
5297 		}
5298 		nr_account++;
5299 
5300 		prep_new_page(page, 0, gfp, 0);
5301 		if (page_list)
5302 			list_add(&page->lru, page_list);
5303 		else
5304 			page_array[nr_populated] = page;
5305 		nr_populated++;
5306 	}
5307 
5308 	local_unlock_irqrestore(&pagesets.lock, flags);
5309 
5310 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5311 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5312 
5313 	return nr_populated;
5314 
5315 failed_irq:
5316 	local_unlock_irqrestore(&pagesets.lock, flags);
5317 
5318 failed:
5319 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5320 	if (page) {
5321 		if (page_list)
5322 			list_add(&page->lru, page_list);
5323 		else
5324 			page_array[nr_populated] = page;
5325 		nr_populated++;
5326 	}
5327 
5328 	return nr_populated;
5329 }
5330 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
5331 
5332 /*
5333  * This is the 'heart' of the zoned buddy allocator.
5334  */
5335 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5336 							nodemask_t *nodemask)
5337 {
5338 	struct page *page;
5339 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5340 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5341 	struct alloc_context ac = { };
5342 
5343 	/*
5344 	 * There are several places where we assume that the order value is sane
5345 	 * so bail out early if the request is out of bound.
5346 	 */
5347 	if (unlikely(order >= MAX_ORDER)) {
5348 		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5349 		return NULL;
5350 	}
5351 
5352 	gfp &= gfp_allowed_mask;
5353 	/*
5354 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5355 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
5356 	 * from a particular context which has been marked by
5357 	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5358 	 * movable zones are not used during allocation.
5359 	 */
5360 	gfp = current_gfp_context(gfp);
5361 	alloc_gfp = gfp;
5362 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5363 			&alloc_gfp, &alloc_flags))
5364 		return NULL;
5365 
5366 	/*
5367 	 * Forbid the first pass from falling back to types that fragment
5368 	 * memory until all local zones are considered.
5369 	 */
5370 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5371 
5372 	/* First allocation attempt */
5373 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5374 	if (likely(page))
5375 		goto out;
5376 
5377 	alloc_gfp = gfp;
5378 	ac.spread_dirty_pages = false;
5379 
5380 	/*
5381 	 * Restore the original nodemask if it was potentially replaced with
5382 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5383 	 */
5384 	ac.nodemask = nodemask;
5385 
5386 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5387 
5388 out:
5389 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5390 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5391 		__free_pages(page, order);
5392 		page = NULL;
5393 	}
5394 
5395 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5396 
5397 	return page;
5398 }
5399 EXPORT_SYMBOL(__alloc_pages);
5400 
5401 /*
5402  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5403  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5404  * you need to access high mem.
5405  */
5406 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5407 {
5408 	struct page *page;
5409 
5410 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5411 	if (!page)
5412 		return 0;
5413 	return (unsigned long) page_address(page);
5414 }
5415 EXPORT_SYMBOL(__get_free_pages);
5416 
5417 unsigned long get_zeroed_page(gfp_t gfp_mask)
5418 {
5419 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5420 }
5421 EXPORT_SYMBOL(get_zeroed_page);
5422 
5423 /**
5424  * __free_pages - Free pages allocated with alloc_pages().
5425  * @page: The page pointer returned from alloc_pages().
5426  * @order: The order of the allocation.
5427  *
5428  * This function can free multi-page allocations that are not compound
5429  * pages.  It does not check that the @order passed in matches that of
5430  * the allocation, so it is easy to leak memory.  Freeing more memory
5431  * than was allocated will probably emit a warning.
5432  *
5433  * If the last reference to this page is speculative, it will be released
5434  * by put_page() which only frees the first page of a non-compound
5435  * allocation.  To prevent the remaining pages from being leaked, we free
5436  * the subsequent pages here.  If you want to use the page's reference
5437  * count to decide when to free the allocation, you should allocate a
5438  * compound page, and use put_page() instead of __free_pages().
5439  *
5440  * Context: May be called in interrupt context or while holding a normal
5441  * spinlock, but not in NMI context or while holding a raw spinlock.
5442  */
5443 void __free_pages(struct page *page, unsigned int order)
5444 {
5445 	if (put_page_testzero(page))
5446 		free_the_page(page, order);
5447 	else if (!PageHead(page))
5448 		while (order-- > 0)
5449 			free_the_page(page + (1 << order), order);
5450 }
5451 EXPORT_SYMBOL(__free_pages);
5452 
5453 void free_pages(unsigned long addr, unsigned int order)
5454 {
5455 	if (addr != 0) {
5456 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5457 		__free_pages(virt_to_page((void *)addr), order);
5458 	}
5459 }
5460 
5461 EXPORT_SYMBOL(free_pages);
5462 
5463 /*
5464  * Page Fragment:
5465  *  An arbitrary-length arbitrary-offset area of memory which resides
5466  *  within a 0 or higher order page.  Multiple fragments within that page
5467  *  are individually refcounted, in the page's reference counter.
5468  *
5469  * The page_frag functions below provide a simple allocation framework for
5470  * page fragments.  This is used by the network stack and network device
5471  * drivers to provide a backing region of memory for use as either an
5472  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5473  */
5474 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5475 					     gfp_t gfp_mask)
5476 {
5477 	struct page *page = NULL;
5478 	gfp_t gfp = gfp_mask;
5479 
5480 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5481 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5482 		    __GFP_NOMEMALLOC;
5483 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5484 				PAGE_FRAG_CACHE_MAX_ORDER);
5485 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5486 #endif
5487 	if (unlikely(!page))
5488 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5489 
5490 	nc->va = page ? page_address(page) : NULL;
5491 
5492 	return page;
5493 }
5494 
5495 void __page_frag_cache_drain(struct page *page, unsigned int count)
5496 {
5497 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5498 
5499 	if (page_ref_sub_and_test(page, count))
5500 		free_the_page(page, compound_order(page));
5501 }
5502 EXPORT_SYMBOL(__page_frag_cache_drain);
5503 
5504 void *page_frag_alloc_align(struct page_frag_cache *nc,
5505 		      unsigned int fragsz, gfp_t gfp_mask,
5506 		      unsigned int align_mask)
5507 {
5508 	unsigned int size = PAGE_SIZE;
5509 	struct page *page;
5510 	int offset;
5511 
5512 	if (unlikely(!nc->va)) {
5513 refill:
5514 		page = __page_frag_cache_refill(nc, gfp_mask);
5515 		if (!page)
5516 			return NULL;
5517 
5518 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5519 		/* if size can vary use size else just use PAGE_SIZE */
5520 		size = nc->size;
5521 #endif
5522 		/* Even if we own the page, we do not use atomic_set().
5523 		 * This would break get_page_unless_zero() users.
5524 		 */
5525 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5526 
5527 		/* reset page count bias and offset to start of new frag */
5528 		nc->pfmemalloc = page_is_pfmemalloc(page);
5529 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5530 		nc->offset = size;
5531 	}
5532 
5533 	offset = nc->offset - fragsz;
5534 	if (unlikely(offset < 0)) {
5535 		page = virt_to_page(nc->va);
5536 
5537 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5538 			goto refill;
5539 
5540 		if (unlikely(nc->pfmemalloc)) {
5541 			free_the_page(page, compound_order(page));
5542 			goto refill;
5543 		}
5544 
5545 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5546 		/* if size can vary use size else just use PAGE_SIZE */
5547 		size = nc->size;
5548 #endif
5549 		/* OK, page count is 0, we can safely set it */
5550 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5551 
5552 		/* reset page count bias and offset to start of new frag */
5553 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5554 		offset = size - fragsz;
5555 	}
5556 
5557 	nc->pagecnt_bias--;
5558 	offset &= align_mask;
5559 	nc->offset = offset;
5560 
5561 	return nc->va + offset;
5562 }
5563 EXPORT_SYMBOL(page_frag_alloc_align);
5564 
5565 /*
5566  * Frees a page fragment allocated out of either a compound or order 0 page.
5567  */
5568 void page_frag_free(void *addr)
5569 {
5570 	struct page *page = virt_to_head_page(addr);
5571 
5572 	if (unlikely(put_page_testzero(page)))
5573 		free_the_page(page, compound_order(page));
5574 }
5575 EXPORT_SYMBOL(page_frag_free);
5576 
5577 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5578 		size_t size)
5579 {
5580 	if (addr) {
5581 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5582 		unsigned long used = addr + PAGE_ALIGN(size);
5583 
5584 		split_page(virt_to_page((void *)addr), order);
5585 		while (used < alloc_end) {
5586 			free_page(used);
5587 			used += PAGE_SIZE;
5588 		}
5589 	}
5590 	return (void *)addr;
5591 }
5592 
5593 /**
5594  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5595  * @size: the number of bytes to allocate
5596  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5597  *
5598  * This function is similar to alloc_pages(), except that it allocates the
5599  * minimum number of pages to satisfy the request.  alloc_pages() can only
5600  * allocate memory in power-of-two pages.
5601  *
5602  * This function is also limited by MAX_ORDER.
5603  *
5604  * Memory allocated by this function must be released by free_pages_exact().
5605  *
5606  * Return: pointer to the allocated area or %NULL in case of error.
5607  */
5608 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5609 {
5610 	unsigned int order = get_order(size);
5611 	unsigned long addr;
5612 
5613 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5614 		gfp_mask &= ~__GFP_COMP;
5615 
5616 	addr = __get_free_pages(gfp_mask, order);
5617 	return make_alloc_exact(addr, order, size);
5618 }
5619 EXPORT_SYMBOL(alloc_pages_exact);
5620 
5621 /**
5622  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5623  *			   pages on a node.
5624  * @nid: the preferred node ID where memory should be allocated
5625  * @size: the number of bytes to allocate
5626  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5627  *
5628  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5629  * back.
5630  *
5631  * Return: pointer to the allocated area or %NULL in case of error.
5632  */
5633 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5634 {
5635 	unsigned int order = get_order(size);
5636 	struct page *p;
5637 
5638 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5639 		gfp_mask &= ~__GFP_COMP;
5640 
5641 	p = alloc_pages_node(nid, gfp_mask, order);
5642 	if (!p)
5643 		return NULL;
5644 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5645 }
5646 
5647 /**
5648  * free_pages_exact - release memory allocated via alloc_pages_exact()
5649  * @virt: the value returned by alloc_pages_exact.
5650  * @size: size of allocation, same value as passed to alloc_pages_exact().
5651  *
5652  * Release the memory allocated by a previous call to alloc_pages_exact.
5653  */
5654 void free_pages_exact(void *virt, size_t size)
5655 {
5656 	unsigned long addr = (unsigned long)virt;
5657 	unsigned long end = addr + PAGE_ALIGN(size);
5658 
5659 	while (addr < end) {
5660 		free_page(addr);
5661 		addr += PAGE_SIZE;
5662 	}
5663 }
5664 EXPORT_SYMBOL(free_pages_exact);
5665 
5666 /**
5667  * nr_free_zone_pages - count number of pages beyond high watermark
5668  * @offset: The zone index of the highest zone
5669  *
5670  * nr_free_zone_pages() counts the number of pages which are beyond the
5671  * high watermark within all zones at or below a given zone index.  For each
5672  * zone, the number of pages is calculated as:
5673  *
5674  *     nr_free_zone_pages = managed_pages - high_pages
5675  *
5676  * Return: number of pages beyond high watermark.
5677  */
5678 static unsigned long nr_free_zone_pages(int offset)
5679 {
5680 	struct zoneref *z;
5681 	struct zone *zone;
5682 
5683 	/* Just pick one node, since fallback list is circular */
5684 	unsigned long sum = 0;
5685 
5686 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5687 
5688 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5689 		unsigned long size = zone_managed_pages(zone);
5690 		unsigned long high = high_wmark_pages(zone);
5691 		if (size > high)
5692 			sum += size - high;
5693 	}
5694 
5695 	return sum;
5696 }
5697 
5698 /**
5699  * nr_free_buffer_pages - count number of pages beyond high watermark
5700  *
5701  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5702  * watermark within ZONE_DMA and ZONE_NORMAL.
5703  *
5704  * Return: number of pages beyond high watermark within ZONE_DMA and
5705  * ZONE_NORMAL.
5706  */
5707 unsigned long nr_free_buffer_pages(void)
5708 {
5709 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5710 }
5711 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5712 
5713 static inline void show_node(struct zone *zone)
5714 {
5715 	if (IS_ENABLED(CONFIG_NUMA))
5716 		printk("Node %d ", zone_to_nid(zone));
5717 }
5718 
5719 long si_mem_available(void)
5720 {
5721 	long available;
5722 	unsigned long pagecache;
5723 	unsigned long wmark_low = 0;
5724 	unsigned long pages[NR_LRU_LISTS];
5725 	unsigned long reclaimable;
5726 	struct zone *zone;
5727 	int lru;
5728 
5729 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5730 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5731 
5732 	for_each_zone(zone)
5733 		wmark_low += low_wmark_pages(zone);
5734 
5735 	/*
5736 	 * Estimate the amount of memory available for userspace allocations,
5737 	 * without causing swapping.
5738 	 */
5739 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5740 
5741 	/*
5742 	 * Not all the page cache can be freed, otherwise the system will
5743 	 * start swapping. Assume at least half of the page cache, or the
5744 	 * low watermark worth of cache, needs to stay.
5745 	 */
5746 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5747 	pagecache -= min(pagecache / 2, wmark_low);
5748 	available += pagecache;
5749 
5750 	/*
5751 	 * Part of the reclaimable slab and other kernel memory consists of
5752 	 * items that are in use, and cannot be freed. Cap this estimate at the
5753 	 * low watermark.
5754 	 */
5755 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5756 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5757 	available += reclaimable - min(reclaimable / 2, wmark_low);
5758 
5759 	if (available < 0)
5760 		available = 0;
5761 	return available;
5762 }
5763 EXPORT_SYMBOL_GPL(si_mem_available);
5764 
5765 void si_meminfo(struct sysinfo *val)
5766 {
5767 	val->totalram = totalram_pages();
5768 	val->sharedram = global_node_page_state(NR_SHMEM);
5769 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5770 	val->bufferram = nr_blockdev_pages();
5771 	val->totalhigh = totalhigh_pages();
5772 	val->freehigh = nr_free_highpages();
5773 	val->mem_unit = PAGE_SIZE;
5774 }
5775 
5776 EXPORT_SYMBOL(si_meminfo);
5777 
5778 #ifdef CONFIG_NUMA
5779 void si_meminfo_node(struct sysinfo *val, int nid)
5780 {
5781 	int zone_type;		/* needs to be signed */
5782 	unsigned long managed_pages = 0;
5783 	unsigned long managed_highpages = 0;
5784 	unsigned long free_highpages = 0;
5785 	pg_data_t *pgdat = NODE_DATA(nid);
5786 
5787 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5788 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5789 	val->totalram = managed_pages;
5790 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5791 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5792 #ifdef CONFIG_HIGHMEM
5793 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5794 		struct zone *zone = &pgdat->node_zones[zone_type];
5795 
5796 		if (is_highmem(zone)) {
5797 			managed_highpages += zone_managed_pages(zone);
5798 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5799 		}
5800 	}
5801 	val->totalhigh = managed_highpages;
5802 	val->freehigh = free_highpages;
5803 #else
5804 	val->totalhigh = managed_highpages;
5805 	val->freehigh = free_highpages;
5806 #endif
5807 	val->mem_unit = PAGE_SIZE;
5808 }
5809 #endif
5810 
5811 /*
5812  * Determine whether the node should be displayed or not, depending on whether
5813  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5814  */
5815 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5816 {
5817 	if (!(flags & SHOW_MEM_FILTER_NODES))
5818 		return false;
5819 
5820 	/*
5821 	 * no node mask - aka implicit memory numa policy. Do not bother with
5822 	 * the synchronization - read_mems_allowed_begin - because we do not
5823 	 * have to be precise here.
5824 	 */
5825 	if (!nodemask)
5826 		nodemask = &cpuset_current_mems_allowed;
5827 
5828 	return !node_isset(nid, *nodemask);
5829 }
5830 
5831 #define K(x) ((x) << (PAGE_SHIFT-10))
5832 
5833 static void show_migration_types(unsigned char type)
5834 {
5835 	static const char types[MIGRATE_TYPES] = {
5836 		[MIGRATE_UNMOVABLE]	= 'U',
5837 		[MIGRATE_MOVABLE]	= 'M',
5838 		[MIGRATE_RECLAIMABLE]	= 'E',
5839 		[MIGRATE_HIGHATOMIC]	= 'H',
5840 #ifdef CONFIG_CMA
5841 		[MIGRATE_CMA]		= 'C',
5842 #endif
5843 #ifdef CONFIG_MEMORY_ISOLATION
5844 		[MIGRATE_ISOLATE]	= 'I',
5845 #endif
5846 	};
5847 	char tmp[MIGRATE_TYPES + 1];
5848 	char *p = tmp;
5849 	int i;
5850 
5851 	for (i = 0; i < MIGRATE_TYPES; i++) {
5852 		if (type & (1 << i))
5853 			*p++ = types[i];
5854 	}
5855 
5856 	*p = '\0';
5857 	printk(KERN_CONT "(%s) ", tmp);
5858 }
5859 
5860 /*
5861  * Show free area list (used inside shift_scroll-lock stuff)
5862  * We also calculate the percentage fragmentation. We do this by counting the
5863  * memory on each free list with the exception of the first item on the list.
5864  *
5865  * Bits in @filter:
5866  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5867  *   cpuset.
5868  */
5869 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5870 {
5871 	unsigned long free_pcp = 0;
5872 	int cpu;
5873 	struct zone *zone;
5874 	pg_data_t *pgdat;
5875 
5876 	for_each_populated_zone(zone) {
5877 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5878 			continue;
5879 
5880 		for_each_online_cpu(cpu)
5881 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5882 	}
5883 
5884 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5885 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5886 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5887 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5888 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5889 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5890 		global_node_page_state(NR_ACTIVE_ANON),
5891 		global_node_page_state(NR_INACTIVE_ANON),
5892 		global_node_page_state(NR_ISOLATED_ANON),
5893 		global_node_page_state(NR_ACTIVE_FILE),
5894 		global_node_page_state(NR_INACTIVE_FILE),
5895 		global_node_page_state(NR_ISOLATED_FILE),
5896 		global_node_page_state(NR_UNEVICTABLE),
5897 		global_node_page_state(NR_FILE_DIRTY),
5898 		global_node_page_state(NR_WRITEBACK),
5899 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5900 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5901 		global_node_page_state(NR_FILE_MAPPED),
5902 		global_node_page_state(NR_SHMEM),
5903 		global_node_page_state(NR_PAGETABLE),
5904 		global_zone_page_state(NR_BOUNCE),
5905 		global_zone_page_state(NR_FREE_PAGES),
5906 		free_pcp,
5907 		global_zone_page_state(NR_FREE_CMA_PAGES));
5908 
5909 	for_each_online_pgdat(pgdat) {
5910 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5911 			continue;
5912 
5913 		printk("Node %d"
5914 			" active_anon:%lukB"
5915 			" inactive_anon:%lukB"
5916 			" active_file:%lukB"
5917 			" inactive_file:%lukB"
5918 			" unevictable:%lukB"
5919 			" isolated(anon):%lukB"
5920 			" isolated(file):%lukB"
5921 			" mapped:%lukB"
5922 			" dirty:%lukB"
5923 			" writeback:%lukB"
5924 			" shmem:%lukB"
5925 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5926 			" shmem_thp: %lukB"
5927 			" shmem_pmdmapped: %lukB"
5928 			" anon_thp: %lukB"
5929 #endif
5930 			" writeback_tmp:%lukB"
5931 			" kernel_stack:%lukB"
5932 #ifdef CONFIG_SHADOW_CALL_STACK
5933 			" shadow_call_stack:%lukB"
5934 #endif
5935 			" pagetables:%lukB"
5936 			" all_unreclaimable? %s"
5937 			"\n",
5938 			pgdat->node_id,
5939 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5940 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5941 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5942 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5943 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5944 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5945 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5946 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5947 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5948 			K(node_page_state(pgdat, NR_WRITEBACK)),
5949 			K(node_page_state(pgdat, NR_SHMEM)),
5950 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5951 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
5952 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5953 			K(node_page_state(pgdat, NR_ANON_THPS)),
5954 #endif
5955 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5956 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
5957 #ifdef CONFIG_SHADOW_CALL_STACK
5958 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
5959 #endif
5960 			K(node_page_state(pgdat, NR_PAGETABLE)),
5961 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5962 				"yes" : "no");
5963 	}
5964 
5965 	for_each_populated_zone(zone) {
5966 		int i;
5967 
5968 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5969 			continue;
5970 
5971 		free_pcp = 0;
5972 		for_each_online_cpu(cpu)
5973 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5974 
5975 		show_node(zone);
5976 		printk(KERN_CONT
5977 			"%s"
5978 			" free:%lukB"
5979 			" min:%lukB"
5980 			" low:%lukB"
5981 			" high:%lukB"
5982 			" reserved_highatomic:%luKB"
5983 			" active_anon:%lukB"
5984 			" inactive_anon:%lukB"
5985 			" active_file:%lukB"
5986 			" inactive_file:%lukB"
5987 			" unevictable:%lukB"
5988 			" writepending:%lukB"
5989 			" present:%lukB"
5990 			" managed:%lukB"
5991 			" mlocked:%lukB"
5992 			" bounce:%lukB"
5993 			" free_pcp:%lukB"
5994 			" local_pcp:%ukB"
5995 			" free_cma:%lukB"
5996 			"\n",
5997 			zone->name,
5998 			K(zone_page_state(zone, NR_FREE_PAGES)),
5999 			K(min_wmark_pages(zone)),
6000 			K(low_wmark_pages(zone)),
6001 			K(high_wmark_pages(zone)),
6002 			K(zone->nr_reserved_highatomic),
6003 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6004 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6005 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6006 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6007 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6008 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6009 			K(zone->present_pages),
6010 			K(zone_managed_pages(zone)),
6011 			K(zone_page_state(zone, NR_MLOCK)),
6012 			K(zone_page_state(zone, NR_BOUNCE)),
6013 			K(free_pcp),
6014 			K(this_cpu_read(zone->per_cpu_pageset->count)),
6015 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6016 		printk("lowmem_reserve[]:");
6017 		for (i = 0; i < MAX_NR_ZONES; i++)
6018 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6019 		printk(KERN_CONT "\n");
6020 	}
6021 
6022 	for_each_populated_zone(zone) {
6023 		unsigned int order;
6024 		unsigned long nr[MAX_ORDER], flags, total = 0;
6025 		unsigned char types[MAX_ORDER];
6026 
6027 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6028 			continue;
6029 		show_node(zone);
6030 		printk(KERN_CONT "%s: ", zone->name);
6031 
6032 		spin_lock_irqsave(&zone->lock, flags);
6033 		for (order = 0; order < MAX_ORDER; order++) {
6034 			struct free_area *area = &zone->free_area[order];
6035 			int type;
6036 
6037 			nr[order] = area->nr_free;
6038 			total += nr[order] << order;
6039 
6040 			types[order] = 0;
6041 			for (type = 0; type < MIGRATE_TYPES; type++) {
6042 				if (!free_area_empty(area, type))
6043 					types[order] |= 1 << type;
6044 			}
6045 		}
6046 		spin_unlock_irqrestore(&zone->lock, flags);
6047 		for (order = 0; order < MAX_ORDER; order++) {
6048 			printk(KERN_CONT "%lu*%lukB ",
6049 			       nr[order], K(1UL) << order);
6050 			if (nr[order])
6051 				show_migration_types(types[order]);
6052 		}
6053 		printk(KERN_CONT "= %lukB\n", K(total));
6054 	}
6055 
6056 	hugetlb_show_meminfo();
6057 
6058 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6059 
6060 	show_swap_cache_info();
6061 }
6062 
6063 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6064 {
6065 	zoneref->zone = zone;
6066 	zoneref->zone_idx = zone_idx(zone);
6067 }
6068 
6069 /*
6070  * Builds allocation fallback zone lists.
6071  *
6072  * Add all populated zones of a node to the zonelist.
6073  */
6074 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6075 {
6076 	struct zone *zone;
6077 	enum zone_type zone_type = MAX_NR_ZONES;
6078 	int nr_zones = 0;
6079 
6080 	do {
6081 		zone_type--;
6082 		zone = pgdat->node_zones + zone_type;
6083 		if (managed_zone(zone)) {
6084 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6085 			check_highest_zone(zone_type);
6086 		}
6087 	} while (zone_type);
6088 
6089 	return nr_zones;
6090 }
6091 
6092 #ifdef CONFIG_NUMA
6093 
6094 static int __parse_numa_zonelist_order(char *s)
6095 {
6096 	/*
6097 	 * We used to support different zonelists modes but they turned
6098 	 * out to be just not useful. Let's keep the warning in place
6099 	 * if somebody still use the cmd line parameter so that we do
6100 	 * not fail it silently
6101 	 */
6102 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6103 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
6104 		return -EINVAL;
6105 	}
6106 	return 0;
6107 }
6108 
6109 char numa_zonelist_order[] = "Node";
6110 
6111 /*
6112  * sysctl handler for numa_zonelist_order
6113  */
6114 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6115 		void *buffer, size_t *length, loff_t *ppos)
6116 {
6117 	if (write)
6118 		return __parse_numa_zonelist_order(buffer);
6119 	return proc_dostring(table, write, buffer, length, ppos);
6120 }
6121 
6122 
6123 #define MAX_NODE_LOAD (nr_online_nodes)
6124 static int node_load[MAX_NUMNODES];
6125 
6126 /**
6127  * find_next_best_node - find the next node that should appear in a given node's fallback list
6128  * @node: node whose fallback list we're appending
6129  * @used_node_mask: nodemask_t of already used nodes
6130  *
6131  * We use a number of factors to determine which is the next node that should
6132  * appear on a given node's fallback list.  The node should not have appeared
6133  * already in @node's fallback list, and it should be the next closest node
6134  * according to the distance array (which contains arbitrary distance values
6135  * from each node to each node in the system), and should also prefer nodes
6136  * with no CPUs, since presumably they'll have very little allocation pressure
6137  * on them otherwise.
6138  *
6139  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6140  */
6141 static int find_next_best_node(int node, nodemask_t *used_node_mask)
6142 {
6143 	int n, val;
6144 	int min_val = INT_MAX;
6145 	int best_node = NUMA_NO_NODE;
6146 
6147 	/* Use the local node if we haven't already */
6148 	if (!node_isset(node, *used_node_mask)) {
6149 		node_set(node, *used_node_mask);
6150 		return node;
6151 	}
6152 
6153 	for_each_node_state(n, N_MEMORY) {
6154 
6155 		/* Don't want a node to appear more than once */
6156 		if (node_isset(n, *used_node_mask))
6157 			continue;
6158 
6159 		/* Use the distance array to find the distance */
6160 		val = node_distance(node, n);
6161 
6162 		/* Penalize nodes under us ("prefer the next node") */
6163 		val += (n < node);
6164 
6165 		/* Give preference to headless and unused nodes */
6166 		if (!cpumask_empty(cpumask_of_node(n)))
6167 			val += PENALTY_FOR_NODE_WITH_CPUS;
6168 
6169 		/* Slight preference for less loaded node */
6170 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6171 		val += node_load[n];
6172 
6173 		if (val < min_val) {
6174 			min_val = val;
6175 			best_node = n;
6176 		}
6177 	}
6178 
6179 	if (best_node >= 0)
6180 		node_set(best_node, *used_node_mask);
6181 
6182 	return best_node;
6183 }
6184 
6185 
6186 /*
6187  * Build zonelists ordered by node and zones within node.
6188  * This results in maximum locality--normal zone overflows into local
6189  * DMA zone, if any--but risks exhausting DMA zone.
6190  */
6191 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6192 		unsigned nr_nodes)
6193 {
6194 	struct zoneref *zonerefs;
6195 	int i;
6196 
6197 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6198 
6199 	for (i = 0; i < nr_nodes; i++) {
6200 		int nr_zones;
6201 
6202 		pg_data_t *node = NODE_DATA(node_order[i]);
6203 
6204 		nr_zones = build_zonerefs_node(node, zonerefs);
6205 		zonerefs += nr_zones;
6206 	}
6207 	zonerefs->zone = NULL;
6208 	zonerefs->zone_idx = 0;
6209 }
6210 
6211 /*
6212  * Build gfp_thisnode zonelists
6213  */
6214 static void build_thisnode_zonelists(pg_data_t *pgdat)
6215 {
6216 	struct zoneref *zonerefs;
6217 	int nr_zones;
6218 
6219 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6220 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6221 	zonerefs += nr_zones;
6222 	zonerefs->zone = NULL;
6223 	zonerefs->zone_idx = 0;
6224 }
6225 
6226 /*
6227  * Build zonelists ordered by zone and nodes within zones.
6228  * This results in conserving DMA zone[s] until all Normal memory is
6229  * exhausted, but results in overflowing to remote node while memory
6230  * may still exist in local DMA zone.
6231  */
6232 
6233 static void build_zonelists(pg_data_t *pgdat)
6234 {
6235 	static int node_order[MAX_NUMNODES];
6236 	int node, load, nr_nodes = 0;
6237 	nodemask_t used_mask = NODE_MASK_NONE;
6238 	int local_node, prev_node;
6239 
6240 	/* NUMA-aware ordering of nodes */
6241 	local_node = pgdat->node_id;
6242 	load = nr_online_nodes;
6243 	prev_node = local_node;
6244 
6245 	memset(node_order, 0, sizeof(node_order));
6246 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6247 		/*
6248 		 * We don't want to pressure a particular node.
6249 		 * So adding penalty to the first node in same
6250 		 * distance group to make it round-robin.
6251 		 */
6252 		if (node_distance(local_node, node) !=
6253 		    node_distance(local_node, prev_node))
6254 			node_load[node] = load;
6255 
6256 		node_order[nr_nodes++] = node;
6257 		prev_node = node;
6258 		load--;
6259 	}
6260 
6261 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6262 	build_thisnode_zonelists(pgdat);
6263 }
6264 
6265 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6266 /*
6267  * Return node id of node used for "local" allocations.
6268  * I.e., first node id of first zone in arg node's generic zonelist.
6269  * Used for initializing percpu 'numa_mem', which is used primarily
6270  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6271  */
6272 int local_memory_node(int node)
6273 {
6274 	struct zoneref *z;
6275 
6276 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6277 				   gfp_zone(GFP_KERNEL),
6278 				   NULL);
6279 	return zone_to_nid(z->zone);
6280 }
6281 #endif
6282 
6283 static void setup_min_unmapped_ratio(void);
6284 static void setup_min_slab_ratio(void);
6285 #else	/* CONFIG_NUMA */
6286 
6287 static void build_zonelists(pg_data_t *pgdat)
6288 {
6289 	int node, local_node;
6290 	struct zoneref *zonerefs;
6291 	int nr_zones;
6292 
6293 	local_node = pgdat->node_id;
6294 
6295 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6296 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6297 	zonerefs += nr_zones;
6298 
6299 	/*
6300 	 * Now we build the zonelist so that it contains the zones
6301 	 * of all the other nodes.
6302 	 * We don't want to pressure a particular node, so when
6303 	 * building the zones for node N, we make sure that the
6304 	 * zones coming right after the local ones are those from
6305 	 * node N+1 (modulo N)
6306 	 */
6307 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6308 		if (!node_online(node))
6309 			continue;
6310 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6311 		zonerefs += nr_zones;
6312 	}
6313 	for (node = 0; node < local_node; node++) {
6314 		if (!node_online(node))
6315 			continue;
6316 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6317 		zonerefs += nr_zones;
6318 	}
6319 
6320 	zonerefs->zone = NULL;
6321 	zonerefs->zone_idx = 0;
6322 }
6323 
6324 #endif	/* CONFIG_NUMA */
6325 
6326 /*
6327  * Boot pageset table. One per cpu which is going to be used for all
6328  * zones and all nodes. The parameters will be set in such a way
6329  * that an item put on a list will immediately be handed over to
6330  * the buddy list. This is safe since pageset manipulation is done
6331  * with interrupts disabled.
6332  *
6333  * The boot_pagesets must be kept even after bootup is complete for
6334  * unused processors and/or zones. They do play a role for bootstrapping
6335  * hotplugged processors.
6336  *
6337  * zoneinfo_show() and maybe other functions do
6338  * not check if the processor is online before following the pageset pointer.
6339  * Other parts of the kernel may not check if the zone is available.
6340  */
6341 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6342 /* These effectively disable the pcplists in the boot pageset completely */
6343 #define BOOT_PAGESET_HIGH	0
6344 #define BOOT_PAGESET_BATCH	1
6345 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6346 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6347 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6348 
6349 static void __build_all_zonelists(void *data)
6350 {
6351 	int nid;
6352 	int __maybe_unused cpu;
6353 	pg_data_t *self = data;
6354 	static DEFINE_SPINLOCK(lock);
6355 
6356 	spin_lock(&lock);
6357 
6358 #ifdef CONFIG_NUMA
6359 	memset(node_load, 0, sizeof(node_load));
6360 #endif
6361 
6362 	/*
6363 	 * This node is hotadded and no memory is yet present.   So just
6364 	 * building zonelists is fine - no need to touch other nodes.
6365 	 */
6366 	if (self && !node_online(self->node_id)) {
6367 		build_zonelists(self);
6368 	} else {
6369 		for_each_online_node(nid) {
6370 			pg_data_t *pgdat = NODE_DATA(nid);
6371 
6372 			build_zonelists(pgdat);
6373 		}
6374 
6375 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6376 		/*
6377 		 * We now know the "local memory node" for each node--
6378 		 * i.e., the node of the first zone in the generic zonelist.
6379 		 * Set up numa_mem percpu variable for on-line cpus.  During
6380 		 * boot, only the boot cpu should be on-line;  we'll init the
6381 		 * secondary cpus' numa_mem as they come on-line.  During
6382 		 * node/memory hotplug, we'll fixup all on-line cpus.
6383 		 */
6384 		for_each_online_cpu(cpu)
6385 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6386 #endif
6387 	}
6388 
6389 	spin_unlock(&lock);
6390 }
6391 
6392 static noinline void __init
6393 build_all_zonelists_init(void)
6394 {
6395 	int cpu;
6396 
6397 	__build_all_zonelists(NULL);
6398 
6399 	/*
6400 	 * Initialize the boot_pagesets that are going to be used
6401 	 * for bootstrapping processors. The real pagesets for
6402 	 * each zone will be allocated later when the per cpu
6403 	 * allocator is available.
6404 	 *
6405 	 * boot_pagesets are used also for bootstrapping offline
6406 	 * cpus if the system is already booted because the pagesets
6407 	 * are needed to initialize allocators on a specific cpu too.
6408 	 * F.e. the percpu allocator needs the page allocator which
6409 	 * needs the percpu allocator in order to allocate its pagesets
6410 	 * (a chicken-egg dilemma).
6411 	 */
6412 	for_each_possible_cpu(cpu)
6413 		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6414 
6415 	mminit_verify_zonelist();
6416 	cpuset_init_current_mems_allowed();
6417 }
6418 
6419 /*
6420  * unless system_state == SYSTEM_BOOTING.
6421  *
6422  * __ref due to call of __init annotated helper build_all_zonelists_init
6423  * [protected by SYSTEM_BOOTING].
6424  */
6425 void __ref build_all_zonelists(pg_data_t *pgdat)
6426 {
6427 	unsigned long vm_total_pages;
6428 
6429 	if (system_state == SYSTEM_BOOTING) {
6430 		build_all_zonelists_init();
6431 	} else {
6432 		__build_all_zonelists(pgdat);
6433 		/* cpuset refresh routine should be here */
6434 	}
6435 	/* Get the number of free pages beyond high watermark in all zones. */
6436 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6437 	/*
6438 	 * Disable grouping by mobility if the number of pages in the
6439 	 * system is too low to allow the mechanism to work. It would be
6440 	 * more accurate, but expensive to check per-zone. This check is
6441 	 * made on memory-hotadd so a system can start with mobility
6442 	 * disabled and enable it later
6443 	 */
6444 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6445 		page_group_by_mobility_disabled = 1;
6446 	else
6447 		page_group_by_mobility_disabled = 0;
6448 
6449 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6450 		nr_online_nodes,
6451 		page_group_by_mobility_disabled ? "off" : "on",
6452 		vm_total_pages);
6453 #ifdef CONFIG_NUMA
6454 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6455 #endif
6456 }
6457 
6458 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6459 static bool __meminit
6460 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6461 {
6462 	static struct memblock_region *r;
6463 
6464 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6465 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6466 			for_each_mem_region(r) {
6467 				if (*pfn < memblock_region_memory_end_pfn(r))
6468 					break;
6469 			}
6470 		}
6471 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6472 		    memblock_is_mirror(r)) {
6473 			*pfn = memblock_region_memory_end_pfn(r);
6474 			return true;
6475 		}
6476 	}
6477 	return false;
6478 }
6479 
6480 /*
6481  * Initially all pages are reserved - free ones are freed
6482  * up by memblock_free_all() once the early boot process is
6483  * done. Non-atomic initialization, single-pass.
6484  *
6485  * All aligned pageblocks are initialized to the specified migratetype
6486  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6487  * zone stats (e.g., nr_isolate_pageblock) are touched.
6488  */
6489 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6490 		unsigned long start_pfn, unsigned long zone_end_pfn,
6491 		enum meminit_context context,
6492 		struct vmem_altmap *altmap, int migratetype)
6493 {
6494 	unsigned long pfn, end_pfn = start_pfn + size;
6495 	struct page *page;
6496 
6497 	if (highest_memmap_pfn < end_pfn - 1)
6498 		highest_memmap_pfn = end_pfn - 1;
6499 
6500 #ifdef CONFIG_ZONE_DEVICE
6501 	/*
6502 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6503 	 * memory. We limit the total number of pages to initialize to just
6504 	 * those that might contain the memory mapping. We will defer the
6505 	 * ZONE_DEVICE page initialization until after we have released
6506 	 * the hotplug lock.
6507 	 */
6508 	if (zone == ZONE_DEVICE) {
6509 		if (!altmap)
6510 			return;
6511 
6512 		if (start_pfn == altmap->base_pfn)
6513 			start_pfn += altmap->reserve;
6514 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6515 	}
6516 #endif
6517 
6518 	for (pfn = start_pfn; pfn < end_pfn; ) {
6519 		/*
6520 		 * There can be holes in boot-time mem_map[]s handed to this
6521 		 * function.  They do not exist on hotplugged memory.
6522 		 */
6523 		if (context == MEMINIT_EARLY) {
6524 			if (overlap_memmap_init(zone, &pfn))
6525 				continue;
6526 			if (defer_init(nid, pfn, zone_end_pfn))
6527 				break;
6528 		}
6529 
6530 		page = pfn_to_page(pfn);
6531 		__init_single_page(page, pfn, zone, nid);
6532 		if (context == MEMINIT_HOTPLUG)
6533 			__SetPageReserved(page);
6534 
6535 		/*
6536 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6537 		 * such that unmovable allocations won't be scattered all
6538 		 * over the place during system boot.
6539 		 */
6540 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6541 			set_pageblock_migratetype(page, migratetype);
6542 			cond_resched();
6543 		}
6544 		pfn++;
6545 	}
6546 }
6547 
6548 #ifdef CONFIG_ZONE_DEVICE
6549 void __ref memmap_init_zone_device(struct zone *zone,
6550 				   unsigned long start_pfn,
6551 				   unsigned long nr_pages,
6552 				   struct dev_pagemap *pgmap)
6553 {
6554 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6555 	struct pglist_data *pgdat = zone->zone_pgdat;
6556 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6557 	unsigned long zone_idx = zone_idx(zone);
6558 	unsigned long start = jiffies;
6559 	int nid = pgdat->node_id;
6560 
6561 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6562 		return;
6563 
6564 	/*
6565 	 * The call to memmap_init should have already taken care
6566 	 * of the pages reserved for the memmap, so we can just jump to
6567 	 * the end of that region and start processing the device pages.
6568 	 */
6569 	if (altmap) {
6570 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6571 		nr_pages = end_pfn - start_pfn;
6572 	}
6573 
6574 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6575 		struct page *page = pfn_to_page(pfn);
6576 
6577 		__init_single_page(page, pfn, zone_idx, nid);
6578 
6579 		/*
6580 		 * Mark page reserved as it will need to wait for onlining
6581 		 * phase for it to be fully associated with a zone.
6582 		 *
6583 		 * We can use the non-atomic __set_bit operation for setting
6584 		 * the flag as we are still initializing the pages.
6585 		 */
6586 		__SetPageReserved(page);
6587 
6588 		/*
6589 		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6590 		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6591 		 * ever freed or placed on a driver-private list.
6592 		 */
6593 		page->pgmap = pgmap;
6594 		page->zone_device_data = NULL;
6595 
6596 		/*
6597 		 * Mark the block movable so that blocks are reserved for
6598 		 * movable at startup. This will force kernel allocations
6599 		 * to reserve their blocks rather than leaking throughout
6600 		 * the address space during boot when many long-lived
6601 		 * kernel allocations are made.
6602 		 *
6603 		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6604 		 * because this is done early in section_activate()
6605 		 */
6606 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6607 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6608 			cond_resched();
6609 		}
6610 	}
6611 
6612 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6613 		nr_pages, jiffies_to_msecs(jiffies - start));
6614 }
6615 
6616 #endif
6617 static void __meminit zone_init_free_lists(struct zone *zone)
6618 {
6619 	unsigned int order, t;
6620 	for_each_migratetype_order(order, t) {
6621 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6622 		zone->free_area[order].nr_free = 0;
6623 	}
6624 }
6625 
6626 #if !defined(CONFIG_FLATMEM)
6627 /*
6628  * Only struct pages that correspond to ranges defined by memblock.memory
6629  * are zeroed and initialized by going through __init_single_page() during
6630  * memmap_init_zone_range().
6631  *
6632  * But, there could be struct pages that correspond to holes in
6633  * memblock.memory. This can happen because of the following reasons:
6634  * - physical memory bank size is not necessarily the exact multiple of the
6635  *   arbitrary section size
6636  * - early reserved memory may not be listed in memblock.memory
6637  * - memory layouts defined with memmap= kernel parameter may not align
6638  *   nicely with memmap sections
6639  *
6640  * Explicitly initialize those struct pages so that:
6641  * - PG_Reserved is set
6642  * - zone and node links point to zone and node that span the page if the
6643  *   hole is in the middle of a zone
6644  * - zone and node links point to adjacent zone/node if the hole falls on
6645  *   the zone boundary; the pages in such holes will be prepended to the
6646  *   zone/node above the hole except for the trailing pages in the last
6647  *   section that will be appended to the zone/node below.
6648  */
6649 static void __init init_unavailable_range(unsigned long spfn,
6650 					  unsigned long epfn,
6651 					  int zone, int node)
6652 {
6653 	unsigned long pfn;
6654 	u64 pgcnt = 0;
6655 
6656 	for (pfn = spfn; pfn < epfn; pfn++) {
6657 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6658 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6659 				+ pageblock_nr_pages - 1;
6660 			continue;
6661 		}
6662 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
6663 		__SetPageReserved(pfn_to_page(pfn));
6664 		pgcnt++;
6665 	}
6666 
6667 	if (pgcnt)
6668 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6669 			node, zone_names[zone], pgcnt);
6670 }
6671 #else
6672 static inline void init_unavailable_range(unsigned long spfn,
6673 					  unsigned long epfn,
6674 					  int zone, int node)
6675 {
6676 }
6677 #endif
6678 
6679 static void __init memmap_init_zone_range(struct zone *zone,
6680 					  unsigned long start_pfn,
6681 					  unsigned long end_pfn,
6682 					  unsigned long *hole_pfn)
6683 {
6684 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6685 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6686 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6687 
6688 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6689 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6690 
6691 	if (start_pfn >= end_pfn)
6692 		return;
6693 
6694 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6695 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6696 
6697 	if (*hole_pfn < start_pfn)
6698 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6699 
6700 	*hole_pfn = end_pfn;
6701 }
6702 
6703 static void __init memmap_init(void)
6704 {
6705 	unsigned long start_pfn, end_pfn;
6706 	unsigned long hole_pfn = 0;
6707 	int i, j, zone_id, nid;
6708 
6709 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6710 		struct pglist_data *node = NODE_DATA(nid);
6711 
6712 		for (j = 0; j < MAX_NR_ZONES; j++) {
6713 			struct zone *zone = node->node_zones + j;
6714 
6715 			if (!populated_zone(zone))
6716 				continue;
6717 
6718 			memmap_init_zone_range(zone, start_pfn, end_pfn,
6719 					       &hole_pfn);
6720 			zone_id = j;
6721 		}
6722 	}
6723 
6724 #ifdef CONFIG_SPARSEMEM
6725 	/*
6726 	 * Initialize the memory map for hole in the range [memory_end,
6727 	 * section_end].
6728 	 * Append the pages in this hole to the highest zone in the last
6729 	 * node.
6730 	 * The call to init_unavailable_range() is outside the ifdef to
6731 	 * silence the compiler warining about zone_id set but not used;
6732 	 * for FLATMEM it is a nop anyway
6733 	 */
6734 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6735 	if (hole_pfn < end_pfn)
6736 #endif
6737 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
6738 }
6739 
6740 static int zone_batchsize(struct zone *zone)
6741 {
6742 #ifdef CONFIG_MMU
6743 	int batch;
6744 
6745 	/*
6746 	 * The number of pages to batch allocate is either ~0.1%
6747 	 * of the zone or 1MB, whichever is smaller. The batch
6748 	 * size is striking a balance between allocation latency
6749 	 * and zone lock contention.
6750 	 */
6751 	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
6752 	batch /= 4;		/* We effectively *= 4 below */
6753 	if (batch < 1)
6754 		batch = 1;
6755 
6756 	/*
6757 	 * Clamp the batch to a 2^n - 1 value. Having a power
6758 	 * of 2 value was found to be more likely to have
6759 	 * suboptimal cache aliasing properties in some cases.
6760 	 *
6761 	 * For example if 2 tasks are alternately allocating
6762 	 * batches of pages, one task can end up with a lot
6763 	 * of pages of one half of the possible page colors
6764 	 * and the other with pages of the other colors.
6765 	 */
6766 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6767 
6768 	return batch;
6769 
6770 #else
6771 	/* The deferral and batching of frees should be suppressed under NOMMU
6772 	 * conditions.
6773 	 *
6774 	 * The problem is that NOMMU needs to be able to allocate large chunks
6775 	 * of contiguous memory as there's no hardware page translation to
6776 	 * assemble apparent contiguous memory from discontiguous pages.
6777 	 *
6778 	 * Queueing large contiguous runs of pages for batching, however,
6779 	 * causes the pages to actually be freed in smaller chunks.  As there
6780 	 * can be a significant delay between the individual batches being
6781 	 * recycled, this leads to the once large chunks of space being
6782 	 * fragmented and becoming unavailable for high-order allocations.
6783 	 */
6784 	return 0;
6785 #endif
6786 }
6787 
6788 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
6789 {
6790 #ifdef CONFIG_MMU
6791 	int high;
6792 	int nr_split_cpus;
6793 	unsigned long total_pages;
6794 
6795 	if (!percpu_pagelist_high_fraction) {
6796 		/*
6797 		 * By default, the high value of the pcp is based on the zone
6798 		 * low watermark so that if they are full then background
6799 		 * reclaim will not be started prematurely.
6800 		 */
6801 		total_pages = low_wmark_pages(zone);
6802 	} else {
6803 		/*
6804 		 * If percpu_pagelist_high_fraction is configured, the high
6805 		 * value is based on a fraction of the managed pages in the
6806 		 * zone.
6807 		 */
6808 		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
6809 	}
6810 
6811 	/*
6812 	 * Split the high value across all online CPUs local to the zone. Note
6813 	 * that early in boot that CPUs may not be online yet and that during
6814 	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
6815 	 * onlined. For memory nodes that have no CPUs, split pcp->high across
6816 	 * all online CPUs to mitigate the risk that reclaim is triggered
6817 	 * prematurely due to pages stored on pcp lists.
6818 	 */
6819 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
6820 	if (!nr_split_cpus)
6821 		nr_split_cpus = num_online_cpus();
6822 	high = total_pages / nr_split_cpus;
6823 
6824 	/*
6825 	 * Ensure high is at least batch*4. The multiple is based on the
6826 	 * historical relationship between high and batch.
6827 	 */
6828 	high = max(high, batch << 2);
6829 
6830 	return high;
6831 #else
6832 	return 0;
6833 #endif
6834 }
6835 
6836 /*
6837  * pcp->high and pcp->batch values are related and generally batch is lower
6838  * than high. They are also related to pcp->count such that count is lower
6839  * than high, and as soon as it reaches high, the pcplist is flushed.
6840  *
6841  * However, guaranteeing these relations at all times would require e.g. write
6842  * barriers here but also careful usage of read barriers at the read side, and
6843  * thus be prone to error and bad for performance. Thus the update only prevents
6844  * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6845  * can cope with those fields changing asynchronously, and fully trust only the
6846  * pcp->count field on the local CPU with interrupts disabled.
6847  *
6848  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6849  * outside of boot time (or some other assurance that no concurrent updaters
6850  * exist).
6851  */
6852 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6853 		unsigned long batch)
6854 {
6855 	WRITE_ONCE(pcp->batch, batch);
6856 	WRITE_ONCE(pcp->high, high);
6857 }
6858 
6859 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6860 {
6861 	int pindex;
6862 
6863 	memset(pcp, 0, sizeof(*pcp));
6864 	memset(pzstats, 0, sizeof(*pzstats));
6865 
6866 	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6867 		INIT_LIST_HEAD(&pcp->lists[pindex]);
6868 
6869 	/*
6870 	 * Set batch and high values safe for a boot pageset. A true percpu
6871 	 * pageset's initialization will update them subsequently. Here we don't
6872 	 * need to be as careful as pageset_update() as nobody can access the
6873 	 * pageset yet.
6874 	 */
6875 	pcp->high = BOOT_PAGESET_HIGH;
6876 	pcp->batch = BOOT_PAGESET_BATCH;
6877 	pcp->free_factor = 0;
6878 }
6879 
6880 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
6881 		unsigned long batch)
6882 {
6883 	struct per_cpu_pages *pcp;
6884 	int cpu;
6885 
6886 	for_each_possible_cpu(cpu) {
6887 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6888 		pageset_update(pcp, high, batch);
6889 	}
6890 }
6891 
6892 /*
6893  * Calculate and set new high and batch values for all per-cpu pagesets of a
6894  * zone based on the zone's size.
6895  */
6896 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6897 {
6898 	int new_high, new_batch;
6899 
6900 	new_batch = max(1, zone_batchsize(zone));
6901 	new_high = zone_highsize(zone, new_batch, cpu_online);
6902 
6903 	if (zone->pageset_high == new_high &&
6904 	    zone->pageset_batch == new_batch)
6905 		return;
6906 
6907 	zone->pageset_high = new_high;
6908 	zone->pageset_batch = new_batch;
6909 
6910 	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
6911 }
6912 
6913 void __meminit setup_zone_pageset(struct zone *zone)
6914 {
6915 	int cpu;
6916 
6917 	/* Size may be 0 on !SMP && !NUMA */
6918 	if (sizeof(struct per_cpu_zonestat) > 0)
6919 		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6920 
6921 	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6922 	for_each_possible_cpu(cpu) {
6923 		struct per_cpu_pages *pcp;
6924 		struct per_cpu_zonestat *pzstats;
6925 
6926 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6927 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6928 		per_cpu_pages_init(pcp, pzstats);
6929 	}
6930 
6931 	zone_set_pageset_high_and_batch(zone, 0);
6932 }
6933 
6934 /*
6935  * Allocate per cpu pagesets and initialize them.
6936  * Before this call only boot pagesets were available.
6937  */
6938 void __init setup_per_cpu_pageset(void)
6939 {
6940 	struct pglist_data *pgdat;
6941 	struct zone *zone;
6942 	int __maybe_unused cpu;
6943 
6944 	for_each_populated_zone(zone)
6945 		setup_zone_pageset(zone);
6946 
6947 #ifdef CONFIG_NUMA
6948 	/*
6949 	 * Unpopulated zones continue using the boot pagesets.
6950 	 * The numa stats for these pagesets need to be reset.
6951 	 * Otherwise, they will end up skewing the stats of
6952 	 * the nodes these zones are associated with.
6953 	 */
6954 	for_each_possible_cpu(cpu) {
6955 		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6956 		memset(pzstats->vm_numa_event, 0,
6957 		       sizeof(pzstats->vm_numa_event));
6958 	}
6959 #endif
6960 
6961 	for_each_online_pgdat(pgdat)
6962 		pgdat->per_cpu_nodestats =
6963 			alloc_percpu(struct per_cpu_nodestat);
6964 }
6965 
6966 static __meminit void zone_pcp_init(struct zone *zone)
6967 {
6968 	/*
6969 	 * per cpu subsystem is not up at this point. The following code
6970 	 * relies on the ability of the linker to provide the
6971 	 * offset of a (static) per cpu variable into the per cpu area.
6972 	 */
6973 	zone->per_cpu_pageset = &boot_pageset;
6974 	zone->per_cpu_zonestats = &boot_zonestats;
6975 	zone->pageset_high = BOOT_PAGESET_HIGH;
6976 	zone->pageset_batch = BOOT_PAGESET_BATCH;
6977 
6978 	if (populated_zone(zone))
6979 		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
6980 			 zone->present_pages, zone_batchsize(zone));
6981 }
6982 
6983 void __meminit init_currently_empty_zone(struct zone *zone,
6984 					unsigned long zone_start_pfn,
6985 					unsigned long size)
6986 {
6987 	struct pglist_data *pgdat = zone->zone_pgdat;
6988 	int zone_idx = zone_idx(zone) + 1;
6989 
6990 	if (zone_idx > pgdat->nr_zones)
6991 		pgdat->nr_zones = zone_idx;
6992 
6993 	zone->zone_start_pfn = zone_start_pfn;
6994 
6995 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
6996 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
6997 			pgdat->node_id,
6998 			(unsigned long)zone_idx(zone),
6999 			zone_start_pfn, (zone_start_pfn + size));
7000 
7001 	zone_init_free_lists(zone);
7002 	zone->initialized = 1;
7003 }
7004 
7005 /**
7006  * get_pfn_range_for_nid - Return the start and end page frames for a node
7007  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7008  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7009  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7010  *
7011  * It returns the start and end page frame of a node based on information
7012  * provided by memblock_set_node(). If called for a node
7013  * with no available memory, a warning is printed and the start and end
7014  * PFNs will be 0.
7015  */
7016 void __init get_pfn_range_for_nid(unsigned int nid,
7017 			unsigned long *start_pfn, unsigned long *end_pfn)
7018 {
7019 	unsigned long this_start_pfn, this_end_pfn;
7020 	int i;
7021 
7022 	*start_pfn = -1UL;
7023 	*end_pfn = 0;
7024 
7025 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7026 		*start_pfn = min(*start_pfn, this_start_pfn);
7027 		*end_pfn = max(*end_pfn, this_end_pfn);
7028 	}
7029 
7030 	if (*start_pfn == -1UL)
7031 		*start_pfn = 0;
7032 }
7033 
7034 /*
7035  * This finds a zone that can be used for ZONE_MOVABLE pages. The
7036  * assumption is made that zones within a node are ordered in monotonic
7037  * increasing memory addresses so that the "highest" populated zone is used
7038  */
7039 static void __init find_usable_zone_for_movable(void)
7040 {
7041 	int zone_index;
7042 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7043 		if (zone_index == ZONE_MOVABLE)
7044 			continue;
7045 
7046 		if (arch_zone_highest_possible_pfn[zone_index] >
7047 				arch_zone_lowest_possible_pfn[zone_index])
7048 			break;
7049 	}
7050 
7051 	VM_BUG_ON(zone_index == -1);
7052 	movable_zone = zone_index;
7053 }
7054 
7055 /*
7056  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7057  * because it is sized independent of architecture. Unlike the other zones,
7058  * the starting point for ZONE_MOVABLE is not fixed. It may be different
7059  * in each node depending on the size of each node and how evenly kernelcore
7060  * is distributed. This helper function adjusts the zone ranges
7061  * provided by the architecture for a given node by using the end of the
7062  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7063  * zones within a node are in order of monotonic increases memory addresses
7064  */
7065 static void __init adjust_zone_range_for_zone_movable(int nid,
7066 					unsigned long zone_type,
7067 					unsigned long node_start_pfn,
7068 					unsigned long node_end_pfn,
7069 					unsigned long *zone_start_pfn,
7070 					unsigned long *zone_end_pfn)
7071 {
7072 	/* Only adjust if ZONE_MOVABLE is on this node */
7073 	if (zone_movable_pfn[nid]) {
7074 		/* Size ZONE_MOVABLE */
7075 		if (zone_type == ZONE_MOVABLE) {
7076 			*zone_start_pfn = zone_movable_pfn[nid];
7077 			*zone_end_pfn = min(node_end_pfn,
7078 				arch_zone_highest_possible_pfn[movable_zone]);
7079 
7080 		/* Adjust for ZONE_MOVABLE starting within this range */
7081 		} else if (!mirrored_kernelcore &&
7082 			*zone_start_pfn < zone_movable_pfn[nid] &&
7083 			*zone_end_pfn > zone_movable_pfn[nid]) {
7084 			*zone_end_pfn = zone_movable_pfn[nid];
7085 
7086 		/* Check if this whole range is within ZONE_MOVABLE */
7087 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
7088 			*zone_start_pfn = *zone_end_pfn;
7089 	}
7090 }
7091 
7092 /*
7093  * Return the number of pages a zone spans in a node, including holes
7094  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7095  */
7096 static unsigned long __init zone_spanned_pages_in_node(int nid,
7097 					unsigned long zone_type,
7098 					unsigned long node_start_pfn,
7099 					unsigned long node_end_pfn,
7100 					unsigned long *zone_start_pfn,
7101 					unsigned long *zone_end_pfn)
7102 {
7103 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7104 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7105 	/* When hotadd a new node from cpu_up(), the node should be empty */
7106 	if (!node_start_pfn && !node_end_pfn)
7107 		return 0;
7108 
7109 	/* Get the start and end of the zone */
7110 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7111 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7112 	adjust_zone_range_for_zone_movable(nid, zone_type,
7113 				node_start_pfn, node_end_pfn,
7114 				zone_start_pfn, zone_end_pfn);
7115 
7116 	/* Check that this node has pages within the zone's required range */
7117 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7118 		return 0;
7119 
7120 	/* Move the zone boundaries inside the node if necessary */
7121 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7122 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7123 
7124 	/* Return the spanned pages */
7125 	return *zone_end_pfn - *zone_start_pfn;
7126 }
7127 
7128 /*
7129  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7130  * then all holes in the requested range will be accounted for.
7131  */
7132 unsigned long __init __absent_pages_in_range(int nid,
7133 				unsigned long range_start_pfn,
7134 				unsigned long range_end_pfn)
7135 {
7136 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
7137 	unsigned long start_pfn, end_pfn;
7138 	int i;
7139 
7140 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7141 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7142 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7143 		nr_absent -= end_pfn - start_pfn;
7144 	}
7145 	return nr_absent;
7146 }
7147 
7148 /**
7149  * absent_pages_in_range - Return number of page frames in holes within a range
7150  * @start_pfn: The start PFN to start searching for holes
7151  * @end_pfn: The end PFN to stop searching for holes
7152  *
7153  * Return: the number of pages frames in memory holes within a range.
7154  */
7155 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7156 							unsigned long end_pfn)
7157 {
7158 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7159 }
7160 
7161 /* Return the number of page frames in holes in a zone on a node */
7162 static unsigned long __init zone_absent_pages_in_node(int nid,
7163 					unsigned long zone_type,
7164 					unsigned long node_start_pfn,
7165 					unsigned long node_end_pfn)
7166 {
7167 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7168 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7169 	unsigned long zone_start_pfn, zone_end_pfn;
7170 	unsigned long nr_absent;
7171 
7172 	/* When hotadd a new node from cpu_up(), the node should be empty */
7173 	if (!node_start_pfn && !node_end_pfn)
7174 		return 0;
7175 
7176 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7177 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7178 
7179 	adjust_zone_range_for_zone_movable(nid, zone_type,
7180 			node_start_pfn, node_end_pfn,
7181 			&zone_start_pfn, &zone_end_pfn);
7182 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7183 
7184 	/*
7185 	 * ZONE_MOVABLE handling.
7186 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7187 	 * and vice versa.
7188 	 */
7189 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7190 		unsigned long start_pfn, end_pfn;
7191 		struct memblock_region *r;
7192 
7193 		for_each_mem_region(r) {
7194 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
7195 					  zone_start_pfn, zone_end_pfn);
7196 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
7197 					zone_start_pfn, zone_end_pfn);
7198 
7199 			if (zone_type == ZONE_MOVABLE &&
7200 			    memblock_is_mirror(r))
7201 				nr_absent += end_pfn - start_pfn;
7202 
7203 			if (zone_type == ZONE_NORMAL &&
7204 			    !memblock_is_mirror(r))
7205 				nr_absent += end_pfn - start_pfn;
7206 		}
7207 	}
7208 
7209 	return nr_absent;
7210 }
7211 
7212 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7213 						unsigned long node_start_pfn,
7214 						unsigned long node_end_pfn)
7215 {
7216 	unsigned long realtotalpages = 0, totalpages = 0;
7217 	enum zone_type i;
7218 
7219 	for (i = 0; i < MAX_NR_ZONES; i++) {
7220 		struct zone *zone = pgdat->node_zones + i;
7221 		unsigned long zone_start_pfn, zone_end_pfn;
7222 		unsigned long spanned, absent;
7223 		unsigned long size, real_size;
7224 
7225 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7226 						     node_start_pfn,
7227 						     node_end_pfn,
7228 						     &zone_start_pfn,
7229 						     &zone_end_pfn);
7230 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
7231 						   node_start_pfn,
7232 						   node_end_pfn);
7233 
7234 		size = spanned;
7235 		real_size = size - absent;
7236 
7237 		if (size)
7238 			zone->zone_start_pfn = zone_start_pfn;
7239 		else
7240 			zone->zone_start_pfn = 0;
7241 		zone->spanned_pages = size;
7242 		zone->present_pages = real_size;
7243 
7244 		totalpages += size;
7245 		realtotalpages += real_size;
7246 	}
7247 
7248 	pgdat->node_spanned_pages = totalpages;
7249 	pgdat->node_present_pages = realtotalpages;
7250 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7251 }
7252 
7253 #ifndef CONFIG_SPARSEMEM
7254 /*
7255  * Calculate the size of the zone->blockflags rounded to an unsigned long
7256  * Start by making sure zonesize is a multiple of pageblock_order by rounding
7257  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
7258  * round what is now in bits to nearest long in bits, then return it in
7259  * bytes.
7260  */
7261 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7262 {
7263 	unsigned long usemapsize;
7264 
7265 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7266 	usemapsize = roundup(zonesize, pageblock_nr_pages);
7267 	usemapsize = usemapsize >> pageblock_order;
7268 	usemapsize *= NR_PAGEBLOCK_BITS;
7269 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7270 
7271 	return usemapsize / 8;
7272 }
7273 
7274 static void __ref setup_usemap(struct zone *zone)
7275 {
7276 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7277 					       zone->spanned_pages);
7278 	zone->pageblock_flags = NULL;
7279 	if (usemapsize) {
7280 		zone->pageblock_flags =
7281 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7282 					    zone_to_nid(zone));
7283 		if (!zone->pageblock_flags)
7284 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7285 			      usemapsize, zone->name, zone_to_nid(zone));
7286 	}
7287 }
7288 #else
7289 static inline void setup_usemap(struct zone *zone) {}
7290 #endif /* CONFIG_SPARSEMEM */
7291 
7292 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7293 
7294 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7295 void __init set_pageblock_order(void)
7296 {
7297 	unsigned int order;
7298 
7299 	/* Check that pageblock_nr_pages has not already been setup */
7300 	if (pageblock_order)
7301 		return;
7302 
7303 	if (HPAGE_SHIFT > PAGE_SHIFT)
7304 		order = HUGETLB_PAGE_ORDER;
7305 	else
7306 		order = MAX_ORDER - 1;
7307 
7308 	/*
7309 	 * Assume the largest contiguous order of interest is a huge page.
7310 	 * This value may be variable depending on boot parameters on IA64 and
7311 	 * powerpc.
7312 	 */
7313 	pageblock_order = order;
7314 }
7315 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7316 
7317 /*
7318  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7319  * is unused as pageblock_order is set at compile-time. See
7320  * include/linux/pageblock-flags.h for the values of pageblock_order based on
7321  * the kernel config
7322  */
7323 void __init set_pageblock_order(void)
7324 {
7325 }
7326 
7327 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7328 
7329 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7330 						unsigned long present_pages)
7331 {
7332 	unsigned long pages = spanned_pages;
7333 
7334 	/*
7335 	 * Provide a more accurate estimation if there are holes within
7336 	 * the zone and SPARSEMEM is in use. If there are holes within the
7337 	 * zone, each populated memory region may cost us one or two extra
7338 	 * memmap pages due to alignment because memmap pages for each
7339 	 * populated regions may not be naturally aligned on page boundary.
7340 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7341 	 */
7342 	if (spanned_pages > present_pages + (present_pages >> 4) &&
7343 	    IS_ENABLED(CONFIG_SPARSEMEM))
7344 		pages = present_pages;
7345 
7346 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7347 }
7348 
7349 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7350 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7351 {
7352 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7353 
7354 	spin_lock_init(&ds_queue->split_queue_lock);
7355 	INIT_LIST_HEAD(&ds_queue->split_queue);
7356 	ds_queue->split_queue_len = 0;
7357 }
7358 #else
7359 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7360 #endif
7361 
7362 #ifdef CONFIG_COMPACTION
7363 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7364 {
7365 	init_waitqueue_head(&pgdat->kcompactd_wait);
7366 }
7367 #else
7368 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7369 #endif
7370 
7371 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7372 {
7373 	pgdat_resize_init(pgdat);
7374 
7375 	pgdat_init_split_queue(pgdat);
7376 	pgdat_init_kcompactd(pgdat);
7377 
7378 	init_waitqueue_head(&pgdat->kswapd_wait);
7379 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
7380 
7381 	pgdat_page_ext_init(pgdat);
7382 	lruvec_init(&pgdat->__lruvec);
7383 }
7384 
7385 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7386 							unsigned long remaining_pages)
7387 {
7388 	atomic_long_set(&zone->managed_pages, remaining_pages);
7389 	zone_set_nid(zone, nid);
7390 	zone->name = zone_names[idx];
7391 	zone->zone_pgdat = NODE_DATA(nid);
7392 	spin_lock_init(&zone->lock);
7393 	zone_seqlock_init(zone);
7394 	zone_pcp_init(zone);
7395 }
7396 
7397 /*
7398  * Set up the zone data structures
7399  * - init pgdat internals
7400  * - init all zones belonging to this node
7401  *
7402  * NOTE: this function is only called during memory hotplug
7403  */
7404 #ifdef CONFIG_MEMORY_HOTPLUG
7405 void __ref free_area_init_core_hotplug(int nid)
7406 {
7407 	enum zone_type z;
7408 	pg_data_t *pgdat = NODE_DATA(nid);
7409 
7410 	pgdat_init_internals(pgdat);
7411 	for (z = 0; z < MAX_NR_ZONES; z++)
7412 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7413 }
7414 #endif
7415 
7416 /*
7417  * Set up the zone data structures:
7418  *   - mark all pages reserved
7419  *   - mark all memory queues empty
7420  *   - clear the memory bitmaps
7421  *
7422  * NOTE: pgdat should get zeroed by caller.
7423  * NOTE: this function is only called during early init.
7424  */
7425 static void __init free_area_init_core(struct pglist_data *pgdat)
7426 {
7427 	enum zone_type j;
7428 	int nid = pgdat->node_id;
7429 
7430 	pgdat_init_internals(pgdat);
7431 	pgdat->per_cpu_nodestats = &boot_nodestats;
7432 
7433 	for (j = 0; j < MAX_NR_ZONES; j++) {
7434 		struct zone *zone = pgdat->node_zones + j;
7435 		unsigned long size, freesize, memmap_pages;
7436 
7437 		size = zone->spanned_pages;
7438 		freesize = zone->present_pages;
7439 
7440 		/*
7441 		 * Adjust freesize so that it accounts for how much memory
7442 		 * is used by this zone for memmap. This affects the watermark
7443 		 * and per-cpu initialisations
7444 		 */
7445 		memmap_pages = calc_memmap_size(size, freesize);
7446 		if (!is_highmem_idx(j)) {
7447 			if (freesize >= memmap_pages) {
7448 				freesize -= memmap_pages;
7449 				if (memmap_pages)
7450 					pr_debug("  %s zone: %lu pages used for memmap\n",
7451 						 zone_names[j], memmap_pages);
7452 			} else
7453 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
7454 					zone_names[j], memmap_pages, freesize);
7455 		}
7456 
7457 		/* Account for reserved pages */
7458 		if (j == 0 && freesize > dma_reserve) {
7459 			freesize -= dma_reserve;
7460 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7461 		}
7462 
7463 		if (!is_highmem_idx(j))
7464 			nr_kernel_pages += freesize;
7465 		/* Charge for highmem memmap if there are enough kernel pages */
7466 		else if (nr_kernel_pages > memmap_pages * 2)
7467 			nr_kernel_pages -= memmap_pages;
7468 		nr_all_pages += freesize;
7469 
7470 		/*
7471 		 * Set an approximate value for lowmem here, it will be adjusted
7472 		 * when the bootmem allocator frees pages into the buddy system.
7473 		 * And all highmem pages will be managed by the buddy system.
7474 		 */
7475 		zone_init_internals(zone, j, nid, freesize);
7476 
7477 		if (!size)
7478 			continue;
7479 
7480 		set_pageblock_order();
7481 		setup_usemap(zone);
7482 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7483 	}
7484 }
7485 
7486 #ifdef CONFIG_FLATMEM
7487 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
7488 {
7489 	unsigned long __maybe_unused start = 0;
7490 	unsigned long __maybe_unused offset = 0;
7491 
7492 	/* Skip empty nodes */
7493 	if (!pgdat->node_spanned_pages)
7494 		return;
7495 
7496 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7497 	offset = pgdat->node_start_pfn - start;
7498 	/* ia64 gets its own node_mem_map, before this, without bootmem */
7499 	if (!pgdat->node_mem_map) {
7500 		unsigned long size, end;
7501 		struct page *map;
7502 
7503 		/*
7504 		 * The zone's endpoints aren't required to be MAX_ORDER
7505 		 * aligned but the node_mem_map endpoints must be in order
7506 		 * for the buddy allocator to function correctly.
7507 		 */
7508 		end = pgdat_end_pfn(pgdat);
7509 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7510 		size =  (end - start) * sizeof(struct page);
7511 		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
7512 					  pgdat->node_id);
7513 		if (!map)
7514 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7515 			      size, pgdat->node_id);
7516 		pgdat->node_mem_map = map + offset;
7517 	}
7518 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7519 				__func__, pgdat->node_id, (unsigned long)pgdat,
7520 				(unsigned long)pgdat->node_mem_map);
7521 #ifndef CONFIG_NUMA
7522 	/*
7523 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7524 	 */
7525 	if (pgdat == NODE_DATA(0)) {
7526 		mem_map = NODE_DATA(0)->node_mem_map;
7527 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7528 			mem_map -= offset;
7529 	}
7530 #endif
7531 }
7532 #else
7533 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
7534 #endif /* CONFIG_FLATMEM */
7535 
7536 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7537 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7538 {
7539 	pgdat->first_deferred_pfn = ULONG_MAX;
7540 }
7541 #else
7542 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7543 #endif
7544 
7545 static void __init free_area_init_node(int nid)
7546 {
7547 	pg_data_t *pgdat = NODE_DATA(nid);
7548 	unsigned long start_pfn = 0;
7549 	unsigned long end_pfn = 0;
7550 
7551 	/* pg_data_t should be reset to zero when it's allocated */
7552 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7553 
7554 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7555 
7556 	pgdat->node_id = nid;
7557 	pgdat->node_start_pfn = start_pfn;
7558 	pgdat->per_cpu_nodestats = NULL;
7559 
7560 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7561 		(u64)start_pfn << PAGE_SHIFT,
7562 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7563 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7564 
7565 	alloc_node_mem_map(pgdat);
7566 	pgdat_set_deferred_range(pgdat);
7567 
7568 	free_area_init_core(pgdat);
7569 }
7570 
7571 void __init free_area_init_memoryless_node(int nid)
7572 {
7573 	free_area_init_node(nid);
7574 }
7575 
7576 #if MAX_NUMNODES > 1
7577 /*
7578  * Figure out the number of possible node ids.
7579  */
7580 void __init setup_nr_node_ids(void)
7581 {
7582 	unsigned int highest;
7583 
7584 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7585 	nr_node_ids = highest + 1;
7586 }
7587 #endif
7588 
7589 /**
7590  * node_map_pfn_alignment - determine the maximum internode alignment
7591  *
7592  * This function should be called after node map is populated and sorted.
7593  * It calculates the maximum power of two alignment which can distinguish
7594  * all the nodes.
7595  *
7596  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7597  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7598  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7599  * shifted, 1GiB is enough and this function will indicate so.
7600  *
7601  * This is used to test whether pfn -> nid mapping of the chosen memory
7602  * model has fine enough granularity to avoid incorrect mapping for the
7603  * populated node map.
7604  *
7605  * Return: the determined alignment in pfn's.  0 if there is no alignment
7606  * requirement (single node).
7607  */
7608 unsigned long __init node_map_pfn_alignment(void)
7609 {
7610 	unsigned long accl_mask = 0, last_end = 0;
7611 	unsigned long start, end, mask;
7612 	int last_nid = NUMA_NO_NODE;
7613 	int i, nid;
7614 
7615 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7616 		if (!start || last_nid < 0 || last_nid == nid) {
7617 			last_nid = nid;
7618 			last_end = end;
7619 			continue;
7620 		}
7621 
7622 		/*
7623 		 * Start with a mask granular enough to pin-point to the
7624 		 * start pfn and tick off bits one-by-one until it becomes
7625 		 * too coarse to separate the current node from the last.
7626 		 */
7627 		mask = ~((1 << __ffs(start)) - 1);
7628 		while (mask && last_end <= (start & (mask << 1)))
7629 			mask <<= 1;
7630 
7631 		/* accumulate all internode masks */
7632 		accl_mask |= mask;
7633 	}
7634 
7635 	/* convert mask to number of pages */
7636 	return ~accl_mask + 1;
7637 }
7638 
7639 /**
7640  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7641  *
7642  * Return: the minimum PFN based on information provided via
7643  * memblock_set_node().
7644  */
7645 unsigned long __init find_min_pfn_with_active_regions(void)
7646 {
7647 	return PHYS_PFN(memblock_start_of_DRAM());
7648 }
7649 
7650 /*
7651  * early_calculate_totalpages()
7652  * Sum pages in active regions for movable zone.
7653  * Populate N_MEMORY for calculating usable_nodes.
7654  */
7655 static unsigned long __init early_calculate_totalpages(void)
7656 {
7657 	unsigned long totalpages = 0;
7658 	unsigned long start_pfn, end_pfn;
7659 	int i, nid;
7660 
7661 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7662 		unsigned long pages = end_pfn - start_pfn;
7663 
7664 		totalpages += pages;
7665 		if (pages)
7666 			node_set_state(nid, N_MEMORY);
7667 	}
7668 	return totalpages;
7669 }
7670 
7671 /*
7672  * Find the PFN the Movable zone begins in each node. Kernel memory
7673  * is spread evenly between nodes as long as the nodes have enough
7674  * memory. When they don't, some nodes will have more kernelcore than
7675  * others
7676  */
7677 static void __init find_zone_movable_pfns_for_nodes(void)
7678 {
7679 	int i, nid;
7680 	unsigned long usable_startpfn;
7681 	unsigned long kernelcore_node, kernelcore_remaining;
7682 	/* save the state before borrow the nodemask */
7683 	nodemask_t saved_node_state = node_states[N_MEMORY];
7684 	unsigned long totalpages = early_calculate_totalpages();
7685 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7686 	struct memblock_region *r;
7687 
7688 	/* Need to find movable_zone earlier when movable_node is specified. */
7689 	find_usable_zone_for_movable();
7690 
7691 	/*
7692 	 * If movable_node is specified, ignore kernelcore and movablecore
7693 	 * options.
7694 	 */
7695 	if (movable_node_is_enabled()) {
7696 		for_each_mem_region(r) {
7697 			if (!memblock_is_hotpluggable(r))
7698 				continue;
7699 
7700 			nid = memblock_get_region_node(r);
7701 
7702 			usable_startpfn = PFN_DOWN(r->base);
7703 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7704 				min(usable_startpfn, zone_movable_pfn[nid]) :
7705 				usable_startpfn;
7706 		}
7707 
7708 		goto out2;
7709 	}
7710 
7711 	/*
7712 	 * If kernelcore=mirror is specified, ignore movablecore option
7713 	 */
7714 	if (mirrored_kernelcore) {
7715 		bool mem_below_4gb_not_mirrored = false;
7716 
7717 		for_each_mem_region(r) {
7718 			if (memblock_is_mirror(r))
7719 				continue;
7720 
7721 			nid = memblock_get_region_node(r);
7722 
7723 			usable_startpfn = memblock_region_memory_base_pfn(r);
7724 
7725 			if (usable_startpfn < 0x100000) {
7726 				mem_below_4gb_not_mirrored = true;
7727 				continue;
7728 			}
7729 
7730 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7731 				min(usable_startpfn, zone_movable_pfn[nid]) :
7732 				usable_startpfn;
7733 		}
7734 
7735 		if (mem_below_4gb_not_mirrored)
7736 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7737 
7738 		goto out2;
7739 	}
7740 
7741 	/*
7742 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7743 	 * amount of necessary memory.
7744 	 */
7745 	if (required_kernelcore_percent)
7746 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7747 				       10000UL;
7748 	if (required_movablecore_percent)
7749 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7750 					10000UL;
7751 
7752 	/*
7753 	 * If movablecore= was specified, calculate what size of
7754 	 * kernelcore that corresponds so that memory usable for
7755 	 * any allocation type is evenly spread. If both kernelcore
7756 	 * and movablecore are specified, then the value of kernelcore
7757 	 * will be used for required_kernelcore if it's greater than
7758 	 * what movablecore would have allowed.
7759 	 */
7760 	if (required_movablecore) {
7761 		unsigned long corepages;
7762 
7763 		/*
7764 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7765 		 * was requested by the user
7766 		 */
7767 		required_movablecore =
7768 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7769 		required_movablecore = min(totalpages, required_movablecore);
7770 		corepages = totalpages - required_movablecore;
7771 
7772 		required_kernelcore = max(required_kernelcore, corepages);
7773 	}
7774 
7775 	/*
7776 	 * If kernelcore was not specified or kernelcore size is larger
7777 	 * than totalpages, there is no ZONE_MOVABLE.
7778 	 */
7779 	if (!required_kernelcore || required_kernelcore >= totalpages)
7780 		goto out;
7781 
7782 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7783 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7784 
7785 restart:
7786 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7787 	kernelcore_node = required_kernelcore / usable_nodes;
7788 	for_each_node_state(nid, N_MEMORY) {
7789 		unsigned long start_pfn, end_pfn;
7790 
7791 		/*
7792 		 * Recalculate kernelcore_node if the division per node
7793 		 * now exceeds what is necessary to satisfy the requested
7794 		 * amount of memory for the kernel
7795 		 */
7796 		if (required_kernelcore < kernelcore_node)
7797 			kernelcore_node = required_kernelcore / usable_nodes;
7798 
7799 		/*
7800 		 * As the map is walked, we track how much memory is usable
7801 		 * by the kernel using kernelcore_remaining. When it is
7802 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7803 		 */
7804 		kernelcore_remaining = kernelcore_node;
7805 
7806 		/* Go through each range of PFNs within this node */
7807 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7808 			unsigned long size_pages;
7809 
7810 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7811 			if (start_pfn >= end_pfn)
7812 				continue;
7813 
7814 			/* Account for what is only usable for kernelcore */
7815 			if (start_pfn < usable_startpfn) {
7816 				unsigned long kernel_pages;
7817 				kernel_pages = min(end_pfn, usable_startpfn)
7818 								- start_pfn;
7819 
7820 				kernelcore_remaining -= min(kernel_pages,
7821 							kernelcore_remaining);
7822 				required_kernelcore -= min(kernel_pages,
7823 							required_kernelcore);
7824 
7825 				/* Continue if range is now fully accounted */
7826 				if (end_pfn <= usable_startpfn) {
7827 
7828 					/*
7829 					 * Push zone_movable_pfn to the end so
7830 					 * that if we have to rebalance
7831 					 * kernelcore across nodes, we will
7832 					 * not double account here
7833 					 */
7834 					zone_movable_pfn[nid] = end_pfn;
7835 					continue;
7836 				}
7837 				start_pfn = usable_startpfn;
7838 			}
7839 
7840 			/*
7841 			 * The usable PFN range for ZONE_MOVABLE is from
7842 			 * start_pfn->end_pfn. Calculate size_pages as the
7843 			 * number of pages used as kernelcore
7844 			 */
7845 			size_pages = end_pfn - start_pfn;
7846 			if (size_pages > kernelcore_remaining)
7847 				size_pages = kernelcore_remaining;
7848 			zone_movable_pfn[nid] = start_pfn + size_pages;
7849 
7850 			/*
7851 			 * Some kernelcore has been met, update counts and
7852 			 * break if the kernelcore for this node has been
7853 			 * satisfied
7854 			 */
7855 			required_kernelcore -= min(required_kernelcore,
7856 								size_pages);
7857 			kernelcore_remaining -= size_pages;
7858 			if (!kernelcore_remaining)
7859 				break;
7860 		}
7861 	}
7862 
7863 	/*
7864 	 * If there is still required_kernelcore, we do another pass with one
7865 	 * less node in the count. This will push zone_movable_pfn[nid] further
7866 	 * along on the nodes that still have memory until kernelcore is
7867 	 * satisfied
7868 	 */
7869 	usable_nodes--;
7870 	if (usable_nodes && required_kernelcore > usable_nodes)
7871 		goto restart;
7872 
7873 out2:
7874 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7875 	for (nid = 0; nid < MAX_NUMNODES; nid++)
7876 		zone_movable_pfn[nid] =
7877 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7878 
7879 out:
7880 	/* restore the node_state */
7881 	node_states[N_MEMORY] = saved_node_state;
7882 }
7883 
7884 /* Any regular or high memory on that node ? */
7885 static void check_for_memory(pg_data_t *pgdat, int nid)
7886 {
7887 	enum zone_type zone_type;
7888 
7889 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7890 		struct zone *zone = &pgdat->node_zones[zone_type];
7891 		if (populated_zone(zone)) {
7892 			if (IS_ENABLED(CONFIG_HIGHMEM))
7893 				node_set_state(nid, N_HIGH_MEMORY);
7894 			if (zone_type <= ZONE_NORMAL)
7895 				node_set_state(nid, N_NORMAL_MEMORY);
7896 			break;
7897 		}
7898 	}
7899 }
7900 
7901 /*
7902  * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
7903  * such cases we allow max_zone_pfn sorted in the descending order
7904  */
7905 bool __weak arch_has_descending_max_zone_pfns(void)
7906 {
7907 	return false;
7908 }
7909 
7910 /**
7911  * free_area_init - Initialise all pg_data_t and zone data
7912  * @max_zone_pfn: an array of max PFNs for each zone
7913  *
7914  * This will call free_area_init_node() for each active node in the system.
7915  * Using the page ranges provided by memblock_set_node(), the size of each
7916  * zone in each node and their holes is calculated. If the maximum PFN
7917  * between two adjacent zones match, it is assumed that the zone is empty.
7918  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7919  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7920  * starts where the previous one ended. For example, ZONE_DMA32 starts
7921  * at arch_max_dma_pfn.
7922  */
7923 void __init free_area_init(unsigned long *max_zone_pfn)
7924 {
7925 	unsigned long start_pfn, end_pfn;
7926 	int i, nid, zone;
7927 	bool descending;
7928 
7929 	/* Record where the zone boundaries are */
7930 	memset(arch_zone_lowest_possible_pfn, 0,
7931 				sizeof(arch_zone_lowest_possible_pfn));
7932 	memset(arch_zone_highest_possible_pfn, 0,
7933 				sizeof(arch_zone_highest_possible_pfn));
7934 
7935 	start_pfn = find_min_pfn_with_active_regions();
7936 	descending = arch_has_descending_max_zone_pfns();
7937 
7938 	for (i = 0; i < MAX_NR_ZONES; i++) {
7939 		if (descending)
7940 			zone = MAX_NR_ZONES - i - 1;
7941 		else
7942 			zone = i;
7943 
7944 		if (zone == ZONE_MOVABLE)
7945 			continue;
7946 
7947 		end_pfn = max(max_zone_pfn[zone], start_pfn);
7948 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
7949 		arch_zone_highest_possible_pfn[zone] = end_pfn;
7950 
7951 		start_pfn = end_pfn;
7952 	}
7953 
7954 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
7955 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7956 	find_zone_movable_pfns_for_nodes();
7957 
7958 	/* Print out the zone ranges */
7959 	pr_info("Zone ranges:\n");
7960 	for (i = 0; i < MAX_NR_ZONES; i++) {
7961 		if (i == ZONE_MOVABLE)
7962 			continue;
7963 		pr_info("  %-8s ", zone_names[i]);
7964 		if (arch_zone_lowest_possible_pfn[i] ==
7965 				arch_zone_highest_possible_pfn[i])
7966 			pr_cont("empty\n");
7967 		else
7968 			pr_cont("[mem %#018Lx-%#018Lx]\n",
7969 				(u64)arch_zone_lowest_possible_pfn[i]
7970 					<< PAGE_SHIFT,
7971 				((u64)arch_zone_highest_possible_pfn[i]
7972 					<< PAGE_SHIFT) - 1);
7973 	}
7974 
7975 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
7976 	pr_info("Movable zone start for each node\n");
7977 	for (i = 0; i < MAX_NUMNODES; i++) {
7978 		if (zone_movable_pfn[i])
7979 			pr_info("  Node %d: %#018Lx\n", i,
7980 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7981 	}
7982 
7983 	/*
7984 	 * Print out the early node map, and initialize the
7985 	 * subsection-map relative to active online memory ranges to
7986 	 * enable future "sub-section" extensions of the memory map.
7987 	 */
7988 	pr_info("Early memory node ranges\n");
7989 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7990 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7991 			(u64)start_pfn << PAGE_SHIFT,
7992 			((u64)end_pfn << PAGE_SHIFT) - 1);
7993 		subsection_map_init(start_pfn, end_pfn - start_pfn);
7994 	}
7995 
7996 	/* Initialise every node */
7997 	mminit_verify_pageflags_layout();
7998 	setup_nr_node_ids();
7999 	for_each_online_node(nid) {
8000 		pg_data_t *pgdat = NODE_DATA(nid);
8001 		free_area_init_node(nid);
8002 
8003 		/* Any memory on that node */
8004 		if (pgdat->node_present_pages)
8005 			node_set_state(nid, N_MEMORY);
8006 		check_for_memory(pgdat, nid);
8007 	}
8008 
8009 	memmap_init();
8010 }
8011 
8012 static int __init cmdline_parse_core(char *p, unsigned long *core,
8013 				     unsigned long *percent)
8014 {
8015 	unsigned long long coremem;
8016 	char *endptr;
8017 
8018 	if (!p)
8019 		return -EINVAL;
8020 
8021 	/* Value may be a percentage of total memory, otherwise bytes */
8022 	coremem = simple_strtoull(p, &endptr, 0);
8023 	if (*endptr == '%') {
8024 		/* Paranoid check for percent values greater than 100 */
8025 		WARN_ON(coremem > 100);
8026 
8027 		*percent = coremem;
8028 	} else {
8029 		coremem = memparse(p, &p);
8030 		/* Paranoid check that UL is enough for the coremem value */
8031 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8032 
8033 		*core = coremem >> PAGE_SHIFT;
8034 		*percent = 0UL;
8035 	}
8036 	return 0;
8037 }
8038 
8039 /*
8040  * kernelcore=size sets the amount of memory for use for allocations that
8041  * cannot be reclaimed or migrated.
8042  */
8043 static int __init cmdline_parse_kernelcore(char *p)
8044 {
8045 	/* parse kernelcore=mirror */
8046 	if (parse_option_str(p, "mirror")) {
8047 		mirrored_kernelcore = true;
8048 		return 0;
8049 	}
8050 
8051 	return cmdline_parse_core(p, &required_kernelcore,
8052 				  &required_kernelcore_percent);
8053 }
8054 
8055 /*
8056  * movablecore=size sets the amount of memory for use for allocations that
8057  * can be reclaimed or migrated.
8058  */
8059 static int __init cmdline_parse_movablecore(char *p)
8060 {
8061 	return cmdline_parse_core(p, &required_movablecore,
8062 				  &required_movablecore_percent);
8063 }
8064 
8065 early_param("kernelcore", cmdline_parse_kernelcore);
8066 early_param("movablecore", cmdline_parse_movablecore);
8067 
8068 void adjust_managed_page_count(struct page *page, long count)
8069 {
8070 	atomic_long_add(count, &page_zone(page)->managed_pages);
8071 	totalram_pages_add(count);
8072 #ifdef CONFIG_HIGHMEM
8073 	if (PageHighMem(page))
8074 		totalhigh_pages_add(count);
8075 #endif
8076 }
8077 EXPORT_SYMBOL(adjust_managed_page_count);
8078 
8079 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8080 {
8081 	void *pos;
8082 	unsigned long pages = 0;
8083 
8084 	start = (void *)PAGE_ALIGN((unsigned long)start);
8085 	end = (void *)((unsigned long)end & PAGE_MASK);
8086 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8087 		struct page *page = virt_to_page(pos);
8088 		void *direct_map_addr;
8089 
8090 		/*
8091 		 * 'direct_map_addr' might be different from 'pos'
8092 		 * because some architectures' virt_to_page()
8093 		 * work with aliases.  Getting the direct map
8094 		 * address ensures that we get a _writeable_
8095 		 * alias for the memset().
8096 		 */
8097 		direct_map_addr = page_address(page);
8098 		/*
8099 		 * Perform a kasan-unchecked memset() since this memory
8100 		 * has not been initialized.
8101 		 */
8102 		direct_map_addr = kasan_reset_tag(direct_map_addr);
8103 		if ((unsigned int)poison <= 0xFF)
8104 			memset(direct_map_addr, poison, PAGE_SIZE);
8105 
8106 		free_reserved_page(page);
8107 	}
8108 
8109 	if (pages && s)
8110 		pr_info("Freeing %s memory: %ldK\n",
8111 			s, pages << (PAGE_SHIFT - 10));
8112 
8113 	return pages;
8114 }
8115 
8116 void __init mem_init_print_info(void)
8117 {
8118 	unsigned long physpages, codesize, datasize, rosize, bss_size;
8119 	unsigned long init_code_size, init_data_size;
8120 
8121 	physpages = get_num_physpages();
8122 	codesize = _etext - _stext;
8123 	datasize = _edata - _sdata;
8124 	rosize = __end_rodata - __start_rodata;
8125 	bss_size = __bss_stop - __bss_start;
8126 	init_data_size = __init_end - __init_begin;
8127 	init_code_size = _einittext - _sinittext;
8128 
8129 	/*
8130 	 * Detect special cases and adjust section sizes accordingly:
8131 	 * 1) .init.* may be embedded into .data sections
8132 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
8133 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
8134 	 * 3) .rodata.* may be embedded into .text or .data sections.
8135 	 */
8136 #define adj_init_size(start, end, size, pos, adj) \
8137 	do { \
8138 		if (start <= pos && pos < end && size > adj) \
8139 			size -= adj; \
8140 	} while (0)
8141 
8142 	adj_init_size(__init_begin, __init_end, init_data_size,
8143 		     _sinittext, init_code_size);
8144 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8145 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8146 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8147 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8148 
8149 #undef	adj_init_size
8150 
8151 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8152 #ifdef	CONFIG_HIGHMEM
8153 		", %luK highmem"
8154 #endif
8155 		")\n",
8156 		nr_free_pages() << (PAGE_SHIFT - 10),
8157 		physpages << (PAGE_SHIFT - 10),
8158 		codesize >> 10, datasize >> 10, rosize >> 10,
8159 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
8160 		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
8161 		totalcma_pages << (PAGE_SHIFT - 10)
8162 #ifdef	CONFIG_HIGHMEM
8163 		, totalhigh_pages() << (PAGE_SHIFT - 10)
8164 #endif
8165 		);
8166 }
8167 
8168 /**
8169  * set_dma_reserve - set the specified number of pages reserved in the first zone
8170  * @new_dma_reserve: The number of pages to mark reserved
8171  *
8172  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8173  * In the DMA zone, a significant percentage may be consumed by kernel image
8174  * and other unfreeable allocations which can skew the watermarks badly. This
8175  * function may optionally be used to account for unfreeable pages in the
8176  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8177  * smaller per-cpu batchsize.
8178  */
8179 void __init set_dma_reserve(unsigned long new_dma_reserve)
8180 {
8181 	dma_reserve = new_dma_reserve;
8182 }
8183 
8184 static int page_alloc_cpu_dead(unsigned int cpu)
8185 {
8186 	struct zone *zone;
8187 
8188 	lru_add_drain_cpu(cpu);
8189 	drain_pages(cpu);
8190 
8191 	/*
8192 	 * Spill the event counters of the dead processor
8193 	 * into the current processors event counters.
8194 	 * This artificially elevates the count of the current
8195 	 * processor.
8196 	 */
8197 	vm_events_fold_cpu(cpu);
8198 
8199 	/*
8200 	 * Zero the differential counters of the dead processor
8201 	 * so that the vm statistics are consistent.
8202 	 *
8203 	 * This is only okay since the processor is dead and cannot
8204 	 * race with what we are doing.
8205 	 */
8206 	cpu_vm_stats_fold(cpu);
8207 
8208 	for_each_populated_zone(zone)
8209 		zone_pcp_update(zone, 0);
8210 
8211 	return 0;
8212 }
8213 
8214 static int page_alloc_cpu_online(unsigned int cpu)
8215 {
8216 	struct zone *zone;
8217 
8218 	for_each_populated_zone(zone)
8219 		zone_pcp_update(zone, 1);
8220 	return 0;
8221 }
8222 
8223 #ifdef CONFIG_NUMA
8224 int hashdist = HASHDIST_DEFAULT;
8225 
8226 static int __init set_hashdist(char *str)
8227 {
8228 	if (!str)
8229 		return 0;
8230 	hashdist = simple_strtoul(str, &str, 0);
8231 	return 1;
8232 }
8233 __setup("hashdist=", set_hashdist);
8234 #endif
8235 
8236 void __init page_alloc_init(void)
8237 {
8238 	int ret;
8239 
8240 #ifdef CONFIG_NUMA
8241 	if (num_node_state(N_MEMORY) == 1)
8242 		hashdist = 0;
8243 #endif
8244 
8245 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8246 					"mm/page_alloc:pcp",
8247 					page_alloc_cpu_online,
8248 					page_alloc_cpu_dead);
8249 	WARN_ON(ret < 0);
8250 }
8251 
8252 /*
8253  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8254  *	or min_free_kbytes changes.
8255  */
8256 static void calculate_totalreserve_pages(void)
8257 {
8258 	struct pglist_data *pgdat;
8259 	unsigned long reserve_pages = 0;
8260 	enum zone_type i, j;
8261 
8262 	for_each_online_pgdat(pgdat) {
8263 
8264 		pgdat->totalreserve_pages = 0;
8265 
8266 		for (i = 0; i < MAX_NR_ZONES; i++) {
8267 			struct zone *zone = pgdat->node_zones + i;
8268 			long max = 0;
8269 			unsigned long managed_pages = zone_managed_pages(zone);
8270 
8271 			/* Find valid and maximum lowmem_reserve in the zone */
8272 			for (j = i; j < MAX_NR_ZONES; j++) {
8273 				if (zone->lowmem_reserve[j] > max)
8274 					max = zone->lowmem_reserve[j];
8275 			}
8276 
8277 			/* we treat the high watermark as reserved pages. */
8278 			max += high_wmark_pages(zone);
8279 
8280 			if (max > managed_pages)
8281 				max = managed_pages;
8282 
8283 			pgdat->totalreserve_pages += max;
8284 
8285 			reserve_pages += max;
8286 		}
8287 	}
8288 	totalreserve_pages = reserve_pages;
8289 }
8290 
8291 /*
8292  * setup_per_zone_lowmem_reserve - called whenever
8293  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8294  *	has a correct pages reserved value, so an adequate number of
8295  *	pages are left in the zone after a successful __alloc_pages().
8296  */
8297 static void setup_per_zone_lowmem_reserve(void)
8298 {
8299 	struct pglist_data *pgdat;
8300 	enum zone_type i, j;
8301 
8302 	for_each_online_pgdat(pgdat) {
8303 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8304 			struct zone *zone = &pgdat->node_zones[i];
8305 			int ratio = sysctl_lowmem_reserve_ratio[i];
8306 			bool clear = !ratio || !zone_managed_pages(zone);
8307 			unsigned long managed_pages = 0;
8308 
8309 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
8310 				struct zone *upper_zone = &pgdat->node_zones[j];
8311 
8312 				managed_pages += zone_managed_pages(upper_zone);
8313 
8314 				if (clear)
8315 					zone->lowmem_reserve[j] = 0;
8316 				else
8317 					zone->lowmem_reserve[j] = managed_pages / ratio;
8318 			}
8319 		}
8320 	}
8321 
8322 	/* update totalreserve_pages */
8323 	calculate_totalreserve_pages();
8324 }
8325 
8326 static void __setup_per_zone_wmarks(void)
8327 {
8328 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8329 	unsigned long lowmem_pages = 0;
8330 	struct zone *zone;
8331 	unsigned long flags;
8332 
8333 	/* Calculate total number of !ZONE_HIGHMEM pages */
8334 	for_each_zone(zone) {
8335 		if (!is_highmem(zone))
8336 			lowmem_pages += zone_managed_pages(zone);
8337 	}
8338 
8339 	for_each_zone(zone) {
8340 		u64 tmp;
8341 
8342 		spin_lock_irqsave(&zone->lock, flags);
8343 		tmp = (u64)pages_min * zone_managed_pages(zone);
8344 		do_div(tmp, lowmem_pages);
8345 		if (is_highmem(zone)) {
8346 			/*
8347 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8348 			 * need highmem pages, so cap pages_min to a small
8349 			 * value here.
8350 			 *
8351 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8352 			 * deltas control async page reclaim, and so should
8353 			 * not be capped for highmem.
8354 			 */
8355 			unsigned long min_pages;
8356 
8357 			min_pages = zone_managed_pages(zone) / 1024;
8358 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8359 			zone->_watermark[WMARK_MIN] = min_pages;
8360 		} else {
8361 			/*
8362 			 * If it's a lowmem zone, reserve a number of pages
8363 			 * proportionate to the zone's size.
8364 			 */
8365 			zone->_watermark[WMARK_MIN] = tmp;
8366 		}
8367 
8368 		/*
8369 		 * Set the kswapd watermarks distance according to the
8370 		 * scale factor in proportion to available memory, but
8371 		 * ensure a minimum size on small systems.
8372 		 */
8373 		tmp = max_t(u64, tmp >> 2,
8374 			    mult_frac(zone_managed_pages(zone),
8375 				      watermark_scale_factor, 10000));
8376 
8377 		zone->watermark_boost = 0;
8378 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
8379 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
8380 
8381 		spin_unlock_irqrestore(&zone->lock, flags);
8382 	}
8383 
8384 	/* update totalreserve_pages */
8385 	calculate_totalreserve_pages();
8386 }
8387 
8388 /**
8389  * setup_per_zone_wmarks - called when min_free_kbytes changes
8390  * or when memory is hot-{added|removed}
8391  *
8392  * Ensures that the watermark[min,low,high] values for each zone are set
8393  * correctly with respect to min_free_kbytes.
8394  */
8395 void setup_per_zone_wmarks(void)
8396 {
8397 	struct zone *zone;
8398 	static DEFINE_SPINLOCK(lock);
8399 
8400 	spin_lock(&lock);
8401 	__setup_per_zone_wmarks();
8402 	spin_unlock(&lock);
8403 
8404 	/*
8405 	 * The watermark size have changed so update the pcpu batch
8406 	 * and high limits or the limits may be inappropriate.
8407 	 */
8408 	for_each_zone(zone)
8409 		zone_pcp_update(zone, 0);
8410 }
8411 
8412 /*
8413  * Initialise min_free_kbytes.
8414  *
8415  * For small machines we want it small (128k min).  For large machines
8416  * we want it large (256MB max).  But it is not linear, because network
8417  * bandwidth does not increase linearly with machine size.  We use
8418  *
8419  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8420  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
8421  *
8422  * which yields
8423  *
8424  * 16MB:	512k
8425  * 32MB:	724k
8426  * 64MB:	1024k
8427  * 128MB:	1448k
8428  * 256MB:	2048k
8429  * 512MB:	2896k
8430  * 1024MB:	4096k
8431  * 2048MB:	5792k
8432  * 4096MB:	8192k
8433  * 8192MB:	11584k
8434  * 16384MB:	16384k
8435  */
8436 int __meminit init_per_zone_wmark_min(void)
8437 {
8438 	unsigned long lowmem_kbytes;
8439 	int new_min_free_kbytes;
8440 
8441 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8442 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8443 
8444 	if (new_min_free_kbytes > user_min_free_kbytes) {
8445 		min_free_kbytes = new_min_free_kbytes;
8446 		if (min_free_kbytes < 128)
8447 			min_free_kbytes = 128;
8448 		if (min_free_kbytes > 262144)
8449 			min_free_kbytes = 262144;
8450 	} else {
8451 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8452 				new_min_free_kbytes, user_min_free_kbytes);
8453 	}
8454 	setup_per_zone_wmarks();
8455 	refresh_zone_stat_thresholds();
8456 	setup_per_zone_lowmem_reserve();
8457 
8458 #ifdef CONFIG_NUMA
8459 	setup_min_unmapped_ratio();
8460 	setup_min_slab_ratio();
8461 #endif
8462 
8463 	khugepaged_min_free_kbytes_update();
8464 
8465 	return 0;
8466 }
8467 postcore_initcall(init_per_zone_wmark_min)
8468 
8469 /*
8470  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8471  *	that we can call two helper functions whenever min_free_kbytes
8472  *	changes.
8473  */
8474 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8475 		void *buffer, size_t *length, loff_t *ppos)
8476 {
8477 	int rc;
8478 
8479 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8480 	if (rc)
8481 		return rc;
8482 
8483 	if (write) {
8484 		user_min_free_kbytes = min_free_kbytes;
8485 		setup_per_zone_wmarks();
8486 	}
8487 	return 0;
8488 }
8489 
8490 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8491 		void *buffer, size_t *length, loff_t *ppos)
8492 {
8493 	int rc;
8494 
8495 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8496 	if (rc)
8497 		return rc;
8498 
8499 	if (write)
8500 		setup_per_zone_wmarks();
8501 
8502 	return 0;
8503 }
8504 
8505 #ifdef CONFIG_NUMA
8506 static void setup_min_unmapped_ratio(void)
8507 {
8508 	pg_data_t *pgdat;
8509 	struct zone *zone;
8510 
8511 	for_each_online_pgdat(pgdat)
8512 		pgdat->min_unmapped_pages = 0;
8513 
8514 	for_each_zone(zone)
8515 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8516 						         sysctl_min_unmapped_ratio) / 100;
8517 }
8518 
8519 
8520 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8521 		void *buffer, size_t *length, loff_t *ppos)
8522 {
8523 	int rc;
8524 
8525 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8526 	if (rc)
8527 		return rc;
8528 
8529 	setup_min_unmapped_ratio();
8530 
8531 	return 0;
8532 }
8533 
8534 static void setup_min_slab_ratio(void)
8535 {
8536 	pg_data_t *pgdat;
8537 	struct zone *zone;
8538 
8539 	for_each_online_pgdat(pgdat)
8540 		pgdat->min_slab_pages = 0;
8541 
8542 	for_each_zone(zone)
8543 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8544 						     sysctl_min_slab_ratio) / 100;
8545 }
8546 
8547 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8548 		void *buffer, size_t *length, loff_t *ppos)
8549 {
8550 	int rc;
8551 
8552 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8553 	if (rc)
8554 		return rc;
8555 
8556 	setup_min_slab_ratio();
8557 
8558 	return 0;
8559 }
8560 #endif
8561 
8562 /*
8563  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8564  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8565  *	whenever sysctl_lowmem_reserve_ratio changes.
8566  *
8567  * The reserve ratio obviously has absolutely no relation with the
8568  * minimum watermarks. The lowmem reserve ratio can only make sense
8569  * if in function of the boot time zone sizes.
8570  */
8571 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8572 		void *buffer, size_t *length, loff_t *ppos)
8573 {
8574 	int i;
8575 
8576 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8577 
8578 	for (i = 0; i < MAX_NR_ZONES; i++) {
8579 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8580 			sysctl_lowmem_reserve_ratio[i] = 0;
8581 	}
8582 
8583 	setup_per_zone_lowmem_reserve();
8584 	return 0;
8585 }
8586 
8587 /*
8588  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8589  * cpu. It is the fraction of total pages in each zone that a hot per cpu
8590  * pagelist can have before it gets flushed back to buddy allocator.
8591  */
8592 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8593 		int write, void *buffer, size_t *length, loff_t *ppos)
8594 {
8595 	struct zone *zone;
8596 	int old_percpu_pagelist_high_fraction;
8597 	int ret;
8598 
8599 	mutex_lock(&pcp_batch_high_lock);
8600 	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8601 
8602 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8603 	if (!write || ret < 0)
8604 		goto out;
8605 
8606 	/* Sanity checking to avoid pcp imbalance */
8607 	if (percpu_pagelist_high_fraction &&
8608 	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8609 		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8610 		ret = -EINVAL;
8611 		goto out;
8612 	}
8613 
8614 	/* No change? */
8615 	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8616 		goto out;
8617 
8618 	for_each_populated_zone(zone)
8619 		zone_set_pageset_high_and_batch(zone, 0);
8620 out:
8621 	mutex_unlock(&pcp_batch_high_lock);
8622 	return ret;
8623 }
8624 
8625 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8626 /*
8627  * Returns the number of pages that arch has reserved but
8628  * is not known to alloc_large_system_hash().
8629  */
8630 static unsigned long __init arch_reserved_kernel_pages(void)
8631 {
8632 	return 0;
8633 }
8634 #endif
8635 
8636 /*
8637  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8638  * machines. As memory size is increased the scale is also increased but at
8639  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8640  * quadruples the scale is increased by one, which means the size of hash table
8641  * only doubles, instead of quadrupling as well.
8642  * Because 32-bit systems cannot have large physical memory, where this scaling
8643  * makes sense, it is disabled on such platforms.
8644  */
8645 #if __BITS_PER_LONG > 32
8646 #define ADAPT_SCALE_BASE	(64ul << 30)
8647 #define ADAPT_SCALE_SHIFT	2
8648 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8649 #endif
8650 
8651 /*
8652  * allocate a large system hash table from bootmem
8653  * - it is assumed that the hash table must contain an exact power-of-2
8654  *   quantity of entries
8655  * - limit is the number of hash buckets, not the total allocation size
8656  */
8657 void *__init alloc_large_system_hash(const char *tablename,
8658 				     unsigned long bucketsize,
8659 				     unsigned long numentries,
8660 				     int scale,
8661 				     int flags,
8662 				     unsigned int *_hash_shift,
8663 				     unsigned int *_hash_mask,
8664 				     unsigned long low_limit,
8665 				     unsigned long high_limit)
8666 {
8667 	unsigned long long max = high_limit;
8668 	unsigned long log2qty, size;
8669 	void *table = NULL;
8670 	gfp_t gfp_flags;
8671 	bool virt;
8672 	bool huge;
8673 
8674 	/* allow the kernel cmdline to have a say */
8675 	if (!numentries) {
8676 		/* round applicable memory size up to nearest megabyte */
8677 		numentries = nr_kernel_pages;
8678 		numentries -= arch_reserved_kernel_pages();
8679 
8680 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8681 		if (PAGE_SHIFT < 20)
8682 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8683 
8684 #if __BITS_PER_LONG > 32
8685 		if (!high_limit) {
8686 			unsigned long adapt;
8687 
8688 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8689 			     adapt <<= ADAPT_SCALE_SHIFT)
8690 				scale++;
8691 		}
8692 #endif
8693 
8694 		/* limit to 1 bucket per 2^scale bytes of low memory */
8695 		if (scale > PAGE_SHIFT)
8696 			numentries >>= (scale - PAGE_SHIFT);
8697 		else
8698 			numentries <<= (PAGE_SHIFT - scale);
8699 
8700 		/* Make sure we've got at least a 0-order allocation.. */
8701 		if (unlikely(flags & HASH_SMALL)) {
8702 			/* Makes no sense without HASH_EARLY */
8703 			WARN_ON(!(flags & HASH_EARLY));
8704 			if (!(numentries >> *_hash_shift)) {
8705 				numentries = 1UL << *_hash_shift;
8706 				BUG_ON(!numentries);
8707 			}
8708 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8709 			numentries = PAGE_SIZE / bucketsize;
8710 	}
8711 	numentries = roundup_pow_of_two(numentries);
8712 
8713 	/* limit allocation size to 1/16 total memory by default */
8714 	if (max == 0) {
8715 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8716 		do_div(max, bucketsize);
8717 	}
8718 	max = min(max, 0x80000000ULL);
8719 
8720 	if (numentries < low_limit)
8721 		numentries = low_limit;
8722 	if (numentries > max)
8723 		numentries = max;
8724 
8725 	log2qty = ilog2(numentries);
8726 
8727 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8728 	do {
8729 		virt = false;
8730 		size = bucketsize << log2qty;
8731 		if (flags & HASH_EARLY) {
8732 			if (flags & HASH_ZERO)
8733 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8734 			else
8735 				table = memblock_alloc_raw(size,
8736 							   SMP_CACHE_BYTES);
8737 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8738 			table = __vmalloc(size, gfp_flags);
8739 			virt = true;
8740 			huge = is_vm_area_hugepages(table);
8741 		} else {
8742 			/*
8743 			 * If bucketsize is not a power-of-two, we may free
8744 			 * some pages at the end of hash table which
8745 			 * alloc_pages_exact() automatically does
8746 			 */
8747 			table = alloc_pages_exact(size, gfp_flags);
8748 			kmemleak_alloc(table, size, 1, gfp_flags);
8749 		}
8750 	} while (!table && size > PAGE_SIZE && --log2qty);
8751 
8752 	if (!table)
8753 		panic("Failed to allocate %s hash table\n", tablename);
8754 
8755 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8756 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8757 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
8758 
8759 	if (_hash_shift)
8760 		*_hash_shift = log2qty;
8761 	if (_hash_mask)
8762 		*_hash_mask = (1 << log2qty) - 1;
8763 
8764 	return table;
8765 }
8766 
8767 /*
8768  * This function checks whether pageblock includes unmovable pages or not.
8769  *
8770  * PageLRU check without isolation or lru_lock could race so that
8771  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
8772  * check without lock_page also may miss some movable non-lru pages at
8773  * race condition. So you can't expect this function should be exact.
8774  *
8775  * Returns a page without holding a reference. If the caller wants to
8776  * dereference that page (e.g., dumping), it has to make sure that it
8777  * cannot get removed (e.g., via memory unplug) concurrently.
8778  *
8779  */
8780 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8781 				 int migratetype, int flags)
8782 {
8783 	unsigned long iter = 0;
8784 	unsigned long pfn = page_to_pfn(page);
8785 	unsigned long offset = pfn % pageblock_nr_pages;
8786 
8787 	if (is_migrate_cma_page(page)) {
8788 		/*
8789 		 * CMA allocations (alloc_contig_range) really need to mark
8790 		 * isolate CMA pageblocks even when they are not movable in fact
8791 		 * so consider them movable here.
8792 		 */
8793 		if (is_migrate_cma(migratetype))
8794 			return NULL;
8795 
8796 		return page;
8797 	}
8798 
8799 	for (; iter < pageblock_nr_pages - offset; iter++) {
8800 		if (!pfn_valid_within(pfn + iter))
8801 			continue;
8802 
8803 		page = pfn_to_page(pfn + iter);
8804 
8805 		/*
8806 		 * Both, bootmem allocations and memory holes are marked
8807 		 * PG_reserved and are unmovable. We can even have unmovable
8808 		 * allocations inside ZONE_MOVABLE, for example when
8809 		 * specifying "movablecore".
8810 		 */
8811 		if (PageReserved(page))
8812 			return page;
8813 
8814 		/*
8815 		 * If the zone is movable and we have ruled out all reserved
8816 		 * pages then it should be reasonably safe to assume the rest
8817 		 * is movable.
8818 		 */
8819 		if (zone_idx(zone) == ZONE_MOVABLE)
8820 			continue;
8821 
8822 		/*
8823 		 * Hugepages are not in LRU lists, but they're movable.
8824 		 * THPs are on the LRU, but need to be counted as #small pages.
8825 		 * We need not scan over tail pages because we don't
8826 		 * handle each tail page individually in migration.
8827 		 */
8828 		if (PageHuge(page) || PageTransCompound(page)) {
8829 			struct page *head = compound_head(page);
8830 			unsigned int skip_pages;
8831 
8832 			if (PageHuge(page)) {
8833 				if (!hugepage_migration_supported(page_hstate(head)))
8834 					return page;
8835 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8836 				return page;
8837 			}
8838 
8839 			skip_pages = compound_nr(head) - (page - head);
8840 			iter += skip_pages - 1;
8841 			continue;
8842 		}
8843 
8844 		/*
8845 		 * We can't use page_count without pin a page
8846 		 * because another CPU can free compound page.
8847 		 * This check already skips compound tails of THP
8848 		 * because their page->_refcount is zero at all time.
8849 		 */
8850 		if (!page_ref_count(page)) {
8851 			if (PageBuddy(page))
8852 				iter += (1 << buddy_order(page)) - 1;
8853 			continue;
8854 		}
8855 
8856 		/*
8857 		 * The HWPoisoned page may be not in buddy system, and
8858 		 * page_count() is not 0.
8859 		 */
8860 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8861 			continue;
8862 
8863 		/*
8864 		 * We treat all PageOffline() pages as movable when offlining
8865 		 * to give drivers a chance to decrement their reference count
8866 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8867 		 * can be offlined as there are no direct references anymore.
8868 		 * For actually unmovable PageOffline() where the driver does
8869 		 * not support this, we will fail later when trying to actually
8870 		 * move these pages that still have a reference count > 0.
8871 		 * (false negatives in this function only)
8872 		 */
8873 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8874 			continue;
8875 
8876 		if (__PageMovable(page) || PageLRU(page))
8877 			continue;
8878 
8879 		/*
8880 		 * If there are RECLAIMABLE pages, we need to check
8881 		 * it.  But now, memory offline itself doesn't call
8882 		 * shrink_node_slabs() and it still to be fixed.
8883 		 */
8884 		return page;
8885 	}
8886 	return NULL;
8887 }
8888 
8889 #ifdef CONFIG_CONTIG_ALLOC
8890 static unsigned long pfn_max_align_down(unsigned long pfn)
8891 {
8892 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8893 			     pageblock_nr_pages) - 1);
8894 }
8895 
8896 static unsigned long pfn_max_align_up(unsigned long pfn)
8897 {
8898 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8899 				pageblock_nr_pages));
8900 }
8901 
8902 #if defined(CONFIG_DYNAMIC_DEBUG) || \
8903 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
8904 /* Usage: See admin-guide/dynamic-debug-howto.rst */
8905 static void alloc_contig_dump_pages(struct list_head *page_list)
8906 {
8907 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
8908 
8909 	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
8910 		struct page *page;
8911 
8912 		dump_stack();
8913 		list_for_each_entry(page, page_list, lru)
8914 			dump_page(page, "migration failure");
8915 	}
8916 }
8917 #else
8918 static inline void alloc_contig_dump_pages(struct list_head *page_list)
8919 {
8920 }
8921 #endif
8922 
8923 /* [start, end) must belong to a single zone. */
8924 static int __alloc_contig_migrate_range(struct compact_control *cc,
8925 					unsigned long start, unsigned long end)
8926 {
8927 	/* This function is based on compact_zone() from compaction.c. */
8928 	unsigned int nr_reclaimed;
8929 	unsigned long pfn = start;
8930 	unsigned int tries = 0;
8931 	int ret = 0;
8932 	struct migration_target_control mtc = {
8933 		.nid = zone_to_nid(cc->zone),
8934 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8935 	};
8936 
8937 	lru_cache_disable();
8938 
8939 	while (pfn < end || !list_empty(&cc->migratepages)) {
8940 		if (fatal_signal_pending(current)) {
8941 			ret = -EINTR;
8942 			break;
8943 		}
8944 
8945 		if (list_empty(&cc->migratepages)) {
8946 			cc->nr_migratepages = 0;
8947 			ret = isolate_migratepages_range(cc, pfn, end);
8948 			if (ret && ret != -EAGAIN)
8949 				break;
8950 			pfn = cc->migrate_pfn;
8951 			tries = 0;
8952 		} else if (++tries == 5) {
8953 			ret = -EBUSY;
8954 			break;
8955 		}
8956 
8957 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8958 							&cc->migratepages);
8959 		cc->nr_migratepages -= nr_reclaimed;
8960 
8961 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8962 				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
8963 
8964 		/*
8965 		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
8966 		 * to retry again over this error, so do the same here.
8967 		 */
8968 		if (ret == -ENOMEM)
8969 			break;
8970 	}
8971 
8972 	lru_cache_enable();
8973 	if (ret < 0) {
8974 		if (ret == -EBUSY)
8975 			alloc_contig_dump_pages(&cc->migratepages);
8976 		putback_movable_pages(&cc->migratepages);
8977 		return ret;
8978 	}
8979 	return 0;
8980 }
8981 
8982 /**
8983  * alloc_contig_range() -- tries to allocate given range of pages
8984  * @start:	start PFN to allocate
8985  * @end:	one-past-the-last PFN to allocate
8986  * @migratetype:	migratetype of the underlying pageblocks (either
8987  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8988  *			in range must have the same migratetype and it must
8989  *			be either of the two.
8990  * @gfp_mask:	GFP mask to use during compaction
8991  *
8992  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8993  * aligned.  The PFN range must belong to a single zone.
8994  *
8995  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8996  * pageblocks in the range.  Once isolated, the pageblocks should not
8997  * be modified by others.
8998  *
8999  * Return: zero on success or negative error code.  On success all
9000  * pages which PFN is in [start, end) are allocated for the caller and
9001  * need to be freed with free_contig_range().
9002  */
9003 int alloc_contig_range(unsigned long start, unsigned long end,
9004 		       unsigned migratetype, gfp_t gfp_mask)
9005 {
9006 	unsigned long outer_start, outer_end;
9007 	unsigned int order;
9008 	int ret = 0;
9009 
9010 	struct compact_control cc = {
9011 		.nr_migratepages = 0,
9012 		.order = -1,
9013 		.zone = page_zone(pfn_to_page(start)),
9014 		.mode = MIGRATE_SYNC,
9015 		.ignore_skip_hint = true,
9016 		.no_set_skip_hint = true,
9017 		.gfp_mask = current_gfp_context(gfp_mask),
9018 		.alloc_contig = true,
9019 	};
9020 	INIT_LIST_HEAD(&cc.migratepages);
9021 
9022 	/*
9023 	 * What we do here is we mark all pageblocks in range as
9024 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
9025 	 * have different sizes, and due to the way page allocator
9026 	 * work, we align the range to biggest of the two pages so
9027 	 * that page allocator won't try to merge buddies from
9028 	 * different pageblocks and change MIGRATE_ISOLATE to some
9029 	 * other migration type.
9030 	 *
9031 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9032 	 * migrate the pages from an unaligned range (ie. pages that
9033 	 * we are interested in).  This will put all the pages in
9034 	 * range back to page allocator as MIGRATE_ISOLATE.
9035 	 *
9036 	 * When this is done, we take the pages in range from page
9037 	 * allocator removing them from the buddy system.  This way
9038 	 * page allocator will never consider using them.
9039 	 *
9040 	 * This lets us mark the pageblocks back as
9041 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9042 	 * aligned range but not in the unaligned, original range are
9043 	 * put back to page allocator so that buddy can use them.
9044 	 */
9045 
9046 	ret = start_isolate_page_range(pfn_max_align_down(start),
9047 				       pfn_max_align_up(end), migratetype, 0);
9048 	if (ret)
9049 		return ret;
9050 
9051 	drain_all_pages(cc.zone);
9052 
9053 	/*
9054 	 * In case of -EBUSY, we'd like to know which page causes problem.
9055 	 * So, just fall through. test_pages_isolated() has a tracepoint
9056 	 * which will report the busy page.
9057 	 *
9058 	 * It is possible that busy pages could become available before
9059 	 * the call to test_pages_isolated, and the range will actually be
9060 	 * allocated.  So, if we fall through be sure to clear ret so that
9061 	 * -EBUSY is not accidentally used or returned to caller.
9062 	 */
9063 	ret = __alloc_contig_migrate_range(&cc, start, end);
9064 	if (ret && ret != -EBUSY)
9065 		goto done;
9066 	ret = 0;
9067 
9068 	/*
9069 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
9070 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
9071 	 * more, all pages in [start, end) are free in page allocator.
9072 	 * What we are going to do is to allocate all pages from
9073 	 * [start, end) (that is remove them from page allocator).
9074 	 *
9075 	 * The only problem is that pages at the beginning and at the
9076 	 * end of interesting range may be not aligned with pages that
9077 	 * page allocator holds, ie. they can be part of higher order
9078 	 * pages.  Because of this, we reserve the bigger range and
9079 	 * once this is done free the pages we are not interested in.
9080 	 *
9081 	 * We don't have to hold zone->lock here because the pages are
9082 	 * isolated thus they won't get removed from buddy.
9083 	 */
9084 
9085 	order = 0;
9086 	outer_start = start;
9087 	while (!PageBuddy(pfn_to_page(outer_start))) {
9088 		if (++order >= MAX_ORDER) {
9089 			outer_start = start;
9090 			break;
9091 		}
9092 		outer_start &= ~0UL << order;
9093 	}
9094 
9095 	if (outer_start != start) {
9096 		order = buddy_order(pfn_to_page(outer_start));
9097 
9098 		/*
9099 		 * outer_start page could be small order buddy page and
9100 		 * it doesn't include start page. Adjust outer_start
9101 		 * in this case to report failed page properly
9102 		 * on tracepoint in test_pages_isolated()
9103 		 */
9104 		if (outer_start + (1UL << order) <= start)
9105 			outer_start = start;
9106 	}
9107 
9108 	/* Make sure the range is really isolated. */
9109 	if (test_pages_isolated(outer_start, end, 0)) {
9110 		ret = -EBUSY;
9111 		goto done;
9112 	}
9113 
9114 	/* Grab isolated pages from freelists. */
9115 	outer_end = isolate_freepages_range(&cc, outer_start, end);
9116 	if (!outer_end) {
9117 		ret = -EBUSY;
9118 		goto done;
9119 	}
9120 
9121 	/* Free head and tail (if any) */
9122 	if (start != outer_start)
9123 		free_contig_range(outer_start, start - outer_start);
9124 	if (end != outer_end)
9125 		free_contig_range(end, outer_end - end);
9126 
9127 done:
9128 	undo_isolate_page_range(pfn_max_align_down(start),
9129 				pfn_max_align_up(end), migratetype);
9130 	return ret;
9131 }
9132 EXPORT_SYMBOL(alloc_contig_range);
9133 
9134 static int __alloc_contig_pages(unsigned long start_pfn,
9135 				unsigned long nr_pages, gfp_t gfp_mask)
9136 {
9137 	unsigned long end_pfn = start_pfn + nr_pages;
9138 
9139 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9140 				  gfp_mask);
9141 }
9142 
9143 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9144 				   unsigned long nr_pages)
9145 {
9146 	unsigned long i, end_pfn = start_pfn + nr_pages;
9147 	struct page *page;
9148 
9149 	for (i = start_pfn; i < end_pfn; i++) {
9150 		page = pfn_to_online_page(i);
9151 		if (!page)
9152 			return false;
9153 
9154 		if (page_zone(page) != z)
9155 			return false;
9156 
9157 		if (PageReserved(page))
9158 			return false;
9159 	}
9160 	return true;
9161 }
9162 
9163 static bool zone_spans_last_pfn(const struct zone *zone,
9164 				unsigned long start_pfn, unsigned long nr_pages)
9165 {
9166 	unsigned long last_pfn = start_pfn + nr_pages - 1;
9167 
9168 	return zone_spans_pfn(zone, last_pfn);
9169 }
9170 
9171 /**
9172  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9173  * @nr_pages:	Number of contiguous pages to allocate
9174  * @gfp_mask:	GFP mask to limit search and used during compaction
9175  * @nid:	Target node
9176  * @nodemask:	Mask for other possible nodes
9177  *
9178  * This routine is a wrapper around alloc_contig_range(). It scans over zones
9179  * on an applicable zonelist to find a contiguous pfn range which can then be
9180  * tried for allocation with alloc_contig_range(). This routine is intended
9181  * for allocation requests which can not be fulfilled with the buddy allocator.
9182  *
9183  * The allocated memory is always aligned to a page boundary. If nr_pages is a
9184  * power of two then the alignment is guaranteed to be to the given nr_pages
9185  * (e.g. 1GB request would be aligned to 1GB).
9186  *
9187  * Allocated pages can be freed with free_contig_range() or by manually calling
9188  * __free_page() on each allocated page.
9189  *
9190  * Return: pointer to contiguous pages on success, or NULL if not successful.
9191  */
9192 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9193 				int nid, nodemask_t *nodemask)
9194 {
9195 	unsigned long ret, pfn, flags;
9196 	struct zonelist *zonelist;
9197 	struct zone *zone;
9198 	struct zoneref *z;
9199 
9200 	zonelist = node_zonelist(nid, gfp_mask);
9201 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
9202 					gfp_zone(gfp_mask), nodemask) {
9203 		spin_lock_irqsave(&zone->lock, flags);
9204 
9205 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9206 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9207 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9208 				/*
9209 				 * We release the zone lock here because
9210 				 * alloc_contig_range() will also lock the zone
9211 				 * at some point. If there's an allocation
9212 				 * spinning on this lock, it may win the race
9213 				 * and cause alloc_contig_range() to fail...
9214 				 */
9215 				spin_unlock_irqrestore(&zone->lock, flags);
9216 				ret = __alloc_contig_pages(pfn, nr_pages,
9217 							gfp_mask);
9218 				if (!ret)
9219 					return pfn_to_page(pfn);
9220 				spin_lock_irqsave(&zone->lock, flags);
9221 			}
9222 			pfn += nr_pages;
9223 		}
9224 		spin_unlock_irqrestore(&zone->lock, flags);
9225 	}
9226 	return NULL;
9227 }
9228 #endif /* CONFIG_CONTIG_ALLOC */
9229 
9230 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9231 {
9232 	unsigned long count = 0;
9233 
9234 	for (; nr_pages--; pfn++) {
9235 		struct page *page = pfn_to_page(pfn);
9236 
9237 		count += page_count(page) != 1;
9238 		__free_page(page);
9239 	}
9240 	WARN(count != 0, "%lu pages are still in use!\n", count);
9241 }
9242 EXPORT_SYMBOL(free_contig_range);
9243 
9244 /*
9245  * The zone indicated has a new number of managed_pages; batch sizes and percpu
9246  * page high values need to be recalculated.
9247  */
9248 void zone_pcp_update(struct zone *zone, int cpu_online)
9249 {
9250 	mutex_lock(&pcp_batch_high_lock);
9251 	zone_set_pageset_high_and_batch(zone, cpu_online);
9252 	mutex_unlock(&pcp_batch_high_lock);
9253 }
9254 
9255 /*
9256  * Effectively disable pcplists for the zone by setting the high limit to 0
9257  * and draining all cpus. A concurrent page freeing on another CPU that's about
9258  * to put the page on pcplist will either finish before the drain and the page
9259  * will be drained, or observe the new high limit and skip the pcplist.
9260  *
9261  * Must be paired with a call to zone_pcp_enable().
9262  */
9263 void zone_pcp_disable(struct zone *zone)
9264 {
9265 	mutex_lock(&pcp_batch_high_lock);
9266 	__zone_set_pageset_high_and_batch(zone, 0, 1);
9267 	__drain_all_pages(zone, true);
9268 }
9269 
9270 void zone_pcp_enable(struct zone *zone)
9271 {
9272 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9273 	mutex_unlock(&pcp_batch_high_lock);
9274 }
9275 
9276 void zone_pcp_reset(struct zone *zone)
9277 {
9278 	int cpu;
9279 	struct per_cpu_zonestat *pzstats;
9280 
9281 	if (zone->per_cpu_pageset != &boot_pageset) {
9282 		for_each_online_cpu(cpu) {
9283 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9284 			drain_zonestat(zone, pzstats);
9285 		}
9286 		free_percpu(zone->per_cpu_pageset);
9287 		free_percpu(zone->per_cpu_zonestats);
9288 		zone->per_cpu_pageset = &boot_pageset;
9289 		zone->per_cpu_zonestats = &boot_zonestats;
9290 	}
9291 }
9292 
9293 #ifdef CONFIG_MEMORY_HOTREMOVE
9294 /*
9295  * All pages in the range must be in a single zone, must not contain holes,
9296  * must span full sections, and must be isolated before calling this function.
9297  */
9298 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9299 {
9300 	unsigned long pfn = start_pfn;
9301 	struct page *page;
9302 	struct zone *zone;
9303 	unsigned int order;
9304 	unsigned long flags;
9305 
9306 	offline_mem_sections(pfn, end_pfn);
9307 	zone = page_zone(pfn_to_page(pfn));
9308 	spin_lock_irqsave(&zone->lock, flags);
9309 	while (pfn < end_pfn) {
9310 		page = pfn_to_page(pfn);
9311 		/*
9312 		 * The HWPoisoned page may be not in buddy system, and
9313 		 * page_count() is not 0.
9314 		 */
9315 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9316 			pfn++;
9317 			continue;
9318 		}
9319 		/*
9320 		 * At this point all remaining PageOffline() pages have a
9321 		 * reference count of 0 and can simply be skipped.
9322 		 */
9323 		if (PageOffline(page)) {
9324 			BUG_ON(page_count(page));
9325 			BUG_ON(PageBuddy(page));
9326 			pfn++;
9327 			continue;
9328 		}
9329 
9330 		BUG_ON(page_count(page));
9331 		BUG_ON(!PageBuddy(page));
9332 		order = buddy_order(page);
9333 		del_page_from_free_list(page, zone, order);
9334 		pfn += (1 << order);
9335 	}
9336 	spin_unlock_irqrestore(&zone->lock, flags);
9337 }
9338 #endif
9339 
9340 bool is_free_buddy_page(struct page *page)
9341 {
9342 	struct zone *zone = page_zone(page);
9343 	unsigned long pfn = page_to_pfn(page);
9344 	unsigned long flags;
9345 	unsigned int order;
9346 
9347 	spin_lock_irqsave(&zone->lock, flags);
9348 	for (order = 0; order < MAX_ORDER; order++) {
9349 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9350 
9351 		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
9352 			break;
9353 	}
9354 	spin_unlock_irqrestore(&zone->lock, flags);
9355 
9356 	return order < MAX_ORDER;
9357 }
9358 
9359 #ifdef CONFIG_MEMORY_FAILURE
9360 /*
9361  * Break down a higher-order page in sub-pages, and keep our target out of
9362  * buddy allocator.
9363  */
9364 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9365 				   struct page *target, int low, int high,
9366 				   int migratetype)
9367 {
9368 	unsigned long size = 1 << high;
9369 	struct page *current_buddy, *next_page;
9370 
9371 	while (high > low) {
9372 		high--;
9373 		size >>= 1;
9374 
9375 		if (target >= &page[size]) {
9376 			next_page = page + size;
9377 			current_buddy = page;
9378 		} else {
9379 			next_page = page;
9380 			current_buddy = page + size;
9381 		}
9382 
9383 		if (set_page_guard(zone, current_buddy, high, migratetype))
9384 			continue;
9385 
9386 		if (current_buddy != target) {
9387 			add_to_free_list(current_buddy, zone, high, migratetype);
9388 			set_buddy_order(current_buddy, high);
9389 			page = next_page;
9390 		}
9391 	}
9392 }
9393 
9394 /*
9395  * Take a page that will be marked as poisoned off the buddy allocator.
9396  */
9397 bool take_page_off_buddy(struct page *page)
9398 {
9399 	struct zone *zone = page_zone(page);
9400 	unsigned long pfn = page_to_pfn(page);
9401 	unsigned long flags;
9402 	unsigned int order;
9403 	bool ret = false;
9404 
9405 	spin_lock_irqsave(&zone->lock, flags);
9406 	for (order = 0; order < MAX_ORDER; order++) {
9407 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9408 		int page_order = buddy_order(page_head);
9409 
9410 		if (PageBuddy(page_head) && page_order >= order) {
9411 			unsigned long pfn_head = page_to_pfn(page_head);
9412 			int migratetype = get_pfnblock_migratetype(page_head,
9413 								   pfn_head);
9414 
9415 			del_page_from_free_list(page_head, zone, page_order);
9416 			break_down_buddy_pages(zone, page_head, page, 0,
9417 						page_order, migratetype);
9418 			if (!is_migrate_isolate(migratetype))
9419 				__mod_zone_freepage_state(zone, -1, migratetype);
9420 			ret = true;
9421 			break;
9422 		}
9423 		if (page_count(page_head) > 0)
9424 			break;
9425 	}
9426 	spin_unlock_irqrestore(&zone->lock, flags);
9427 	return ret;
9428 }
9429 #endif
9430