xref: /openbmc/linux/mm/page_alloc.c (revision 236f57fe1b8853fb3505502c0f94ae64d153ae92)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list, the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/kthread.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/lockdep.h>
70 #include <linux/nmi.h>
71 #include <linux/psi.h>
72 #include <linux/padata.h>
73 #include <linux/khugepaged.h>
74 #include <linux/buffer_head.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/div64.h>
78 #include "internal.h"
79 #include "shuffle.h"
80 #include "page_reporting.h"
81 
82 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
83 typedef int __bitwise fpi_t;
84 
85 /* No special request */
86 #define FPI_NONE		((__force fpi_t)0)
87 
88 /*
89  * Skip free page reporting notification for the (possibly merged) page.
90  * This does not hinder free page reporting from grabbing the page,
91  * reporting it and marking it "reported" -  it only skips notifying
92  * the free page reporting infrastructure about a newly freed page. For
93  * example, used when temporarily pulling a page from a freelist and
94  * putting it back unmodified.
95  */
96 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
97 
98 /*
99  * Place the (possibly merged) page to the tail of the freelist. Will ignore
100  * page shuffling (relevant code - e.g., memory onlining - is expected to
101  * shuffle the whole zone).
102  *
103  * Note: No code should rely on this flag for correctness - it's purely
104  *       to allow for optimizations when handing back either fresh pages
105  *       (memory onlining) or untouched pages (page isolation, free page
106  *       reporting).
107  */
108 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
109 
110 /*
111  * Don't poison memory with KASAN (only for the tag-based modes).
112  * During boot, all non-reserved memblock memory is exposed to page_alloc.
113  * Poisoning all that memory lengthens boot time, especially on systems with
114  * large amount of RAM. This flag is used to skip that poisoning.
115  * This is only done for the tag-based KASAN modes, as those are able to
116  * detect memory corruptions with the memory tags assigned by default.
117  * All memory allocated normally after boot gets poisoned as usual.
118  */
119 #define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
120 
121 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
122 static DEFINE_MUTEX(pcp_batch_high_lock);
123 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
124 
125 struct pagesets {
126 	local_lock_t lock;
127 };
128 static DEFINE_PER_CPU(struct pagesets, pagesets) = {
129 	.lock = INIT_LOCAL_LOCK(lock),
130 };
131 
132 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
133 DEFINE_PER_CPU(int, numa_node);
134 EXPORT_PER_CPU_SYMBOL(numa_node);
135 #endif
136 
137 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
138 
139 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
140 /*
141  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
142  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
143  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
144  * defined in <linux/topology.h>.
145  */
146 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
147 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
148 #endif
149 
150 /* work_structs for global per-cpu drains */
151 struct pcpu_drain {
152 	struct zone *zone;
153 	struct work_struct work;
154 };
155 static DEFINE_MUTEX(pcpu_drain_mutex);
156 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
157 
158 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
159 volatile unsigned long latent_entropy __latent_entropy;
160 EXPORT_SYMBOL(latent_entropy);
161 #endif
162 
163 /*
164  * Array of node states.
165  */
166 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
167 	[N_POSSIBLE] = NODE_MASK_ALL,
168 	[N_ONLINE] = { { [0] = 1UL } },
169 #ifndef CONFIG_NUMA
170 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
171 #ifdef CONFIG_HIGHMEM
172 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
173 #endif
174 	[N_MEMORY] = { { [0] = 1UL } },
175 	[N_CPU] = { { [0] = 1UL } },
176 #endif	/* NUMA */
177 };
178 EXPORT_SYMBOL(node_states);
179 
180 atomic_long_t _totalram_pages __read_mostly;
181 EXPORT_SYMBOL(_totalram_pages);
182 unsigned long totalreserve_pages __read_mostly;
183 unsigned long totalcma_pages __read_mostly;
184 
185 int percpu_pagelist_high_fraction;
186 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
187 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
188 EXPORT_SYMBOL(init_on_alloc);
189 
190 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
191 EXPORT_SYMBOL(init_on_free);
192 
193 static bool _init_on_alloc_enabled_early __read_mostly
194 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
195 static int __init early_init_on_alloc(char *buf)
196 {
197 
198 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
199 }
200 early_param("init_on_alloc", early_init_on_alloc);
201 
202 static bool _init_on_free_enabled_early __read_mostly
203 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
204 static int __init early_init_on_free(char *buf)
205 {
206 	return kstrtobool(buf, &_init_on_free_enabled_early);
207 }
208 early_param("init_on_free", early_init_on_free);
209 
210 /*
211  * A cached value of the page's pageblock's migratetype, used when the page is
212  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
213  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
214  * Also the migratetype set in the page does not necessarily match the pcplist
215  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
216  * other index - this ensures that it will be put on the correct CMA freelist.
217  */
218 static inline int get_pcppage_migratetype(struct page *page)
219 {
220 	return page->index;
221 }
222 
223 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
224 {
225 	page->index = migratetype;
226 }
227 
228 #ifdef CONFIG_PM_SLEEP
229 /*
230  * The following functions are used by the suspend/hibernate code to temporarily
231  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
232  * while devices are suspended.  To avoid races with the suspend/hibernate code,
233  * they should always be called with system_transition_mutex held
234  * (gfp_allowed_mask also should only be modified with system_transition_mutex
235  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
236  * with that modification).
237  */
238 
239 static gfp_t saved_gfp_mask;
240 
241 void pm_restore_gfp_mask(void)
242 {
243 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
244 	if (saved_gfp_mask) {
245 		gfp_allowed_mask = saved_gfp_mask;
246 		saved_gfp_mask = 0;
247 	}
248 }
249 
250 void pm_restrict_gfp_mask(void)
251 {
252 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
253 	WARN_ON(saved_gfp_mask);
254 	saved_gfp_mask = gfp_allowed_mask;
255 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
256 }
257 
258 bool pm_suspended_storage(void)
259 {
260 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
261 		return false;
262 	return true;
263 }
264 #endif /* CONFIG_PM_SLEEP */
265 
266 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
267 unsigned int pageblock_order __read_mostly;
268 #endif
269 
270 static void __free_pages_ok(struct page *page, unsigned int order,
271 			    fpi_t fpi_flags);
272 
273 /*
274  * results with 256, 32 in the lowmem_reserve sysctl:
275  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
276  *	1G machine -> (16M dma, 784M normal, 224M high)
277  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
278  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
279  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
280  *
281  * TBD: should special case ZONE_DMA32 machines here - in those we normally
282  * don't need any ZONE_NORMAL reservation
283  */
284 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
285 #ifdef CONFIG_ZONE_DMA
286 	[ZONE_DMA] = 256,
287 #endif
288 #ifdef CONFIG_ZONE_DMA32
289 	[ZONE_DMA32] = 256,
290 #endif
291 	[ZONE_NORMAL] = 32,
292 #ifdef CONFIG_HIGHMEM
293 	[ZONE_HIGHMEM] = 0,
294 #endif
295 	[ZONE_MOVABLE] = 0,
296 };
297 
298 static char * const zone_names[MAX_NR_ZONES] = {
299 #ifdef CONFIG_ZONE_DMA
300 	 "DMA",
301 #endif
302 #ifdef CONFIG_ZONE_DMA32
303 	 "DMA32",
304 #endif
305 	 "Normal",
306 #ifdef CONFIG_HIGHMEM
307 	 "HighMem",
308 #endif
309 	 "Movable",
310 #ifdef CONFIG_ZONE_DEVICE
311 	 "Device",
312 #endif
313 };
314 
315 const char * const migratetype_names[MIGRATE_TYPES] = {
316 	"Unmovable",
317 	"Movable",
318 	"Reclaimable",
319 	"HighAtomic",
320 #ifdef CONFIG_CMA
321 	"CMA",
322 #endif
323 #ifdef CONFIG_MEMORY_ISOLATION
324 	"Isolate",
325 #endif
326 };
327 
328 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
329 	[NULL_COMPOUND_DTOR] = NULL,
330 	[COMPOUND_PAGE_DTOR] = free_compound_page,
331 #ifdef CONFIG_HUGETLB_PAGE
332 	[HUGETLB_PAGE_DTOR] = free_huge_page,
333 #endif
334 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
335 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
336 #endif
337 };
338 
339 int min_free_kbytes = 1024;
340 int user_min_free_kbytes = -1;
341 int watermark_boost_factor __read_mostly = 15000;
342 int watermark_scale_factor = 10;
343 
344 static unsigned long nr_kernel_pages __initdata;
345 static unsigned long nr_all_pages __initdata;
346 static unsigned long dma_reserve __initdata;
347 
348 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
349 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
350 static unsigned long required_kernelcore __initdata;
351 static unsigned long required_kernelcore_percent __initdata;
352 static unsigned long required_movablecore __initdata;
353 static unsigned long required_movablecore_percent __initdata;
354 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
355 static bool mirrored_kernelcore __meminitdata;
356 
357 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
358 int movable_zone;
359 EXPORT_SYMBOL(movable_zone);
360 
361 #if MAX_NUMNODES > 1
362 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
363 unsigned int nr_online_nodes __read_mostly = 1;
364 EXPORT_SYMBOL(nr_node_ids);
365 EXPORT_SYMBOL(nr_online_nodes);
366 #endif
367 
368 int page_group_by_mobility_disabled __read_mostly;
369 
370 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
371 /*
372  * During boot we initialize deferred pages on-demand, as needed, but once
373  * page_alloc_init_late() has finished, the deferred pages are all initialized,
374  * and we can permanently disable that path.
375  */
376 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
377 
378 /*
379  * Calling kasan_poison_pages() only after deferred memory initialization
380  * has completed. Poisoning pages during deferred memory init will greatly
381  * lengthen the process and cause problem in large memory systems as the
382  * deferred pages initialization is done with interrupt disabled.
383  *
384  * Assuming that there will be no reference to those newly initialized
385  * pages before they are ever allocated, this should have no effect on
386  * KASAN memory tracking as the poison will be properly inserted at page
387  * allocation time. The only corner case is when pages are allocated by
388  * on-demand allocation and then freed again before the deferred pages
389  * initialization is done, but this is not likely to happen.
390  */
391 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
392 {
393 	return static_branch_unlikely(&deferred_pages) ||
394 	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
395 		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
396 	       PageSkipKASanPoison(page);
397 }
398 
399 /* Returns true if the struct page for the pfn is uninitialised */
400 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
401 {
402 	int nid = early_pfn_to_nid(pfn);
403 
404 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
405 		return true;
406 
407 	return false;
408 }
409 
410 /*
411  * Returns true when the remaining initialisation should be deferred until
412  * later in the boot cycle when it can be parallelised.
413  */
414 static bool __meminit
415 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
416 {
417 	static unsigned long prev_end_pfn, nr_initialised;
418 
419 	/*
420 	 * prev_end_pfn static that contains the end of previous zone
421 	 * No need to protect because called very early in boot before smp_init.
422 	 */
423 	if (prev_end_pfn != end_pfn) {
424 		prev_end_pfn = end_pfn;
425 		nr_initialised = 0;
426 	}
427 
428 	/* Always populate low zones for address-constrained allocations */
429 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
430 		return false;
431 
432 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
433 		return true;
434 	/*
435 	 * We start only with one section of pages, more pages are added as
436 	 * needed until the rest of deferred pages are initialized.
437 	 */
438 	nr_initialised++;
439 	if ((nr_initialised > PAGES_PER_SECTION) &&
440 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
441 		NODE_DATA(nid)->first_deferred_pfn = pfn;
442 		return true;
443 	}
444 	return false;
445 }
446 #else
447 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
448 {
449 	return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
450 		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
451 	       PageSkipKASanPoison(page);
452 }
453 
454 static inline bool early_page_uninitialised(unsigned long pfn)
455 {
456 	return false;
457 }
458 
459 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
460 {
461 	return false;
462 }
463 #endif
464 
465 /* Return a pointer to the bitmap storing bits affecting a block of pages */
466 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
467 							unsigned long pfn)
468 {
469 #ifdef CONFIG_SPARSEMEM
470 	return section_to_usemap(__pfn_to_section(pfn));
471 #else
472 	return page_zone(page)->pageblock_flags;
473 #endif /* CONFIG_SPARSEMEM */
474 }
475 
476 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
477 {
478 #ifdef CONFIG_SPARSEMEM
479 	pfn &= (PAGES_PER_SECTION-1);
480 #else
481 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
482 #endif /* CONFIG_SPARSEMEM */
483 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
484 }
485 
486 static __always_inline
487 unsigned long __get_pfnblock_flags_mask(const struct page *page,
488 					unsigned long pfn,
489 					unsigned long mask)
490 {
491 	unsigned long *bitmap;
492 	unsigned long bitidx, word_bitidx;
493 	unsigned long word;
494 
495 	bitmap = get_pageblock_bitmap(page, pfn);
496 	bitidx = pfn_to_bitidx(page, pfn);
497 	word_bitidx = bitidx / BITS_PER_LONG;
498 	bitidx &= (BITS_PER_LONG-1);
499 
500 	word = bitmap[word_bitidx];
501 	return (word >> bitidx) & mask;
502 }
503 
504 /**
505  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
506  * @page: The page within the block of interest
507  * @pfn: The target page frame number
508  * @mask: mask of bits that the caller is interested in
509  *
510  * Return: pageblock_bits flags
511  */
512 unsigned long get_pfnblock_flags_mask(const struct page *page,
513 					unsigned long pfn, unsigned long mask)
514 {
515 	return __get_pfnblock_flags_mask(page, pfn, mask);
516 }
517 
518 static __always_inline int get_pfnblock_migratetype(const struct page *page,
519 					unsigned long pfn)
520 {
521 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
522 }
523 
524 /**
525  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
526  * @page: The page within the block of interest
527  * @flags: The flags to set
528  * @pfn: The target page frame number
529  * @mask: mask of bits that the caller is interested in
530  */
531 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
532 					unsigned long pfn,
533 					unsigned long mask)
534 {
535 	unsigned long *bitmap;
536 	unsigned long bitidx, word_bitidx;
537 	unsigned long old_word, word;
538 
539 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
540 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
541 
542 	bitmap = get_pageblock_bitmap(page, pfn);
543 	bitidx = pfn_to_bitidx(page, pfn);
544 	word_bitidx = bitidx / BITS_PER_LONG;
545 	bitidx &= (BITS_PER_LONG-1);
546 
547 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
548 
549 	mask <<= bitidx;
550 	flags <<= bitidx;
551 
552 	word = READ_ONCE(bitmap[word_bitidx]);
553 	for (;;) {
554 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
555 		if (word == old_word)
556 			break;
557 		word = old_word;
558 	}
559 }
560 
561 void set_pageblock_migratetype(struct page *page, int migratetype)
562 {
563 	if (unlikely(page_group_by_mobility_disabled &&
564 		     migratetype < MIGRATE_PCPTYPES))
565 		migratetype = MIGRATE_UNMOVABLE;
566 
567 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
568 				page_to_pfn(page), MIGRATETYPE_MASK);
569 }
570 
571 #ifdef CONFIG_DEBUG_VM
572 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
573 {
574 	int ret = 0;
575 	unsigned seq;
576 	unsigned long pfn = page_to_pfn(page);
577 	unsigned long sp, start_pfn;
578 
579 	do {
580 		seq = zone_span_seqbegin(zone);
581 		start_pfn = zone->zone_start_pfn;
582 		sp = zone->spanned_pages;
583 		if (!zone_spans_pfn(zone, pfn))
584 			ret = 1;
585 	} while (zone_span_seqretry(zone, seq));
586 
587 	if (ret)
588 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
589 			pfn, zone_to_nid(zone), zone->name,
590 			start_pfn, start_pfn + sp);
591 
592 	return ret;
593 }
594 
595 static int page_is_consistent(struct zone *zone, struct page *page)
596 {
597 	if (zone != page_zone(page))
598 		return 0;
599 
600 	return 1;
601 }
602 /*
603  * Temporary debugging check for pages not lying within a given zone.
604  */
605 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
606 {
607 	if (page_outside_zone_boundaries(zone, page))
608 		return 1;
609 	if (!page_is_consistent(zone, page))
610 		return 1;
611 
612 	return 0;
613 }
614 #else
615 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
616 {
617 	return 0;
618 }
619 #endif
620 
621 static void bad_page(struct page *page, const char *reason)
622 {
623 	static unsigned long resume;
624 	static unsigned long nr_shown;
625 	static unsigned long nr_unshown;
626 
627 	/*
628 	 * Allow a burst of 60 reports, then keep quiet for that minute;
629 	 * or allow a steady drip of one report per second.
630 	 */
631 	if (nr_shown == 60) {
632 		if (time_before(jiffies, resume)) {
633 			nr_unshown++;
634 			goto out;
635 		}
636 		if (nr_unshown) {
637 			pr_alert(
638 			      "BUG: Bad page state: %lu messages suppressed\n",
639 				nr_unshown);
640 			nr_unshown = 0;
641 		}
642 		nr_shown = 0;
643 	}
644 	if (nr_shown++ == 0)
645 		resume = jiffies + 60 * HZ;
646 
647 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
648 		current->comm, page_to_pfn(page));
649 	dump_page(page, reason);
650 
651 	print_modules();
652 	dump_stack();
653 out:
654 	/* Leave bad fields for debug, except PageBuddy could make trouble */
655 	page_mapcount_reset(page); /* remove PageBuddy */
656 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
657 }
658 
659 static inline unsigned int order_to_pindex(int migratetype, int order)
660 {
661 	int base = order;
662 
663 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
664 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
665 		VM_BUG_ON(order != pageblock_order);
666 		base = PAGE_ALLOC_COSTLY_ORDER + 1;
667 	}
668 #else
669 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
670 #endif
671 
672 	return (MIGRATE_PCPTYPES * base) + migratetype;
673 }
674 
675 static inline int pindex_to_order(unsigned int pindex)
676 {
677 	int order = pindex / MIGRATE_PCPTYPES;
678 
679 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
680 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
681 		order = pageblock_order;
682 		VM_BUG_ON(order != pageblock_order);
683 	}
684 #else
685 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
686 #endif
687 
688 	return order;
689 }
690 
691 static inline bool pcp_allowed_order(unsigned int order)
692 {
693 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
694 		return true;
695 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
696 	if (order == pageblock_order)
697 		return true;
698 #endif
699 	return false;
700 }
701 
702 static inline void free_the_page(struct page *page, unsigned int order)
703 {
704 	if (pcp_allowed_order(order))		/* Via pcp? */
705 		free_unref_page(page, order);
706 	else
707 		__free_pages_ok(page, order, FPI_NONE);
708 }
709 
710 /*
711  * Higher-order pages are called "compound pages".  They are structured thusly:
712  *
713  * The first PAGE_SIZE page is called the "head page" and have PG_head set.
714  *
715  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
716  * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
717  *
718  * The first tail page's ->compound_dtor holds the offset in array of compound
719  * page destructors. See compound_page_dtors.
720  *
721  * The first tail page's ->compound_order holds the order of allocation.
722  * This usage means that zero-order pages may not be compound.
723  */
724 
725 void free_compound_page(struct page *page)
726 {
727 	mem_cgroup_uncharge(page_folio(page));
728 	free_the_page(page, compound_order(page));
729 }
730 
731 void prep_compound_page(struct page *page, unsigned int order)
732 {
733 	int i;
734 	int nr_pages = 1 << order;
735 
736 	__SetPageHead(page);
737 	for (i = 1; i < nr_pages; i++) {
738 		struct page *p = page + i;
739 		p->mapping = TAIL_MAPPING;
740 		set_compound_head(p, page);
741 	}
742 
743 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
744 	set_compound_order(page, order);
745 	atomic_set(compound_mapcount_ptr(page), -1);
746 	if (hpage_pincount_available(page))
747 		atomic_set(compound_pincount_ptr(page), 0);
748 }
749 
750 #ifdef CONFIG_DEBUG_PAGEALLOC
751 unsigned int _debug_guardpage_minorder;
752 
753 bool _debug_pagealloc_enabled_early __read_mostly
754 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
755 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
756 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
757 EXPORT_SYMBOL(_debug_pagealloc_enabled);
758 
759 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
760 
761 static int __init early_debug_pagealloc(char *buf)
762 {
763 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
764 }
765 early_param("debug_pagealloc", early_debug_pagealloc);
766 
767 static int __init debug_guardpage_minorder_setup(char *buf)
768 {
769 	unsigned long res;
770 
771 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
772 		pr_err("Bad debug_guardpage_minorder value\n");
773 		return 0;
774 	}
775 	_debug_guardpage_minorder = res;
776 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
777 	return 0;
778 }
779 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
780 
781 static inline bool set_page_guard(struct zone *zone, struct page *page,
782 				unsigned int order, int migratetype)
783 {
784 	if (!debug_guardpage_enabled())
785 		return false;
786 
787 	if (order >= debug_guardpage_minorder())
788 		return false;
789 
790 	__SetPageGuard(page);
791 	INIT_LIST_HEAD(&page->lru);
792 	set_page_private(page, order);
793 	/* Guard pages are not available for any usage */
794 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
795 
796 	return true;
797 }
798 
799 static inline void clear_page_guard(struct zone *zone, struct page *page,
800 				unsigned int order, int migratetype)
801 {
802 	if (!debug_guardpage_enabled())
803 		return;
804 
805 	__ClearPageGuard(page);
806 
807 	set_page_private(page, 0);
808 	if (!is_migrate_isolate(migratetype))
809 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
810 }
811 #else
812 static inline bool set_page_guard(struct zone *zone, struct page *page,
813 			unsigned int order, int migratetype) { return false; }
814 static inline void clear_page_guard(struct zone *zone, struct page *page,
815 				unsigned int order, int migratetype) {}
816 #endif
817 
818 /*
819  * Enable static keys related to various memory debugging and hardening options.
820  * Some override others, and depend on early params that are evaluated in the
821  * order of appearance. So we need to first gather the full picture of what was
822  * enabled, and then make decisions.
823  */
824 void init_mem_debugging_and_hardening(void)
825 {
826 	bool page_poisoning_requested = false;
827 
828 #ifdef CONFIG_PAGE_POISONING
829 	/*
830 	 * Page poisoning is debug page alloc for some arches. If
831 	 * either of those options are enabled, enable poisoning.
832 	 */
833 	if (page_poisoning_enabled() ||
834 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
835 	      debug_pagealloc_enabled())) {
836 		static_branch_enable(&_page_poisoning_enabled);
837 		page_poisoning_requested = true;
838 	}
839 #endif
840 
841 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
842 	    page_poisoning_requested) {
843 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
844 			"will take precedence over init_on_alloc and init_on_free\n");
845 		_init_on_alloc_enabled_early = false;
846 		_init_on_free_enabled_early = false;
847 	}
848 
849 	if (_init_on_alloc_enabled_early)
850 		static_branch_enable(&init_on_alloc);
851 	else
852 		static_branch_disable(&init_on_alloc);
853 
854 	if (_init_on_free_enabled_early)
855 		static_branch_enable(&init_on_free);
856 	else
857 		static_branch_disable(&init_on_free);
858 
859 #ifdef CONFIG_DEBUG_PAGEALLOC
860 	if (!debug_pagealloc_enabled())
861 		return;
862 
863 	static_branch_enable(&_debug_pagealloc_enabled);
864 
865 	if (!debug_guardpage_minorder())
866 		return;
867 
868 	static_branch_enable(&_debug_guardpage_enabled);
869 #endif
870 }
871 
872 static inline void set_buddy_order(struct page *page, unsigned int order)
873 {
874 	set_page_private(page, order);
875 	__SetPageBuddy(page);
876 }
877 
878 /*
879  * This function checks whether a page is free && is the buddy
880  * we can coalesce a page and its buddy if
881  * (a) the buddy is not in a hole (check before calling!) &&
882  * (b) the buddy is in the buddy system &&
883  * (c) a page and its buddy have the same order &&
884  * (d) a page and its buddy are in the same zone.
885  *
886  * For recording whether a page is in the buddy system, we set PageBuddy.
887  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
888  *
889  * For recording page's order, we use page_private(page).
890  */
891 static inline bool page_is_buddy(struct page *page, struct page *buddy,
892 							unsigned int order)
893 {
894 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
895 		return false;
896 
897 	if (buddy_order(buddy) != order)
898 		return false;
899 
900 	/*
901 	 * zone check is done late to avoid uselessly calculating
902 	 * zone/node ids for pages that could never merge.
903 	 */
904 	if (page_zone_id(page) != page_zone_id(buddy))
905 		return false;
906 
907 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
908 
909 	return true;
910 }
911 
912 #ifdef CONFIG_COMPACTION
913 static inline struct capture_control *task_capc(struct zone *zone)
914 {
915 	struct capture_control *capc = current->capture_control;
916 
917 	return unlikely(capc) &&
918 		!(current->flags & PF_KTHREAD) &&
919 		!capc->page &&
920 		capc->cc->zone == zone ? capc : NULL;
921 }
922 
923 static inline bool
924 compaction_capture(struct capture_control *capc, struct page *page,
925 		   int order, int migratetype)
926 {
927 	if (!capc || order != capc->cc->order)
928 		return false;
929 
930 	/* Do not accidentally pollute CMA or isolated regions*/
931 	if (is_migrate_cma(migratetype) ||
932 	    is_migrate_isolate(migratetype))
933 		return false;
934 
935 	/*
936 	 * Do not let lower order allocations pollute a movable pageblock.
937 	 * This might let an unmovable request use a reclaimable pageblock
938 	 * and vice-versa but no more than normal fallback logic which can
939 	 * have trouble finding a high-order free page.
940 	 */
941 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
942 		return false;
943 
944 	capc->page = page;
945 	return true;
946 }
947 
948 #else
949 static inline struct capture_control *task_capc(struct zone *zone)
950 {
951 	return NULL;
952 }
953 
954 static inline bool
955 compaction_capture(struct capture_control *capc, struct page *page,
956 		   int order, int migratetype)
957 {
958 	return false;
959 }
960 #endif /* CONFIG_COMPACTION */
961 
962 /* Used for pages not on another list */
963 static inline void add_to_free_list(struct page *page, struct zone *zone,
964 				    unsigned int order, int migratetype)
965 {
966 	struct free_area *area = &zone->free_area[order];
967 
968 	list_add(&page->lru, &area->free_list[migratetype]);
969 	area->nr_free++;
970 }
971 
972 /* Used for pages not on another list */
973 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
974 					 unsigned int order, int migratetype)
975 {
976 	struct free_area *area = &zone->free_area[order];
977 
978 	list_add_tail(&page->lru, &area->free_list[migratetype]);
979 	area->nr_free++;
980 }
981 
982 /*
983  * Used for pages which are on another list. Move the pages to the tail
984  * of the list - so the moved pages won't immediately be considered for
985  * allocation again (e.g., optimization for memory onlining).
986  */
987 static inline void move_to_free_list(struct page *page, struct zone *zone,
988 				     unsigned int order, int migratetype)
989 {
990 	struct free_area *area = &zone->free_area[order];
991 
992 	list_move_tail(&page->lru, &area->free_list[migratetype]);
993 }
994 
995 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
996 					   unsigned int order)
997 {
998 	/* clear reported state and update reported page count */
999 	if (page_reported(page))
1000 		__ClearPageReported(page);
1001 
1002 	list_del(&page->lru);
1003 	__ClearPageBuddy(page);
1004 	set_page_private(page, 0);
1005 	zone->free_area[order].nr_free--;
1006 }
1007 
1008 /*
1009  * If this is not the largest possible page, check if the buddy
1010  * of the next-highest order is free. If it is, it's possible
1011  * that pages are being freed that will coalesce soon. In case,
1012  * that is happening, add the free page to the tail of the list
1013  * so it's less likely to be used soon and more likely to be merged
1014  * as a higher order page
1015  */
1016 static inline bool
1017 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1018 		   struct page *page, unsigned int order)
1019 {
1020 	struct page *higher_page, *higher_buddy;
1021 	unsigned long combined_pfn;
1022 
1023 	if (order >= MAX_ORDER - 2)
1024 		return false;
1025 
1026 	combined_pfn = buddy_pfn & pfn;
1027 	higher_page = page + (combined_pfn - pfn);
1028 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1029 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1030 
1031 	return page_is_buddy(higher_page, higher_buddy, order + 1);
1032 }
1033 
1034 /*
1035  * Freeing function for a buddy system allocator.
1036  *
1037  * The concept of a buddy system is to maintain direct-mapped table
1038  * (containing bit values) for memory blocks of various "orders".
1039  * The bottom level table contains the map for the smallest allocatable
1040  * units of memory (here, pages), and each level above it describes
1041  * pairs of units from the levels below, hence, "buddies".
1042  * At a high level, all that happens here is marking the table entry
1043  * at the bottom level available, and propagating the changes upward
1044  * as necessary, plus some accounting needed to play nicely with other
1045  * parts of the VM system.
1046  * At each level, we keep a list of pages, which are heads of continuous
1047  * free pages of length of (1 << order) and marked with PageBuddy.
1048  * Page's order is recorded in page_private(page) field.
1049  * So when we are allocating or freeing one, we can derive the state of the
1050  * other.  That is, if we allocate a small block, and both were
1051  * free, the remainder of the region must be split into blocks.
1052  * If a block is freed, and its buddy is also free, then this
1053  * triggers coalescing into a block of larger size.
1054  *
1055  * -- nyc
1056  */
1057 
1058 static inline void __free_one_page(struct page *page,
1059 		unsigned long pfn,
1060 		struct zone *zone, unsigned int order,
1061 		int migratetype, fpi_t fpi_flags)
1062 {
1063 	struct capture_control *capc = task_capc(zone);
1064 	unsigned long buddy_pfn;
1065 	unsigned long combined_pfn;
1066 	unsigned int max_order;
1067 	struct page *buddy;
1068 	bool to_tail;
1069 
1070 	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1071 
1072 	VM_BUG_ON(!zone_is_initialized(zone));
1073 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1074 
1075 	VM_BUG_ON(migratetype == -1);
1076 	if (likely(!is_migrate_isolate(migratetype)))
1077 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1078 
1079 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1080 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1081 
1082 continue_merging:
1083 	while (order < max_order) {
1084 		if (compaction_capture(capc, page, order, migratetype)) {
1085 			__mod_zone_freepage_state(zone, -(1 << order),
1086 								migratetype);
1087 			return;
1088 		}
1089 		buddy_pfn = __find_buddy_pfn(pfn, order);
1090 		buddy = page + (buddy_pfn - pfn);
1091 
1092 		if (!page_is_buddy(page, buddy, order))
1093 			goto done_merging;
1094 		/*
1095 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1096 		 * merge with it and move up one order.
1097 		 */
1098 		if (page_is_guard(buddy))
1099 			clear_page_guard(zone, buddy, order, migratetype);
1100 		else
1101 			del_page_from_free_list(buddy, zone, order);
1102 		combined_pfn = buddy_pfn & pfn;
1103 		page = page + (combined_pfn - pfn);
1104 		pfn = combined_pfn;
1105 		order++;
1106 	}
1107 	if (order < MAX_ORDER - 1) {
1108 		/* If we are here, it means order is >= pageblock_order.
1109 		 * We want to prevent merge between freepages on isolate
1110 		 * pageblock and normal pageblock. Without this, pageblock
1111 		 * isolation could cause incorrect freepage or CMA accounting.
1112 		 *
1113 		 * We don't want to hit this code for the more frequent
1114 		 * low-order merging.
1115 		 */
1116 		if (unlikely(has_isolate_pageblock(zone))) {
1117 			int buddy_mt;
1118 
1119 			buddy_pfn = __find_buddy_pfn(pfn, order);
1120 			buddy = page + (buddy_pfn - pfn);
1121 			buddy_mt = get_pageblock_migratetype(buddy);
1122 
1123 			if (migratetype != buddy_mt
1124 					&& (is_migrate_isolate(migratetype) ||
1125 						is_migrate_isolate(buddy_mt)))
1126 				goto done_merging;
1127 		}
1128 		max_order = order + 1;
1129 		goto continue_merging;
1130 	}
1131 
1132 done_merging:
1133 	set_buddy_order(page, order);
1134 
1135 	if (fpi_flags & FPI_TO_TAIL)
1136 		to_tail = true;
1137 	else if (is_shuffle_order(order))
1138 		to_tail = shuffle_pick_tail();
1139 	else
1140 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1141 
1142 	if (to_tail)
1143 		add_to_free_list_tail(page, zone, order, migratetype);
1144 	else
1145 		add_to_free_list(page, zone, order, migratetype);
1146 
1147 	/* Notify page reporting subsystem of freed page */
1148 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1149 		page_reporting_notify_free(order);
1150 }
1151 
1152 /*
1153  * A bad page could be due to a number of fields. Instead of multiple branches,
1154  * try and check multiple fields with one check. The caller must do a detailed
1155  * check if necessary.
1156  */
1157 static inline bool page_expected_state(struct page *page,
1158 					unsigned long check_flags)
1159 {
1160 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1161 		return false;
1162 
1163 	if (unlikely((unsigned long)page->mapping |
1164 			page_ref_count(page) |
1165 #ifdef CONFIG_MEMCG
1166 			page->memcg_data |
1167 #endif
1168 			(page->flags & check_flags)))
1169 		return false;
1170 
1171 	return true;
1172 }
1173 
1174 static const char *page_bad_reason(struct page *page, unsigned long flags)
1175 {
1176 	const char *bad_reason = NULL;
1177 
1178 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1179 		bad_reason = "nonzero mapcount";
1180 	if (unlikely(page->mapping != NULL))
1181 		bad_reason = "non-NULL mapping";
1182 	if (unlikely(page_ref_count(page) != 0))
1183 		bad_reason = "nonzero _refcount";
1184 	if (unlikely(page->flags & flags)) {
1185 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1186 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1187 		else
1188 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1189 	}
1190 #ifdef CONFIG_MEMCG
1191 	if (unlikely(page->memcg_data))
1192 		bad_reason = "page still charged to cgroup";
1193 #endif
1194 	return bad_reason;
1195 }
1196 
1197 static void check_free_page_bad(struct page *page)
1198 {
1199 	bad_page(page,
1200 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1201 }
1202 
1203 static inline int check_free_page(struct page *page)
1204 {
1205 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1206 		return 0;
1207 
1208 	/* Something has gone sideways, find it */
1209 	check_free_page_bad(page);
1210 	return 1;
1211 }
1212 
1213 static int free_tail_pages_check(struct page *head_page, struct page *page)
1214 {
1215 	int ret = 1;
1216 
1217 	/*
1218 	 * We rely page->lru.next never has bit 0 set, unless the page
1219 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1220 	 */
1221 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1222 
1223 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1224 		ret = 0;
1225 		goto out;
1226 	}
1227 	switch (page - head_page) {
1228 	case 1:
1229 		/* the first tail page: ->mapping may be compound_mapcount() */
1230 		if (unlikely(compound_mapcount(page))) {
1231 			bad_page(page, "nonzero compound_mapcount");
1232 			goto out;
1233 		}
1234 		break;
1235 	case 2:
1236 		/*
1237 		 * the second tail page: ->mapping is
1238 		 * deferred_list.next -- ignore value.
1239 		 */
1240 		break;
1241 	default:
1242 		if (page->mapping != TAIL_MAPPING) {
1243 			bad_page(page, "corrupted mapping in tail page");
1244 			goto out;
1245 		}
1246 		break;
1247 	}
1248 	if (unlikely(!PageTail(page))) {
1249 		bad_page(page, "PageTail not set");
1250 		goto out;
1251 	}
1252 	if (unlikely(compound_head(page) != head_page)) {
1253 		bad_page(page, "compound_head not consistent");
1254 		goto out;
1255 	}
1256 	ret = 0;
1257 out:
1258 	page->mapping = NULL;
1259 	clear_compound_head(page);
1260 	return ret;
1261 }
1262 
1263 static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
1264 {
1265 	int i;
1266 
1267 	if (zero_tags) {
1268 		for (i = 0; i < numpages; i++)
1269 			tag_clear_highpage(page + i);
1270 		return;
1271 	}
1272 
1273 	/* s390's use of memset() could override KASAN redzones. */
1274 	kasan_disable_current();
1275 	for (i = 0; i < numpages; i++) {
1276 		u8 tag = page_kasan_tag(page + i);
1277 		page_kasan_tag_reset(page + i);
1278 		clear_highpage(page + i);
1279 		page_kasan_tag_set(page + i, tag);
1280 	}
1281 	kasan_enable_current();
1282 }
1283 
1284 static __always_inline bool free_pages_prepare(struct page *page,
1285 			unsigned int order, bool check_free, fpi_t fpi_flags)
1286 {
1287 	int bad = 0;
1288 	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1289 
1290 	VM_BUG_ON_PAGE(PageTail(page), page);
1291 
1292 	trace_mm_page_free(page, order);
1293 
1294 	if (unlikely(PageHWPoison(page)) && !order) {
1295 		/*
1296 		 * Do not let hwpoison pages hit pcplists/buddy
1297 		 * Untie memcg state and reset page's owner
1298 		 */
1299 		if (memcg_kmem_enabled() && PageMemcgKmem(page))
1300 			__memcg_kmem_uncharge_page(page, order);
1301 		reset_page_owner(page, order);
1302 		return false;
1303 	}
1304 
1305 	/*
1306 	 * Check tail pages before head page information is cleared to
1307 	 * avoid checking PageCompound for order-0 pages.
1308 	 */
1309 	if (unlikely(order)) {
1310 		bool compound = PageCompound(page);
1311 		int i;
1312 
1313 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1314 
1315 		if (compound) {
1316 			ClearPageDoubleMap(page);
1317 			ClearPageHasHWPoisoned(page);
1318 		}
1319 		for (i = 1; i < (1 << order); i++) {
1320 			if (compound)
1321 				bad += free_tail_pages_check(page, page + i);
1322 			if (unlikely(check_free_page(page + i))) {
1323 				bad++;
1324 				continue;
1325 			}
1326 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1327 		}
1328 	}
1329 	if (PageMappingFlags(page))
1330 		page->mapping = NULL;
1331 	if (memcg_kmem_enabled() && PageMemcgKmem(page))
1332 		__memcg_kmem_uncharge_page(page, order);
1333 	if (check_free)
1334 		bad += check_free_page(page);
1335 	if (bad)
1336 		return false;
1337 
1338 	page_cpupid_reset_last(page);
1339 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1340 	reset_page_owner(page, order);
1341 
1342 	if (!PageHighMem(page)) {
1343 		debug_check_no_locks_freed(page_address(page),
1344 					   PAGE_SIZE << order);
1345 		debug_check_no_obj_freed(page_address(page),
1346 					   PAGE_SIZE << order);
1347 	}
1348 
1349 	kernel_poison_pages(page, 1 << order);
1350 
1351 	/*
1352 	 * As memory initialization might be integrated into KASAN,
1353 	 * kasan_free_pages and kernel_init_free_pages must be
1354 	 * kept together to avoid discrepancies in behavior.
1355 	 *
1356 	 * With hardware tag-based KASAN, memory tags must be set before the
1357 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
1358 	 */
1359 	if (kasan_has_integrated_init()) {
1360 		if (!skip_kasan_poison)
1361 			kasan_free_pages(page, order);
1362 	} else {
1363 		bool init = want_init_on_free();
1364 
1365 		if (init)
1366 			kernel_init_free_pages(page, 1 << order, false);
1367 		if (!skip_kasan_poison)
1368 			kasan_poison_pages(page, order, init);
1369 	}
1370 
1371 	/*
1372 	 * arch_free_page() can make the page's contents inaccessible.  s390
1373 	 * does this.  So nothing which can access the page's contents should
1374 	 * happen after this.
1375 	 */
1376 	arch_free_page(page, order);
1377 
1378 	debug_pagealloc_unmap_pages(page, 1 << order);
1379 
1380 	return true;
1381 }
1382 
1383 #ifdef CONFIG_DEBUG_VM
1384 /*
1385  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1386  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1387  * moved from pcp lists to free lists.
1388  */
1389 static bool free_pcp_prepare(struct page *page, unsigned int order)
1390 {
1391 	return free_pages_prepare(page, order, true, FPI_NONE);
1392 }
1393 
1394 static bool bulkfree_pcp_prepare(struct page *page)
1395 {
1396 	if (debug_pagealloc_enabled_static())
1397 		return check_free_page(page);
1398 	else
1399 		return false;
1400 }
1401 #else
1402 /*
1403  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1404  * moving from pcp lists to free list in order to reduce overhead. With
1405  * debug_pagealloc enabled, they are checked also immediately when being freed
1406  * to the pcp lists.
1407  */
1408 static bool free_pcp_prepare(struct page *page, unsigned int order)
1409 {
1410 	if (debug_pagealloc_enabled_static())
1411 		return free_pages_prepare(page, order, true, FPI_NONE);
1412 	else
1413 		return free_pages_prepare(page, order, false, FPI_NONE);
1414 }
1415 
1416 static bool bulkfree_pcp_prepare(struct page *page)
1417 {
1418 	return check_free_page(page);
1419 }
1420 #endif /* CONFIG_DEBUG_VM */
1421 
1422 static inline void prefetch_buddy(struct page *page)
1423 {
1424 	unsigned long pfn = page_to_pfn(page);
1425 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1426 	struct page *buddy = page + (buddy_pfn - pfn);
1427 
1428 	prefetch(buddy);
1429 }
1430 
1431 /*
1432  * Frees a number of pages from the PCP lists
1433  * Assumes all pages on list are in same zone, and of same order.
1434  * count is the number of pages to free.
1435  *
1436  * If the zone was previously in an "all pages pinned" state then look to
1437  * see if this freeing clears that state.
1438  *
1439  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1440  * pinned" detection logic.
1441  */
1442 static void free_pcppages_bulk(struct zone *zone, int count,
1443 					struct per_cpu_pages *pcp)
1444 {
1445 	int pindex = 0;
1446 	int batch_free = 0;
1447 	int nr_freed = 0;
1448 	unsigned int order;
1449 	int prefetch_nr = READ_ONCE(pcp->batch);
1450 	bool isolated_pageblocks;
1451 	struct page *page, *tmp;
1452 	LIST_HEAD(head);
1453 
1454 	/*
1455 	 * Ensure proper count is passed which otherwise would stuck in the
1456 	 * below while (list_empty(list)) loop.
1457 	 */
1458 	count = min(pcp->count, count);
1459 	while (count > 0) {
1460 		struct list_head *list;
1461 
1462 		/*
1463 		 * Remove pages from lists in a round-robin fashion. A
1464 		 * batch_free count is maintained that is incremented when an
1465 		 * empty list is encountered.  This is so more pages are freed
1466 		 * off fuller lists instead of spinning excessively around empty
1467 		 * lists
1468 		 */
1469 		do {
1470 			batch_free++;
1471 			if (++pindex == NR_PCP_LISTS)
1472 				pindex = 0;
1473 			list = &pcp->lists[pindex];
1474 		} while (list_empty(list));
1475 
1476 		/* This is the only non-empty list. Free them all. */
1477 		if (batch_free == NR_PCP_LISTS)
1478 			batch_free = count;
1479 
1480 		order = pindex_to_order(pindex);
1481 		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1482 		do {
1483 			page = list_last_entry(list, struct page, lru);
1484 			/* must delete to avoid corrupting pcp list */
1485 			list_del(&page->lru);
1486 			nr_freed += 1 << order;
1487 			count -= 1 << order;
1488 
1489 			if (bulkfree_pcp_prepare(page))
1490 				continue;
1491 
1492 			/* Encode order with the migratetype */
1493 			page->index <<= NR_PCP_ORDER_WIDTH;
1494 			page->index |= order;
1495 
1496 			list_add_tail(&page->lru, &head);
1497 
1498 			/*
1499 			 * We are going to put the page back to the global
1500 			 * pool, prefetch its buddy to speed up later access
1501 			 * under zone->lock. It is believed the overhead of
1502 			 * an additional test and calculating buddy_pfn here
1503 			 * can be offset by reduced memory latency later. To
1504 			 * avoid excessive prefetching due to large count, only
1505 			 * prefetch buddy for the first pcp->batch nr of pages.
1506 			 */
1507 			if (prefetch_nr) {
1508 				prefetch_buddy(page);
1509 				prefetch_nr--;
1510 			}
1511 		} while (count > 0 && --batch_free && !list_empty(list));
1512 	}
1513 	pcp->count -= nr_freed;
1514 
1515 	/*
1516 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
1517 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
1518 	 */
1519 	spin_lock(&zone->lock);
1520 	isolated_pageblocks = has_isolate_pageblock(zone);
1521 
1522 	/*
1523 	 * Use safe version since after __free_one_page(),
1524 	 * page->lru.next will not point to original list.
1525 	 */
1526 	list_for_each_entry_safe(page, tmp, &head, lru) {
1527 		int mt = get_pcppage_migratetype(page);
1528 
1529 		/* mt has been encoded with the order (see above) */
1530 		order = mt & NR_PCP_ORDER_MASK;
1531 		mt >>= NR_PCP_ORDER_WIDTH;
1532 
1533 		/* MIGRATE_ISOLATE page should not go to pcplists */
1534 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1535 		/* Pageblock could have been isolated meanwhile */
1536 		if (unlikely(isolated_pageblocks))
1537 			mt = get_pageblock_migratetype(page);
1538 
1539 		__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1540 		trace_mm_page_pcpu_drain(page, order, mt);
1541 	}
1542 	spin_unlock(&zone->lock);
1543 }
1544 
1545 static void free_one_page(struct zone *zone,
1546 				struct page *page, unsigned long pfn,
1547 				unsigned int order,
1548 				int migratetype, fpi_t fpi_flags)
1549 {
1550 	unsigned long flags;
1551 
1552 	spin_lock_irqsave(&zone->lock, flags);
1553 	if (unlikely(has_isolate_pageblock(zone) ||
1554 		is_migrate_isolate(migratetype))) {
1555 		migratetype = get_pfnblock_migratetype(page, pfn);
1556 	}
1557 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1558 	spin_unlock_irqrestore(&zone->lock, flags);
1559 }
1560 
1561 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1562 				unsigned long zone, int nid)
1563 {
1564 	mm_zero_struct_page(page);
1565 	set_page_links(page, zone, nid, pfn);
1566 	init_page_count(page);
1567 	page_mapcount_reset(page);
1568 	page_cpupid_reset_last(page);
1569 	page_kasan_tag_reset(page);
1570 
1571 	INIT_LIST_HEAD(&page->lru);
1572 #ifdef WANT_PAGE_VIRTUAL
1573 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1574 	if (!is_highmem_idx(zone))
1575 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1576 #endif
1577 }
1578 
1579 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1580 static void __meminit init_reserved_page(unsigned long pfn)
1581 {
1582 	pg_data_t *pgdat;
1583 	int nid, zid;
1584 
1585 	if (!early_page_uninitialised(pfn))
1586 		return;
1587 
1588 	nid = early_pfn_to_nid(pfn);
1589 	pgdat = NODE_DATA(nid);
1590 
1591 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1592 		struct zone *zone = &pgdat->node_zones[zid];
1593 
1594 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1595 			break;
1596 	}
1597 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1598 }
1599 #else
1600 static inline void init_reserved_page(unsigned long pfn)
1601 {
1602 }
1603 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1604 
1605 /*
1606  * Initialised pages do not have PageReserved set. This function is
1607  * called for each range allocated by the bootmem allocator and
1608  * marks the pages PageReserved. The remaining valid pages are later
1609  * sent to the buddy page allocator.
1610  */
1611 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1612 {
1613 	unsigned long start_pfn = PFN_DOWN(start);
1614 	unsigned long end_pfn = PFN_UP(end);
1615 
1616 	for (; start_pfn < end_pfn; start_pfn++) {
1617 		if (pfn_valid(start_pfn)) {
1618 			struct page *page = pfn_to_page(start_pfn);
1619 
1620 			init_reserved_page(start_pfn);
1621 
1622 			/* Avoid false-positive PageTail() */
1623 			INIT_LIST_HEAD(&page->lru);
1624 
1625 			/*
1626 			 * no need for atomic set_bit because the struct
1627 			 * page is not visible yet so nobody should
1628 			 * access it yet.
1629 			 */
1630 			__SetPageReserved(page);
1631 		}
1632 	}
1633 }
1634 
1635 static void __free_pages_ok(struct page *page, unsigned int order,
1636 			    fpi_t fpi_flags)
1637 {
1638 	unsigned long flags;
1639 	int migratetype;
1640 	unsigned long pfn = page_to_pfn(page);
1641 	struct zone *zone = page_zone(page);
1642 
1643 	if (!free_pages_prepare(page, order, true, fpi_flags))
1644 		return;
1645 
1646 	migratetype = get_pfnblock_migratetype(page, pfn);
1647 
1648 	spin_lock_irqsave(&zone->lock, flags);
1649 	if (unlikely(has_isolate_pageblock(zone) ||
1650 		is_migrate_isolate(migratetype))) {
1651 		migratetype = get_pfnblock_migratetype(page, pfn);
1652 	}
1653 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1654 	spin_unlock_irqrestore(&zone->lock, flags);
1655 
1656 	__count_vm_events(PGFREE, 1 << order);
1657 }
1658 
1659 void __free_pages_core(struct page *page, unsigned int order)
1660 {
1661 	unsigned int nr_pages = 1 << order;
1662 	struct page *p = page;
1663 	unsigned int loop;
1664 
1665 	/*
1666 	 * When initializing the memmap, __init_single_page() sets the refcount
1667 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1668 	 * refcount of all involved pages to 0.
1669 	 */
1670 	prefetchw(p);
1671 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1672 		prefetchw(p + 1);
1673 		__ClearPageReserved(p);
1674 		set_page_count(p, 0);
1675 	}
1676 	__ClearPageReserved(p);
1677 	set_page_count(p, 0);
1678 
1679 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1680 
1681 	/*
1682 	 * Bypass PCP and place fresh pages right to the tail, primarily
1683 	 * relevant for memory onlining.
1684 	 */
1685 	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1686 }
1687 
1688 #ifdef CONFIG_NUMA
1689 
1690 /*
1691  * During memory init memblocks map pfns to nids. The search is expensive and
1692  * this caches recent lookups. The implementation of __early_pfn_to_nid
1693  * treats start/end as pfns.
1694  */
1695 struct mminit_pfnnid_cache {
1696 	unsigned long last_start;
1697 	unsigned long last_end;
1698 	int last_nid;
1699 };
1700 
1701 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1702 
1703 /*
1704  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1705  */
1706 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1707 					struct mminit_pfnnid_cache *state)
1708 {
1709 	unsigned long start_pfn, end_pfn;
1710 	int nid;
1711 
1712 	if (state->last_start <= pfn && pfn < state->last_end)
1713 		return state->last_nid;
1714 
1715 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1716 	if (nid != NUMA_NO_NODE) {
1717 		state->last_start = start_pfn;
1718 		state->last_end = end_pfn;
1719 		state->last_nid = nid;
1720 	}
1721 
1722 	return nid;
1723 }
1724 
1725 int __meminit early_pfn_to_nid(unsigned long pfn)
1726 {
1727 	static DEFINE_SPINLOCK(early_pfn_lock);
1728 	int nid;
1729 
1730 	spin_lock(&early_pfn_lock);
1731 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1732 	if (nid < 0)
1733 		nid = first_online_node;
1734 	spin_unlock(&early_pfn_lock);
1735 
1736 	return nid;
1737 }
1738 #endif /* CONFIG_NUMA */
1739 
1740 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1741 							unsigned int order)
1742 {
1743 	if (early_page_uninitialised(pfn))
1744 		return;
1745 	__free_pages_core(page, order);
1746 }
1747 
1748 /*
1749  * Check that the whole (or subset of) a pageblock given by the interval of
1750  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1751  * with the migration of free compaction scanner.
1752  *
1753  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1754  *
1755  * It's possible on some configurations to have a setup like node0 node1 node0
1756  * i.e. it's possible that all pages within a zones range of pages do not
1757  * belong to a single zone. We assume that a border between node0 and node1
1758  * can occur within a single pageblock, but not a node0 node1 node0
1759  * interleaving within a single pageblock. It is therefore sufficient to check
1760  * the first and last page of a pageblock and avoid checking each individual
1761  * page in a pageblock.
1762  */
1763 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1764 				     unsigned long end_pfn, struct zone *zone)
1765 {
1766 	struct page *start_page;
1767 	struct page *end_page;
1768 
1769 	/* end_pfn is one past the range we are checking */
1770 	end_pfn--;
1771 
1772 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1773 		return NULL;
1774 
1775 	start_page = pfn_to_online_page(start_pfn);
1776 	if (!start_page)
1777 		return NULL;
1778 
1779 	if (page_zone(start_page) != zone)
1780 		return NULL;
1781 
1782 	end_page = pfn_to_page(end_pfn);
1783 
1784 	/* This gives a shorter code than deriving page_zone(end_page) */
1785 	if (page_zone_id(start_page) != page_zone_id(end_page))
1786 		return NULL;
1787 
1788 	return start_page;
1789 }
1790 
1791 void set_zone_contiguous(struct zone *zone)
1792 {
1793 	unsigned long block_start_pfn = zone->zone_start_pfn;
1794 	unsigned long block_end_pfn;
1795 
1796 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1797 	for (; block_start_pfn < zone_end_pfn(zone);
1798 			block_start_pfn = block_end_pfn,
1799 			 block_end_pfn += pageblock_nr_pages) {
1800 
1801 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1802 
1803 		if (!__pageblock_pfn_to_page(block_start_pfn,
1804 					     block_end_pfn, zone))
1805 			return;
1806 		cond_resched();
1807 	}
1808 
1809 	/* We confirm that there is no hole */
1810 	zone->contiguous = true;
1811 }
1812 
1813 void clear_zone_contiguous(struct zone *zone)
1814 {
1815 	zone->contiguous = false;
1816 }
1817 
1818 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1819 static void __init deferred_free_range(unsigned long pfn,
1820 				       unsigned long nr_pages)
1821 {
1822 	struct page *page;
1823 	unsigned long i;
1824 
1825 	if (!nr_pages)
1826 		return;
1827 
1828 	page = pfn_to_page(pfn);
1829 
1830 	/* Free a large naturally-aligned chunk if possible */
1831 	if (nr_pages == pageblock_nr_pages &&
1832 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1833 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1834 		__free_pages_core(page, pageblock_order);
1835 		return;
1836 	}
1837 
1838 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1839 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1840 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1841 		__free_pages_core(page, 0);
1842 	}
1843 }
1844 
1845 /* Completion tracking for deferred_init_memmap() threads */
1846 static atomic_t pgdat_init_n_undone __initdata;
1847 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1848 
1849 static inline void __init pgdat_init_report_one_done(void)
1850 {
1851 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1852 		complete(&pgdat_init_all_done_comp);
1853 }
1854 
1855 /*
1856  * Returns true if page needs to be initialized or freed to buddy allocator.
1857  *
1858  * First we check if pfn is valid on architectures where it is possible to have
1859  * holes within pageblock_nr_pages. On systems where it is not possible, this
1860  * function is optimized out.
1861  *
1862  * Then, we check if a current large page is valid by only checking the validity
1863  * of the head pfn.
1864  */
1865 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1866 {
1867 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1868 		return false;
1869 	return true;
1870 }
1871 
1872 /*
1873  * Free pages to buddy allocator. Try to free aligned pages in
1874  * pageblock_nr_pages sizes.
1875  */
1876 static void __init deferred_free_pages(unsigned long pfn,
1877 				       unsigned long end_pfn)
1878 {
1879 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1880 	unsigned long nr_free = 0;
1881 
1882 	for (; pfn < end_pfn; pfn++) {
1883 		if (!deferred_pfn_valid(pfn)) {
1884 			deferred_free_range(pfn - nr_free, nr_free);
1885 			nr_free = 0;
1886 		} else if (!(pfn & nr_pgmask)) {
1887 			deferred_free_range(pfn - nr_free, nr_free);
1888 			nr_free = 1;
1889 		} else {
1890 			nr_free++;
1891 		}
1892 	}
1893 	/* Free the last block of pages to allocator */
1894 	deferred_free_range(pfn - nr_free, nr_free);
1895 }
1896 
1897 /*
1898  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1899  * by performing it only once every pageblock_nr_pages.
1900  * Return number of pages initialized.
1901  */
1902 static unsigned long  __init deferred_init_pages(struct zone *zone,
1903 						 unsigned long pfn,
1904 						 unsigned long end_pfn)
1905 {
1906 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1907 	int nid = zone_to_nid(zone);
1908 	unsigned long nr_pages = 0;
1909 	int zid = zone_idx(zone);
1910 	struct page *page = NULL;
1911 
1912 	for (; pfn < end_pfn; pfn++) {
1913 		if (!deferred_pfn_valid(pfn)) {
1914 			page = NULL;
1915 			continue;
1916 		} else if (!page || !(pfn & nr_pgmask)) {
1917 			page = pfn_to_page(pfn);
1918 		} else {
1919 			page++;
1920 		}
1921 		__init_single_page(page, pfn, zid, nid);
1922 		nr_pages++;
1923 	}
1924 	return (nr_pages);
1925 }
1926 
1927 /*
1928  * This function is meant to pre-load the iterator for the zone init.
1929  * Specifically it walks through the ranges until we are caught up to the
1930  * first_init_pfn value and exits there. If we never encounter the value we
1931  * return false indicating there are no valid ranges left.
1932  */
1933 static bool __init
1934 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1935 				    unsigned long *spfn, unsigned long *epfn,
1936 				    unsigned long first_init_pfn)
1937 {
1938 	u64 j;
1939 
1940 	/*
1941 	 * Start out by walking through the ranges in this zone that have
1942 	 * already been initialized. We don't need to do anything with them
1943 	 * so we just need to flush them out of the system.
1944 	 */
1945 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1946 		if (*epfn <= first_init_pfn)
1947 			continue;
1948 		if (*spfn < first_init_pfn)
1949 			*spfn = first_init_pfn;
1950 		*i = j;
1951 		return true;
1952 	}
1953 
1954 	return false;
1955 }
1956 
1957 /*
1958  * Initialize and free pages. We do it in two loops: first we initialize
1959  * struct page, then free to buddy allocator, because while we are
1960  * freeing pages we can access pages that are ahead (computing buddy
1961  * page in __free_one_page()).
1962  *
1963  * In order to try and keep some memory in the cache we have the loop
1964  * broken along max page order boundaries. This way we will not cause
1965  * any issues with the buddy page computation.
1966  */
1967 static unsigned long __init
1968 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1969 		       unsigned long *end_pfn)
1970 {
1971 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1972 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1973 	unsigned long nr_pages = 0;
1974 	u64 j = *i;
1975 
1976 	/* First we loop through and initialize the page values */
1977 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1978 		unsigned long t;
1979 
1980 		if (mo_pfn <= *start_pfn)
1981 			break;
1982 
1983 		t = min(mo_pfn, *end_pfn);
1984 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1985 
1986 		if (mo_pfn < *end_pfn) {
1987 			*start_pfn = mo_pfn;
1988 			break;
1989 		}
1990 	}
1991 
1992 	/* Reset values and now loop through freeing pages as needed */
1993 	swap(j, *i);
1994 
1995 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1996 		unsigned long t;
1997 
1998 		if (mo_pfn <= spfn)
1999 			break;
2000 
2001 		t = min(mo_pfn, epfn);
2002 		deferred_free_pages(spfn, t);
2003 
2004 		if (mo_pfn <= epfn)
2005 			break;
2006 	}
2007 
2008 	return nr_pages;
2009 }
2010 
2011 static void __init
2012 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2013 			   void *arg)
2014 {
2015 	unsigned long spfn, epfn;
2016 	struct zone *zone = arg;
2017 	u64 i;
2018 
2019 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2020 
2021 	/*
2022 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2023 	 * can avoid introducing any issues with the buddy allocator.
2024 	 */
2025 	while (spfn < end_pfn) {
2026 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2027 		cond_resched();
2028 	}
2029 }
2030 
2031 /* An arch may override for more concurrency. */
2032 __weak int __init
2033 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2034 {
2035 	return 1;
2036 }
2037 
2038 /* Initialise remaining memory on a node */
2039 static int __init deferred_init_memmap(void *data)
2040 {
2041 	pg_data_t *pgdat = data;
2042 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2043 	unsigned long spfn = 0, epfn = 0;
2044 	unsigned long first_init_pfn, flags;
2045 	unsigned long start = jiffies;
2046 	struct zone *zone;
2047 	int zid, max_threads;
2048 	u64 i;
2049 
2050 	/* Bind memory initialisation thread to a local node if possible */
2051 	if (!cpumask_empty(cpumask))
2052 		set_cpus_allowed_ptr(current, cpumask);
2053 
2054 	pgdat_resize_lock(pgdat, &flags);
2055 	first_init_pfn = pgdat->first_deferred_pfn;
2056 	if (first_init_pfn == ULONG_MAX) {
2057 		pgdat_resize_unlock(pgdat, &flags);
2058 		pgdat_init_report_one_done();
2059 		return 0;
2060 	}
2061 
2062 	/* Sanity check boundaries */
2063 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2064 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2065 	pgdat->first_deferred_pfn = ULONG_MAX;
2066 
2067 	/*
2068 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
2069 	 * interrupt thread must allocate this early in boot, zone must be
2070 	 * pre-grown prior to start of deferred page initialization.
2071 	 */
2072 	pgdat_resize_unlock(pgdat, &flags);
2073 
2074 	/* Only the highest zone is deferred so find it */
2075 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2076 		zone = pgdat->node_zones + zid;
2077 		if (first_init_pfn < zone_end_pfn(zone))
2078 			break;
2079 	}
2080 
2081 	/* If the zone is empty somebody else may have cleared out the zone */
2082 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2083 						 first_init_pfn))
2084 		goto zone_empty;
2085 
2086 	max_threads = deferred_page_init_max_threads(cpumask);
2087 
2088 	while (spfn < epfn) {
2089 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2090 		struct padata_mt_job job = {
2091 			.thread_fn   = deferred_init_memmap_chunk,
2092 			.fn_arg      = zone,
2093 			.start       = spfn,
2094 			.size        = epfn_align - spfn,
2095 			.align       = PAGES_PER_SECTION,
2096 			.min_chunk   = PAGES_PER_SECTION,
2097 			.max_threads = max_threads,
2098 		};
2099 
2100 		padata_do_multithreaded(&job);
2101 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2102 						    epfn_align);
2103 	}
2104 zone_empty:
2105 	/* Sanity check that the next zone really is unpopulated */
2106 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2107 
2108 	pr_info("node %d deferred pages initialised in %ums\n",
2109 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2110 
2111 	pgdat_init_report_one_done();
2112 	return 0;
2113 }
2114 
2115 /*
2116  * If this zone has deferred pages, try to grow it by initializing enough
2117  * deferred pages to satisfy the allocation specified by order, rounded up to
2118  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2119  * of SECTION_SIZE bytes by initializing struct pages in increments of
2120  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2121  *
2122  * Return true when zone was grown, otherwise return false. We return true even
2123  * when we grow less than requested, to let the caller decide if there are
2124  * enough pages to satisfy the allocation.
2125  *
2126  * Note: We use noinline because this function is needed only during boot, and
2127  * it is called from a __ref function _deferred_grow_zone. This way we are
2128  * making sure that it is not inlined into permanent text section.
2129  */
2130 static noinline bool __init
2131 deferred_grow_zone(struct zone *zone, unsigned int order)
2132 {
2133 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2134 	pg_data_t *pgdat = zone->zone_pgdat;
2135 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2136 	unsigned long spfn, epfn, flags;
2137 	unsigned long nr_pages = 0;
2138 	u64 i;
2139 
2140 	/* Only the last zone may have deferred pages */
2141 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2142 		return false;
2143 
2144 	pgdat_resize_lock(pgdat, &flags);
2145 
2146 	/*
2147 	 * If someone grew this zone while we were waiting for spinlock, return
2148 	 * true, as there might be enough pages already.
2149 	 */
2150 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2151 		pgdat_resize_unlock(pgdat, &flags);
2152 		return true;
2153 	}
2154 
2155 	/* If the zone is empty somebody else may have cleared out the zone */
2156 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2157 						 first_deferred_pfn)) {
2158 		pgdat->first_deferred_pfn = ULONG_MAX;
2159 		pgdat_resize_unlock(pgdat, &flags);
2160 		/* Retry only once. */
2161 		return first_deferred_pfn != ULONG_MAX;
2162 	}
2163 
2164 	/*
2165 	 * Initialize and free pages in MAX_ORDER sized increments so
2166 	 * that we can avoid introducing any issues with the buddy
2167 	 * allocator.
2168 	 */
2169 	while (spfn < epfn) {
2170 		/* update our first deferred PFN for this section */
2171 		first_deferred_pfn = spfn;
2172 
2173 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2174 		touch_nmi_watchdog();
2175 
2176 		/* We should only stop along section boundaries */
2177 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2178 			continue;
2179 
2180 		/* If our quota has been met we can stop here */
2181 		if (nr_pages >= nr_pages_needed)
2182 			break;
2183 	}
2184 
2185 	pgdat->first_deferred_pfn = spfn;
2186 	pgdat_resize_unlock(pgdat, &flags);
2187 
2188 	return nr_pages > 0;
2189 }
2190 
2191 /*
2192  * deferred_grow_zone() is __init, but it is called from
2193  * get_page_from_freelist() during early boot until deferred_pages permanently
2194  * disables this call. This is why we have refdata wrapper to avoid warning,
2195  * and to ensure that the function body gets unloaded.
2196  */
2197 static bool __ref
2198 _deferred_grow_zone(struct zone *zone, unsigned int order)
2199 {
2200 	return deferred_grow_zone(zone, order);
2201 }
2202 
2203 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2204 
2205 void __init page_alloc_init_late(void)
2206 {
2207 	struct zone *zone;
2208 	int nid;
2209 
2210 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2211 
2212 	/* There will be num_node_state(N_MEMORY) threads */
2213 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2214 	for_each_node_state(nid, N_MEMORY) {
2215 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2216 	}
2217 
2218 	/* Block until all are initialised */
2219 	wait_for_completion(&pgdat_init_all_done_comp);
2220 
2221 	/*
2222 	 * We initialized the rest of the deferred pages.  Permanently disable
2223 	 * on-demand struct page initialization.
2224 	 */
2225 	static_branch_disable(&deferred_pages);
2226 
2227 	/* Reinit limits that are based on free pages after the kernel is up */
2228 	files_maxfiles_init();
2229 #endif
2230 
2231 	buffer_init();
2232 
2233 	/* Discard memblock private memory */
2234 	memblock_discard();
2235 
2236 	for_each_node_state(nid, N_MEMORY)
2237 		shuffle_free_memory(NODE_DATA(nid));
2238 
2239 	for_each_populated_zone(zone)
2240 		set_zone_contiguous(zone);
2241 }
2242 
2243 #ifdef CONFIG_CMA
2244 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2245 void __init init_cma_reserved_pageblock(struct page *page)
2246 {
2247 	unsigned i = pageblock_nr_pages;
2248 	struct page *p = page;
2249 
2250 	do {
2251 		__ClearPageReserved(p);
2252 		set_page_count(p, 0);
2253 	} while (++p, --i);
2254 
2255 	set_pageblock_migratetype(page, MIGRATE_CMA);
2256 
2257 	if (pageblock_order >= MAX_ORDER) {
2258 		i = pageblock_nr_pages;
2259 		p = page;
2260 		do {
2261 			set_page_refcounted(p);
2262 			__free_pages(p, MAX_ORDER - 1);
2263 			p += MAX_ORDER_NR_PAGES;
2264 		} while (i -= MAX_ORDER_NR_PAGES);
2265 	} else {
2266 		set_page_refcounted(page);
2267 		__free_pages(page, pageblock_order);
2268 	}
2269 
2270 	adjust_managed_page_count(page, pageblock_nr_pages);
2271 	page_zone(page)->cma_pages += pageblock_nr_pages;
2272 }
2273 #endif
2274 
2275 /*
2276  * The order of subdivision here is critical for the IO subsystem.
2277  * Please do not alter this order without good reasons and regression
2278  * testing. Specifically, as large blocks of memory are subdivided,
2279  * the order in which smaller blocks are delivered depends on the order
2280  * they're subdivided in this function. This is the primary factor
2281  * influencing the order in which pages are delivered to the IO
2282  * subsystem according to empirical testing, and this is also justified
2283  * by considering the behavior of a buddy system containing a single
2284  * large block of memory acted on by a series of small allocations.
2285  * This behavior is a critical factor in sglist merging's success.
2286  *
2287  * -- nyc
2288  */
2289 static inline void expand(struct zone *zone, struct page *page,
2290 	int low, int high, int migratetype)
2291 {
2292 	unsigned long size = 1 << high;
2293 
2294 	while (high > low) {
2295 		high--;
2296 		size >>= 1;
2297 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2298 
2299 		/*
2300 		 * Mark as guard pages (or page), that will allow to
2301 		 * merge back to allocator when buddy will be freed.
2302 		 * Corresponding page table entries will not be touched,
2303 		 * pages will stay not present in virtual address space
2304 		 */
2305 		if (set_page_guard(zone, &page[size], high, migratetype))
2306 			continue;
2307 
2308 		add_to_free_list(&page[size], zone, high, migratetype);
2309 		set_buddy_order(&page[size], high);
2310 	}
2311 }
2312 
2313 static void check_new_page_bad(struct page *page)
2314 {
2315 	if (unlikely(page->flags & __PG_HWPOISON)) {
2316 		/* Don't complain about hwpoisoned pages */
2317 		page_mapcount_reset(page); /* remove PageBuddy */
2318 		return;
2319 	}
2320 
2321 	bad_page(page,
2322 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2323 }
2324 
2325 /*
2326  * This page is about to be returned from the page allocator
2327  */
2328 static inline int check_new_page(struct page *page)
2329 {
2330 	if (likely(page_expected_state(page,
2331 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2332 		return 0;
2333 
2334 	check_new_page_bad(page);
2335 	return 1;
2336 }
2337 
2338 #ifdef CONFIG_DEBUG_VM
2339 /*
2340  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2341  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2342  * also checked when pcp lists are refilled from the free lists.
2343  */
2344 static inline bool check_pcp_refill(struct page *page)
2345 {
2346 	if (debug_pagealloc_enabled_static())
2347 		return check_new_page(page);
2348 	else
2349 		return false;
2350 }
2351 
2352 static inline bool check_new_pcp(struct page *page)
2353 {
2354 	return check_new_page(page);
2355 }
2356 #else
2357 /*
2358  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2359  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2360  * enabled, they are also checked when being allocated from the pcp lists.
2361  */
2362 static inline bool check_pcp_refill(struct page *page)
2363 {
2364 	return check_new_page(page);
2365 }
2366 static inline bool check_new_pcp(struct page *page)
2367 {
2368 	if (debug_pagealloc_enabled_static())
2369 		return check_new_page(page);
2370 	else
2371 		return false;
2372 }
2373 #endif /* CONFIG_DEBUG_VM */
2374 
2375 static bool check_new_pages(struct page *page, unsigned int order)
2376 {
2377 	int i;
2378 	for (i = 0; i < (1 << order); i++) {
2379 		struct page *p = page + i;
2380 
2381 		if (unlikely(check_new_page(p)))
2382 			return true;
2383 	}
2384 
2385 	return false;
2386 }
2387 
2388 inline void post_alloc_hook(struct page *page, unsigned int order,
2389 				gfp_t gfp_flags)
2390 {
2391 	set_page_private(page, 0);
2392 	set_page_refcounted(page);
2393 
2394 	arch_alloc_page(page, order);
2395 	debug_pagealloc_map_pages(page, 1 << order);
2396 
2397 	/*
2398 	 * Page unpoisoning must happen before memory initialization.
2399 	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2400 	 * allocations and the page unpoisoning code will complain.
2401 	 */
2402 	kernel_unpoison_pages(page, 1 << order);
2403 
2404 	/*
2405 	 * As memory initialization might be integrated into KASAN,
2406 	 * kasan_alloc_pages and kernel_init_free_pages must be
2407 	 * kept together to avoid discrepancies in behavior.
2408 	 */
2409 	if (kasan_has_integrated_init()) {
2410 		kasan_alloc_pages(page, order, gfp_flags);
2411 	} else {
2412 		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2413 
2414 		kasan_unpoison_pages(page, order, init);
2415 		if (init)
2416 			kernel_init_free_pages(page, 1 << order,
2417 					       gfp_flags & __GFP_ZEROTAGS);
2418 	}
2419 
2420 	set_page_owner(page, order, gfp_flags);
2421 }
2422 
2423 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2424 							unsigned int alloc_flags)
2425 {
2426 	post_alloc_hook(page, order, gfp_flags);
2427 
2428 	if (order && (gfp_flags & __GFP_COMP))
2429 		prep_compound_page(page, order);
2430 
2431 	/*
2432 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2433 	 * allocate the page. The expectation is that the caller is taking
2434 	 * steps that will free more memory. The caller should avoid the page
2435 	 * being used for !PFMEMALLOC purposes.
2436 	 */
2437 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2438 		set_page_pfmemalloc(page);
2439 	else
2440 		clear_page_pfmemalloc(page);
2441 }
2442 
2443 /*
2444  * Go through the free lists for the given migratetype and remove
2445  * the smallest available page from the freelists
2446  */
2447 static __always_inline
2448 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2449 						int migratetype)
2450 {
2451 	unsigned int current_order;
2452 	struct free_area *area;
2453 	struct page *page;
2454 
2455 	/* Find a page of the appropriate size in the preferred list */
2456 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2457 		area = &(zone->free_area[current_order]);
2458 		page = get_page_from_free_area(area, migratetype);
2459 		if (!page)
2460 			continue;
2461 		del_page_from_free_list(page, zone, current_order);
2462 		expand(zone, page, order, current_order, migratetype);
2463 		set_pcppage_migratetype(page, migratetype);
2464 		return page;
2465 	}
2466 
2467 	return NULL;
2468 }
2469 
2470 
2471 /*
2472  * This array describes the order lists are fallen back to when
2473  * the free lists for the desirable migrate type are depleted
2474  */
2475 static int fallbacks[MIGRATE_TYPES][3] = {
2476 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2477 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2478 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2479 #ifdef CONFIG_CMA
2480 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2481 #endif
2482 #ifdef CONFIG_MEMORY_ISOLATION
2483 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2484 #endif
2485 };
2486 
2487 #ifdef CONFIG_CMA
2488 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2489 					unsigned int order)
2490 {
2491 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2492 }
2493 #else
2494 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2495 					unsigned int order) { return NULL; }
2496 #endif
2497 
2498 /*
2499  * Move the free pages in a range to the freelist tail of the requested type.
2500  * Note that start_page and end_pages are not aligned on a pageblock
2501  * boundary. If alignment is required, use move_freepages_block()
2502  */
2503 static int move_freepages(struct zone *zone,
2504 			  unsigned long start_pfn, unsigned long end_pfn,
2505 			  int migratetype, int *num_movable)
2506 {
2507 	struct page *page;
2508 	unsigned long pfn;
2509 	unsigned int order;
2510 	int pages_moved = 0;
2511 
2512 	for (pfn = start_pfn; pfn <= end_pfn;) {
2513 		page = pfn_to_page(pfn);
2514 		if (!PageBuddy(page)) {
2515 			/*
2516 			 * We assume that pages that could be isolated for
2517 			 * migration are movable. But we don't actually try
2518 			 * isolating, as that would be expensive.
2519 			 */
2520 			if (num_movable &&
2521 					(PageLRU(page) || __PageMovable(page)))
2522 				(*num_movable)++;
2523 			pfn++;
2524 			continue;
2525 		}
2526 
2527 		/* Make sure we are not inadvertently changing nodes */
2528 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2529 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2530 
2531 		order = buddy_order(page);
2532 		move_to_free_list(page, zone, order, migratetype);
2533 		pfn += 1 << order;
2534 		pages_moved += 1 << order;
2535 	}
2536 
2537 	return pages_moved;
2538 }
2539 
2540 int move_freepages_block(struct zone *zone, struct page *page,
2541 				int migratetype, int *num_movable)
2542 {
2543 	unsigned long start_pfn, end_pfn, pfn;
2544 
2545 	if (num_movable)
2546 		*num_movable = 0;
2547 
2548 	pfn = page_to_pfn(page);
2549 	start_pfn = pfn & ~(pageblock_nr_pages - 1);
2550 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2551 
2552 	/* Do not cross zone boundaries */
2553 	if (!zone_spans_pfn(zone, start_pfn))
2554 		start_pfn = pfn;
2555 	if (!zone_spans_pfn(zone, end_pfn))
2556 		return 0;
2557 
2558 	return move_freepages(zone, start_pfn, end_pfn, migratetype,
2559 								num_movable);
2560 }
2561 
2562 static void change_pageblock_range(struct page *pageblock_page,
2563 					int start_order, int migratetype)
2564 {
2565 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2566 
2567 	while (nr_pageblocks--) {
2568 		set_pageblock_migratetype(pageblock_page, migratetype);
2569 		pageblock_page += pageblock_nr_pages;
2570 	}
2571 }
2572 
2573 /*
2574  * When we are falling back to another migratetype during allocation, try to
2575  * steal extra free pages from the same pageblocks to satisfy further
2576  * allocations, instead of polluting multiple pageblocks.
2577  *
2578  * If we are stealing a relatively large buddy page, it is likely there will
2579  * be more free pages in the pageblock, so try to steal them all. For
2580  * reclaimable and unmovable allocations, we steal regardless of page size,
2581  * as fragmentation caused by those allocations polluting movable pageblocks
2582  * is worse than movable allocations stealing from unmovable and reclaimable
2583  * pageblocks.
2584  */
2585 static bool can_steal_fallback(unsigned int order, int start_mt)
2586 {
2587 	/*
2588 	 * Leaving this order check is intended, although there is
2589 	 * relaxed order check in next check. The reason is that
2590 	 * we can actually steal whole pageblock if this condition met,
2591 	 * but, below check doesn't guarantee it and that is just heuristic
2592 	 * so could be changed anytime.
2593 	 */
2594 	if (order >= pageblock_order)
2595 		return true;
2596 
2597 	if (order >= pageblock_order / 2 ||
2598 		start_mt == MIGRATE_RECLAIMABLE ||
2599 		start_mt == MIGRATE_UNMOVABLE ||
2600 		page_group_by_mobility_disabled)
2601 		return true;
2602 
2603 	return false;
2604 }
2605 
2606 static inline bool boost_watermark(struct zone *zone)
2607 {
2608 	unsigned long max_boost;
2609 
2610 	if (!watermark_boost_factor)
2611 		return false;
2612 	/*
2613 	 * Don't bother in zones that are unlikely to produce results.
2614 	 * On small machines, including kdump capture kernels running
2615 	 * in a small area, boosting the watermark can cause an out of
2616 	 * memory situation immediately.
2617 	 */
2618 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2619 		return false;
2620 
2621 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2622 			watermark_boost_factor, 10000);
2623 
2624 	/*
2625 	 * high watermark may be uninitialised if fragmentation occurs
2626 	 * very early in boot so do not boost. We do not fall
2627 	 * through and boost by pageblock_nr_pages as failing
2628 	 * allocations that early means that reclaim is not going
2629 	 * to help and it may even be impossible to reclaim the
2630 	 * boosted watermark resulting in a hang.
2631 	 */
2632 	if (!max_boost)
2633 		return false;
2634 
2635 	max_boost = max(pageblock_nr_pages, max_boost);
2636 
2637 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2638 		max_boost);
2639 
2640 	return true;
2641 }
2642 
2643 /*
2644  * This function implements actual steal behaviour. If order is large enough,
2645  * we can steal whole pageblock. If not, we first move freepages in this
2646  * pageblock to our migratetype and determine how many already-allocated pages
2647  * are there in the pageblock with a compatible migratetype. If at least half
2648  * of pages are free or compatible, we can change migratetype of the pageblock
2649  * itself, so pages freed in the future will be put on the correct free list.
2650  */
2651 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2652 		unsigned int alloc_flags, int start_type, bool whole_block)
2653 {
2654 	unsigned int current_order = buddy_order(page);
2655 	int free_pages, movable_pages, alike_pages;
2656 	int old_block_type;
2657 
2658 	old_block_type = get_pageblock_migratetype(page);
2659 
2660 	/*
2661 	 * This can happen due to races and we want to prevent broken
2662 	 * highatomic accounting.
2663 	 */
2664 	if (is_migrate_highatomic(old_block_type))
2665 		goto single_page;
2666 
2667 	/* Take ownership for orders >= pageblock_order */
2668 	if (current_order >= pageblock_order) {
2669 		change_pageblock_range(page, current_order, start_type);
2670 		goto single_page;
2671 	}
2672 
2673 	/*
2674 	 * Boost watermarks to increase reclaim pressure to reduce the
2675 	 * likelihood of future fallbacks. Wake kswapd now as the node
2676 	 * may be balanced overall and kswapd will not wake naturally.
2677 	 */
2678 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2679 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2680 
2681 	/* We are not allowed to try stealing from the whole block */
2682 	if (!whole_block)
2683 		goto single_page;
2684 
2685 	free_pages = move_freepages_block(zone, page, start_type,
2686 						&movable_pages);
2687 	/*
2688 	 * Determine how many pages are compatible with our allocation.
2689 	 * For movable allocation, it's the number of movable pages which
2690 	 * we just obtained. For other types it's a bit more tricky.
2691 	 */
2692 	if (start_type == MIGRATE_MOVABLE) {
2693 		alike_pages = movable_pages;
2694 	} else {
2695 		/*
2696 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2697 		 * to MOVABLE pageblock, consider all non-movable pages as
2698 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2699 		 * vice versa, be conservative since we can't distinguish the
2700 		 * exact migratetype of non-movable pages.
2701 		 */
2702 		if (old_block_type == MIGRATE_MOVABLE)
2703 			alike_pages = pageblock_nr_pages
2704 						- (free_pages + movable_pages);
2705 		else
2706 			alike_pages = 0;
2707 	}
2708 
2709 	/* moving whole block can fail due to zone boundary conditions */
2710 	if (!free_pages)
2711 		goto single_page;
2712 
2713 	/*
2714 	 * If a sufficient number of pages in the block are either free or of
2715 	 * comparable migratability as our allocation, claim the whole block.
2716 	 */
2717 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2718 			page_group_by_mobility_disabled)
2719 		set_pageblock_migratetype(page, start_type);
2720 
2721 	return;
2722 
2723 single_page:
2724 	move_to_free_list(page, zone, current_order, start_type);
2725 }
2726 
2727 /*
2728  * Check whether there is a suitable fallback freepage with requested order.
2729  * If only_stealable is true, this function returns fallback_mt only if
2730  * we can steal other freepages all together. This would help to reduce
2731  * fragmentation due to mixed migratetype pages in one pageblock.
2732  */
2733 int find_suitable_fallback(struct free_area *area, unsigned int order,
2734 			int migratetype, bool only_stealable, bool *can_steal)
2735 {
2736 	int i;
2737 	int fallback_mt;
2738 
2739 	if (area->nr_free == 0)
2740 		return -1;
2741 
2742 	*can_steal = false;
2743 	for (i = 0;; i++) {
2744 		fallback_mt = fallbacks[migratetype][i];
2745 		if (fallback_mt == MIGRATE_TYPES)
2746 			break;
2747 
2748 		if (free_area_empty(area, fallback_mt))
2749 			continue;
2750 
2751 		if (can_steal_fallback(order, migratetype))
2752 			*can_steal = true;
2753 
2754 		if (!only_stealable)
2755 			return fallback_mt;
2756 
2757 		if (*can_steal)
2758 			return fallback_mt;
2759 	}
2760 
2761 	return -1;
2762 }
2763 
2764 /*
2765  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2766  * there are no empty page blocks that contain a page with a suitable order
2767  */
2768 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2769 				unsigned int alloc_order)
2770 {
2771 	int mt;
2772 	unsigned long max_managed, flags;
2773 
2774 	/*
2775 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2776 	 * Check is race-prone but harmless.
2777 	 */
2778 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2779 	if (zone->nr_reserved_highatomic >= max_managed)
2780 		return;
2781 
2782 	spin_lock_irqsave(&zone->lock, flags);
2783 
2784 	/* Recheck the nr_reserved_highatomic limit under the lock */
2785 	if (zone->nr_reserved_highatomic >= max_managed)
2786 		goto out_unlock;
2787 
2788 	/* Yoink! */
2789 	mt = get_pageblock_migratetype(page);
2790 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2791 	    && !is_migrate_cma(mt)) {
2792 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2793 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2794 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2795 	}
2796 
2797 out_unlock:
2798 	spin_unlock_irqrestore(&zone->lock, flags);
2799 }
2800 
2801 /*
2802  * Used when an allocation is about to fail under memory pressure. This
2803  * potentially hurts the reliability of high-order allocations when under
2804  * intense memory pressure but failed atomic allocations should be easier
2805  * to recover from than an OOM.
2806  *
2807  * If @force is true, try to unreserve a pageblock even though highatomic
2808  * pageblock is exhausted.
2809  */
2810 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2811 						bool force)
2812 {
2813 	struct zonelist *zonelist = ac->zonelist;
2814 	unsigned long flags;
2815 	struct zoneref *z;
2816 	struct zone *zone;
2817 	struct page *page;
2818 	int order;
2819 	bool ret;
2820 
2821 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2822 								ac->nodemask) {
2823 		/*
2824 		 * Preserve at least one pageblock unless memory pressure
2825 		 * is really high.
2826 		 */
2827 		if (!force && zone->nr_reserved_highatomic <=
2828 					pageblock_nr_pages)
2829 			continue;
2830 
2831 		spin_lock_irqsave(&zone->lock, flags);
2832 		for (order = 0; order < MAX_ORDER; order++) {
2833 			struct free_area *area = &(zone->free_area[order]);
2834 
2835 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2836 			if (!page)
2837 				continue;
2838 
2839 			/*
2840 			 * In page freeing path, migratetype change is racy so
2841 			 * we can counter several free pages in a pageblock
2842 			 * in this loop although we changed the pageblock type
2843 			 * from highatomic to ac->migratetype. So we should
2844 			 * adjust the count once.
2845 			 */
2846 			if (is_migrate_highatomic_page(page)) {
2847 				/*
2848 				 * It should never happen but changes to
2849 				 * locking could inadvertently allow a per-cpu
2850 				 * drain to add pages to MIGRATE_HIGHATOMIC
2851 				 * while unreserving so be safe and watch for
2852 				 * underflows.
2853 				 */
2854 				zone->nr_reserved_highatomic -= min(
2855 						pageblock_nr_pages,
2856 						zone->nr_reserved_highatomic);
2857 			}
2858 
2859 			/*
2860 			 * Convert to ac->migratetype and avoid the normal
2861 			 * pageblock stealing heuristics. Minimally, the caller
2862 			 * is doing the work and needs the pages. More
2863 			 * importantly, if the block was always converted to
2864 			 * MIGRATE_UNMOVABLE or another type then the number
2865 			 * of pageblocks that cannot be completely freed
2866 			 * may increase.
2867 			 */
2868 			set_pageblock_migratetype(page, ac->migratetype);
2869 			ret = move_freepages_block(zone, page, ac->migratetype,
2870 									NULL);
2871 			if (ret) {
2872 				spin_unlock_irqrestore(&zone->lock, flags);
2873 				return ret;
2874 			}
2875 		}
2876 		spin_unlock_irqrestore(&zone->lock, flags);
2877 	}
2878 
2879 	return false;
2880 }
2881 
2882 /*
2883  * Try finding a free buddy page on the fallback list and put it on the free
2884  * list of requested migratetype, possibly along with other pages from the same
2885  * block, depending on fragmentation avoidance heuristics. Returns true if
2886  * fallback was found so that __rmqueue_smallest() can grab it.
2887  *
2888  * The use of signed ints for order and current_order is a deliberate
2889  * deviation from the rest of this file, to make the for loop
2890  * condition simpler.
2891  */
2892 static __always_inline bool
2893 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2894 						unsigned int alloc_flags)
2895 {
2896 	struct free_area *area;
2897 	int current_order;
2898 	int min_order = order;
2899 	struct page *page;
2900 	int fallback_mt;
2901 	bool can_steal;
2902 
2903 	/*
2904 	 * Do not steal pages from freelists belonging to other pageblocks
2905 	 * i.e. orders < pageblock_order. If there are no local zones free,
2906 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2907 	 */
2908 	if (alloc_flags & ALLOC_NOFRAGMENT)
2909 		min_order = pageblock_order;
2910 
2911 	/*
2912 	 * Find the largest available free page in the other list. This roughly
2913 	 * approximates finding the pageblock with the most free pages, which
2914 	 * would be too costly to do exactly.
2915 	 */
2916 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2917 				--current_order) {
2918 		area = &(zone->free_area[current_order]);
2919 		fallback_mt = find_suitable_fallback(area, current_order,
2920 				start_migratetype, false, &can_steal);
2921 		if (fallback_mt == -1)
2922 			continue;
2923 
2924 		/*
2925 		 * We cannot steal all free pages from the pageblock and the
2926 		 * requested migratetype is movable. In that case it's better to
2927 		 * steal and split the smallest available page instead of the
2928 		 * largest available page, because even if the next movable
2929 		 * allocation falls back into a different pageblock than this
2930 		 * one, it won't cause permanent fragmentation.
2931 		 */
2932 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2933 					&& current_order > order)
2934 			goto find_smallest;
2935 
2936 		goto do_steal;
2937 	}
2938 
2939 	return false;
2940 
2941 find_smallest:
2942 	for (current_order = order; current_order < MAX_ORDER;
2943 							current_order++) {
2944 		area = &(zone->free_area[current_order]);
2945 		fallback_mt = find_suitable_fallback(area, current_order,
2946 				start_migratetype, false, &can_steal);
2947 		if (fallback_mt != -1)
2948 			break;
2949 	}
2950 
2951 	/*
2952 	 * This should not happen - we already found a suitable fallback
2953 	 * when looking for the largest page.
2954 	 */
2955 	VM_BUG_ON(current_order == MAX_ORDER);
2956 
2957 do_steal:
2958 	page = get_page_from_free_area(area, fallback_mt);
2959 
2960 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2961 								can_steal);
2962 
2963 	trace_mm_page_alloc_extfrag(page, order, current_order,
2964 		start_migratetype, fallback_mt);
2965 
2966 	return true;
2967 
2968 }
2969 
2970 /*
2971  * Do the hard work of removing an element from the buddy allocator.
2972  * Call me with the zone->lock already held.
2973  */
2974 static __always_inline struct page *
2975 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2976 						unsigned int alloc_flags)
2977 {
2978 	struct page *page;
2979 
2980 	if (IS_ENABLED(CONFIG_CMA)) {
2981 		/*
2982 		 * Balance movable allocations between regular and CMA areas by
2983 		 * allocating from CMA when over half of the zone's free memory
2984 		 * is in the CMA area.
2985 		 */
2986 		if (alloc_flags & ALLOC_CMA &&
2987 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2988 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2989 			page = __rmqueue_cma_fallback(zone, order);
2990 			if (page)
2991 				goto out;
2992 		}
2993 	}
2994 retry:
2995 	page = __rmqueue_smallest(zone, order, migratetype);
2996 	if (unlikely(!page)) {
2997 		if (alloc_flags & ALLOC_CMA)
2998 			page = __rmqueue_cma_fallback(zone, order);
2999 
3000 		if (!page && __rmqueue_fallback(zone, order, migratetype,
3001 								alloc_flags))
3002 			goto retry;
3003 	}
3004 out:
3005 	if (page)
3006 		trace_mm_page_alloc_zone_locked(page, order, migratetype);
3007 	return page;
3008 }
3009 
3010 /*
3011  * Obtain a specified number of elements from the buddy allocator, all under
3012  * a single hold of the lock, for efficiency.  Add them to the supplied list.
3013  * Returns the number of new pages which were placed at *list.
3014  */
3015 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3016 			unsigned long count, struct list_head *list,
3017 			int migratetype, unsigned int alloc_flags)
3018 {
3019 	int i, allocated = 0;
3020 
3021 	/*
3022 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
3023 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
3024 	 */
3025 	spin_lock(&zone->lock);
3026 	for (i = 0; i < count; ++i) {
3027 		struct page *page = __rmqueue(zone, order, migratetype,
3028 								alloc_flags);
3029 		if (unlikely(page == NULL))
3030 			break;
3031 
3032 		if (unlikely(check_pcp_refill(page)))
3033 			continue;
3034 
3035 		/*
3036 		 * Split buddy pages returned by expand() are received here in
3037 		 * physical page order. The page is added to the tail of
3038 		 * caller's list. From the callers perspective, the linked list
3039 		 * is ordered by page number under some conditions. This is
3040 		 * useful for IO devices that can forward direction from the
3041 		 * head, thus also in the physical page order. This is useful
3042 		 * for IO devices that can merge IO requests if the physical
3043 		 * pages are ordered properly.
3044 		 */
3045 		list_add_tail(&page->lru, list);
3046 		allocated++;
3047 		if (is_migrate_cma(get_pcppage_migratetype(page)))
3048 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3049 					      -(1 << order));
3050 	}
3051 
3052 	/*
3053 	 * i pages were removed from the buddy list even if some leak due
3054 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3055 	 * on i. Do not confuse with 'allocated' which is the number of
3056 	 * pages added to the pcp list.
3057 	 */
3058 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3059 	spin_unlock(&zone->lock);
3060 	return allocated;
3061 }
3062 
3063 #ifdef CONFIG_NUMA
3064 /*
3065  * Called from the vmstat counter updater to drain pagesets of this
3066  * currently executing processor on remote nodes after they have
3067  * expired.
3068  *
3069  * Note that this function must be called with the thread pinned to
3070  * a single processor.
3071  */
3072 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3073 {
3074 	unsigned long flags;
3075 	int to_drain, batch;
3076 
3077 	local_lock_irqsave(&pagesets.lock, flags);
3078 	batch = READ_ONCE(pcp->batch);
3079 	to_drain = min(pcp->count, batch);
3080 	if (to_drain > 0)
3081 		free_pcppages_bulk(zone, to_drain, pcp);
3082 	local_unlock_irqrestore(&pagesets.lock, flags);
3083 }
3084 #endif
3085 
3086 /*
3087  * Drain pcplists of the indicated processor and zone.
3088  *
3089  * The processor must either be the current processor and the
3090  * thread pinned to the current processor or a processor that
3091  * is not online.
3092  */
3093 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3094 {
3095 	unsigned long flags;
3096 	struct per_cpu_pages *pcp;
3097 
3098 	local_lock_irqsave(&pagesets.lock, flags);
3099 
3100 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3101 	if (pcp->count)
3102 		free_pcppages_bulk(zone, pcp->count, pcp);
3103 
3104 	local_unlock_irqrestore(&pagesets.lock, flags);
3105 }
3106 
3107 /*
3108  * Drain pcplists of all zones on the indicated processor.
3109  *
3110  * The processor must either be the current processor and the
3111  * thread pinned to the current processor or a processor that
3112  * is not online.
3113  */
3114 static void drain_pages(unsigned int cpu)
3115 {
3116 	struct zone *zone;
3117 
3118 	for_each_populated_zone(zone) {
3119 		drain_pages_zone(cpu, zone);
3120 	}
3121 }
3122 
3123 /*
3124  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3125  *
3126  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3127  * the single zone's pages.
3128  */
3129 void drain_local_pages(struct zone *zone)
3130 {
3131 	int cpu = smp_processor_id();
3132 
3133 	if (zone)
3134 		drain_pages_zone(cpu, zone);
3135 	else
3136 		drain_pages(cpu);
3137 }
3138 
3139 static void drain_local_pages_wq(struct work_struct *work)
3140 {
3141 	struct pcpu_drain *drain;
3142 
3143 	drain = container_of(work, struct pcpu_drain, work);
3144 
3145 	/*
3146 	 * drain_all_pages doesn't use proper cpu hotplug protection so
3147 	 * we can race with cpu offline when the WQ can move this from
3148 	 * a cpu pinned worker to an unbound one. We can operate on a different
3149 	 * cpu which is alright but we also have to make sure to not move to
3150 	 * a different one.
3151 	 */
3152 	preempt_disable();
3153 	drain_local_pages(drain->zone);
3154 	preempt_enable();
3155 }
3156 
3157 /*
3158  * The implementation of drain_all_pages(), exposing an extra parameter to
3159  * drain on all cpus.
3160  *
3161  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3162  * not empty. The check for non-emptiness can however race with a free to
3163  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3164  * that need the guarantee that every CPU has drained can disable the
3165  * optimizing racy check.
3166  */
3167 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3168 {
3169 	int cpu;
3170 
3171 	/*
3172 	 * Allocate in the BSS so we won't require allocation in
3173 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3174 	 */
3175 	static cpumask_t cpus_with_pcps;
3176 
3177 	/*
3178 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
3179 	 * initialized.
3180 	 */
3181 	if (WARN_ON_ONCE(!mm_percpu_wq))
3182 		return;
3183 
3184 	/*
3185 	 * Do not drain if one is already in progress unless it's specific to
3186 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3187 	 * the drain to be complete when the call returns.
3188 	 */
3189 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3190 		if (!zone)
3191 			return;
3192 		mutex_lock(&pcpu_drain_mutex);
3193 	}
3194 
3195 	/*
3196 	 * We don't care about racing with CPU hotplug event
3197 	 * as offline notification will cause the notified
3198 	 * cpu to drain that CPU pcps and on_each_cpu_mask
3199 	 * disables preemption as part of its processing
3200 	 */
3201 	for_each_online_cpu(cpu) {
3202 		struct per_cpu_pages *pcp;
3203 		struct zone *z;
3204 		bool has_pcps = false;
3205 
3206 		if (force_all_cpus) {
3207 			/*
3208 			 * The pcp.count check is racy, some callers need a
3209 			 * guarantee that no cpu is missed.
3210 			 */
3211 			has_pcps = true;
3212 		} else if (zone) {
3213 			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3214 			if (pcp->count)
3215 				has_pcps = true;
3216 		} else {
3217 			for_each_populated_zone(z) {
3218 				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3219 				if (pcp->count) {
3220 					has_pcps = true;
3221 					break;
3222 				}
3223 			}
3224 		}
3225 
3226 		if (has_pcps)
3227 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3228 		else
3229 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3230 	}
3231 
3232 	for_each_cpu(cpu, &cpus_with_pcps) {
3233 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3234 
3235 		drain->zone = zone;
3236 		INIT_WORK(&drain->work, drain_local_pages_wq);
3237 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3238 	}
3239 	for_each_cpu(cpu, &cpus_with_pcps)
3240 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3241 
3242 	mutex_unlock(&pcpu_drain_mutex);
3243 }
3244 
3245 /*
3246  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3247  *
3248  * When zone parameter is non-NULL, spill just the single zone's pages.
3249  *
3250  * Note that this can be extremely slow as the draining happens in a workqueue.
3251  */
3252 void drain_all_pages(struct zone *zone)
3253 {
3254 	__drain_all_pages(zone, false);
3255 }
3256 
3257 #ifdef CONFIG_HIBERNATION
3258 
3259 /*
3260  * Touch the watchdog for every WD_PAGE_COUNT pages.
3261  */
3262 #define WD_PAGE_COUNT	(128*1024)
3263 
3264 void mark_free_pages(struct zone *zone)
3265 {
3266 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3267 	unsigned long flags;
3268 	unsigned int order, t;
3269 	struct page *page;
3270 
3271 	if (zone_is_empty(zone))
3272 		return;
3273 
3274 	spin_lock_irqsave(&zone->lock, flags);
3275 
3276 	max_zone_pfn = zone_end_pfn(zone);
3277 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3278 		if (pfn_valid(pfn)) {
3279 			page = pfn_to_page(pfn);
3280 
3281 			if (!--page_count) {
3282 				touch_nmi_watchdog();
3283 				page_count = WD_PAGE_COUNT;
3284 			}
3285 
3286 			if (page_zone(page) != zone)
3287 				continue;
3288 
3289 			if (!swsusp_page_is_forbidden(page))
3290 				swsusp_unset_page_free(page);
3291 		}
3292 
3293 	for_each_migratetype_order(order, t) {
3294 		list_for_each_entry(page,
3295 				&zone->free_area[order].free_list[t], lru) {
3296 			unsigned long i;
3297 
3298 			pfn = page_to_pfn(page);
3299 			for (i = 0; i < (1UL << order); i++) {
3300 				if (!--page_count) {
3301 					touch_nmi_watchdog();
3302 					page_count = WD_PAGE_COUNT;
3303 				}
3304 				swsusp_set_page_free(pfn_to_page(pfn + i));
3305 			}
3306 		}
3307 	}
3308 	spin_unlock_irqrestore(&zone->lock, flags);
3309 }
3310 #endif /* CONFIG_PM */
3311 
3312 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3313 							unsigned int order)
3314 {
3315 	int migratetype;
3316 
3317 	if (!free_pcp_prepare(page, order))
3318 		return false;
3319 
3320 	migratetype = get_pfnblock_migratetype(page, pfn);
3321 	set_pcppage_migratetype(page, migratetype);
3322 	return true;
3323 }
3324 
3325 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
3326 {
3327 	int min_nr_free, max_nr_free;
3328 
3329 	/* Check for PCP disabled or boot pageset */
3330 	if (unlikely(high < batch))
3331 		return 1;
3332 
3333 	/* Leave at least pcp->batch pages on the list */
3334 	min_nr_free = batch;
3335 	max_nr_free = high - batch;
3336 
3337 	/*
3338 	 * Double the number of pages freed each time there is subsequent
3339 	 * freeing of pages without any allocation.
3340 	 */
3341 	batch <<= pcp->free_factor;
3342 	if (batch < max_nr_free)
3343 		pcp->free_factor++;
3344 	batch = clamp(batch, min_nr_free, max_nr_free);
3345 
3346 	return batch;
3347 }
3348 
3349 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
3350 {
3351 	int high = READ_ONCE(pcp->high);
3352 
3353 	if (unlikely(!high))
3354 		return 0;
3355 
3356 	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3357 		return high;
3358 
3359 	/*
3360 	 * If reclaim is active, limit the number of pages that can be
3361 	 * stored on pcp lists
3362 	 */
3363 	return min(READ_ONCE(pcp->batch) << 2, high);
3364 }
3365 
3366 static void free_unref_page_commit(struct page *page, unsigned long pfn,
3367 				   int migratetype, unsigned int order)
3368 {
3369 	struct zone *zone = page_zone(page);
3370 	struct per_cpu_pages *pcp;
3371 	int high;
3372 	int pindex;
3373 
3374 	__count_vm_event(PGFREE);
3375 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3376 	pindex = order_to_pindex(migratetype, order);
3377 	list_add(&page->lru, &pcp->lists[pindex]);
3378 	pcp->count += 1 << order;
3379 	high = nr_pcp_high(pcp, zone);
3380 	if (pcp->count >= high) {
3381 		int batch = READ_ONCE(pcp->batch);
3382 
3383 		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
3384 	}
3385 }
3386 
3387 /*
3388  * Free a pcp page
3389  */
3390 void free_unref_page(struct page *page, unsigned int order)
3391 {
3392 	unsigned long flags;
3393 	unsigned long pfn = page_to_pfn(page);
3394 	int migratetype;
3395 
3396 	if (!free_unref_page_prepare(page, pfn, order))
3397 		return;
3398 
3399 	/*
3400 	 * We only track unmovable, reclaimable and movable on pcp lists.
3401 	 * Place ISOLATE pages on the isolated list because they are being
3402 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
3403 	 * areas back if necessary. Otherwise, we may have to free
3404 	 * excessively into the page allocator
3405 	 */
3406 	migratetype = get_pcppage_migratetype(page);
3407 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3408 		if (unlikely(is_migrate_isolate(migratetype))) {
3409 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3410 			return;
3411 		}
3412 		migratetype = MIGRATE_MOVABLE;
3413 	}
3414 
3415 	local_lock_irqsave(&pagesets.lock, flags);
3416 	free_unref_page_commit(page, pfn, migratetype, order);
3417 	local_unlock_irqrestore(&pagesets.lock, flags);
3418 }
3419 
3420 /*
3421  * Free a list of 0-order pages
3422  */
3423 void free_unref_page_list(struct list_head *list)
3424 {
3425 	struct page *page, *next;
3426 	unsigned long flags, pfn;
3427 	int batch_count = 0;
3428 	int migratetype;
3429 
3430 	/* Prepare pages for freeing */
3431 	list_for_each_entry_safe(page, next, list, lru) {
3432 		pfn = page_to_pfn(page);
3433 		if (!free_unref_page_prepare(page, pfn, 0)) {
3434 			list_del(&page->lru);
3435 			continue;
3436 		}
3437 
3438 		/*
3439 		 * Free isolated pages directly to the allocator, see
3440 		 * comment in free_unref_page.
3441 		 */
3442 		migratetype = get_pcppage_migratetype(page);
3443 		if (unlikely(is_migrate_isolate(migratetype))) {
3444 			list_del(&page->lru);
3445 			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3446 			continue;
3447 		}
3448 
3449 		set_page_private(page, pfn);
3450 	}
3451 
3452 	local_lock_irqsave(&pagesets.lock, flags);
3453 	list_for_each_entry_safe(page, next, list, lru) {
3454 		pfn = page_private(page);
3455 		set_page_private(page, 0);
3456 
3457 		/*
3458 		 * Non-isolated types over MIGRATE_PCPTYPES get added
3459 		 * to the MIGRATE_MOVABLE pcp list.
3460 		 */
3461 		migratetype = get_pcppage_migratetype(page);
3462 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3463 			migratetype = MIGRATE_MOVABLE;
3464 
3465 		trace_mm_page_free_batched(page);
3466 		free_unref_page_commit(page, pfn, migratetype, 0);
3467 
3468 		/*
3469 		 * Guard against excessive IRQ disabled times when we get
3470 		 * a large list of pages to free.
3471 		 */
3472 		if (++batch_count == SWAP_CLUSTER_MAX) {
3473 			local_unlock_irqrestore(&pagesets.lock, flags);
3474 			batch_count = 0;
3475 			local_lock_irqsave(&pagesets.lock, flags);
3476 		}
3477 	}
3478 	local_unlock_irqrestore(&pagesets.lock, flags);
3479 }
3480 
3481 /*
3482  * split_page takes a non-compound higher-order page, and splits it into
3483  * n (1<<order) sub-pages: page[0..n]
3484  * Each sub-page must be freed individually.
3485  *
3486  * Note: this is probably too low level an operation for use in drivers.
3487  * Please consult with lkml before using this in your driver.
3488  */
3489 void split_page(struct page *page, unsigned int order)
3490 {
3491 	int i;
3492 
3493 	VM_BUG_ON_PAGE(PageCompound(page), page);
3494 	VM_BUG_ON_PAGE(!page_count(page), page);
3495 
3496 	for (i = 1; i < (1 << order); i++)
3497 		set_page_refcounted(page + i);
3498 	split_page_owner(page, 1 << order);
3499 	split_page_memcg(page, 1 << order);
3500 }
3501 EXPORT_SYMBOL_GPL(split_page);
3502 
3503 int __isolate_free_page(struct page *page, unsigned int order)
3504 {
3505 	unsigned long watermark;
3506 	struct zone *zone;
3507 	int mt;
3508 
3509 	BUG_ON(!PageBuddy(page));
3510 
3511 	zone = page_zone(page);
3512 	mt = get_pageblock_migratetype(page);
3513 
3514 	if (!is_migrate_isolate(mt)) {
3515 		/*
3516 		 * Obey watermarks as if the page was being allocated. We can
3517 		 * emulate a high-order watermark check with a raised order-0
3518 		 * watermark, because we already know our high-order page
3519 		 * exists.
3520 		 */
3521 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3522 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3523 			return 0;
3524 
3525 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3526 	}
3527 
3528 	/* Remove page from free list */
3529 
3530 	del_page_from_free_list(page, zone, order);
3531 
3532 	/*
3533 	 * Set the pageblock if the isolated page is at least half of a
3534 	 * pageblock
3535 	 */
3536 	if (order >= pageblock_order - 1) {
3537 		struct page *endpage = page + (1 << order) - 1;
3538 		for (; page < endpage; page += pageblock_nr_pages) {
3539 			int mt = get_pageblock_migratetype(page);
3540 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3541 			    && !is_migrate_highatomic(mt))
3542 				set_pageblock_migratetype(page,
3543 							  MIGRATE_MOVABLE);
3544 		}
3545 	}
3546 
3547 
3548 	return 1UL << order;
3549 }
3550 
3551 /**
3552  * __putback_isolated_page - Return a now-isolated page back where we got it
3553  * @page: Page that was isolated
3554  * @order: Order of the isolated page
3555  * @mt: The page's pageblock's migratetype
3556  *
3557  * This function is meant to return a page pulled from the free lists via
3558  * __isolate_free_page back to the free lists they were pulled from.
3559  */
3560 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3561 {
3562 	struct zone *zone = page_zone(page);
3563 
3564 	/* zone lock should be held when this function is called */
3565 	lockdep_assert_held(&zone->lock);
3566 
3567 	/* Return isolated page to tail of freelist. */
3568 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3569 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3570 }
3571 
3572 /*
3573  * Update NUMA hit/miss statistics
3574  *
3575  * Must be called with interrupts disabled.
3576  */
3577 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3578 				   long nr_account)
3579 {
3580 #ifdef CONFIG_NUMA
3581 	enum numa_stat_item local_stat = NUMA_LOCAL;
3582 
3583 	/* skip numa counters update if numa stats is disabled */
3584 	if (!static_branch_likely(&vm_numa_stat_key))
3585 		return;
3586 
3587 	if (zone_to_nid(z) != numa_node_id())
3588 		local_stat = NUMA_OTHER;
3589 
3590 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3591 		__count_numa_events(z, NUMA_HIT, nr_account);
3592 	else {
3593 		__count_numa_events(z, NUMA_MISS, nr_account);
3594 		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3595 	}
3596 	__count_numa_events(z, local_stat, nr_account);
3597 #endif
3598 }
3599 
3600 /* Remove page from the per-cpu list, caller must protect the list */
3601 static inline
3602 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3603 			int migratetype,
3604 			unsigned int alloc_flags,
3605 			struct per_cpu_pages *pcp,
3606 			struct list_head *list)
3607 {
3608 	struct page *page;
3609 
3610 	do {
3611 		if (list_empty(list)) {
3612 			int batch = READ_ONCE(pcp->batch);
3613 			int alloced;
3614 
3615 			/*
3616 			 * Scale batch relative to order if batch implies
3617 			 * free pages can be stored on the PCP. Batch can
3618 			 * be 1 for small zones or for boot pagesets which
3619 			 * should never store free pages as the pages may
3620 			 * belong to arbitrary zones.
3621 			 */
3622 			if (batch > 1)
3623 				batch = max(batch >> order, 2);
3624 			alloced = rmqueue_bulk(zone, order,
3625 					batch, list,
3626 					migratetype, alloc_flags);
3627 
3628 			pcp->count += alloced << order;
3629 			if (unlikely(list_empty(list)))
3630 				return NULL;
3631 		}
3632 
3633 		page = list_first_entry(list, struct page, lru);
3634 		list_del(&page->lru);
3635 		pcp->count -= 1 << order;
3636 	} while (check_new_pcp(page));
3637 
3638 	return page;
3639 }
3640 
3641 /* Lock and remove page from the per-cpu list */
3642 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3643 			struct zone *zone, unsigned int order,
3644 			gfp_t gfp_flags, int migratetype,
3645 			unsigned int alloc_flags)
3646 {
3647 	struct per_cpu_pages *pcp;
3648 	struct list_head *list;
3649 	struct page *page;
3650 	unsigned long flags;
3651 
3652 	local_lock_irqsave(&pagesets.lock, flags);
3653 
3654 	/*
3655 	 * On allocation, reduce the number of pages that are batch freed.
3656 	 * See nr_pcp_free() where free_factor is increased for subsequent
3657 	 * frees.
3658 	 */
3659 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3660 	pcp->free_factor >>= 1;
3661 	list = &pcp->lists[order_to_pindex(migratetype, order)];
3662 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3663 	local_unlock_irqrestore(&pagesets.lock, flags);
3664 	if (page) {
3665 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3666 		zone_statistics(preferred_zone, zone, 1);
3667 	}
3668 	return page;
3669 }
3670 
3671 /*
3672  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3673  */
3674 static inline
3675 struct page *rmqueue(struct zone *preferred_zone,
3676 			struct zone *zone, unsigned int order,
3677 			gfp_t gfp_flags, unsigned int alloc_flags,
3678 			int migratetype)
3679 {
3680 	unsigned long flags;
3681 	struct page *page;
3682 
3683 	if (likely(pcp_allowed_order(order))) {
3684 		/*
3685 		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3686 		 * we need to skip it when CMA area isn't allowed.
3687 		 */
3688 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3689 				migratetype != MIGRATE_MOVABLE) {
3690 			page = rmqueue_pcplist(preferred_zone, zone, order,
3691 					gfp_flags, migratetype, alloc_flags);
3692 			goto out;
3693 		}
3694 	}
3695 
3696 	/*
3697 	 * We most definitely don't want callers attempting to
3698 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3699 	 */
3700 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3701 	spin_lock_irqsave(&zone->lock, flags);
3702 
3703 	do {
3704 		page = NULL;
3705 		/*
3706 		 * order-0 request can reach here when the pcplist is skipped
3707 		 * due to non-CMA allocation context. HIGHATOMIC area is
3708 		 * reserved for high-order atomic allocation, so order-0
3709 		 * request should skip it.
3710 		 */
3711 		if (order > 0 && alloc_flags & ALLOC_HARDER) {
3712 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3713 			if (page)
3714 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3715 		}
3716 		if (!page)
3717 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3718 	} while (page && check_new_pages(page, order));
3719 	if (!page)
3720 		goto failed;
3721 
3722 	__mod_zone_freepage_state(zone, -(1 << order),
3723 				  get_pcppage_migratetype(page));
3724 	spin_unlock_irqrestore(&zone->lock, flags);
3725 
3726 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3727 	zone_statistics(preferred_zone, zone, 1);
3728 
3729 out:
3730 	/* Separate test+clear to avoid unnecessary atomics */
3731 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3732 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3733 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3734 	}
3735 
3736 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3737 	return page;
3738 
3739 failed:
3740 	spin_unlock_irqrestore(&zone->lock, flags);
3741 	return NULL;
3742 }
3743 
3744 #ifdef CONFIG_FAIL_PAGE_ALLOC
3745 
3746 static struct {
3747 	struct fault_attr attr;
3748 
3749 	bool ignore_gfp_highmem;
3750 	bool ignore_gfp_reclaim;
3751 	u32 min_order;
3752 } fail_page_alloc = {
3753 	.attr = FAULT_ATTR_INITIALIZER,
3754 	.ignore_gfp_reclaim = true,
3755 	.ignore_gfp_highmem = true,
3756 	.min_order = 1,
3757 };
3758 
3759 static int __init setup_fail_page_alloc(char *str)
3760 {
3761 	return setup_fault_attr(&fail_page_alloc.attr, str);
3762 }
3763 __setup("fail_page_alloc=", setup_fail_page_alloc);
3764 
3765 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3766 {
3767 	if (order < fail_page_alloc.min_order)
3768 		return false;
3769 	if (gfp_mask & __GFP_NOFAIL)
3770 		return false;
3771 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3772 		return false;
3773 	if (fail_page_alloc.ignore_gfp_reclaim &&
3774 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3775 		return false;
3776 
3777 	return should_fail(&fail_page_alloc.attr, 1 << order);
3778 }
3779 
3780 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3781 
3782 static int __init fail_page_alloc_debugfs(void)
3783 {
3784 	umode_t mode = S_IFREG | 0600;
3785 	struct dentry *dir;
3786 
3787 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3788 					&fail_page_alloc.attr);
3789 
3790 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3791 			    &fail_page_alloc.ignore_gfp_reclaim);
3792 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3793 			    &fail_page_alloc.ignore_gfp_highmem);
3794 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3795 
3796 	return 0;
3797 }
3798 
3799 late_initcall(fail_page_alloc_debugfs);
3800 
3801 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3802 
3803 #else /* CONFIG_FAIL_PAGE_ALLOC */
3804 
3805 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3806 {
3807 	return false;
3808 }
3809 
3810 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3811 
3812 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3813 {
3814 	return __should_fail_alloc_page(gfp_mask, order);
3815 }
3816 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3817 
3818 static inline long __zone_watermark_unusable_free(struct zone *z,
3819 				unsigned int order, unsigned int alloc_flags)
3820 {
3821 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3822 	long unusable_free = (1 << order) - 1;
3823 
3824 	/*
3825 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3826 	 * the high-atomic reserves. This will over-estimate the size of the
3827 	 * atomic reserve but it avoids a search.
3828 	 */
3829 	if (likely(!alloc_harder))
3830 		unusable_free += z->nr_reserved_highatomic;
3831 
3832 #ifdef CONFIG_CMA
3833 	/* If allocation can't use CMA areas don't use free CMA pages */
3834 	if (!(alloc_flags & ALLOC_CMA))
3835 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3836 #endif
3837 
3838 	return unusable_free;
3839 }
3840 
3841 /*
3842  * Return true if free base pages are above 'mark'. For high-order checks it
3843  * will return true of the order-0 watermark is reached and there is at least
3844  * one free page of a suitable size. Checking now avoids taking the zone lock
3845  * to check in the allocation paths if no pages are free.
3846  */
3847 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3848 			 int highest_zoneidx, unsigned int alloc_flags,
3849 			 long free_pages)
3850 {
3851 	long min = mark;
3852 	int o;
3853 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3854 
3855 	/* free_pages may go negative - that's OK */
3856 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3857 
3858 	if (alloc_flags & ALLOC_HIGH)
3859 		min -= min / 2;
3860 
3861 	if (unlikely(alloc_harder)) {
3862 		/*
3863 		 * OOM victims can try even harder than normal ALLOC_HARDER
3864 		 * users on the grounds that it's definitely going to be in
3865 		 * the exit path shortly and free memory. Any allocation it
3866 		 * makes during the free path will be small and short-lived.
3867 		 */
3868 		if (alloc_flags & ALLOC_OOM)
3869 			min -= min / 2;
3870 		else
3871 			min -= min / 4;
3872 	}
3873 
3874 	/*
3875 	 * Check watermarks for an order-0 allocation request. If these
3876 	 * are not met, then a high-order request also cannot go ahead
3877 	 * even if a suitable page happened to be free.
3878 	 */
3879 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3880 		return false;
3881 
3882 	/* If this is an order-0 request then the watermark is fine */
3883 	if (!order)
3884 		return true;
3885 
3886 	/* For a high-order request, check at least one suitable page is free */
3887 	for (o = order; o < MAX_ORDER; o++) {
3888 		struct free_area *area = &z->free_area[o];
3889 		int mt;
3890 
3891 		if (!area->nr_free)
3892 			continue;
3893 
3894 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3895 			if (!free_area_empty(area, mt))
3896 				return true;
3897 		}
3898 
3899 #ifdef CONFIG_CMA
3900 		if ((alloc_flags & ALLOC_CMA) &&
3901 		    !free_area_empty(area, MIGRATE_CMA)) {
3902 			return true;
3903 		}
3904 #endif
3905 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3906 			return true;
3907 	}
3908 	return false;
3909 }
3910 
3911 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3912 		      int highest_zoneidx, unsigned int alloc_flags)
3913 {
3914 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3915 					zone_page_state(z, NR_FREE_PAGES));
3916 }
3917 
3918 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3919 				unsigned long mark, int highest_zoneidx,
3920 				unsigned int alloc_flags, gfp_t gfp_mask)
3921 {
3922 	long free_pages;
3923 
3924 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3925 
3926 	/*
3927 	 * Fast check for order-0 only. If this fails then the reserves
3928 	 * need to be calculated.
3929 	 */
3930 	if (!order) {
3931 		long fast_free;
3932 
3933 		fast_free = free_pages;
3934 		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3935 		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3936 			return true;
3937 	}
3938 
3939 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3940 					free_pages))
3941 		return true;
3942 	/*
3943 	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3944 	 * when checking the min watermark. The min watermark is the
3945 	 * point where boosting is ignored so that kswapd is woken up
3946 	 * when below the low watermark.
3947 	 */
3948 	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3949 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3950 		mark = z->_watermark[WMARK_MIN];
3951 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3952 					alloc_flags, free_pages);
3953 	}
3954 
3955 	return false;
3956 }
3957 
3958 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3959 			unsigned long mark, int highest_zoneidx)
3960 {
3961 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3962 
3963 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3964 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3965 
3966 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3967 								free_pages);
3968 }
3969 
3970 #ifdef CONFIG_NUMA
3971 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3972 {
3973 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3974 				node_reclaim_distance;
3975 }
3976 #else	/* CONFIG_NUMA */
3977 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3978 {
3979 	return true;
3980 }
3981 #endif	/* CONFIG_NUMA */
3982 
3983 /*
3984  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3985  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3986  * premature use of a lower zone may cause lowmem pressure problems that
3987  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3988  * probably too small. It only makes sense to spread allocations to avoid
3989  * fragmentation between the Normal and DMA32 zones.
3990  */
3991 static inline unsigned int
3992 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3993 {
3994 	unsigned int alloc_flags;
3995 
3996 	/*
3997 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3998 	 * to save a branch.
3999 	 */
4000 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4001 
4002 #ifdef CONFIG_ZONE_DMA32
4003 	if (!zone)
4004 		return alloc_flags;
4005 
4006 	if (zone_idx(zone) != ZONE_NORMAL)
4007 		return alloc_flags;
4008 
4009 	/*
4010 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4011 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4012 	 * on UMA that if Normal is populated then so is DMA32.
4013 	 */
4014 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4015 	if (nr_online_nodes > 1 && !populated_zone(--zone))
4016 		return alloc_flags;
4017 
4018 	alloc_flags |= ALLOC_NOFRAGMENT;
4019 #endif /* CONFIG_ZONE_DMA32 */
4020 	return alloc_flags;
4021 }
4022 
4023 /* Must be called after current_gfp_context() which can change gfp_mask */
4024 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4025 						  unsigned int alloc_flags)
4026 {
4027 #ifdef CONFIG_CMA
4028 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4029 		alloc_flags |= ALLOC_CMA;
4030 #endif
4031 	return alloc_flags;
4032 }
4033 
4034 /*
4035  * get_page_from_freelist goes through the zonelist trying to allocate
4036  * a page.
4037  */
4038 static struct page *
4039 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4040 						const struct alloc_context *ac)
4041 {
4042 	struct zoneref *z;
4043 	struct zone *zone;
4044 	struct pglist_data *last_pgdat_dirty_limit = NULL;
4045 	bool no_fallback;
4046 
4047 retry:
4048 	/*
4049 	 * Scan zonelist, looking for a zone with enough free.
4050 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
4051 	 */
4052 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4053 	z = ac->preferred_zoneref;
4054 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4055 					ac->nodemask) {
4056 		struct page *page;
4057 		unsigned long mark;
4058 
4059 		if (cpusets_enabled() &&
4060 			(alloc_flags & ALLOC_CPUSET) &&
4061 			!__cpuset_zone_allowed(zone, gfp_mask))
4062 				continue;
4063 		/*
4064 		 * When allocating a page cache page for writing, we
4065 		 * want to get it from a node that is within its dirty
4066 		 * limit, such that no single node holds more than its
4067 		 * proportional share of globally allowed dirty pages.
4068 		 * The dirty limits take into account the node's
4069 		 * lowmem reserves and high watermark so that kswapd
4070 		 * should be able to balance it without having to
4071 		 * write pages from its LRU list.
4072 		 *
4073 		 * XXX: For now, allow allocations to potentially
4074 		 * exceed the per-node dirty limit in the slowpath
4075 		 * (spread_dirty_pages unset) before going into reclaim,
4076 		 * which is important when on a NUMA setup the allowed
4077 		 * nodes are together not big enough to reach the
4078 		 * global limit.  The proper fix for these situations
4079 		 * will require awareness of nodes in the
4080 		 * dirty-throttling and the flusher threads.
4081 		 */
4082 		if (ac->spread_dirty_pages) {
4083 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
4084 				continue;
4085 
4086 			if (!node_dirty_ok(zone->zone_pgdat)) {
4087 				last_pgdat_dirty_limit = zone->zone_pgdat;
4088 				continue;
4089 			}
4090 		}
4091 
4092 		if (no_fallback && nr_online_nodes > 1 &&
4093 		    zone != ac->preferred_zoneref->zone) {
4094 			int local_nid;
4095 
4096 			/*
4097 			 * If moving to a remote node, retry but allow
4098 			 * fragmenting fallbacks. Locality is more important
4099 			 * than fragmentation avoidance.
4100 			 */
4101 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4102 			if (zone_to_nid(zone) != local_nid) {
4103 				alloc_flags &= ~ALLOC_NOFRAGMENT;
4104 				goto retry;
4105 			}
4106 		}
4107 
4108 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4109 		if (!zone_watermark_fast(zone, order, mark,
4110 				       ac->highest_zoneidx, alloc_flags,
4111 				       gfp_mask)) {
4112 			int ret;
4113 
4114 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4115 			/*
4116 			 * Watermark failed for this zone, but see if we can
4117 			 * grow this zone if it contains deferred pages.
4118 			 */
4119 			if (static_branch_unlikely(&deferred_pages)) {
4120 				if (_deferred_grow_zone(zone, order))
4121 					goto try_this_zone;
4122 			}
4123 #endif
4124 			/* Checked here to keep the fast path fast */
4125 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4126 			if (alloc_flags & ALLOC_NO_WATERMARKS)
4127 				goto try_this_zone;
4128 
4129 			if (!node_reclaim_enabled() ||
4130 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4131 				continue;
4132 
4133 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4134 			switch (ret) {
4135 			case NODE_RECLAIM_NOSCAN:
4136 				/* did not scan */
4137 				continue;
4138 			case NODE_RECLAIM_FULL:
4139 				/* scanned but unreclaimable */
4140 				continue;
4141 			default:
4142 				/* did we reclaim enough */
4143 				if (zone_watermark_ok(zone, order, mark,
4144 					ac->highest_zoneidx, alloc_flags))
4145 					goto try_this_zone;
4146 
4147 				continue;
4148 			}
4149 		}
4150 
4151 try_this_zone:
4152 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4153 				gfp_mask, alloc_flags, ac->migratetype);
4154 		if (page) {
4155 			prep_new_page(page, order, gfp_mask, alloc_flags);
4156 
4157 			/*
4158 			 * If this is a high-order atomic allocation then check
4159 			 * if the pageblock should be reserved for the future
4160 			 */
4161 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4162 				reserve_highatomic_pageblock(page, zone, order);
4163 
4164 			return page;
4165 		} else {
4166 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4167 			/* Try again if zone has deferred pages */
4168 			if (static_branch_unlikely(&deferred_pages)) {
4169 				if (_deferred_grow_zone(zone, order))
4170 					goto try_this_zone;
4171 			}
4172 #endif
4173 		}
4174 	}
4175 
4176 	/*
4177 	 * It's possible on a UMA machine to get through all zones that are
4178 	 * fragmented. If avoiding fragmentation, reset and try again.
4179 	 */
4180 	if (no_fallback) {
4181 		alloc_flags &= ~ALLOC_NOFRAGMENT;
4182 		goto retry;
4183 	}
4184 
4185 	return NULL;
4186 }
4187 
4188 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4189 {
4190 	unsigned int filter = SHOW_MEM_FILTER_NODES;
4191 
4192 	/*
4193 	 * This documents exceptions given to allocations in certain
4194 	 * contexts that are allowed to allocate outside current's set
4195 	 * of allowed nodes.
4196 	 */
4197 	if (!(gfp_mask & __GFP_NOMEMALLOC))
4198 		if (tsk_is_oom_victim(current) ||
4199 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
4200 			filter &= ~SHOW_MEM_FILTER_NODES;
4201 	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4202 		filter &= ~SHOW_MEM_FILTER_NODES;
4203 
4204 	show_mem(filter, nodemask);
4205 }
4206 
4207 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4208 {
4209 	struct va_format vaf;
4210 	va_list args;
4211 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4212 
4213 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
4214 		return;
4215 
4216 	va_start(args, fmt);
4217 	vaf.fmt = fmt;
4218 	vaf.va = &args;
4219 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4220 			current->comm, &vaf, gfp_mask, &gfp_mask,
4221 			nodemask_pr_args(nodemask));
4222 	va_end(args);
4223 
4224 	cpuset_print_current_mems_allowed();
4225 	pr_cont("\n");
4226 	dump_stack();
4227 	warn_alloc_show_mem(gfp_mask, nodemask);
4228 }
4229 
4230 static inline struct page *
4231 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4232 			      unsigned int alloc_flags,
4233 			      const struct alloc_context *ac)
4234 {
4235 	struct page *page;
4236 
4237 	page = get_page_from_freelist(gfp_mask, order,
4238 			alloc_flags|ALLOC_CPUSET, ac);
4239 	/*
4240 	 * fallback to ignore cpuset restriction if our nodes
4241 	 * are depleted
4242 	 */
4243 	if (!page)
4244 		page = get_page_from_freelist(gfp_mask, order,
4245 				alloc_flags, ac);
4246 
4247 	return page;
4248 }
4249 
4250 static inline struct page *
4251 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4252 	const struct alloc_context *ac, unsigned long *did_some_progress)
4253 {
4254 	struct oom_control oc = {
4255 		.zonelist = ac->zonelist,
4256 		.nodemask = ac->nodemask,
4257 		.memcg = NULL,
4258 		.gfp_mask = gfp_mask,
4259 		.order = order,
4260 	};
4261 	struct page *page;
4262 
4263 	*did_some_progress = 0;
4264 
4265 	/*
4266 	 * Acquire the oom lock.  If that fails, somebody else is
4267 	 * making progress for us.
4268 	 */
4269 	if (!mutex_trylock(&oom_lock)) {
4270 		*did_some_progress = 1;
4271 		schedule_timeout_uninterruptible(1);
4272 		return NULL;
4273 	}
4274 
4275 	/*
4276 	 * Go through the zonelist yet one more time, keep very high watermark
4277 	 * here, this is only to catch a parallel oom killing, we must fail if
4278 	 * we're still under heavy pressure. But make sure that this reclaim
4279 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4280 	 * allocation which will never fail due to oom_lock already held.
4281 	 */
4282 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4283 				      ~__GFP_DIRECT_RECLAIM, order,
4284 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4285 	if (page)
4286 		goto out;
4287 
4288 	/* Coredumps can quickly deplete all memory reserves */
4289 	if (current->flags & PF_DUMPCORE)
4290 		goto out;
4291 	/* The OOM killer will not help higher order allocs */
4292 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4293 		goto out;
4294 	/*
4295 	 * We have already exhausted all our reclaim opportunities without any
4296 	 * success so it is time to admit defeat. We will skip the OOM killer
4297 	 * because it is very likely that the caller has a more reasonable
4298 	 * fallback than shooting a random task.
4299 	 *
4300 	 * The OOM killer may not free memory on a specific node.
4301 	 */
4302 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4303 		goto out;
4304 	/* The OOM killer does not needlessly kill tasks for lowmem */
4305 	if (ac->highest_zoneidx < ZONE_NORMAL)
4306 		goto out;
4307 	if (pm_suspended_storage())
4308 		goto out;
4309 	/*
4310 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4311 	 * other request to make a forward progress.
4312 	 * We are in an unfortunate situation where out_of_memory cannot
4313 	 * do much for this context but let's try it to at least get
4314 	 * access to memory reserved if the current task is killed (see
4315 	 * out_of_memory). Once filesystems are ready to handle allocation
4316 	 * failures more gracefully we should just bail out here.
4317 	 */
4318 
4319 	/* Exhausted what can be done so it's blame time */
4320 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4321 		*did_some_progress = 1;
4322 
4323 		/*
4324 		 * Help non-failing allocations by giving them access to memory
4325 		 * reserves
4326 		 */
4327 		if (gfp_mask & __GFP_NOFAIL)
4328 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4329 					ALLOC_NO_WATERMARKS, ac);
4330 	}
4331 out:
4332 	mutex_unlock(&oom_lock);
4333 	return page;
4334 }
4335 
4336 /*
4337  * Maximum number of compaction retries with a progress before OOM
4338  * killer is consider as the only way to move forward.
4339  */
4340 #define MAX_COMPACT_RETRIES 16
4341 
4342 #ifdef CONFIG_COMPACTION
4343 /* Try memory compaction for high-order allocations before reclaim */
4344 static struct page *
4345 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4346 		unsigned int alloc_flags, const struct alloc_context *ac,
4347 		enum compact_priority prio, enum compact_result *compact_result)
4348 {
4349 	struct page *page = NULL;
4350 	unsigned long pflags;
4351 	unsigned int noreclaim_flag;
4352 
4353 	if (!order)
4354 		return NULL;
4355 
4356 	psi_memstall_enter(&pflags);
4357 	noreclaim_flag = memalloc_noreclaim_save();
4358 
4359 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4360 								prio, &page);
4361 
4362 	memalloc_noreclaim_restore(noreclaim_flag);
4363 	psi_memstall_leave(&pflags);
4364 
4365 	if (*compact_result == COMPACT_SKIPPED)
4366 		return NULL;
4367 	/*
4368 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4369 	 * count a compaction stall
4370 	 */
4371 	count_vm_event(COMPACTSTALL);
4372 
4373 	/* Prep a captured page if available */
4374 	if (page)
4375 		prep_new_page(page, order, gfp_mask, alloc_flags);
4376 
4377 	/* Try get a page from the freelist if available */
4378 	if (!page)
4379 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4380 
4381 	if (page) {
4382 		struct zone *zone = page_zone(page);
4383 
4384 		zone->compact_blockskip_flush = false;
4385 		compaction_defer_reset(zone, order, true);
4386 		count_vm_event(COMPACTSUCCESS);
4387 		return page;
4388 	}
4389 
4390 	/*
4391 	 * It's bad if compaction run occurs and fails. The most likely reason
4392 	 * is that pages exist, but not enough to satisfy watermarks.
4393 	 */
4394 	count_vm_event(COMPACTFAIL);
4395 
4396 	cond_resched();
4397 
4398 	return NULL;
4399 }
4400 
4401 static inline bool
4402 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4403 		     enum compact_result compact_result,
4404 		     enum compact_priority *compact_priority,
4405 		     int *compaction_retries)
4406 {
4407 	int max_retries = MAX_COMPACT_RETRIES;
4408 	int min_priority;
4409 	bool ret = false;
4410 	int retries = *compaction_retries;
4411 	enum compact_priority priority = *compact_priority;
4412 
4413 	if (!order)
4414 		return false;
4415 
4416 	if (fatal_signal_pending(current))
4417 		return false;
4418 
4419 	if (compaction_made_progress(compact_result))
4420 		(*compaction_retries)++;
4421 
4422 	/*
4423 	 * compaction considers all the zone as desperately out of memory
4424 	 * so it doesn't really make much sense to retry except when the
4425 	 * failure could be caused by insufficient priority
4426 	 */
4427 	if (compaction_failed(compact_result))
4428 		goto check_priority;
4429 
4430 	/*
4431 	 * compaction was skipped because there are not enough order-0 pages
4432 	 * to work with, so we retry only if it looks like reclaim can help.
4433 	 */
4434 	if (compaction_needs_reclaim(compact_result)) {
4435 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4436 		goto out;
4437 	}
4438 
4439 	/*
4440 	 * make sure the compaction wasn't deferred or didn't bail out early
4441 	 * due to locks contention before we declare that we should give up.
4442 	 * But the next retry should use a higher priority if allowed, so
4443 	 * we don't just keep bailing out endlessly.
4444 	 */
4445 	if (compaction_withdrawn(compact_result)) {
4446 		goto check_priority;
4447 	}
4448 
4449 	/*
4450 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4451 	 * costly ones because they are de facto nofail and invoke OOM
4452 	 * killer to move on while costly can fail and users are ready
4453 	 * to cope with that. 1/4 retries is rather arbitrary but we
4454 	 * would need much more detailed feedback from compaction to
4455 	 * make a better decision.
4456 	 */
4457 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4458 		max_retries /= 4;
4459 	if (*compaction_retries <= max_retries) {
4460 		ret = true;
4461 		goto out;
4462 	}
4463 
4464 	/*
4465 	 * Make sure there are attempts at the highest priority if we exhausted
4466 	 * all retries or failed at the lower priorities.
4467 	 */
4468 check_priority:
4469 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4470 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4471 
4472 	if (*compact_priority > min_priority) {
4473 		(*compact_priority)--;
4474 		*compaction_retries = 0;
4475 		ret = true;
4476 	}
4477 out:
4478 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4479 	return ret;
4480 }
4481 #else
4482 static inline struct page *
4483 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4484 		unsigned int alloc_flags, const struct alloc_context *ac,
4485 		enum compact_priority prio, enum compact_result *compact_result)
4486 {
4487 	*compact_result = COMPACT_SKIPPED;
4488 	return NULL;
4489 }
4490 
4491 static inline bool
4492 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4493 		     enum compact_result compact_result,
4494 		     enum compact_priority *compact_priority,
4495 		     int *compaction_retries)
4496 {
4497 	struct zone *zone;
4498 	struct zoneref *z;
4499 
4500 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4501 		return false;
4502 
4503 	/*
4504 	 * There are setups with compaction disabled which would prefer to loop
4505 	 * inside the allocator rather than hit the oom killer prematurely.
4506 	 * Let's give them a good hope and keep retrying while the order-0
4507 	 * watermarks are OK.
4508 	 */
4509 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4510 				ac->highest_zoneidx, ac->nodemask) {
4511 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4512 					ac->highest_zoneidx, alloc_flags))
4513 			return true;
4514 	}
4515 	return false;
4516 }
4517 #endif /* CONFIG_COMPACTION */
4518 
4519 #ifdef CONFIG_LOCKDEP
4520 static struct lockdep_map __fs_reclaim_map =
4521 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4522 
4523 static bool __need_reclaim(gfp_t gfp_mask)
4524 {
4525 	/* no reclaim without waiting on it */
4526 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4527 		return false;
4528 
4529 	/* this guy won't enter reclaim */
4530 	if (current->flags & PF_MEMALLOC)
4531 		return false;
4532 
4533 	if (gfp_mask & __GFP_NOLOCKDEP)
4534 		return false;
4535 
4536 	return true;
4537 }
4538 
4539 void __fs_reclaim_acquire(unsigned long ip)
4540 {
4541 	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4542 }
4543 
4544 void __fs_reclaim_release(unsigned long ip)
4545 {
4546 	lock_release(&__fs_reclaim_map, ip);
4547 }
4548 
4549 void fs_reclaim_acquire(gfp_t gfp_mask)
4550 {
4551 	gfp_mask = current_gfp_context(gfp_mask);
4552 
4553 	if (__need_reclaim(gfp_mask)) {
4554 		if (gfp_mask & __GFP_FS)
4555 			__fs_reclaim_acquire(_RET_IP_);
4556 
4557 #ifdef CONFIG_MMU_NOTIFIER
4558 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4559 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4560 #endif
4561 
4562 	}
4563 }
4564 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4565 
4566 void fs_reclaim_release(gfp_t gfp_mask)
4567 {
4568 	gfp_mask = current_gfp_context(gfp_mask);
4569 
4570 	if (__need_reclaim(gfp_mask)) {
4571 		if (gfp_mask & __GFP_FS)
4572 			__fs_reclaim_release(_RET_IP_);
4573 	}
4574 }
4575 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4576 #endif
4577 
4578 /* Perform direct synchronous page reclaim */
4579 static unsigned long
4580 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4581 					const struct alloc_context *ac)
4582 {
4583 	unsigned int noreclaim_flag;
4584 	unsigned long pflags, progress;
4585 
4586 	cond_resched();
4587 
4588 	/* We now go into synchronous reclaim */
4589 	cpuset_memory_pressure_bump();
4590 	psi_memstall_enter(&pflags);
4591 	fs_reclaim_acquire(gfp_mask);
4592 	noreclaim_flag = memalloc_noreclaim_save();
4593 
4594 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4595 								ac->nodemask);
4596 
4597 	memalloc_noreclaim_restore(noreclaim_flag);
4598 	fs_reclaim_release(gfp_mask);
4599 	psi_memstall_leave(&pflags);
4600 
4601 	cond_resched();
4602 
4603 	return progress;
4604 }
4605 
4606 /* The really slow allocator path where we enter direct reclaim */
4607 static inline struct page *
4608 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4609 		unsigned int alloc_flags, const struct alloc_context *ac,
4610 		unsigned long *did_some_progress)
4611 {
4612 	struct page *page = NULL;
4613 	bool drained = false;
4614 
4615 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4616 	if (unlikely(!(*did_some_progress)))
4617 		return NULL;
4618 
4619 retry:
4620 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4621 
4622 	/*
4623 	 * If an allocation failed after direct reclaim, it could be because
4624 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4625 	 * Shrink them and try again
4626 	 */
4627 	if (!page && !drained) {
4628 		unreserve_highatomic_pageblock(ac, false);
4629 		drain_all_pages(NULL);
4630 		drained = true;
4631 		goto retry;
4632 	}
4633 
4634 	return page;
4635 }
4636 
4637 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4638 			     const struct alloc_context *ac)
4639 {
4640 	struct zoneref *z;
4641 	struct zone *zone;
4642 	pg_data_t *last_pgdat = NULL;
4643 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4644 
4645 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4646 					ac->nodemask) {
4647 		if (last_pgdat != zone->zone_pgdat)
4648 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4649 		last_pgdat = zone->zone_pgdat;
4650 	}
4651 }
4652 
4653 static inline unsigned int
4654 gfp_to_alloc_flags(gfp_t gfp_mask)
4655 {
4656 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4657 
4658 	/*
4659 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4660 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4661 	 * to save two branches.
4662 	 */
4663 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4664 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4665 
4666 	/*
4667 	 * The caller may dip into page reserves a bit more if the caller
4668 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4669 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4670 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4671 	 */
4672 	alloc_flags |= (__force int)
4673 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4674 
4675 	if (gfp_mask & __GFP_ATOMIC) {
4676 		/*
4677 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4678 		 * if it can't schedule.
4679 		 */
4680 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4681 			alloc_flags |= ALLOC_HARDER;
4682 		/*
4683 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4684 		 * comment for __cpuset_node_allowed().
4685 		 */
4686 		alloc_flags &= ~ALLOC_CPUSET;
4687 	} else if (unlikely(rt_task(current)) && in_task())
4688 		alloc_flags |= ALLOC_HARDER;
4689 
4690 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4691 
4692 	return alloc_flags;
4693 }
4694 
4695 static bool oom_reserves_allowed(struct task_struct *tsk)
4696 {
4697 	if (!tsk_is_oom_victim(tsk))
4698 		return false;
4699 
4700 	/*
4701 	 * !MMU doesn't have oom reaper so give access to memory reserves
4702 	 * only to the thread with TIF_MEMDIE set
4703 	 */
4704 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4705 		return false;
4706 
4707 	return true;
4708 }
4709 
4710 /*
4711  * Distinguish requests which really need access to full memory
4712  * reserves from oom victims which can live with a portion of it
4713  */
4714 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4715 {
4716 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4717 		return 0;
4718 	if (gfp_mask & __GFP_MEMALLOC)
4719 		return ALLOC_NO_WATERMARKS;
4720 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4721 		return ALLOC_NO_WATERMARKS;
4722 	if (!in_interrupt()) {
4723 		if (current->flags & PF_MEMALLOC)
4724 			return ALLOC_NO_WATERMARKS;
4725 		else if (oom_reserves_allowed(current))
4726 			return ALLOC_OOM;
4727 	}
4728 
4729 	return 0;
4730 }
4731 
4732 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4733 {
4734 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4735 }
4736 
4737 /*
4738  * Checks whether it makes sense to retry the reclaim to make a forward progress
4739  * for the given allocation request.
4740  *
4741  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4742  * without success, or when we couldn't even meet the watermark if we
4743  * reclaimed all remaining pages on the LRU lists.
4744  *
4745  * Returns true if a retry is viable or false to enter the oom path.
4746  */
4747 static inline bool
4748 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4749 		     struct alloc_context *ac, int alloc_flags,
4750 		     bool did_some_progress, int *no_progress_loops)
4751 {
4752 	struct zone *zone;
4753 	struct zoneref *z;
4754 	bool ret = false;
4755 
4756 	/*
4757 	 * Costly allocations might have made a progress but this doesn't mean
4758 	 * their order will become available due to high fragmentation so
4759 	 * always increment the no progress counter for them
4760 	 */
4761 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4762 		*no_progress_loops = 0;
4763 	else
4764 		(*no_progress_loops)++;
4765 
4766 	/*
4767 	 * Make sure we converge to OOM if we cannot make any progress
4768 	 * several times in the row.
4769 	 */
4770 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4771 		/* Before OOM, exhaust highatomic_reserve */
4772 		return unreserve_highatomic_pageblock(ac, true);
4773 	}
4774 
4775 	/*
4776 	 * Keep reclaiming pages while there is a chance this will lead
4777 	 * somewhere.  If none of the target zones can satisfy our allocation
4778 	 * request even if all reclaimable pages are considered then we are
4779 	 * screwed and have to go OOM.
4780 	 */
4781 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4782 				ac->highest_zoneidx, ac->nodemask) {
4783 		unsigned long available;
4784 		unsigned long reclaimable;
4785 		unsigned long min_wmark = min_wmark_pages(zone);
4786 		bool wmark;
4787 
4788 		available = reclaimable = zone_reclaimable_pages(zone);
4789 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4790 
4791 		/*
4792 		 * Would the allocation succeed if we reclaimed all
4793 		 * reclaimable pages?
4794 		 */
4795 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4796 				ac->highest_zoneidx, alloc_flags, available);
4797 		trace_reclaim_retry_zone(z, order, reclaimable,
4798 				available, min_wmark, *no_progress_loops, wmark);
4799 		if (wmark) {
4800 			/*
4801 			 * If we didn't make any progress and have a lot of
4802 			 * dirty + writeback pages then we should wait for
4803 			 * an IO to complete to slow down the reclaim and
4804 			 * prevent from pre mature OOM
4805 			 */
4806 			if (!did_some_progress) {
4807 				unsigned long write_pending;
4808 
4809 				write_pending = zone_page_state_snapshot(zone,
4810 							NR_ZONE_WRITE_PENDING);
4811 
4812 				if (2 * write_pending > reclaimable) {
4813 					congestion_wait(BLK_RW_ASYNC, HZ/10);
4814 					return true;
4815 				}
4816 			}
4817 
4818 			ret = true;
4819 			goto out;
4820 		}
4821 	}
4822 
4823 out:
4824 	/*
4825 	 * Memory allocation/reclaim might be called from a WQ context and the
4826 	 * current implementation of the WQ concurrency control doesn't
4827 	 * recognize that a particular WQ is congested if the worker thread is
4828 	 * looping without ever sleeping. Therefore we have to do a short sleep
4829 	 * here rather than calling cond_resched().
4830 	 */
4831 	if (current->flags & PF_WQ_WORKER)
4832 		schedule_timeout_uninterruptible(1);
4833 	else
4834 		cond_resched();
4835 	return ret;
4836 }
4837 
4838 static inline bool
4839 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4840 {
4841 	/*
4842 	 * It's possible that cpuset's mems_allowed and the nodemask from
4843 	 * mempolicy don't intersect. This should be normally dealt with by
4844 	 * policy_nodemask(), but it's possible to race with cpuset update in
4845 	 * such a way the check therein was true, and then it became false
4846 	 * before we got our cpuset_mems_cookie here.
4847 	 * This assumes that for all allocations, ac->nodemask can come only
4848 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4849 	 * when it does not intersect with the cpuset restrictions) or the
4850 	 * caller can deal with a violated nodemask.
4851 	 */
4852 	if (cpusets_enabled() && ac->nodemask &&
4853 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4854 		ac->nodemask = NULL;
4855 		return true;
4856 	}
4857 
4858 	/*
4859 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4860 	 * possible to race with parallel threads in such a way that our
4861 	 * allocation can fail while the mask is being updated. If we are about
4862 	 * to fail, check if the cpuset changed during allocation and if so,
4863 	 * retry.
4864 	 */
4865 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4866 		return true;
4867 
4868 	return false;
4869 }
4870 
4871 static inline struct page *
4872 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4873 						struct alloc_context *ac)
4874 {
4875 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4876 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4877 	struct page *page = NULL;
4878 	unsigned int alloc_flags;
4879 	unsigned long did_some_progress;
4880 	enum compact_priority compact_priority;
4881 	enum compact_result compact_result;
4882 	int compaction_retries;
4883 	int no_progress_loops;
4884 	unsigned int cpuset_mems_cookie;
4885 	int reserve_flags;
4886 
4887 	/*
4888 	 * We also sanity check to catch abuse of atomic reserves being used by
4889 	 * callers that are not in atomic context.
4890 	 */
4891 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4892 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4893 		gfp_mask &= ~__GFP_ATOMIC;
4894 
4895 retry_cpuset:
4896 	compaction_retries = 0;
4897 	no_progress_loops = 0;
4898 	compact_priority = DEF_COMPACT_PRIORITY;
4899 	cpuset_mems_cookie = read_mems_allowed_begin();
4900 
4901 	/*
4902 	 * The fast path uses conservative alloc_flags to succeed only until
4903 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4904 	 * alloc_flags precisely. So we do that now.
4905 	 */
4906 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4907 
4908 	/*
4909 	 * We need to recalculate the starting point for the zonelist iterator
4910 	 * because we might have used different nodemask in the fast path, or
4911 	 * there was a cpuset modification and we are retrying - otherwise we
4912 	 * could end up iterating over non-eligible zones endlessly.
4913 	 */
4914 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4915 					ac->highest_zoneidx, ac->nodemask);
4916 	if (!ac->preferred_zoneref->zone)
4917 		goto nopage;
4918 
4919 	if (alloc_flags & ALLOC_KSWAPD)
4920 		wake_all_kswapds(order, gfp_mask, ac);
4921 
4922 	/*
4923 	 * The adjusted alloc_flags might result in immediate success, so try
4924 	 * that first
4925 	 */
4926 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4927 	if (page)
4928 		goto got_pg;
4929 
4930 	/*
4931 	 * For costly allocations, try direct compaction first, as it's likely
4932 	 * that we have enough base pages and don't need to reclaim. For non-
4933 	 * movable high-order allocations, do that as well, as compaction will
4934 	 * try prevent permanent fragmentation by migrating from blocks of the
4935 	 * same migratetype.
4936 	 * Don't try this for allocations that are allowed to ignore
4937 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4938 	 */
4939 	if (can_direct_reclaim &&
4940 			(costly_order ||
4941 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4942 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4943 		page = __alloc_pages_direct_compact(gfp_mask, order,
4944 						alloc_flags, ac,
4945 						INIT_COMPACT_PRIORITY,
4946 						&compact_result);
4947 		if (page)
4948 			goto got_pg;
4949 
4950 		/*
4951 		 * Checks for costly allocations with __GFP_NORETRY, which
4952 		 * includes some THP page fault allocations
4953 		 */
4954 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4955 			/*
4956 			 * If allocating entire pageblock(s) and compaction
4957 			 * failed because all zones are below low watermarks
4958 			 * or is prohibited because it recently failed at this
4959 			 * order, fail immediately unless the allocator has
4960 			 * requested compaction and reclaim retry.
4961 			 *
4962 			 * Reclaim is
4963 			 *  - potentially very expensive because zones are far
4964 			 *    below their low watermarks or this is part of very
4965 			 *    bursty high order allocations,
4966 			 *  - not guaranteed to help because isolate_freepages()
4967 			 *    may not iterate over freed pages as part of its
4968 			 *    linear scan, and
4969 			 *  - unlikely to make entire pageblocks free on its
4970 			 *    own.
4971 			 */
4972 			if (compact_result == COMPACT_SKIPPED ||
4973 			    compact_result == COMPACT_DEFERRED)
4974 				goto nopage;
4975 
4976 			/*
4977 			 * Looks like reclaim/compaction is worth trying, but
4978 			 * sync compaction could be very expensive, so keep
4979 			 * using async compaction.
4980 			 */
4981 			compact_priority = INIT_COMPACT_PRIORITY;
4982 		}
4983 	}
4984 
4985 retry:
4986 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4987 	if (alloc_flags & ALLOC_KSWAPD)
4988 		wake_all_kswapds(order, gfp_mask, ac);
4989 
4990 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4991 	if (reserve_flags)
4992 		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
4993 
4994 	/*
4995 	 * Reset the nodemask and zonelist iterators if memory policies can be
4996 	 * ignored. These allocations are high priority and system rather than
4997 	 * user oriented.
4998 	 */
4999 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5000 		ac->nodemask = NULL;
5001 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5002 					ac->highest_zoneidx, ac->nodemask);
5003 	}
5004 
5005 	/* Attempt with potentially adjusted zonelist and alloc_flags */
5006 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5007 	if (page)
5008 		goto got_pg;
5009 
5010 	/* Caller is not willing to reclaim, we can't balance anything */
5011 	if (!can_direct_reclaim)
5012 		goto nopage;
5013 
5014 	/* Avoid recursion of direct reclaim */
5015 	if (current->flags & PF_MEMALLOC)
5016 		goto nopage;
5017 
5018 	/* Try direct reclaim and then allocating */
5019 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5020 							&did_some_progress);
5021 	if (page)
5022 		goto got_pg;
5023 
5024 	/* Try direct compaction and then allocating */
5025 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5026 					compact_priority, &compact_result);
5027 	if (page)
5028 		goto got_pg;
5029 
5030 	/* Do not loop if specifically requested */
5031 	if (gfp_mask & __GFP_NORETRY)
5032 		goto nopage;
5033 
5034 	/*
5035 	 * Do not retry costly high order allocations unless they are
5036 	 * __GFP_RETRY_MAYFAIL
5037 	 */
5038 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5039 		goto nopage;
5040 
5041 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5042 				 did_some_progress > 0, &no_progress_loops))
5043 		goto retry;
5044 
5045 	/*
5046 	 * It doesn't make any sense to retry for the compaction if the order-0
5047 	 * reclaim is not able to make any progress because the current
5048 	 * implementation of the compaction depends on the sufficient amount
5049 	 * of free memory (see __compaction_suitable)
5050 	 */
5051 	if (did_some_progress > 0 &&
5052 			should_compact_retry(ac, order, alloc_flags,
5053 				compact_result, &compact_priority,
5054 				&compaction_retries))
5055 		goto retry;
5056 
5057 
5058 	/* Deal with possible cpuset update races before we start OOM killing */
5059 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5060 		goto retry_cpuset;
5061 
5062 	/* Reclaim has failed us, start killing things */
5063 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5064 	if (page)
5065 		goto got_pg;
5066 
5067 	/* Avoid allocations with no watermarks from looping endlessly */
5068 	if (tsk_is_oom_victim(current) &&
5069 	    (alloc_flags & ALLOC_OOM ||
5070 	     (gfp_mask & __GFP_NOMEMALLOC)))
5071 		goto nopage;
5072 
5073 	/* Retry as long as the OOM killer is making progress */
5074 	if (did_some_progress) {
5075 		no_progress_loops = 0;
5076 		goto retry;
5077 	}
5078 
5079 nopage:
5080 	/* Deal with possible cpuset update races before we fail */
5081 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5082 		goto retry_cpuset;
5083 
5084 	/*
5085 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5086 	 * we always retry
5087 	 */
5088 	if (gfp_mask & __GFP_NOFAIL) {
5089 		/*
5090 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
5091 		 * of any new users that actually require GFP_NOWAIT
5092 		 */
5093 		if (WARN_ON_ONCE(!can_direct_reclaim))
5094 			goto fail;
5095 
5096 		/*
5097 		 * PF_MEMALLOC request from this context is rather bizarre
5098 		 * because we cannot reclaim anything and only can loop waiting
5099 		 * for somebody to do a work for us
5100 		 */
5101 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
5102 
5103 		/*
5104 		 * non failing costly orders are a hard requirement which we
5105 		 * are not prepared for much so let's warn about these users
5106 		 * so that we can identify them and convert them to something
5107 		 * else.
5108 		 */
5109 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
5110 
5111 		/*
5112 		 * Help non-failing allocations by giving them access to memory
5113 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
5114 		 * could deplete whole memory reserves which would just make
5115 		 * the situation worse
5116 		 */
5117 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5118 		if (page)
5119 			goto got_pg;
5120 
5121 		cond_resched();
5122 		goto retry;
5123 	}
5124 fail:
5125 	warn_alloc(gfp_mask, ac->nodemask,
5126 			"page allocation failure: order:%u", order);
5127 got_pg:
5128 	return page;
5129 }
5130 
5131 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5132 		int preferred_nid, nodemask_t *nodemask,
5133 		struct alloc_context *ac, gfp_t *alloc_gfp,
5134 		unsigned int *alloc_flags)
5135 {
5136 	ac->highest_zoneidx = gfp_zone(gfp_mask);
5137 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5138 	ac->nodemask = nodemask;
5139 	ac->migratetype = gfp_migratetype(gfp_mask);
5140 
5141 	if (cpusets_enabled()) {
5142 		*alloc_gfp |= __GFP_HARDWALL;
5143 		/*
5144 		 * When we are in the interrupt context, it is irrelevant
5145 		 * to the current task context. It means that any node ok.
5146 		 */
5147 		if (in_task() && !ac->nodemask)
5148 			ac->nodemask = &cpuset_current_mems_allowed;
5149 		else
5150 			*alloc_flags |= ALLOC_CPUSET;
5151 	}
5152 
5153 	fs_reclaim_acquire(gfp_mask);
5154 	fs_reclaim_release(gfp_mask);
5155 
5156 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5157 
5158 	if (should_fail_alloc_page(gfp_mask, order))
5159 		return false;
5160 
5161 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5162 
5163 	/* Dirty zone balancing only done in the fast path */
5164 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5165 
5166 	/*
5167 	 * The preferred zone is used for statistics but crucially it is
5168 	 * also used as the starting point for the zonelist iterator. It
5169 	 * may get reset for allocations that ignore memory policies.
5170 	 */
5171 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5172 					ac->highest_zoneidx, ac->nodemask);
5173 
5174 	return true;
5175 }
5176 
5177 /*
5178  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5179  * @gfp: GFP flags for the allocation
5180  * @preferred_nid: The preferred NUMA node ID to allocate from
5181  * @nodemask: Set of nodes to allocate from, may be NULL
5182  * @nr_pages: The number of pages desired on the list or array
5183  * @page_list: Optional list to store the allocated pages
5184  * @page_array: Optional array to store the pages
5185  *
5186  * This is a batched version of the page allocator that attempts to
5187  * allocate nr_pages quickly. Pages are added to page_list if page_list
5188  * is not NULL, otherwise it is assumed that the page_array is valid.
5189  *
5190  * For lists, nr_pages is the number of pages that should be allocated.
5191  *
5192  * For arrays, only NULL elements are populated with pages and nr_pages
5193  * is the maximum number of pages that will be stored in the array.
5194  *
5195  * Returns the number of pages on the list or array.
5196  */
5197 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5198 			nodemask_t *nodemask, int nr_pages,
5199 			struct list_head *page_list,
5200 			struct page **page_array)
5201 {
5202 	struct page *page;
5203 	unsigned long flags;
5204 	struct zone *zone;
5205 	struct zoneref *z;
5206 	struct per_cpu_pages *pcp;
5207 	struct list_head *pcp_list;
5208 	struct alloc_context ac;
5209 	gfp_t alloc_gfp;
5210 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5211 	int nr_populated = 0, nr_account = 0;
5212 
5213 	/*
5214 	 * Skip populated array elements to determine if any pages need
5215 	 * to be allocated before disabling IRQs.
5216 	 */
5217 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5218 		nr_populated++;
5219 
5220 	/* No pages requested? */
5221 	if (unlikely(nr_pages <= 0))
5222 		goto out;
5223 
5224 	/* Already populated array? */
5225 	if (unlikely(page_array && nr_pages - nr_populated == 0))
5226 		goto out;
5227 
5228 	/* Bulk allocator does not support memcg accounting. */
5229 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5230 		goto failed;
5231 
5232 	/* Use the single page allocator for one page. */
5233 	if (nr_pages - nr_populated == 1)
5234 		goto failed;
5235 
5236 #ifdef CONFIG_PAGE_OWNER
5237 	/*
5238 	 * PAGE_OWNER may recurse into the allocator to allocate space to
5239 	 * save the stack with pagesets.lock held. Releasing/reacquiring
5240 	 * removes much of the performance benefit of bulk allocation so
5241 	 * force the caller to allocate one page at a time as it'll have
5242 	 * similar performance to added complexity to the bulk allocator.
5243 	 */
5244 	if (static_branch_unlikely(&page_owner_inited))
5245 		goto failed;
5246 #endif
5247 
5248 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5249 	gfp &= gfp_allowed_mask;
5250 	alloc_gfp = gfp;
5251 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5252 		goto out;
5253 	gfp = alloc_gfp;
5254 
5255 	/* Find an allowed local zone that meets the low watermark. */
5256 	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5257 		unsigned long mark;
5258 
5259 		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5260 		    !__cpuset_zone_allowed(zone, gfp)) {
5261 			continue;
5262 		}
5263 
5264 		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5265 		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5266 			goto failed;
5267 		}
5268 
5269 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5270 		if (zone_watermark_fast(zone, 0,  mark,
5271 				zonelist_zone_idx(ac.preferred_zoneref),
5272 				alloc_flags, gfp)) {
5273 			break;
5274 		}
5275 	}
5276 
5277 	/*
5278 	 * If there are no allowed local zones that meets the watermarks then
5279 	 * try to allocate a single page and reclaim if necessary.
5280 	 */
5281 	if (unlikely(!zone))
5282 		goto failed;
5283 
5284 	/* Attempt the batch allocation */
5285 	local_lock_irqsave(&pagesets.lock, flags);
5286 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
5287 	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5288 
5289 	while (nr_populated < nr_pages) {
5290 
5291 		/* Skip existing pages */
5292 		if (page_array && page_array[nr_populated]) {
5293 			nr_populated++;
5294 			continue;
5295 		}
5296 
5297 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5298 								pcp, pcp_list);
5299 		if (unlikely(!page)) {
5300 			/* Try and get at least one page */
5301 			if (!nr_populated)
5302 				goto failed_irq;
5303 			break;
5304 		}
5305 		nr_account++;
5306 
5307 		prep_new_page(page, 0, gfp, 0);
5308 		if (page_list)
5309 			list_add(&page->lru, page_list);
5310 		else
5311 			page_array[nr_populated] = page;
5312 		nr_populated++;
5313 	}
5314 
5315 	local_unlock_irqrestore(&pagesets.lock, flags);
5316 
5317 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5318 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5319 
5320 out:
5321 	return nr_populated;
5322 
5323 failed_irq:
5324 	local_unlock_irqrestore(&pagesets.lock, flags);
5325 
5326 failed:
5327 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5328 	if (page) {
5329 		if (page_list)
5330 			list_add(&page->lru, page_list);
5331 		else
5332 			page_array[nr_populated] = page;
5333 		nr_populated++;
5334 	}
5335 
5336 	goto out;
5337 }
5338 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
5339 
5340 /*
5341  * This is the 'heart' of the zoned buddy allocator.
5342  */
5343 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5344 							nodemask_t *nodemask)
5345 {
5346 	struct page *page;
5347 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5348 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5349 	struct alloc_context ac = { };
5350 
5351 	/*
5352 	 * There are several places where we assume that the order value is sane
5353 	 * so bail out early if the request is out of bound.
5354 	 */
5355 	if (unlikely(order >= MAX_ORDER)) {
5356 		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5357 		return NULL;
5358 	}
5359 
5360 	gfp &= gfp_allowed_mask;
5361 	/*
5362 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5363 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
5364 	 * from a particular context which has been marked by
5365 	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5366 	 * movable zones are not used during allocation.
5367 	 */
5368 	gfp = current_gfp_context(gfp);
5369 	alloc_gfp = gfp;
5370 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5371 			&alloc_gfp, &alloc_flags))
5372 		return NULL;
5373 
5374 	/*
5375 	 * Forbid the first pass from falling back to types that fragment
5376 	 * memory until all local zones are considered.
5377 	 */
5378 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5379 
5380 	/* First allocation attempt */
5381 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5382 	if (likely(page))
5383 		goto out;
5384 
5385 	alloc_gfp = gfp;
5386 	ac.spread_dirty_pages = false;
5387 
5388 	/*
5389 	 * Restore the original nodemask if it was potentially replaced with
5390 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5391 	 */
5392 	ac.nodemask = nodemask;
5393 
5394 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5395 
5396 out:
5397 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5398 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5399 		__free_pages(page, order);
5400 		page = NULL;
5401 	}
5402 
5403 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5404 
5405 	return page;
5406 }
5407 EXPORT_SYMBOL(__alloc_pages);
5408 
5409 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5410 		nodemask_t *nodemask)
5411 {
5412 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5413 			preferred_nid, nodemask);
5414 
5415 	if (page && order > 1)
5416 		prep_transhuge_page(page);
5417 	return (struct folio *)page;
5418 }
5419 EXPORT_SYMBOL(__folio_alloc);
5420 
5421 /*
5422  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5423  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5424  * you need to access high mem.
5425  */
5426 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5427 {
5428 	struct page *page;
5429 
5430 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5431 	if (!page)
5432 		return 0;
5433 	return (unsigned long) page_address(page);
5434 }
5435 EXPORT_SYMBOL(__get_free_pages);
5436 
5437 unsigned long get_zeroed_page(gfp_t gfp_mask)
5438 {
5439 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5440 }
5441 EXPORT_SYMBOL(get_zeroed_page);
5442 
5443 /**
5444  * __free_pages - Free pages allocated with alloc_pages().
5445  * @page: The page pointer returned from alloc_pages().
5446  * @order: The order of the allocation.
5447  *
5448  * This function can free multi-page allocations that are not compound
5449  * pages.  It does not check that the @order passed in matches that of
5450  * the allocation, so it is easy to leak memory.  Freeing more memory
5451  * than was allocated will probably emit a warning.
5452  *
5453  * If the last reference to this page is speculative, it will be released
5454  * by put_page() which only frees the first page of a non-compound
5455  * allocation.  To prevent the remaining pages from being leaked, we free
5456  * the subsequent pages here.  If you want to use the page's reference
5457  * count to decide when to free the allocation, you should allocate a
5458  * compound page, and use put_page() instead of __free_pages().
5459  *
5460  * Context: May be called in interrupt context or while holding a normal
5461  * spinlock, but not in NMI context or while holding a raw spinlock.
5462  */
5463 void __free_pages(struct page *page, unsigned int order)
5464 {
5465 	if (put_page_testzero(page))
5466 		free_the_page(page, order);
5467 	else if (!PageHead(page))
5468 		while (order-- > 0)
5469 			free_the_page(page + (1 << order), order);
5470 }
5471 EXPORT_SYMBOL(__free_pages);
5472 
5473 void free_pages(unsigned long addr, unsigned int order)
5474 {
5475 	if (addr != 0) {
5476 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5477 		__free_pages(virt_to_page((void *)addr), order);
5478 	}
5479 }
5480 
5481 EXPORT_SYMBOL(free_pages);
5482 
5483 /*
5484  * Page Fragment:
5485  *  An arbitrary-length arbitrary-offset area of memory which resides
5486  *  within a 0 or higher order page.  Multiple fragments within that page
5487  *  are individually refcounted, in the page's reference counter.
5488  *
5489  * The page_frag functions below provide a simple allocation framework for
5490  * page fragments.  This is used by the network stack and network device
5491  * drivers to provide a backing region of memory for use as either an
5492  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5493  */
5494 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5495 					     gfp_t gfp_mask)
5496 {
5497 	struct page *page = NULL;
5498 	gfp_t gfp = gfp_mask;
5499 
5500 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5501 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5502 		    __GFP_NOMEMALLOC;
5503 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5504 				PAGE_FRAG_CACHE_MAX_ORDER);
5505 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5506 #endif
5507 	if (unlikely(!page))
5508 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5509 
5510 	nc->va = page ? page_address(page) : NULL;
5511 
5512 	return page;
5513 }
5514 
5515 void __page_frag_cache_drain(struct page *page, unsigned int count)
5516 {
5517 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5518 
5519 	if (page_ref_sub_and_test(page, count))
5520 		free_the_page(page, compound_order(page));
5521 }
5522 EXPORT_SYMBOL(__page_frag_cache_drain);
5523 
5524 void *page_frag_alloc_align(struct page_frag_cache *nc,
5525 		      unsigned int fragsz, gfp_t gfp_mask,
5526 		      unsigned int align_mask)
5527 {
5528 	unsigned int size = PAGE_SIZE;
5529 	struct page *page;
5530 	int offset;
5531 
5532 	if (unlikely(!nc->va)) {
5533 refill:
5534 		page = __page_frag_cache_refill(nc, gfp_mask);
5535 		if (!page)
5536 			return NULL;
5537 
5538 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5539 		/* if size can vary use size else just use PAGE_SIZE */
5540 		size = nc->size;
5541 #endif
5542 		/* Even if we own the page, we do not use atomic_set().
5543 		 * This would break get_page_unless_zero() users.
5544 		 */
5545 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5546 
5547 		/* reset page count bias and offset to start of new frag */
5548 		nc->pfmemalloc = page_is_pfmemalloc(page);
5549 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5550 		nc->offset = size;
5551 	}
5552 
5553 	offset = nc->offset - fragsz;
5554 	if (unlikely(offset < 0)) {
5555 		page = virt_to_page(nc->va);
5556 
5557 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5558 			goto refill;
5559 
5560 		if (unlikely(nc->pfmemalloc)) {
5561 			free_the_page(page, compound_order(page));
5562 			goto refill;
5563 		}
5564 
5565 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5566 		/* if size can vary use size else just use PAGE_SIZE */
5567 		size = nc->size;
5568 #endif
5569 		/* OK, page count is 0, we can safely set it */
5570 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5571 
5572 		/* reset page count bias and offset to start of new frag */
5573 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5574 		offset = size - fragsz;
5575 	}
5576 
5577 	nc->pagecnt_bias--;
5578 	offset &= align_mask;
5579 	nc->offset = offset;
5580 
5581 	return nc->va + offset;
5582 }
5583 EXPORT_SYMBOL(page_frag_alloc_align);
5584 
5585 /*
5586  * Frees a page fragment allocated out of either a compound or order 0 page.
5587  */
5588 void page_frag_free(void *addr)
5589 {
5590 	struct page *page = virt_to_head_page(addr);
5591 
5592 	if (unlikely(put_page_testzero(page)))
5593 		free_the_page(page, compound_order(page));
5594 }
5595 EXPORT_SYMBOL(page_frag_free);
5596 
5597 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5598 		size_t size)
5599 {
5600 	if (addr) {
5601 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5602 		unsigned long used = addr + PAGE_ALIGN(size);
5603 
5604 		split_page(virt_to_page((void *)addr), order);
5605 		while (used < alloc_end) {
5606 			free_page(used);
5607 			used += PAGE_SIZE;
5608 		}
5609 	}
5610 	return (void *)addr;
5611 }
5612 
5613 /**
5614  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5615  * @size: the number of bytes to allocate
5616  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5617  *
5618  * This function is similar to alloc_pages(), except that it allocates the
5619  * minimum number of pages to satisfy the request.  alloc_pages() can only
5620  * allocate memory in power-of-two pages.
5621  *
5622  * This function is also limited by MAX_ORDER.
5623  *
5624  * Memory allocated by this function must be released by free_pages_exact().
5625  *
5626  * Return: pointer to the allocated area or %NULL in case of error.
5627  */
5628 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5629 {
5630 	unsigned int order = get_order(size);
5631 	unsigned long addr;
5632 
5633 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5634 		gfp_mask &= ~__GFP_COMP;
5635 
5636 	addr = __get_free_pages(gfp_mask, order);
5637 	return make_alloc_exact(addr, order, size);
5638 }
5639 EXPORT_SYMBOL(alloc_pages_exact);
5640 
5641 /**
5642  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5643  *			   pages on a node.
5644  * @nid: the preferred node ID where memory should be allocated
5645  * @size: the number of bytes to allocate
5646  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5647  *
5648  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5649  * back.
5650  *
5651  * Return: pointer to the allocated area or %NULL in case of error.
5652  */
5653 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5654 {
5655 	unsigned int order = get_order(size);
5656 	struct page *p;
5657 
5658 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5659 		gfp_mask &= ~__GFP_COMP;
5660 
5661 	p = alloc_pages_node(nid, gfp_mask, order);
5662 	if (!p)
5663 		return NULL;
5664 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5665 }
5666 
5667 /**
5668  * free_pages_exact - release memory allocated via alloc_pages_exact()
5669  * @virt: the value returned by alloc_pages_exact.
5670  * @size: size of allocation, same value as passed to alloc_pages_exact().
5671  *
5672  * Release the memory allocated by a previous call to alloc_pages_exact.
5673  */
5674 void free_pages_exact(void *virt, size_t size)
5675 {
5676 	unsigned long addr = (unsigned long)virt;
5677 	unsigned long end = addr + PAGE_ALIGN(size);
5678 
5679 	while (addr < end) {
5680 		free_page(addr);
5681 		addr += PAGE_SIZE;
5682 	}
5683 }
5684 EXPORT_SYMBOL(free_pages_exact);
5685 
5686 /**
5687  * nr_free_zone_pages - count number of pages beyond high watermark
5688  * @offset: The zone index of the highest zone
5689  *
5690  * nr_free_zone_pages() counts the number of pages which are beyond the
5691  * high watermark within all zones at or below a given zone index.  For each
5692  * zone, the number of pages is calculated as:
5693  *
5694  *     nr_free_zone_pages = managed_pages - high_pages
5695  *
5696  * Return: number of pages beyond high watermark.
5697  */
5698 static unsigned long nr_free_zone_pages(int offset)
5699 {
5700 	struct zoneref *z;
5701 	struct zone *zone;
5702 
5703 	/* Just pick one node, since fallback list is circular */
5704 	unsigned long sum = 0;
5705 
5706 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5707 
5708 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5709 		unsigned long size = zone_managed_pages(zone);
5710 		unsigned long high = high_wmark_pages(zone);
5711 		if (size > high)
5712 			sum += size - high;
5713 	}
5714 
5715 	return sum;
5716 }
5717 
5718 /**
5719  * nr_free_buffer_pages - count number of pages beyond high watermark
5720  *
5721  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5722  * watermark within ZONE_DMA and ZONE_NORMAL.
5723  *
5724  * Return: number of pages beyond high watermark within ZONE_DMA and
5725  * ZONE_NORMAL.
5726  */
5727 unsigned long nr_free_buffer_pages(void)
5728 {
5729 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5730 }
5731 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5732 
5733 static inline void show_node(struct zone *zone)
5734 {
5735 	if (IS_ENABLED(CONFIG_NUMA))
5736 		printk("Node %d ", zone_to_nid(zone));
5737 }
5738 
5739 long si_mem_available(void)
5740 {
5741 	long available;
5742 	unsigned long pagecache;
5743 	unsigned long wmark_low = 0;
5744 	unsigned long pages[NR_LRU_LISTS];
5745 	unsigned long reclaimable;
5746 	struct zone *zone;
5747 	int lru;
5748 
5749 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5750 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5751 
5752 	for_each_zone(zone)
5753 		wmark_low += low_wmark_pages(zone);
5754 
5755 	/*
5756 	 * Estimate the amount of memory available for userspace allocations,
5757 	 * without causing swapping.
5758 	 */
5759 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5760 
5761 	/*
5762 	 * Not all the page cache can be freed, otherwise the system will
5763 	 * start swapping. Assume at least half of the page cache, or the
5764 	 * low watermark worth of cache, needs to stay.
5765 	 */
5766 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5767 	pagecache -= min(pagecache / 2, wmark_low);
5768 	available += pagecache;
5769 
5770 	/*
5771 	 * Part of the reclaimable slab and other kernel memory consists of
5772 	 * items that are in use, and cannot be freed. Cap this estimate at the
5773 	 * low watermark.
5774 	 */
5775 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5776 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5777 	available += reclaimable - min(reclaimable / 2, wmark_low);
5778 
5779 	if (available < 0)
5780 		available = 0;
5781 	return available;
5782 }
5783 EXPORT_SYMBOL_GPL(si_mem_available);
5784 
5785 void si_meminfo(struct sysinfo *val)
5786 {
5787 	val->totalram = totalram_pages();
5788 	val->sharedram = global_node_page_state(NR_SHMEM);
5789 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5790 	val->bufferram = nr_blockdev_pages();
5791 	val->totalhigh = totalhigh_pages();
5792 	val->freehigh = nr_free_highpages();
5793 	val->mem_unit = PAGE_SIZE;
5794 }
5795 
5796 EXPORT_SYMBOL(si_meminfo);
5797 
5798 #ifdef CONFIG_NUMA
5799 void si_meminfo_node(struct sysinfo *val, int nid)
5800 {
5801 	int zone_type;		/* needs to be signed */
5802 	unsigned long managed_pages = 0;
5803 	unsigned long managed_highpages = 0;
5804 	unsigned long free_highpages = 0;
5805 	pg_data_t *pgdat = NODE_DATA(nid);
5806 
5807 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5808 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5809 	val->totalram = managed_pages;
5810 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5811 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5812 #ifdef CONFIG_HIGHMEM
5813 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5814 		struct zone *zone = &pgdat->node_zones[zone_type];
5815 
5816 		if (is_highmem(zone)) {
5817 			managed_highpages += zone_managed_pages(zone);
5818 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5819 		}
5820 	}
5821 	val->totalhigh = managed_highpages;
5822 	val->freehigh = free_highpages;
5823 #else
5824 	val->totalhigh = managed_highpages;
5825 	val->freehigh = free_highpages;
5826 #endif
5827 	val->mem_unit = PAGE_SIZE;
5828 }
5829 #endif
5830 
5831 /*
5832  * Determine whether the node should be displayed or not, depending on whether
5833  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5834  */
5835 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5836 {
5837 	if (!(flags & SHOW_MEM_FILTER_NODES))
5838 		return false;
5839 
5840 	/*
5841 	 * no node mask - aka implicit memory numa policy. Do not bother with
5842 	 * the synchronization - read_mems_allowed_begin - because we do not
5843 	 * have to be precise here.
5844 	 */
5845 	if (!nodemask)
5846 		nodemask = &cpuset_current_mems_allowed;
5847 
5848 	return !node_isset(nid, *nodemask);
5849 }
5850 
5851 #define K(x) ((x) << (PAGE_SHIFT-10))
5852 
5853 static void show_migration_types(unsigned char type)
5854 {
5855 	static const char types[MIGRATE_TYPES] = {
5856 		[MIGRATE_UNMOVABLE]	= 'U',
5857 		[MIGRATE_MOVABLE]	= 'M',
5858 		[MIGRATE_RECLAIMABLE]	= 'E',
5859 		[MIGRATE_HIGHATOMIC]	= 'H',
5860 #ifdef CONFIG_CMA
5861 		[MIGRATE_CMA]		= 'C',
5862 #endif
5863 #ifdef CONFIG_MEMORY_ISOLATION
5864 		[MIGRATE_ISOLATE]	= 'I',
5865 #endif
5866 	};
5867 	char tmp[MIGRATE_TYPES + 1];
5868 	char *p = tmp;
5869 	int i;
5870 
5871 	for (i = 0; i < MIGRATE_TYPES; i++) {
5872 		if (type & (1 << i))
5873 			*p++ = types[i];
5874 	}
5875 
5876 	*p = '\0';
5877 	printk(KERN_CONT "(%s) ", tmp);
5878 }
5879 
5880 /*
5881  * Show free area list (used inside shift_scroll-lock stuff)
5882  * We also calculate the percentage fragmentation. We do this by counting the
5883  * memory on each free list with the exception of the first item on the list.
5884  *
5885  * Bits in @filter:
5886  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5887  *   cpuset.
5888  */
5889 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5890 {
5891 	unsigned long free_pcp = 0;
5892 	int cpu;
5893 	struct zone *zone;
5894 	pg_data_t *pgdat;
5895 
5896 	for_each_populated_zone(zone) {
5897 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5898 			continue;
5899 
5900 		for_each_online_cpu(cpu)
5901 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5902 	}
5903 
5904 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5905 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5906 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5907 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5908 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5909 		" kernel_misc_reclaimable:%lu\n"
5910 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5911 		global_node_page_state(NR_ACTIVE_ANON),
5912 		global_node_page_state(NR_INACTIVE_ANON),
5913 		global_node_page_state(NR_ISOLATED_ANON),
5914 		global_node_page_state(NR_ACTIVE_FILE),
5915 		global_node_page_state(NR_INACTIVE_FILE),
5916 		global_node_page_state(NR_ISOLATED_FILE),
5917 		global_node_page_state(NR_UNEVICTABLE),
5918 		global_node_page_state(NR_FILE_DIRTY),
5919 		global_node_page_state(NR_WRITEBACK),
5920 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5921 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5922 		global_node_page_state(NR_FILE_MAPPED),
5923 		global_node_page_state(NR_SHMEM),
5924 		global_node_page_state(NR_PAGETABLE),
5925 		global_zone_page_state(NR_BOUNCE),
5926 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
5927 		global_zone_page_state(NR_FREE_PAGES),
5928 		free_pcp,
5929 		global_zone_page_state(NR_FREE_CMA_PAGES));
5930 
5931 	for_each_online_pgdat(pgdat) {
5932 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5933 			continue;
5934 
5935 		printk("Node %d"
5936 			" active_anon:%lukB"
5937 			" inactive_anon:%lukB"
5938 			" active_file:%lukB"
5939 			" inactive_file:%lukB"
5940 			" unevictable:%lukB"
5941 			" isolated(anon):%lukB"
5942 			" isolated(file):%lukB"
5943 			" mapped:%lukB"
5944 			" dirty:%lukB"
5945 			" writeback:%lukB"
5946 			" shmem:%lukB"
5947 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5948 			" shmem_thp: %lukB"
5949 			" shmem_pmdmapped: %lukB"
5950 			" anon_thp: %lukB"
5951 #endif
5952 			" writeback_tmp:%lukB"
5953 			" kernel_stack:%lukB"
5954 #ifdef CONFIG_SHADOW_CALL_STACK
5955 			" shadow_call_stack:%lukB"
5956 #endif
5957 			" pagetables:%lukB"
5958 			" all_unreclaimable? %s"
5959 			"\n",
5960 			pgdat->node_id,
5961 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5962 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5963 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5964 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5965 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5966 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5967 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5968 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5969 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5970 			K(node_page_state(pgdat, NR_WRITEBACK)),
5971 			K(node_page_state(pgdat, NR_SHMEM)),
5972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5973 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
5974 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5975 			K(node_page_state(pgdat, NR_ANON_THPS)),
5976 #endif
5977 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5978 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
5979 #ifdef CONFIG_SHADOW_CALL_STACK
5980 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
5981 #endif
5982 			K(node_page_state(pgdat, NR_PAGETABLE)),
5983 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5984 				"yes" : "no");
5985 	}
5986 
5987 	for_each_populated_zone(zone) {
5988 		int i;
5989 
5990 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5991 			continue;
5992 
5993 		free_pcp = 0;
5994 		for_each_online_cpu(cpu)
5995 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5996 
5997 		show_node(zone);
5998 		printk(KERN_CONT
5999 			"%s"
6000 			" free:%lukB"
6001 			" min:%lukB"
6002 			" low:%lukB"
6003 			" high:%lukB"
6004 			" reserved_highatomic:%luKB"
6005 			" active_anon:%lukB"
6006 			" inactive_anon:%lukB"
6007 			" active_file:%lukB"
6008 			" inactive_file:%lukB"
6009 			" unevictable:%lukB"
6010 			" writepending:%lukB"
6011 			" present:%lukB"
6012 			" managed:%lukB"
6013 			" mlocked:%lukB"
6014 			" bounce:%lukB"
6015 			" free_pcp:%lukB"
6016 			" local_pcp:%ukB"
6017 			" free_cma:%lukB"
6018 			"\n",
6019 			zone->name,
6020 			K(zone_page_state(zone, NR_FREE_PAGES)),
6021 			K(min_wmark_pages(zone)),
6022 			K(low_wmark_pages(zone)),
6023 			K(high_wmark_pages(zone)),
6024 			K(zone->nr_reserved_highatomic),
6025 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6026 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6027 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6028 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6029 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6030 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6031 			K(zone->present_pages),
6032 			K(zone_managed_pages(zone)),
6033 			K(zone_page_state(zone, NR_MLOCK)),
6034 			K(zone_page_state(zone, NR_BOUNCE)),
6035 			K(free_pcp),
6036 			K(this_cpu_read(zone->per_cpu_pageset->count)),
6037 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6038 		printk("lowmem_reserve[]:");
6039 		for (i = 0; i < MAX_NR_ZONES; i++)
6040 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6041 		printk(KERN_CONT "\n");
6042 	}
6043 
6044 	for_each_populated_zone(zone) {
6045 		unsigned int order;
6046 		unsigned long nr[MAX_ORDER], flags, total = 0;
6047 		unsigned char types[MAX_ORDER];
6048 
6049 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6050 			continue;
6051 		show_node(zone);
6052 		printk(KERN_CONT "%s: ", zone->name);
6053 
6054 		spin_lock_irqsave(&zone->lock, flags);
6055 		for (order = 0; order < MAX_ORDER; order++) {
6056 			struct free_area *area = &zone->free_area[order];
6057 			int type;
6058 
6059 			nr[order] = area->nr_free;
6060 			total += nr[order] << order;
6061 
6062 			types[order] = 0;
6063 			for (type = 0; type < MIGRATE_TYPES; type++) {
6064 				if (!free_area_empty(area, type))
6065 					types[order] |= 1 << type;
6066 			}
6067 		}
6068 		spin_unlock_irqrestore(&zone->lock, flags);
6069 		for (order = 0; order < MAX_ORDER; order++) {
6070 			printk(KERN_CONT "%lu*%lukB ",
6071 			       nr[order], K(1UL) << order);
6072 			if (nr[order])
6073 				show_migration_types(types[order]);
6074 		}
6075 		printk(KERN_CONT "= %lukB\n", K(total));
6076 	}
6077 
6078 	hugetlb_show_meminfo();
6079 
6080 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6081 
6082 	show_swap_cache_info();
6083 }
6084 
6085 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6086 {
6087 	zoneref->zone = zone;
6088 	zoneref->zone_idx = zone_idx(zone);
6089 }
6090 
6091 /*
6092  * Builds allocation fallback zone lists.
6093  *
6094  * Add all populated zones of a node to the zonelist.
6095  */
6096 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6097 {
6098 	struct zone *zone;
6099 	enum zone_type zone_type = MAX_NR_ZONES;
6100 	int nr_zones = 0;
6101 
6102 	do {
6103 		zone_type--;
6104 		zone = pgdat->node_zones + zone_type;
6105 		if (managed_zone(zone)) {
6106 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6107 			check_highest_zone(zone_type);
6108 		}
6109 	} while (zone_type);
6110 
6111 	return nr_zones;
6112 }
6113 
6114 #ifdef CONFIG_NUMA
6115 
6116 static int __parse_numa_zonelist_order(char *s)
6117 {
6118 	/*
6119 	 * We used to support different zonelists modes but they turned
6120 	 * out to be just not useful. Let's keep the warning in place
6121 	 * if somebody still use the cmd line parameter so that we do
6122 	 * not fail it silently
6123 	 */
6124 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6125 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
6126 		return -EINVAL;
6127 	}
6128 	return 0;
6129 }
6130 
6131 char numa_zonelist_order[] = "Node";
6132 
6133 /*
6134  * sysctl handler for numa_zonelist_order
6135  */
6136 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6137 		void *buffer, size_t *length, loff_t *ppos)
6138 {
6139 	if (write)
6140 		return __parse_numa_zonelist_order(buffer);
6141 	return proc_dostring(table, write, buffer, length, ppos);
6142 }
6143 
6144 
6145 #define MAX_NODE_LOAD (nr_online_nodes)
6146 static int node_load[MAX_NUMNODES];
6147 
6148 /**
6149  * find_next_best_node - find the next node that should appear in a given node's fallback list
6150  * @node: node whose fallback list we're appending
6151  * @used_node_mask: nodemask_t of already used nodes
6152  *
6153  * We use a number of factors to determine which is the next node that should
6154  * appear on a given node's fallback list.  The node should not have appeared
6155  * already in @node's fallback list, and it should be the next closest node
6156  * according to the distance array (which contains arbitrary distance values
6157  * from each node to each node in the system), and should also prefer nodes
6158  * with no CPUs, since presumably they'll have very little allocation pressure
6159  * on them otherwise.
6160  *
6161  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6162  */
6163 int find_next_best_node(int node, nodemask_t *used_node_mask)
6164 {
6165 	int n, val;
6166 	int min_val = INT_MAX;
6167 	int best_node = NUMA_NO_NODE;
6168 
6169 	/* Use the local node if we haven't already */
6170 	if (!node_isset(node, *used_node_mask)) {
6171 		node_set(node, *used_node_mask);
6172 		return node;
6173 	}
6174 
6175 	for_each_node_state(n, N_MEMORY) {
6176 
6177 		/* Don't want a node to appear more than once */
6178 		if (node_isset(n, *used_node_mask))
6179 			continue;
6180 
6181 		/* Use the distance array to find the distance */
6182 		val = node_distance(node, n);
6183 
6184 		/* Penalize nodes under us ("prefer the next node") */
6185 		val += (n < node);
6186 
6187 		/* Give preference to headless and unused nodes */
6188 		if (!cpumask_empty(cpumask_of_node(n)))
6189 			val += PENALTY_FOR_NODE_WITH_CPUS;
6190 
6191 		/* Slight preference for less loaded node */
6192 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6193 		val += node_load[n];
6194 
6195 		if (val < min_val) {
6196 			min_val = val;
6197 			best_node = n;
6198 		}
6199 	}
6200 
6201 	if (best_node >= 0)
6202 		node_set(best_node, *used_node_mask);
6203 
6204 	return best_node;
6205 }
6206 
6207 
6208 /*
6209  * Build zonelists ordered by node and zones within node.
6210  * This results in maximum locality--normal zone overflows into local
6211  * DMA zone, if any--but risks exhausting DMA zone.
6212  */
6213 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6214 		unsigned nr_nodes)
6215 {
6216 	struct zoneref *zonerefs;
6217 	int i;
6218 
6219 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6220 
6221 	for (i = 0; i < nr_nodes; i++) {
6222 		int nr_zones;
6223 
6224 		pg_data_t *node = NODE_DATA(node_order[i]);
6225 
6226 		nr_zones = build_zonerefs_node(node, zonerefs);
6227 		zonerefs += nr_zones;
6228 	}
6229 	zonerefs->zone = NULL;
6230 	zonerefs->zone_idx = 0;
6231 }
6232 
6233 /*
6234  * Build gfp_thisnode zonelists
6235  */
6236 static void build_thisnode_zonelists(pg_data_t *pgdat)
6237 {
6238 	struct zoneref *zonerefs;
6239 	int nr_zones;
6240 
6241 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6242 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6243 	zonerefs += nr_zones;
6244 	zonerefs->zone = NULL;
6245 	zonerefs->zone_idx = 0;
6246 }
6247 
6248 /*
6249  * Build zonelists ordered by zone and nodes within zones.
6250  * This results in conserving DMA zone[s] until all Normal memory is
6251  * exhausted, but results in overflowing to remote node while memory
6252  * may still exist in local DMA zone.
6253  */
6254 
6255 static void build_zonelists(pg_data_t *pgdat)
6256 {
6257 	static int node_order[MAX_NUMNODES];
6258 	int node, load, nr_nodes = 0;
6259 	nodemask_t used_mask = NODE_MASK_NONE;
6260 	int local_node, prev_node;
6261 
6262 	/* NUMA-aware ordering of nodes */
6263 	local_node = pgdat->node_id;
6264 	load = nr_online_nodes;
6265 	prev_node = local_node;
6266 
6267 	memset(node_order, 0, sizeof(node_order));
6268 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6269 		/*
6270 		 * We don't want to pressure a particular node.
6271 		 * So adding penalty to the first node in same
6272 		 * distance group to make it round-robin.
6273 		 */
6274 		if (node_distance(local_node, node) !=
6275 		    node_distance(local_node, prev_node))
6276 			node_load[node] = load;
6277 
6278 		node_order[nr_nodes++] = node;
6279 		prev_node = node;
6280 		load--;
6281 	}
6282 
6283 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6284 	build_thisnode_zonelists(pgdat);
6285 }
6286 
6287 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6288 /*
6289  * Return node id of node used for "local" allocations.
6290  * I.e., first node id of first zone in arg node's generic zonelist.
6291  * Used for initializing percpu 'numa_mem', which is used primarily
6292  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6293  */
6294 int local_memory_node(int node)
6295 {
6296 	struct zoneref *z;
6297 
6298 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6299 				   gfp_zone(GFP_KERNEL),
6300 				   NULL);
6301 	return zone_to_nid(z->zone);
6302 }
6303 #endif
6304 
6305 static void setup_min_unmapped_ratio(void);
6306 static void setup_min_slab_ratio(void);
6307 #else	/* CONFIG_NUMA */
6308 
6309 static void build_zonelists(pg_data_t *pgdat)
6310 {
6311 	int node, local_node;
6312 	struct zoneref *zonerefs;
6313 	int nr_zones;
6314 
6315 	local_node = pgdat->node_id;
6316 
6317 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6318 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6319 	zonerefs += nr_zones;
6320 
6321 	/*
6322 	 * Now we build the zonelist so that it contains the zones
6323 	 * of all the other nodes.
6324 	 * We don't want to pressure a particular node, so when
6325 	 * building the zones for node N, we make sure that the
6326 	 * zones coming right after the local ones are those from
6327 	 * node N+1 (modulo N)
6328 	 */
6329 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6330 		if (!node_online(node))
6331 			continue;
6332 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6333 		zonerefs += nr_zones;
6334 	}
6335 	for (node = 0; node < local_node; node++) {
6336 		if (!node_online(node))
6337 			continue;
6338 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6339 		zonerefs += nr_zones;
6340 	}
6341 
6342 	zonerefs->zone = NULL;
6343 	zonerefs->zone_idx = 0;
6344 }
6345 
6346 #endif	/* CONFIG_NUMA */
6347 
6348 /*
6349  * Boot pageset table. One per cpu which is going to be used for all
6350  * zones and all nodes. The parameters will be set in such a way
6351  * that an item put on a list will immediately be handed over to
6352  * the buddy list. This is safe since pageset manipulation is done
6353  * with interrupts disabled.
6354  *
6355  * The boot_pagesets must be kept even after bootup is complete for
6356  * unused processors and/or zones. They do play a role for bootstrapping
6357  * hotplugged processors.
6358  *
6359  * zoneinfo_show() and maybe other functions do
6360  * not check if the processor is online before following the pageset pointer.
6361  * Other parts of the kernel may not check if the zone is available.
6362  */
6363 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6364 /* These effectively disable the pcplists in the boot pageset completely */
6365 #define BOOT_PAGESET_HIGH	0
6366 #define BOOT_PAGESET_BATCH	1
6367 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6368 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6369 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6370 
6371 static void __build_all_zonelists(void *data)
6372 {
6373 	int nid;
6374 	int __maybe_unused cpu;
6375 	pg_data_t *self = data;
6376 	static DEFINE_SPINLOCK(lock);
6377 
6378 	spin_lock(&lock);
6379 
6380 #ifdef CONFIG_NUMA
6381 	memset(node_load, 0, sizeof(node_load));
6382 #endif
6383 
6384 	/*
6385 	 * This node is hotadded and no memory is yet present.   So just
6386 	 * building zonelists is fine - no need to touch other nodes.
6387 	 */
6388 	if (self && !node_online(self->node_id)) {
6389 		build_zonelists(self);
6390 	} else {
6391 		for_each_online_node(nid) {
6392 			pg_data_t *pgdat = NODE_DATA(nid);
6393 
6394 			build_zonelists(pgdat);
6395 		}
6396 
6397 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6398 		/*
6399 		 * We now know the "local memory node" for each node--
6400 		 * i.e., the node of the first zone in the generic zonelist.
6401 		 * Set up numa_mem percpu variable for on-line cpus.  During
6402 		 * boot, only the boot cpu should be on-line;  we'll init the
6403 		 * secondary cpus' numa_mem as they come on-line.  During
6404 		 * node/memory hotplug, we'll fixup all on-line cpus.
6405 		 */
6406 		for_each_online_cpu(cpu)
6407 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6408 #endif
6409 	}
6410 
6411 	spin_unlock(&lock);
6412 }
6413 
6414 static noinline void __init
6415 build_all_zonelists_init(void)
6416 {
6417 	int cpu;
6418 
6419 	__build_all_zonelists(NULL);
6420 
6421 	/*
6422 	 * Initialize the boot_pagesets that are going to be used
6423 	 * for bootstrapping processors. The real pagesets for
6424 	 * each zone will be allocated later when the per cpu
6425 	 * allocator is available.
6426 	 *
6427 	 * boot_pagesets are used also for bootstrapping offline
6428 	 * cpus if the system is already booted because the pagesets
6429 	 * are needed to initialize allocators on a specific cpu too.
6430 	 * F.e. the percpu allocator needs the page allocator which
6431 	 * needs the percpu allocator in order to allocate its pagesets
6432 	 * (a chicken-egg dilemma).
6433 	 */
6434 	for_each_possible_cpu(cpu)
6435 		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6436 
6437 	mminit_verify_zonelist();
6438 	cpuset_init_current_mems_allowed();
6439 }
6440 
6441 /*
6442  * unless system_state == SYSTEM_BOOTING.
6443  *
6444  * __ref due to call of __init annotated helper build_all_zonelists_init
6445  * [protected by SYSTEM_BOOTING].
6446  */
6447 void __ref build_all_zonelists(pg_data_t *pgdat)
6448 {
6449 	unsigned long vm_total_pages;
6450 
6451 	if (system_state == SYSTEM_BOOTING) {
6452 		build_all_zonelists_init();
6453 	} else {
6454 		__build_all_zonelists(pgdat);
6455 		/* cpuset refresh routine should be here */
6456 	}
6457 	/* Get the number of free pages beyond high watermark in all zones. */
6458 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6459 	/*
6460 	 * Disable grouping by mobility if the number of pages in the
6461 	 * system is too low to allow the mechanism to work. It would be
6462 	 * more accurate, but expensive to check per-zone. This check is
6463 	 * made on memory-hotadd so a system can start with mobility
6464 	 * disabled and enable it later
6465 	 */
6466 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6467 		page_group_by_mobility_disabled = 1;
6468 	else
6469 		page_group_by_mobility_disabled = 0;
6470 
6471 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6472 		nr_online_nodes,
6473 		page_group_by_mobility_disabled ? "off" : "on",
6474 		vm_total_pages);
6475 #ifdef CONFIG_NUMA
6476 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6477 #endif
6478 }
6479 
6480 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6481 static bool __meminit
6482 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6483 {
6484 	static struct memblock_region *r;
6485 
6486 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6487 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6488 			for_each_mem_region(r) {
6489 				if (*pfn < memblock_region_memory_end_pfn(r))
6490 					break;
6491 			}
6492 		}
6493 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6494 		    memblock_is_mirror(r)) {
6495 			*pfn = memblock_region_memory_end_pfn(r);
6496 			return true;
6497 		}
6498 	}
6499 	return false;
6500 }
6501 
6502 /*
6503  * Initially all pages are reserved - free ones are freed
6504  * up by memblock_free_all() once the early boot process is
6505  * done. Non-atomic initialization, single-pass.
6506  *
6507  * All aligned pageblocks are initialized to the specified migratetype
6508  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6509  * zone stats (e.g., nr_isolate_pageblock) are touched.
6510  */
6511 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6512 		unsigned long start_pfn, unsigned long zone_end_pfn,
6513 		enum meminit_context context,
6514 		struct vmem_altmap *altmap, int migratetype)
6515 {
6516 	unsigned long pfn, end_pfn = start_pfn + size;
6517 	struct page *page;
6518 
6519 	if (highest_memmap_pfn < end_pfn - 1)
6520 		highest_memmap_pfn = end_pfn - 1;
6521 
6522 #ifdef CONFIG_ZONE_DEVICE
6523 	/*
6524 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6525 	 * memory. We limit the total number of pages to initialize to just
6526 	 * those that might contain the memory mapping. We will defer the
6527 	 * ZONE_DEVICE page initialization until after we have released
6528 	 * the hotplug lock.
6529 	 */
6530 	if (zone == ZONE_DEVICE) {
6531 		if (!altmap)
6532 			return;
6533 
6534 		if (start_pfn == altmap->base_pfn)
6535 			start_pfn += altmap->reserve;
6536 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6537 	}
6538 #endif
6539 
6540 	for (pfn = start_pfn; pfn < end_pfn; ) {
6541 		/*
6542 		 * There can be holes in boot-time mem_map[]s handed to this
6543 		 * function.  They do not exist on hotplugged memory.
6544 		 */
6545 		if (context == MEMINIT_EARLY) {
6546 			if (overlap_memmap_init(zone, &pfn))
6547 				continue;
6548 			if (defer_init(nid, pfn, zone_end_pfn))
6549 				break;
6550 		}
6551 
6552 		page = pfn_to_page(pfn);
6553 		__init_single_page(page, pfn, zone, nid);
6554 		if (context == MEMINIT_HOTPLUG)
6555 			__SetPageReserved(page);
6556 
6557 		/*
6558 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6559 		 * such that unmovable allocations won't be scattered all
6560 		 * over the place during system boot.
6561 		 */
6562 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6563 			set_pageblock_migratetype(page, migratetype);
6564 			cond_resched();
6565 		}
6566 		pfn++;
6567 	}
6568 }
6569 
6570 #ifdef CONFIG_ZONE_DEVICE
6571 void __ref memmap_init_zone_device(struct zone *zone,
6572 				   unsigned long start_pfn,
6573 				   unsigned long nr_pages,
6574 				   struct dev_pagemap *pgmap)
6575 {
6576 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6577 	struct pglist_data *pgdat = zone->zone_pgdat;
6578 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6579 	unsigned long zone_idx = zone_idx(zone);
6580 	unsigned long start = jiffies;
6581 	int nid = pgdat->node_id;
6582 
6583 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6584 		return;
6585 
6586 	/*
6587 	 * The call to memmap_init should have already taken care
6588 	 * of the pages reserved for the memmap, so we can just jump to
6589 	 * the end of that region and start processing the device pages.
6590 	 */
6591 	if (altmap) {
6592 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6593 		nr_pages = end_pfn - start_pfn;
6594 	}
6595 
6596 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6597 		struct page *page = pfn_to_page(pfn);
6598 
6599 		__init_single_page(page, pfn, zone_idx, nid);
6600 
6601 		/*
6602 		 * Mark page reserved as it will need to wait for onlining
6603 		 * phase for it to be fully associated with a zone.
6604 		 *
6605 		 * We can use the non-atomic __set_bit operation for setting
6606 		 * the flag as we are still initializing the pages.
6607 		 */
6608 		__SetPageReserved(page);
6609 
6610 		/*
6611 		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6612 		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6613 		 * ever freed or placed on a driver-private list.
6614 		 */
6615 		page->pgmap = pgmap;
6616 		page->zone_device_data = NULL;
6617 
6618 		/*
6619 		 * Mark the block movable so that blocks are reserved for
6620 		 * movable at startup. This will force kernel allocations
6621 		 * to reserve their blocks rather than leaking throughout
6622 		 * the address space during boot when many long-lived
6623 		 * kernel allocations are made.
6624 		 *
6625 		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6626 		 * because this is done early in section_activate()
6627 		 */
6628 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6629 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6630 			cond_resched();
6631 		}
6632 	}
6633 
6634 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6635 		nr_pages, jiffies_to_msecs(jiffies - start));
6636 }
6637 
6638 #endif
6639 static void __meminit zone_init_free_lists(struct zone *zone)
6640 {
6641 	unsigned int order, t;
6642 	for_each_migratetype_order(order, t) {
6643 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6644 		zone->free_area[order].nr_free = 0;
6645 	}
6646 }
6647 
6648 /*
6649  * Only struct pages that correspond to ranges defined by memblock.memory
6650  * are zeroed and initialized by going through __init_single_page() during
6651  * memmap_init_zone_range().
6652  *
6653  * But, there could be struct pages that correspond to holes in
6654  * memblock.memory. This can happen because of the following reasons:
6655  * - physical memory bank size is not necessarily the exact multiple of the
6656  *   arbitrary section size
6657  * - early reserved memory may not be listed in memblock.memory
6658  * - memory layouts defined with memmap= kernel parameter may not align
6659  *   nicely with memmap sections
6660  *
6661  * Explicitly initialize those struct pages so that:
6662  * - PG_Reserved is set
6663  * - zone and node links point to zone and node that span the page if the
6664  *   hole is in the middle of a zone
6665  * - zone and node links point to adjacent zone/node if the hole falls on
6666  *   the zone boundary; the pages in such holes will be prepended to the
6667  *   zone/node above the hole except for the trailing pages in the last
6668  *   section that will be appended to the zone/node below.
6669  */
6670 static void __init init_unavailable_range(unsigned long spfn,
6671 					  unsigned long epfn,
6672 					  int zone, int node)
6673 {
6674 	unsigned long pfn;
6675 	u64 pgcnt = 0;
6676 
6677 	for (pfn = spfn; pfn < epfn; pfn++) {
6678 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6679 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6680 				+ pageblock_nr_pages - 1;
6681 			continue;
6682 		}
6683 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
6684 		__SetPageReserved(pfn_to_page(pfn));
6685 		pgcnt++;
6686 	}
6687 
6688 	if (pgcnt)
6689 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6690 			node, zone_names[zone], pgcnt);
6691 }
6692 
6693 static void __init memmap_init_zone_range(struct zone *zone,
6694 					  unsigned long start_pfn,
6695 					  unsigned long end_pfn,
6696 					  unsigned long *hole_pfn)
6697 {
6698 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6699 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6700 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6701 
6702 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6703 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6704 
6705 	if (start_pfn >= end_pfn)
6706 		return;
6707 
6708 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6709 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6710 
6711 	if (*hole_pfn < start_pfn)
6712 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6713 
6714 	*hole_pfn = end_pfn;
6715 }
6716 
6717 static void __init memmap_init(void)
6718 {
6719 	unsigned long start_pfn, end_pfn;
6720 	unsigned long hole_pfn = 0;
6721 	int i, j, zone_id = 0, nid;
6722 
6723 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6724 		struct pglist_data *node = NODE_DATA(nid);
6725 
6726 		for (j = 0; j < MAX_NR_ZONES; j++) {
6727 			struct zone *zone = node->node_zones + j;
6728 
6729 			if (!populated_zone(zone))
6730 				continue;
6731 
6732 			memmap_init_zone_range(zone, start_pfn, end_pfn,
6733 					       &hole_pfn);
6734 			zone_id = j;
6735 		}
6736 	}
6737 
6738 #ifdef CONFIG_SPARSEMEM
6739 	/*
6740 	 * Initialize the memory map for hole in the range [memory_end,
6741 	 * section_end].
6742 	 * Append the pages in this hole to the highest zone in the last
6743 	 * node.
6744 	 * The call to init_unavailable_range() is outside the ifdef to
6745 	 * silence the compiler warining about zone_id set but not used;
6746 	 * for FLATMEM it is a nop anyway
6747 	 */
6748 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6749 	if (hole_pfn < end_pfn)
6750 #endif
6751 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
6752 }
6753 
6754 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
6755 			  phys_addr_t min_addr, int nid, bool exact_nid)
6756 {
6757 	void *ptr;
6758 
6759 	if (exact_nid)
6760 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
6761 						   MEMBLOCK_ALLOC_ACCESSIBLE,
6762 						   nid);
6763 	else
6764 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
6765 						 MEMBLOCK_ALLOC_ACCESSIBLE,
6766 						 nid);
6767 
6768 	if (ptr && size > 0)
6769 		page_init_poison(ptr, size);
6770 
6771 	return ptr;
6772 }
6773 
6774 static int zone_batchsize(struct zone *zone)
6775 {
6776 #ifdef CONFIG_MMU
6777 	int batch;
6778 
6779 	/*
6780 	 * The number of pages to batch allocate is either ~0.1%
6781 	 * of the zone or 1MB, whichever is smaller. The batch
6782 	 * size is striking a balance between allocation latency
6783 	 * and zone lock contention.
6784 	 */
6785 	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
6786 	batch /= 4;		/* We effectively *= 4 below */
6787 	if (batch < 1)
6788 		batch = 1;
6789 
6790 	/*
6791 	 * Clamp the batch to a 2^n - 1 value. Having a power
6792 	 * of 2 value was found to be more likely to have
6793 	 * suboptimal cache aliasing properties in some cases.
6794 	 *
6795 	 * For example if 2 tasks are alternately allocating
6796 	 * batches of pages, one task can end up with a lot
6797 	 * of pages of one half of the possible page colors
6798 	 * and the other with pages of the other colors.
6799 	 */
6800 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6801 
6802 	return batch;
6803 
6804 #else
6805 	/* The deferral and batching of frees should be suppressed under NOMMU
6806 	 * conditions.
6807 	 *
6808 	 * The problem is that NOMMU needs to be able to allocate large chunks
6809 	 * of contiguous memory as there's no hardware page translation to
6810 	 * assemble apparent contiguous memory from discontiguous pages.
6811 	 *
6812 	 * Queueing large contiguous runs of pages for batching, however,
6813 	 * causes the pages to actually be freed in smaller chunks.  As there
6814 	 * can be a significant delay between the individual batches being
6815 	 * recycled, this leads to the once large chunks of space being
6816 	 * fragmented and becoming unavailable for high-order allocations.
6817 	 */
6818 	return 0;
6819 #endif
6820 }
6821 
6822 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
6823 {
6824 #ifdef CONFIG_MMU
6825 	int high;
6826 	int nr_split_cpus;
6827 	unsigned long total_pages;
6828 
6829 	if (!percpu_pagelist_high_fraction) {
6830 		/*
6831 		 * By default, the high value of the pcp is based on the zone
6832 		 * low watermark so that if they are full then background
6833 		 * reclaim will not be started prematurely.
6834 		 */
6835 		total_pages = low_wmark_pages(zone);
6836 	} else {
6837 		/*
6838 		 * If percpu_pagelist_high_fraction is configured, the high
6839 		 * value is based on a fraction of the managed pages in the
6840 		 * zone.
6841 		 */
6842 		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
6843 	}
6844 
6845 	/*
6846 	 * Split the high value across all online CPUs local to the zone. Note
6847 	 * that early in boot that CPUs may not be online yet and that during
6848 	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
6849 	 * onlined. For memory nodes that have no CPUs, split pcp->high across
6850 	 * all online CPUs to mitigate the risk that reclaim is triggered
6851 	 * prematurely due to pages stored on pcp lists.
6852 	 */
6853 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
6854 	if (!nr_split_cpus)
6855 		nr_split_cpus = num_online_cpus();
6856 	high = total_pages / nr_split_cpus;
6857 
6858 	/*
6859 	 * Ensure high is at least batch*4. The multiple is based on the
6860 	 * historical relationship between high and batch.
6861 	 */
6862 	high = max(high, batch << 2);
6863 
6864 	return high;
6865 #else
6866 	return 0;
6867 #endif
6868 }
6869 
6870 /*
6871  * pcp->high and pcp->batch values are related and generally batch is lower
6872  * than high. They are also related to pcp->count such that count is lower
6873  * than high, and as soon as it reaches high, the pcplist is flushed.
6874  *
6875  * However, guaranteeing these relations at all times would require e.g. write
6876  * barriers here but also careful usage of read barriers at the read side, and
6877  * thus be prone to error and bad for performance. Thus the update only prevents
6878  * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6879  * can cope with those fields changing asynchronously, and fully trust only the
6880  * pcp->count field on the local CPU with interrupts disabled.
6881  *
6882  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6883  * outside of boot time (or some other assurance that no concurrent updaters
6884  * exist).
6885  */
6886 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6887 		unsigned long batch)
6888 {
6889 	WRITE_ONCE(pcp->batch, batch);
6890 	WRITE_ONCE(pcp->high, high);
6891 }
6892 
6893 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6894 {
6895 	int pindex;
6896 
6897 	memset(pcp, 0, sizeof(*pcp));
6898 	memset(pzstats, 0, sizeof(*pzstats));
6899 
6900 	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6901 		INIT_LIST_HEAD(&pcp->lists[pindex]);
6902 
6903 	/*
6904 	 * Set batch and high values safe for a boot pageset. A true percpu
6905 	 * pageset's initialization will update them subsequently. Here we don't
6906 	 * need to be as careful as pageset_update() as nobody can access the
6907 	 * pageset yet.
6908 	 */
6909 	pcp->high = BOOT_PAGESET_HIGH;
6910 	pcp->batch = BOOT_PAGESET_BATCH;
6911 	pcp->free_factor = 0;
6912 }
6913 
6914 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
6915 		unsigned long batch)
6916 {
6917 	struct per_cpu_pages *pcp;
6918 	int cpu;
6919 
6920 	for_each_possible_cpu(cpu) {
6921 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6922 		pageset_update(pcp, high, batch);
6923 	}
6924 }
6925 
6926 /*
6927  * Calculate and set new high and batch values for all per-cpu pagesets of a
6928  * zone based on the zone's size.
6929  */
6930 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6931 {
6932 	int new_high, new_batch;
6933 
6934 	new_batch = max(1, zone_batchsize(zone));
6935 	new_high = zone_highsize(zone, new_batch, cpu_online);
6936 
6937 	if (zone->pageset_high == new_high &&
6938 	    zone->pageset_batch == new_batch)
6939 		return;
6940 
6941 	zone->pageset_high = new_high;
6942 	zone->pageset_batch = new_batch;
6943 
6944 	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
6945 }
6946 
6947 void __meminit setup_zone_pageset(struct zone *zone)
6948 {
6949 	int cpu;
6950 
6951 	/* Size may be 0 on !SMP && !NUMA */
6952 	if (sizeof(struct per_cpu_zonestat) > 0)
6953 		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6954 
6955 	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6956 	for_each_possible_cpu(cpu) {
6957 		struct per_cpu_pages *pcp;
6958 		struct per_cpu_zonestat *pzstats;
6959 
6960 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6961 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6962 		per_cpu_pages_init(pcp, pzstats);
6963 	}
6964 
6965 	zone_set_pageset_high_and_batch(zone, 0);
6966 }
6967 
6968 /*
6969  * Allocate per cpu pagesets and initialize them.
6970  * Before this call only boot pagesets were available.
6971  */
6972 void __init setup_per_cpu_pageset(void)
6973 {
6974 	struct pglist_data *pgdat;
6975 	struct zone *zone;
6976 	int __maybe_unused cpu;
6977 
6978 	for_each_populated_zone(zone)
6979 		setup_zone_pageset(zone);
6980 
6981 #ifdef CONFIG_NUMA
6982 	/*
6983 	 * Unpopulated zones continue using the boot pagesets.
6984 	 * The numa stats for these pagesets need to be reset.
6985 	 * Otherwise, they will end up skewing the stats of
6986 	 * the nodes these zones are associated with.
6987 	 */
6988 	for_each_possible_cpu(cpu) {
6989 		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6990 		memset(pzstats->vm_numa_event, 0,
6991 		       sizeof(pzstats->vm_numa_event));
6992 	}
6993 #endif
6994 
6995 	for_each_online_pgdat(pgdat)
6996 		pgdat->per_cpu_nodestats =
6997 			alloc_percpu(struct per_cpu_nodestat);
6998 }
6999 
7000 static __meminit void zone_pcp_init(struct zone *zone)
7001 {
7002 	/*
7003 	 * per cpu subsystem is not up at this point. The following code
7004 	 * relies on the ability of the linker to provide the
7005 	 * offset of a (static) per cpu variable into the per cpu area.
7006 	 */
7007 	zone->per_cpu_pageset = &boot_pageset;
7008 	zone->per_cpu_zonestats = &boot_zonestats;
7009 	zone->pageset_high = BOOT_PAGESET_HIGH;
7010 	zone->pageset_batch = BOOT_PAGESET_BATCH;
7011 
7012 	if (populated_zone(zone))
7013 		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7014 			 zone->present_pages, zone_batchsize(zone));
7015 }
7016 
7017 void __meminit init_currently_empty_zone(struct zone *zone,
7018 					unsigned long zone_start_pfn,
7019 					unsigned long size)
7020 {
7021 	struct pglist_data *pgdat = zone->zone_pgdat;
7022 	int zone_idx = zone_idx(zone) + 1;
7023 
7024 	if (zone_idx > pgdat->nr_zones)
7025 		pgdat->nr_zones = zone_idx;
7026 
7027 	zone->zone_start_pfn = zone_start_pfn;
7028 
7029 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
7030 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
7031 			pgdat->node_id,
7032 			(unsigned long)zone_idx(zone),
7033 			zone_start_pfn, (zone_start_pfn + size));
7034 
7035 	zone_init_free_lists(zone);
7036 	zone->initialized = 1;
7037 }
7038 
7039 /**
7040  * get_pfn_range_for_nid - Return the start and end page frames for a node
7041  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7042  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7043  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7044  *
7045  * It returns the start and end page frame of a node based on information
7046  * provided by memblock_set_node(). If called for a node
7047  * with no available memory, a warning is printed and the start and end
7048  * PFNs will be 0.
7049  */
7050 void __init get_pfn_range_for_nid(unsigned int nid,
7051 			unsigned long *start_pfn, unsigned long *end_pfn)
7052 {
7053 	unsigned long this_start_pfn, this_end_pfn;
7054 	int i;
7055 
7056 	*start_pfn = -1UL;
7057 	*end_pfn = 0;
7058 
7059 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7060 		*start_pfn = min(*start_pfn, this_start_pfn);
7061 		*end_pfn = max(*end_pfn, this_end_pfn);
7062 	}
7063 
7064 	if (*start_pfn == -1UL)
7065 		*start_pfn = 0;
7066 }
7067 
7068 /*
7069  * This finds a zone that can be used for ZONE_MOVABLE pages. The
7070  * assumption is made that zones within a node are ordered in monotonic
7071  * increasing memory addresses so that the "highest" populated zone is used
7072  */
7073 static void __init find_usable_zone_for_movable(void)
7074 {
7075 	int zone_index;
7076 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7077 		if (zone_index == ZONE_MOVABLE)
7078 			continue;
7079 
7080 		if (arch_zone_highest_possible_pfn[zone_index] >
7081 				arch_zone_lowest_possible_pfn[zone_index])
7082 			break;
7083 	}
7084 
7085 	VM_BUG_ON(zone_index == -1);
7086 	movable_zone = zone_index;
7087 }
7088 
7089 /*
7090  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7091  * because it is sized independent of architecture. Unlike the other zones,
7092  * the starting point for ZONE_MOVABLE is not fixed. It may be different
7093  * in each node depending on the size of each node and how evenly kernelcore
7094  * is distributed. This helper function adjusts the zone ranges
7095  * provided by the architecture for a given node by using the end of the
7096  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7097  * zones within a node are in order of monotonic increases memory addresses
7098  */
7099 static void __init adjust_zone_range_for_zone_movable(int nid,
7100 					unsigned long zone_type,
7101 					unsigned long node_start_pfn,
7102 					unsigned long node_end_pfn,
7103 					unsigned long *zone_start_pfn,
7104 					unsigned long *zone_end_pfn)
7105 {
7106 	/* Only adjust if ZONE_MOVABLE is on this node */
7107 	if (zone_movable_pfn[nid]) {
7108 		/* Size ZONE_MOVABLE */
7109 		if (zone_type == ZONE_MOVABLE) {
7110 			*zone_start_pfn = zone_movable_pfn[nid];
7111 			*zone_end_pfn = min(node_end_pfn,
7112 				arch_zone_highest_possible_pfn[movable_zone]);
7113 
7114 		/* Adjust for ZONE_MOVABLE starting within this range */
7115 		} else if (!mirrored_kernelcore &&
7116 			*zone_start_pfn < zone_movable_pfn[nid] &&
7117 			*zone_end_pfn > zone_movable_pfn[nid]) {
7118 			*zone_end_pfn = zone_movable_pfn[nid];
7119 
7120 		/* Check if this whole range is within ZONE_MOVABLE */
7121 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
7122 			*zone_start_pfn = *zone_end_pfn;
7123 	}
7124 }
7125 
7126 /*
7127  * Return the number of pages a zone spans in a node, including holes
7128  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7129  */
7130 static unsigned long __init zone_spanned_pages_in_node(int nid,
7131 					unsigned long zone_type,
7132 					unsigned long node_start_pfn,
7133 					unsigned long node_end_pfn,
7134 					unsigned long *zone_start_pfn,
7135 					unsigned long *zone_end_pfn)
7136 {
7137 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7138 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7139 	/* When hotadd a new node from cpu_up(), the node should be empty */
7140 	if (!node_start_pfn && !node_end_pfn)
7141 		return 0;
7142 
7143 	/* Get the start and end of the zone */
7144 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7145 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7146 	adjust_zone_range_for_zone_movable(nid, zone_type,
7147 				node_start_pfn, node_end_pfn,
7148 				zone_start_pfn, zone_end_pfn);
7149 
7150 	/* Check that this node has pages within the zone's required range */
7151 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7152 		return 0;
7153 
7154 	/* Move the zone boundaries inside the node if necessary */
7155 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7156 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7157 
7158 	/* Return the spanned pages */
7159 	return *zone_end_pfn - *zone_start_pfn;
7160 }
7161 
7162 /*
7163  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7164  * then all holes in the requested range will be accounted for.
7165  */
7166 unsigned long __init __absent_pages_in_range(int nid,
7167 				unsigned long range_start_pfn,
7168 				unsigned long range_end_pfn)
7169 {
7170 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
7171 	unsigned long start_pfn, end_pfn;
7172 	int i;
7173 
7174 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7175 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7176 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7177 		nr_absent -= end_pfn - start_pfn;
7178 	}
7179 	return nr_absent;
7180 }
7181 
7182 /**
7183  * absent_pages_in_range - Return number of page frames in holes within a range
7184  * @start_pfn: The start PFN to start searching for holes
7185  * @end_pfn: The end PFN to stop searching for holes
7186  *
7187  * Return: the number of pages frames in memory holes within a range.
7188  */
7189 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7190 							unsigned long end_pfn)
7191 {
7192 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7193 }
7194 
7195 /* Return the number of page frames in holes in a zone on a node */
7196 static unsigned long __init zone_absent_pages_in_node(int nid,
7197 					unsigned long zone_type,
7198 					unsigned long node_start_pfn,
7199 					unsigned long node_end_pfn)
7200 {
7201 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7202 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7203 	unsigned long zone_start_pfn, zone_end_pfn;
7204 	unsigned long nr_absent;
7205 
7206 	/* When hotadd a new node from cpu_up(), the node should be empty */
7207 	if (!node_start_pfn && !node_end_pfn)
7208 		return 0;
7209 
7210 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7211 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7212 
7213 	adjust_zone_range_for_zone_movable(nid, zone_type,
7214 			node_start_pfn, node_end_pfn,
7215 			&zone_start_pfn, &zone_end_pfn);
7216 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7217 
7218 	/*
7219 	 * ZONE_MOVABLE handling.
7220 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7221 	 * and vice versa.
7222 	 */
7223 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7224 		unsigned long start_pfn, end_pfn;
7225 		struct memblock_region *r;
7226 
7227 		for_each_mem_region(r) {
7228 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
7229 					  zone_start_pfn, zone_end_pfn);
7230 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
7231 					zone_start_pfn, zone_end_pfn);
7232 
7233 			if (zone_type == ZONE_MOVABLE &&
7234 			    memblock_is_mirror(r))
7235 				nr_absent += end_pfn - start_pfn;
7236 
7237 			if (zone_type == ZONE_NORMAL &&
7238 			    !memblock_is_mirror(r))
7239 				nr_absent += end_pfn - start_pfn;
7240 		}
7241 	}
7242 
7243 	return nr_absent;
7244 }
7245 
7246 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7247 						unsigned long node_start_pfn,
7248 						unsigned long node_end_pfn)
7249 {
7250 	unsigned long realtotalpages = 0, totalpages = 0;
7251 	enum zone_type i;
7252 
7253 	for (i = 0; i < MAX_NR_ZONES; i++) {
7254 		struct zone *zone = pgdat->node_zones + i;
7255 		unsigned long zone_start_pfn, zone_end_pfn;
7256 		unsigned long spanned, absent;
7257 		unsigned long size, real_size;
7258 
7259 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7260 						     node_start_pfn,
7261 						     node_end_pfn,
7262 						     &zone_start_pfn,
7263 						     &zone_end_pfn);
7264 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
7265 						   node_start_pfn,
7266 						   node_end_pfn);
7267 
7268 		size = spanned;
7269 		real_size = size - absent;
7270 
7271 		if (size)
7272 			zone->zone_start_pfn = zone_start_pfn;
7273 		else
7274 			zone->zone_start_pfn = 0;
7275 		zone->spanned_pages = size;
7276 		zone->present_pages = real_size;
7277 #if defined(CONFIG_MEMORY_HOTPLUG)
7278 		zone->present_early_pages = real_size;
7279 #endif
7280 
7281 		totalpages += size;
7282 		realtotalpages += real_size;
7283 	}
7284 
7285 	pgdat->node_spanned_pages = totalpages;
7286 	pgdat->node_present_pages = realtotalpages;
7287 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7288 }
7289 
7290 #ifndef CONFIG_SPARSEMEM
7291 /*
7292  * Calculate the size of the zone->blockflags rounded to an unsigned long
7293  * Start by making sure zonesize is a multiple of pageblock_order by rounding
7294  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
7295  * round what is now in bits to nearest long in bits, then return it in
7296  * bytes.
7297  */
7298 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7299 {
7300 	unsigned long usemapsize;
7301 
7302 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7303 	usemapsize = roundup(zonesize, pageblock_nr_pages);
7304 	usemapsize = usemapsize >> pageblock_order;
7305 	usemapsize *= NR_PAGEBLOCK_BITS;
7306 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7307 
7308 	return usemapsize / 8;
7309 }
7310 
7311 static void __ref setup_usemap(struct zone *zone)
7312 {
7313 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7314 					       zone->spanned_pages);
7315 	zone->pageblock_flags = NULL;
7316 	if (usemapsize) {
7317 		zone->pageblock_flags =
7318 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7319 					    zone_to_nid(zone));
7320 		if (!zone->pageblock_flags)
7321 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7322 			      usemapsize, zone->name, zone_to_nid(zone));
7323 	}
7324 }
7325 #else
7326 static inline void setup_usemap(struct zone *zone) {}
7327 #endif /* CONFIG_SPARSEMEM */
7328 
7329 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7330 
7331 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7332 void __init set_pageblock_order(void)
7333 {
7334 	unsigned int order;
7335 
7336 	/* Check that pageblock_nr_pages has not already been setup */
7337 	if (pageblock_order)
7338 		return;
7339 
7340 	if (HPAGE_SHIFT > PAGE_SHIFT)
7341 		order = HUGETLB_PAGE_ORDER;
7342 	else
7343 		order = MAX_ORDER - 1;
7344 
7345 	/*
7346 	 * Assume the largest contiguous order of interest is a huge page.
7347 	 * This value may be variable depending on boot parameters on IA64 and
7348 	 * powerpc.
7349 	 */
7350 	pageblock_order = order;
7351 }
7352 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7353 
7354 /*
7355  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7356  * is unused as pageblock_order is set at compile-time. See
7357  * include/linux/pageblock-flags.h for the values of pageblock_order based on
7358  * the kernel config
7359  */
7360 void __init set_pageblock_order(void)
7361 {
7362 }
7363 
7364 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7365 
7366 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7367 						unsigned long present_pages)
7368 {
7369 	unsigned long pages = spanned_pages;
7370 
7371 	/*
7372 	 * Provide a more accurate estimation if there are holes within
7373 	 * the zone and SPARSEMEM is in use. If there are holes within the
7374 	 * zone, each populated memory region may cost us one or two extra
7375 	 * memmap pages due to alignment because memmap pages for each
7376 	 * populated regions may not be naturally aligned on page boundary.
7377 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7378 	 */
7379 	if (spanned_pages > present_pages + (present_pages >> 4) &&
7380 	    IS_ENABLED(CONFIG_SPARSEMEM))
7381 		pages = present_pages;
7382 
7383 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7384 }
7385 
7386 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7387 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7388 {
7389 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7390 
7391 	spin_lock_init(&ds_queue->split_queue_lock);
7392 	INIT_LIST_HEAD(&ds_queue->split_queue);
7393 	ds_queue->split_queue_len = 0;
7394 }
7395 #else
7396 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7397 #endif
7398 
7399 #ifdef CONFIG_COMPACTION
7400 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7401 {
7402 	init_waitqueue_head(&pgdat->kcompactd_wait);
7403 }
7404 #else
7405 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7406 #endif
7407 
7408 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7409 {
7410 	pgdat_resize_init(pgdat);
7411 
7412 	pgdat_init_split_queue(pgdat);
7413 	pgdat_init_kcompactd(pgdat);
7414 
7415 	init_waitqueue_head(&pgdat->kswapd_wait);
7416 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
7417 
7418 	pgdat_page_ext_init(pgdat);
7419 	lruvec_init(&pgdat->__lruvec);
7420 }
7421 
7422 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7423 							unsigned long remaining_pages)
7424 {
7425 	atomic_long_set(&zone->managed_pages, remaining_pages);
7426 	zone_set_nid(zone, nid);
7427 	zone->name = zone_names[idx];
7428 	zone->zone_pgdat = NODE_DATA(nid);
7429 	spin_lock_init(&zone->lock);
7430 	zone_seqlock_init(zone);
7431 	zone_pcp_init(zone);
7432 }
7433 
7434 /*
7435  * Set up the zone data structures
7436  * - init pgdat internals
7437  * - init all zones belonging to this node
7438  *
7439  * NOTE: this function is only called during memory hotplug
7440  */
7441 #ifdef CONFIG_MEMORY_HOTPLUG
7442 void __ref free_area_init_core_hotplug(int nid)
7443 {
7444 	enum zone_type z;
7445 	pg_data_t *pgdat = NODE_DATA(nid);
7446 
7447 	pgdat_init_internals(pgdat);
7448 	for (z = 0; z < MAX_NR_ZONES; z++)
7449 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7450 }
7451 #endif
7452 
7453 /*
7454  * Set up the zone data structures:
7455  *   - mark all pages reserved
7456  *   - mark all memory queues empty
7457  *   - clear the memory bitmaps
7458  *
7459  * NOTE: pgdat should get zeroed by caller.
7460  * NOTE: this function is only called during early init.
7461  */
7462 static void __init free_area_init_core(struct pglist_data *pgdat)
7463 {
7464 	enum zone_type j;
7465 	int nid = pgdat->node_id;
7466 
7467 	pgdat_init_internals(pgdat);
7468 	pgdat->per_cpu_nodestats = &boot_nodestats;
7469 
7470 	for (j = 0; j < MAX_NR_ZONES; j++) {
7471 		struct zone *zone = pgdat->node_zones + j;
7472 		unsigned long size, freesize, memmap_pages;
7473 
7474 		size = zone->spanned_pages;
7475 		freesize = zone->present_pages;
7476 
7477 		/*
7478 		 * Adjust freesize so that it accounts for how much memory
7479 		 * is used by this zone for memmap. This affects the watermark
7480 		 * and per-cpu initialisations
7481 		 */
7482 		memmap_pages = calc_memmap_size(size, freesize);
7483 		if (!is_highmem_idx(j)) {
7484 			if (freesize >= memmap_pages) {
7485 				freesize -= memmap_pages;
7486 				if (memmap_pages)
7487 					pr_debug("  %s zone: %lu pages used for memmap\n",
7488 						 zone_names[j], memmap_pages);
7489 			} else
7490 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
7491 					zone_names[j], memmap_pages, freesize);
7492 		}
7493 
7494 		/* Account for reserved pages */
7495 		if (j == 0 && freesize > dma_reserve) {
7496 			freesize -= dma_reserve;
7497 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7498 		}
7499 
7500 		if (!is_highmem_idx(j))
7501 			nr_kernel_pages += freesize;
7502 		/* Charge for highmem memmap if there are enough kernel pages */
7503 		else if (nr_kernel_pages > memmap_pages * 2)
7504 			nr_kernel_pages -= memmap_pages;
7505 		nr_all_pages += freesize;
7506 
7507 		/*
7508 		 * Set an approximate value for lowmem here, it will be adjusted
7509 		 * when the bootmem allocator frees pages into the buddy system.
7510 		 * And all highmem pages will be managed by the buddy system.
7511 		 */
7512 		zone_init_internals(zone, j, nid, freesize);
7513 
7514 		if (!size)
7515 			continue;
7516 
7517 		set_pageblock_order();
7518 		setup_usemap(zone);
7519 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7520 	}
7521 }
7522 
7523 #ifdef CONFIG_FLATMEM
7524 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7525 {
7526 	unsigned long __maybe_unused start = 0;
7527 	unsigned long __maybe_unused offset = 0;
7528 
7529 	/* Skip empty nodes */
7530 	if (!pgdat->node_spanned_pages)
7531 		return;
7532 
7533 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7534 	offset = pgdat->node_start_pfn - start;
7535 	/* ia64 gets its own node_mem_map, before this, without bootmem */
7536 	if (!pgdat->node_mem_map) {
7537 		unsigned long size, end;
7538 		struct page *map;
7539 
7540 		/*
7541 		 * The zone's endpoints aren't required to be MAX_ORDER
7542 		 * aligned but the node_mem_map endpoints must be in order
7543 		 * for the buddy allocator to function correctly.
7544 		 */
7545 		end = pgdat_end_pfn(pgdat);
7546 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7547 		size =  (end - start) * sizeof(struct page);
7548 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7549 				   pgdat->node_id, false);
7550 		if (!map)
7551 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7552 			      size, pgdat->node_id);
7553 		pgdat->node_mem_map = map + offset;
7554 	}
7555 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7556 				__func__, pgdat->node_id, (unsigned long)pgdat,
7557 				(unsigned long)pgdat->node_mem_map);
7558 #ifndef CONFIG_NUMA
7559 	/*
7560 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7561 	 */
7562 	if (pgdat == NODE_DATA(0)) {
7563 		mem_map = NODE_DATA(0)->node_mem_map;
7564 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7565 			mem_map -= offset;
7566 	}
7567 #endif
7568 }
7569 #else
7570 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7571 #endif /* CONFIG_FLATMEM */
7572 
7573 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7574 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7575 {
7576 	pgdat->first_deferred_pfn = ULONG_MAX;
7577 }
7578 #else
7579 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7580 #endif
7581 
7582 static void __init free_area_init_node(int nid)
7583 {
7584 	pg_data_t *pgdat = NODE_DATA(nid);
7585 	unsigned long start_pfn = 0;
7586 	unsigned long end_pfn = 0;
7587 
7588 	/* pg_data_t should be reset to zero when it's allocated */
7589 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7590 
7591 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7592 
7593 	pgdat->node_id = nid;
7594 	pgdat->node_start_pfn = start_pfn;
7595 	pgdat->per_cpu_nodestats = NULL;
7596 
7597 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7598 		(u64)start_pfn << PAGE_SHIFT,
7599 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7600 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7601 
7602 	alloc_node_mem_map(pgdat);
7603 	pgdat_set_deferred_range(pgdat);
7604 
7605 	free_area_init_core(pgdat);
7606 }
7607 
7608 void __init free_area_init_memoryless_node(int nid)
7609 {
7610 	free_area_init_node(nid);
7611 }
7612 
7613 #if MAX_NUMNODES > 1
7614 /*
7615  * Figure out the number of possible node ids.
7616  */
7617 void __init setup_nr_node_ids(void)
7618 {
7619 	unsigned int highest;
7620 
7621 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7622 	nr_node_ids = highest + 1;
7623 }
7624 #endif
7625 
7626 /**
7627  * node_map_pfn_alignment - determine the maximum internode alignment
7628  *
7629  * This function should be called after node map is populated and sorted.
7630  * It calculates the maximum power of two alignment which can distinguish
7631  * all the nodes.
7632  *
7633  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7634  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7635  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7636  * shifted, 1GiB is enough and this function will indicate so.
7637  *
7638  * This is used to test whether pfn -> nid mapping of the chosen memory
7639  * model has fine enough granularity to avoid incorrect mapping for the
7640  * populated node map.
7641  *
7642  * Return: the determined alignment in pfn's.  0 if there is no alignment
7643  * requirement (single node).
7644  */
7645 unsigned long __init node_map_pfn_alignment(void)
7646 {
7647 	unsigned long accl_mask = 0, last_end = 0;
7648 	unsigned long start, end, mask;
7649 	int last_nid = NUMA_NO_NODE;
7650 	int i, nid;
7651 
7652 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7653 		if (!start || last_nid < 0 || last_nid == nid) {
7654 			last_nid = nid;
7655 			last_end = end;
7656 			continue;
7657 		}
7658 
7659 		/*
7660 		 * Start with a mask granular enough to pin-point to the
7661 		 * start pfn and tick off bits one-by-one until it becomes
7662 		 * too coarse to separate the current node from the last.
7663 		 */
7664 		mask = ~((1 << __ffs(start)) - 1);
7665 		while (mask && last_end <= (start & (mask << 1)))
7666 			mask <<= 1;
7667 
7668 		/* accumulate all internode masks */
7669 		accl_mask |= mask;
7670 	}
7671 
7672 	/* convert mask to number of pages */
7673 	return ~accl_mask + 1;
7674 }
7675 
7676 /**
7677  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7678  *
7679  * Return: the minimum PFN based on information provided via
7680  * memblock_set_node().
7681  */
7682 unsigned long __init find_min_pfn_with_active_regions(void)
7683 {
7684 	return PHYS_PFN(memblock_start_of_DRAM());
7685 }
7686 
7687 /*
7688  * early_calculate_totalpages()
7689  * Sum pages in active regions for movable zone.
7690  * Populate N_MEMORY for calculating usable_nodes.
7691  */
7692 static unsigned long __init early_calculate_totalpages(void)
7693 {
7694 	unsigned long totalpages = 0;
7695 	unsigned long start_pfn, end_pfn;
7696 	int i, nid;
7697 
7698 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7699 		unsigned long pages = end_pfn - start_pfn;
7700 
7701 		totalpages += pages;
7702 		if (pages)
7703 			node_set_state(nid, N_MEMORY);
7704 	}
7705 	return totalpages;
7706 }
7707 
7708 /*
7709  * Find the PFN the Movable zone begins in each node. Kernel memory
7710  * is spread evenly between nodes as long as the nodes have enough
7711  * memory. When they don't, some nodes will have more kernelcore than
7712  * others
7713  */
7714 static void __init find_zone_movable_pfns_for_nodes(void)
7715 {
7716 	int i, nid;
7717 	unsigned long usable_startpfn;
7718 	unsigned long kernelcore_node, kernelcore_remaining;
7719 	/* save the state before borrow the nodemask */
7720 	nodemask_t saved_node_state = node_states[N_MEMORY];
7721 	unsigned long totalpages = early_calculate_totalpages();
7722 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7723 	struct memblock_region *r;
7724 
7725 	/* Need to find movable_zone earlier when movable_node is specified. */
7726 	find_usable_zone_for_movable();
7727 
7728 	/*
7729 	 * If movable_node is specified, ignore kernelcore and movablecore
7730 	 * options.
7731 	 */
7732 	if (movable_node_is_enabled()) {
7733 		for_each_mem_region(r) {
7734 			if (!memblock_is_hotpluggable(r))
7735 				continue;
7736 
7737 			nid = memblock_get_region_node(r);
7738 
7739 			usable_startpfn = PFN_DOWN(r->base);
7740 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7741 				min(usable_startpfn, zone_movable_pfn[nid]) :
7742 				usable_startpfn;
7743 		}
7744 
7745 		goto out2;
7746 	}
7747 
7748 	/*
7749 	 * If kernelcore=mirror is specified, ignore movablecore option
7750 	 */
7751 	if (mirrored_kernelcore) {
7752 		bool mem_below_4gb_not_mirrored = false;
7753 
7754 		for_each_mem_region(r) {
7755 			if (memblock_is_mirror(r))
7756 				continue;
7757 
7758 			nid = memblock_get_region_node(r);
7759 
7760 			usable_startpfn = memblock_region_memory_base_pfn(r);
7761 
7762 			if (usable_startpfn < 0x100000) {
7763 				mem_below_4gb_not_mirrored = true;
7764 				continue;
7765 			}
7766 
7767 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7768 				min(usable_startpfn, zone_movable_pfn[nid]) :
7769 				usable_startpfn;
7770 		}
7771 
7772 		if (mem_below_4gb_not_mirrored)
7773 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7774 
7775 		goto out2;
7776 	}
7777 
7778 	/*
7779 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7780 	 * amount of necessary memory.
7781 	 */
7782 	if (required_kernelcore_percent)
7783 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7784 				       10000UL;
7785 	if (required_movablecore_percent)
7786 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7787 					10000UL;
7788 
7789 	/*
7790 	 * If movablecore= was specified, calculate what size of
7791 	 * kernelcore that corresponds so that memory usable for
7792 	 * any allocation type is evenly spread. If both kernelcore
7793 	 * and movablecore are specified, then the value of kernelcore
7794 	 * will be used for required_kernelcore if it's greater than
7795 	 * what movablecore would have allowed.
7796 	 */
7797 	if (required_movablecore) {
7798 		unsigned long corepages;
7799 
7800 		/*
7801 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7802 		 * was requested by the user
7803 		 */
7804 		required_movablecore =
7805 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7806 		required_movablecore = min(totalpages, required_movablecore);
7807 		corepages = totalpages - required_movablecore;
7808 
7809 		required_kernelcore = max(required_kernelcore, corepages);
7810 	}
7811 
7812 	/*
7813 	 * If kernelcore was not specified or kernelcore size is larger
7814 	 * than totalpages, there is no ZONE_MOVABLE.
7815 	 */
7816 	if (!required_kernelcore || required_kernelcore >= totalpages)
7817 		goto out;
7818 
7819 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7820 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7821 
7822 restart:
7823 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7824 	kernelcore_node = required_kernelcore / usable_nodes;
7825 	for_each_node_state(nid, N_MEMORY) {
7826 		unsigned long start_pfn, end_pfn;
7827 
7828 		/*
7829 		 * Recalculate kernelcore_node if the division per node
7830 		 * now exceeds what is necessary to satisfy the requested
7831 		 * amount of memory for the kernel
7832 		 */
7833 		if (required_kernelcore < kernelcore_node)
7834 			kernelcore_node = required_kernelcore / usable_nodes;
7835 
7836 		/*
7837 		 * As the map is walked, we track how much memory is usable
7838 		 * by the kernel using kernelcore_remaining. When it is
7839 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7840 		 */
7841 		kernelcore_remaining = kernelcore_node;
7842 
7843 		/* Go through each range of PFNs within this node */
7844 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7845 			unsigned long size_pages;
7846 
7847 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7848 			if (start_pfn >= end_pfn)
7849 				continue;
7850 
7851 			/* Account for what is only usable for kernelcore */
7852 			if (start_pfn < usable_startpfn) {
7853 				unsigned long kernel_pages;
7854 				kernel_pages = min(end_pfn, usable_startpfn)
7855 								- start_pfn;
7856 
7857 				kernelcore_remaining -= min(kernel_pages,
7858 							kernelcore_remaining);
7859 				required_kernelcore -= min(kernel_pages,
7860 							required_kernelcore);
7861 
7862 				/* Continue if range is now fully accounted */
7863 				if (end_pfn <= usable_startpfn) {
7864 
7865 					/*
7866 					 * Push zone_movable_pfn to the end so
7867 					 * that if we have to rebalance
7868 					 * kernelcore across nodes, we will
7869 					 * not double account here
7870 					 */
7871 					zone_movable_pfn[nid] = end_pfn;
7872 					continue;
7873 				}
7874 				start_pfn = usable_startpfn;
7875 			}
7876 
7877 			/*
7878 			 * The usable PFN range for ZONE_MOVABLE is from
7879 			 * start_pfn->end_pfn. Calculate size_pages as the
7880 			 * number of pages used as kernelcore
7881 			 */
7882 			size_pages = end_pfn - start_pfn;
7883 			if (size_pages > kernelcore_remaining)
7884 				size_pages = kernelcore_remaining;
7885 			zone_movable_pfn[nid] = start_pfn + size_pages;
7886 
7887 			/*
7888 			 * Some kernelcore has been met, update counts and
7889 			 * break if the kernelcore for this node has been
7890 			 * satisfied
7891 			 */
7892 			required_kernelcore -= min(required_kernelcore,
7893 								size_pages);
7894 			kernelcore_remaining -= size_pages;
7895 			if (!kernelcore_remaining)
7896 				break;
7897 		}
7898 	}
7899 
7900 	/*
7901 	 * If there is still required_kernelcore, we do another pass with one
7902 	 * less node in the count. This will push zone_movable_pfn[nid] further
7903 	 * along on the nodes that still have memory until kernelcore is
7904 	 * satisfied
7905 	 */
7906 	usable_nodes--;
7907 	if (usable_nodes && required_kernelcore > usable_nodes)
7908 		goto restart;
7909 
7910 out2:
7911 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7912 	for (nid = 0; nid < MAX_NUMNODES; nid++)
7913 		zone_movable_pfn[nid] =
7914 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7915 
7916 out:
7917 	/* restore the node_state */
7918 	node_states[N_MEMORY] = saved_node_state;
7919 }
7920 
7921 /* Any regular or high memory on that node ? */
7922 static void check_for_memory(pg_data_t *pgdat, int nid)
7923 {
7924 	enum zone_type zone_type;
7925 
7926 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7927 		struct zone *zone = &pgdat->node_zones[zone_type];
7928 		if (populated_zone(zone)) {
7929 			if (IS_ENABLED(CONFIG_HIGHMEM))
7930 				node_set_state(nid, N_HIGH_MEMORY);
7931 			if (zone_type <= ZONE_NORMAL)
7932 				node_set_state(nid, N_NORMAL_MEMORY);
7933 			break;
7934 		}
7935 	}
7936 }
7937 
7938 /*
7939  * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
7940  * such cases we allow max_zone_pfn sorted in the descending order
7941  */
7942 bool __weak arch_has_descending_max_zone_pfns(void)
7943 {
7944 	return false;
7945 }
7946 
7947 /**
7948  * free_area_init - Initialise all pg_data_t and zone data
7949  * @max_zone_pfn: an array of max PFNs for each zone
7950  *
7951  * This will call free_area_init_node() for each active node in the system.
7952  * Using the page ranges provided by memblock_set_node(), the size of each
7953  * zone in each node and their holes is calculated. If the maximum PFN
7954  * between two adjacent zones match, it is assumed that the zone is empty.
7955  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7956  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7957  * starts where the previous one ended. For example, ZONE_DMA32 starts
7958  * at arch_max_dma_pfn.
7959  */
7960 void __init free_area_init(unsigned long *max_zone_pfn)
7961 {
7962 	unsigned long start_pfn, end_pfn;
7963 	int i, nid, zone;
7964 	bool descending;
7965 
7966 	/* Record where the zone boundaries are */
7967 	memset(arch_zone_lowest_possible_pfn, 0,
7968 				sizeof(arch_zone_lowest_possible_pfn));
7969 	memset(arch_zone_highest_possible_pfn, 0,
7970 				sizeof(arch_zone_highest_possible_pfn));
7971 
7972 	start_pfn = find_min_pfn_with_active_regions();
7973 	descending = arch_has_descending_max_zone_pfns();
7974 
7975 	for (i = 0; i < MAX_NR_ZONES; i++) {
7976 		if (descending)
7977 			zone = MAX_NR_ZONES - i - 1;
7978 		else
7979 			zone = i;
7980 
7981 		if (zone == ZONE_MOVABLE)
7982 			continue;
7983 
7984 		end_pfn = max(max_zone_pfn[zone], start_pfn);
7985 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
7986 		arch_zone_highest_possible_pfn[zone] = end_pfn;
7987 
7988 		start_pfn = end_pfn;
7989 	}
7990 
7991 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
7992 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7993 	find_zone_movable_pfns_for_nodes();
7994 
7995 	/* Print out the zone ranges */
7996 	pr_info("Zone ranges:\n");
7997 	for (i = 0; i < MAX_NR_ZONES; i++) {
7998 		if (i == ZONE_MOVABLE)
7999 			continue;
8000 		pr_info("  %-8s ", zone_names[i]);
8001 		if (arch_zone_lowest_possible_pfn[i] ==
8002 				arch_zone_highest_possible_pfn[i])
8003 			pr_cont("empty\n");
8004 		else
8005 			pr_cont("[mem %#018Lx-%#018Lx]\n",
8006 				(u64)arch_zone_lowest_possible_pfn[i]
8007 					<< PAGE_SHIFT,
8008 				((u64)arch_zone_highest_possible_pfn[i]
8009 					<< PAGE_SHIFT) - 1);
8010 	}
8011 
8012 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
8013 	pr_info("Movable zone start for each node\n");
8014 	for (i = 0; i < MAX_NUMNODES; i++) {
8015 		if (zone_movable_pfn[i])
8016 			pr_info("  Node %d: %#018Lx\n", i,
8017 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8018 	}
8019 
8020 	/*
8021 	 * Print out the early node map, and initialize the
8022 	 * subsection-map relative to active online memory ranges to
8023 	 * enable future "sub-section" extensions of the memory map.
8024 	 */
8025 	pr_info("Early memory node ranges\n");
8026 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8027 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8028 			(u64)start_pfn << PAGE_SHIFT,
8029 			((u64)end_pfn << PAGE_SHIFT) - 1);
8030 		subsection_map_init(start_pfn, end_pfn - start_pfn);
8031 	}
8032 
8033 	/* Initialise every node */
8034 	mminit_verify_pageflags_layout();
8035 	setup_nr_node_ids();
8036 	for_each_online_node(nid) {
8037 		pg_data_t *pgdat = NODE_DATA(nid);
8038 		free_area_init_node(nid);
8039 
8040 		/* Any memory on that node */
8041 		if (pgdat->node_present_pages)
8042 			node_set_state(nid, N_MEMORY);
8043 		check_for_memory(pgdat, nid);
8044 	}
8045 
8046 	memmap_init();
8047 }
8048 
8049 static int __init cmdline_parse_core(char *p, unsigned long *core,
8050 				     unsigned long *percent)
8051 {
8052 	unsigned long long coremem;
8053 	char *endptr;
8054 
8055 	if (!p)
8056 		return -EINVAL;
8057 
8058 	/* Value may be a percentage of total memory, otherwise bytes */
8059 	coremem = simple_strtoull(p, &endptr, 0);
8060 	if (*endptr == '%') {
8061 		/* Paranoid check for percent values greater than 100 */
8062 		WARN_ON(coremem > 100);
8063 
8064 		*percent = coremem;
8065 	} else {
8066 		coremem = memparse(p, &p);
8067 		/* Paranoid check that UL is enough for the coremem value */
8068 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8069 
8070 		*core = coremem >> PAGE_SHIFT;
8071 		*percent = 0UL;
8072 	}
8073 	return 0;
8074 }
8075 
8076 /*
8077  * kernelcore=size sets the amount of memory for use for allocations that
8078  * cannot be reclaimed or migrated.
8079  */
8080 static int __init cmdline_parse_kernelcore(char *p)
8081 {
8082 	/* parse kernelcore=mirror */
8083 	if (parse_option_str(p, "mirror")) {
8084 		mirrored_kernelcore = true;
8085 		return 0;
8086 	}
8087 
8088 	return cmdline_parse_core(p, &required_kernelcore,
8089 				  &required_kernelcore_percent);
8090 }
8091 
8092 /*
8093  * movablecore=size sets the amount of memory for use for allocations that
8094  * can be reclaimed or migrated.
8095  */
8096 static int __init cmdline_parse_movablecore(char *p)
8097 {
8098 	return cmdline_parse_core(p, &required_movablecore,
8099 				  &required_movablecore_percent);
8100 }
8101 
8102 early_param("kernelcore", cmdline_parse_kernelcore);
8103 early_param("movablecore", cmdline_parse_movablecore);
8104 
8105 void adjust_managed_page_count(struct page *page, long count)
8106 {
8107 	atomic_long_add(count, &page_zone(page)->managed_pages);
8108 	totalram_pages_add(count);
8109 #ifdef CONFIG_HIGHMEM
8110 	if (PageHighMem(page))
8111 		totalhigh_pages_add(count);
8112 #endif
8113 }
8114 EXPORT_SYMBOL(adjust_managed_page_count);
8115 
8116 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8117 {
8118 	void *pos;
8119 	unsigned long pages = 0;
8120 
8121 	start = (void *)PAGE_ALIGN((unsigned long)start);
8122 	end = (void *)((unsigned long)end & PAGE_MASK);
8123 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8124 		struct page *page = virt_to_page(pos);
8125 		void *direct_map_addr;
8126 
8127 		/*
8128 		 * 'direct_map_addr' might be different from 'pos'
8129 		 * because some architectures' virt_to_page()
8130 		 * work with aliases.  Getting the direct map
8131 		 * address ensures that we get a _writeable_
8132 		 * alias for the memset().
8133 		 */
8134 		direct_map_addr = page_address(page);
8135 		/*
8136 		 * Perform a kasan-unchecked memset() since this memory
8137 		 * has not been initialized.
8138 		 */
8139 		direct_map_addr = kasan_reset_tag(direct_map_addr);
8140 		if ((unsigned int)poison <= 0xFF)
8141 			memset(direct_map_addr, poison, PAGE_SIZE);
8142 
8143 		free_reserved_page(page);
8144 	}
8145 
8146 	if (pages && s)
8147 		pr_info("Freeing %s memory: %ldK\n",
8148 			s, pages << (PAGE_SHIFT - 10));
8149 
8150 	return pages;
8151 }
8152 
8153 void __init mem_init_print_info(void)
8154 {
8155 	unsigned long physpages, codesize, datasize, rosize, bss_size;
8156 	unsigned long init_code_size, init_data_size;
8157 
8158 	physpages = get_num_physpages();
8159 	codesize = _etext - _stext;
8160 	datasize = _edata - _sdata;
8161 	rosize = __end_rodata - __start_rodata;
8162 	bss_size = __bss_stop - __bss_start;
8163 	init_data_size = __init_end - __init_begin;
8164 	init_code_size = _einittext - _sinittext;
8165 
8166 	/*
8167 	 * Detect special cases and adjust section sizes accordingly:
8168 	 * 1) .init.* may be embedded into .data sections
8169 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
8170 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
8171 	 * 3) .rodata.* may be embedded into .text or .data sections.
8172 	 */
8173 #define adj_init_size(start, end, size, pos, adj) \
8174 	do { \
8175 		if (start <= pos && pos < end && size > adj) \
8176 			size -= adj; \
8177 	} while (0)
8178 
8179 	adj_init_size(__init_begin, __init_end, init_data_size,
8180 		     _sinittext, init_code_size);
8181 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8182 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8183 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8184 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8185 
8186 #undef	adj_init_size
8187 
8188 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8189 #ifdef	CONFIG_HIGHMEM
8190 		", %luK highmem"
8191 #endif
8192 		")\n",
8193 		nr_free_pages() << (PAGE_SHIFT - 10),
8194 		physpages << (PAGE_SHIFT - 10),
8195 		codesize >> 10, datasize >> 10, rosize >> 10,
8196 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
8197 		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
8198 		totalcma_pages << (PAGE_SHIFT - 10)
8199 #ifdef	CONFIG_HIGHMEM
8200 		, totalhigh_pages() << (PAGE_SHIFT - 10)
8201 #endif
8202 		);
8203 }
8204 
8205 /**
8206  * set_dma_reserve - set the specified number of pages reserved in the first zone
8207  * @new_dma_reserve: The number of pages to mark reserved
8208  *
8209  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8210  * In the DMA zone, a significant percentage may be consumed by kernel image
8211  * and other unfreeable allocations which can skew the watermarks badly. This
8212  * function may optionally be used to account for unfreeable pages in the
8213  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8214  * smaller per-cpu batchsize.
8215  */
8216 void __init set_dma_reserve(unsigned long new_dma_reserve)
8217 {
8218 	dma_reserve = new_dma_reserve;
8219 }
8220 
8221 static int page_alloc_cpu_dead(unsigned int cpu)
8222 {
8223 	struct zone *zone;
8224 
8225 	lru_add_drain_cpu(cpu);
8226 	drain_pages(cpu);
8227 
8228 	/*
8229 	 * Spill the event counters of the dead processor
8230 	 * into the current processors event counters.
8231 	 * This artificially elevates the count of the current
8232 	 * processor.
8233 	 */
8234 	vm_events_fold_cpu(cpu);
8235 
8236 	/*
8237 	 * Zero the differential counters of the dead processor
8238 	 * so that the vm statistics are consistent.
8239 	 *
8240 	 * This is only okay since the processor is dead and cannot
8241 	 * race with what we are doing.
8242 	 */
8243 	cpu_vm_stats_fold(cpu);
8244 
8245 	for_each_populated_zone(zone)
8246 		zone_pcp_update(zone, 0);
8247 
8248 	return 0;
8249 }
8250 
8251 static int page_alloc_cpu_online(unsigned int cpu)
8252 {
8253 	struct zone *zone;
8254 
8255 	for_each_populated_zone(zone)
8256 		zone_pcp_update(zone, 1);
8257 	return 0;
8258 }
8259 
8260 #ifdef CONFIG_NUMA
8261 int hashdist = HASHDIST_DEFAULT;
8262 
8263 static int __init set_hashdist(char *str)
8264 {
8265 	if (!str)
8266 		return 0;
8267 	hashdist = simple_strtoul(str, &str, 0);
8268 	return 1;
8269 }
8270 __setup("hashdist=", set_hashdist);
8271 #endif
8272 
8273 void __init page_alloc_init(void)
8274 {
8275 	int ret;
8276 
8277 #ifdef CONFIG_NUMA
8278 	if (num_node_state(N_MEMORY) == 1)
8279 		hashdist = 0;
8280 #endif
8281 
8282 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8283 					"mm/page_alloc:pcp",
8284 					page_alloc_cpu_online,
8285 					page_alloc_cpu_dead);
8286 	WARN_ON(ret < 0);
8287 }
8288 
8289 /*
8290  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8291  *	or min_free_kbytes changes.
8292  */
8293 static void calculate_totalreserve_pages(void)
8294 {
8295 	struct pglist_data *pgdat;
8296 	unsigned long reserve_pages = 0;
8297 	enum zone_type i, j;
8298 
8299 	for_each_online_pgdat(pgdat) {
8300 
8301 		pgdat->totalreserve_pages = 0;
8302 
8303 		for (i = 0; i < MAX_NR_ZONES; i++) {
8304 			struct zone *zone = pgdat->node_zones + i;
8305 			long max = 0;
8306 			unsigned long managed_pages = zone_managed_pages(zone);
8307 
8308 			/* Find valid and maximum lowmem_reserve in the zone */
8309 			for (j = i; j < MAX_NR_ZONES; j++) {
8310 				if (zone->lowmem_reserve[j] > max)
8311 					max = zone->lowmem_reserve[j];
8312 			}
8313 
8314 			/* we treat the high watermark as reserved pages. */
8315 			max += high_wmark_pages(zone);
8316 
8317 			if (max > managed_pages)
8318 				max = managed_pages;
8319 
8320 			pgdat->totalreserve_pages += max;
8321 
8322 			reserve_pages += max;
8323 		}
8324 	}
8325 	totalreserve_pages = reserve_pages;
8326 }
8327 
8328 /*
8329  * setup_per_zone_lowmem_reserve - called whenever
8330  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8331  *	has a correct pages reserved value, so an adequate number of
8332  *	pages are left in the zone after a successful __alloc_pages().
8333  */
8334 static void setup_per_zone_lowmem_reserve(void)
8335 {
8336 	struct pglist_data *pgdat;
8337 	enum zone_type i, j;
8338 
8339 	for_each_online_pgdat(pgdat) {
8340 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8341 			struct zone *zone = &pgdat->node_zones[i];
8342 			int ratio = sysctl_lowmem_reserve_ratio[i];
8343 			bool clear = !ratio || !zone_managed_pages(zone);
8344 			unsigned long managed_pages = 0;
8345 
8346 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
8347 				struct zone *upper_zone = &pgdat->node_zones[j];
8348 
8349 				managed_pages += zone_managed_pages(upper_zone);
8350 
8351 				if (clear)
8352 					zone->lowmem_reserve[j] = 0;
8353 				else
8354 					zone->lowmem_reserve[j] = managed_pages / ratio;
8355 			}
8356 		}
8357 	}
8358 
8359 	/* update totalreserve_pages */
8360 	calculate_totalreserve_pages();
8361 }
8362 
8363 static void __setup_per_zone_wmarks(void)
8364 {
8365 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8366 	unsigned long lowmem_pages = 0;
8367 	struct zone *zone;
8368 	unsigned long flags;
8369 
8370 	/* Calculate total number of !ZONE_HIGHMEM pages */
8371 	for_each_zone(zone) {
8372 		if (!is_highmem(zone))
8373 			lowmem_pages += zone_managed_pages(zone);
8374 	}
8375 
8376 	for_each_zone(zone) {
8377 		u64 tmp;
8378 
8379 		spin_lock_irqsave(&zone->lock, flags);
8380 		tmp = (u64)pages_min * zone_managed_pages(zone);
8381 		do_div(tmp, lowmem_pages);
8382 		if (is_highmem(zone)) {
8383 			/*
8384 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8385 			 * need highmem pages, so cap pages_min to a small
8386 			 * value here.
8387 			 *
8388 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8389 			 * deltas control async page reclaim, and so should
8390 			 * not be capped for highmem.
8391 			 */
8392 			unsigned long min_pages;
8393 
8394 			min_pages = zone_managed_pages(zone) / 1024;
8395 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8396 			zone->_watermark[WMARK_MIN] = min_pages;
8397 		} else {
8398 			/*
8399 			 * If it's a lowmem zone, reserve a number of pages
8400 			 * proportionate to the zone's size.
8401 			 */
8402 			zone->_watermark[WMARK_MIN] = tmp;
8403 		}
8404 
8405 		/*
8406 		 * Set the kswapd watermarks distance according to the
8407 		 * scale factor in proportion to available memory, but
8408 		 * ensure a minimum size on small systems.
8409 		 */
8410 		tmp = max_t(u64, tmp >> 2,
8411 			    mult_frac(zone_managed_pages(zone),
8412 				      watermark_scale_factor, 10000));
8413 
8414 		zone->watermark_boost = 0;
8415 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
8416 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
8417 
8418 		spin_unlock_irqrestore(&zone->lock, flags);
8419 	}
8420 
8421 	/* update totalreserve_pages */
8422 	calculate_totalreserve_pages();
8423 }
8424 
8425 /**
8426  * setup_per_zone_wmarks - called when min_free_kbytes changes
8427  * or when memory is hot-{added|removed}
8428  *
8429  * Ensures that the watermark[min,low,high] values for each zone are set
8430  * correctly with respect to min_free_kbytes.
8431  */
8432 void setup_per_zone_wmarks(void)
8433 {
8434 	struct zone *zone;
8435 	static DEFINE_SPINLOCK(lock);
8436 
8437 	spin_lock(&lock);
8438 	__setup_per_zone_wmarks();
8439 	spin_unlock(&lock);
8440 
8441 	/*
8442 	 * The watermark size have changed so update the pcpu batch
8443 	 * and high limits or the limits may be inappropriate.
8444 	 */
8445 	for_each_zone(zone)
8446 		zone_pcp_update(zone, 0);
8447 }
8448 
8449 /*
8450  * Initialise min_free_kbytes.
8451  *
8452  * For small machines we want it small (128k min).  For large machines
8453  * we want it large (256MB max).  But it is not linear, because network
8454  * bandwidth does not increase linearly with machine size.  We use
8455  *
8456  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8457  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
8458  *
8459  * which yields
8460  *
8461  * 16MB:	512k
8462  * 32MB:	724k
8463  * 64MB:	1024k
8464  * 128MB:	1448k
8465  * 256MB:	2048k
8466  * 512MB:	2896k
8467  * 1024MB:	4096k
8468  * 2048MB:	5792k
8469  * 4096MB:	8192k
8470  * 8192MB:	11584k
8471  * 16384MB:	16384k
8472  */
8473 int __meminit init_per_zone_wmark_min(void)
8474 {
8475 	unsigned long lowmem_kbytes;
8476 	int new_min_free_kbytes;
8477 
8478 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8479 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8480 
8481 	if (new_min_free_kbytes > user_min_free_kbytes) {
8482 		min_free_kbytes = new_min_free_kbytes;
8483 		if (min_free_kbytes < 128)
8484 			min_free_kbytes = 128;
8485 		if (min_free_kbytes > 262144)
8486 			min_free_kbytes = 262144;
8487 	} else {
8488 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8489 				new_min_free_kbytes, user_min_free_kbytes);
8490 	}
8491 	setup_per_zone_wmarks();
8492 	refresh_zone_stat_thresholds();
8493 	setup_per_zone_lowmem_reserve();
8494 
8495 #ifdef CONFIG_NUMA
8496 	setup_min_unmapped_ratio();
8497 	setup_min_slab_ratio();
8498 #endif
8499 
8500 	khugepaged_min_free_kbytes_update();
8501 
8502 	return 0;
8503 }
8504 postcore_initcall(init_per_zone_wmark_min)
8505 
8506 /*
8507  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8508  *	that we can call two helper functions whenever min_free_kbytes
8509  *	changes.
8510  */
8511 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8512 		void *buffer, size_t *length, loff_t *ppos)
8513 {
8514 	int rc;
8515 
8516 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8517 	if (rc)
8518 		return rc;
8519 
8520 	if (write) {
8521 		user_min_free_kbytes = min_free_kbytes;
8522 		setup_per_zone_wmarks();
8523 	}
8524 	return 0;
8525 }
8526 
8527 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8528 		void *buffer, size_t *length, loff_t *ppos)
8529 {
8530 	int rc;
8531 
8532 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8533 	if (rc)
8534 		return rc;
8535 
8536 	if (write)
8537 		setup_per_zone_wmarks();
8538 
8539 	return 0;
8540 }
8541 
8542 #ifdef CONFIG_NUMA
8543 static void setup_min_unmapped_ratio(void)
8544 {
8545 	pg_data_t *pgdat;
8546 	struct zone *zone;
8547 
8548 	for_each_online_pgdat(pgdat)
8549 		pgdat->min_unmapped_pages = 0;
8550 
8551 	for_each_zone(zone)
8552 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8553 						         sysctl_min_unmapped_ratio) / 100;
8554 }
8555 
8556 
8557 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8558 		void *buffer, size_t *length, loff_t *ppos)
8559 {
8560 	int rc;
8561 
8562 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8563 	if (rc)
8564 		return rc;
8565 
8566 	setup_min_unmapped_ratio();
8567 
8568 	return 0;
8569 }
8570 
8571 static void setup_min_slab_ratio(void)
8572 {
8573 	pg_data_t *pgdat;
8574 	struct zone *zone;
8575 
8576 	for_each_online_pgdat(pgdat)
8577 		pgdat->min_slab_pages = 0;
8578 
8579 	for_each_zone(zone)
8580 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8581 						     sysctl_min_slab_ratio) / 100;
8582 }
8583 
8584 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8585 		void *buffer, size_t *length, loff_t *ppos)
8586 {
8587 	int rc;
8588 
8589 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8590 	if (rc)
8591 		return rc;
8592 
8593 	setup_min_slab_ratio();
8594 
8595 	return 0;
8596 }
8597 #endif
8598 
8599 /*
8600  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8601  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8602  *	whenever sysctl_lowmem_reserve_ratio changes.
8603  *
8604  * The reserve ratio obviously has absolutely no relation with the
8605  * minimum watermarks. The lowmem reserve ratio can only make sense
8606  * if in function of the boot time zone sizes.
8607  */
8608 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8609 		void *buffer, size_t *length, loff_t *ppos)
8610 {
8611 	int i;
8612 
8613 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8614 
8615 	for (i = 0; i < MAX_NR_ZONES; i++) {
8616 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8617 			sysctl_lowmem_reserve_ratio[i] = 0;
8618 	}
8619 
8620 	setup_per_zone_lowmem_reserve();
8621 	return 0;
8622 }
8623 
8624 /*
8625  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8626  * cpu. It is the fraction of total pages in each zone that a hot per cpu
8627  * pagelist can have before it gets flushed back to buddy allocator.
8628  */
8629 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8630 		int write, void *buffer, size_t *length, loff_t *ppos)
8631 {
8632 	struct zone *zone;
8633 	int old_percpu_pagelist_high_fraction;
8634 	int ret;
8635 
8636 	mutex_lock(&pcp_batch_high_lock);
8637 	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8638 
8639 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8640 	if (!write || ret < 0)
8641 		goto out;
8642 
8643 	/* Sanity checking to avoid pcp imbalance */
8644 	if (percpu_pagelist_high_fraction &&
8645 	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8646 		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8647 		ret = -EINVAL;
8648 		goto out;
8649 	}
8650 
8651 	/* No change? */
8652 	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8653 		goto out;
8654 
8655 	for_each_populated_zone(zone)
8656 		zone_set_pageset_high_and_batch(zone, 0);
8657 out:
8658 	mutex_unlock(&pcp_batch_high_lock);
8659 	return ret;
8660 }
8661 
8662 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8663 /*
8664  * Returns the number of pages that arch has reserved but
8665  * is not known to alloc_large_system_hash().
8666  */
8667 static unsigned long __init arch_reserved_kernel_pages(void)
8668 {
8669 	return 0;
8670 }
8671 #endif
8672 
8673 /*
8674  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8675  * machines. As memory size is increased the scale is also increased but at
8676  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8677  * quadruples the scale is increased by one, which means the size of hash table
8678  * only doubles, instead of quadrupling as well.
8679  * Because 32-bit systems cannot have large physical memory, where this scaling
8680  * makes sense, it is disabled on such platforms.
8681  */
8682 #if __BITS_PER_LONG > 32
8683 #define ADAPT_SCALE_BASE	(64ul << 30)
8684 #define ADAPT_SCALE_SHIFT	2
8685 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8686 #endif
8687 
8688 /*
8689  * allocate a large system hash table from bootmem
8690  * - it is assumed that the hash table must contain an exact power-of-2
8691  *   quantity of entries
8692  * - limit is the number of hash buckets, not the total allocation size
8693  */
8694 void *__init alloc_large_system_hash(const char *tablename,
8695 				     unsigned long bucketsize,
8696 				     unsigned long numentries,
8697 				     int scale,
8698 				     int flags,
8699 				     unsigned int *_hash_shift,
8700 				     unsigned int *_hash_mask,
8701 				     unsigned long low_limit,
8702 				     unsigned long high_limit)
8703 {
8704 	unsigned long long max = high_limit;
8705 	unsigned long log2qty, size;
8706 	void *table = NULL;
8707 	gfp_t gfp_flags;
8708 	bool virt;
8709 	bool huge;
8710 
8711 	/* allow the kernel cmdline to have a say */
8712 	if (!numentries) {
8713 		/* round applicable memory size up to nearest megabyte */
8714 		numentries = nr_kernel_pages;
8715 		numentries -= arch_reserved_kernel_pages();
8716 
8717 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8718 		if (PAGE_SHIFT < 20)
8719 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8720 
8721 #if __BITS_PER_LONG > 32
8722 		if (!high_limit) {
8723 			unsigned long adapt;
8724 
8725 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8726 			     adapt <<= ADAPT_SCALE_SHIFT)
8727 				scale++;
8728 		}
8729 #endif
8730 
8731 		/* limit to 1 bucket per 2^scale bytes of low memory */
8732 		if (scale > PAGE_SHIFT)
8733 			numentries >>= (scale - PAGE_SHIFT);
8734 		else
8735 			numentries <<= (PAGE_SHIFT - scale);
8736 
8737 		/* Make sure we've got at least a 0-order allocation.. */
8738 		if (unlikely(flags & HASH_SMALL)) {
8739 			/* Makes no sense without HASH_EARLY */
8740 			WARN_ON(!(flags & HASH_EARLY));
8741 			if (!(numentries >> *_hash_shift)) {
8742 				numentries = 1UL << *_hash_shift;
8743 				BUG_ON(!numentries);
8744 			}
8745 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8746 			numentries = PAGE_SIZE / bucketsize;
8747 	}
8748 	numentries = roundup_pow_of_two(numentries);
8749 
8750 	/* limit allocation size to 1/16 total memory by default */
8751 	if (max == 0) {
8752 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8753 		do_div(max, bucketsize);
8754 	}
8755 	max = min(max, 0x80000000ULL);
8756 
8757 	if (numentries < low_limit)
8758 		numentries = low_limit;
8759 	if (numentries > max)
8760 		numentries = max;
8761 
8762 	log2qty = ilog2(numentries);
8763 
8764 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8765 	do {
8766 		virt = false;
8767 		size = bucketsize << log2qty;
8768 		if (flags & HASH_EARLY) {
8769 			if (flags & HASH_ZERO)
8770 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8771 			else
8772 				table = memblock_alloc_raw(size,
8773 							   SMP_CACHE_BYTES);
8774 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8775 			table = __vmalloc(size, gfp_flags);
8776 			virt = true;
8777 			huge = is_vm_area_hugepages(table);
8778 		} else {
8779 			/*
8780 			 * If bucketsize is not a power-of-two, we may free
8781 			 * some pages at the end of hash table which
8782 			 * alloc_pages_exact() automatically does
8783 			 */
8784 			table = alloc_pages_exact(size, gfp_flags);
8785 			kmemleak_alloc(table, size, 1, gfp_flags);
8786 		}
8787 	} while (!table && size > PAGE_SIZE && --log2qty);
8788 
8789 	if (!table)
8790 		panic("Failed to allocate %s hash table\n", tablename);
8791 
8792 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8793 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8794 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
8795 
8796 	if (_hash_shift)
8797 		*_hash_shift = log2qty;
8798 	if (_hash_mask)
8799 		*_hash_mask = (1 << log2qty) - 1;
8800 
8801 	return table;
8802 }
8803 
8804 /*
8805  * This function checks whether pageblock includes unmovable pages or not.
8806  *
8807  * PageLRU check without isolation or lru_lock could race so that
8808  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
8809  * check without lock_page also may miss some movable non-lru pages at
8810  * race condition. So you can't expect this function should be exact.
8811  *
8812  * Returns a page without holding a reference. If the caller wants to
8813  * dereference that page (e.g., dumping), it has to make sure that it
8814  * cannot get removed (e.g., via memory unplug) concurrently.
8815  *
8816  */
8817 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8818 				 int migratetype, int flags)
8819 {
8820 	unsigned long iter = 0;
8821 	unsigned long pfn = page_to_pfn(page);
8822 	unsigned long offset = pfn % pageblock_nr_pages;
8823 
8824 	if (is_migrate_cma_page(page)) {
8825 		/*
8826 		 * CMA allocations (alloc_contig_range) really need to mark
8827 		 * isolate CMA pageblocks even when they are not movable in fact
8828 		 * so consider them movable here.
8829 		 */
8830 		if (is_migrate_cma(migratetype))
8831 			return NULL;
8832 
8833 		return page;
8834 	}
8835 
8836 	for (; iter < pageblock_nr_pages - offset; iter++) {
8837 		page = pfn_to_page(pfn + iter);
8838 
8839 		/*
8840 		 * Both, bootmem allocations and memory holes are marked
8841 		 * PG_reserved and are unmovable. We can even have unmovable
8842 		 * allocations inside ZONE_MOVABLE, for example when
8843 		 * specifying "movablecore".
8844 		 */
8845 		if (PageReserved(page))
8846 			return page;
8847 
8848 		/*
8849 		 * If the zone is movable and we have ruled out all reserved
8850 		 * pages then it should be reasonably safe to assume the rest
8851 		 * is movable.
8852 		 */
8853 		if (zone_idx(zone) == ZONE_MOVABLE)
8854 			continue;
8855 
8856 		/*
8857 		 * Hugepages are not in LRU lists, but they're movable.
8858 		 * THPs are on the LRU, but need to be counted as #small pages.
8859 		 * We need not scan over tail pages because we don't
8860 		 * handle each tail page individually in migration.
8861 		 */
8862 		if (PageHuge(page) || PageTransCompound(page)) {
8863 			struct page *head = compound_head(page);
8864 			unsigned int skip_pages;
8865 
8866 			if (PageHuge(page)) {
8867 				if (!hugepage_migration_supported(page_hstate(head)))
8868 					return page;
8869 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8870 				return page;
8871 			}
8872 
8873 			skip_pages = compound_nr(head) - (page - head);
8874 			iter += skip_pages - 1;
8875 			continue;
8876 		}
8877 
8878 		/*
8879 		 * We can't use page_count without pin a page
8880 		 * because another CPU can free compound page.
8881 		 * This check already skips compound tails of THP
8882 		 * because their page->_refcount is zero at all time.
8883 		 */
8884 		if (!page_ref_count(page)) {
8885 			if (PageBuddy(page))
8886 				iter += (1 << buddy_order(page)) - 1;
8887 			continue;
8888 		}
8889 
8890 		/*
8891 		 * The HWPoisoned page may be not in buddy system, and
8892 		 * page_count() is not 0.
8893 		 */
8894 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8895 			continue;
8896 
8897 		/*
8898 		 * We treat all PageOffline() pages as movable when offlining
8899 		 * to give drivers a chance to decrement their reference count
8900 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8901 		 * can be offlined as there are no direct references anymore.
8902 		 * For actually unmovable PageOffline() where the driver does
8903 		 * not support this, we will fail later when trying to actually
8904 		 * move these pages that still have a reference count > 0.
8905 		 * (false negatives in this function only)
8906 		 */
8907 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8908 			continue;
8909 
8910 		if (__PageMovable(page) || PageLRU(page))
8911 			continue;
8912 
8913 		/*
8914 		 * If there are RECLAIMABLE pages, we need to check
8915 		 * it.  But now, memory offline itself doesn't call
8916 		 * shrink_node_slabs() and it still to be fixed.
8917 		 */
8918 		return page;
8919 	}
8920 	return NULL;
8921 }
8922 
8923 #ifdef CONFIG_CONTIG_ALLOC
8924 static unsigned long pfn_max_align_down(unsigned long pfn)
8925 {
8926 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8927 			     pageblock_nr_pages) - 1);
8928 }
8929 
8930 static unsigned long pfn_max_align_up(unsigned long pfn)
8931 {
8932 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8933 				pageblock_nr_pages));
8934 }
8935 
8936 #if defined(CONFIG_DYNAMIC_DEBUG) || \
8937 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
8938 /* Usage: See admin-guide/dynamic-debug-howto.rst */
8939 static void alloc_contig_dump_pages(struct list_head *page_list)
8940 {
8941 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
8942 
8943 	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
8944 		struct page *page;
8945 
8946 		dump_stack();
8947 		list_for_each_entry(page, page_list, lru)
8948 			dump_page(page, "migration failure");
8949 	}
8950 }
8951 #else
8952 static inline void alloc_contig_dump_pages(struct list_head *page_list)
8953 {
8954 }
8955 #endif
8956 
8957 /* [start, end) must belong to a single zone. */
8958 static int __alloc_contig_migrate_range(struct compact_control *cc,
8959 					unsigned long start, unsigned long end)
8960 {
8961 	/* This function is based on compact_zone() from compaction.c. */
8962 	unsigned int nr_reclaimed;
8963 	unsigned long pfn = start;
8964 	unsigned int tries = 0;
8965 	int ret = 0;
8966 	struct migration_target_control mtc = {
8967 		.nid = zone_to_nid(cc->zone),
8968 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8969 	};
8970 
8971 	lru_cache_disable();
8972 
8973 	while (pfn < end || !list_empty(&cc->migratepages)) {
8974 		if (fatal_signal_pending(current)) {
8975 			ret = -EINTR;
8976 			break;
8977 		}
8978 
8979 		if (list_empty(&cc->migratepages)) {
8980 			cc->nr_migratepages = 0;
8981 			ret = isolate_migratepages_range(cc, pfn, end);
8982 			if (ret && ret != -EAGAIN)
8983 				break;
8984 			pfn = cc->migrate_pfn;
8985 			tries = 0;
8986 		} else if (++tries == 5) {
8987 			ret = -EBUSY;
8988 			break;
8989 		}
8990 
8991 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8992 							&cc->migratepages);
8993 		cc->nr_migratepages -= nr_reclaimed;
8994 
8995 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8996 			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
8997 
8998 		/*
8999 		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
9000 		 * to retry again over this error, so do the same here.
9001 		 */
9002 		if (ret == -ENOMEM)
9003 			break;
9004 	}
9005 
9006 	lru_cache_enable();
9007 	if (ret < 0) {
9008 		if (ret == -EBUSY)
9009 			alloc_contig_dump_pages(&cc->migratepages);
9010 		putback_movable_pages(&cc->migratepages);
9011 		return ret;
9012 	}
9013 	return 0;
9014 }
9015 
9016 /**
9017  * alloc_contig_range() -- tries to allocate given range of pages
9018  * @start:	start PFN to allocate
9019  * @end:	one-past-the-last PFN to allocate
9020  * @migratetype:	migratetype of the underlying pageblocks (either
9021  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
9022  *			in range must have the same migratetype and it must
9023  *			be either of the two.
9024  * @gfp_mask:	GFP mask to use during compaction
9025  *
9026  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
9027  * aligned.  The PFN range must belong to a single zone.
9028  *
9029  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9030  * pageblocks in the range.  Once isolated, the pageblocks should not
9031  * be modified by others.
9032  *
9033  * Return: zero on success or negative error code.  On success all
9034  * pages which PFN is in [start, end) are allocated for the caller and
9035  * need to be freed with free_contig_range().
9036  */
9037 int alloc_contig_range(unsigned long start, unsigned long end,
9038 		       unsigned migratetype, gfp_t gfp_mask)
9039 {
9040 	unsigned long outer_start, outer_end;
9041 	unsigned int order;
9042 	int ret = 0;
9043 
9044 	struct compact_control cc = {
9045 		.nr_migratepages = 0,
9046 		.order = -1,
9047 		.zone = page_zone(pfn_to_page(start)),
9048 		.mode = MIGRATE_SYNC,
9049 		.ignore_skip_hint = true,
9050 		.no_set_skip_hint = true,
9051 		.gfp_mask = current_gfp_context(gfp_mask),
9052 		.alloc_contig = true,
9053 	};
9054 	INIT_LIST_HEAD(&cc.migratepages);
9055 
9056 	/*
9057 	 * What we do here is we mark all pageblocks in range as
9058 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
9059 	 * have different sizes, and due to the way page allocator
9060 	 * work, we align the range to biggest of the two pages so
9061 	 * that page allocator won't try to merge buddies from
9062 	 * different pageblocks and change MIGRATE_ISOLATE to some
9063 	 * other migration type.
9064 	 *
9065 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9066 	 * migrate the pages from an unaligned range (ie. pages that
9067 	 * we are interested in).  This will put all the pages in
9068 	 * range back to page allocator as MIGRATE_ISOLATE.
9069 	 *
9070 	 * When this is done, we take the pages in range from page
9071 	 * allocator removing them from the buddy system.  This way
9072 	 * page allocator will never consider using them.
9073 	 *
9074 	 * This lets us mark the pageblocks back as
9075 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9076 	 * aligned range but not in the unaligned, original range are
9077 	 * put back to page allocator so that buddy can use them.
9078 	 */
9079 
9080 	ret = start_isolate_page_range(pfn_max_align_down(start),
9081 				       pfn_max_align_up(end), migratetype, 0);
9082 	if (ret)
9083 		return ret;
9084 
9085 	drain_all_pages(cc.zone);
9086 
9087 	/*
9088 	 * In case of -EBUSY, we'd like to know which page causes problem.
9089 	 * So, just fall through. test_pages_isolated() has a tracepoint
9090 	 * which will report the busy page.
9091 	 *
9092 	 * It is possible that busy pages could become available before
9093 	 * the call to test_pages_isolated, and the range will actually be
9094 	 * allocated.  So, if we fall through be sure to clear ret so that
9095 	 * -EBUSY is not accidentally used or returned to caller.
9096 	 */
9097 	ret = __alloc_contig_migrate_range(&cc, start, end);
9098 	if (ret && ret != -EBUSY)
9099 		goto done;
9100 	ret = 0;
9101 
9102 	/*
9103 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
9104 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
9105 	 * more, all pages in [start, end) are free in page allocator.
9106 	 * What we are going to do is to allocate all pages from
9107 	 * [start, end) (that is remove them from page allocator).
9108 	 *
9109 	 * The only problem is that pages at the beginning and at the
9110 	 * end of interesting range may be not aligned with pages that
9111 	 * page allocator holds, ie. they can be part of higher order
9112 	 * pages.  Because of this, we reserve the bigger range and
9113 	 * once this is done free the pages we are not interested in.
9114 	 *
9115 	 * We don't have to hold zone->lock here because the pages are
9116 	 * isolated thus they won't get removed from buddy.
9117 	 */
9118 
9119 	order = 0;
9120 	outer_start = start;
9121 	while (!PageBuddy(pfn_to_page(outer_start))) {
9122 		if (++order >= MAX_ORDER) {
9123 			outer_start = start;
9124 			break;
9125 		}
9126 		outer_start &= ~0UL << order;
9127 	}
9128 
9129 	if (outer_start != start) {
9130 		order = buddy_order(pfn_to_page(outer_start));
9131 
9132 		/*
9133 		 * outer_start page could be small order buddy page and
9134 		 * it doesn't include start page. Adjust outer_start
9135 		 * in this case to report failed page properly
9136 		 * on tracepoint in test_pages_isolated()
9137 		 */
9138 		if (outer_start + (1UL << order) <= start)
9139 			outer_start = start;
9140 	}
9141 
9142 	/* Make sure the range is really isolated. */
9143 	if (test_pages_isolated(outer_start, end, 0)) {
9144 		ret = -EBUSY;
9145 		goto done;
9146 	}
9147 
9148 	/* Grab isolated pages from freelists. */
9149 	outer_end = isolate_freepages_range(&cc, outer_start, end);
9150 	if (!outer_end) {
9151 		ret = -EBUSY;
9152 		goto done;
9153 	}
9154 
9155 	/* Free head and tail (if any) */
9156 	if (start != outer_start)
9157 		free_contig_range(outer_start, start - outer_start);
9158 	if (end != outer_end)
9159 		free_contig_range(end, outer_end - end);
9160 
9161 done:
9162 	undo_isolate_page_range(pfn_max_align_down(start),
9163 				pfn_max_align_up(end), migratetype);
9164 	return ret;
9165 }
9166 EXPORT_SYMBOL(alloc_contig_range);
9167 
9168 static int __alloc_contig_pages(unsigned long start_pfn,
9169 				unsigned long nr_pages, gfp_t gfp_mask)
9170 {
9171 	unsigned long end_pfn = start_pfn + nr_pages;
9172 
9173 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9174 				  gfp_mask);
9175 }
9176 
9177 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9178 				   unsigned long nr_pages)
9179 {
9180 	unsigned long i, end_pfn = start_pfn + nr_pages;
9181 	struct page *page;
9182 
9183 	for (i = start_pfn; i < end_pfn; i++) {
9184 		page = pfn_to_online_page(i);
9185 		if (!page)
9186 			return false;
9187 
9188 		if (page_zone(page) != z)
9189 			return false;
9190 
9191 		if (PageReserved(page))
9192 			return false;
9193 	}
9194 	return true;
9195 }
9196 
9197 static bool zone_spans_last_pfn(const struct zone *zone,
9198 				unsigned long start_pfn, unsigned long nr_pages)
9199 {
9200 	unsigned long last_pfn = start_pfn + nr_pages - 1;
9201 
9202 	return zone_spans_pfn(zone, last_pfn);
9203 }
9204 
9205 /**
9206  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9207  * @nr_pages:	Number of contiguous pages to allocate
9208  * @gfp_mask:	GFP mask to limit search and used during compaction
9209  * @nid:	Target node
9210  * @nodemask:	Mask for other possible nodes
9211  *
9212  * This routine is a wrapper around alloc_contig_range(). It scans over zones
9213  * on an applicable zonelist to find a contiguous pfn range which can then be
9214  * tried for allocation with alloc_contig_range(). This routine is intended
9215  * for allocation requests which can not be fulfilled with the buddy allocator.
9216  *
9217  * The allocated memory is always aligned to a page boundary. If nr_pages is a
9218  * power of two then the alignment is guaranteed to be to the given nr_pages
9219  * (e.g. 1GB request would be aligned to 1GB).
9220  *
9221  * Allocated pages can be freed with free_contig_range() or by manually calling
9222  * __free_page() on each allocated page.
9223  *
9224  * Return: pointer to contiguous pages on success, or NULL if not successful.
9225  */
9226 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9227 				int nid, nodemask_t *nodemask)
9228 {
9229 	unsigned long ret, pfn, flags;
9230 	struct zonelist *zonelist;
9231 	struct zone *zone;
9232 	struct zoneref *z;
9233 
9234 	zonelist = node_zonelist(nid, gfp_mask);
9235 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
9236 					gfp_zone(gfp_mask), nodemask) {
9237 		spin_lock_irqsave(&zone->lock, flags);
9238 
9239 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9240 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9241 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9242 				/*
9243 				 * We release the zone lock here because
9244 				 * alloc_contig_range() will also lock the zone
9245 				 * at some point. If there's an allocation
9246 				 * spinning on this lock, it may win the race
9247 				 * and cause alloc_contig_range() to fail...
9248 				 */
9249 				spin_unlock_irqrestore(&zone->lock, flags);
9250 				ret = __alloc_contig_pages(pfn, nr_pages,
9251 							gfp_mask);
9252 				if (!ret)
9253 					return pfn_to_page(pfn);
9254 				spin_lock_irqsave(&zone->lock, flags);
9255 			}
9256 			pfn += nr_pages;
9257 		}
9258 		spin_unlock_irqrestore(&zone->lock, flags);
9259 	}
9260 	return NULL;
9261 }
9262 #endif /* CONFIG_CONTIG_ALLOC */
9263 
9264 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9265 {
9266 	unsigned long count = 0;
9267 
9268 	for (; nr_pages--; pfn++) {
9269 		struct page *page = pfn_to_page(pfn);
9270 
9271 		count += page_count(page) != 1;
9272 		__free_page(page);
9273 	}
9274 	WARN(count != 0, "%lu pages are still in use!\n", count);
9275 }
9276 EXPORT_SYMBOL(free_contig_range);
9277 
9278 /*
9279  * The zone indicated has a new number of managed_pages; batch sizes and percpu
9280  * page high values need to be recalculated.
9281  */
9282 void zone_pcp_update(struct zone *zone, int cpu_online)
9283 {
9284 	mutex_lock(&pcp_batch_high_lock);
9285 	zone_set_pageset_high_and_batch(zone, cpu_online);
9286 	mutex_unlock(&pcp_batch_high_lock);
9287 }
9288 
9289 /*
9290  * Effectively disable pcplists for the zone by setting the high limit to 0
9291  * and draining all cpus. A concurrent page freeing on another CPU that's about
9292  * to put the page on pcplist will either finish before the drain and the page
9293  * will be drained, or observe the new high limit and skip the pcplist.
9294  *
9295  * Must be paired with a call to zone_pcp_enable().
9296  */
9297 void zone_pcp_disable(struct zone *zone)
9298 {
9299 	mutex_lock(&pcp_batch_high_lock);
9300 	__zone_set_pageset_high_and_batch(zone, 0, 1);
9301 	__drain_all_pages(zone, true);
9302 }
9303 
9304 void zone_pcp_enable(struct zone *zone)
9305 {
9306 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9307 	mutex_unlock(&pcp_batch_high_lock);
9308 }
9309 
9310 void zone_pcp_reset(struct zone *zone)
9311 {
9312 	int cpu;
9313 	struct per_cpu_zonestat *pzstats;
9314 
9315 	if (zone->per_cpu_pageset != &boot_pageset) {
9316 		for_each_online_cpu(cpu) {
9317 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9318 			drain_zonestat(zone, pzstats);
9319 		}
9320 		free_percpu(zone->per_cpu_pageset);
9321 		free_percpu(zone->per_cpu_zonestats);
9322 		zone->per_cpu_pageset = &boot_pageset;
9323 		zone->per_cpu_zonestats = &boot_zonestats;
9324 	}
9325 }
9326 
9327 #ifdef CONFIG_MEMORY_HOTREMOVE
9328 /*
9329  * All pages in the range must be in a single zone, must not contain holes,
9330  * must span full sections, and must be isolated before calling this function.
9331  */
9332 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9333 {
9334 	unsigned long pfn = start_pfn;
9335 	struct page *page;
9336 	struct zone *zone;
9337 	unsigned int order;
9338 	unsigned long flags;
9339 
9340 	offline_mem_sections(pfn, end_pfn);
9341 	zone = page_zone(pfn_to_page(pfn));
9342 	spin_lock_irqsave(&zone->lock, flags);
9343 	while (pfn < end_pfn) {
9344 		page = pfn_to_page(pfn);
9345 		/*
9346 		 * The HWPoisoned page may be not in buddy system, and
9347 		 * page_count() is not 0.
9348 		 */
9349 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9350 			pfn++;
9351 			continue;
9352 		}
9353 		/*
9354 		 * At this point all remaining PageOffline() pages have a
9355 		 * reference count of 0 and can simply be skipped.
9356 		 */
9357 		if (PageOffline(page)) {
9358 			BUG_ON(page_count(page));
9359 			BUG_ON(PageBuddy(page));
9360 			pfn++;
9361 			continue;
9362 		}
9363 
9364 		BUG_ON(page_count(page));
9365 		BUG_ON(!PageBuddy(page));
9366 		order = buddy_order(page);
9367 		del_page_from_free_list(page, zone, order);
9368 		pfn += (1 << order);
9369 	}
9370 	spin_unlock_irqrestore(&zone->lock, flags);
9371 }
9372 #endif
9373 
9374 bool is_free_buddy_page(struct page *page)
9375 {
9376 	struct zone *zone = page_zone(page);
9377 	unsigned long pfn = page_to_pfn(page);
9378 	unsigned long flags;
9379 	unsigned int order;
9380 
9381 	spin_lock_irqsave(&zone->lock, flags);
9382 	for (order = 0; order < MAX_ORDER; order++) {
9383 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9384 
9385 		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
9386 			break;
9387 	}
9388 	spin_unlock_irqrestore(&zone->lock, flags);
9389 
9390 	return order < MAX_ORDER;
9391 }
9392 
9393 #ifdef CONFIG_MEMORY_FAILURE
9394 /*
9395  * Break down a higher-order page in sub-pages, and keep our target out of
9396  * buddy allocator.
9397  */
9398 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9399 				   struct page *target, int low, int high,
9400 				   int migratetype)
9401 {
9402 	unsigned long size = 1 << high;
9403 	struct page *current_buddy, *next_page;
9404 
9405 	while (high > low) {
9406 		high--;
9407 		size >>= 1;
9408 
9409 		if (target >= &page[size]) {
9410 			next_page = page + size;
9411 			current_buddy = page;
9412 		} else {
9413 			next_page = page;
9414 			current_buddy = page + size;
9415 		}
9416 
9417 		if (set_page_guard(zone, current_buddy, high, migratetype))
9418 			continue;
9419 
9420 		if (current_buddy != target) {
9421 			add_to_free_list(current_buddy, zone, high, migratetype);
9422 			set_buddy_order(current_buddy, high);
9423 			page = next_page;
9424 		}
9425 	}
9426 }
9427 
9428 /*
9429  * Take a page that will be marked as poisoned off the buddy allocator.
9430  */
9431 bool take_page_off_buddy(struct page *page)
9432 {
9433 	struct zone *zone = page_zone(page);
9434 	unsigned long pfn = page_to_pfn(page);
9435 	unsigned long flags;
9436 	unsigned int order;
9437 	bool ret = false;
9438 
9439 	spin_lock_irqsave(&zone->lock, flags);
9440 	for (order = 0; order < MAX_ORDER; order++) {
9441 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9442 		int page_order = buddy_order(page_head);
9443 
9444 		if (PageBuddy(page_head) && page_order >= order) {
9445 			unsigned long pfn_head = page_to_pfn(page_head);
9446 			int migratetype = get_pfnblock_migratetype(page_head,
9447 								   pfn_head);
9448 
9449 			del_page_from_free_list(page_head, zone, page_order);
9450 			break_down_buddy_pages(zone, page_head, page, 0,
9451 						page_order, migratetype);
9452 			if (!is_migrate_isolate(migratetype))
9453 				__mod_zone_freepage_state(zone, -1, migratetype);
9454 			ret = true;
9455 			break;
9456 		}
9457 		if (page_count(page_head) > 0)
9458 			break;
9459 	}
9460 	spin_unlock_irqrestore(&zone->lock, flags);
9461 	return ret;
9462 }
9463 #endif
9464