xref: /openbmc/linux/mm/page_alloc.c (revision cb325ddd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list, the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/swapops.h>
23 #include <linux/interrupt.h>
24 #include <linux/pagemap.h>
25 #include <linux/jiffies.h>
26 #include <linux/memblock.h>
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/kasan.h>
30 #include <linux/module.h>
31 #include <linux/suspend.h>
32 #include <linux/pagevec.h>
33 #include <linux/blkdev.h>
34 #include <linux/slab.h>
35 #include <linux/ratelimit.h>
36 #include <linux/oom.h>
37 #include <linux/topology.h>
38 #include <linux/sysctl.h>
39 #include <linux/cpu.h>
40 #include <linux/cpuset.h>
41 #include <linux/memory_hotplug.h>
42 #include <linux/nodemask.h>
43 #include <linux/vmalloc.h>
44 #include <linux/vmstat.h>
45 #include <linux/mempolicy.h>
46 #include <linux/memremap.h>
47 #include <linux/stop_machine.h>
48 #include <linux/random.h>
49 #include <linux/sort.h>
50 #include <linux/pfn.h>
51 #include <linux/backing-dev.h>
52 #include <linux/fault-inject.h>
53 #include <linux/page-isolation.h>
54 #include <linux/debugobjects.h>
55 #include <linux/kmemleak.h>
56 #include <linux/compaction.h>
57 #include <trace/events/kmem.h>
58 #include <trace/events/oom.h>
59 #include <linux/prefetch.h>
60 #include <linux/mm_inline.h>
61 #include <linux/mmu_notifier.h>
62 #include <linux/migrate.h>
63 #include <linux/hugetlb.h>
64 #include <linux/sched/rt.h>
65 #include <linux/sched/mm.h>
66 #include <linux/page_owner.h>
67 #include <linux/page_table_check.h>
68 #include <linux/kthread.h>
69 #include <linux/memcontrol.h>
70 #include <linux/ftrace.h>
71 #include <linux/lockdep.h>
72 #include <linux/nmi.h>
73 #include <linux/psi.h>
74 #include <linux/padata.h>
75 #include <linux/khugepaged.h>
76 #include <linux/buffer_head.h>
77 #include <linux/delayacct.h>
78 #include <asm/sections.h>
79 #include <asm/tlbflush.h>
80 #include <asm/div64.h>
81 #include "internal.h"
82 #include "shuffle.h"
83 #include "page_reporting.h"
84 
85 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
86 typedef int __bitwise fpi_t;
87 
88 /* No special request */
89 #define FPI_NONE		((__force fpi_t)0)
90 
91 /*
92  * Skip free page reporting notification for the (possibly merged) page.
93  * This does not hinder free page reporting from grabbing the page,
94  * reporting it and marking it "reported" -  it only skips notifying
95  * the free page reporting infrastructure about a newly freed page. For
96  * example, used when temporarily pulling a page from a freelist and
97  * putting it back unmodified.
98  */
99 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
100 
101 /*
102  * Place the (possibly merged) page to the tail of the freelist. Will ignore
103  * page shuffling (relevant code - e.g., memory onlining - is expected to
104  * shuffle the whole zone).
105  *
106  * Note: No code should rely on this flag for correctness - it's purely
107  *       to allow for optimizations when handing back either fresh pages
108  *       (memory onlining) or untouched pages (page isolation, free page
109  *       reporting).
110  */
111 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
112 
113 /*
114  * Don't poison memory with KASAN (only for the tag-based modes).
115  * During boot, all non-reserved memblock memory is exposed to page_alloc.
116  * Poisoning all that memory lengthens boot time, especially on systems with
117  * large amount of RAM. This flag is used to skip that poisoning.
118  * This is only done for the tag-based KASAN modes, as those are able to
119  * detect memory corruptions with the memory tags assigned by default.
120  * All memory allocated normally after boot gets poisoned as usual.
121  */
122 #define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
123 
124 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
125 static DEFINE_MUTEX(pcp_batch_high_lock);
126 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
127 
128 struct pagesets {
129 	local_lock_t lock;
130 };
131 static DEFINE_PER_CPU(struct pagesets, pagesets) __maybe_unused = {
132 	.lock = INIT_LOCAL_LOCK(lock),
133 };
134 
135 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
136 DEFINE_PER_CPU(int, numa_node);
137 EXPORT_PER_CPU_SYMBOL(numa_node);
138 #endif
139 
140 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
141 
142 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
143 /*
144  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
145  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
146  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
147  * defined in <linux/topology.h>.
148  */
149 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
150 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
151 #endif
152 
153 /* work_structs for global per-cpu drains */
154 struct pcpu_drain {
155 	struct zone *zone;
156 	struct work_struct work;
157 };
158 static DEFINE_MUTEX(pcpu_drain_mutex);
159 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
160 
161 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
162 volatile unsigned long latent_entropy __latent_entropy;
163 EXPORT_SYMBOL(latent_entropy);
164 #endif
165 
166 /*
167  * Array of node states.
168  */
169 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
170 	[N_POSSIBLE] = NODE_MASK_ALL,
171 	[N_ONLINE] = { { [0] = 1UL } },
172 #ifndef CONFIG_NUMA
173 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
174 #ifdef CONFIG_HIGHMEM
175 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
176 #endif
177 	[N_MEMORY] = { { [0] = 1UL } },
178 	[N_CPU] = { { [0] = 1UL } },
179 #endif	/* NUMA */
180 };
181 EXPORT_SYMBOL(node_states);
182 
183 atomic_long_t _totalram_pages __read_mostly;
184 EXPORT_SYMBOL(_totalram_pages);
185 unsigned long totalreserve_pages __read_mostly;
186 unsigned long totalcma_pages __read_mostly;
187 
188 int percpu_pagelist_high_fraction;
189 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
190 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
191 EXPORT_SYMBOL(init_on_alloc);
192 
193 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
194 EXPORT_SYMBOL(init_on_free);
195 
196 static bool _init_on_alloc_enabled_early __read_mostly
197 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
198 static int __init early_init_on_alloc(char *buf)
199 {
200 
201 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
202 }
203 early_param("init_on_alloc", early_init_on_alloc);
204 
205 static bool _init_on_free_enabled_early __read_mostly
206 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
207 static int __init early_init_on_free(char *buf)
208 {
209 	return kstrtobool(buf, &_init_on_free_enabled_early);
210 }
211 early_param("init_on_free", early_init_on_free);
212 
213 /*
214  * A cached value of the page's pageblock's migratetype, used when the page is
215  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
216  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
217  * Also the migratetype set in the page does not necessarily match the pcplist
218  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
219  * other index - this ensures that it will be put on the correct CMA freelist.
220  */
221 static inline int get_pcppage_migratetype(struct page *page)
222 {
223 	return page->index;
224 }
225 
226 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
227 {
228 	page->index = migratetype;
229 }
230 
231 #ifdef CONFIG_PM_SLEEP
232 /*
233  * The following functions are used by the suspend/hibernate code to temporarily
234  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
235  * while devices are suspended.  To avoid races with the suspend/hibernate code,
236  * they should always be called with system_transition_mutex held
237  * (gfp_allowed_mask also should only be modified with system_transition_mutex
238  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
239  * with that modification).
240  */
241 
242 static gfp_t saved_gfp_mask;
243 
244 void pm_restore_gfp_mask(void)
245 {
246 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
247 	if (saved_gfp_mask) {
248 		gfp_allowed_mask = saved_gfp_mask;
249 		saved_gfp_mask = 0;
250 	}
251 }
252 
253 void pm_restrict_gfp_mask(void)
254 {
255 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
256 	WARN_ON(saved_gfp_mask);
257 	saved_gfp_mask = gfp_allowed_mask;
258 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
259 }
260 
261 bool pm_suspended_storage(void)
262 {
263 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
264 		return false;
265 	return true;
266 }
267 #endif /* CONFIG_PM_SLEEP */
268 
269 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
270 unsigned int pageblock_order __read_mostly;
271 #endif
272 
273 static void __free_pages_ok(struct page *page, unsigned int order,
274 			    fpi_t fpi_flags);
275 
276 /*
277  * results with 256, 32 in the lowmem_reserve sysctl:
278  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
279  *	1G machine -> (16M dma, 784M normal, 224M high)
280  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
281  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
282  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
283  *
284  * TBD: should special case ZONE_DMA32 machines here - in those we normally
285  * don't need any ZONE_NORMAL reservation
286  */
287 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
288 #ifdef CONFIG_ZONE_DMA
289 	[ZONE_DMA] = 256,
290 #endif
291 #ifdef CONFIG_ZONE_DMA32
292 	[ZONE_DMA32] = 256,
293 #endif
294 	[ZONE_NORMAL] = 32,
295 #ifdef CONFIG_HIGHMEM
296 	[ZONE_HIGHMEM] = 0,
297 #endif
298 	[ZONE_MOVABLE] = 0,
299 };
300 
301 static char * const zone_names[MAX_NR_ZONES] = {
302 #ifdef CONFIG_ZONE_DMA
303 	 "DMA",
304 #endif
305 #ifdef CONFIG_ZONE_DMA32
306 	 "DMA32",
307 #endif
308 	 "Normal",
309 #ifdef CONFIG_HIGHMEM
310 	 "HighMem",
311 #endif
312 	 "Movable",
313 #ifdef CONFIG_ZONE_DEVICE
314 	 "Device",
315 #endif
316 };
317 
318 const char * const migratetype_names[MIGRATE_TYPES] = {
319 	"Unmovable",
320 	"Movable",
321 	"Reclaimable",
322 	"HighAtomic",
323 #ifdef CONFIG_CMA
324 	"CMA",
325 #endif
326 #ifdef CONFIG_MEMORY_ISOLATION
327 	"Isolate",
328 #endif
329 };
330 
331 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
332 	[NULL_COMPOUND_DTOR] = NULL,
333 	[COMPOUND_PAGE_DTOR] = free_compound_page,
334 #ifdef CONFIG_HUGETLB_PAGE
335 	[HUGETLB_PAGE_DTOR] = free_huge_page,
336 #endif
337 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
338 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
339 #endif
340 };
341 
342 int min_free_kbytes = 1024;
343 int user_min_free_kbytes = -1;
344 int watermark_boost_factor __read_mostly = 15000;
345 int watermark_scale_factor = 10;
346 
347 static unsigned long nr_kernel_pages __initdata;
348 static unsigned long nr_all_pages __initdata;
349 static unsigned long dma_reserve __initdata;
350 
351 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
352 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
353 static unsigned long required_kernelcore __initdata;
354 static unsigned long required_kernelcore_percent __initdata;
355 static unsigned long required_movablecore __initdata;
356 static unsigned long required_movablecore_percent __initdata;
357 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
358 static bool mirrored_kernelcore __meminitdata;
359 
360 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
361 int movable_zone;
362 EXPORT_SYMBOL(movable_zone);
363 
364 #if MAX_NUMNODES > 1
365 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
366 unsigned int nr_online_nodes __read_mostly = 1;
367 EXPORT_SYMBOL(nr_node_ids);
368 EXPORT_SYMBOL(nr_online_nodes);
369 #endif
370 
371 int page_group_by_mobility_disabled __read_mostly;
372 
373 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
374 /*
375  * During boot we initialize deferred pages on-demand, as needed, but once
376  * page_alloc_init_late() has finished, the deferred pages are all initialized,
377  * and we can permanently disable that path.
378  */
379 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
380 
381 /*
382  * Calling kasan_poison_pages() only after deferred memory initialization
383  * has completed. Poisoning pages during deferred memory init will greatly
384  * lengthen the process and cause problem in large memory systems as the
385  * deferred pages initialization is done with interrupt disabled.
386  *
387  * Assuming that there will be no reference to those newly initialized
388  * pages before they are ever allocated, this should have no effect on
389  * KASAN memory tracking as the poison will be properly inserted at page
390  * allocation time. The only corner case is when pages are allocated by
391  * on-demand allocation and then freed again before the deferred pages
392  * initialization is done, but this is not likely to happen.
393  */
394 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
395 {
396 	return static_branch_unlikely(&deferred_pages) ||
397 	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
398 		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
399 	       PageSkipKASanPoison(page);
400 }
401 
402 /* Returns true if the struct page for the pfn is uninitialised */
403 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
404 {
405 	int nid = early_pfn_to_nid(pfn);
406 
407 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
408 		return true;
409 
410 	return false;
411 }
412 
413 /*
414  * Returns true when the remaining initialisation should be deferred until
415  * later in the boot cycle when it can be parallelised.
416  */
417 static bool __meminit
418 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
419 {
420 	static unsigned long prev_end_pfn, nr_initialised;
421 
422 	/*
423 	 * prev_end_pfn static that contains the end of previous zone
424 	 * No need to protect because called very early in boot before smp_init.
425 	 */
426 	if (prev_end_pfn != end_pfn) {
427 		prev_end_pfn = end_pfn;
428 		nr_initialised = 0;
429 	}
430 
431 	/* Always populate low zones for address-constrained allocations */
432 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
433 		return false;
434 
435 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
436 		return true;
437 	/*
438 	 * We start only with one section of pages, more pages are added as
439 	 * needed until the rest of deferred pages are initialized.
440 	 */
441 	nr_initialised++;
442 	if ((nr_initialised > PAGES_PER_SECTION) &&
443 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
444 		NODE_DATA(nid)->first_deferred_pfn = pfn;
445 		return true;
446 	}
447 	return false;
448 }
449 #else
450 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
451 {
452 	return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
453 		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
454 	       PageSkipKASanPoison(page);
455 }
456 
457 static inline bool early_page_uninitialised(unsigned long pfn)
458 {
459 	return false;
460 }
461 
462 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
463 {
464 	return false;
465 }
466 #endif
467 
468 /* Return a pointer to the bitmap storing bits affecting a block of pages */
469 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
470 							unsigned long pfn)
471 {
472 #ifdef CONFIG_SPARSEMEM
473 	return section_to_usemap(__pfn_to_section(pfn));
474 #else
475 	return page_zone(page)->pageblock_flags;
476 #endif /* CONFIG_SPARSEMEM */
477 }
478 
479 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
480 {
481 #ifdef CONFIG_SPARSEMEM
482 	pfn &= (PAGES_PER_SECTION-1);
483 #else
484 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
485 #endif /* CONFIG_SPARSEMEM */
486 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
487 }
488 
489 static __always_inline
490 unsigned long __get_pfnblock_flags_mask(const struct page *page,
491 					unsigned long pfn,
492 					unsigned long mask)
493 {
494 	unsigned long *bitmap;
495 	unsigned long bitidx, word_bitidx;
496 	unsigned long word;
497 
498 	bitmap = get_pageblock_bitmap(page, pfn);
499 	bitidx = pfn_to_bitidx(page, pfn);
500 	word_bitidx = bitidx / BITS_PER_LONG;
501 	bitidx &= (BITS_PER_LONG-1);
502 
503 	word = bitmap[word_bitidx];
504 	return (word >> bitidx) & mask;
505 }
506 
507 /**
508  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
509  * @page: The page within the block of interest
510  * @pfn: The target page frame number
511  * @mask: mask of bits that the caller is interested in
512  *
513  * Return: pageblock_bits flags
514  */
515 unsigned long get_pfnblock_flags_mask(const struct page *page,
516 					unsigned long pfn, unsigned long mask)
517 {
518 	return __get_pfnblock_flags_mask(page, pfn, mask);
519 }
520 
521 static __always_inline int get_pfnblock_migratetype(const struct page *page,
522 					unsigned long pfn)
523 {
524 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
525 }
526 
527 /**
528  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
529  * @page: The page within the block of interest
530  * @flags: The flags to set
531  * @pfn: The target page frame number
532  * @mask: mask of bits that the caller is interested in
533  */
534 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
535 					unsigned long pfn,
536 					unsigned long mask)
537 {
538 	unsigned long *bitmap;
539 	unsigned long bitidx, word_bitidx;
540 	unsigned long old_word, word;
541 
542 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
543 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
544 
545 	bitmap = get_pageblock_bitmap(page, pfn);
546 	bitidx = pfn_to_bitidx(page, pfn);
547 	word_bitidx = bitidx / BITS_PER_LONG;
548 	bitidx &= (BITS_PER_LONG-1);
549 
550 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
551 
552 	mask <<= bitidx;
553 	flags <<= bitidx;
554 
555 	word = READ_ONCE(bitmap[word_bitidx]);
556 	for (;;) {
557 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
558 		if (word == old_word)
559 			break;
560 		word = old_word;
561 	}
562 }
563 
564 void set_pageblock_migratetype(struct page *page, int migratetype)
565 {
566 	if (unlikely(page_group_by_mobility_disabled &&
567 		     migratetype < MIGRATE_PCPTYPES))
568 		migratetype = MIGRATE_UNMOVABLE;
569 
570 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
571 				page_to_pfn(page), MIGRATETYPE_MASK);
572 }
573 
574 #ifdef CONFIG_DEBUG_VM
575 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
576 {
577 	int ret = 0;
578 	unsigned seq;
579 	unsigned long pfn = page_to_pfn(page);
580 	unsigned long sp, start_pfn;
581 
582 	do {
583 		seq = zone_span_seqbegin(zone);
584 		start_pfn = zone->zone_start_pfn;
585 		sp = zone->spanned_pages;
586 		if (!zone_spans_pfn(zone, pfn))
587 			ret = 1;
588 	} while (zone_span_seqretry(zone, seq));
589 
590 	if (ret)
591 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
592 			pfn, zone_to_nid(zone), zone->name,
593 			start_pfn, start_pfn + sp);
594 
595 	return ret;
596 }
597 
598 static int page_is_consistent(struct zone *zone, struct page *page)
599 {
600 	if (zone != page_zone(page))
601 		return 0;
602 
603 	return 1;
604 }
605 /*
606  * Temporary debugging check for pages not lying within a given zone.
607  */
608 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
609 {
610 	if (page_outside_zone_boundaries(zone, page))
611 		return 1;
612 	if (!page_is_consistent(zone, page))
613 		return 1;
614 
615 	return 0;
616 }
617 #else
618 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
619 {
620 	return 0;
621 }
622 #endif
623 
624 static void bad_page(struct page *page, const char *reason)
625 {
626 	static unsigned long resume;
627 	static unsigned long nr_shown;
628 	static unsigned long nr_unshown;
629 
630 	/*
631 	 * Allow a burst of 60 reports, then keep quiet for that minute;
632 	 * or allow a steady drip of one report per second.
633 	 */
634 	if (nr_shown == 60) {
635 		if (time_before(jiffies, resume)) {
636 			nr_unshown++;
637 			goto out;
638 		}
639 		if (nr_unshown) {
640 			pr_alert(
641 			      "BUG: Bad page state: %lu messages suppressed\n",
642 				nr_unshown);
643 			nr_unshown = 0;
644 		}
645 		nr_shown = 0;
646 	}
647 	if (nr_shown++ == 0)
648 		resume = jiffies + 60 * HZ;
649 
650 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
651 		current->comm, page_to_pfn(page));
652 	dump_page(page, reason);
653 
654 	print_modules();
655 	dump_stack();
656 out:
657 	/* Leave bad fields for debug, except PageBuddy could make trouble */
658 	page_mapcount_reset(page); /* remove PageBuddy */
659 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
660 }
661 
662 static inline unsigned int order_to_pindex(int migratetype, int order)
663 {
664 	int base = order;
665 
666 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
667 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
668 		VM_BUG_ON(order != pageblock_order);
669 		base = PAGE_ALLOC_COSTLY_ORDER + 1;
670 	}
671 #else
672 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
673 #endif
674 
675 	return (MIGRATE_PCPTYPES * base) + migratetype;
676 }
677 
678 static inline int pindex_to_order(unsigned int pindex)
679 {
680 	int order = pindex / MIGRATE_PCPTYPES;
681 
682 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
683 	if (order > PAGE_ALLOC_COSTLY_ORDER)
684 		order = pageblock_order;
685 #else
686 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
687 #endif
688 
689 	return order;
690 }
691 
692 static inline bool pcp_allowed_order(unsigned int order)
693 {
694 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
695 		return true;
696 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
697 	if (order == pageblock_order)
698 		return true;
699 #endif
700 	return false;
701 }
702 
703 static inline void free_the_page(struct page *page, unsigned int order)
704 {
705 	if (pcp_allowed_order(order))		/* Via pcp? */
706 		free_unref_page(page, order);
707 	else
708 		__free_pages_ok(page, order, FPI_NONE);
709 }
710 
711 /*
712  * Higher-order pages are called "compound pages".  They are structured thusly:
713  *
714  * The first PAGE_SIZE page is called the "head page" and have PG_head set.
715  *
716  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
717  * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
718  *
719  * The first tail page's ->compound_dtor holds the offset in array of compound
720  * page destructors. See compound_page_dtors.
721  *
722  * The first tail page's ->compound_order holds the order of allocation.
723  * This usage means that zero-order pages may not be compound.
724  */
725 
726 void free_compound_page(struct page *page)
727 {
728 	mem_cgroup_uncharge(page_folio(page));
729 	free_the_page(page, compound_order(page));
730 }
731 
732 static void prep_compound_head(struct page *page, unsigned int order)
733 {
734 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
735 	set_compound_order(page, order);
736 	atomic_set(compound_mapcount_ptr(page), -1);
737 	if (hpage_pincount_available(page))
738 		atomic_set(compound_pincount_ptr(page), 0);
739 }
740 
741 static void prep_compound_tail(struct page *head, int tail_idx)
742 {
743 	struct page *p = head + tail_idx;
744 
745 	p->mapping = TAIL_MAPPING;
746 	set_compound_head(p, head);
747 }
748 
749 void prep_compound_page(struct page *page, unsigned int order)
750 {
751 	int i;
752 	int nr_pages = 1 << order;
753 
754 	__SetPageHead(page);
755 	for (i = 1; i < nr_pages; i++)
756 		prep_compound_tail(page, i);
757 
758 	prep_compound_head(page, order);
759 }
760 
761 #ifdef CONFIG_DEBUG_PAGEALLOC
762 unsigned int _debug_guardpage_minorder;
763 
764 bool _debug_pagealloc_enabled_early __read_mostly
765 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
766 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
767 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
768 EXPORT_SYMBOL(_debug_pagealloc_enabled);
769 
770 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
771 
772 static int __init early_debug_pagealloc(char *buf)
773 {
774 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
775 }
776 early_param("debug_pagealloc", early_debug_pagealloc);
777 
778 static int __init debug_guardpage_minorder_setup(char *buf)
779 {
780 	unsigned long res;
781 
782 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
783 		pr_err("Bad debug_guardpage_minorder value\n");
784 		return 0;
785 	}
786 	_debug_guardpage_minorder = res;
787 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
788 	return 0;
789 }
790 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
791 
792 static inline bool set_page_guard(struct zone *zone, struct page *page,
793 				unsigned int order, int migratetype)
794 {
795 	if (!debug_guardpage_enabled())
796 		return false;
797 
798 	if (order >= debug_guardpage_minorder())
799 		return false;
800 
801 	__SetPageGuard(page);
802 	INIT_LIST_HEAD(&page->lru);
803 	set_page_private(page, order);
804 	/* Guard pages are not available for any usage */
805 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
806 
807 	return true;
808 }
809 
810 static inline void clear_page_guard(struct zone *zone, struct page *page,
811 				unsigned int order, int migratetype)
812 {
813 	if (!debug_guardpage_enabled())
814 		return;
815 
816 	__ClearPageGuard(page);
817 
818 	set_page_private(page, 0);
819 	if (!is_migrate_isolate(migratetype))
820 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
821 }
822 #else
823 static inline bool set_page_guard(struct zone *zone, struct page *page,
824 			unsigned int order, int migratetype) { return false; }
825 static inline void clear_page_guard(struct zone *zone, struct page *page,
826 				unsigned int order, int migratetype) {}
827 #endif
828 
829 /*
830  * Enable static keys related to various memory debugging and hardening options.
831  * Some override others, and depend on early params that are evaluated in the
832  * order of appearance. So we need to first gather the full picture of what was
833  * enabled, and then make decisions.
834  */
835 void init_mem_debugging_and_hardening(void)
836 {
837 	bool page_poisoning_requested = false;
838 
839 #ifdef CONFIG_PAGE_POISONING
840 	/*
841 	 * Page poisoning is debug page alloc for some arches. If
842 	 * either of those options are enabled, enable poisoning.
843 	 */
844 	if (page_poisoning_enabled() ||
845 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
846 	      debug_pagealloc_enabled())) {
847 		static_branch_enable(&_page_poisoning_enabled);
848 		page_poisoning_requested = true;
849 	}
850 #endif
851 
852 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
853 	    page_poisoning_requested) {
854 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
855 			"will take precedence over init_on_alloc and init_on_free\n");
856 		_init_on_alloc_enabled_early = false;
857 		_init_on_free_enabled_early = false;
858 	}
859 
860 	if (_init_on_alloc_enabled_early)
861 		static_branch_enable(&init_on_alloc);
862 	else
863 		static_branch_disable(&init_on_alloc);
864 
865 	if (_init_on_free_enabled_early)
866 		static_branch_enable(&init_on_free);
867 	else
868 		static_branch_disable(&init_on_free);
869 
870 #ifdef CONFIG_DEBUG_PAGEALLOC
871 	if (!debug_pagealloc_enabled())
872 		return;
873 
874 	static_branch_enable(&_debug_pagealloc_enabled);
875 
876 	if (!debug_guardpage_minorder())
877 		return;
878 
879 	static_branch_enable(&_debug_guardpage_enabled);
880 #endif
881 }
882 
883 static inline void set_buddy_order(struct page *page, unsigned int order)
884 {
885 	set_page_private(page, order);
886 	__SetPageBuddy(page);
887 }
888 
889 /*
890  * This function checks whether a page is free && is the buddy
891  * we can coalesce a page and its buddy if
892  * (a) the buddy is not in a hole (check before calling!) &&
893  * (b) the buddy is in the buddy system &&
894  * (c) a page and its buddy have the same order &&
895  * (d) a page and its buddy are in the same zone.
896  *
897  * For recording whether a page is in the buddy system, we set PageBuddy.
898  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
899  *
900  * For recording page's order, we use page_private(page).
901  */
902 static inline bool page_is_buddy(struct page *page, struct page *buddy,
903 							unsigned int order)
904 {
905 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
906 		return false;
907 
908 	if (buddy_order(buddy) != order)
909 		return false;
910 
911 	/*
912 	 * zone check is done late to avoid uselessly calculating
913 	 * zone/node ids for pages that could never merge.
914 	 */
915 	if (page_zone_id(page) != page_zone_id(buddy))
916 		return false;
917 
918 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
919 
920 	return true;
921 }
922 
923 #ifdef CONFIG_COMPACTION
924 static inline struct capture_control *task_capc(struct zone *zone)
925 {
926 	struct capture_control *capc = current->capture_control;
927 
928 	return unlikely(capc) &&
929 		!(current->flags & PF_KTHREAD) &&
930 		!capc->page &&
931 		capc->cc->zone == zone ? capc : NULL;
932 }
933 
934 static inline bool
935 compaction_capture(struct capture_control *capc, struct page *page,
936 		   int order, int migratetype)
937 {
938 	if (!capc || order != capc->cc->order)
939 		return false;
940 
941 	/* Do not accidentally pollute CMA or isolated regions*/
942 	if (is_migrate_cma(migratetype) ||
943 	    is_migrate_isolate(migratetype))
944 		return false;
945 
946 	/*
947 	 * Do not let lower order allocations pollute a movable pageblock.
948 	 * This might let an unmovable request use a reclaimable pageblock
949 	 * and vice-versa but no more than normal fallback logic which can
950 	 * have trouble finding a high-order free page.
951 	 */
952 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
953 		return false;
954 
955 	capc->page = page;
956 	return true;
957 }
958 
959 #else
960 static inline struct capture_control *task_capc(struct zone *zone)
961 {
962 	return NULL;
963 }
964 
965 static inline bool
966 compaction_capture(struct capture_control *capc, struct page *page,
967 		   int order, int migratetype)
968 {
969 	return false;
970 }
971 #endif /* CONFIG_COMPACTION */
972 
973 /* Used for pages not on another list */
974 static inline void add_to_free_list(struct page *page, struct zone *zone,
975 				    unsigned int order, int migratetype)
976 {
977 	struct free_area *area = &zone->free_area[order];
978 
979 	list_add(&page->lru, &area->free_list[migratetype]);
980 	area->nr_free++;
981 }
982 
983 /* Used for pages not on another list */
984 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
985 					 unsigned int order, int migratetype)
986 {
987 	struct free_area *area = &zone->free_area[order];
988 
989 	list_add_tail(&page->lru, &area->free_list[migratetype]);
990 	area->nr_free++;
991 }
992 
993 /*
994  * Used for pages which are on another list. Move the pages to the tail
995  * of the list - so the moved pages won't immediately be considered for
996  * allocation again (e.g., optimization for memory onlining).
997  */
998 static inline void move_to_free_list(struct page *page, struct zone *zone,
999 				     unsigned int order, int migratetype)
1000 {
1001 	struct free_area *area = &zone->free_area[order];
1002 
1003 	list_move_tail(&page->lru, &area->free_list[migratetype]);
1004 }
1005 
1006 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
1007 					   unsigned int order)
1008 {
1009 	/* clear reported state and update reported page count */
1010 	if (page_reported(page))
1011 		__ClearPageReported(page);
1012 
1013 	list_del(&page->lru);
1014 	__ClearPageBuddy(page);
1015 	set_page_private(page, 0);
1016 	zone->free_area[order].nr_free--;
1017 }
1018 
1019 /*
1020  * If this is not the largest possible page, check if the buddy
1021  * of the next-highest order is free. If it is, it's possible
1022  * that pages are being freed that will coalesce soon. In case,
1023  * that is happening, add the free page to the tail of the list
1024  * so it's less likely to be used soon and more likely to be merged
1025  * as a higher order page
1026  */
1027 static inline bool
1028 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1029 		   struct page *page, unsigned int order)
1030 {
1031 	struct page *higher_page, *higher_buddy;
1032 	unsigned long combined_pfn;
1033 
1034 	if (order >= MAX_ORDER - 2)
1035 		return false;
1036 
1037 	combined_pfn = buddy_pfn & pfn;
1038 	higher_page = page + (combined_pfn - pfn);
1039 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1040 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1041 
1042 	return page_is_buddy(higher_page, higher_buddy, order + 1);
1043 }
1044 
1045 /*
1046  * Freeing function for a buddy system allocator.
1047  *
1048  * The concept of a buddy system is to maintain direct-mapped table
1049  * (containing bit values) for memory blocks of various "orders".
1050  * The bottom level table contains the map for the smallest allocatable
1051  * units of memory (here, pages), and each level above it describes
1052  * pairs of units from the levels below, hence, "buddies".
1053  * At a high level, all that happens here is marking the table entry
1054  * at the bottom level available, and propagating the changes upward
1055  * as necessary, plus some accounting needed to play nicely with other
1056  * parts of the VM system.
1057  * At each level, we keep a list of pages, which are heads of continuous
1058  * free pages of length of (1 << order) and marked with PageBuddy.
1059  * Page's order is recorded in page_private(page) field.
1060  * So when we are allocating or freeing one, we can derive the state of the
1061  * other.  That is, if we allocate a small block, and both were
1062  * free, the remainder of the region must be split into blocks.
1063  * If a block is freed, and its buddy is also free, then this
1064  * triggers coalescing into a block of larger size.
1065  *
1066  * -- nyc
1067  */
1068 
1069 static inline void __free_one_page(struct page *page,
1070 		unsigned long pfn,
1071 		struct zone *zone, unsigned int order,
1072 		int migratetype, fpi_t fpi_flags)
1073 {
1074 	struct capture_control *capc = task_capc(zone);
1075 	unsigned int max_order = pageblock_order;
1076 	unsigned long buddy_pfn;
1077 	unsigned long combined_pfn;
1078 	struct page *buddy;
1079 	bool to_tail;
1080 
1081 	VM_BUG_ON(!zone_is_initialized(zone));
1082 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1083 
1084 	VM_BUG_ON(migratetype == -1);
1085 	if (likely(!is_migrate_isolate(migratetype)))
1086 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1087 
1088 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1089 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1090 
1091 continue_merging:
1092 	while (order < max_order) {
1093 		if (compaction_capture(capc, page, order, migratetype)) {
1094 			__mod_zone_freepage_state(zone, -(1 << order),
1095 								migratetype);
1096 			return;
1097 		}
1098 		buddy_pfn = __find_buddy_pfn(pfn, order);
1099 		buddy = page + (buddy_pfn - pfn);
1100 
1101 		if (!page_is_buddy(page, buddy, order))
1102 			goto done_merging;
1103 		/*
1104 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1105 		 * merge with it and move up one order.
1106 		 */
1107 		if (page_is_guard(buddy))
1108 			clear_page_guard(zone, buddy, order, migratetype);
1109 		else
1110 			del_page_from_free_list(buddy, zone, order);
1111 		combined_pfn = buddy_pfn & pfn;
1112 		page = page + (combined_pfn - pfn);
1113 		pfn = combined_pfn;
1114 		order++;
1115 	}
1116 	if (order < MAX_ORDER - 1) {
1117 		/* If we are here, it means order is >= pageblock_order.
1118 		 * We want to prevent merge between freepages on pageblock
1119 		 * without fallbacks and normal pageblock. Without this,
1120 		 * pageblock isolation could cause incorrect freepage or CMA
1121 		 * accounting or HIGHATOMIC accounting.
1122 		 *
1123 		 * We don't want to hit this code for the more frequent
1124 		 * low-order merging.
1125 		 */
1126 		int buddy_mt;
1127 
1128 		buddy_pfn = __find_buddy_pfn(pfn, order);
1129 		buddy = page + (buddy_pfn - pfn);
1130 		buddy_mt = get_pageblock_migratetype(buddy);
1131 
1132 		if (migratetype != buddy_mt
1133 				&& (!migratetype_is_mergeable(migratetype) ||
1134 					!migratetype_is_mergeable(buddy_mt)))
1135 			goto done_merging;
1136 		max_order = order + 1;
1137 		goto continue_merging;
1138 	}
1139 
1140 done_merging:
1141 	set_buddy_order(page, order);
1142 
1143 	if (fpi_flags & FPI_TO_TAIL)
1144 		to_tail = true;
1145 	else if (is_shuffle_order(order))
1146 		to_tail = shuffle_pick_tail();
1147 	else
1148 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1149 
1150 	if (to_tail)
1151 		add_to_free_list_tail(page, zone, order, migratetype);
1152 	else
1153 		add_to_free_list(page, zone, order, migratetype);
1154 
1155 	/* Notify page reporting subsystem of freed page */
1156 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1157 		page_reporting_notify_free(order);
1158 }
1159 
1160 /*
1161  * A bad page could be due to a number of fields. Instead of multiple branches,
1162  * try and check multiple fields with one check. The caller must do a detailed
1163  * check if necessary.
1164  */
1165 static inline bool page_expected_state(struct page *page,
1166 					unsigned long check_flags)
1167 {
1168 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1169 		return false;
1170 
1171 	if (unlikely((unsigned long)page->mapping |
1172 			page_ref_count(page) |
1173 #ifdef CONFIG_MEMCG
1174 			page->memcg_data |
1175 #endif
1176 			(page->flags & check_flags)))
1177 		return false;
1178 
1179 	return true;
1180 }
1181 
1182 static const char *page_bad_reason(struct page *page, unsigned long flags)
1183 {
1184 	const char *bad_reason = NULL;
1185 
1186 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1187 		bad_reason = "nonzero mapcount";
1188 	if (unlikely(page->mapping != NULL))
1189 		bad_reason = "non-NULL mapping";
1190 	if (unlikely(page_ref_count(page) != 0))
1191 		bad_reason = "nonzero _refcount";
1192 	if (unlikely(page->flags & flags)) {
1193 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1194 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1195 		else
1196 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1197 	}
1198 #ifdef CONFIG_MEMCG
1199 	if (unlikely(page->memcg_data))
1200 		bad_reason = "page still charged to cgroup";
1201 #endif
1202 	return bad_reason;
1203 }
1204 
1205 static void check_free_page_bad(struct page *page)
1206 {
1207 	bad_page(page,
1208 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1209 }
1210 
1211 static inline int check_free_page(struct page *page)
1212 {
1213 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1214 		return 0;
1215 
1216 	/* Something has gone sideways, find it */
1217 	check_free_page_bad(page);
1218 	return 1;
1219 }
1220 
1221 static int free_tail_pages_check(struct page *head_page, struct page *page)
1222 {
1223 	int ret = 1;
1224 
1225 	/*
1226 	 * We rely page->lru.next never has bit 0 set, unless the page
1227 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1228 	 */
1229 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1230 
1231 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1232 		ret = 0;
1233 		goto out;
1234 	}
1235 	switch (page - head_page) {
1236 	case 1:
1237 		/* the first tail page: ->mapping may be compound_mapcount() */
1238 		if (unlikely(compound_mapcount(page))) {
1239 			bad_page(page, "nonzero compound_mapcount");
1240 			goto out;
1241 		}
1242 		break;
1243 	case 2:
1244 		/*
1245 		 * the second tail page: ->mapping is
1246 		 * deferred_list.next -- ignore value.
1247 		 */
1248 		break;
1249 	default:
1250 		if (page->mapping != TAIL_MAPPING) {
1251 			bad_page(page, "corrupted mapping in tail page");
1252 			goto out;
1253 		}
1254 		break;
1255 	}
1256 	if (unlikely(!PageTail(page))) {
1257 		bad_page(page, "PageTail not set");
1258 		goto out;
1259 	}
1260 	if (unlikely(compound_head(page) != head_page)) {
1261 		bad_page(page, "compound_head not consistent");
1262 		goto out;
1263 	}
1264 	ret = 0;
1265 out:
1266 	page->mapping = NULL;
1267 	clear_compound_head(page);
1268 	return ret;
1269 }
1270 
1271 static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
1272 {
1273 	int i;
1274 
1275 	if (zero_tags) {
1276 		for (i = 0; i < numpages; i++)
1277 			tag_clear_highpage(page + i);
1278 		return;
1279 	}
1280 
1281 	/* s390's use of memset() could override KASAN redzones. */
1282 	kasan_disable_current();
1283 	for (i = 0; i < numpages; i++) {
1284 		u8 tag = page_kasan_tag(page + i);
1285 		page_kasan_tag_reset(page + i);
1286 		clear_highpage(page + i);
1287 		page_kasan_tag_set(page + i, tag);
1288 	}
1289 	kasan_enable_current();
1290 }
1291 
1292 static __always_inline bool free_pages_prepare(struct page *page,
1293 			unsigned int order, bool check_free, fpi_t fpi_flags)
1294 {
1295 	int bad = 0;
1296 	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1297 
1298 	VM_BUG_ON_PAGE(PageTail(page), page);
1299 
1300 	trace_mm_page_free(page, order);
1301 
1302 	if (unlikely(PageHWPoison(page)) && !order) {
1303 		/*
1304 		 * Do not let hwpoison pages hit pcplists/buddy
1305 		 * Untie memcg state and reset page's owner
1306 		 */
1307 		if (memcg_kmem_enabled() && PageMemcgKmem(page))
1308 			__memcg_kmem_uncharge_page(page, order);
1309 		reset_page_owner(page, order);
1310 		page_table_check_free(page, order);
1311 		return false;
1312 	}
1313 
1314 	/*
1315 	 * Check tail pages before head page information is cleared to
1316 	 * avoid checking PageCompound for order-0 pages.
1317 	 */
1318 	if (unlikely(order)) {
1319 		bool compound = PageCompound(page);
1320 		int i;
1321 
1322 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1323 
1324 		if (compound) {
1325 			ClearPageDoubleMap(page);
1326 			ClearPageHasHWPoisoned(page);
1327 		}
1328 		for (i = 1; i < (1 << order); i++) {
1329 			if (compound)
1330 				bad += free_tail_pages_check(page, page + i);
1331 			if (unlikely(check_free_page(page + i))) {
1332 				bad++;
1333 				continue;
1334 			}
1335 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1336 		}
1337 	}
1338 	if (PageMappingFlags(page))
1339 		page->mapping = NULL;
1340 	if (memcg_kmem_enabled() && PageMemcgKmem(page))
1341 		__memcg_kmem_uncharge_page(page, order);
1342 	if (check_free)
1343 		bad += check_free_page(page);
1344 	if (bad)
1345 		return false;
1346 
1347 	page_cpupid_reset_last(page);
1348 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1349 	reset_page_owner(page, order);
1350 	page_table_check_free(page, order);
1351 
1352 	if (!PageHighMem(page)) {
1353 		debug_check_no_locks_freed(page_address(page),
1354 					   PAGE_SIZE << order);
1355 		debug_check_no_obj_freed(page_address(page),
1356 					   PAGE_SIZE << order);
1357 	}
1358 
1359 	kernel_poison_pages(page, 1 << order);
1360 
1361 	/*
1362 	 * As memory initialization might be integrated into KASAN,
1363 	 * kasan_free_pages and kernel_init_free_pages must be
1364 	 * kept together to avoid discrepancies in behavior.
1365 	 *
1366 	 * With hardware tag-based KASAN, memory tags must be set before the
1367 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
1368 	 */
1369 	if (kasan_has_integrated_init()) {
1370 		if (!skip_kasan_poison)
1371 			kasan_free_pages(page, order);
1372 	} else {
1373 		bool init = want_init_on_free();
1374 
1375 		if (init)
1376 			kernel_init_free_pages(page, 1 << order, false);
1377 		if (!skip_kasan_poison)
1378 			kasan_poison_pages(page, order, init);
1379 	}
1380 
1381 	/*
1382 	 * arch_free_page() can make the page's contents inaccessible.  s390
1383 	 * does this.  So nothing which can access the page's contents should
1384 	 * happen after this.
1385 	 */
1386 	arch_free_page(page, order);
1387 
1388 	debug_pagealloc_unmap_pages(page, 1 << order);
1389 
1390 	return true;
1391 }
1392 
1393 #ifdef CONFIG_DEBUG_VM
1394 /*
1395  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1396  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1397  * moved from pcp lists to free lists.
1398  */
1399 static bool free_pcp_prepare(struct page *page, unsigned int order)
1400 {
1401 	return free_pages_prepare(page, order, true, FPI_NONE);
1402 }
1403 
1404 static bool bulkfree_pcp_prepare(struct page *page)
1405 {
1406 	if (debug_pagealloc_enabled_static())
1407 		return check_free_page(page);
1408 	else
1409 		return false;
1410 }
1411 #else
1412 /*
1413  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1414  * moving from pcp lists to free list in order to reduce overhead. With
1415  * debug_pagealloc enabled, they are checked also immediately when being freed
1416  * to the pcp lists.
1417  */
1418 static bool free_pcp_prepare(struct page *page, unsigned int order)
1419 {
1420 	if (debug_pagealloc_enabled_static())
1421 		return free_pages_prepare(page, order, true, FPI_NONE);
1422 	else
1423 		return free_pages_prepare(page, order, false, FPI_NONE);
1424 }
1425 
1426 static bool bulkfree_pcp_prepare(struct page *page)
1427 {
1428 	return check_free_page(page);
1429 }
1430 #endif /* CONFIG_DEBUG_VM */
1431 
1432 /*
1433  * Frees a number of pages from the PCP lists
1434  * Assumes all pages on list are in same zone.
1435  * count is the number of pages to free.
1436  */
1437 static void free_pcppages_bulk(struct zone *zone, int count,
1438 					struct per_cpu_pages *pcp,
1439 					int pindex)
1440 {
1441 	int min_pindex = 0;
1442 	int max_pindex = NR_PCP_LISTS - 1;
1443 	unsigned int order;
1444 	bool isolated_pageblocks;
1445 	struct page *page;
1446 
1447 	/*
1448 	 * Ensure proper count is passed which otherwise would stuck in the
1449 	 * below while (list_empty(list)) loop.
1450 	 */
1451 	count = min(pcp->count, count);
1452 
1453 	/* Ensure requested pindex is drained first. */
1454 	pindex = pindex - 1;
1455 
1456 	/*
1457 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
1458 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
1459 	 */
1460 	spin_lock(&zone->lock);
1461 	isolated_pageblocks = has_isolate_pageblock(zone);
1462 
1463 	while (count > 0) {
1464 		struct list_head *list;
1465 		int nr_pages;
1466 
1467 		/* Remove pages from lists in a round-robin fashion. */
1468 		do {
1469 			if (++pindex > max_pindex)
1470 				pindex = min_pindex;
1471 			list = &pcp->lists[pindex];
1472 			if (!list_empty(list))
1473 				break;
1474 
1475 			if (pindex == max_pindex)
1476 				max_pindex--;
1477 			if (pindex == min_pindex)
1478 				min_pindex++;
1479 		} while (1);
1480 
1481 		order = pindex_to_order(pindex);
1482 		nr_pages = 1 << order;
1483 		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1484 		do {
1485 			int mt;
1486 
1487 			page = list_last_entry(list, struct page, lru);
1488 			mt = get_pcppage_migratetype(page);
1489 
1490 			/* must delete to avoid corrupting pcp list */
1491 			list_del(&page->lru);
1492 			count -= nr_pages;
1493 			pcp->count -= nr_pages;
1494 
1495 			if (bulkfree_pcp_prepare(page))
1496 				continue;
1497 
1498 			/* MIGRATE_ISOLATE page should not go to pcplists */
1499 			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1500 			/* Pageblock could have been isolated meanwhile */
1501 			if (unlikely(isolated_pageblocks))
1502 				mt = get_pageblock_migratetype(page);
1503 
1504 			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1505 			trace_mm_page_pcpu_drain(page, order, mt);
1506 		} while (count > 0 && !list_empty(list));
1507 	}
1508 
1509 	spin_unlock(&zone->lock);
1510 }
1511 
1512 static void free_one_page(struct zone *zone,
1513 				struct page *page, unsigned long pfn,
1514 				unsigned int order,
1515 				int migratetype, fpi_t fpi_flags)
1516 {
1517 	unsigned long flags;
1518 
1519 	spin_lock_irqsave(&zone->lock, flags);
1520 	if (unlikely(has_isolate_pageblock(zone) ||
1521 		is_migrate_isolate(migratetype))) {
1522 		migratetype = get_pfnblock_migratetype(page, pfn);
1523 	}
1524 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1525 	spin_unlock_irqrestore(&zone->lock, flags);
1526 }
1527 
1528 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1529 				unsigned long zone, int nid)
1530 {
1531 	mm_zero_struct_page(page);
1532 	set_page_links(page, zone, nid, pfn);
1533 	init_page_count(page);
1534 	page_mapcount_reset(page);
1535 	page_cpupid_reset_last(page);
1536 	page_kasan_tag_reset(page);
1537 
1538 	INIT_LIST_HEAD(&page->lru);
1539 #ifdef WANT_PAGE_VIRTUAL
1540 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1541 	if (!is_highmem_idx(zone))
1542 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1543 #endif
1544 }
1545 
1546 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1547 static void __meminit init_reserved_page(unsigned long pfn)
1548 {
1549 	pg_data_t *pgdat;
1550 	int nid, zid;
1551 
1552 	if (!early_page_uninitialised(pfn))
1553 		return;
1554 
1555 	nid = early_pfn_to_nid(pfn);
1556 	pgdat = NODE_DATA(nid);
1557 
1558 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1559 		struct zone *zone = &pgdat->node_zones[zid];
1560 
1561 		if (zone_spans_pfn(zone, pfn))
1562 			break;
1563 	}
1564 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1565 }
1566 #else
1567 static inline void init_reserved_page(unsigned long pfn)
1568 {
1569 }
1570 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1571 
1572 /*
1573  * Initialised pages do not have PageReserved set. This function is
1574  * called for each range allocated by the bootmem allocator and
1575  * marks the pages PageReserved. The remaining valid pages are later
1576  * sent to the buddy page allocator.
1577  */
1578 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1579 {
1580 	unsigned long start_pfn = PFN_DOWN(start);
1581 	unsigned long end_pfn = PFN_UP(end);
1582 
1583 	for (; start_pfn < end_pfn; start_pfn++) {
1584 		if (pfn_valid(start_pfn)) {
1585 			struct page *page = pfn_to_page(start_pfn);
1586 
1587 			init_reserved_page(start_pfn);
1588 
1589 			/* Avoid false-positive PageTail() */
1590 			INIT_LIST_HEAD(&page->lru);
1591 
1592 			/*
1593 			 * no need for atomic set_bit because the struct
1594 			 * page is not visible yet so nobody should
1595 			 * access it yet.
1596 			 */
1597 			__SetPageReserved(page);
1598 		}
1599 	}
1600 }
1601 
1602 static void __free_pages_ok(struct page *page, unsigned int order,
1603 			    fpi_t fpi_flags)
1604 {
1605 	unsigned long flags;
1606 	int migratetype;
1607 	unsigned long pfn = page_to_pfn(page);
1608 	struct zone *zone = page_zone(page);
1609 
1610 	if (!free_pages_prepare(page, order, true, fpi_flags))
1611 		return;
1612 
1613 	migratetype = get_pfnblock_migratetype(page, pfn);
1614 
1615 	spin_lock_irqsave(&zone->lock, flags);
1616 	if (unlikely(has_isolate_pageblock(zone) ||
1617 		is_migrate_isolate(migratetype))) {
1618 		migratetype = get_pfnblock_migratetype(page, pfn);
1619 	}
1620 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1621 	spin_unlock_irqrestore(&zone->lock, flags);
1622 
1623 	__count_vm_events(PGFREE, 1 << order);
1624 }
1625 
1626 void __free_pages_core(struct page *page, unsigned int order)
1627 {
1628 	unsigned int nr_pages = 1 << order;
1629 	struct page *p = page;
1630 	unsigned int loop;
1631 
1632 	/*
1633 	 * When initializing the memmap, __init_single_page() sets the refcount
1634 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1635 	 * refcount of all involved pages to 0.
1636 	 */
1637 	prefetchw(p);
1638 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1639 		prefetchw(p + 1);
1640 		__ClearPageReserved(p);
1641 		set_page_count(p, 0);
1642 	}
1643 	__ClearPageReserved(p);
1644 	set_page_count(p, 0);
1645 
1646 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1647 
1648 	/*
1649 	 * Bypass PCP and place fresh pages right to the tail, primarily
1650 	 * relevant for memory onlining.
1651 	 */
1652 	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1653 }
1654 
1655 #ifdef CONFIG_NUMA
1656 
1657 /*
1658  * During memory init memblocks map pfns to nids. The search is expensive and
1659  * this caches recent lookups. The implementation of __early_pfn_to_nid
1660  * treats start/end as pfns.
1661  */
1662 struct mminit_pfnnid_cache {
1663 	unsigned long last_start;
1664 	unsigned long last_end;
1665 	int last_nid;
1666 };
1667 
1668 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1669 
1670 /*
1671  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1672  */
1673 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1674 					struct mminit_pfnnid_cache *state)
1675 {
1676 	unsigned long start_pfn, end_pfn;
1677 	int nid;
1678 
1679 	if (state->last_start <= pfn && pfn < state->last_end)
1680 		return state->last_nid;
1681 
1682 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1683 	if (nid != NUMA_NO_NODE) {
1684 		state->last_start = start_pfn;
1685 		state->last_end = end_pfn;
1686 		state->last_nid = nid;
1687 	}
1688 
1689 	return nid;
1690 }
1691 
1692 int __meminit early_pfn_to_nid(unsigned long pfn)
1693 {
1694 	static DEFINE_SPINLOCK(early_pfn_lock);
1695 	int nid;
1696 
1697 	spin_lock(&early_pfn_lock);
1698 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1699 	if (nid < 0)
1700 		nid = first_online_node;
1701 	spin_unlock(&early_pfn_lock);
1702 
1703 	return nid;
1704 }
1705 #endif /* CONFIG_NUMA */
1706 
1707 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1708 							unsigned int order)
1709 {
1710 	if (early_page_uninitialised(pfn))
1711 		return;
1712 	__free_pages_core(page, order);
1713 }
1714 
1715 /*
1716  * Check that the whole (or subset of) a pageblock given by the interval of
1717  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1718  * with the migration of free compaction scanner.
1719  *
1720  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1721  *
1722  * It's possible on some configurations to have a setup like node0 node1 node0
1723  * i.e. it's possible that all pages within a zones range of pages do not
1724  * belong to a single zone. We assume that a border between node0 and node1
1725  * can occur within a single pageblock, but not a node0 node1 node0
1726  * interleaving within a single pageblock. It is therefore sufficient to check
1727  * the first and last page of a pageblock and avoid checking each individual
1728  * page in a pageblock.
1729  */
1730 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1731 				     unsigned long end_pfn, struct zone *zone)
1732 {
1733 	struct page *start_page;
1734 	struct page *end_page;
1735 
1736 	/* end_pfn is one past the range we are checking */
1737 	end_pfn--;
1738 
1739 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1740 		return NULL;
1741 
1742 	start_page = pfn_to_online_page(start_pfn);
1743 	if (!start_page)
1744 		return NULL;
1745 
1746 	if (page_zone(start_page) != zone)
1747 		return NULL;
1748 
1749 	end_page = pfn_to_page(end_pfn);
1750 
1751 	/* This gives a shorter code than deriving page_zone(end_page) */
1752 	if (page_zone_id(start_page) != page_zone_id(end_page))
1753 		return NULL;
1754 
1755 	return start_page;
1756 }
1757 
1758 void set_zone_contiguous(struct zone *zone)
1759 {
1760 	unsigned long block_start_pfn = zone->zone_start_pfn;
1761 	unsigned long block_end_pfn;
1762 
1763 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1764 	for (; block_start_pfn < zone_end_pfn(zone);
1765 			block_start_pfn = block_end_pfn,
1766 			 block_end_pfn += pageblock_nr_pages) {
1767 
1768 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1769 
1770 		if (!__pageblock_pfn_to_page(block_start_pfn,
1771 					     block_end_pfn, zone))
1772 			return;
1773 		cond_resched();
1774 	}
1775 
1776 	/* We confirm that there is no hole */
1777 	zone->contiguous = true;
1778 }
1779 
1780 void clear_zone_contiguous(struct zone *zone)
1781 {
1782 	zone->contiguous = false;
1783 }
1784 
1785 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1786 static void __init deferred_free_range(unsigned long pfn,
1787 				       unsigned long nr_pages)
1788 {
1789 	struct page *page;
1790 	unsigned long i;
1791 
1792 	if (!nr_pages)
1793 		return;
1794 
1795 	page = pfn_to_page(pfn);
1796 
1797 	/* Free a large naturally-aligned chunk if possible */
1798 	if (nr_pages == pageblock_nr_pages &&
1799 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1800 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1801 		__free_pages_core(page, pageblock_order);
1802 		return;
1803 	}
1804 
1805 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1806 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1807 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1808 		__free_pages_core(page, 0);
1809 	}
1810 }
1811 
1812 /* Completion tracking for deferred_init_memmap() threads */
1813 static atomic_t pgdat_init_n_undone __initdata;
1814 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1815 
1816 static inline void __init pgdat_init_report_one_done(void)
1817 {
1818 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1819 		complete(&pgdat_init_all_done_comp);
1820 }
1821 
1822 /*
1823  * Returns true if page needs to be initialized or freed to buddy allocator.
1824  *
1825  * First we check if pfn is valid on architectures where it is possible to have
1826  * holes within pageblock_nr_pages. On systems where it is not possible, this
1827  * function is optimized out.
1828  *
1829  * Then, we check if a current large page is valid by only checking the validity
1830  * of the head pfn.
1831  */
1832 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1833 {
1834 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1835 		return false;
1836 	return true;
1837 }
1838 
1839 /*
1840  * Free pages to buddy allocator. Try to free aligned pages in
1841  * pageblock_nr_pages sizes.
1842  */
1843 static void __init deferred_free_pages(unsigned long pfn,
1844 				       unsigned long end_pfn)
1845 {
1846 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1847 	unsigned long nr_free = 0;
1848 
1849 	for (; pfn < end_pfn; pfn++) {
1850 		if (!deferred_pfn_valid(pfn)) {
1851 			deferred_free_range(pfn - nr_free, nr_free);
1852 			nr_free = 0;
1853 		} else if (!(pfn & nr_pgmask)) {
1854 			deferred_free_range(pfn - nr_free, nr_free);
1855 			nr_free = 1;
1856 		} else {
1857 			nr_free++;
1858 		}
1859 	}
1860 	/* Free the last block of pages to allocator */
1861 	deferred_free_range(pfn - nr_free, nr_free);
1862 }
1863 
1864 /*
1865  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1866  * by performing it only once every pageblock_nr_pages.
1867  * Return number of pages initialized.
1868  */
1869 static unsigned long  __init deferred_init_pages(struct zone *zone,
1870 						 unsigned long pfn,
1871 						 unsigned long end_pfn)
1872 {
1873 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1874 	int nid = zone_to_nid(zone);
1875 	unsigned long nr_pages = 0;
1876 	int zid = zone_idx(zone);
1877 	struct page *page = NULL;
1878 
1879 	for (; pfn < end_pfn; pfn++) {
1880 		if (!deferred_pfn_valid(pfn)) {
1881 			page = NULL;
1882 			continue;
1883 		} else if (!page || !(pfn & nr_pgmask)) {
1884 			page = pfn_to_page(pfn);
1885 		} else {
1886 			page++;
1887 		}
1888 		__init_single_page(page, pfn, zid, nid);
1889 		nr_pages++;
1890 	}
1891 	return (nr_pages);
1892 }
1893 
1894 /*
1895  * This function is meant to pre-load the iterator for the zone init.
1896  * Specifically it walks through the ranges until we are caught up to the
1897  * first_init_pfn value and exits there. If we never encounter the value we
1898  * return false indicating there are no valid ranges left.
1899  */
1900 static bool __init
1901 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1902 				    unsigned long *spfn, unsigned long *epfn,
1903 				    unsigned long first_init_pfn)
1904 {
1905 	u64 j;
1906 
1907 	/*
1908 	 * Start out by walking through the ranges in this zone that have
1909 	 * already been initialized. We don't need to do anything with them
1910 	 * so we just need to flush them out of the system.
1911 	 */
1912 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1913 		if (*epfn <= first_init_pfn)
1914 			continue;
1915 		if (*spfn < first_init_pfn)
1916 			*spfn = first_init_pfn;
1917 		*i = j;
1918 		return true;
1919 	}
1920 
1921 	return false;
1922 }
1923 
1924 /*
1925  * Initialize and free pages. We do it in two loops: first we initialize
1926  * struct page, then free to buddy allocator, because while we are
1927  * freeing pages we can access pages that are ahead (computing buddy
1928  * page in __free_one_page()).
1929  *
1930  * In order to try and keep some memory in the cache we have the loop
1931  * broken along max page order boundaries. This way we will not cause
1932  * any issues with the buddy page computation.
1933  */
1934 static unsigned long __init
1935 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1936 		       unsigned long *end_pfn)
1937 {
1938 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1939 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1940 	unsigned long nr_pages = 0;
1941 	u64 j = *i;
1942 
1943 	/* First we loop through and initialize the page values */
1944 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1945 		unsigned long t;
1946 
1947 		if (mo_pfn <= *start_pfn)
1948 			break;
1949 
1950 		t = min(mo_pfn, *end_pfn);
1951 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1952 
1953 		if (mo_pfn < *end_pfn) {
1954 			*start_pfn = mo_pfn;
1955 			break;
1956 		}
1957 	}
1958 
1959 	/* Reset values and now loop through freeing pages as needed */
1960 	swap(j, *i);
1961 
1962 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1963 		unsigned long t;
1964 
1965 		if (mo_pfn <= spfn)
1966 			break;
1967 
1968 		t = min(mo_pfn, epfn);
1969 		deferred_free_pages(spfn, t);
1970 
1971 		if (mo_pfn <= epfn)
1972 			break;
1973 	}
1974 
1975 	return nr_pages;
1976 }
1977 
1978 static void __init
1979 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1980 			   void *arg)
1981 {
1982 	unsigned long spfn, epfn;
1983 	struct zone *zone = arg;
1984 	u64 i;
1985 
1986 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1987 
1988 	/*
1989 	 * Initialize and free pages in MAX_ORDER sized increments so that we
1990 	 * can avoid introducing any issues with the buddy allocator.
1991 	 */
1992 	while (spfn < end_pfn) {
1993 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
1994 		cond_resched();
1995 	}
1996 }
1997 
1998 /* An arch may override for more concurrency. */
1999 __weak int __init
2000 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2001 {
2002 	return 1;
2003 }
2004 
2005 /* Initialise remaining memory on a node */
2006 static int __init deferred_init_memmap(void *data)
2007 {
2008 	pg_data_t *pgdat = data;
2009 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2010 	unsigned long spfn = 0, epfn = 0;
2011 	unsigned long first_init_pfn, flags;
2012 	unsigned long start = jiffies;
2013 	struct zone *zone;
2014 	int zid, max_threads;
2015 	u64 i;
2016 
2017 	/* Bind memory initialisation thread to a local node if possible */
2018 	if (!cpumask_empty(cpumask))
2019 		set_cpus_allowed_ptr(current, cpumask);
2020 
2021 	pgdat_resize_lock(pgdat, &flags);
2022 	first_init_pfn = pgdat->first_deferred_pfn;
2023 	if (first_init_pfn == ULONG_MAX) {
2024 		pgdat_resize_unlock(pgdat, &flags);
2025 		pgdat_init_report_one_done();
2026 		return 0;
2027 	}
2028 
2029 	/* Sanity check boundaries */
2030 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2031 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2032 	pgdat->first_deferred_pfn = ULONG_MAX;
2033 
2034 	/*
2035 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
2036 	 * interrupt thread must allocate this early in boot, zone must be
2037 	 * pre-grown prior to start of deferred page initialization.
2038 	 */
2039 	pgdat_resize_unlock(pgdat, &flags);
2040 
2041 	/* Only the highest zone is deferred so find it */
2042 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2043 		zone = pgdat->node_zones + zid;
2044 		if (first_init_pfn < zone_end_pfn(zone))
2045 			break;
2046 	}
2047 
2048 	/* If the zone is empty somebody else may have cleared out the zone */
2049 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2050 						 first_init_pfn))
2051 		goto zone_empty;
2052 
2053 	max_threads = deferred_page_init_max_threads(cpumask);
2054 
2055 	while (spfn < epfn) {
2056 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2057 		struct padata_mt_job job = {
2058 			.thread_fn   = deferred_init_memmap_chunk,
2059 			.fn_arg      = zone,
2060 			.start       = spfn,
2061 			.size        = epfn_align - spfn,
2062 			.align       = PAGES_PER_SECTION,
2063 			.min_chunk   = PAGES_PER_SECTION,
2064 			.max_threads = max_threads,
2065 		};
2066 
2067 		padata_do_multithreaded(&job);
2068 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2069 						    epfn_align);
2070 	}
2071 zone_empty:
2072 	/* Sanity check that the next zone really is unpopulated */
2073 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2074 
2075 	pr_info("node %d deferred pages initialised in %ums\n",
2076 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2077 
2078 	pgdat_init_report_one_done();
2079 	return 0;
2080 }
2081 
2082 /*
2083  * If this zone has deferred pages, try to grow it by initializing enough
2084  * deferred pages to satisfy the allocation specified by order, rounded up to
2085  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2086  * of SECTION_SIZE bytes by initializing struct pages in increments of
2087  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2088  *
2089  * Return true when zone was grown, otherwise return false. We return true even
2090  * when we grow less than requested, to let the caller decide if there are
2091  * enough pages to satisfy the allocation.
2092  *
2093  * Note: We use noinline because this function is needed only during boot, and
2094  * it is called from a __ref function _deferred_grow_zone. This way we are
2095  * making sure that it is not inlined into permanent text section.
2096  */
2097 static noinline bool __init
2098 deferred_grow_zone(struct zone *zone, unsigned int order)
2099 {
2100 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2101 	pg_data_t *pgdat = zone->zone_pgdat;
2102 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2103 	unsigned long spfn, epfn, flags;
2104 	unsigned long nr_pages = 0;
2105 	u64 i;
2106 
2107 	/* Only the last zone may have deferred pages */
2108 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2109 		return false;
2110 
2111 	pgdat_resize_lock(pgdat, &flags);
2112 
2113 	/*
2114 	 * If someone grew this zone while we were waiting for spinlock, return
2115 	 * true, as there might be enough pages already.
2116 	 */
2117 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2118 		pgdat_resize_unlock(pgdat, &flags);
2119 		return true;
2120 	}
2121 
2122 	/* If the zone is empty somebody else may have cleared out the zone */
2123 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2124 						 first_deferred_pfn)) {
2125 		pgdat->first_deferred_pfn = ULONG_MAX;
2126 		pgdat_resize_unlock(pgdat, &flags);
2127 		/* Retry only once. */
2128 		return first_deferred_pfn != ULONG_MAX;
2129 	}
2130 
2131 	/*
2132 	 * Initialize and free pages in MAX_ORDER sized increments so
2133 	 * that we can avoid introducing any issues with the buddy
2134 	 * allocator.
2135 	 */
2136 	while (spfn < epfn) {
2137 		/* update our first deferred PFN for this section */
2138 		first_deferred_pfn = spfn;
2139 
2140 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2141 		touch_nmi_watchdog();
2142 
2143 		/* We should only stop along section boundaries */
2144 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2145 			continue;
2146 
2147 		/* If our quota has been met we can stop here */
2148 		if (nr_pages >= nr_pages_needed)
2149 			break;
2150 	}
2151 
2152 	pgdat->first_deferred_pfn = spfn;
2153 	pgdat_resize_unlock(pgdat, &flags);
2154 
2155 	return nr_pages > 0;
2156 }
2157 
2158 /*
2159  * deferred_grow_zone() is __init, but it is called from
2160  * get_page_from_freelist() during early boot until deferred_pages permanently
2161  * disables this call. This is why we have refdata wrapper to avoid warning,
2162  * and to ensure that the function body gets unloaded.
2163  */
2164 static bool __ref
2165 _deferred_grow_zone(struct zone *zone, unsigned int order)
2166 {
2167 	return deferred_grow_zone(zone, order);
2168 }
2169 
2170 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2171 
2172 void __init page_alloc_init_late(void)
2173 {
2174 	struct zone *zone;
2175 	int nid;
2176 
2177 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2178 
2179 	/* There will be num_node_state(N_MEMORY) threads */
2180 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2181 	for_each_node_state(nid, N_MEMORY) {
2182 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2183 	}
2184 
2185 	/* Block until all are initialised */
2186 	wait_for_completion(&pgdat_init_all_done_comp);
2187 
2188 	/*
2189 	 * We initialized the rest of the deferred pages.  Permanently disable
2190 	 * on-demand struct page initialization.
2191 	 */
2192 	static_branch_disable(&deferred_pages);
2193 
2194 	/* Reinit limits that are based on free pages after the kernel is up */
2195 	files_maxfiles_init();
2196 #endif
2197 
2198 	buffer_init();
2199 
2200 	/* Discard memblock private memory */
2201 	memblock_discard();
2202 
2203 	for_each_node_state(nid, N_MEMORY)
2204 		shuffle_free_memory(NODE_DATA(nid));
2205 
2206 	for_each_populated_zone(zone)
2207 		set_zone_contiguous(zone);
2208 }
2209 
2210 #ifdef CONFIG_CMA
2211 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2212 void __init init_cma_reserved_pageblock(struct page *page)
2213 {
2214 	unsigned i = pageblock_nr_pages;
2215 	struct page *p = page;
2216 
2217 	do {
2218 		__ClearPageReserved(p);
2219 		set_page_count(p, 0);
2220 	} while (++p, --i);
2221 
2222 	set_pageblock_migratetype(page, MIGRATE_CMA);
2223 	set_page_refcounted(page);
2224 	__free_pages(page, pageblock_order);
2225 
2226 	adjust_managed_page_count(page, pageblock_nr_pages);
2227 	page_zone(page)->cma_pages += pageblock_nr_pages;
2228 }
2229 #endif
2230 
2231 /*
2232  * The order of subdivision here is critical for the IO subsystem.
2233  * Please do not alter this order without good reasons and regression
2234  * testing. Specifically, as large blocks of memory are subdivided,
2235  * the order in which smaller blocks are delivered depends on the order
2236  * they're subdivided in this function. This is the primary factor
2237  * influencing the order in which pages are delivered to the IO
2238  * subsystem according to empirical testing, and this is also justified
2239  * by considering the behavior of a buddy system containing a single
2240  * large block of memory acted on by a series of small allocations.
2241  * This behavior is a critical factor in sglist merging's success.
2242  *
2243  * -- nyc
2244  */
2245 static inline void expand(struct zone *zone, struct page *page,
2246 	int low, int high, int migratetype)
2247 {
2248 	unsigned long size = 1 << high;
2249 
2250 	while (high > low) {
2251 		high--;
2252 		size >>= 1;
2253 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2254 
2255 		/*
2256 		 * Mark as guard pages (or page), that will allow to
2257 		 * merge back to allocator when buddy will be freed.
2258 		 * Corresponding page table entries will not be touched,
2259 		 * pages will stay not present in virtual address space
2260 		 */
2261 		if (set_page_guard(zone, &page[size], high, migratetype))
2262 			continue;
2263 
2264 		add_to_free_list(&page[size], zone, high, migratetype);
2265 		set_buddy_order(&page[size], high);
2266 	}
2267 }
2268 
2269 static void check_new_page_bad(struct page *page)
2270 {
2271 	if (unlikely(page->flags & __PG_HWPOISON)) {
2272 		/* Don't complain about hwpoisoned pages */
2273 		page_mapcount_reset(page); /* remove PageBuddy */
2274 		return;
2275 	}
2276 
2277 	bad_page(page,
2278 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2279 }
2280 
2281 /*
2282  * This page is about to be returned from the page allocator
2283  */
2284 static inline int check_new_page(struct page *page)
2285 {
2286 	if (likely(page_expected_state(page,
2287 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2288 		return 0;
2289 
2290 	check_new_page_bad(page);
2291 	return 1;
2292 }
2293 
2294 static bool check_new_pages(struct page *page, unsigned int order)
2295 {
2296 	int i;
2297 	for (i = 0; i < (1 << order); i++) {
2298 		struct page *p = page + i;
2299 
2300 		if (unlikely(check_new_page(p)))
2301 			return true;
2302 	}
2303 
2304 	return false;
2305 }
2306 
2307 #ifdef CONFIG_DEBUG_VM
2308 /*
2309  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2310  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2311  * also checked when pcp lists are refilled from the free lists.
2312  */
2313 static inline bool check_pcp_refill(struct page *page, unsigned int order)
2314 {
2315 	if (debug_pagealloc_enabled_static())
2316 		return check_new_pages(page, order);
2317 	else
2318 		return false;
2319 }
2320 
2321 static inline bool check_new_pcp(struct page *page, unsigned int order)
2322 {
2323 	return check_new_pages(page, order);
2324 }
2325 #else
2326 /*
2327  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2328  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2329  * enabled, they are also checked when being allocated from the pcp lists.
2330  */
2331 static inline bool check_pcp_refill(struct page *page, unsigned int order)
2332 {
2333 	return check_new_pages(page, order);
2334 }
2335 static inline bool check_new_pcp(struct page *page, unsigned int order)
2336 {
2337 	if (debug_pagealloc_enabled_static())
2338 		return check_new_pages(page, order);
2339 	else
2340 		return false;
2341 }
2342 #endif /* CONFIG_DEBUG_VM */
2343 
2344 inline void post_alloc_hook(struct page *page, unsigned int order,
2345 				gfp_t gfp_flags)
2346 {
2347 	set_page_private(page, 0);
2348 	set_page_refcounted(page);
2349 
2350 	arch_alloc_page(page, order);
2351 	debug_pagealloc_map_pages(page, 1 << order);
2352 
2353 	/*
2354 	 * Page unpoisoning must happen before memory initialization.
2355 	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2356 	 * allocations and the page unpoisoning code will complain.
2357 	 */
2358 	kernel_unpoison_pages(page, 1 << order);
2359 
2360 	/*
2361 	 * As memory initialization might be integrated into KASAN,
2362 	 * kasan_alloc_pages and kernel_init_free_pages must be
2363 	 * kept together to avoid discrepancies in behavior.
2364 	 */
2365 	if (kasan_has_integrated_init()) {
2366 		kasan_alloc_pages(page, order, gfp_flags);
2367 	} else {
2368 		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2369 
2370 		kasan_unpoison_pages(page, order, init);
2371 		if (init)
2372 			kernel_init_free_pages(page, 1 << order,
2373 					       gfp_flags & __GFP_ZEROTAGS);
2374 	}
2375 
2376 	set_page_owner(page, order, gfp_flags);
2377 	page_table_check_alloc(page, order);
2378 }
2379 
2380 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2381 							unsigned int alloc_flags)
2382 {
2383 	post_alloc_hook(page, order, gfp_flags);
2384 
2385 	if (order && (gfp_flags & __GFP_COMP))
2386 		prep_compound_page(page, order);
2387 
2388 	/*
2389 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2390 	 * allocate the page. The expectation is that the caller is taking
2391 	 * steps that will free more memory. The caller should avoid the page
2392 	 * being used for !PFMEMALLOC purposes.
2393 	 */
2394 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2395 		set_page_pfmemalloc(page);
2396 	else
2397 		clear_page_pfmemalloc(page);
2398 }
2399 
2400 /*
2401  * Go through the free lists for the given migratetype and remove
2402  * the smallest available page from the freelists
2403  */
2404 static __always_inline
2405 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2406 						int migratetype)
2407 {
2408 	unsigned int current_order;
2409 	struct free_area *area;
2410 	struct page *page;
2411 
2412 	/* Find a page of the appropriate size in the preferred list */
2413 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2414 		area = &(zone->free_area[current_order]);
2415 		page = get_page_from_free_area(area, migratetype);
2416 		if (!page)
2417 			continue;
2418 		del_page_from_free_list(page, zone, current_order);
2419 		expand(zone, page, order, current_order, migratetype);
2420 		set_pcppage_migratetype(page, migratetype);
2421 		return page;
2422 	}
2423 
2424 	return NULL;
2425 }
2426 
2427 
2428 /*
2429  * This array describes the order lists are fallen back to when
2430  * the free lists for the desirable migrate type are depleted
2431  *
2432  * The other migratetypes do not have fallbacks.
2433  */
2434 static int fallbacks[MIGRATE_TYPES][3] = {
2435 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2436 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2437 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2438 };
2439 
2440 #ifdef CONFIG_CMA
2441 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2442 					unsigned int order)
2443 {
2444 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2445 }
2446 #else
2447 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2448 					unsigned int order) { return NULL; }
2449 #endif
2450 
2451 /*
2452  * Move the free pages in a range to the freelist tail of the requested type.
2453  * Note that start_page and end_pages are not aligned on a pageblock
2454  * boundary. If alignment is required, use move_freepages_block()
2455  */
2456 static int move_freepages(struct zone *zone,
2457 			  unsigned long start_pfn, unsigned long end_pfn,
2458 			  int migratetype, int *num_movable)
2459 {
2460 	struct page *page;
2461 	unsigned long pfn;
2462 	unsigned int order;
2463 	int pages_moved = 0;
2464 
2465 	for (pfn = start_pfn; pfn <= end_pfn;) {
2466 		page = pfn_to_page(pfn);
2467 		if (!PageBuddy(page)) {
2468 			/*
2469 			 * We assume that pages that could be isolated for
2470 			 * migration are movable. But we don't actually try
2471 			 * isolating, as that would be expensive.
2472 			 */
2473 			if (num_movable &&
2474 					(PageLRU(page) || __PageMovable(page)))
2475 				(*num_movable)++;
2476 			pfn++;
2477 			continue;
2478 		}
2479 
2480 		/* Make sure we are not inadvertently changing nodes */
2481 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2482 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2483 
2484 		order = buddy_order(page);
2485 		move_to_free_list(page, zone, order, migratetype);
2486 		pfn += 1 << order;
2487 		pages_moved += 1 << order;
2488 	}
2489 
2490 	return pages_moved;
2491 }
2492 
2493 int move_freepages_block(struct zone *zone, struct page *page,
2494 				int migratetype, int *num_movable)
2495 {
2496 	unsigned long start_pfn, end_pfn, pfn;
2497 
2498 	if (num_movable)
2499 		*num_movable = 0;
2500 
2501 	pfn = page_to_pfn(page);
2502 	start_pfn = pfn & ~(pageblock_nr_pages - 1);
2503 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2504 
2505 	/* Do not cross zone boundaries */
2506 	if (!zone_spans_pfn(zone, start_pfn))
2507 		start_pfn = pfn;
2508 	if (!zone_spans_pfn(zone, end_pfn))
2509 		return 0;
2510 
2511 	return move_freepages(zone, start_pfn, end_pfn, migratetype,
2512 								num_movable);
2513 }
2514 
2515 static void change_pageblock_range(struct page *pageblock_page,
2516 					int start_order, int migratetype)
2517 {
2518 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2519 
2520 	while (nr_pageblocks--) {
2521 		set_pageblock_migratetype(pageblock_page, migratetype);
2522 		pageblock_page += pageblock_nr_pages;
2523 	}
2524 }
2525 
2526 /*
2527  * When we are falling back to another migratetype during allocation, try to
2528  * steal extra free pages from the same pageblocks to satisfy further
2529  * allocations, instead of polluting multiple pageblocks.
2530  *
2531  * If we are stealing a relatively large buddy page, it is likely there will
2532  * be more free pages in the pageblock, so try to steal them all. For
2533  * reclaimable and unmovable allocations, we steal regardless of page size,
2534  * as fragmentation caused by those allocations polluting movable pageblocks
2535  * is worse than movable allocations stealing from unmovable and reclaimable
2536  * pageblocks.
2537  */
2538 static bool can_steal_fallback(unsigned int order, int start_mt)
2539 {
2540 	/*
2541 	 * Leaving this order check is intended, although there is
2542 	 * relaxed order check in next check. The reason is that
2543 	 * we can actually steal whole pageblock if this condition met,
2544 	 * but, below check doesn't guarantee it and that is just heuristic
2545 	 * so could be changed anytime.
2546 	 */
2547 	if (order >= pageblock_order)
2548 		return true;
2549 
2550 	if (order >= pageblock_order / 2 ||
2551 		start_mt == MIGRATE_RECLAIMABLE ||
2552 		start_mt == MIGRATE_UNMOVABLE ||
2553 		page_group_by_mobility_disabled)
2554 		return true;
2555 
2556 	return false;
2557 }
2558 
2559 static inline bool boost_watermark(struct zone *zone)
2560 {
2561 	unsigned long max_boost;
2562 
2563 	if (!watermark_boost_factor)
2564 		return false;
2565 	/*
2566 	 * Don't bother in zones that are unlikely to produce results.
2567 	 * On small machines, including kdump capture kernels running
2568 	 * in a small area, boosting the watermark can cause an out of
2569 	 * memory situation immediately.
2570 	 */
2571 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2572 		return false;
2573 
2574 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2575 			watermark_boost_factor, 10000);
2576 
2577 	/*
2578 	 * high watermark may be uninitialised if fragmentation occurs
2579 	 * very early in boot so do not boost. We do not fall
2580 	 * through and boost by pageblock_nr_pages as failing
2581 	 * allocations that early means that reclaim is not going
2582 	 * to help and it may even be impossible to reclaim the
2583 	 * boosted watermark resulting in a hang.
2584 	 */
2585 	if (!max_boost)
2586 		return false;
2587 
2588 	max_boost = max(pageblock_nr_pages, max_boost);
2589 
2590 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2591 		max_boost);
2592 
2593 	return true;
2594 }
2595 
2596 /*
2597  * This function implements actual steal behaviour. If order is large enough,
2598  * we can steal whole pageblock. If not, we first move freepages in this
2599  * pageblock to our migratetype and determine how many already-allocated pages
2600  * are there in the pageblock with a compatible migratetype. If at least half
2601  * of pages are free or compatible, we can change migratetype of the pageblock
2602  * itself, so pages freed in the future will be put on the correct free list.
2603  */
2604 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2605 		unsigned int alloc_flags, int start_type, bool whole_block)
2606 {
2607 	unsigned int current_order = buddy_order(page);
2608 	int free_pages, movable_pages, alike_pages;
2609 	int old_block_type;
2610 
2611 	old_block_type = get_pageblock_migratetype(page);
2612 
2613 	/*
2614 	 * This can happen due to races and we want to prevent broken
2615 	 * highatomic accounting.
2616 	 */
2617 	if (is_migrate_highatomic(old_block_type))
2618 		goto single_page;
2619 
2620 	/* Take ownership for orders >= pageblock_order */
2621 	if (current_order >= pageblock_order) {
2622 		change_pageblock_range(page, current_order, start_type);
2623 		goto single_page;
2624 	}
2625 
2626 	/*
2627 	 * Boost watermarks to increase reclaim pressure to reduce the
2628 	 * likelihood of future fallbacks. Wake kswapd now as the node
2629 	 * may be balanced overall and kswapd will not wake naturally.
2630 	 */
2631 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2632 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2633 
2634 	/* We are not allowed to try stealing from the whole block */
2635 	if (!whole_block)
2636 		goto single_page;
2637 
2638 	free_pages = move_freepages_block(zone, page, start_type,
2639 						&movable_pages);
2640 	/*
2641 	 * Determine how many pages are compatible with our allocation.
2642 	 * For movable allocation, it's the number of movable pages which
2643 	 * we just obtained. For other types it's a bit more tricky.
2644 	 */
2645 	if (start_type == MIGRATE_MOVABLE) {
2646 		alike_pages = movable_pages;
2647 	} else {
2648 		/*
2649 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2650 		 * to MOVABLE pageblock, consider all non-movable pages as
2651 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2652 		 * vice versa, be conservative since we can't distinguish the
2653 		 * exact migratetype of non-movable pages.
2654 		 */
2655 		if (old_block_type == MIGRATE_MOVABLE)
2656 			alike_pages = pageblock_nr_pages
2657 						- (free_pages + movable_pages);
2658 		else
2659 			alike_pages = 0;
2660 	}
2661 
2662 	/* moving whole block can fail due to zone boundary conditions */
2663 	if (!free_pages)
2664 		goto single_page;
2665 
2666 	/*
2667 	 * If a sufficient number of pages in the block are either free or of
2668 	 * comparable migratability as our allocation, claim the whole block.
2669 	 */
2670 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2671 			page_group_by_mobility_disabled)
2672 		set_pageblock_migratetype(page, start_type);
2673 
2674 	return;
2675 
2676 single_page:
2677 	move_to_free_list(page, zone, current_order, start_type);
2678 }
2679 
2680 /*
2681  * Check whether there is a suitable fallback freepage with requested order.
2682  * If only_stealable is true, this function returns fallback_mt only if
2683  * we can steal other freepages all together. This would help to reduce
2684  * fragmentation due to mixed migratetype pages in one pageblock.
2685  */
2686 int find_suitable_fallback(struct free_area *area, unsigned int order,
2687 			int migratetype, bool only_stealable, bool *can_steal)
2688 {
2689 	int i;
2690 	int fallback_mt;
2691 
2692 	if (area->nr_free == 0)
2693 		return -1;
2694 
2695 	*can_steal = false;
2696 	for (i = 0;; i++) {
2697 		fallback_mt = fallbacks[migratetype][i];
2698 		if (fallback_mt == MIGRATE_TYPES)
2699 			break;
2700 
2701 		if (free_area_empty(area, fallback_mt))
2702 			continue;
2703 
2704 		if (can_steal_fallback(order, migratetype))
2705 			*can_steal = true;
2706 
2707 		if (!only_stealable)
2708 			return fallback_mt;
2709 
2710 		if (*can_steal)
2711 			return fallback_mt;
2712 	}
2713 
2714 	return -1;
2715 }
2716 
2717 /*
2718  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2719  * there are no empty page blocks that contain a page with a suitable order
2720  */
2721 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2722 				unsigned int alloc_order)
2723 {
2724 	int mt;
2725 	unsigned long max_managed, flags;
2726 
2727 	/*
2728 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2729 	 * Check is race-prone but harmless.
2730 	 */
2731 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2732 	if (zone->nr_reserved_highatomic >= max_managed)
2733 		return;
2734 
2735 	spin_lock_irqsave(&zone->lock, flags);
2736 
2737 	/* Recheck the nr_reserved_highatomic limit under the lock */
2738 	if (zone->nr_reserved_highatomic >= max_managed)
2739 		goto out_unlock;
2740 
2741 	/* Yoink! */
2742 	mt = get_pageblock_migratetype(page);
2743 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
2744 	if (migratetype_is_mergeable(mt)) {
2745 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2746 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2747 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2748 	}
2749 
2750 out_unlock:
2751 	spin_unlock_irqrestore(&zone->lock, flags);
2752 }
2753 
2754 /*
2755  * Used when an allocation is about to fail under memory pressure. This
2756  * potentially hurts the reliability of high-order allocations when under
2757  * intense memory pressure but failed atomic allocations should be easier
2758  * to recover from than an OOM.
2759  *
2760  * If @force is true, try to unreserve a pageblock even though highatomic
2761  * pageblock is exhausted.
2762  */
2763 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2764 						bool force)
2765 {
2766 	struct zonelist *zonelist = ac->zonelist;
2767 	unsigned long flags;
2768 	struct zoneref *z;
2769 	struct zone *zone;
2770 	struct page *page;
2771 	int order;
2772 	bool ret;
2773 
2774 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2775 								ac->nodemask) {
2776 		/*
2777 		 * Preserve at least one pageblock unless memory pressure
2778 		 * is really high.
2779 		 */
2780 		if (!force && zone->nr_reserved_highatomic <=
2781 					pageblock_nr_pages)
2782 			continue;
2783 
2784 		spin_lock_irqsave(&zone->lock, flags);
2785 		for (order = 0; order < MAX_ORDER; order++) {
2786 			struct free_area *area = &(zone->free_area[order]);
2787 
2788 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2789 			if (!page)
2790 				continue;
2791 
2792 			/*
2793 			 * In page freeing path, migratetype change is racy so
2794 			 * we can counter several free pages in a pageblock
2795 			 * in this loop although we changed the pageblock type
2796 			 * from highatomic to ac->migratetype. So we should
2797 			 * adjust the count once.
2798 			 */
2799 			if (is_migrate_highatomic_page(page)) {
2800 				/*
2801 				 * It should never happen but changes to
2802 				 * locking could inadvertently allow a per-cpu
2803 				 * drain to add pages to MIGRATE_HIGHATOMIC
2804 				 * while unreserving so be safe and watch for
2805 				 * underflows.
2806 				 */
2807 				zone->nr_reserved_highatomic -= min(
2808 						pageblock_nr_pages,
2809 						zone->nr_reserved_highatomic);
2810 			}
2811 
2812 			/*
2813 			 * Convert to ac->migratetype and avoid the normal
2814 			 * pageblock stealing heuristics. Minimally, the caller
2815 			 * is doing the work and needs the pages. More
2816 			 * importantly, if the block was always converted to
2817 			 * MIGRATE_UNMOVABLE or another type then the number
2818 			 * of pageblocks that cannot be completely freed
2819 			 * may increase.
2820 			 */
2821 			set_pageblock_migratetype(page, ac->migratetype);
2822 			ret = move_freepages_block(zone, page, ac->migratetype,
2823 									NULL);
2824 			if (ret) {
2825 				spin_unlock_irqrestore(&zone->lock, flags);
2826 				return ret;
2827 			}
2828 		}
2829 		spin_unlock_irqrestore(&zone->lock, flags);
2830 	}
2831 
2832 	return false;
2833 }
2834 
2835 /*
2836  * Try finding a free buddy page on the fallback list and put it on the free
2837  * list of requested migratetype, possibly along with other pages from the same
2838  * block, depending on fragmentation avoidance heuristics. Returns true if
2839  * fallback was found so that __rmqueue_smallest() can grab it.
2840  *
2841  * The use of signed ints for order and current_order is a deliberate
2842  * deviation from the rest of this file, to make the for loop
2843  * condition simpler.
2844  */
2845 static __always_inline bool
2846 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2847 						unsigned int alloc_flags)
2848 {
2849 	struct free_area *area;
2850 	int current_order;
2851 	int min_order = order;
2852 	struct page *page;
2853 	int fallback_mt;
2854 	bool can_steal;
2855 
2856 	/*
2857 	 * Do not steal pages from freelists belonging to other pageblocks
2858 	 * i.e. orders < pageblock_order. If there are no local zones free,
2859 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2860 	 */
2861 	if (alloc_flags & ALLOC_NOFRAGMENT)
2862 		min_order = pageblock_order;
2863 
2864 	/*
2865 	 * Find the largest available free page in the other list. This roughly
2866 	 * approximates finding the pageblock with the most free pages, which
2867 	 * would be too costly to do exactly.
2868 	 */
2869 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2870 				--current_order) {
2871 		area = &(zone->free_area[current_order]);
2872 		fallback_mt = find_suitable_fallback(area, current_order,
2873 				start_migratetype, false, &can_steal);
2874 		if (fallback_mt == -1)
2875 			continue;
2876 
2877 		/*
2878 		 * We cannot steal all free pages from the pageblock and the
2879 		 * requested migratetype is movable. In that case it's better to
2880 		 * steal and split the smallest available page instead of the
2881 		 * largest available page, because even if the next movable
2882 		 * allocation falls back into a different pageblock than this
2883 		 * one, it won't cause permanent fragmentation.
2884 		 */
2885 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2886 					&& current_order > order)
2887 			goto find_smallest;
2888 
2889 		goto do_steal;
2890 	}
2891 
2892 	return false;
2893 
2894 find_smallest:
2895 	for (current_order = order; current_order < MAX_ORDER;
2896 							current_order++) {
2897 		area = &(zone->free_area[current_order]);
2898 		fallback_mt = find_suitable_fallback(area, current_order,
2899 				start_migratetype, false, &can_steal);
2900 		if (fallback_mt != -1)
2901 			break;
2902 	}
2903 
2904 	/*
2905 	 * This should not happen - we already found a suitable fallback
2906 	 * when looking for the largest page.
2907 	 */
2908 	VM_BUG_ON(current_order == MAX_ORDER);
2909 
2910 do_steal:
2911 	page = get_page_from_free_area(area, fallback_mt);
2912 
2913 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2914 								can_steal);
2915 
2916 	trace_mm_page_alloc_extfrag(page, order, current_order,
2917 		start_migratetype, fallback_mt);
2918 
2919 	return true;
2920 
2921 }
2922 
2923 /*
2924  * Do the hard work of removing an element from the buddy allocator.
2925  * Call me with the zone->lock already held.
2926  */
2927 static __always_inline struct page *
2928 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2929 						unsigned int alloc_flags)
2930 {
2931 	struct page *page;
2932 
2933 	if (IS_ENABLED(CONFIG_CMA)) {
2934 		/*
2935 		 * Balance movable allocations between regular and CMA areas by
2936 		 * allocating from CMA when over half of the zone's free memory
2937 		 * is in the CMA area.
2938 		 */
2939 		if (alloc_flags & ALLOC_CMA &&
2940 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2941 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2942 			page = __rmqueue_cma_fallback(zone, order);
2943 			if (page)
2944 				goto out;
2945 		}
2946 	}
2947 retry:
2948 	page = __rmqueue_smallest(zone, order, migratetype);
2949 	if (unlikely(!page)) {
2950 		if (alloc_flags & ALLOC_CMA)
2951 			page = __rmqueue_cma_fallback(zone, order);
2952 
2953 		if (!page && __rmqueue_fallback(zone, order, migratetype,
2954 								alloc_flags))
2955 			goto retry;
2956 	}
2957 out:
2958 	if (page)
2959 		trace_mm_page_alloc_zone_locked(page, order, migratetype);
2960 	return page;
2961 }
2962 
2963 /*
2964  * Obtain a specified number of elements from the buddy allocator, all under
2965  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2966  * Returns the number of new pages which were placed at *list.
2967  */
2968 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2969 			unsigned long count, struct list_head *list,
2970 			int migratetype, unsigned int alloc_flags)
2971 {
2972 	int i, allocated = 0;
2973 
2974 	/*
2975 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
2976 	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
2977 	 */
2978 	spin_lock(&zone->lock);
2979 	for (i = 0; i < count; ++i) {
2980 		struct page *page = __rmqueue(zone, order, migratetype,
2981 								alloc_flags);
2982 		if (unlikely(page == NULL))
2983 			break;
2984 
2985 		if (unlikely(check_pcp_refill(page, order)))
2986 			continue;
2987 
2988 		/*
2989 		 * Split buddy pages returned by expand() are received here in
2990 		 * physical page order. The page is added to the tail of
2991 		 * caller's list. From the callers perspective, the linked list
2992 		 * is ordered by page number under some conditions. This is
2993 		 * useful for IO devices that can forward direction from the
2994 		 * head, thus also in the physical page order. This is useful
2995 		 * for IO devices that can merge IO requests if the physical
2996 		 * pages are ordered properly.
2997 		 */
2998 		list_add_tail(&page->lru, list);
2999 		allocated++;
3000 		if (is_migrate_cma(get_pcppage_migratetype(page)))
3001 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3002 					      -(1 << order));
3003 	}
3004 
3005 	/*
3006 	 * i pages were removed from the buddy list even if some leak due
3007 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3008 	 * on i. Do not confuse with 'allocated' which is the number of
3009 	 * pages added to the pcp list.
3010 	 */
3011 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3012 	spin_unlock(&zone->lock);
3013 	return allocated;
3014 }
3015 
3016 #ifdef CONFIG_NUMA
3017 /*
3018  * Called from the vmstat counter updater to drain pagesets of this
3019  * currently executing processor on remote nodes after they have
3020  * expired.
3021  *
3022  * Note that this function must be called with the thread pinned to
3023  * a single processor.
3024  */
3025 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3026 {
3027 	unsigned long flags;
3028 	int to_drain, batch;
3029 
3030 	local_lock_irqsave(&pagesets.lock, flags);
3031 	batch = READ_ONCE(pcp->batch);
3032 	to_drain = min(pcp->count, batch);
3033 	if (to_drain > 0)
3034 		free_pcppages_bulk(zone, to_drain, pcp, 0);
3035 	local_unlock_irqrestore(&pagesets.lock, flags);
3036 }
3037 #endif
3038 
3039 /*
3040  * Drain pcplists of the indicated processor and zone.
3041  *
3042  * The processor must either be the current processor and the
3043  * thread pinned to the current processor or a processor that
3044  * is not online.
3045  */
3046 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3047 {
3048 	unsigned long flags;
3049 	struct per_cpu_pages *pcp;
3050 
3051 	local_lock_irqsave(&pagesets.lock, flags);
3052 
3053 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3054 	if (pcp->count)
3055 		free_pcppages_bulk(zone, pcp->count, pcp, 0);
3056 
3057 	local_unlock_irqrestore(&pagesets.lock, flags);
3058 }
3059 
3060 /*
3061  * Drain pcplists of all zones on the indicated processor.
3062  *
3063  * The processor must either be the current processor and the
3064  * thread pinned to the current processor or a processor that
3065  * is not online.
3066  */
3067 static void drain_pages(unsigned int cpu)
3068 {
3069 	struct zone *zone;
3070 
3071 	for_each_populated_zone(zone) {
3072 		drain_pages_zone(cpu, zone);
3073 	}
3074 }
3075 
3076 /*
3077  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3078  *
3079  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3080  * the single zone's pages.
3081  */
3082 void drain_local_pages(struct zone *zone)
3083 {
3084 	int cpu = smp_processor_id();
3085 
3086 	if (zone)
3087 		drain_pages_zone(cpu, zone);
3088 	else
3089 		drain_pages(cpu);
3090 }
3091 
3092 static void drain_local_pages_wq(struct work_struct *work)
3093 {
3094 	struct pcpu_drain *drain;
3095 
3096 	drain = container_of(work, struct pcpu_drain, work);
3097 
3098 	/*
3099 	 * drain_all_pages doesn't use proper cpu hotplug protection so
3100 	 * we can race with cpu offline when the WQ can move this from
3101 	 * a cpu pinned worker to an unbound one. We can operate on a different
3102 	 * cpu which is alright but we also have to make sure to not move to
3103 	 * a different one.
3104 	 */
3105 	migrate_disable();
3106 	drain_local_pages(drain->zone);
3107 	migrate_enable();
3108 }
3109 
3110 /*
3111  * The implementation of drain_all_pages(), exposing an extra parameter to
3112  * drain on all cpus.
3113  *
3114  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3115  * not empty. The check for non-emptiness can however race with a free to
3116  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3117  * that need the guarantee that every CPU has drained can disable the
3118  * optimizing racy check.
3119  */
3120 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3121 {
3122 	int cpu;
3123 
3124 	/*
3125 	 * Allocate in the BSS so we won't require allocation in
3126 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3127 	 */
3128 	static cpumask_t cpus_with_pcps;
3129 
3130 	/*
3131 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
3132 	 * initialized.
3133 	 */
3134 	if (WARN_ON_ONCE(!mm_percpu_wq))
3135 		return;
3136 
3137 	/*
3138 	 * Do not drain if one is already in progress unless it's specific to
3139 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3140 	 * the drain to be complete when the call returns.
3141 	 */
3142 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3143 		if (!zone)
3144 			return;
3145 		mutex_lock(&pcpu_drain_mutex);
3146 	}
3147 
3148 	/*
3149 	 * We don't care about racing with CPU hotplug event
3150 	 * as offline notification will cause the notified
3151 	 * cpu to drain that CPU pcps and on_each_cpu_mask
3152 	 * disables preemption as part of its processing
3153 	 */
3154 	for_each_online_cpu(cpu) {
3155 		struct per_cpu_pages *pcp;
3156 		struct zone *z;
3157 		bool has_pcps = false;
3158 
3159 		if (force_all_cpus) {
3160 			/*
3161 			 * The pcp.count check is racy, some callers need a
3162 			 * guarantee that no cpu is missed.
3163 			 */
3164 			has_pcps = true;
3165 		} else if (zone) {
3166 			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3167 			if (pcp->count)
3168 				has_pcps = true;
3169 		} else {
3170 			for_each_populated_zone(z) {
3171 				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3172 				if (pcp->count) {
3173 					has_pcps = true;
3174 					break;
3175 				}
3176 			}
3177 		}
3178 
3179 		if (has_pcps)
3180 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3181 		else
3182 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3183 	}
3184 
3185 	for_each_cpu(cpu, &cpus_with_pcps) {
3186 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3187 
3188 		drain->zone = zone;
3189 		INIT_WORK(&drain->work, drain_local_pages_wq);
3190 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3191 	}
3192 	for_each_cpu(cpu, &cpus_with_pcps)
3193 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3194 
3195 	mutex_unlock(&pcpu_drain_mutex);
3196 }
3197 
3198 /*
3199  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3200  *
3201  * When zone parameter is non-NULL, spill just the single zone's pages.
3202  *
3203  * Note that this can be extremely slow as the draining happens in a workqueue.
3204  */
3205 void drain_all_pages(struct zone *zone)
3206 {
3207 	__drain_all_pages(zone, false);
3208 }
3209 
3210 #ifdef CONFIG_HIBERNATION
3211 
3212 /*
3213  * Touch the watchdog for every WD_PAGE_COUNT pages.
3214  */
3215 #define WD_PAGE_COUNT	(128*1024)
3216 
3217 void mark_free_pages(struct zone *zone)
3218 {
3219 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3220 	unsigned long flags;
3221 	unsigned int order, t;
3222 	struct page *page;
3223 
3224 	if (zone_is_empty(zone))
3225 		return;
3226 
3227 	spin_lock_irqsave(&zone->lock, flags);
3228 
3229 	max_zone_pfn = zone_end_pfn(zone);
3230 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3231 		if (pfn_valid(pfn)) {
3232 			page = pfn_to_page(pfn);
3233 
3234 			if (!--page_count) {
3235 				touch_nmi_watchdog();
3236 				page_count = WD_PAGE_COUNT;
3237 			}
3238 
3239 			if (page_zone(page) != zone)
3240 				continue;
3241 
3242 			if (!swsusp_page_is_forbidden(page))
3243 				swsusp_unset_page_free(page);
3244 		}
3245 
3246 	for_each_migratetype_order(order, t) {
3247 		list_for_each_entry(page,
3248 				&zone->free_area[order].free_list[t], lru) {
3249 			unsigned long i;
3250 
3251 			pfn = page_to_pfn(page);
3252 			for (i = 0; i < (1UL << order); i++) {
3253 				if (!--page_count) {
3254 					touch_nmi_watchdog();
3255 					page_count = WD_PAGE_COUNT;
3256 				}
3257 				swsusp_set_page_free(pfn_to_page(pfn + i));
3258 			}
3259 		}
3260 	}
3261 	spin_unlock_irqrestore(&zone->lock, flags);
3262 }
3263 #endif /* CONFIG_PM */
3264 
3265 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3266 							unsigned int order)
3267 {
3268 	int migratetype;
3269 
3270 	if (!free_pcp_prepare(page, order))
3271 		return false;
3272 
3273 	migratetype = get_pfnblock_migratetype(page, pfn);
3274 	set_pcppage_migratetype(page, migratetype);
3275 	return true;
3276 }
3277 
3278 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
3279 		       bool free_high)
3280 {
3281 	int min_nr_free, max_nr_free;
3282 
3283 	/* Free everything if batch freeing high-order pages. */
3284 	if (unlikely(free_high))
3285 		return pcp->count;
3286 
3287 	/* Check for PCP disabled or boot pageset */
3288 	if (unlikely(high < batch))
3289 		return 1;
3290 
3291 	/* Leave at least pcp->batch pages on the list */
3292 	min_nr_free = batch;
3293 	max_nr_free = high - batch;
3294 
3295 	/*
3296 	 * Double the number of pages freed each time there is subsequent
3297 	 * freeing of pages without any allocation.
3298 	 */
3299 	batch <<= pcp->free_factor;
3300 	if (batch < max_nr_free)
3301 		pcp->free_factor++;
3302 	batch = clamp(batch, min_nr_free, max_nr_free);
3303 
3304 	return batch;
3305 }
3306 
3307 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
3308 		       bool free_high)
3309 {
3310 	int high = READ_ONCE(pcp->high);
3311 
3312 	if (unlikely(!high || free_high))
3313 		return 0;
3314 
3315 	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3316 		return high;
3317 
3318 	/*
3319 	 * If reclaim is active, limit the number of pages that can be
3320 	 * stored on pcp lists
3321 	 */
3322 	return min(READ_ONCE(pcp->batch) << 2, high);
3323 }
3324 
3325 static void free_unref_page_commit(struct page *page, int migratetype,
3326 				   unsigned int order)
3327 {
3328 	struct zone *zone = page_zone(page);
3329 	struct per_cpu_pages *pcp;
3330 	int high;
3331 	int pindex;
3332 	bool free_high;
3333 
3334 	__count_vm_event(PGFREE);
3335 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3336 	pindex = order_to_pindex(migratetype, order);
3337 	list_add(&page->lru, &pcp->lists[pindex]);
3338 	pcp->count += 1 << order;
3339 
3340 	/*
3341 	 * As high-order pages other than THP's stored on PCP can contribute
3342 	 * to fragmentation, limit the number stored when PCP is heavily
3343 	 * freeing without allocation. The remainder after bulk freeing
3344 	 * stops will be drained from vmstat refresh context.
3345 	 */
3346 	free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
3347 
3348 	high = nr_pcp_high(pcp, zone, free_high);
3349 	if (pcp->count >= high) {
3350 		int batch = READ_ONCE(pcp->batch);
3351 
3352 		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
3353 	}
3354 }
3355 
3356 /*
3357  * Free a pcp page
3358  */
3359 void free_unref_page(struct page *page, unsigned int order)
3360 {
3361 	unsigned long flags;
3362 	unsigned long pfn = page_to_pfn(page);
3363 	int migratetype;
3364 
3365 	if (!free_unref_page_prepare(page, pfn, order))
3366 		return;
3367 
3368 	/*
3369 	 * We only track unmovable, reclaimable and movable on pcp lists.
3370 	 * Place ISOLATE pages on the isolated list because they are being
3371 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
3372 	 * areas back if necessary. Otherwise, we may have to free
3373 	 * excessively into the page allocator
3374 	 */
3375 	migratetype = get_pcppage_migratetype(page);
3376 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3377 		if (unlikely(is_migrate_isolate(migratetype))) {
3378 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3379 			return;
3380 		}
3381 		migratetype = MIGRATE_MOVABLE;
3382 	}
3383 
3384 	local_lock_irqsave(&pagesets.lock, flags);
3385 	free_unref_page_commit(page, migratetype, order);
3386 	local_unlock_irqrestore(&pagesets.lock, flags);
3387 }
3388 
3389 /*
3390  * Free a list of 0-order pages
3391  */
3392 void free_unref_page_list(struct list_head *list)
3393 {
3394 	struct page *page, *next;
3395 	unsigned long flags;
3396 	int batch_count = 0;
3397 	int migratetype;
3398 
3399 	/* Prepare pages for freeing */
3400 	list_for_each_entry_safe(page, next, list, lru) {
3401 		unsigned long pfn = page_to_pfn(page);
3402 		if (!free_unref_page_prepare(page, pfn, 0)) {
3403 			list_del(&page->lru);
3404 			continue;
3405 		}
3406 
3407 		/*
3408 		 * Free isolated pages directly to the allocator, see
3409 		 * comment in free_unref_page.
3410 		 */
3411 		migratetype = get_pcppage_migratetype(page);
3412 		if (unlikely(is_migrate_isolate(migratetype))) {
3413 			list_del(&page->lru);
3414 			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3415 			continue;
3416 		}
3417 	}
3418 
3419 	local_lock_irqsave(&pagesets.lock, flags);
3420 	list_for_each_entry_safe(page, next, list, lru) {
3421 		/*
3422 		 * Non-isolated types over MIGRATE_PCPTYPES get added
3423 		 * to the MIGRATE_MOVABLE pcp list.
3424 		 */
3425 		migratetype = get_pcppage_migratetype(page);
3426 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3427 			migratetype = MIGRATE_MOVABLE;
3428 
3429 		trace_mm_page_free_batched(page);
3430 		free_unref_page_commit(page, migratetype, 0);
3431 
3432 		/*
3433 		 * Guard against excessive IRQ disabled times when we get
3434 		 * a large list of pages to free.
3435 		 */
3436 		if (++batch_count == SWAP_CLUSTER_MAX) {
3437 			local_unlock_irqrestore(&pagesets.lock, flags);
3438 			batch_count = 0;
3439 			local_lock_irqsave(&pagesets.lock, flags);
3440 		}
3441 	}
3442 	local_unlock_irqrestore(&pagesets.lock, flags);
3443 }
3444 
3445 /*
3446  * split_page takes a non-compound higher-order page, and splits it into
3447  * n (1<<order) sub-pages: page[0..n]
3448  * Each sub-page must be freed individually.
3449  *
3450  * Note: this is probably too low level an operation for use in drivers.
3451  * Please consult with lkml before using this in your driver.
3452  */
3453 void split_page(struct page *page, unsigned int order)
3454 {
3455 	int i;
3456 
3457 	VM_BUG_ON_PAGE(PageCompound(page), page);
3458 	VM_BUG_ON_PAGE(!page_count(page), page);
3459 
3460 	for (i = 1; i < (1 << order); i++)
3461 		set_page_refcounted(page + i);
3462 	split_page_owner(page, 1 << order);
3463 	split_page_memcg(page, 1 << order);
3464 }
3465 EXPORT_SYMBOL_GPL(split_page);
3466 
3467 int __isolate_free_page(struct page *page, unsigned int order)
3468 {
3469 	unsigned long watermark;
3470 	struct zone *zone;
3471 	int mt;
3472 
3473 	BUG_ON(!PageBuddy(page));
3474 
3475 	zone = page_zone(page);
3476 	mt = get_pageblock_migratetype(page);
3477 
3478 	if (!is_migrate_isolate(mt)) {
3479 		/*
3480 		 * Obey watermarks as if the page was being allocated. We can
3481 		 * emulate a high-order watermark check with a raised order-0
3482 		 * watermark, because we already know our high-order page
3483 		 * exists.
3484 		 */
3485 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3486 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3487 			return 0;
3488 
3489 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3490 	}
3491 
3492 	/* Remove page from free list */
3493 
3494 	del_page_from_free_list(page, zone, order);
3495 
3496 	/*
3497 	 * Set the pageblock if the isolated page is at least half of a
3498 	 * pageblock
3499 	 */
3500 	if (order >= pageblock_order - 1) {
3501 		struct page *endpage = page + (1 << order) - 1;
3502 		for (; page < endpage; page += pageblock_nr_pages) {
3503 			int mt = get_pageblock_migratetype(page);
3504 			/*
3505 			 * Only change normal pageblocks (i.e., they can merge
3506 			 * with others)
3507 			 */
3508 			if (migratetype_is_mergeable(mt))
3509 				set_pageblock_migratetype(page,
3510 							  MIGRATE_MOVABLE);
3511 		}
3512 	}
3513 
3514 
3515 	return 1UL << order;
3516 }
3517 
3518 /**
3519  * __putback_isolated_page - Return a now-isolated page back where we got it
3520  * @page: Page that was isolated
3521  * @order: Order of the isolated page
3522  * @mt: The page's pageblock's migratetype
3523  *
3524  * This function is meant to return a page pulled from the free lists via
3525  * __isolate_free_page back to the free lists they were pulled from.
3526  */
3527 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3528 {
3529 	struct zone *zone = page_zone(page);
3530 
3531 	/* zone lock should be held when this function is called */
3532 	lockdep_assert_held(&zone->lock);
3533 
3534 	/* Return isolated page to tail of freelist. */
3535 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3536 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3537 }
3538 
3539 /*
3540  * Update NUMA hit/miss statistics
3541  *
3542  * Must be called with interrupts disabled.
3543  */
3544 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3545 				   long nr_account)
3546 {
3547 #ifdef CONFIG_NUMA
3548 	enum numa_stat_item local_stat = NUMA_LOCAL;
3549 
3550 	/* skip numa counters update if numa stats is disabled */
3551 	if (!static_branch_likely(&vm_numa_stat_key))
3552 		return;
3553 
3554 	if (zone_to_nid(z) != numa_node_id())
3555 		local_stat = NUMA_OTHER;
3556 
3557 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3558 		__count_numa_events(z, NUMA_HIT, nr_account);
3559 	else {
3560 		__count_numa_events(z, NUMA_MISS, nr_account);
3561 		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3562 	}
3563 	__count_numa_events(z, local_stat, nr_account);
3564 #endif
3565 }
3566 
3567 /* Remove page from the per-cpu list, caller must protect the list */
3568 static inline
3569 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3570 			int migratetype,
3571 			unsigned int alloc_flags,
3572 			struct per_cpu_pages *pcp,
3573 			struct list_head *list)
3574 {
3575 	struct page *page;
3576 
3577 	do {
3578 		if (list_empty(list)) {
3579 			int batch = READ_ONCE(pcp->batch);
3580 			int alloced;
3581 
3582 			/*
3583 			 * Scale batch relative to order if batch implies
3584 			 * free pages can be stored on the PCP. Batch can
3585 			 * be 1 for small zones or for boot pagesets which
3586 			 * should never store free pages as the pages may
3587 			 * belong to arbitrary zones.
3588 			 */
3589 			if (batch > 1)
3590 				batch = max(batch >> order, 2);
3591 			alloced = rmqueue_bulk(zone, order,
3592 					batch, list,
3593 					migratetype, alloc_flags);
3594 
3595 			pcp->count += alloced << order;
3596 			if (unlikely(list_empty(list)))
3597 				return NULL;
3598 		}
3599 
3600 		page = list_first_entry(list, struct page, lru);
3601 		list_del(&page->lru);
3602 		pcp->count -= 1 << order;
3603 	} while (check_new_pcp(page, order));
3604 
3605 	return page;
3606 }
3607 
3608 /* Lock and remove page from the per-cpu list */
3609 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3610 			struct zone *zone, unsigned int order,
3611 			gfp_t gfp_flags, int migratetype,
3612 			unsigned int alloc_flags)
3613 {
3614 	struct per_cpu_pages *pcp;
3615 	struct list_head *list;
3616 	struct page *page;
3617 	unsigned long flags;
3618 
3619 	local_lock_irqsave(&pagesets.lock, flags);
3620 
3621 	/*
3622 	 * On allocation, reduce the number of pages that are batch freed.
3623 	 * See nr_pcp_free() where free_factor is increased for subsequent
3624 	 * frees.
3625 	 */
3626 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
3627 	pcp->free_factor >>= 1;
3628 	list = &pcp->lists[order_to_pindex(migratetype, order)];
3629 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3630 	local_unlock_irqrestore(&pagesets.lock, flags);
3631 	if (page) {
3632 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3633 		zone_statistics(preferred_zone, zone, 1);
3634 	}
3635 	return page;
3636 }
3637 
3638 /*
3639  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3640  */
3641 static inline
3642 struct page *rmqueue(struct zone *preferred_zone,
3643 			struct zone *zone, unsigned int order,
3644 			gfp_t gfp_flags, unsigned int alloc_flags,
3645 			int migratetype)
3646 {
3647 	unsigned long flags;
3648 	struct page *page;
3649 
3650 	if (likely(pcp_allowed_order(order))) {
3651 		/*
3652 		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3653 		 * we need to skip it when CMA area isn't allowed.
3654 		 */
3655 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3656 				migratetype != MIGRATE_MOVABLE) {
3657 			page = rmqueue_pcplist(preferred_zone, zone, order,
3658 					gfp_flags, migratetype, alloc_flags);
3659 			goto out;
3660 		}
3661 	}
3662 
3663 	/*
3664 	 * We most definitely don't want callers attempting to
3665 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3666 	 */
3667 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3668 
3669 	do {
3670 		page = NULL;
3671 		spin_lock_irqsave(&zone->lock, flags);
3672 		/*
3673 		 * order-0 request can reach here when the pcplist is skipped
3674 		 * due to non-CMA allocation context. HIGHATOMIC area is
3675 		 * reserved for high-order atomic allocation, so order-0
3676 		 * request should skip it.
3677 		 */
3678 		if (order > 0 && alloc_flags & ALLOC_HARDER) {
3679 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3680 			if (page)
3681 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3682 		}
3683 		if (!page) {
3684 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3685 			if (!page)
3686 				goto failed;
3687 		}
3688 		__mod_zone_freepage_state(zone, -(1 << order),
3689 					  get_pcppage_migratetype(page));
3690 		spin_unlock_irqrestore(&zone->lock, flags);
3691 	} while (check_new_pages(page, order));
3692 
3693 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3694 	zone_statistics(preferred_zone, zone, 1);
3695 
3696 out:
3697 	/* Separate test+clear to avoid unnecessary atomics */
3698 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3699 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3700 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3701 	}
3702 
3703 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3704 	return page;
3705 
3706 failed:
3707 	spin_unlock_irqrestore(&zone->lock, flags);
3708 	return NULL;
3709 }
3710 
3711 #ifdef CONFIG_FAIL_PAGE_ALLOC
3712 
3713 static struct {
3714 	struct fault_attr attr;
3715 
3716 	bool ignore_gfp_highmem;
3717 	bool ignore_gfp_reclaim;
3718 	u32 min_order;
3719 } fail_page_alloc = {
3720 	.attr = FAULT_ATTR_INITIALIZER,
3721 	.ignore_gfp_reclaim = true,
3722 	.ignore_gfp_highmem = true,
3723 	.min_order = 1,
3724 };
3725 
3726 static int __init setup_fail_page_alloc(char *str)
3727 {
3728 	return setup_fault_attr(&fail_page_alloc.attr, str);
3729 }
3730 __setup("fail_page_alloc=", setup_fail_page_alloc);
3731 
3732 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3733 {
3734 	if (order < fail_page_alloc.min_order)
3735 		return false;
3736 	if (gfp_mask & __GFP_NOFAIL)
3737 		return false;
3738 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3739 		return false;
3740 	if (fail_page_alloc.ignore_gfp_reclaim &&
3741 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3742 		return false;
3743 
3744 	return should_fail(&fail_page_alloc.attr, 1 << order);
3745 }
3746 
3747 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3748 
3749 static int __init fail_page_alloc_debugfs(void)
3750 {
3751 	umode_t mode = S_IFREG | 0600;
3752 	struct dentry *dir;
3753 
3754 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3755 					&fail_page_alloc.attr);
3756 
3757 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3758 			    &fail_page_alloc.ignore_gfp_reclaim);
3759 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3760 			    &fail_page_alloc.ignore_gfp_highmem);
3761 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3762 
3763 	return 0;
3764 }
3765 
3766 late_initcall(fail_page_alloc_debugfs);
3767 
3768 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3769 
3770 #else /* CONFIG_FAIL_PAGE_ALLOC */
3771 
3772 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3773 {
3774 	return false;
3775 }
3776 
3777 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3778 
3779 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3780 {
3781 	return __should_fail_alloc_page(gfp_mask, order);
3782 }
3783 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3784 
3785 static inline long __zone_watermark_unusable_free(struct zone *z,
3786 				unsigned int order, unsigned int alloc_flags)
3787 {
3788 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3789 	long unusable_free = (1 << order) - 1;
3790 
3791 	/*
3792 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3793 	 * the high-atomic reserves. This will over-estimate the size of the
3794 	 * atomic reserve but it avoids a search.
3795 	 */
3796 	if (likely(!alloc_harder))
3797 		unusable_free += z->nr_reserved_highatomic;
3798 
3799 #ifdef CONFIG_CMA
3800 	/* If allocation can't use CMA areas don't use free CMA pages */
3801 	if (!(alloc_flags & ALLOC_CMA))
3802 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3803 #endif
3804 
3805 	return unusable_free;
3806 }
3807 
3808 /*
3809  * Return true if free base pages are above 'mark'. For high-order checks it
3810  * will return true of the order-0 watermark is reached and there is at least
3811  * one free page of a suitable size. Checking now avoids taking the zone lock
3812  * to check in the allocation paths if no pages are free.
3813  */
3814 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3815 			 int highest_zoneidx, unsigned int alloc_flags,
3816 			 long free_pages)
3817 {
3818 	long min = mark;
3819 	int o;
3820 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3821 
3822 	/* free_pages may go negative - that's OK */
3823 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3824 
3825 	if (alloc_flags & ALLOC_HIGH)
3826 		min -= min / 2;
3827 
3828 	if (unlikely(alloc_harder)) {
3829 		/*
3830 		 * OOM victims can try even harder than normal ALLOC_HARDER
3831 		 * users on the grounds that it's definitely going to be in
3832 		 * the exit path shortly and free memory. Any allocation it
3833 		 * makes during the free path will be small and short-lived.
3834 		 */
3835 		if (alloc_flags & ALLOC_OOM)
3836 			min -= min / 2;
3837 		else
3838 			min -= min / 4;
3839 	}
3840 
3841 	/*
3842 	 * Check watermarks for an order-0 allocation request. If these
3843 	 * are not met, then a high-order request also cannot go ahead
3844 	 * even if a suitable page happened to be free.
3845 	 */
3846 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3847 		return false;
3848 
3849 	/* If this is an order-0 request then the watermark is fine */
3850 	if (!order)
3851 		return true;
3852 
3853 	/* For a high-order request, check at least one suitable page is free */
3854 	for (o = order; o < MAX_ORDER; o++) {
3855 		struct free_area *area = &z->free_area[o];
3856 		int mt;
3857 
3858 		if (!area->nr_free)
3859 			continue;
3860 
3861 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3862 			if (!free_area_empty(area, mt))
3863 				return true;
3864 		}
3865 
3866 #ifdef CONFIG_CMA
3867 		if ((alloc_flags & ALLOC_CMA) &&
3868 		    !free_area_empty(area, MIGRATE_CMA)) {
3869 			return true;
3870 		}
3871 #endif
3872 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3873 			return true;
3874 	}
3875 	return false;
3876 }
3877 
3878 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3879 		      int highest_zoneidx, unsigned int alloc_flags)
3880 {
3881 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3882 					zone_page_state(z, NR_FREE_PAGES));
3883 }
3884 
3885 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3886 				unsigned long mark, int highest_zoneidx,
3887 				unsigned int alloc_flags, gfp_t gfp_mask)
3888 {
3889 	long free_pages;
3890 
3891 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3892 
3893 	/*
3894 	 * Fast check for order-0 only. If this fails then the reserves
3895 	 * need to be calculated.
3896 	 */
3897 	if (!order) {
3898 		long fast_free;
3899 
3900 		fast_free = free_pages;
3901 		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3902 		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3903 			return true;
3904 	}
3905 
3906 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3907 					free_pages))
3908 		return true;
3909 	/*
3910 	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3911 	 * when checking the min watermark. The min watermark is the
3912 	 * point where boosting is ignored so that kswapd is woken up
3913 	 * when below the low watermark.
3914 	 */
3915 	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3916 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3917 		mark = z->_watermark[WMARK_MIN];
3918 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3919 					alloc_flags, free_pages);
3920 	}
3921 
3922 	return false;
3923 }
3924 
3925 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3926 			unsigned long mark, int highest_zoneidx)
3927 {
3928 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3929 
3930 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3931 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3932 
3933 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3934 								free_pages);
3935 }
3936 
3937 #ifdef CONFIG_NUMA
3938 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3939 
3940 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3941 {
3942 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3943 				node_reclaim_distance;
3944 }
3945 #else	/* CONFIG_NUMA */
3946 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3947 {
3948 	return true;
3949 }
3950 #endif	/* CONFIG_NUMA */
3951 
3952 /*
3953  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3954  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3955  * premature use of a lower zone may cause lowmem pressure problems that
3956  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3957  * probably too small. It only makes sense to spread allocations to avoid
3958  * fragmentation between the Normal and DMA32 zones.
3959  */
3960 static inline unsigned int
3961 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3962 {
3963 	unsigned int alloc_flags;
3964 
3965 	/*
3966 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3967 	 * to save a branch.
3968 	 */
3969 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3970 
3971 #ifdef CONFIG_ZONE_DMA32
3972 	if (!zone)
3973 		return alloc_flags;
3974 
3975 	if (zone_idx(zone) != ZONE_NORMAL)
3976 		return alloc_flags;
3977 
3978 	/*
3979 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3980 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3981 	 * on UMA that if Normal is populated then so is DMA32.
3982 	 */
3983 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3984 	if (nr_online_nodes > 1 && !populated_zone(--zone))
3985 		return alloc_flags;
3986 
3987 	alloc_flags |= ALLOC_NOFRAGMENT;
3988 #endif /* CONFIG_ZONE_DMA32 */
3989 	return alloc_flags;
3990 }
3991 
3992 /* Must be called after current_gfp_context() which can change gfp_mask */
3993 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3994 						  unsigned int alloc_flags)
3995 {
3996 #ifdef CONFIG_CMA
3997 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3998 		alloc_flags |= ALLOC_CMA;
3999 #endif
4000 	return alloc_flags;
4001 }
4002 
4003 /*
4004  * get_page_from_freelist goes through the zonelist trying to allocate
4005  * a page.
4006  */
4007 static struct page *
4008 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4009 						const struct alloc_context *ac)
4010 {
4011 	struct zoneref *z;
4012 	struct zone *zone;
4013 	struct pglist_data *last_pgdat_dirty_limit = NULL;
4014 	bool no_fallback;
4015 
4016 retry:
4017 	/*
4018 	 * Scan zonelist, looking for a zone with enough free.
4019 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
4020 	 */
4021 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4022 	z = ac->preferred_zoneref;
4023 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4024 					ac->nodemask) {
4025 		struct page *page;
4026 		unsigned long mark;
4027 
4028 		if (cpusets_enabled() &&
4029 			(alloc_flags & ALLOC_CPUSET) &&
4030 			!__cpuset_zone_allowed(zone, gfp_mask))
4031 				continue;
4032 		/*
4033 		 * When allocating a page cache page for writing, we
4034 		 * want to get it from a node that is within its dirty
4035 		 * limit, such that no single node holds more than its
4036 		 * proportional share of globally allowed dirty pages.
4037 		 * The dirty limits take into account the node's
4038 		 * lowmem reserves and high watermark so that kswapd
4039 		 * should be able to balance it without having to
4040 		 * write pages from its LRU list.
4041 		 *
4042 		 * XXX: For now, allow allocations to potentially
4043 		 * exceed the per-node dirty limit in the slowpath
4044 		 * (spread_dirty_pages unset) before going into reclaim,
4045 		 * which is important when on a NUMA setup the allowed
4046 		 * nodes are together not big enough to reach the
4047 		 * global limit.  The proper fix for these situations
4048 		 * will require awareness of nodes in the
4049 		 * dirty-throttling and the flusher threads.
4050 		 */
4051 		if (ac->spread_dirty_pages) {
4052 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
4053 				continue;
4054 
4055 			if (!node_dirty_ok(zone->zone_pgdat)) {
4056 				last_pgdat_dirty_limit = zone->zone_pgdat;
4057 				continue;
4058 			}
4059 		}
4060 
4061 		if (no_fallback && nr_online_nodes > 1 &&
4062 		    zone != ac->preferred_zoneref->zone) {
4063 			int local_nid;
4064 
4065 			/*
4066 			 * If moving to a remote node, retry but allow
4067 			 * fragmenting fallbacks. Locality is more important
4068 			 * than fragmentation avoidance.
4069 			 */
4070 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4071 			if (zone_to_nid(zone) != local_nid) {
4072 				alloc_flags &= ~ALLOC_NOFRAGMENT;
4073 				goto retry;
4074 			}
4075 		}
4076 
4077 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4078 		if (!zone_watermark_fast(zone, order, mark,
4079 				       ac->highest_zoneidx, alloc_flags,
4080 				       gfp_mask)) {
4081 			int ret;
4082 
4083 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4084 			/*
4085 			 * Watermark failed for this zone, but see if we can
4086 			 * grow this zone if it contains deferred pages.
4087 			 */
4088 			if (static_branch_unlikely(&deferred_pages)) {
4089 				if (_deferred_grow_zone(zone, order))
4090 					goto try_this_zone;
4091 			}
4092 #endif
4093 			/* Checked here to keep the fast path fast */
4094 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4095 			if (alloc_flags & ALLOC_NO_WATERMARKS)
4096 				goto try_this_zone;
4097 
4098 			if (!node_reclaim_enabled() ||
4099 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4100 				continue;
4101 
4102 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4103 			switch (ret) {
4104 			case NODE_RECLAIM_NOSCAN:
4105 				/* did not scan */
4106 				continue;
4107 			case NODE_RECLAIM_FULL:
4108 				/* scanned but unreclaimable */
4109 				continue;
4110 			default:
4111 				/* did we reclaim enough */
4112 				if (zone_watermark_ok(zone, order, mark,
4113 					ac->highest_zoneidx, alloc_flags))
4114 					goto try_this_zone;
4115 
4116 				continue;
4117 			}
4118 		}
4119 
4120 try_this_zone:
4121 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4122 				gfp_mask, alloc_flags, ac->migratetype);
4123 		if (page) {
4124 			prep_new_page(page, order, gfp_mask, alloc_flags);
4125 
4126 			/*
4127 			 * If this is a high-order atomic allocation then check
4128 			 * if the pageblock should be reserved for the future
4129 			 */
4130 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4131 				reserve_highatomic_pageblock(page, zone, order);
4132 
4133 			return page;
4134 		} else {
4135 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4136 			/* Try again if zone has deferred pages */
4137 			if (static_branch_unlikely(&deferred_pages)) {
4138 				if (_deferred_grow_zone(zone, order))
4139 					goto try_this_zone;
4140 			}
4141 #endif
4142 		}
4143 	}
4144 
4145 	/*
4146 	 * It's possible on a UMA machine to get through all zones that are
4147 	 * fragmented. If avoiding fragmentation, reset and try again.
4148 	 */
4149 	if (no_fallback) {
4150 		alloc_flags &= ~ALLOC_NOFRAGMENT;
4151 		goto retry;
4152 	}
4153 
4154 	return NULL;
4155 }
4156 
4157 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4158 {
4159 	unsigned int filter = SHOW_MEM_FILTER_NODES;
4160 
4161 	/*
4162 	 * This documents exceptions given to allocations in certain
4163 	 * contexts that are allowed to allocate outside current's set
4164 	 * of allowed nodes.
4165 	 */
4166 	if (!(gfp_mask & __GFP_NOMEMALLOC))
4167 		if (tsk_is_oom_victim(current) ||
4168 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
4169 			filter &= ~SHOW_MEM_FILTER_NODES;
4170 	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4171 		filter &= ~SHOW_MEM_FILTER_NODES;
4172 
4173 	show_mem(filter, nodemask);
4174 }
4175 
4176 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4177 {
4178 	struct va_format vaf;
4179 	va_list args;
4180 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4181 
4182 	if ((gfp_mask & __GFP_NOWARN) ||
4183 	     !__ratelimit(&nopage_rs) ||
4184 	     ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4185 		return;
4186 
4187 	va_start(args, fmt);
4188 	vaf.fmt = fmt;
4189 	vaf.va = &args;
4190 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4191 			current->comm, &vaf, gfp_mask, &gfp_mask,
4192 			nodemask_pr_args(nodemask));
4193 	va_end(args);
4194 
4195 	cpuset_print_current_mems_allowed();
4196 	pr_cont("\n");
4197 	dump_stack();
4198 	warn_alloc_show_mem(gfp_mask, nodemask);
4199 }
4200 
4201 static inline struct page *
4202 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4203 			      unsigned int alloc_flags,
4204 			      const struct alloc_context *ac)
4205 {
4206 	struct page *page;
4207 
4208 	page = get_page_from_freelist(gfp_mask, order,
4209 			alloc_flags|ALLOC_CPUSET, ac);
4210 	/*
4211 	 * fallback to ignore cpuset restriction if our nodes
4212 	 * are depleted
4213 	 */
4214 	if (!page)
4215 		page = get_page_from_freelist(gfp_mask, order,
4216 				alloc_flags, ac);
4217 
4218 	return page;
4219 }
4220 
4221 static inline struct page *
4222 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4223 	const struct alloc_context *ac, unsigned long *did_some_progress)
4224 {
4225 	struct oom_control oc = {
4226 		.zonelist = ac->zonelist,
4227 		.nodemask = ac->nodemask,
4228 		.memcg = NULL,
4229 		.gfp_mask = gfp_mask,
4230 		.order = order,
4231 	};
4232 	struct page *page;
4233 
4234 	*did_some_progress = 0;
4235 
4236 	/*
4237 	 * Acquire the oom lock.  If that fails, somebody else is
4238 	 * making progress for us.
4239 	 */
4240 	if (!mutex_trylock(&oom_lock)) {
4241 		*did_some_progress = 1;
4242 		schedule_timeout_uninterruptible(1);
4243 		return NULL;
4244 	}
4245 
4246 	/*
4247 	 * Go through the zonelist yet one more time, keep very high watermark
4248 	 * here, this is only to catch a parallel oom killing, we must fail if
4249 	 * we're still under heavy pressure. But make sure that this reclaim
4250 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4251 	 * allocation which will never fail due to oom_lock already held.
4252 	 */
4253 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4254 				      ~__GFP_DIRECT_RECLAIM, order,
4255 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4256 	if (page)
4257 		goto out;
4258 
4259 	/* Coredumps can quickly deplete all memory reserves */
4260 	if (current->flags & PF_DUMPCORE)
4261 		goto out;
4262 	/* The OOM killer will not help higher order allocs */
4263 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4264 		goto out;
4265 	/*
4266 	 * We have already exhausted all our reclaim opportunities without any
4267 	 * success so it is time to admit defeat. We will skip the OOM killer
4268 	 * because it is very likely that the caller has a more reasonable
4269 	 * fallback than shooting a random task.
4270 	 *
4271 	 * The OOM killer may not free memory on a specific node.
4272 	 */
4273 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4274 		goto out;
4275 	/* The OOM killer does not needlessly kill tasks for lowmem */
4276 	if (ac->highest_zoneidx < ZONE_NORMAL)
4277 		goto out;
4278 	if (pm_suspended_storage())
4279 		goto out;
4280 	/*
4281 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4282 	 * other request to make a forward progress.
4283 	 * We are in an unfortunate situation where out_of_memory cannot
4284 	 * do much for this context but let's try it to at least get
4285 	 * access to memory reserved if the current task is killed (see
4286 	 * out_of_memory). Once filesystems are ready to handle allocation
4287 	 * failures more gracefully we should just bail out here.
4288 	 */
4289 
4290 	/* Exhausted what can be done so it's blame time */
4291 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4292 		*did_some_progress = 1;
4293 
4294 		/*
4295 		 * Help non-failing allocations by giving them access to memory
4296 		 * reserves
4297 		 */
4298 		if (gfp_mask & __GFP_NOFAIL)
4299 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4300 					ALLOC_NO_WATERMARKS, ac);
4301 	}
4302 out:
4303 	mutex_unlock(&oom_lock);
4304 	return page;
4305 }
4306 
4307 /*
4308  * Maximum number of compaction retries with a progress before OOM
4309  * killer is consider as the only way to move forward.
4310  */
4311 #define MAX_COMPACT_RETRIES 16
4312 
4313 #ifdef CONFIG_COMPACTION
4314 /* Try memory compaction for high-order allocations before reclaim */
4315 static struct page *
4316 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4317 		unsigned int alloc_flags, const struct alloc_context *ac,
4318 		enum compact_priority prio, enum compact_result *compact_result)
4319 {
4320 	struct page *page = NULL;
4321 	unsigned long pflags;
4322 	unsigned int noreclaim_flag;
4323 
4324 	if (!order)
4325 		return NULL;
4326 
4327 	psi_memstall_enter(&pflags);
4328 	delayacct_compact_start();
4329 	noreclaim_flag = memalloc_noreclaim_save();
4330 
4331 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4332 								prio, &page);
4333 
4334 	memalloc_noreclaim_restore(noreclaim_flag);
4335 	psi_memstall_leave(&pflags);
4336 	delayacct_compact_end();
4337 
4338 	if (*compact_result == COMPACT_SKIPPED)
4339 		return NULL;
4340 	/*
4341 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4342 	 * count a compaction stall
4343 	 */
4344 	count_vm_event(COMPACTSTALL);
4345 
4346 	/* Prep a captured page if available */
4347 	if (page)
4348 		prep_new_page(page, order, gfp_mask, alloc_flags);
4349 
4350 	/* Try get a page from the freelist if available */
4351 	if (!page)
4352 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4353 
4354 	if (page) {
4355 		struct zone *zone = page_zone(page);
4356 
4357 		zone->compact_blockskip_flush = false;
4358 		compaction_defer_reset(zone, order, true);
4359 		count_vm_event(COMPACTSUCCESS);
4360 		return page;
4361 	}
4362 
4363 	/*
4364 	 * It's bad if compaction run occurs and fails. The most likely reason
4365 	 * is that pages exist, but not enough to satisfy watermarks.
4366 	 */
4367 	count_vm_event(COMPACTFAIL);
4368 
4369 	cond_resched();
4370 
4371 	return NULL;
4372 }
4373 
4374 static inline bool
4375 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4376 		     enum compact_result compact_result,
4377 		     enum compact_priority *compact_priority,
4378 		     int *compaction_retries)
4379 {
4380 	int max_retries = MAX_COMPACT_RETRIES;
4381 	int min_priority;
4382 	bool ret = false;
4383 	int retries = *compaction_retries;
4384 	enum compact_priority priority = *compact_priority;
4385 
4386 	if (!order)
4387 		return false;
4388 
4389 	if (fatal_signal_pending(current))
4390 		return false;
4391 
4392 	if (compaction_made_progress(compact_result))
4393 		(*compaction_retries)++;
4394 
4395 	/*
4396 	 * compaction considers all the zone as desperately out of memory
4397 	 * so it doesn't really make much sense to retry except when the
4398 	 * failure could be caused by insufficient priority
4399 	 */
4400 	if (compaction_failed(compact_result))
4401 		goto check_priority;
4402 
4403 	/*
4404 	 * compaction was skipped because there are not enough order-0 pages
4405 	 * to work with, so we retry only if it looks like reclaim can help.
4406 	 */
4407 	if (compaction_needs_reclaim(compact_result)) {
4408 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4409 		goto out;
4410 	}
4411 
4412 	/*
4413 	 * make sure the compaction wasn't deferred or didn't bail out early
4414 	 * due to locks contention before we declare that we should give up.
4415 	 * But the next retry should use a higher priority if allowed, so
4416 	 * we don't just keep bailing out endlessly.
4417 	 */
4418 	if (compaction_withdrawn(compact_result)) {
4419 		goto check_priority;
4420 	}
4421 
4422 	/*
4423 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4424 	 * costly ones because they are de facto nofail and invoke OOM
4425 	 * killer to move on while costly can fail and users are ready
4426 	 * to cope with that. 1/4 retries is rather arbitrary but we
4427 	 * would need much more detailed feedback from compaction to
4428 	 * make a better decision.
4429 	 */
4430 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4431 		max_retries /= 4;
4432 	if (*compaction_retries <= max_retries) {
4433 		ret = true;
4434 		goto out;
4435 	}
4436 
4437 	/*
4438 	 * Make sure there are attempts at the highest priority if we exhausted
4439 	 * all retries or failed at the lower priorities.
4440 	 */
4441 check_priority:
4442 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4443 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4444 
4445 	if (*compact_priority > min_priority) {
4446 		(*compact_priority)--;
4447 		*compaction_retries = 0;
4448 		ret = true;
4449 	}
4450 out:
4451 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4452 	return ret;
4453 }
4454 #else
4455 static inline struct page *
4456 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4457 		unsigned int alloc_flags, const struct alloc_context *ac,
4458 		enum compact_priority prio, enum compact_result *compact_result)
4459 {
4460 	*compact_result = COMPACT_SKIPPED;
4461 	return NULL;
4462 }
4463 
4464 static inline bool
4465 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4466 		     enum compact_result compact_result,
4467 		     enum compact_priority *compact_priority,
4468 		     int *compaction_retries)
4469 {
4470 	struct zone *zone;
4471 	struct zoneref *z;
4472 
4473 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4474 		return false;
4475 
4476 	/*
4477 	 * There are setups with compaction disabled which would prefer to loop
4478 	 * inside the allocator rather than hit the oom killer prematurely.
4479 	 * Let's give them a good hope and keep retrying while the order-0
4480 	 * watermarks are OK.
4481 	 */
4482 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4483 				ac->highest_zoneidx, ac->nodemask) {
4484 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4485 					ac->highest_zoneidx, alloc_flags))
4486 			return true;
4487 	}
4488 	return false;
4489 }
4490 #endif /* CONFIG_COMPACTION */
4491 
4492 #ifdef CONFIG_LOCKDEP
4493 static struct lockdep_map __fs_reclaim_map =
4494 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4495 
4496 static bool __need_reclaim(gfp_t gfp_mask)
4497 {
4498 	/* no reclaim without waiting on it */
4499 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4500 		return false;
4501 
4502 	/* this guy won't enter reclaim */
4503 	if (current->flags & PF_MEMALLOC)
4504 		return false;
4505 
4506 	if (gfp_mask & __GFP_NOLOCKDEP)
4507 		return false;
4508 
4509 	return true;
4510 }
4511 
4512 void __fs_reclaim_acquire(unsigned long ip)
4513 {
4514 	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4515 }
4516 
4517 void __fs_reclaim_release(unsigned long ip)
4518 {
4519 	lock_release(&__fs_reclaim_map, ip);
4520 }
4521 
4522 void fs_reclaim_acquire(gfp_t gfp_mask)
4523 {
4524 	gfp_mask = current_gfp_context(gfp_mask);
4525 
4526 	if (__need_reclaim(gfp_mask)) {
4527 		if (gfp_mask & __GFP_FS)
4528 			__fs_reclaim_acquire(_RET_IP_);
4529 
4530 #ifdef CONFIG_MMU_NOTIFIER
4531 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4532 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4533 #endif
4534 
4535 	}
4536 }
4537 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4538 
4539 void fs_reclaim_release(gfp_t gfp_mask)
4540 {
4541 	gfp_mask = current_gfp_context(gfp_mask);
4542 
4543 	if (__need_reclaim(gfp_mask)) {
4544 		if (gfp_mask & __GFP_FS)
4545 			__fs_reclaim_release(_RET_IP_);
4546 	}
4547 }
4548 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4549 #endif
4550 
4551 /* Perform direct synchronous page reclaim */
4552 static unsigned long
4553 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4554 					const struct alloc_context *ac)
4555 {
4556 	unsigned int noreclaim_flag;
4557 	unsigned long progress;
4558 
4559 	cond_resched();
4560 
4561 	/* We now go into synchronous reclaim */
4562 	cpuset_memory_pressure_bump();
4563 	fs_reclaim_acquire(gfp_mask);
4564 	noreclaim_flag = memalloc_noreclaim_save();
4565 
4566 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4567 								ac->nodemask);
4568 
4569 	memalloc_noreclaim_restore(noreclaim_flag);
4570 	fs_reclaim_release(gfp_mask);
4571 
4572 	cond_resched();
4573 
4574 	return progress;
4575 }
4576 
4577 /* The really slow allocator path where we enter direct reclaim */
4578 static inline struct page *
4579 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4580 		unsigned int alloc_flags, const struct alloc_context *ac,
4581 		unsigned long *did_some_progress)
4582 {
4583 	struct page *page = NULL;
4584 	unsigned long pflags;
4585 	bool drained = false;
4586 
4587 	psi_memstall_enter(&pflags);
4588 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4589 	if (unlikely(!(*did_some_progress)))
4590 		goto out;
4591 
4592 retry:
4593 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4594 
4595 	/*
4596 	 * If an allocation failed after direct reclaim, it could be because
4597 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4598 	 * Shrink them and try again
4599 	 */
4600 	if (!page && !drained) {
4601 		unreserve_highatomic_pageblock(ac, false);
4602 		drain_all_pages(NULL);
4603 		drained = true;
4604 		goto retry;
4605 	}
4606 out:
4607 	psi_memstall_leave(&pflags);
4608 
4609 	return page;
4610 }
4611 
4612 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4613 			     const struct alloc_context *ac)
4614 {
4615 	struct zoneref *z;
4616 	struct zone *zone;
4617 	pg_data_t *last_pgdat = NULL;
4618 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4619 
4620 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4621 					ac->nodemask) {
4622 		if (last_pgdat != zone->zone_pgdat)
4623 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4624 		last_pgdat = zone->zone_pgdat;
4625 	}
4626 }
4627 
4628 static inline unsigned int
4629 gfp_to_alloc_flags(gfp_t gfp_mask)
4630 {
4631 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4632 
4633 	/*
4634 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4635 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4636 	 * to save two branches.
4637 	 */
4638 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4639 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4640 
4641 	/*
4642 	 * The caller may dip into page reserves a bit more if the caller
4643 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4644 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4645 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4646 	 */
4647 	alloc_flags |= (__force int)
4648 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4649 
4650 	if (gfp_mask & __GFP_ATOMIC) {
4651 		/*
4652 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4653 		 * if it can't schedule.
4654 		 */
4655 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4656 			alloc_flags |= ALLOC_HARDER;
4657 		/*
4658 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4659 		 * comment for __cpuset_node_allowed().
4660 		 */
4661 		alloc_flags &= ~ALLOC_CPUSET;
4662 	} else if (unlikely(rt_task(current)) && in_task())
4663 		alloc_flags |= ALLOC_HARDER;
4664 
4665 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4666 
4667 	return alloc_flags;
4668 }
4669 
4670 static bool oom_reserves_allowed(struct task_struct *tsk)
4671 {
4672 	if (!tsk_is_oom_victim(tsk))
4673 		return false;
4674 
4675 	/*
4676 	 * !MMU doesn't have oom reaper so give access to memory reserves
4677 	 * only to the thread with TIF_MEMDIE set
4678 	 */
4679 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4680 		return false;
4681 
4682 	return true;
4683 }
4684 
4685 /*
4686  * Distinguish requests which really need access to full memory
4687  * reserves from oom victims which can live with a portion of it
4688  */
4689 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4690 {
4691 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4692 		return 0;
4693 	if (gfp_mask & __GFP_MEMALLOC)
4694 		return ALLOC_NO_WATERMARKS;
4695 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4696 		return ALLOC_NO_WATERMARKS;
4697 	if (!in_interrupt()) {
4698 		if (current->flags & PF_MEMALLOC)
4699 			return ALLOC_NO_WATERMARKS;
4700 		else if (oom_reserves_allowed(current))
4701 			return ALLOC_OOM;
4702 	}
4703 
4704 	return 0;
4705 }
4706 
4707 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4708 {
4709 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4710 }
4711 
4712 /*
4713  * Checks whether it makes sense to retry the reclaim to make a forward progress
4714  * for the given allocation request.
4715  *
4716  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4717  * without success, or when we couldn't even meet the watermark if we
4718  * reclaimed all remaining pages on the LRU lists.
4719  *
4720  * Returns true if a retry is viable or false to enter the oom path.
4721  */
4722 static inline bool
4723 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4724 		     struct alloc_context *ac, int alloc_flags,
4725 		     bool did_some_progress, int *no_progress_loops)
4726 {
4727 	struct zone *zone;
4728 	struct zoneref *z;
4729 	bool ret = false;
4730 
4731 	/*
4732 	 * Costly allocations might have made a progress but this doesn't mean
4733 	 * their order will become available due to high fragmentation so
4734 	 * always increment the no progress counter for them
4735 	 */
4736 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4737 		*no_progress_loops = 0;
4738 	else
4739 		(*no_progress_loops)++;
4740 
4741 	/*
4742 	 * Make sure we converge to OOM if we cannot make any progress
4743 	 * several times in the row.
4744 	 */
4745 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4746 		/* Before OOM, exhaust highatomic_reserve */
4747 		return unreserve_highatomic_pageblock(ac, true);
4748 	}
4749 
4750 	/*
4751 	 * Keep reclaiming pages while there is a chance this will lead
4752 	 * somewhere.  If none of the target zones can satisfy our allocation
4753 	 * request even if all reclaimable pages are considered then we are
4754 	 * screwed and have to go OOM.
4755 	 */
4756 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4757 				ac->highest_zoneidx, ac->nodemask) {
4758 		unsigned long available;
4759 		unsigned long reclaimable;
4760 		unsigned long min_wmark = min_wmark_pages(zone);
4761 		bool wmark;
4762 
4763 		available = reclaimable = zone_reclaimable_pages(zone);
4764 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4765 
4766 		/*
4767 		 * Would the allocation succeed if we reclaimed all
4768 		 * reclaimable pages?
4769 		 */
4770 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4771 				ac->highest_zoneidx, alloc_flags, available);
4772 		trace_reclaim_retry_zone(z, order, reclaimable,
4773 				available, min_wmark, *no_progress_loops, wmark);
4774 		if (wmark) {
4775 			ret = true;
4776 			break;
4777 		}
4778 	}
4779 
4780 	/*
4781 	 * Memory allocation/reclaim might be called from a WQ context and the
4782 	 * current implementation of the WQ concurrency control doesn't
4783 	 * recognize that a particular WQ is congested if the worker thread is
4784 	 * looping without ever sleeping. Therefore we have to do a short sleep
4785 	 * here rather than calling cond_resched().
4786 	 */
4787 	if (current->flags & PF_WQ_WORKER)
4788 		schedule_timeout_uninterruptible(1);
4789 	else
4790 		cond_resched();
4791 	return ret;
4792 }
4793 
4794 static inline bool
4795 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4796 {
4797 	/*
4798 	 * It's possible that cpuset's mems_allowed and the nodemask from
4799 	 * mempolicy don't intersect. This should be normally dealt with by
4800 	 * policy_nodemask(), but it's possible to race with cpuset update in
4801 	 * such a way the check therein was true, and then it became false
4802 	 * before we got our cpuset_mems_cookie here.
4803 	 * This assumes that for all allocations, ac->nodemask can come only
4804 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4805 	 * when it does not intersect with the cpuset restrictions) or the
4806 	 * caller can deal with a violated nodemask.
4807 	 */
4808 	if (cpusets_enabled() && ac->nodemask &&
4809 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4810 		ac->nodemask = NULL;
4811 		return true;
4812 	}
4813 
4814 	/*
4815 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4816 	 * possible to race with parallel threads in such a way that our
4817 	 * allocation can fail while the mask is being updated. If we are about
4818 	 * to fail, check if the cpuset changed during allocation and if so,
4819 	 * retry.
4820 	 */
4821 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4822 		return true;
4823 
4824 	return false;
4825 }
4826 
4827 static inline struct page *
4828 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4829 						struct alloc_context *ac)
4830 {
4831 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4832 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4833 	struct page *page = NULL;
4834 	unsigned int alloc_flags;
4835 	unsigned long did_some_progress;
4836 	enum compact_priority compact_priority;
4837 	enum compact_result compact_result;
4838 	int compaction_retries;
4839 	int no_progress_loops;
4840 	unsigned int cpuset_mems_cookie;
4841 	int reserve_flags;
4842 
4843 	/*
4844 	 * We also sanity check to catch abuse of atomic reserves being used by
4845 	 * callers that are not in atomic context.
4846 	 */
4847 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4848 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4849 		gfp_mask &= ~__GFP_ATOMIC;
4850 
4851 retry_cpuset:
4852 	compaction_retries = 0;
4853 	no_progress_loops = 0;
4854 	compact_priority = DEF_COMPACT_PRIORITY;
4855 	cpuset_mems_cookie = read_mems_allowed_begin();
4856 
4857 	/*
4858 	 * The fast path uses conservative alloc_flags to succeed only until
4859 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4860 	 * alloc_flags precisely. So we do that now.
4861 	 */
4862 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4863 
4864 	/*
4865 	 * We need to recalculate the starting point for the zonelist iterator
4866 	 * because we might have used different nodemask in the fast path, or
4867 	 * there was a cpuset modification and we are retrying - otherwise we
4868 	 * could end up iterating over non-eligible zones endlessly.
4869 	 */
4870 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4871 					ac->highest_zoneidx, ac->nodemask);
4872 	if (!ac->preferred_zoneref->zone)
4873 		goto nopage;
4874 
4875 	/*
4876 	 * Check for insane configurations where the cpuset doesn't contain
4877 	 * any suitable zone to satisfy the request - e.g. non-movable
4878 	 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4879 	 */
4880 	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4881 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
4882 					ac->highest_zoneidx,
4883 					&cpuset_current_mems_allowed);
4884 		if (!z->zone)
4885 			goto nopage;
4886 	}
4887 
4888 	if (alloc_flags & ALLOC_KSWAPD)
4889 		wake_all_kswapds(order, gfp_mask, ac);
4890 
4891 	/*
4892 	 * The adjusted alloc_flags might result in immediate success, so try
4893 	 * that first
4894 	 */
4895 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4896 	if (page)
4897 		goto got_pg;
4898 
4899 	/*
4900 	 * For costly allocations, try direct compaction first, as it's likely
4901 	 * that we have enough base pages and don't need to reclaim. For non-
4902 	 * movable high-order allocations, do that as well, as compaction will
4903 	 * try prevent permanent fragmentation by migrating from blocks of the
4904 	 * same migratetype.
4905 	 * Don't try this for allocations that are allowed to ignore
4906 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4907 	 */
4908 	if (can_direct_reclaim &&
4909 			(costly_order ||
4910 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4911 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4912 		page = __alloc_pages_direct_compact(gfp_mask, order,
4913 						alloc_flags, ac,
4914 						INIT_COMPACT_PRIORITY,
4915 						&compact_result);
4916 		if (page)
4917 			goto got_pg;
4918 
4919 		/*
4920 		 * Checks for costly allocations with __GFP_NORETRY, which
4921 		 * includes some THP page fault allocations
4922 		 */
4923 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4924 			/*
4925 			 * If allocating entire pageblock(s) and compaction
4926 			 * failed because all zones are below low watermarks
4927 			 * or is prohibited because it recently failed at this
4928 			 * order, fail immediately unless the allocator has
4929 			 * requested compaction and reclaim retry.
4930 			 *
4931 			 * Reclaim is
4932 			 *  - potentially very expensive because zones are far
4933 			 *    below their low watermarks or this is part of very
4934 			 *    bursty high order allocations,
4935 			 *  - not guaranteed to help because isolate_freepages()
4936 			 *    may not iterate over freed pages as part of its
4937 			 *    linear scan, and
4938 			 *  - unlikely to make entire pageblocks free on its
4939 			 *    own.
4940 			 */
4941 			if (compact_result == COMPACT_SKIPPED ||
4942 			    compact_result == COMPACT_DEFERRED)
4943 				goto nopage;
4944 
4945 			/*
4946 			 * Looks like reclaim/compaction is worth trying, but
4947 			 * sync compaction could be very expensive, so keep
4948 			 * using async compaction.
4949 			 */
4950 			compact_priority = INIT_COMPACT_PRIORITY;
4951 		}
4952 	}
4953 
4954 retry:
4955 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4956 	if (alloc_flags & ALLOC_KSWAPD)
4957 		wake_all_kswapds(order, gfp_mask, ac);
4958 
4959 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4960 	if (reserve_flags)
4961 		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
4962 
4963 	/*
4964 	 * Reset the nodemask and zonelist iterators if memory policies can be
4965 	 * ignored. These allocations are high priority and system rather than
4966 	 * user oriented.
4967 	 */
4968 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4969 		ac->nodemask = NULL;
4970 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4971 					ac->highest_zoneidx, ac->nodemask);
4972 	}
4973 
4974 	/* Attempt with potentially adjusted zonelist and alloc_flags */
4975 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4976 	if (page)
4977 		goto got_pg;
4978 
4979 	/* Caller is not willing to reclaim, we can't balance anything */
4980 	if (!can_direct_reclaim)
4981 		goto nopage;
4982 
4983 	/* Avoid recursion of direct reclaim */
4984 	if (current->flags & PF_MEMALLOC)
4985 		goto nopage;
4986 
4987 	/* Try direct reclaim and then allocating */
4988 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4989 							&did_some_progress);
4990 	if (page)
4991 		goto got_pg;
4992 
4993 	/* Try direct compaction and then allocating */
4994 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4995 					compact_priority, &compact_result);
4996 	if (page)
4997 		goto got_pg;
4998 
4999 	/* Do not loop if specifically requested */
5000 	if (gfp_mask & __GFP_NORETRY)
5001 		goto nopage;
5002 
5003 	/*
5004 	 * Do not retry costly high order allocations unless they are
5005 	 * __GFP_RETRY_MAYFAIL
5006 	 */
5007 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5008 		goto nopage;
5009 
5010 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5011 				 did_some_progress > 0, &no_progress_loops))
5012 		goto retry;
5013 
5014 	/*
5015 	 * It doesn't make any sense to retry for the compaction if the order-0
5016 	 * reclaim is not able to make any progress because the current
5017 	 * implementation of the compaction depends on the sufficient amount
5018 	 * of free memory (see __compaction_suitable)
5019 	 */
5020 	if (did_some_progress > 0 &&
5021 			should_compact_retry(ac, order, alloc_flags,
5022 				compact_result, &compact_priority,
5023 				&compaction_retries))
5024 		goto retry;
5025 
5026 
5027 	/* Deal with possible cpuset update races before we start OOM killing */
5028 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5029 		goto retry_cpuset;
5030 
5031 	/* Reclaim has failed us, start killing things */
5032 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5033 	if (page)
5034 		goto got_pg;
5035 
5036 	/* Avoid allocations with no watermarks from looping endlessly */
5037 	if (tsk_is_oom_victim(current) &&
5038 	    (alloc_flags & ALLOC_OOM ||
5039 	     (gfp_mask & __GFP_NOMEMALLOC)))
5040 		goto nopage;
5041 
5042 	/* Retry as long as the OOM killer is making progress */
5043 	if (did_some_progress) {
5044 		no_progress_loops = 0;
5045 		goto retry;
5046 	}
5047 
5048 nopage:
5049 	/* Deal with possible cpuset update races before we fail */
5050 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
5051 		goto retry_cpuset;
5052 
5053 	/*
5054 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5055 	 * we always retry
5056 	 */
5057 	if (gfp_mask & __GFP_NOFAIL) {
5058 		/*
5059 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
5060 		 * of any new users that actually require GFP_NOWAIT
5061 		 */
5062 		if (WARN_ON_ONCE(!can_direct_reclaim))
5063 			goto fail;
5064 
5065 		/*
5066 		 * PF_MEMALLOC request from this context is rather bizarre
5067 		 * because we cannot reclaim anything and only can loop waiting
5068 		 * for somebody to do a work for us
5069 		 */
5070 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
5071 
5072 		/*
5073 		 * non failing costly orders are a hard requirement which we
5074 		 * are not prepared for much so let's warn about these users
5075 		 * so that we can identify them and convert them to something
5076 		 * else.
5077 		 */
5078 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
5079 
5080 		/*
5081 		 * Help non-failing allocations by giving them access to memory
5082 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
5083 		 * could deplete whole memory reserves which would just make
5084 		 * the situation worse
5085 		 */
5086 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5087 		if (page)
5088 			goto got_pg;
5089 
5090 		cond_resched();
5091 		goto retry;
5092 	}
5093 fail:
5094 	warn_alloc(gfp_mask, ac->nodemask,
5095 			"page allocation failure: order:%u", order);
5096 got_pg:
5097 	return page;
5098 }
5099 
5100 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5101 		int preferred_nid, nodemask_t *nodemask,
5102 		struct alloc_context *ac, gfp_t *alloc_gfp,
5103 		unsigned int *alloc_flags)
5104 {
5105 	ac->highest_zoneidx = gfp_zone(gfp_mask);
5106 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5107 	ac->nodemask = nodemask;
5108 	ac->migratetype = gfp_migratetype(gfp_mask);
5109 
5110 	if (cpusets_enabled()) {
5111 		*alloc_gfp |= __GFP_HARDWALL;
5112 		/*
5113 		 * When we are in the interrupt context, it is irrelevant
5114 		 * to the current task context. It means that any node ok.
5115 		 */
5116 		if (in_task() && !ac->nodemask)
5117 			ac->nodemask = &cpuset_current_mems_allowed;
5118 		else
5119 			*alloc_flags |= ALLOC_CPUSET;
5120 	}
5121 
5122 	fs_reclaim_acquire(gfp_mask);
5123 	fs_reclaim_release(gfp_mask);
5124 
5125 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5126 
5127 	if (should_fail_alloc_page(gfp_mask, order))
5128 		return false;
5129 
5130 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5131 
5132 	/* Dirty zone balancing only done in the fast path */
5133 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5134 
5135 	/*
5136 	 * The preferred zone is used for statistics but crucially it is
5137 	 * also used as the starting point for the zonelist iterator. It
5138 	 * may get reset for allocations that ignore memory policies.
5139 	 */
5140 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5141 					ac->highest_zoneidx, ac->nodemask);
5142 
5143 	return true;
5144 }
5145 
5146 /*
5147  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5148  * @gfp: GFP flags for the allocation
5149  * @preferred_nid: The preferred NUMA node ID to allocate from
5150  * @nodemask: Set of nodes to allocate from, may be NULL
5151  * @nr_pages: The number of pages desired on the list or array
5152  * @page_list: Optional list to store the allocated pages
5153  * @page_array: Optional array to store the pages
5154  *
5155  * This is a batched version of the page allocator that attempts to
5156  * allocate nr_pages quickly. Pages are added to page_list if page_list
5157  * is not NULL, otherwise it is assumed that the page_array is valid.
5158  *
5159  * For lists, nr_pages is the number of pages that should be allocated.
5160  *
5161  * For arrays, only NULL elements are populated with pages and nr_pages
5162  * is the maximum number of pages that will be stored in the array.
5163  *
5164  * Returns the number of pages on the list or array.
5165  */
5166 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5167 			nodemask_t *nodemask, int nr_pages,
5168 			struct list_head *page_list,
5169 			struct page **page_array)
5170 {
5171 	struct page *page;
5172 	unsigned long flags;
5173 	struct zone *zone;
5174 	struct zoneref *z;
5175 	struct per_cpu_pages *pcp;
5176 	struct list_head *pcp_list;
5177 	struct alloc_context ac;
5178 	gfp_t alloc_gfp;
5179 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5180 	int nr_populated = 0, nr_account = 0;
5181 
5182 	/*
5183 	 * Skip populated array elements to determine if any pages need
5184 	 * to be allocated before disabling IRQs.
5185 	 */
5186 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5187 		nr_populated++;
5188 
5189 	/* No pages requested? */
5190 	if (unlikely(nr_pages <= 0))
5191 		goto out;
5192 
5193 	/* Already populated array? */
5194 	if (unlikely(page_array && nr_pages - nr_populated == 0))
5195 		goto out;
5196 
5197 	/* Bulk allocator does not support memcg accounting. */
5198 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5199 		goto failed;
5200 
5201 	/* Use the single page allocator for one page. */
5202 	if (nr_pages - nr_populated == 1)
5203 		goto failed;
5204 
5205 #ifdef CONFIG_PAGE_OWNER
5206 	/*
5207 	 * PAGE_OWNER may recurse into the allocator to allocate space to
5208 	 * save the stack with pagesets.lock held. Releasing/reacquiring
5209 	 * removes much of the performance benefit of bulk allocation so
5210 	 * force the caller to allocate one page at a time as it'll have
5211 	 * similar performance to added complexity to the bulk allocator.
5212 	 */
5213 	if (static_branch_unlikely(&page_owner_inited))
5214 		goto failed;
5215 #endif
5216 
5217 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5218 	gfp &= gfp_allowed_mask;
5219 	alloc_gfp = gfp;
5220 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5221 		goto out;
5222 	gfp = alloc_gfp;
5223 
5224 	/* Find an allowed local zone that meets the low watermark. */
5225 	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5226 		unsigned long mark;
5227 
5228 		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5229 		    !__cpuset_zone_allowed(zone, gfp)) {
5230 			continue;
5231 		}
5232 
5233 		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5234 		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5235 			goto failed;
5236 		}
5237 
5238 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5239 		if (zone_watermark_fast(zone, 0,  mark,
5240 				zonelist_zone_idx(ac.preferred_zoneref),
5241 				alloc_flags, gfp)) {
5242 			break;
5243 		}
5244 	}
5245 
5246 	/*
5247 	 * If there are no allowed local zones that meets the watermarks then
5248 	 * try to allocate a single page and reclaim if necessary.
5249 	 */
5250 	if (unlikely(!zone))
5251 		goto failed;
5252 
5253 	/* Attempt the batch allocation */
5254 	local_lock_irqsave(&pagesets.lock, flags);
5255 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
5256 	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5257 
5258 	while (nr_populated < nr_pages) {
5259 
5260 		/* Skip existing pages */
5261 		if (page_array && page_array[nr_populated]) {
5262 			nr_populated++;
5263 			continue;
5264 		}
5265 
5266 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5267 								pcp, pcp_list);
5268 		if (unlikely(!page)) {
5269 			/* Try and get at least one page */
5270 			if (!nr_populated)
5271 				goto failed_irq;
5272 			break;
5273 		}
5274 		nr_account++;
5275 
5276 		prep_new_page(page, 0, gfp, 0);
5277 		if (page_list)
5278 			list_add(&page->lru, page_list);
5279 		else
5280 			page_array[nr_populated] = page;
5281 		nr_populated++;
5282 	}
5283 
5284 	local_unlock_irqrestore(&pagesets.lock, flags);
5285 
5286 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5287 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5288 
5289 out:
5290 	return nr_populated;
5291 
5292 failed_irq:
5293 	local_unlock_irqrestore(&pagesets.lock, flags);
5294 
5295 failed:
5296 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5297 	if (page) {
5298 		if (page_list)
5299 			list_add(&page->lru, page_list);
5300 		else
5301 			page_array[nr_populated] = page;
5302 		nr_populated++;
5303 	}
5304 
5305 	goto out;
5306 }
5307 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
5308 
5309 /*
5310  * This is the 'heart' of the zoned buddy allocator.
5311  */
5312 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5313 							nodemask_t *nodemask)
5314 {
5315 	struct page *page;
5316 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5317 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5318 	struct alloc_context ac = { };
5319 
5320 	/*
5321 	 * There are several places where we assume that the order value is sane
5322 	 * so bail out early if the request is out of bound.
5323 	 */
5324 	if (unlikely(order >= MAX_ORDER)) {
5325 		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5326 		return NULL;
5327 	}
5328 
5329 	gfp &= gfp_allowed_mask;
5330 	/*
5331 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5332 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
5333 	 * from a particular context which has been marked by
5334 	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5335 	 * movable zones are not used during allocation.
5336 	 */
5337 	gfp = current_gfp_context(gfp);
5338 	alloc_gfp = gfp;
5339 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5340 			&alloc_gfp, &alloc_flags))
5341 		return NULL;
5342 
5343 	/*
5344 	 * Forbid the first pass from falling back to types that fragment
5345 	 * memory until all local zones are considered.
5346 	 */
5347 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5348 
5349 	/* First allocation attempt */
5350 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5351 	if (likely(page))
5352 		goto out;
5353 
5354 	alloc_gfp = gfp;
5355 	ac.spread_dirty_pages = false;
5356 
5357 	/*
5358 	 * Restore the original nodemask if it was potentially replaced with
5359 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5360 	 */
5361 	ac.nodemask = nodemask;
5362 
5363 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5364 
5365 out:
5366 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5367 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5368 		__free_pages(page, order);
5369 		page = NULL;
5370 	}
5371 
5372 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5373 
5374 	return page;
5375 }
5376 EXPORT_SYMBOL(__alloc_pages);
5377 
5378 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5379 		nodemask_t *nodemask)
5380 {
5381 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5382 			preferred_nid, nodemask);
5383 
5384 	if (page && order > 1)
5385 		prep_transhuge_page(page);
5386 	return (struct folio *)page;
5387 }
5388 EXPORT_SYMBOL(__folio_alloc);
5389 
5390 /*
5391  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5392  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5393  * you need to access high mem.
5394  */
5395 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5396 {
5397 	struct page *page;
5398 
5399 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5400 	if (!page)
5401 		return 0;
5402 	return (unsigned long) page_address(page);
5403 }
5404 EXPORT_SYMBOL(__get_free_pages);
5405 
5406 unsigned long get_zeroed_page(gfp_t gfp_mask)
5407 {
5408 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5409 }
5410 EXPORT_SYMBOL(get_zeroed_page);
5411 
5412 /**
5413  * __free_pages - Free pages allocated with alloc_pages().
5414  * @page: The page pointer returned from alloc_pages().
5415  * @order: The order of the allocation.
5416  *
5417  * This function can free multi-page allocations that are not compound
5418  * pages.  It does not check that the @order passed in matches that of
5419  * the allocation, so it is easy to leak memory.  Freeing more memory
5420  * than was allocated will probably emit a warning.
5421  *
5422  * If the last reference to this page is speculative, it will be released
5423  * by put_page() which only frees the first page of a non-compound
5424  * allocation.  To prevent the remaining pages from being leaked, we free
5425  * the subsequent pages here.  If you want to use the page's reference
5426  * count to decide when to free the allocation, you should allocate a
5427  * compound page, and use put_page() instead of __free_pages().
5428  *
5429  * Context: May be called in interrupt context or while holding a normal
5430  * spinlock, but not in NMI context or while holding a raw spinlock.
5431  */
5432 void __free_pages(struct page *page, unsigned int order)
5433 {
5434 	if (put_page_testzero(page))
5435 		free_the_page(page, order);
5436 	else if (!PageHead(page))
5437 		while (order-- > 0)
5438 			free_the_page(page + (1 << order), order);
5439 }
5440 EXPORT_SYMBOL(__free_pages);
5441 
5442 void free_pages(unsigned long addr, unsigned int order)
5443 {
5444 	if (addr != 0) {
5445 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5446 		__free_pages(virt_to_page((void *)addr), order);
5447 	}
5448 }
5449 
5450 EXPORT_SYMBOL(free_pages);
5451 
5452 /*
5453  * Page Fragment:
5454  *  An arbitrary-length arbitrary-offset area of memory which resides
5455  *  within a 0 or higher order page.  Multiple fragments within that page
5456  *  are individually refcounted, in the page's reference counter.
5457  *
5458  * The page_frag functions below provide a simple allocation framework for
5459  * page fragments.  This is used by the network stack and network device
5460  * drivers to provide a backing region of memory for use as either an
5461  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5462  */
5463 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5464 					     gfp_t gfp_mask)
5465 {
5466 	struct page *page = NULL;
5467 	gfp_t gfp = gfp_mask;
5468 
5469 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5470 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5471 		    __GFP_NOMEMALLOC;
5472 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5473 				PAGE_FRAG_CACHE_MAX_ORDER);
5474 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5475 #endif
5476 	if (unlikely(!page))
5477 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5478 
5479 	nc->va = page ? page_address(page) : NULL;
5480 
5481 	return page;
5482 }
5483 
5484 void __page_frag_cache_drain(struct page *page, unsigned int count)
5485 {
5486 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5487 
5488 	if (page_ref_sub_and_test(page, count))
5489 		free_the_page(page, compound_order(page));
5490 }
5491 EXPORT_SYMBOL(__page_frag_cache_drain);
5492 
5493 void *page_frag_alloc_align(struct page_frag_cache *nc,
5494 		      unsigned int fragsz, gfp_t gfp_mask,
5495 		      unsigned int align_mask)
5496 {
5497 	unsigned int size = PAGE_SIZE;
5498 	struct page *page;
5499 	int offset;
5500 
5501 	if (unlikely(!nc->va)) {
5502 refill:
5503 		page = __page_frag_cache_refill(nc, gfp_mask);
5504 		if (!page)
5505 			return NULL;
5506 
5507 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5508 		/* if size can vary use size else just use PAGE_SIZE */
5509 		size = nc->size;
5510 #endif
5511 		/* Even if we own the page, we do not use atomic_set().
5512 		 * This would break get_page_unless_zero() users.
5513 		 */
5514 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5515 
5516 		/* reset page count bias and offset to start of new frag */
5517 		nc->pfmemalloc = page_is_pfmemalloc(page);
5518 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5519 		nc->offset = size;
5520 	}
5521 
5522 	offset = nc->offset - fragsz;
5523 	if (unlikely(offset < 0)) {
5524 		page = virt_to_page(nc->va);
5525 
5526 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5527 			goto refill;
5528 
5529 		if (unlikely(nc->pfmemalloc)) {
5530 			free_the_page(page, compound_order(page));
5531 			goto refill;
5532 		}
5533 
5534 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5535 		/* if size can vary use size else just use PAGE_SIZE */
5536 		size = nc->size;
5537 #endif
5538 		/* OK, page count is 0, we can safely set it */
5539 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5540 
5541 		/* reset page count bias and offset to start of new frag */
5542 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5543 		offset = size - fragsz;
5544 	}
5545 
5546 	nc->pagecnt_bias--;
5547 	offset &= align_mask;
5548 	nc->offset = offset;
5549 
5550 	return nc->va + offset;
5551 }
5552 EXPORT_SYMBOL(page_frag_alloc_align);
5553 
5554 /*
5555  * Frees a page fragment allocated out of either a compound or order 0 page.
5556  */
5557 void page_frag_free(void *addr)
5558 {
5559 	struct page *page = virt_to_head_page(addr);
5560 
5561 	if (unlikely(put_page_testzero(page)))
5562 		free_the_page(page, compound_order(page));
5563 }
5564 EXPORT_SYMBOL(page_frag_free);
5565 
5566 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5567 		size_t size)
5568 {
5569 	if (addr) {
5570 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5571 		unsigned long used = addr + PAGE_ALIGN(size);
5572 
5573 		split_page(virt_to_page((void *)addr), order);
5574 		while (used < alloc_end) {
5575 			free_page(used);
5576 			used += PAGE_SIZE;
5577 		}
5578 	}
5579 	return (void *)addr;
5580 }
5581 
5582 /**
5583  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5584  * @size: the number of bytes to allocate
5585  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5586  *
5587  * This function is similar to alloc_pages(), except that it allocates the
5588  * minimum number of pages to satisfy the request.  alloc_pages() can only
5589  * allocate memory in power-of-two pages.
5590  *
5591  * This function is also limited by MAX_ORDER.
5592  *
5593  * Memory allocated by this function must be released by free_pages_exact().
5594  *
5595  * Return: pointer to the allocated area or %NULL in case of error.
5596  */
5597 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5598 {
5599 	unsigned int order = get_order(size);
5600 	unsigned long addr;
5601 
5602 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5603 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5604 
5605 	addr = __get_free_pages(gfp_mask, order);
5606 	return make_alloc_exact(addr, order, size);
5607 }
5608 EXPORT_SYMBOL(alloc_pages_exact);
5609 
5610 /**
5611  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5612  *			   pages on a node.
5613  * @nid: the preferred node ID where memory should be allocated
5614  * @size: the number of bytes to allocate
5615  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5616  *
5617  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5618  * back.
5619  *
5620  * Return: pointer to the allocated area or %NULL in case of error.
5621  */
5622 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5623 {
5624 	unsigned int order = get_order(size);
5625 	struct page *p;
5626 
5627 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5628 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5629 
5630 	p = alloc_pages_node(nid, gfp_mask, order);
5631 	if (!p)
5632 		return NULL;
5633 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5634 }
5635 
5636 /**
5637  * free_pages_exact - release memory allocated via alloc_pages_exact()
5638  * @virt: the value returned by alloc_pages_exact.
5639  * @size: size of allocation, same value as passed to alloc_pages_exact().
5640  *
5641  * Release the memory allocated by a previous call to alloc_pages_exact.
5642  */
5643 void free_pages_exact(void *virt, size_t size)
5644 {
5645 	unsigned long addr = (unsigned long)virt;
5646 	unsigned long end = addr + PAGE_ALIGN(size);
5647 
5648 	while (addr < end) {
5649 		free_page(addr);
5650 		addr += PAGE_SIZE;
5651 	}
5652 }
5653 EXPORT_SYMBOL(free_pages_exact);
5654 
5655 /**
5656  * nr_free_zone_pages - count number of pages beyond high watermark
5657  * @offset: The zone index of the highest zone
5658  *
5659  * nr_free_zone_pages() counts the number of pages which are beyond the
5660  * high watermark within all zones at or below a given zone index.  For each
5661  * zone, the number of pages is calculated as:
5662  *
5663  *     nr_free_zone_pages = managed_pages - high_pages
5664  *
5665  * Return: number of pages beyond high watermark.
5666  */
5667 static unsigned long nr_free_zone_pages(int offset)
5668 {
5669 	struct zoneref *z;
5670 	struct zone *zone;
5671 
5672 	/* Just pick one node, since fallback list is circular */
5673 	unsigned long sum = 0;
5674 
5675 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5676 
5677 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5678 		unsigned long size = zone_managed_pages(zone);
5679 		unsigned long high = high_wmark_pages(zone);
5680 		if (size > high)
5681 			sum += size - high;
5682 	}
5683 
5684 	return sum;
5685 }
5686 
5687 /**
5688  * nr_free_buffer_pages - count number of pages beyond high watermark
5689  *
5690  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5691  * watermark within ZONE_DMA and ZONE_NORMAL.
5692  *
5693  * Return: number of pages beyond high watermark within ZONE_DMA and
5694  * ZONE_NORMAL.
5695  */
5696 unsigned long nr_free_buffer_pages(void)
5697 {
5698 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5699 }
5700 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5701 
5702 static inline void show_node(struct zone *zone)
5703 {
5704 	if (IS_ENABLED(CONFIG_NUMA))
5705 		printk("Node %d ", zone_to_nid(zone));
5706 }
5707 
5708 long si_mem_available(void)
5709 {
5710 	long available;
5711 	unsigned long pagecache;
5712 	unsigned long wmark_low = 0;
5713 	unsigned long pages[NR_LRU_LISTS];
5714 	unsigned long reclaimable;
5715 	struct zone *zone;
5716 	int lru;
5717 
5718 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5719 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5720 
5721 	for_each_zone(zone)
5722 		wmark_low += low_wmark_pages(zone);
5723 
5724 	/*
5725 	 * Estimate the amount of memory available for userspace allocations,
5726 	 * without causing swapping.
5727 	 */
5728 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5729 
5730 	/*
5731 	 * Not all the page cache can be freed, otherwise the system will
5732 	 * start swapping. Assume at least half of the page cache, or the
5733 	 * low watermark worth of cache, needs to stay.
5734 	 */
5735 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5736 	pagecache -= min(pagecache / 2, wmark_low);
5737 	available += pagecache;
5738 
5739 	/*
5740 	 * Part of the reclaimable slab and other kernel memory consists of
5741 	 * items that are in use, and cannot be freed. Cap this estimate at the
5742 	 * low watermark.
5743 	 */
5744 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5745 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5746 	available += reclaimable - min(reclaimable / 2, wmark_low);
5747 
5748 	if (available < 0)
5749 		available = 0;
5750 	return available;
5751 }
5752 EXPORT_SYMBOL_GPL(si_mem_available);
5753 
5754 void si_meminfo(struct sysinfo *val)
5755 {
5756 	val->totalram = totalram_pages();
5757 	val->sharedram = global_node_page_state(NR_SHMEM);
5758 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5759 	val->bufferram = nr_blockdev_pages();
5760 	val->totalhigh = totalhigh_pages();
5761 	val->freehigh = nr_free_highpages();
5762 	val->mem_unit = PAGE_SIZE;
5763 }
5764 
5765 EXPORT_SYMBOL(si_meminfo);
5766 
5767 #ifdef CONFIG_NUMA
5768 void si_meminfo_node(struct sysinfo *val, int nid)
5769 {
5770 	int zone_type;		/* needs to be signed */
5771 	unsigned long managed_pages = 0;
5772 	unsigned long managed_highpages = 0;
5773 	unsigned long free_highpages = 0;
5774 	pg_data_t *pgdat = NODE_DATA(nid);
5775 
5776 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5777 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5778 	val->totalram = managed_pages;
5779 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5780 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5781 #ifdef CONFIG_HIGHMEM
5782 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5783 		struct zone *zone = &pgdat->node_zones[zone_type];
5784 
5785 		if (is_highmem(zone)) {
5786 			managed_highpages += zone_managed_pages(zone);
5787 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5788 		}
5789 	}
5790 	val->totalhigh = managed_highpages;
5791 	val->freehigh = free_highpages;
5792 #else
5793 	val->totalhigh = managed_highpages;
5794 	val->freehigh = free_highpages;
5795 #endif
5796 	val->mem_unit = PAGE_SIZE;
5797 }
5798 #endif
5799 
5800 /*
5801  * Determine whether the node should be displayed or not, depending on whether
5802  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5803  */
5804 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5805 {
5806 	if (!(flags & SHOW_MEM_FILTER_NODES))
5807 		return false;
5808 
5809 	/*
5810 	 * no node mask - aka implicit memory numa policy. Do not bother with
5811 	 * the synchronization - read_mems_allowed_begin - because we do not
5812 	 * have to be precise here.
5813 	 */
5814 	if (!nodemask)
5815 		nodemask = &cpuset_current_mems_allowed;
5816 
5817 	return !node_isset(nid, *nodemask);
5818 }
5819 
5820 #define K(x) ((x) << (PAGE_SHIFT-10))
5821 
5822 static void show_migration_types(unsigned char type)
5823 {
5824 	static const char types[MIGRATE_TYPES] = {
5825 		[MIGRATE_UNMOVABLE]	= 'U',
5826 		[MIGRATE_MOVABLE]	= 'M',
5827 		[MIGRATE_RECLAIMABLE]	= 'E',
5828 		[MIGRATE_HIGHATOMIC]	= 'H',
5829 #ifdef CONFIG_CMA
5830 		[MIGRATE_CMA]		= 'C',
5831 #endif
5832 #ifdef CONFIG_MEMORY_ISOLATION
5833 		[MIGRATE_ISOLATE]	= 'I',
5834 #endif
5835 	};
5836 	char tmp[MIGRATE_TYPES + 1];
5837 	char *p = tmp;
5838 	int i;
5839 
5840 	for (i = 0; i < MIGRATE_TYPES; i++) {
5841 		if (type & (1 << i))
5842 			*p++ = types[i];
5843 	}
5844 
5845 	*p = '\0';
5846 	printk(KERN_CONT "(%s) ", tmp);
5847 }
5848 
5849 /*
5850  * Show free area list (used inside shift_scroll-lock stuff)
5851  * We also calculate the percentage fragmentation. We do this by counting the
5852  * memory on each free list with the exception of the first item on the list.
5853  *
5854  * Bits in @filter:
5855  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5856  *   cpuset.
5857  */
5858 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5859 {
5860 	unsigned long free_pcp = 0;
5861 	int cpu;
5862 	struct zone *zone;
5863 	pg_data_t *pgdat;
5864 
5865 	for_each_populated_zone(zone) {
5866 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5867 			continue;
5868 
5869 		for_each_online_cpu(cpu)
5870 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5871 	}
5872 
5873 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5874 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5875 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5876 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5877 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5878 		" kernel_misc_reclaimable:%lu\n"
5879 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5880 		global_node_page_state(NR_ACTIVE_ANON),
5881 		global_node_page_state(NR_INACTIVE_ANON),
5882 		global_node_page_state(NR_ISOLATED_ANON),
5883 		global_node_page_state(NR_ACTIVE_FILE),
5884 		global_node_page_state(NR_INACTIVE_FILE),
5885 		global_node_page_state(NR_ISOLATED_FILE),
5886 		global_node_page_state(NR_UNEVICTABLE),
5887 		global_node_page_state(NR_FILE_DIRTY),
5888 		global_node_page_state(NR_WRITEBACK),
5889 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5890 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5891 		global_node_page_state(NR_FILE_MAPPED),
5892 		global_node_page_state(NR_SHMEM),
5893 		global_node_page_state(NR_PAGETABLE),
5894 		global_zone_page_state(NR_BOUNCE),
5895 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
5896 		global_zone_page_state(NR_FREE_PAGES),
5897 		free_pcp,
5898 		global_zone_page_state(NR_FREE_CMA_PAGES));
5899 
5900 	for_each_online_pgdat(pgdat) {
5901 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5902 			continue;
5903 
5904 		printk("Node %d"
5905 			" active_anon:%lukB"
5906 			" inactive_anon:%lukB"
5907 			" active_file:%lukB"
5908 			" inactive_file:%lukB"
5909 			" unevictable:%lukB"
5910 			" isolated(anon):%lukB"
5911 			" isolated(file):%lukB"
5912 			" mapped:%lukB"
5913 			" dirty:%lukB"
5914 			" writeback:%lukB"
5915 			" shmem:%lukB"
5916 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5917 			" shmem_thp: %lukB"
5918 			" shmem_pmdmapped: %lukB"
5919 			" anon_thp: %lukB"
5920 #endif
5921 			" writeback_tmp:%lukB"
5922 			" kernel_stack:%lukB"
5923 #ifdef CONFIG_SHADOW_CALL_STACK
5924 			" shadow_call_stack:%lukB"
5925 #endif
5926 			" pagetables:%lukB"
5927 			" all_unreclaimable? %s"
5928 			"\n",
5929 			pgdat->node_id,
5930 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5931 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5932 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5933 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5934 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5935 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5936 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5937 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5938 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5939 			K(node_page_state(pgdat, NR_WRITEBACK)),
5940 			K(node_page_state(pgdat, NR_SHMEM)),
5941 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5942 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
5943 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5944 			K(node_page_state(pgdat, NR_ANON_THPS)),
5945 #endif
5946 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5947 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
5948 #ifdef CONFIG_SHADOW_CALL_STACK
5949 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
5950 #endif
5951 			K(node_page_state(pgdat, NR_PAGETABLE)),
5952 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5953 				"yes" : "no");
5954 	}
5955 
5956 	for_each_populated_zone(zone) {
5957 		int i;
5958 
5959 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5960 			continue;
5961 
5962 		free_pcp = 0;
5963 		for_each_online_cpu(cpu)
5964 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5965 
5966 		show_node(zone);
5967 		printk(KERN_CONT
5968 			"%s"
5969 			" free:%lukB"
5970 			" boost:%lukB"
5971 			" min:%lukB"
5972 			" low:%lukB"
5973 			" high:%lukB"
5974 			" reserved_highatomic:%luKB"
5975 			" active_anon:%lukB"
5976 			" inactive_anon:%lukB"
5977 			" active_file:%lukB"
5978 			" inactive_file:%lukB"
5979 			" unevictable:%lukB"
5980 			" writepending:%lukB"
5981 			" present:%lukB"
5982 			" managed:%lukB"
5983 			" mlocked:%lukB"
5984 			" bounce:%lukB"
5985 			" free_pcp:%lukB"
5986 			" local_pcp:%ukB"
5987 			" free_cma:%lukB"
5988 			"\n",
5989 			zone->name,
5990 			K(zone_page_state(zone, NR_FREE_PAGES)),
5991 			K(zone->watermark_boost),
5992 			K(min_wmark_pages(zone)),
5993 			K(low_wmark_pages(zone)),
5994 			K(high_wmark_pages(zone)),
5995 			K(zone->nr_reserved_highatomic),
5996 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5997 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5998 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5999 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6000 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6001 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6002 			K(zone->present_pages),
6003 			K(zone_managed_pages(zone)),
6004 			K(zone_page_state(zone, NR_MLOCK)),
6005 			K(zone_page_state(zone, NR_BOUNCE)),
6006 			K(free_pcp),
6007 			K(this_cpu_read(zone->per_cpu_pageset->count)),
6008 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6009 		printk("lowmem_reserve[]:");
6010 		for (i = 0; i < MAX_NR_ZONES; i++)
6011 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6012 		printk(KERN_CONT "\n");
6013 	}
6014 
6015 	for_each_populated_zone(zone) {
6016 		unsigned int order;
6017 		unsigned long nr[MAX_ORDER], flags, total = 0;
6018 		unsigned char types[MAX_ORDER];
6019 
6020 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6021 			continue;
6022 		show_node(zone);
6023 		printk(KERN_CONT "%s: ", zone->name);
6024 
6025 		spin_lock_irqsave(&zone->lock, flags);
6026 		for (order = 0; order < MAX_ORDER; order++) {
6027 			struct free_area *area = &zone->free_area[order];
6028 			int type;
6029 
6030 			nr[order] = area->nr_free;
6031 			total += nr[order] << order;
6032 
6033 			types[order] = 0;
6034 			for (type = 0; type < MIGRATE_TYPES; type++) {
6035 				if (!free_area_empty(area, type))
6036 					types[order] |= 1 << type;
6037 			}
6038 		}
6039 		spin_unlock_irqrestore(&zone->lock, flags);
6040 		for (order = 0; order < MAX_ORDER; order++) {
6041 			printk(KERN_CONT "%lu*%lukB ",
6042 			       nr[order], K(1UL) << order);
6043 			if (nr[order])
6044 				show_migration_types(types[order]);
6045 		}
6046 		printk(KERN_CONT "= %lukB\n", K(total));
6047 	}
6048 
6049 	hugetlb_show_meminfo();
6050 
6051 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6052 
6053 	show_swap_cache_info();
6054 }
6055 
6056 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6057 {
6058 	zoneref->zone = zone;
6059 	zoneref->zone_idx = zone_idx(zone);
6060 }
6061 
6062 /*
6063  * Builds allocation fallback zone lists.
6064  *
6065  * Add all populated zones of a node to the zonelist.
6066  */
6067 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6068 {
6069 	struct zone *zone;
6070 	enum zone_type zone_type = MAX_NR_ZONES;
6071 	int nr_zones = 0;
6072 
6073 	do {
6074 		zone_type--;
6075 		zone = pgdat->node_zones + zone_type;
6076 		if (managed_zone(zone)) {
6077 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6078 			check_highest_zone(zone_type);
6079 		}
6080 	} while (zone_type);
6081 
6082 	return nr_zones;
6083 }
6084 
6085 #ifdef CONFIG_NUMA
6086 
6087 static int __parse_numa_zonelist_order(char *s)
6088 {
6089 	/*
6090 	 * We used to support different zonelists modes but they turned
6091 	 * out to be just not useful. Let's keep the warning in place
6092 	 * if somebody still use the cmd line parameter so that we do
6093 	 * not fail it silently
6094 	 */
6095 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6096 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
6097 		return -EINVAL;
6098 	}
6099 	return 0;
6100 }
6101 
6102 char numa_zonelist_order[] = "Node";
6103 
6104 /*
6105  * sysctl handler for numa_zonelist_order
6106  */
6107 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6108 		void *buffer, size_t *length, loff_t *ppos)
6109 {
6110 	if (write)
6111 		return __parse_numa_zonelist_order(buffer);
6112 	return proc_dostring(table, write, buffer, length, ppos);
6113 }
6114 
6115 
6116 #define MAX_NODE_LOAD (nr_online_nodes)
6117 static int node_load[MAX_NUMNODES];
6118 
6119 /**
6120  * find_next_best_node - find the next node that should appear in a given node's fallback list
6121  * @node: node whose fallback list we're appending
6122  * @used_node_mask: nodemask_t of already used nodes
6123  *
6124  * We use a number of factors to determine which is the next node that should
6125  * appear on a given node's fallback list.  The node should not have appeared
6126  * already in @node's fallback list, and it should be the next closest node
6127  * according to the distance array (which contains arbitrary distance values
6128  * from each node to each node in the system), and should also prefer nodes
6129  * with no CPUs, since presumably they'll have very little allocation pressure
6130  * on them otherwise.
6131  *
6132  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6133  */
6134 int find_next_best_node(int node, nodemask_t *used_node_mask)
6135 {
6136 	int n, val;
6137 	int min_val = INT_MAX;
6138 	int best_node = NUMA_NO_NODE;
6139 
6140 	/* Use the local node if we haven't already */
6141 	if (!node_isset(node, *used_node_mask)) {
6142 		node_set(node, *used_node_mask);
6143 		return node;
6144 	}
6145 
6146 	for_each_node_state(n, N_MEMORY) {
6147 
6148 		/* Don't want a node to appear more than once */
6149 		if (node_isset(n, *used_node_mask))
6150 			continue;
6151 
6152 		/* Use the distance array to find the distance */
6153 		val = node_distance(node, n);
6154 
6155 		/* Penalize nodes under us ("prefer the next node") */
6156 		val += (n < node);
6157 
6158 		/* Give preference to headless and unused nodes */
6159 		if (!cpumask_empty(cpumask_of_node(n)))
6160 			val += PENALTY_FOR_NODE_WITH_CPUS;
6161 
6162 		/* Slight preference for less loaded node */
6163 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6164 		val += node_load[n];
6165 
6166 		if (val < min_val) {
6167 			min_val = val;
6168 			best_node = n;
6169 		}
6170 	}
6171 
6172 	if (best_node >= 0)
6173 		node_set(best_node, *used_node_mask);
6174 
6175 	return best_node;
6176 }
6177 
6178 
6179 /*
6180  * Build zonelists ordered by node and zones within node.
6181  * This results in maximum locality--normal zone overflows into local
6182  * DMA zone, if any--but risks exhausting DMA zone.
6183  */
6184 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6185 		unsigned nr_nodes)
6186 {
6187 	struct zoneref *zonerefs;
6188 	int i;
6189 
6190 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6191 
6192 	for (i = 0; i < nr_nodes; i++) {
6193 		int nr_zones;
6194 
6195 		pg_data_t *node = NODE_DATA(node_order[i]);
6196 
6197 		nr_zones = build_zonerefs_node(node, zonerefs);
6198 		zonerefs += nr_zones;
6199 	}
6200 	zonerefs->zone = NULL;
6201 	zonerefs->zone_idx = 0;
6202 }
6203 
6204 /*
6205  * Build gfp_thisnode zonelists
6206  */
6207 static void build_thisnode_zonelists(pg_data_t *pgdat)
6208 {
6209 	struct zoneref *zonerefs;
6210 	int nr_zones;
6211 
6212 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6213 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6214 	zonerefs += nr_zones;
6215 	zonerefs->zone = NULL;
6216 	zonerefs->zone_idx = 0;
6217 }
6218 
6219 /*
6220  * Build zonelists ordered by zone and nodes within zones.
6221  * This results in conserving DMA zone[s] until all Normal memory is
6222  * exhausted, but results in overflowing to remote node while memory
6223  * may still exist in local DMA zone.
6224  */
6225 
6226 static void build_zonelists(pg_data_t *pgdat)
6227 {
6228 	static int node_order[MAX_NUMNODES];
6229 	int node, load, nr_nodes = 0;
6230 	nodemask_t used_mask = NODE_MASK_NONE;
6231 	int local_node, prev_node;
6232 
6233 	/* NUMA-aware ordering of nodes */
6234 	local_node = pgdat->node_id;
6235 	load = nr_online_nodes;
6236 	prev_node = local_node;
6237 
6238 	memset(node_order, 0, sizeof(node_order));
6239 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6240 		/*
6241 		 * We don't want to pressure a particular node.
6242 		 * So adding penalty to the first node in same
6243 		 * distance group to make it round-robin.
6244 		 */
6245 		if (node_distance(local_node, node) !=
6246 		    node_distance(local_node, prev_node))
6247 			node_load[node] += load;
6248 
6249 		node_order[nr_nodes++] = node;
6250 		prev_node = node;
6251 		load--;
6252 	}
6253 
6254 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6255 	build_thisnode_zonelists(pgdat);
6256 	pr_info("Fallback order for Node %d: ", local_node);
6257 	for (node = 0; node < nr_nodes; node++)
6258 		pr_cont("%d ", node_order[node]);
6259 	pr_cont("\n");
6260 }
6261 
6262 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6263 /*
6264  * Return node id of node used for "local" allocations.
6265  * I.e., first node id of first zone in arg node's generic zonelist.
6266  * Used for initializing percpu 'numa_mem', which is used primarily
6267  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6268  */
6269 int local_memory_node(int node)
6270 {
6271 	struct zoneref *z;
6272 
6273 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6274 				   gfp_zone(GFP_KERNEL),
6275 				   NULL);
6276 	return zone_to_nid(z->zone);
6277 }
6278 #endif
6279 
6280 static void setup_min_unmapped_ratio(void);
6281 static void setup_min_slab_ratio(void);
6282 #else	/* CONFIG_NUMA */
6283 
6284 static void build_zonelists(pg_data_t *pgdat)
6285 {
6286 	int node, local_node;
6287 	struct zoneref *zonerefs;
6288 	int nr_zones;
6289 
6290 	local_node = pgdat->node_id;
6291 
6292 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6293 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6294 	zonerefs += nr_zones;
6295 
6296 	/*
6297 	 * Now we build the zonelist so that it contains the zones
6298 	 * of all the other nodes.
6299 	 * We don't want to pressure a particular node, so when
6300 	 * building the zones for node N, we make sure that the
6301 	 * zones coming right after the local ones are those from
6302 	 * node N+1 (modulo N)
6303 	 */
6304 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6305 		if (!node_online(node))
6306 			continue;
6307 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6308 		zonerefs += nr_zones;
6309 	}
6310 	for (node = 0; node < local_node; node++) {
6311 		if (!node_online(node))
6312 			continue;
6313 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6314 		zonerefs += nr_zones;
6315 	}
6316 
6317 	zonerefs->zone = NULL;
6318 	zonerefs->zone_idx = 0;
6319 }
6320 
6321 #endif	/* CONFIG_NUMA */
6322 
6323 /*
6324  * Boot pageset table. One per cpu which is going to be used for all
6325  * zones and all nodes. The parameters will be set in such a way
6326  * that an item put on a list will immediately be handed over to
6327  * the buddy list. This is safe since pageset manipulation is done
6328  * with interrupts disabled.
6329  *
6330  * The boot_pagesets must be kept even after bootup is complete for
6331  * unused processors and/or zones. They do play a role for bootstrapping
6332  * hotplugged processors.
6333  *
6334  * zoneinfo_show() and maybe other functions do
6335  * not check if the processor is online before following the pageset pointer.
6336  * Other parts of the kernel may not check if the zone is available.
6337  */
6338 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6339 /* These effectively disable the pcplists in the boot pageset completely */
6340 #define BOOT_PAGESET_HIGH	0
6341 #define BOOT_PAGESET_BATCH	1
6342 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6343 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6344 DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6345 
6346 static void __build_all_zonelists(void *data)
6347 {
6348 	int nid;
6349 	int __maybe_unused cpu;
6350 	pg_data_t *self = data;
6351 	static DEFINE_SPINLOCK(lock);
6352 
6353 	spin_lock(&lock);
6354 
6355 #ifdef CONFIG_NUMA
6356 	memset(node_load, 0, sizeof(node_load));
6357 #endif
6358 
6359 	/*
6360 	 * This node is hotadded and no memory is yet present.   So just
6361 	 * building zonelists is fine - no need to touch other nodes.
6362 	 */
6363 	if (self && !node_online(self->node_id)) {
6364 		build_zonelists(self);
6365 	} else {
6366 		/*
6367 		 * All possible nodes have pgdat preallocated
6368 		 * in free_area_init
6369 		 */
6370 		for_each_node(nid) {
6371 			pg_data_t *pgdat = NODE_DATA(nid);
6372 
6373 			build_zonelists(pgdat);
6374 		}
6375 
6376 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6377 		/*
6378 		 * We now know the "local memory node" for each node--
6379 		 * i.e., the node of the first zone in the generic zonelist.
6380 		 * Set up numa_mem percpu variable for on-line cpus.  During
6381 		 * boot, only the boot cpu should be on-line;  we'll init the
6382 		 * secondary cpus' numa_mem as they come on-line.  During
6383 		 * node/memory hotplug, we'll fixup all on-line cpus.
6384 		 */
6385 		for_each_online_cpu(cpu)
6386 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6387 #endif
6388 	}
6389 
6390 	spin_unlock(&lock);
6391 }
6392 
6393 static noinline void __init
6394 build_all_zonelists_init(void)
6395 {
6396 	int cpu;
6397 
6398 	__build_all_zonelists(NULL);
6399 
6400 	/*
6401 	 * Initialize the boot_pagesets that are going to be used
6402 	 * for bootstrapping processors. The real pagesets for
6403 	 * each zone will be allocated later when the per cpu
6404 	 * allocator is available.
6405 	 *
6406 	 * boot_pagesets are used also for bootstrapping offline
6407 	 * cpus if the system is already booted because the pagesets
6408 	 * are needed to initialize allocators on a specific cpu too.
6409 	 * F.e. the percpu allocator needs the page allocator which
6410 	 * needs the percpu allocator in order to allocate its pagesets
6411 	 * (a chicken-egg dilemma).
6412 	 */
6413 	for_each_possible_cpu(cpu)
6414 		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6415 
6416 	mminit_verify_zonelist();
6417 	cpuset_init_current_mems_allowed();
6418 }
6419 
6420 /*
6421  * unless system_state == SYSTEM_BOOTING.
6422  *
6423  * __ref due to call of __init annotated helper build_all_zonelists_init
6424  * [protected by SYSTEM_BOOTING].
6425  */
6426 void __ref build_all_zonelists(pg_data_t *pgdat)
6427 {
6428 	unsigned long vm_total_pages;
6429 
6430 	if (system_state == SYSTEM_BOOTING) {
6431 		build_all_zonelists_init();
6432 	} else {
6433 		__build_all_zonelists(pgdat);
6434 		/* cpuset refresh routine should be here */
6435 	}
6436 	/* Get the number of free pages beyond high watermark in all zones. */
6437 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6438 	/*
6439 	 * Disable grouping by mobility if the number of pages in the
6440 	 * system is too low to allow the mechanism to work. It would be
6441 	 * more accurate, but expensive to check per-zone. This check is
6442 	 * made on memory-hotadd so a system can start with mobility
6443 	 * disabled and enable it later
6444 	 */
6445 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6446 		page_group_by_mobility_disabled = 1;
6447 	else
6448 		page_group_by_mobility_disabled = 0;
6449 
6450 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6451 		nr_online_nodes,
6452 		page_group_by_mobility_disabled ? "off" : "on",
6453 		vm_total_pages);
6454 #ifdef CONFIG_NUMA
6455 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6456 #endif
6457 }
6458 
6459 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6460 static bool __meminit
6461 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6462 {
6463 	static struct memblock_region *r;
6464 
6465 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6466 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6467 			for_each_mem_region(r) {
6468 				if (*pfn < memblock_region_memory_end_pfn(r))
6469 					break;
6470 			}
6471 		}
6472 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6473 		    memblock_is_mirror(r)) {
6474 			*pfn = memblock_region_memory_end_pfn(r);
6475 			return true;
6476 		}
6477 	}
6478 	return false;
6479 }
6480 
6481 /*
6482  * Initially all pages are reserved - free ones are freed
6483  * up by memblock_free_all() once the early boot process is
6484  * done. Non-atomic initialization, single-pass.
6485  *
6486  * All aligned pageblocks are initialized to the specified migratetype
6487  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6488  * zone stats (e.g., nr_isolate_pageblock) are touched.
6489  */
6490 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6491 		unsigned long start_pfn, unsigned long zone_end_pfn,
6492 		enum meminit_context context,
6493 		struct vmem_altmap *altmap, int migratetype)
6494 {
6495 	unsigned long pfn, end_pfn = start_pfn + size;
6496 	struct page *page;
6497 
6498 	if (highest_memmap_pfn < end_pfn - 1)
6499 		highest_memmap_pfn = end_pfn - 1;
6500 
6501 #ifdef CONFIG_ZONE_DEVICE
6502 	/*
6503 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6504 	 * memory. We limit the total number of pages to initialize to just
6505 	 * those that might contain the memory mapping. We will defer the
6506 	 * ZONE_DEVICE page initialization until after we have released
6507 	 * the hotplug lock.
6508 	 */
6509 	if (zone == ZONE_DEVICE) {
6510 		if (!altmap)
6511 			return;
6512 
6513 		if (start_pfn == altmap->base_pfn)
6514 			start_pfn += altmap->reserve;
6515 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6516 	}
6517 #endif
6518 
6519 	for (pfn = start_pfn; pfn < end_pfn; ) {
6520 		/*
6521 		 * There can be holes in boot-time mem_map[]s handed to this
6522 		 * function.  They do not exist on hotplugged memory.
6523 		 */
6524 		if (context == MEMINIT_EARLY) {
6525 			if (overlap_memmap_init(zone, &pfn))
6526 				continue;
6527 			if (defer_init(nid, pfn, zone_end_pfn))
6528 				break;
6529 		}
6530 
6531 		page = pfn_to_page(pfn);
6532 		__init_single_page(page, pfn, zone, nid);
6533 		if (context == MEMINIT_HOTPLUG)
6534 			__SetPageReserved(page);
6535 
6536 		/*
6537 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6538 		 * such that unmovable allocations won't be scattered all
6539 		 * over the place during system boot.
6540 		 */
6541 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6542 			set_pageblock_migratetype(page, migratetype);
6543 			cond_resched();
6544 		}
6545 		pfn++;
6546 	}
6547 }
6548 
6549 #ifdef CONFIG_ZONE_DEVICE
6550 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
6551 					  unsigned long zone_idx, int nid,
6552 					  struct dev_pagemap *pgmap)
6553 {
6554 
6555 	__init_single_page(page, pfn, zone_idx, nid);
6556 
6557 	/*
6558 	 * Mark page reserved as it will need to wait for onlining
6559 	 * phase for it to be fully associated with a zone.
6560 	 *
6561 	 * We can use the non-atomic __set_bit operation for setting
6562 	 * the flag as we are still initializing the pages.
6563 	 */
6564 	__SetPageReserved(page);
6565 
6566 	/*
6567 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6568 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6569 	 * ever freed or placed on a driver-private list.
6570 	 */
6571 	page->pgmap = pgmap;
6572 	page->zone_device_data = NULL;
6573 
6574 	/*
6575 	 * Mark the block movable so that blocks are reserved for
6576 	 * movable at startup. This will force kernel allocations
6577 	 * to reserve their blocks rather than leaking throughout
6578 	 * the address space during boot when many long-lived
6579 	 * kernel allocations are made.
6580 	 *
6581 	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6582 	 * because this is done early in section_activate()
6583 	 */
6584 	if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6585 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6586 		cond_resched();
6587 	}
6588 }
6589 
6590 static void __ref memmap_init_compound(struct page *head,
6591 				       unsigned long head_pfn,
6592 				       unsigned long zone_idx, int nid,
6593 				       struct dev_pagemap *pgmap,
6594 				       unsigned long nr_pages)
6595 {
6596 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
6597 	unsigned int order = pgmap->vmemmap_shift;
6598 
6599 	__SetPageHead(head);
6600 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
6601 		struct page *page = pfn_to_page(pfn);
6602 
6603 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6604 		prep_compound_tail(head, pfn - head_pfn);
6605 		set_page_count(page, 0);
6606 
6607 		/*
6608 		 * The first tail page stores compound_mapcount_ptr() and
6609 		 * compound_order() and the second tail page stores
6610 		 * compound_pincount_ptr(). Call prep_compound_head() after
6611 		 * the first and second tail pages have been initialized to
6612 		 * not have the data overwritten.
6613 		 */
6614 		if (pfn == head_pfn + 2)
6615 			prep_compound_head(head, order);
6616 	}
6617 }
6618 
6619 void __ref memmap_init_zone_device(struct zone *zone,
6620 				   unsigned long start_pfn,
6621 				   unsigned long nr_pages,
6622 				   struct dev_pagemap *pgmap)
6623 {
6624 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6625 	struct pglist_data *pgdat = zone->zone_pgdat;
6626 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6627 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
6628 	unsigned long zone_idx = zone_idx(zone);
6629 	unsigned long start = jiffies;
6630 	int nid = pgdat->node_id;
6631 
6632 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6633 		return;
6634 
6635 	/*
6636 	 * The call to memmap_init should have already taken care
6637 	 * of the pages reserved for the memmap, so we can just jump to
6638 	 * the end of that region and start processing the device pages.
6639 	 */
6640 	if (altmap) {
6641 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6642 		nr_pages = end_pfn - start_pfn;
6643 	}
6644 
6645 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
6646 		struct page *page = pfn_to_page(pfn);
6647 
6648 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6649 
6650 		if (pfns_per_compound == 1)
6651 			continue;
6652 
6653 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
6654 				     pfns_per_compound);
6655 	}
6656 
6657 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6658 		nr_pages, jiffies_to_msecs(jiffies - start));
6659 }
6660 
6661 #endif
6662 static void __meminit zone_init_free_lists(struct zone *zone)
6663 {
6664 	unsigned int order, t;
6665 	for_each_migratetype_order(order, t) {
6666 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6667 		zone->free_area[order].nr_free = 0;
6668 	}
6669 }
6670 
6671 /*
6672  * Only struct pages that correspond to ranges defined by memblock.memory
6673  * are zeroed and initialized by going through __init_single_page() during
6674  * memmap_init_zone_range().
6675  *
6676  * But, there could be struct pages that correspond to holes in
6677  * memblock.memory. This can happen because of the following reasons:
6678  * - physical memory bank size is not necessarily the exact multiple of the
6679  *   arbitrary section size
6680  * - early reserved memory may not be listed in memblock.memory
6681  * - memory layouts defined with memmap= kernel parameter may not align
6682  *   nicely with memmap sections
6683  *
6684  * Explicitly initialize those struct pages so that:
6685  * - PG_Reserved is set
6686  * - zone and node links point to zone and node that span the page if the
6687  *   hole is in the middle of a zone
6688  * - zone and node links point to adjacent zone/node if the hole falls on
6689  *   the zone boundary; the pages in such holes will be prepended to the
6690  *   zone/node above the hole except for the trailing pages in the last
6691  *   section that will be appended to the zone/node below.
6692  */
6693 static void __init init_unavailable_range(unsigned long spfn,
6694 					  unsigned long epfn,
6695 					  int zone, int node)
6696 {
6697 	unsigned long pfn;
6698 	u64 pgcnt = 0;
6699 
6700 	for (pfn = spfn; pfn < epfn; pfn++) {
6701 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6702 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6703 				+ pageblock_nr_pages - 1;
6704 			continue;
6705 		}
6706 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
6707 		__SetPageReserved(pfn_to_page(pfn));
6708 		pgcnt++;
6709 	}
6710 
6711 	if (pgcnt)
6712 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6713 			node, zone_names[zone], pgcnt);
6714 }
6715 
6716 static void __init memmap_init_zone_range(struct zone *zone,
6717 					  unsigned long start_pfn,
6718 					  unsigned long end_pfn,
6719 					  unsigned long *hole_pfn)
6720 {
6721 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6722 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6723 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6724 
6725 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6726 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6727 
6728 	if (start_pfn >= end_pfn)
6729 		return;
6730 
6731 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6732 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6733 
6734 	if (*hole_pfn < start_pfn)
6735 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6736 
6737 	*hole_pfn = end_pfn;
6738 }
6739 
6740 static void __init memmap_init(void)
6741 {
6742 	unsigned long start_pfn, end_pfn;
6743 	unsigned long hole_pfn = 0;
6744 	int i, j, zone_id = 0, nid;
6745 
6746 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6747 		struct pglist_data *node = NODE_DATA(nid);
6748 
6749 		for (j = 0; j < MAX_NR_ZONES; j++) {
6750 			struct zone *zone = node->node_zones + j;
6751 
6752 			if (!populated_zone(zone))
6753 				continue;
6754 
6755 			memmap_init_zone_range(zone, start_pfn, end_pfn,
6756 					       &hole_pfn);
6757 			zone_id = j;
6758 		}
6759 	}
6760 
6761 #ifdef CONFIG_SPARSEMEM
6762 	/*
6763 	 * Initialize the memory map for hole in the range [memory_end,
6764 	 * section_end].
6765 	 * Append the pages in this hole to the highest zone in the last
6766 	 * node.
6767 	 * The call to init_unavailable_range() is outside the ifdef to
6768 	 * silence the compiler warining about zone_id set but not used;
6769 	 * for FLATMEM it is a nop anyway
6770 	 */
6771 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6772 	if (hole_pfn < end_pfn)
6773 #endif
6774 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
6775 }
6776 
6777 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
6778 			  phys_addr_t min_addr, int nid, bool exact_nid)
6779 {
6780 	void *ptr;
6781 
6782 	if (exact_nid)
6783 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
6784 						   MEMBLOCK_ALLOC_ACCESSIBLE,
6785 						   nid);
6786 	else
6787 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
6788 						 MEMBLOCK_ALLOC_ACCESSIBLE,
6789 						 nid);
6790 
6791 	if (ptr && size > 0)
6792 		page_init_poison(ptr, size);
6793 
6794 	return ptr;
6795 }
6796 
6797 static int zone_batchsize(struct zone *zone)
6798 {
6799 #ifdef CONFIG_MMU
6800 	int batch;
6801 
6802 	/*
6803 	 * The number of pages to batch allocate is either ~0.1%
6804 	 * of the zone or 1MB, whichever is smaller. The batch
6805 	 * size is striking a balance between allocation latency
6806 	 * and zone lock contention.
6807 	 */
6808 	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
6809 	batch /= 4;		/* We effectively *= 4 below */
6810 	if (batch < 1)
6811 		batch = 1;
6812 
6813 	/*
6814 	 * Clamp the batch to a 2^n - 1 value. Having a power
6815 	 * of 2 value was found to be more likely to have
6816 	 * suboptimal cache aliasing properties in some cases.
6817 	 *
6818 	 * For example if 2 tasks are alternately allocating
6819 	 * batches of pages, one task can end up with a lot
6820 	 * of pages of one half of the possible page colors
6821 	 * and the other with pages of the other colors.
6822 	 */
6823 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6824 
6825 	return batch;
6826 
6827 #else
6828 	/* The deferral and batching of frees should be suppressed under NOMMU
6829 	 * conditions.
6830 	 *
6831 	 * The problem is that NOMMU needs to be able to allocate large chunks
6832 	 * of contiguous memory as there's no hardware page translation to
6833 	 * assemble apparent contiguous memory from discontiguous pages.
6834 	 *
6835 	 * Queueing large contiguous runs of pages for batching, however,
6836 	 * causes the pages to actually be freed in smaller chunks.  As there
6837 	 * can be a significant delay between the individual batches being
6838 	 * recycled, this leads to the once large chunks of space being
6839 	 * fragmented and becoming unavailable for high-order allocations.
6840 	 */
6841 	return 0;
6842 #endif
6843 }
6844 
6845 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
6846 {
6847 #ifdef CONFIG_MMU
6848 	int high;
6849 	int nr_split_cpus;
6850 	unsigned long total_pages;
6851 
6852 	if (!percpu_pagelist_high_fraction) {
6853 		/*
6854 		 * By default, the high value of the pcp is based on the zone
6855 		 * low watermark so that if they are full then background
6856 		 * reclaim will not be started prematurely.
6857 		 */
6858 		total_pages = low_wmark_pages(zone);
6859 	} else {
6860 		/*
6861 		 * If percpu_pagelist_high_fraction is configured, the high
6862 		 * value is based on a fraction of the managed pages in the
6863 		 * zone.
6864 		 */
6865 		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
6866 	}
6867 
6868 	/*
6869 	 * Split the high value across all online CPUs local to the zone. Note
6870 	 * that early in boot that CPUs may not be online yet and that during
6871 	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
6872 	 * onlined. For memory nodes that have no CPUs, split pcp->high across
6873 	 * all online CPUs to mitigate the risk that reclaim is triggered
6874 	 * prematurely due to pages stored on pcp lists.
6875 	 */
6876 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
6877 	if (!nr_split_cpus)
6878 		nr_split_cpus = num_online_cpus();
6879 	high = total_pages / nr_split_cpus;
6880 
6881 	/*
6882 	 * Ensure high is at least batch*4. The multiple is based on the
6883 	 * historical relationship between high and batch.
6884 	 */
6885 	high = max(high, batch << 2);
6886 
6887 	return high;
6888 #else
6889 	return 0;
6890 #endif
6891 }
6892 
6893 /*
6894  * pcp->high and pcp->batch values are related and generally batch is lower
6895  * than high. They are also related to pcp->count such that count is lower
6896  * than high, and as soon as it reaches high, the pcplist is flushed.
6897  *
6898  * However, guaranteeing these relations at all times would require e.g. write
6899  * barriers here but also careful usage of read barriers at the read side, and
6900  * thus be prone to error and bad for performance. Thus the update only prevents
6901  * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6902  * can cope with those fields changing asynchronously, and fully trust only the
6903  * pcp->count field on the local CPU with interrupts disabled.
6904  *
6905  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6906  * outside of boot time (or some other assurance that no concurrent updaters
6907  * exist).
6908  */
6909 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6910 		unsigned long batch)
6911 {
6912 	WRITE_ONCE(pcp->batch, batch);
6913 	WRITE_ONCE(pcp->high, high);
6914 }
6915 
6916 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6917 {
6918 	int pindex;
6919 
6920 	memset(pcp, 0, sizeof(*pcp));
6921 	memset(pzstats, 0, sizeof(*pzstats));
6922 
6923 	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6924 		INIT_LIST_HEAD(&pcp->lists[pindex]);
6925 
6926 	/*
6927 	 * Set batch and high values safe for a boot pageset. A true percpu
6928 	 * pageset's initialization will update them subsequently. Here we don't
6929 	 * need to be as careful as pageset_update() as nobody can access the
6930 	 * pageset yet.
6931 	 */
6932 	pcp->high = BOOT_PAGESET_HIGH;
6933 	pcp->batch = BOOT_PAGESET_BATCH;
6934 	pcp->free_factor = 0;
6935 }
6936 
6937 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
6938 		unsigned long batch)
6939 {
6940 	struct per_cpu_pages *pcp;
6941 	int cpu;
6942 
6943 	for_each_possible_cpu(cpu) {
6944 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6945 		pageset_update(pcp, high, batch);
6946 	}
6947 }
6948 
6949 /*
6950  * Calculate and set new high and batch values for all per-cpu pagesets of a
6951  * zone based on the zone's size.
6952  */
6953 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6954 {
6955 	int new_high, new_batch;
6956 
6957 	new_batch = max(1, zone_batchsize(zone));
6958 	new_high = zone_highsize(zone, new_batch, cpu_online);
6959 
6960 	if (zone->pageset_high == new_high &&
6961 	    zone->pageset_batch == new_batch)
6962 		return;
6963 
6964 	zone->pageset_high = new_high;
6965 	zone->pageset_batch = new_batch;
6966 
6967 	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
6968 }
6969 
6970 void __meminit setup_zone_pageset(struct zone *zone)
6971 {
6972 	int cpu;
6973 
6974 	/* Size may be 0 on !SMP && !NUMA */
6975 	if (sizeof(struct per_cpu_zonestat) > 0)
6976 		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6977 
6978 	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6979 	for_each_possible_cpu(cpu) {
6980 		struct per_cpu_pages *pcp;
6981 		struct per_cpu_zonestat *pzstats;
6982 
6983 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6984 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6985 		per_cpu_pages_init(pcp, pzstats);
6986 	}
6987 
6988 	zone_set_pageset_high_and_batch(zone, 0);
6989 }
6990 
6991 /*
6992  * Allocate per cpu pagesets and initialize them.
6993  * Before this call only boot pagesets were available.
6994  */
6995 void __init setup_per_cpu_pageset(void)
6996 {
6997 	struct pglist_data *pgdat;
6998 	struct zone *zone;
6999 	int __maybe_unused cpu;
7000 
7001 	for_each_populated_zone(zone)
7002 		setup_zone_pageset(zone);
7003 
7004 #ifdef CONFIG_NUMA
7005 	/*
7006 	 * Unpopulated zones continue using the boot pagesets.
7007 	 * The numa stats for these pagesets need to be reset.
7008 	 * Otherwise, they will end up skewing the stats of
7009 	 * the nodes these zones are associated with.
7010 	 */
7011 	for_each_possible_cpu(cpu) {
7012 		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
7013 		memset(pzstats->vm_numa_event, 0,
7014 		       sizeof(pzstats->vm_numa_event));
7015 	}
7016 #endif
7017 
7018 	for_each_online_pgdat(pgdat)
7019 		pgdat->per_cpu_nodestats =
7020 			alloc_percpu(struct per_cpu_nodestat);
7021 }
7022 
7023 static __meminit void zone_pcp_init(struct zone *zone)
7024 {
7025 	/*
7026 	 * per cpu subsystem is not up at this point. The following code
7027 	 * relies on the ability of the linker to provide the
7028 	 * offset of a (static) per cpu variable into the per cpu area.
7029 	 */
7030 	zone->per_cpu_pageset = &boot_pageset;
7031 	zone->per_cpu_zonestats = &boot_zonestats;
7032 	zone->pageset_high = BOOT_PAGESET_HIGH;
7033 	zone->pageset_batch = BOOT_PAGESET_BATCH;
7034 
7035 	if (populated_zone(zone))
7036 		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7037 			 zone->present_pages, zone_batchsize(zone));
7038 }
7039 
7040 void __meminit init_currently_empty_zone(struct zone *zone,
7041 					unsigned long zone_start_pfn,
7042 					unsigned long size)
7043 {
7044 	struct pglist_data *pgdat = zone->zone_pgdat;
7045 	int zone_idx = zone_idx(zone) + 1;
7046 
7047 	if (zone_idx > pgdat->nr_zones)
7048 		pgdat->nr_zones = zone_idx;
7049 
7050 	zone->zone_start_pfn = zone_start_pfn;
7051 
7052 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
7053 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
7054 			pgdat->node_id,
7055 			(unsigned long)zone_idx(zone),
7056 			zone_start_pfn, (zone_start_pfn + size));
7057 
7058 	zone_init_free_lists(zone);
7059 	zone->initialized = 1;
7060 }
7061 
7062 /**
7063  * get_pfn_range_for_nid - Return the start and end page frames for a node
7064  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7065  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7066  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7067  *
7068  * It returns the start and end page frame of a node based on information
7069  * provided by memblock_set_node(). If called for a node
7070  * with no available memory, a warning is printed and the start and end
7071  * PFNs will be 0.
7072  */
7073 void __init get_pfn_range_for_nid(unsigned int nid,
7074 			unsigned long *start_pfn, unsigned long *end_pfn)
7075 {
7076 	unsigned long this_start_pfn, this_end_pfn;
7077 	int i;
7078 
7079 	*start_pfn = -1UL;
7080 	*end_pfn = 0;
7081 
7082 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7083 		*start_pfn = min(*start_pfn, this_start_pfn);
7084 		*end_pfn = max(*end_pfn, this_end_pfn);
7085 	}
7086 
7087 	if (*start_pfn == -1UL)
7088 		*start_pfn = 0;
7089 }
7090 
7091 /*
7092  * This finds a zone that can be used for ZONE_MOVABLE pages. The
7093  * assumption is made that zones within a node are ordered in monotonic
7094  * increasing memory addresses so that the "highest" populated zone is used
7095  */
7096 static void __init find_usable_zone_for_movable(void)
7097 {
7098 	int zone_index;
7099 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7100 		if (zone_index == ZONE_MOVABLE)
7101 			continue;
7102 
7103 		if (arch_zone_highest_possible_pfn[zone_index] >
7104 				arch_zone_lowest_possible_pfn[zone_index])
7105 			break;
7106 	}
7107 
7108 	VM_BUG_ON(zone_index == -1);
7109 	movable_zone = zone_index;
7110 }
7111 
7112 /*
7113  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7114  * because it is sized independent of architecture. Unlike the other zones,
7115  * the starting point for ZONE_MOVABLE is not fixed. It may be different
7116  * in each node depending on the size of each node and how evenly kernelcore
7117  * is distributed. This helper function adjusts the zone ranges
7118  * provided by the architecture for a given node by using the end of the
7119  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7120  * zones within a node are in order of monotonic increases memory addresses
7121  */
7122 static void __init adjust_zone_range_for_zone_movable(int nid,
7123 					unsigned long zone_type,
7124 					unsigned long node_start_pfn,
7125 					unsigned long node_end_pfn,
7126 					unsigned long *zone_start_pfn,
7127 					unsigned long *zone_end_pfn)
7128 {
7129 	/* Only adjust if ZONE_MOVABLE is on this node */
7130 	if (zone_movable_pfn[nid]) {
7131 		/* Size ZONE_MOVABLE */
7132 		if (zone_type == ZONE_MOVABLE) {
7133 			*zone_start_pfn = zone_movable_pfn[nid];
7134 			*zone_end_pfn = min(node_end_pfn,
7135 				arch_zone_highest_possible_pfn[movable_zone]);
7136 
7137 		/* Adjust for ZONE_MOVABLE starting within this range */
7138 		} else if (!mirrored_kernelcore &&
7139 			*zone_start_pfn < zone_movable_pfn[nid] &&
7140 			*zone_end_pfn > zone_movable_pfn[nid]) {
7141 			*zone_end_pfn = zone_movable_pfn[nid];
7142 
7143 		/* Check if this whole range is within ZONE_MOVABLE */
7144 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
7145 			*zone_start_pfn = *zone_end_pfn;
7146 	}
7147 }
7148 
7149 /*
7150  * Return the number of pages a zone spans in a node, including holes
7151  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7152  */
7153 static unsigned long __init zone_spanned_pages_in_node(int nid,
7154 					unsigned long zone_type,
7155 					unsigned long node_start_pfn,
7156 					unsigned long node_end_pfn,
7157 					unsigned long *zone_start_pfn,
7158 					unsigned long *zone_end_pfn)
7159 {
7160 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7161 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7162 	/* When hotadd a new node from cpu_up(), the node should be empty */
7163 	if (!node_start_pfn && !node_end_pfn)
7164 		return 0;
7165 
7166 	/* Get the start and end of the zone */
7167 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7168 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7169 	adjust_zone_range_for_zone_movable(nid, zone_type,
7170 				node_start_pfn, node_end_pfn,
7171 				zone_start_pfn, zone_end_pfn);
7172 
7173 	/* Check that this node has pages within the zone's required range */
7174 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7175 		return 0;
7176 
7177 	/* Move the zone boundaries inside the node if necessary */
7178 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7179 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7180 
7181 	/* Return the spanned pages */
7182 	return *zone_end_pfn - *zone_start_pfn;
7183 }
7184 
7185 /*
7186  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7187  * then all holes in the requested range will be accounted for.
7188  */
7189 unsigned long __init __absent_pages_in_range(int nid,
7190 				unsigned long range_start_pfn,
7191 				unsigned long range_end_pfn)
7192 {
7193 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
7194 	unsigned long start_pfn, end_pfn;
7195 	int i;
7196 
7197 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7198 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7199 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7200 		nr_absent -= end_pfn - start_pfn;
7201 	}
7202 	return nr_absent;
7203 }
7204 
7205 /**
7206  * absent_pages_in_range - Return number of page frames in holes within a range
7207  * @start_pfn: The start PFN to start searching for holes
7208  * @end_pfn: The end PFN to stop searching for holes
7209  *
7210  * Return: the number of pages frames in memory holes within a range.
7211  */
7212 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7213 							unsigned long end_pfn)
7214 {
7215 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7216 }
7217 
7218 /* Return the number of page frames in holes in a zone on a node */
7219 static unsigned long __init zone_absent_pages_in_node(int nid,
7220 					unsigned long zone_type,
7221 					unsigned long node_start_pfn,
7222 					unsigned long node_end_pfn)
7223 {
7224 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7225 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7226 	unsigned long zone_start_pfn, zone_end_pfn;
7227 	unsigned long nr_absent;
7228 
7229 	/* When hotadd a new node from cpu_up(), the node should be empty */
7230 	if (!node_start_pfn && !node_end_pfn)
7231 		return 0;
7232 
7233 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7234 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7235 
7236 	adjust_zone_range_for_zone_movable(nid, zone_type,
7237 			node_start_pfn, node_end_pfn,
7238 			&zone_start_pfn, &zone_end_pfn);
7239 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7240 
7241 	/*
7242 	 * ZONE_MOVABLE handling.
7243 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7244 	 * and vice versa.
7245 	 */
7246 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7247 		unsigned long start_pfn, end_pfn;
7248 		struct memblock_region *r;
7249 
7250 		for_each_mem_region(r) {
7251 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
7252 					  zone_start_pfn, zone_end_pfn);
7253 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
7254 					zone_start_pfn, zone_end_pfn);
7255 
7256 			if (zone_type == ZONE_MOVABLE &&
7257 			    memblock_is_mirror(r))
7258 				nr_absent += end_pfn - start_pfn;
7259 
7260 			if (zone_type == ZONE_NORMAL &&
7261 			    !memblock_is_mirror(r))
7262 				nr_absent += end_pfn - start_pfn;
7263 		}
7264 	}
7265 
7266 	return nr_absent;
7267 }
7268 
7269 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7270 						unsigned long node_start_pfn,
7271 						unsigned long node_end_pfn)
7272 {
7273 	unsigned long realtotalpages = 0, totalpages = 0;
7274 	enum zone_type i;
7275 
7276 	for (i = 0; i < MAX_NR_ZONES; i++) {
7277 		struct zone *zone = pgdat->node_zones + i;
7278 		unsigned long zone_start_pfn, zone_end_pfn;
7279 		unsigned long spanned, absent;
7280 		unsigned long size, real_size;
7281 
7282 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7283 						     node_start_pfn,
7284 						     node_end_pfn,
7285 						     &zone_start_pfn,
7286 						     &zone_end_pfn);
7287 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
7288 						   node_start_pfn,
7289 						   node_end_pfn);
7290 
7291 		size = spanned;
7292 		real_size = size - absent;
7293 
7294 		if (size)
7295 			zone->zone_start_pfn = zone_start_pfn;
7296 		else
7297 			zone->zone_start_pfn = 0;
7298 		zone->spanned_pages = size;
7299 		zone->present_pages = real_size;
7300 #if defined(CONFIG_MEMORY_HOTPLUG)
7301 		zone->present_early_pages = real_size;
7302 #endif
7303 
7304 		totalpages += size;
7305 		realtotalpages += real_size;
7306 	}
7307 
7308 	pgdat->node_spanned_pages = totalpages;
7309 	pgdat->node_present_pages = realtotalpages;
7310 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7311 }
7312 
7313 #ifndef CONFIG_SPARSEMEM
7314 /*
7315  * Calculate the size of the zone->blockflags rounded to an unsigned long
7316  * Start by making sure zonesize is a multiple of pageblock_order by rounding
7317  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
7318  * round what is now in bits to nearest long in bits, then return it in
7319  * bytes.
7320  */
7321 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7322 {
7323 	unsigned long usemapsize;
7324 
7325 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7326 	usemapsize = roundup(zonesize, pageblock_nr_pages);
7327 	usemapsize = usemapsize >> pageblock_order;
7328 	usemapsize *= NR_PAGEBLOCK_BITS;
7329 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7330 
7331 	return usemapsize / 8;
7332 }
7333 
7334 static void __ref setup_usemap(struct zone *zone)
7335 {
7336 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7337 					       zone->spanned_pages);
7338 	zone->pageblock_flags = NULL;
7339 	if (usemapsize) {
7340 		zone->pageblock_flags =
7341 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7342 					    zone_to_nid(zone));
7343 		if (!zone->pageblock_flags)
7344 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7345 			      usemapsize, zone->name, zone_to_nid(zone));
7346 	}
7347 }
7348 #else
7349 static inline void setup_usemap(struct zone *zone) {}
7350 #endif /* CONFIG_SPARSEMEM */
7351 
7352 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7353 
7354 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7355 void __init set_pageblock_order(void)
7356 {
7357 	unsigned int order = MAX_ORDER - 1;
7358 
7359 	/* Check that pageblock_nr_pages has not already been setup */
7360 	if (pageblock_order)
7361 		return;
7362 
7363 	/* Don't let pageblocks exceed the maximum allocation granularity. */
7364 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
7365 		order = HUGETLB_PAGE_ORDER;
7366 
7367 	/*
7368 	 * Assume the largest contiguous order of interest is a huge page.
7369 	 * This value may be variable depending on boot parameters on IA64 and
7370 	 * powerpc.
7371 	 */
7372 	pageblock_order = order;
7373 }
7374 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7375 
7376 /*
7377  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7378  * is unused as pageblock_order is set at compile-time. See
7379  * include/linux/pageblock-flags.h for the values of pageblock_order based on
7380  * the kernel config
7381  */
7382 void __init set_pageblock_order(void)
7383 {
7384 }
7385 
7386 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7387 
7388 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7389 						unsigned long present_pages)
7390 {
7391 	unsigned long pages = spanned_pages;
7392 
7393 	/*
7394 	 * Provide a more accurate estimation if there are holes within
7395 	 * the zone and SPARSEMEM is in use. If there are holes within the
7396 	 * zone, each populated memory region may cost us one or two extra
7397 	 * memmap pages due to alignment because memmap pages for each
7398 	 * populated regions may not be naturally aligned on page boundary.
7399 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7400 	 */
7401 	if (spanned_pages > present_pages + (present_pages >> 4) &&
7402 	    IS_ENABLED(CONFIG_SPARSEMEM))
7403 		pages = present_pages;
7404 
7405 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7406 }
7407 
7408 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7409 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7410 {
7411 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7412 
7413 	spin_lock_init(&ds_queue->split_queue_lock);
7414 	INIT_LIST_HEAD(&ds_queue->split_queue);
7415 	ds_queue->split_queue_len = 0;
7416 }
7417 #else
7418 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7419 #endif
7420 
7421 #ifdef CONFIG_COMPACTION
7422 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7423 {
7424 	init_waitqueue_head(&pgdat->kcompactd_wait);
7425 }
7426 #else
7427 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7428 #endif
7429 
7430 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7431 {
7432 	int i;
7433 
7434 	pgdat_resize_init(pgdat);
7435 
7436 	pgdat_init_split_queue(pgdat);
7437 	pgdat_init_kcompactd(pgdat);
7438 
7439 	init_waitqueue_head(&pgdat->kswapd_wait);
7440 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
7441 
7442 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
7443 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
7444 
7445 	pgdat_page_ext_init(pgdat);
7446 	lruvec_init(&pgdat->__lruvec);
7447 }
7448 
7449 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7450 							unsigned long remaining_pages)
7451 {
7452 	atomic_long_set(&zone->managed_pages, remaining_pages);
7453 	zone_set_nid(zone, nid);
7454 	zone->name = zone_names[idx];
7455 	zone->zone_pgdat = NODE_DATA(nid);
7456 	spin_lock_init(&zone->lock);
7457 	zone_seqlock_init(zone);
7458 	zone_pcp_init(zone);
7459 }
7460 
7461 /*
7462  * Set up the zone data structures
7463  * - init pgdat internals
7464  * - init all zones belonging to this node
7465  *
7466  * NOTE: this function is only called during memory hotplug
7467  */
7468 #ifdef CONFIG_MEMORY_HOTPLUG
7469 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
7470 {
7471 	int nid = pgdat->node_id;
7472 	enum zone_type z;
7473 	int cpu;
7474 
7475 	pgdat_init_internals(pgdat);
7476 
7477 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
7478 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
7479 
7480 	/*
7481 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
7482 	 * Note that kswapd will init kswapd_highest_zoneidx properly
7483 	 * when it starts in the near future.
7484 	 */
7485 	pgdat->nr_zones = 0;
7486 	pgdat->kswapd_order = 0;
7487 	pgdat->kswapd_highest_zoneidx = 0;
7488 	pgdat->node_start_pfn = 0;
7489 	for_each_online_cpu(cpu) {
7490 		struct per_cpu_nodestat *p;
7491 
7492 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
7493 		memset(p, 0, sizeof(*p));
7494 	}
7495 
7496 	for (z = 0; z < MAX_NR_ZONES; z++)
7497 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7498 }
7499 #endif
7500 
7501 /*
7502  * Set up the zone data structures:
7503  *   - mark all pages reserved
7504  *   - mark all memory queues empty
7505  *   - clear the memory bitmaps
7506  *
7507  * NOTE: pgdat should get zeroed by caller.
7508  * NOTE: this function is only called during early init.
7509  */
7510 static void __init free_area_init_core(struct pglist_data *pgdat)
7511 {
7512 	enum zone_type j;
7513 	int nid = pgdat->node_id;
7514 
7515 	pgdat_init_internals(pgdat);
7516 	pgdat->per_cpu_nodestats = &boot_nodestats;
7517 
7518 	for (j = 0; j < MAX_NR_ZONES; j++) {
7519 		struct zone *zone = pgdat->node_zones + j;
7520 		unsigned long size, freesize, memmap_pages;
7521 
7522 		size = zone->spanned_pages;
7523 		freesize = zone->present_pages;
7524 
7525 		/*
7526 		 * Adjust freesize so that it accounts for how much memory
7527 		 * is used by this zone for memmap. This affects the watermark
7528 		 * and per-cpu initialisations
7529 		 */
7530 		memmap_pages = calc_memmap_size(size, freesize);
7531 		if (!is_highmem_idx(j)) {
7532 			if (freesize >= memmap_pages) {
7533 				freesize -= memmap_pages;
7534 				if (memmap_pages)
7535 					pr_debug("  %s zone: %lu pages used for memmap\n",
7536 						 zone_names[j], memmap_pages);
7537 			} else
7538 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
7539 					zone_names[j], memmap_pages, freesize);
7540 		}
7541 
7542 		/* Account for reserved pages */
7543 		if (j == 0 && freesize > dma_reserve) {
7544 			freesize -= dma_reserve;
7545 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7546 		}
7547 
7548 		if (!is_highmem_idx(j))
7549 			nr_kernel_pages += freesize;
7550 		/* Charge for highmem memmap if there are enough kernel pages */
7551 		else if (nr_kernel_pages > memmap_pages * 2)
7552 			nr_kernel_pages -= memmap_pages;
7553 		nr_all_pages += freesize;
7554 
7555 		/*
7556 		 * Set an approximate value for lowmem here, it will be adjusted
7557 		 * when the bootmem allocator frees pages into the buddy system.
7558 		 * And all highmem pages will be managed by the buddy system.
7559 		 */
7560 		zone_init_internals(zone, j, nid, freesize);
7561 
7562 		if (!size)
7563 			continue;
7564 
7565 		set_pageblock_order();
7566 		setup_usemap(zone);
7567 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7568 	}
7569 }
7570 
7571 #ifdef CONFIG_FLATMEM
7572 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7573 {
7574 	unsigned long __maybe_unused start = 0;
7575 	unsigned long __maybe_unused offset = 0;
7576 
7577 	/* Skip empty nodes */
7578 	if (!pgdat->node_spanned_pages)
7579 		return;
7580 
7581 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7582 	offset = pgdat->node_start_pfn - start;
7583 	/* ia64 gets its own node_mem_map, before this, without bootmem */
7584 	if (!pgdat->node_mem_map) {
7585 		unsigned long size, end;
7586 		struct page *map;
7587 
7588 		/*
7589 		 * The zone's endpoints aren't required to be MAX_ORDER
7590 		 * aligned but the node_mem_map endpoints must be in order
7591 		 * for the buddy allocator to function correctly.
7592 		 */
7593 		end = pgdat_end_pfn(pgdat);
7594 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7595 		size =  (end - start) * sizeof(struct page);
7596 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7597 				   pgdat->node_id, false);
7598 		if (!map)
7599 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7600 			      size, pgdat->node_id);
7601 		pgdat->node_mem_map = map + offset;
7602 	}
7603 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7604 				__func__, pgdat->node_id, (unsigned long)pgdat,
7605 				(unsigned long)pgdat->node_mem_map);
7606 #ifndef CONFIG_NUMA
7607 	/*
7608 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7609 	 */
7610 	if (pgdat == NODE_DATA(0)) {
7611 		mem_map = NODE_DATA(0)->node_mem_map;
7612 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7613 			mem_map -= offset;
7614 	}
7615 #endif
7616 }
7617 #else
7618 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7619 #endif /* CONFIG_FLATMEM */
7620 
7621 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7622 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7623 {
7624 	pgdat->first_deferred_pfn = ULONG_MAX;
7625 }
7626 #else
7627 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7628 #endif
7629 
7630 static void __init free_area_init_node(int nid)
7631 {
7632 	pg_data_t *pgdat = NODE_DATA(nid);
7633 	unsigned long start_pfn = 0;
7634 	unsigned long end_pfn = 0;
7635 
7636 	/* pg_data_t should be reset to zero when it's allocated */
7637 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7638 
7639 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7640 
7641 	pgdat->node_id = nid;
7642 	pgdat->node_start_pfn = start_pfn;
7643 	pgdat->per_cpu_nodestats = NULL;
7644 
7645 	if (start_pfn != end_pfn) {
7646 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7647 			(u64)start_pfn << PAGE_SHIFT,
7648 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7649 	} else {
7650 		pr_info("Initmem setup node %d as memoryless\n", nid);
7651 	}
7652 
7653 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7654 
7655 	alloc_node_mem_map(pgdat);
7656 	pgdat_set_deferred_range(pgdat);
7657 
7658 	free_area_init_core(pgdat);
7659 }
7660 
7661 static void __init free_area_init_memoryless_node(int nid)
7662 {
7663 	free_area_init_node(nid);
7664 }
7665 
7666 #if MAX_NUMNODES > 1
7667 /*
7668  * Figure out the number of possible node ids.
7669  */
7670 void __init setup_nr_node_ids(void)
7671 {
7672 	unsigned int highest;
7673 
7674 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7675 	nr_node_ids = highest + 1;
7676 }
7677 #endif
7678 
7679 /**
7680  * node_map_pfn_alignment - determine the maximum internode alignment
7681  *
7682  * This function should be called after node map is populated and sorted.
7683  * It calculates the maximum power of two alignment which can distinguish
7684  * all the nodes.
7685  *
7686  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7687  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7688  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7689  * shifted, 1GiB is enough and this function will indicate so.
7690  *
7691  * This is used to test whether pfn -> nid mapping of the chosen memory
7692  * model has fine enough granularity to avoid incorrect mapping for the
7693  * populated node map.
7694  *
7695  * Return: the determined alignment in pfn's.  0 if there is no alignment
7696  * requirement (single node).
7697  */
7698 unsigned long __init node_map_pfn_alignment(void)
7699 {
7700 	unsigned long accl_mask = 0, last_end = 0;
7701 	unsigned long start, end, mask;
7702 	int last_nid = NUMA_NO_NODE;
7703 	int i, nid;
7704 
7705 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7706 		if (!start || last_nid < 0 || last_nid == nid) {
7707 			last_nid = nid;
7708 			last_end = end;
7709 			continue;
7710 		}
7711 
7712 		/*
7713 		 * Start with a mask granular enough to pin-point to the
7714 		 * start pfn and tick off bits one-by-one until it becomes
7715 		 * too coarse to separate the current node from the last.
7716 		 */
7717 		mask = ~((1 << __ffs(start)) - 1);
7718 		while (mask && last_end <= (start & (mask << 1)))
7719 			mask <<= 1;
7720 
7721 		/* accumulate all internode masks */
7722 		accl_mask |= mask;
7723 	}
7724 
7725 	/* convert mask to number of pages */
7726 	return ~accl_mask + 1;
7727 }
7728 
7729 /**
7730  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7731  *
7732  * Return: the minimum PFN based on information provided via
7733  * memblock_set_node().
7734  */
7735 unsigned long __init find_min_pfn_with_active_regions(void)
7736 {
7737 	return PHYS_PFN(memblock_start_of_DRAM());
7738 }
7739 
7740 /*
7741  * early_calculate_totalpages()
7742  * Sum pages in active regions for movable zone.
7743  * Populate N_MEMORY for calculating usable_nodes.
7744  */
7745 static unsigned long __init early_calculate_totalpages(void)
7746 {
7747 	unsigned long totalpages = 0;
7748 	unsigned long start_pfn, end_pfn;
7749 	int i, nid;
7750 
7751 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7752 		unsigned long pages = end_pfn - start_pfn;
7753 
7754 		totalpages += pages;
7755 		if (pages)
7756 			node_set_state(nid, N_MEMORY);
7757 	}
7758 	return totalpages;
7759 }
7760 
7761 /*
7762  * Find the PFN the Movable zone begins in each node. Kernel memory
7763  * is spread evenly between nodes as long as the nodes have enough
7764  * memory. When they don't, some nodes will have more kernelcore than
7765  * others
7766  */
7767 static void __init find_zone_movable_pfns_for_nodes(void)
7768 {
7769 	int i, nid;
7770 	unsigned long usable_startpfn;
7771 	unsigned long kernelcore_node, kernelcore_remaining;
7772 	/* save the state before borrow the nodemask */
7773 	nodemask_t saved_node_state = node_states[N_MEMORY];
7774 	unsigned long totalpages = early_calculate_totalpages();
7775 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7776 	struct memblock_region *r;
7777 
7778 	/* Need to find movable_zone earlier when movable_node is specified. */
7779 	find_usable_zone_for_movable();
7780 
7781 	/*
7782 	 * If movable_node is specified, ignore kernelcore and movablecore
7783 	 * options.
7784 	 */
7785 	if (movable_node_is_enabled()) {
7786 		for_each_mem_region(r) {
7787 			if (!memblock_is_hotpluggable(r))
7788 				continue;
7789 
7790 			nid = memblock_get_region_node(r);
7791 
7792 			usable_startpfn = PFN_DOWN(r->base);
7793 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7794 				min(usable_startpfn, zone_movable_pfn[nid]) :
7795 				usable_startpfn;
7796 		}
7797 
7798 		goto out2;
7799 	}
7800 
7801 	/*
7802 	 * If kernelcore=mirror is specified, ignore movablecore option
7803 	 */
7804 	if (mirrored_kernelcore) {
7805 		bool mem_below_4gb_not_mirrored = false;
7806 
7807 		for_each_mem_region(r) {
7808 			if (memblock_is_mirror(r))
7809 				continue;
7810 
7811 			nid = memblock_get_region_node(r);
7812 
7813 			usable_startpfn = memblock_region_memory_base_pfn(r);
7814 
7815 			if (usable_startpfn < 0x100000) {
7816 				mem_below_4gb_not_mirrored = true;
7817 				continue;
7818 			}
7819 
7820 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7821 				min(usable_startpfn, zone_movable_pfn[nid]) :
7822 				usable_startpfn;
7823 		}
7824 
7825 		if (mem_below_4gb_not_mirrored)
7826 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7827 
7828 		goto out2;
7829 	}
7830 
7831 	/*
7832 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7833 	 * amount of necessary memory.
7834 	 */
7835 	if (required_kernelcore_percent)
7836 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7837 				       10000UL;
7838 	if (required_movablecore_percent)
7839 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7840 					10000UL;
7841 
7842 	/*
7843 	 * If movablecore= was specified, calculate what size of
7844 	 * kernelcore that corresponds so that memory usable for
7845 	 * any allocation type is evenly spread. If both kernelcore
7846 	 * and movablecore are specified, then the value of kernelcore
7847 	 * will be used for required_kernelcore if it's greater than
7848 	 * what movablecore would have allowed.
7849 	 */
7850 	if (required_movablecore) {
7851 		unsigned long corepages;
7852 
7853 		/*
7854 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7855 		 * was requested by the user
7856 		 */
7857 		required_movablecore =
7858 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7859 		required_movablecore = min(totalpages, required_movablecore);
7860 		corepages = totalpages - required_movablecore;
7861 
7862 		required_kernelcore = max(required_kernelcore, corepages);
7863 	}
7864 
7865 	/*
7866 	 * If kernelcore was not specified or kernelcore size is larger
7867 	 * than totalpages, there is no ZONE_MOVABLE.
7868 	 */
7869 	if (!required_kernelcore || required_kernelcore >= totalpages)
7870 		goto out;
7871 
7872 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7873 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7874 
7875 restart:
7876 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7877 	kernelcore_node = required_kernelcore / usable_nodes;
7878 	for_each_node_state(nid, N_MEMORY) {
7879 		unsigned long start_pfn, end_pfn;
7880 
7881 		/*
7882 		 * Recalculate kernelcore_node if the division per node
7883 		 * now exceeds what is necessary to satisfy the requested
7884 		 * amount of memory for the kernel
7885 		 */
7886 		if (required_kernelcore < kernelcore_node)
7887 			kernelcore_node = required_kernelcore / usable_nodes;
7888 
7889 		/*
7890 		 * As the map is walked, we track how much memory is usable
7891 		 * by the kernel using kernelcore_remaining. When it is
7892 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7893 		 */
7894 		kernelcore_remaining = kernelcore_node;
7895 
7896 		/* Go through each range of PFNs within this node */
7897 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7898 			unsigned long size_pages;
7899 
7900 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7901 			if (start_pfn >= end_pfn)
7902 				continue;
7903 
7904 			/* Account for what is only usable for kernelcore */
7905 			if (start_pfn < usable_startpfn) {
7906 				unsigned long kernel_pages;
7907 				kernel_pages = min(end_pfn, usable_startpfn)
7908 								- start_pfn;
7909 
7910 				kernelcore_remaining -= min(kernel_pages,
7911 							kernelcore_remaining);
7912 				required_kernelcore -= min(kernel_pages,
7913 							required_kernelcore);
7914 
7915 				/* Continue if range is now fully accounted */
7916 				if (end_pfn <= usable_startpfn) {
7917 
7918 					/*
7919 					 * Push zone_movable_pfn to the end so
7920 					 * that if we have to rebalance
7921 					 * kernelcore across nodes, we will
7922 					 * not double account here
7923 					 */
7924 					zone_movable_pfn[nid] = end_pfn;
7925 					continue;
7926 				}
7927 				start_pfn = usable_startpfn;
7928 			}
7929 
7930 			/*
7931 			 * The usable PFN range for ZONE_MOVABLE is from
7932 			 * start_pfn->end_pfn. Calculate size_pages as the
7933 			 * number of pages used as kernelcore
7934 			 */
7935 			size_pages = end_pfn - start_pfn;
7936 			if (size_pages > kernelcore_remaining)
7937 				size_pages = kernelcore_remaining;
7938 			zone_movable_pfn[nid] = start_pfn + size_pages;
7939 
7940 			/*
7941 			 * Some kernelcore has been met, update counts and
7942 			 * break if the kernelcore for this node has been
7943 			 * satisfied
7944 			 */
7945 			required_kernelcore -= min(required_kernelcore,
7946 								size_pages);
7947 			kernelcore_remaining -= size_pages;
7948 			if (!kernelcore_remaining)
7949 				break;
7950 		}
7951 	}
7952 
7953 	/*
7954 	 * If there is still required_kernelcore, we do another pass with one
7955 	 * less node in the count. This will push zone_movable_pfn[nid] further
7956 	 * along on the nodes that still have memory until kernelcore is
7957 	 * satisfied
7958 	 */
7959 	usable_nodes--;
7960 	if (usable_nodes && required_kernelcore > usable_nodes)
7961 		goto restart;
7962 
7963 out2:
7964 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7965 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
7966 		unsigned long start_pfn, end_pfn;
7967 
7968 		zone_movable_pfn[nid] =
7969 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7970 
7971 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7972 		if (zone_movable_pfn[nid] >= end_pfn)
7973 			zone_movable_pfn[nid] = 0;
7974 	}
7975 
7976 out:
7977 	/* restore the node_state */
7978 	node_states[N_MEMORY] = saved_node_state;
7979 }
7980 
7981 /* Any regular or high memory on that node ? */
7982 static void check_for_memory(pg_data_t *pgdat, int nid)
7983 {
7984 	enum zone_type zone_type;
7985 
7986 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7987 		struct zone *zone = &pgdat->node_zones[zone_type];
7988 		if (populated_zone(zone)) {
7989 			if (IS_ENABLED(CONFIG_HIGHMEM))
7990 				node_set_state(nid, N_HIGH_MEMORY);
7991 			if (zone_type <= ZONE_NORMAL)
7992 				node_set_state(nid, N_NORMAL_MEMORY);
7993 			break;
7994 		}
7995 	}
7996 }
7997 
7998 /*
7999  * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
8000  * such cases we allow max_zone_pfn sorted in the descending order
8001  */
8002 bool __weak arch_has_descending_max_zone_pfns(void)
8003 {
8004 	return false;
8005 }
8006 
8007 /**
8008  * free_area_init - Initialise all pg_data_t and zone data
8009  * @max_zone_pfn: an array of max PFNs for each zone
8010  *
8011  * This will call free_area_init_node() for each active node in the system.
8012  * Using the page ranges provided by memblock_set_node(), the size of each
8013  * zone in each node and their holes is calculated. If the maximum PFN
8014  * between two adjacent zones match, it is assumed that the zone is empty.
8015  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8016  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8017  * starts where the previous one ended. For example, ZONE_DMA32 starts
8018  * at arch_max_dma_pfn.
8019  */
8020 void __init free_area_init(unsigned long *max_zone_pfn)
8021 {
8022 	unsigned long start_pfn, end_pfn;
8023 	int i, nid, zone;
8024 	bool descending;
8025 
8026 	/* Record where the zone boundaries are */
8027 	memset(arch_zone_lowest_possible_pfn, 0,
8028 				sizeof(arch_zone_lowest_possible_pfn));
8029 	memset(arch_zone_highest_possible_pfn, 0,
8030 				sizeof(arch_zone_highest_possible_pfn));
8031 
8032 	start_pfn = find_min_pfn_with_active_regions();
8033 	descending = arch_has_descending_max_zone_pfns();
8034 
8035 	for (i = 0; i < MAX_NR_ZONES; i++) {
8036 		if (descending)
8037 			zone = MAX_NR_ZONES - i - 1;
8038 		else
8039 			zone = i;
8040 
8041 		if (zone == ZONE_MOVABLE)
8042 			continue;
8043 
8044 		end_pfn = max(max_zone_pfn[zone], start_pfn);
8045 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
8046 		arch_zone_highest_possible_pfn[zone] = end_pfn;
8047 
8048 		start_pfn = end_pfn;
8049 	}
8050 
8051 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
8052 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
8053 	find_zone_movable_pfns_for_nodes();
8054 
8055 	/* Print out the zone ranges */
8056 	pr_info("Zone ranges:\n");
8057 	for (i = 0; i < MAX_NR_ZONES; i++) {
8058 		if (i == ZONE_MOVABLE)
8059 			continue;
8060 		pr_info("  %-8s ", zone_names[i]);
8061 		if (arch_zone_lowest_possible_pfn[i] ==
8062 				arch_zone_highest_possible_pfn[i])
8063 			pr_cont("empty\n");
8064 		else
8065 			pr_cont("[mem %#018Lx-%#018Lx]\n",
8066 				(u64)arch_zone_lowest_possible_pfn[i]
8067 					<< PAGE_SHIFT,
8068 				((u64)arch_zone_highest_possible_pfn[i]
8069 					<< PAGE_SHIFT) - 1);
8070 	}
8071 
8072 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
8073 	pr_info("Movable zone start for each node\n");
8074 	for (i = 0; i < MAX_NUMNODES; i++) {
8075 		if (zone_movable_pfn[i])
8076 			pr_info("  Node %d: %#018Lx\n", i,
8077 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8078 	}
8079 
8080 	/*
8081 	 * Print out the early node map, and initialize the
8082 	 * subsection-map relative to active online memory ranges to
8083 	 * enable future "sub-section" extensions of the memory map.
8084 	 */
8085 	pr_info("Early memory node ranges\n");
8086 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8087 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8088 			(u64)start_pfn << PAGE_SHIFT,
8089 			((u64)end_pfn << PAGE_SHIFT) - 1);
8090 		subsection_map_init(start_pfn, end_pfn - start_pfn);
8091 	}
8092 
8093 	/* Initialise every node */
8094 	mminit_verify_pageflags_layout();
8095 	setup_nr_node_ids();
8096 	for_each_node(nid) {
8097 		pg_data_t *pgdat;
8098 
8099 		if (!node_online(nid)) {
8100 			pr_info("Initializing node %d as memoryless\n", nid);
8101 
8102 			/* Allocator not initialized yet */
8103 			pgdat = arch_alloc_nodedata(nid);
8104 			if (!pgdat) {
8105 				pr_err("Cannot allocate %zuB for node %d.\n",
8106 						sizeof(*pgdat), nid);
8107 				continue;
8108 			}
8109 			arch_refresh_nodedata(nid, pgdat);
8110 			free_area_init_memoryless_node(nid);
8111 
8112 			/*
8113 			 * We do not want to confuse userspace by sysfs
8114 			 * files/directories for node without any memory
8115 			 * attached to it, so this node is not marked as
8116 			 * N_MEMORY and not marked online so that no sysfs
8117 			 * hierarchy will be created via register_one_node for
8118 			 * it. The pgdat will get fully initialized by
8119 			 * hotadd_init_pgdat() when memory is hotplugged into
8120 			 * this node.
8121 			 */
8122 			continue;
8123 		}
8124 
8125 		pgdat = NODE_DATA(nid);
8126 		free_area_init_node(nid);
8127 
8128 		/* Any memory on that node */
8129 		if (pgdat->node_present_pages)
8130 			node_set_state(nid, N_MEMORY);
8131 		check_for_memory(pgdat, nid);
8132 	}
8133 
8134 	memmap_init();
8135 }
8136 
8137 static int __init cmdline_parse_core(char *p, unsigned long *core,
8138 				     unsigned long *percent)
8139 {
8140 	unsigned long long coremem;
8141 	char *endptr;
8142 
8143 	if (!p)
8144 		return -EINVAL;
8145 
8146 	/* Value may be a percentage of total memory, otherwise bytes */
8147 	coremem = simple_strtoull(p, &endptr, 0);
8148 	if (*endptr == '%') {
8149 		/* Paranoid check for percent values greater than 100 */
8150 		WARN_ON(coremem > 100);
8151 
8152 		*percent = coremem;
8153 	} else {
8154 		coremem = memparse(p, &p);
8155 		/* Paranoid check that UL is enough for the coremem value */
8156 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8157 
8158 		*core = coremem >> PAGE_SHIFT;
8159 		*percent = 0UL;
8160 	}
8161 	return 0;
8162 }
8163 
8164 /*
8165  * kernelcore=size sets the amount of memory for use for allocations that
8166  * cannot be reclaimed or migrated.
8167  */
8168 static int __init cmdline_parse_kernelcore(char *p)
8169 {
8170 	/* parse kernelcore=mirror */
8171 	if (parse_option_str(p, "mirror")) {
8172 		mirrored_kernelcore = true;
8173 		return 0;
8174 	}
8175 
8176 	return cmdline_parse_core(p, &required_kernelcore,
8177 				  &required_kernelcore_percent);
8178 }
8179 
8180 /*
8181  * movablecore=size sets the amount of memory for use for allocations that
8182  * can be reclaimed or migrated.
8183  */
8184 static int __init cmdline_parse_movablecore(char *p)
8185 {
8186 	return cmdline_parse_core(p, &required_movablecore,
8187 				  &required_movablecore_percent);
8188 }
8189 
8190 early_param("kernelcore", cmdline_parse_kernelcore);
8191 early_param("movablecore", cmdline_parse_movablecore);
8192 
8193 void adjust_managed_page_count(struct page *page, long count)
8194 {
8195 	atomic_long_add(count, &page_zone(page)->managed_pages);
8196 	totalram_pages_add(count);
8197 #ifdef CONFIG_HIGHMEM
8198 	if (PageHighMem(page))
8199 		totalhigh_pages_add(count);
8200 #endif
8201 }
8202 EXPORT_SYMBOL(adjust_managed_page_count);
8203 
8204 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8205 {
8206 	void *pos;
8207 	unsigned long pages = 0;
8208 
8209 	start = (void *)PAGE_ALIGN((unsigned long)start);
8210 	end = (void *)((unsigned long)end & PAGE_MASK);
8211 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8212 		struct page *page = virt_to_page(pos);
8213 		void *direct_map_addr;
8214 
8215 		/*
8216 		 * 'direct_map_addr' might be different from 'pos'
8217 		 * because some architectures' virt_to_page()
8218 		 * work with aliases.  Getting the direct map
8219 		 * address ensures that we get a _writeable_
8220 		 * alias for the memset().
8221 		 */
8222 		direct_map_addr = page_address(page);
8223 		/*
8224 		 * Perform a kasan-unchecked memset() since this memory
8225 		 * has not been initialized.
8226 		 */
8227 		direct_map_addr = kasan_reset_tag(direct_map_addr);
8228 		if ((unsigned int)poison <= 0xFF)
8229 			memset(direct_map_addr, poison, PAGE_SIZE);
8230 
8231 		free_reserved_page(page);
8232 	}
8233 
8234 	if (pages && s)
8235 		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
8236 
8237 	return pages;
8238 }
8239 
8240 void __init mem_init_print_info(void)
8241 {
8242 	unsigned long physpages, codesize, datasize, rosize, bss_size;
8243 	unsigned long init_code_size, init_data_size;
8244 
8245 	physpages = get_num_physpages();
8246 	codesize = _etext - _stext;
8247 	datasize = _edata - _sdata;
8248 	rosize = __end_rodata - __start_rodata;
8249 	bss_size = __bss_stop - __bss_start;
8250 	init_data_size = __init_end - __init_begin;
8251 	init_code_size = _einittext - _sinittext;
8252 
8253 	/*
8254 	 * Detect special cases and adjust section sizes accordingly:
8255 	 * 1) .init.* may be embedded into .data sections
8256 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
8257 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
8258 	 * 3) .rodata.* may be embedded into .text or .data sections.
8259 	 */
8260 #define adj_init_size(start, end, size, pos, adj) \
8261 	do { \
8262 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
8263 			size -= adj; \
8264 	} while (0)
8265 
8266 	adj_init_size(__init_begin, __init_end, init_data_size,
8267 		     _sinittext, init_code_size);
8268 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8269 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8270 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8271 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8272 
8273 #undef	adj_init_size
8274 
8275 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8276 #ifdef	CONFIG_HIGHMEM
8277 		", %luK highmem"
8278 #endif
8279 		")\n",
8280 		K(nr_free_pages()), K(physpages),
8281 		codesize >> 10, datasize >> 10, rosize >> 10,
8282 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
8283 		K(physpages - totalram_pages() - totalcma_pages),
8284 		K(totalcma_pages)
8285 #ifdef	CONFIG_HIGHMEM
8286 		, K(totalhigh_pages())
8287 #endif
8288 		);
8289 }
8290 
8291 /**
8292  * set_dma_reserve - set the specified number of pages reserved in the first zone
8293  * @new_dma_reserve: The number of pages to mark reserved
8294  *
8295  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8296  * In the DMA zone, a significant percentage may be consumed by kernel image
8297  * and other unfreeable allocations which can skew the watermarks badly. This
8298  * function may optionally be used to account for unfreeable pages in the
8299  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8300  * smaller per-cpu batchsize.
8301  */
8302 void __init set_dma_reserve(unsigned long new_dma_reserve)
8303 {
8304 	dma_reserve = new_dma_reserve;
8305 }
8306 
8307 static int page_alloc_cpu_dead(unsigned int cpu)
8308 {
8309 	struct zone *zone;
8310 
8311 	lru_add_drain_cpu(cpu);
8312 	drain_pages(cpu);
8313 
8314 	/*
8315 	 * Spill the event counters of the dead processor
8316 	 * into the current processors event counters.
8317 	 * This artificially elevates the count of the current
8318 	 * processor.
8319 	 */
8320 	vm_events_fold_cpu(cpu);
8321 
8322 	/*
8323 	 * Zero the differential counters of the dead processor
8324 	 * so that the vm statistics are consistent.
8325 	 *
8326 	 * This is only okay since the processor is dead and cannot
8327 	 * race with what we are doing.
8328 	 */
8329 	cpu_vm_stats_fold(cpu);
8330 
8331 	for_each_populated_zone(zone)
8332 		zone_pcp_update(zone, 0);
8333 
8334 	return 0;
8335 }
8336 
8337 static int page_alloc_cpu_online(unsigned int cpu)
8338 {
8339 	struct zone *zone;
8340 
8341 	for_each_populated_zone(zone)
8342 		zone_pcp_update(zone, 1);
8343 	return 0;
8344 }
8345 
8346 #ifdef CONFIG_NUMA
8347 int hashdist = HASHDIST_DEFAULT;
8348 
8349 static int __init set_hashdist(char *str)
8350 {
8351 	if (!str)
8352 		return 0;
8353 	hashdist = simple_strtoul(str, &str, 0);
8354 	return 1;
8355 }
8356 __setup("hashdist=", set_hashdist);
8357 #endif
8358 
8359 void __init page_alloc_init(void)
8360 {
8361 	int ret;
8362 
8363 #ifdef CONFIG_NUMA
8364 	if (num_node_state(N_MEMORY) == 1)
8365 		hashdist = 0;
8366 #endif
8367 
8368 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8369 					"mm/page_alloc:pcp",
8370 					page_alloc_cpu_online,
8371 					page_alloc_cpu_dead);
8372 	WARN_ON(ret < 0);
8373 }
8374 
8375 /*
8376  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8377  *	or min_free_kbytes changes.
8378  */
8379 static void calculate_totalreserve_pages(void)
8380 {
8381 	struct pglist_data *pgdat;
8382 	unsigned long reserve_pages = 0;
8383 	enum zone_type i, j;
8384 
8385 	for_each_online_pgdat(pgdat) {
8386 
8387 		pgdat->totalreserve_pages = 0;
8388 
8389 		for (i = 0; i < MAX_NR_ZONES; i++) {
8390 			struct zone *zone = pgdat->node_zones + i;
8391 			long max = 0;
8392 			unsigned long managed_pages = zone_managed_pages(zone);
8393 
8394 			/* Find valid and maximum lowmem_reserve in the zone */
8395 			for (j = i; j < MAX_NR_ZONES; j++) {
8396 				if (zone->lowmem_reserve[j] > max)
8397 					max = zone->lowmem_reserve[j];
8398 			}
8399 
8400 			/* we treat the high watermark as reserved pages. */
8401 			max += high_wmark_pages(zone);
8402 
8403 			if (max > managed_pages)
8404 				max = managed_pages;
8405 
8406 			pgdat->totalreserve_pages += max;
8407 
8408 			reserve_pages += max;
8409 		}
8410 	}
8411 	totalreserve_pages = reserve_pages;
8412 }
8413 
8414 /*
8415  * setup_per_zone_lowmem_reserve - called whenever
8416  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8417  *	has a correct pages reserved value, so an adequate number of
8418  *	pages are left in the zone after a successful __alloc_pages().
8419  */
8420 static void setup_per_zone_lowmem_reserve(void)
8421 {
8422 	struct pglist_data *pgdat;
8423 	enum zone_type i, j;
8424 
8425 	for_each_online_pgdat(pgdat) {
8426 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8427 			struct zone *zone = &pgdat->node_zones[i];
8428 			int ratio = sysctl_lowmem_reserve_ratio[i];
8429 			bool clear = !ratio || !zone_managed_pages(zone);
8430 			unsigned long managed_pages = 0;
8431 
8432 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
8433 				struct zone *upper_zone = &pgdat->node_zones[j];
8434 
8435 				managed_pages += zone_managed_pages(upper_zone);
8436 
8437 				if (clear)
8438 					zone->lowmem_reserve[j] = 0;
8439 				else
8440 					zone->lowmem_reserve[j] = managed_pages / ratio;
8441 			}
8442 		}
8443 	}
8444 
8445 	/* update totalreserve_pages */
8446 	calculate_totalreserve_pages();
8447 }
8448 
8449 static void __setup_per_zone_wmarks(void)
8450 {
8451 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8452 	unsigned long lowmem_pages = 0;
8453 	struct zone *zone;
8454 	unsigned long flags;
8455 
8456 	/* Calculate total number of !ZONE_HIGHMEM pages */
8457 	for_each_zone(zone) {
8458 		if (!is_highmem(zone))
8459 			lowmem_pages += zone_managed_pages(zone);
8460 	}
8461 
8462 	for_each_zone(zone) {
8463 		u64 tmp;
8464 
8465 		spin_lock_irqsave(&zone->lock, flags);
8466 		tmp = (u64)pages_min * zone_managed_pages(zone);
8467 		do_div(tmp, lowmem_pages);
8468 		if (is_highmem(zone)) {
8469 			/*
8470 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8471 			 * need highmem pages, so cap pages_min to a small
8472 			 * value here.
8473 			 *
8474 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8475 			 * deltas control async page reclaim, and so should
8476 			 * not be capped for highmem.
8477 			 */
8478 			unsigned long min_pages;
8479 
8480 			min_pages = zone_managed_pages(zone) / 1024;
8481 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8482 			zone->_watermark[WMARK_MIN] = min_pages;
8483 		} else {
8484 			/*
8485 			 * If it's a lowmem zone, reserve a number of pages
8486 			 * proportionate to the zone's size.
8487 			 */
8488 			zone->_watermark[WMARK_MIN] = tmp;
8489 		}
8490 
8491 		/*
8492 		 * Set the kswapd watermarks distance according to the
8493 		 * scale factor in proportion to available memory, but
8494 		 * ensure a minimum size on small systems.
8495 		 */
8496 		tmp = max_t(u64, tmp >> 2,
8497 			    mult_frac(zone_managed_pages(zone),
8498 				      watermark_scale_factor, 10000));
8499 
8500 		zone->watermark_boost = 0;
8501 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
8502 		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8503 		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
8504 
8505 		spin_unlock_irqrestore(&zone->lock, flags);
8506 	}
8507 
8508 	/* update totalreserve_pages */
8509 	calculate_totalreserve_pages();
8510 }
8511 
8512 /**
8513  * setup_per_zone_wmarks - called when min_free_kbytes changes
8514  * or when memory is hot-{added|removed}
8515  *
8516  * Ensures that the watermark[min,low,high] values for each zone are set
8517  * correctly with respect to min_free_kbytes.
8518  */
8519 void setup_per_zone_wmarks(void)
8520 {
8521 	struct zone *zone;
8522 	static DEFINE_SPINLOCK(lock);
8523 
8524 	spin_lock(&lock);
8525 	__setup_per_zone_wmarks();
8526 	spin_unlock(&lock);
8527 
8528 	/*
8529 	 * The watermark size have changed so update the pcpu batch
8530 	 * and high limits or the limits may be inappropriate.
8531 	 */
8532 	for_each_zone(zone)
8533 		zone_pcp_update(zone, 0);
8534 }
8535 
8536 /*
8537  * Initialise min_free_kbytes.
8538  *
8539  * For small machines we want it small (128k min).  For large machines
8540  * we want it large (256MB max).  But it is not linear, because network
8541  * bandwidth does not increase linearly with machine size.  We use
8542  *
8543  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8544  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
8545  *
8546  * which yields
8547  *
8548  * 16MB:	512k
8549  * 32MB:	724k
8550  * 64MB:	1024k
8551  * 128MB:	1448k
8552  * 256MB:	2048k
8553  * 512MB:	2896k
8554  * 1024MB:	4096k
8555  * 2048MB:	5792k
8556  * 4096MB:	8192k
8557  * 8192MB:	11584k
8558  * 16384MB:	16384k
8559  */
8560 void calculate_min_free_kbytes(void)
8561 {
8562 	unsigned long lowmem_kbytes;
8563 	int new_min_free_kbytes;
8564 
8565 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8566 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8567 
8568 	if (new_min_free_kbytes > user_min_free_kbytes)
8569 		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
8570 	else
8571 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8572 				new_min_free_kbytes, user_min_free_kbytes);
8573 
8574 }
8575 
8576 int __meminit init_per_zone_wmark_min(void)
8577 {
8578 	calculate_min_free_kbytes();
8579 	setup_per_zone_wmarks();
8580 	refresh_zone_stat_thresholds();
8581 	setup_per_zone_lowmem_reserve();
8582 
8583 #ifdef CONFIG_NUMA
8584 	setup_min_unmapped_ratio();
8585 	setup_min_slab_ratio();
8586 #endif
8587 
8588 	khugepaged_min_free_kbytes_update();
8589 
8590 	return 0;
8591 }
8592 postcore_initcall(init_per_zone_wmark_min)
8593 
8594 /*
8595  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8596  *	that we can call two helper functions whenever min_free_kbytes
8597  *	changes.
8598  */
8599 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8600 		void *buffer, size_t *length, loff_t *ppos)
8601 {
8602 	int rc;
8603 
8604 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8605 	if (rc)
8606 		return rc;
8607 
8608 	if (write) {
8609 		user_min_free_kbytes = min_free_kbytes;
8610 		setup_per_zone_wmarks();
8611 	}
8612 	return 0;
8613 }
8614 
8615 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8616 		void *buffer, size_t *length, loff_t *ppos)
8617 {
8618 	int rc;
8619 
8620 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8621 	if (rc)
8622 		return rc;
8623 
8624 	if (write)
8625 		setup_per_zone_wmarks();
8626 
8627 	return 0;
8628 }
8629 
8630 #ifdef CONFIG_NUMA
8631 static void setup_min_unmapped_ratio(void)
8632 {
8633 	pg_data_t *pgdat;
8634 	struct zone *zone;
8635 
8636 	for_each_online_pgdat(pgdat)
8637 		pgdat->min_unmapped_pages = 0;
8638 
8639 	for_each_zone(zone)
8640 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8641 						         sysctl_min_unmapped_ratio) / 100;
8642 }
8643 
8644 
8645 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8646 		void *buffer, size_t *length, loff_t *ppos)
8647 {
8648 	int rc;
8649 
8650 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8651 	if (rc)
8652 		return rc;
8653 
8654 	setup_min_unmapped_ratio();
8655 
8656 	return 0;
8657 }
8658 
8659 static void setup_min_slab_ratio(void)
8660 {
8661 	pg_data_t *pgdat;
8662 	struct zone *zone;
8663 
8664 	for_each_online_pgdat(pgdat)
8665 		pgdat->min_slab_pages = 0;
8666 
8667 	for_each_zone(zone)
8668 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8669 						     sysctl_min_slab_ratio) / 100;
8670 }
8671 
8672 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8673 		void *buffer, size_t *length, loff_t *ppos)
8674 {
8675 	int rc;
8676 
8677 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8678 	if (rc)
8679 		return rc;
8680 
8681 	setup_min_slab_ratio();
8682 
8683 	return 0;
8684 }
8685 #endif
8686 
8687 /*
8688  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8689  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8690  *	whenever sysctl_lowmem_reserve_ratio changes.
8691  *
8692  * The reserve ratio obviously has absolutely no relation with the
8693  * minimum watermarks. The lowmem reserve ratio can only make sense
8694  * if in function of the boot time zone sizes.
8695  */
8696 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8697 		void *buffer, size_t *length, loff_t *ppos)
8698 {
8699 	int i;
8700 
8701 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8702 
8703 	for (i = 0; i < MAX_NR_ZONES; i++) {
8704 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8705 			sysctl_lowmem_reserve_ratio[i] = 0;
8706 	}
8707 
8708 	setup_per_zone_lowmem_reserve();
8709 	return 0;
8710 }
8711 
8712 /*
8713  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8714  * cpu. It is the fraction of total pages in each zone that a hot per cpu
8715  * pagelist can have before it gets flushed back to buddy allocator.
8716  */
8717 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8718 		int write, void *buffer, size_t *length, loff_t *ppos)
8719 {
8720 	struct zone *zone;
8721 	int old_percpu_pagelist_high_fraction;
8722 	int ret;
8723 
8724 	mutex_lock(&pcp_batch_high_lock);
8725 	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8726 
8727 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8728 	if (!write || ret < 0)
8729 		goto out;
8730 
8731 	/* Sanity checking to avoid pcp imbalance */
8732 	if (percpu_pagelist_high_fraction &&
8733 	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8734 		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8735 		ret = -EINVAL;
8736 		goto out;
8737 	}
8738 
8739 	/* No change? */
8740 	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8741 		goto out;
8742 
8743 	for_each_populated_zone(zone)
8744 		zone_set_pageset_high_and_batch(zone, 0);
8745 out:
8746 	mutex_unlock(&pcp_batch_high_lock);
8747 	return ret;
8748 }
8749 
8750 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8751 /*
8752  * Returns the number of pages that arch has reserved but
8753  * is not known to alloc_large_system_hash().
8754  */
8755 static unsigned long __init arch_reserved_kernel_pages(void)
8756 {
8757 	return 0;
8758 }
8759 #endif
8760 
8761 /*
8762  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8763  * machines. As memory size is increased the scale is also increased but at
8764  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8765  * quadruples the scale is increased by one, which means the size of hash table
8766  * only doubles, instead of quadrupling as well.
8767  * Because 32-bit systems cannot have large physical memory, where this scaling
8768  * makes sense, it is disabled on such platforms.
8769  */
8770 #if __BITS_PER_LONG > 32
8771 #define ADAPT_SCALE_BASE	(64ul << 30)
8772 #define ADAPT_SCALE_SHIFT	2
8773 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8774 #endif
8775 
8776 /*
8777  * allocate a large system hash table from bootmem
8778  * - it is assumed that the hash table must contain an exact power-of-2
8779  *   quantity of entries
8780  * - limit is the number of hash buckets, not the total allocation size
8781  */
8782 void *__init alloc_large_system_hash(const char *tablename,
8783 				     unsigned long bucketsize,
8784 				     unsigned long numentries,
8785 				     int scale,
8786 				     int flags,
8787 				     unsigned int *_hash_shift,
8788 				     unsigned int *_hash_mask,
8789 				     unsigned long low_limit,
8790 				     unsigned long high_limit)
8791 {
8792 	unsigned long long max = high_limit;
8793 	unsigned long log2qty, size;
8794 	void *table = NULL;
8795 	gfp_t gfp_flags;
8796 	bool virt;
8797 	bool huge;
8798 
8799 	/* allow the kernel cmdline to have a say */
8800 	if (!numentries) {
8801 		/* round applicable memory size up to nearest megabyte */
8802 		numentries = nr_kernel_pages;
8803 		numentries -= arch_reserved_kernel_pages();
8804 
8805 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8806 		if (PAGE_SHIFT < 20)
8807 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8808 
8809 #if __BITS_PER_LONG > 32
8810 		if (!high_limit) {
8811 			unsigned long adapt;
8812 
8813 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8814 			     adapt <<= ADAPT_SCALE_SHIFT)
8815 				scale++;
8816 		}
8817 #endif
8818 
8819 		/* limit to 1 bucket per 2^scale bytes of low memory */
8820 		if (scale > PAGE_SHIFT)
8821 			numentries >>= (scale - PAGE_SHIFT);
8822 		else
8823 			numentries <<= (PAGE_SHIFT - scale);
8824 
8825 		/* Make sure we've got at least a 0-order allocation.. */
8826 		if (unlikely(flags & HASH_SMALL)) {
8827 			/* Makes no sense without HASH_EARLY */
8828 			WARN_ON(!(flags & HASH_EARLY));
8829 			if (!(numentries >> *_hash_shift)) {
8830 				numentries = 1UL << *_hash_shift;
8831 				BUG_ON(!numentries);
8832 			}
8833 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8834 			numentries = PAGE_SIZE / bucketsize;
8835 	}
8836 	numentries = roundup_pow_of_two(numentries);
8837 
8838 	/* limit allocation size to 1/16 total memory by default */
8839 	if (max == 0) {
8840 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8841 		do_div(max, bucketsize);
8842 	}
8843 	max = min(max, 0x80000000ULL);
8844 
8845 	if (numentries < low_limit)
8846 		numentries = low_limit;
8847 	if (numentries > max)
8848 		numentries = max;
8849 
8850 	log2qty = ilog2(numentries);
8851 
8852 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8853 	do {
8854 		virt = false;
8855 		size = bucketsize << log2qty;
8856 		if (flags & HASH_EARLY) {
8857 			if (flags & HASH_ZERO)
8858 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8859 			else
8860 				table = memblock_alloc_raw(size,
8861 							   SMP_CACHE_BYTES);
8862 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8863 			table = __vmalloc(size, gfp_flags);
8864 			virt = true;
8865 			if (table)
8866 				huge = is_vm_area_hugepages(table);
8867 		} else {
8868 			/*
8869 			 * If bucketsize is not a power-of-two, we may free
8870 			 * some pages at the end of hash table which
8871 			 * alloc_pages_exact() automatically does
8872 			 */
8873 			table = alloc_pages_exact(size, gfp_flags);
8874 			kmemleak_alloc(table, size, 1, gfp_flags);
8875 		}
8876 	} while (!table && size > PAGE_SIZE && --log2qty);
8877 
8878 	if (!table)
8879 		panic("Failed to allocate %s hash table\n", tablename);
8880 
8881 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8882 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8883 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
8884 
8885 	if (_hash_shift)
8886 		*_hash_shift = log2qty;
8887 	if (_hash_mask)
8888 		*_hash_mask = (1 << log2qty) - 1;
8889 
8890 	return table;
8891 }
8892 
8893 /*
8894  * This function checks whether pageblock includes unmovable pages or not.
8895  *
8896  * PageLRU check without isolation or lru_lock could race so that
8897  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
8898  * check without lock_page also may miss some movable non-lru pages at
8899  * race condition. So you can't expect this function should be exact.
8900  *
8901  * Returns a page without holding a reference. If the caller wants to
8902  * dereference that page (e.g., dumping), it has to make sure that it
8903  * cannot get removed (e.g., via memory unplug) concurrently.
8904  *
8905  */
8906 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8907 				 int migratetype, int flags)
8908 {
8909 	unsigned long iter = 0;
8910 	unsigned long pfn = page_to_pfn(page);
8911 	unsigned long offset = pfn % pageblock_nr_pages;
8912 
8913 	if (is_migrate_cma_page(page)) {
8914 		/*
8915 		 * CMA allocations (alloc_contig_range) really need to mark
8916 		 * isolate CMA pageblocks even when they are not movable in fact
8917 		 * so consider them movable here.
8918 		 */
8919 		if (is_migrate_cma(migratetype))
8920 			return NULL;
8921 
8922 		return page;
8923 	}
8924 
8925 	for (; iter < pageblock_nr_pages - offset; iter++) {
8926 		page = pfn_to_page(pfn + iter);
8927 
8928 		/*
8929 		 * Both, bootmem allocations and memory holes are marked
8930 		 * PG_reserved and are unmovable. We can even have unmovable
8931 		 * allocations inside ZONE_MOVABLE, for example when
8932 		 * specifying "movablecore".
8933 		 */
8934 		if (PageReserved(page))
8935 			return page;
8936 
8937 		/*
8938 		 * If the zone is movable and we have ruled out all reserved
8939 		 * pages then it should be reasonably safe to assume the rest
8940 		 * is movable.
8941 		 */
8942 		if (zone_idx(zone) == ZONE_MOVABLE)
8943 			continue;
8944 
8945 		/*
8946 		 * Hugepages are not in LRU lists, but they're movable.
8947 		 * THPs are on the LRU, but need to be counted as #small pages.
8948 		 * We need not scan over tail pages because we don't
8949 		 * handle each tail page individually in migration.
8950 		 */
8951 		if (PageHuge(page) || PageTransCompound(page)) {
8952 			struct page *head = compound_head(page);
8953 			unsigned int skip_pages;
8954 
8955 			if (PageHuge(page)) {
8956 				if (!hugepage_migration_supported(page_hstate(head)))
8957 					return page;
8958 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8959 				return page;
8960 			}
8961 
8962 			skip_pages = compound_nr(head) - (page - head);
8963 			iter += skip_pages - 1;
8964 			continue;
8965 		}
8966 
8967 		/*
8968 		 * We can't use page_count without pin a page
8969 		 * because another CPU can free compound page.
8970 		 * This check already skips compound tails of THP
8971 		 * because their page->_refcount is zero at all time.
8972 		 */
8973 		if (!page_ref_count(page)) {
8974 			if (PageBuddy(page))
8975 				iter += (1 << buddy_order(page)) - 1;
8976 			continue;
8977 		}
8978 
8979 		/*
8980 		 * The HWPoisoned page may be not in buddy system, and
8981 		 * page_count() is not 0.
8982 		 */
8983 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8984 			continue;
8985 
8986 		/*
8987 		 * We treat all PageOffline() pages as movable when offlining
8988 		 * to give drivers a chance to decrement their reference count
8989 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8990 		 * can be offlined as there are no direct references anymore.
8991 		 * For actually unmovable PageOffline() where the driver does
8992 		 * not support this, we will fail later when trying to actually
8993 		 * move these pages that still have a reference count > 0.
8994 		 * (false negatives in this function only)
8995 		 */
8996 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8997 			continue;
8998 
8999 		if (__PageMovable(page) || PageLRU(page))
9000 			continue;
9001 
9002 		/*
9003 		 * If there are RECLAIMABLE pages, we need to check
9004 		 * it.  But now, memory offline itself doesn't call
9005 		 * shrink_node_slabs() and it still to be fixed.
9006 		 */
9007 		return page;
9008 	}
9009 	return NULL;
9010 }
9011 
9012 #ifdef CONFIG_CONTIG_ALLOC
9013 static unsigned long pfn_max_align_down(unsigned long pfn)
9014 {
9015 	return ALIGN_DOWN(pfn, MAX_ORDER_NR_PAGES);
9016 }
9017 
9018 static unsigned long pfn_max_align_up(unsigned long pfn)
9019 {
9020 	return ALIGN(pfn, MAX_ORDER_NR_PAGES);
9021 }
9022 
9023 #if defined(CONFIG_DYNAMIC_DEBUG) || \
9024 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9025 /* Usage: See admin-guide/dynamic-debug-howto.rst */
9026 static void alloc_contig_dump_pages(struct list_head *page_list)
9027 {
9028 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9029 
9030 	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9031 		struct page *page;
9032 
9033 		dump_stack();
9034 		list_for_each_entry(page, page_list, lru)
9035 			dump_page(page, "migration failure");
9036 	}
9037 }
9038 #else
9039 static inline void alloc_contig_dump_pages(struct list_head *page_list)
9040 {
9041 }
9042 #endif
9043 
9044 /* [start, end) must belong to a single zone. */
9045 static int __alloc_contig_migrate_range(struct compact_control *cc,
9046 					unsigned long start, unsigned long end)
9047 {
9048 	/* This function is based on compact_zone() from compaction.c. */
9049 	unsigned int nr_reclaimed;
9050 	unsigned long pfn = start;
9051 	unsigned int tries = 0;
9052 	int ret = 0;
9053 	struct migration_target_control mtc = {
9054 		.nid = zone_to_nid(cc->zone),
9055 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9056 	};
9057 
9058 	lru_cache_disable();
9059 
9060 	while (pfn < end || !list_empty(&cc->migratepages)) {
9061 		if (fatal_signal_pending(current)) {
9062 			ret = -EINTR;
9063 			break;
9064 		}
9065 
9066 		if (list_empty(&cc->migratepages)) {
9067 			cc->nr_migratepages = 0;
9068 			ret = isolate_migratepages_range(cc, pfn, end);
9069 			if (ret && ret != -EAGAIN)
9070 				break;
9071 			pfn = cc->migrate_pfn;
9072 			tries = 0;
9073 		} else if (++tries == 5) {
9074 			ret = -EBUSY;
9075 			break;
9076 		}
9077 
9078 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9079 							&cc->migratepages);
9080 		cc->nr_migratepages -= nr_reclaimed;
9081 
9082 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
9083 			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
9084 
9085 		/*
9086 		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
9087 		 * to retry again over this error, so do the same here.
9088 		 */
9089 		if (ret == -ENOMEM)
9090 			break;
9091 	}
9092 
9093 	lru_cache_enable();
9094 	if (ret < 0) {
9095 		if (ret == -EBUSY)
9096 			alloc_contig_dump_pages(&cc->migratepages);
9097 		putback_movable_pages(&cc->migratepages);
9098 		return ret;
9099 	}
9100 	return 0;
9101 }
9102 
9103 /**
9104  * alloc_contig_range() -- tries to allocate given range of pages
9105  * @start:	start PFN to allocate
9106  * @end:	one-past-the-last PFN to allocate
9107  * @migratetype:	migratetype of the underlying pageblocks (either
9108  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
9109  *			in range must have the same migratetype and it must
9110  *			be either of the two.
9111  * @gfp_mask:	GFP mask to use during compaction
9112  *
9113  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
9114  * aligned.  The PFN range must belong to a single zone.
9115  *
9116  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9117  * pageblocks in the range.  Once isolated, the pageblocks should not
9118  * be modified by others.
9119  *
9120  * Return: zero on success or negative error code.  On success all
9121  * pages which PFN is in [start, end) are allocated for the caller and
9122  * need to be freed with free_contig_range().
9123  */
9124 int alloc_contig_range(unsigned long start, unsigned long end,
9125 		       unsigned migratetype, gfp_t gfp_mask)
9126 {
9127 	unsigned long outer_start, outer_end;
9128 	unsigned int order;
9129 	int ret = 0;
9130 
9131 	struct compact_control cc = {
9132 		.nr_migratepages = 0,
9133 		.order = -1,
9134 		.zone = page_zone(pfn_to_page(start)),
9135 		.mode = MIGRATE_SYNC,
9136 		.ignore_skip_hint = true,
9137 		.no_set_skip_hint = true,
9138 		.gfp_mask = current_gfp_context(gfp_mask),
9139 		.alloc_contig = true,
9140 	};
9141 	INIT_LIST_HEAD(&cc.migratepages);
9142 
9143 	/*
9144 	 * What we do here is we mark all pageblocks in range as
9145 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
9146 	 * have different sizes, and due to the way page allocator
9147 	 * work, we align the range to biggest of the two pages so
9148 	 * that page allocator won't try to merge buddies from
9149 	 * different pageblocks and change MIGRATE_ISOLATE to some
9150 	 * other migration type.
9151 	 *
9152 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9153 	 * migrate the pages from an unaligned range (ie. pages that
9154 	 * we are interested in).  This will put all the pages in
9155 	 * range back to page allocator as MIGRATE_ISOLATE.
9156 	 *
9157 	 * When this is done, we take the pages in range from page
9158 	 * allocator removing them from the buddy system.  This way
9159 	 * page allocator will never consider using them.
9160 	 *
9161 	 * This lets us mark the pageblocks back as
9162 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9163 	 * aligned range but not in the unaligned, original range are
9164 	 * put back to page allocator so that buddy can use them.
9165 	 */
9166 
9167 	ret = start_isolate_page_range(pfn_max_align_down(start),
9168 				       pfn_max_align_up(end), migratetype, 0);
9169 	if (ret)
9170 		return ret;
9171 
9172 	drain_all_pages(cc.zone);
9173 
9174 	/*
9175 	 * In case of -EBUSY, we'd like to know which page causes problem.
9176 	 * So, just fall through. test_pages_isolated() has a tracepoint
9177 	 * which will report the busy page.
9178 	 *
9179 	 * It is possible that busy pages could become available before
9180 	 * the call to test_pages_isolated, and the range will actually be
9181 	 * allocated.  So, if we fall through be sure to clear ret so that
9182 	 * -EBUSY is not accidentally used or returned to caller.
9183 	 */
9184 	ret = __alloc_contig_migrate_range(&cc, start, end);
9185 	if (ret && ret != -EBUSY)
9186 		goto done;
9187 	ret = 0;
9188 
9189 	/*
9190 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
9191 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
9192 	 * more, all pages in [start, end) are free in page allocator.
9193 	 * What we are going to do is to allocate all pages from
9194 	 * [start, end) (that is remove them from page allocator).
9195 	 *
9196 	 * The only problem is that pages at the beginning and at the
9197 	 * end of interesting range may be not aligned with pages that
9198 	 * page allocator holds, ie. they can be part of higher order
9199 	 * pages.  Because of this, we reserve the bigger range and
9200 	 * once this is done free the pages we are not interested in.
9201 	 *
9202 	 * We don't have to hold zone->lock here because the pages are
9203 	 * isolated thus they won't get removed from buddy.
9204 	 */
9205 
9206 	order = 0;
9207 	outer_start = start;
9208 	while (!PageBuddy(pfn_to_page(outer_start))) {
9209 		if (++order >= MAX_ORDER) {
9210 			outer_start = start;
9211 			break;
9212 		}
9213 		outer_start &= ~0UL << order;
9214 	}
9215 
9216 	if (outer_start != start) {
9217 		order = buddy_order(pfn_to_page(outer_start));
9218 
9219 		/*
9220 		 * outer_start page could be small order buddy page and
9221 		 * it doesn't include start page. Adjust outer_start
9222 		 * in this case to report failed page properly
9223 		 * on tracepoint in test_pages_isolated()
9224 		 */
9225 		if (outer_start + (1UL << order) <= start)
9226 			outer_start = start;
9227 	}
9228 
9229 	/* Make sure the range is really isolated. */
9230 	if (test_pages_isolated(outer_start, end, 0)) {
9231 		ret = -EBUSY;
9232 		goto done;
9233 	}
9234 
9235 	/* Grab isolated pages from freelists. */
9236 	outer_end = isolate_freepages_range(&cc, outer_start, end);
9237 	if (!outer_end) {
9238 		ret = -EBUSY;
9239 		goto done;
9240 	}
9241 
9242 	/* Free head and tail (if any) */
9243 	if (start != outer_start)
9244 		free_contig_range(outer_start, start - outer_start);
9245 	if (end != outer_end)
9246 		free_contig_range(end, outer_end - end);
9247 
9248 done:
9249 	undo_isolate_page_range(pfn_max_align_down(start),
9250 				pfn_max_align_up(end), migratetype);
9251 	return ret;
9252 }
9253 EXPORT_SYMBOL(alloc_contig_range);
9254 
9255 static int __alloc_contig_pages(unsigned long start_pfn,
9256 				unsigned long nr_pages, gfp_t gfp_mask)
9257 {
9258 	unsigned long end_pfn = start_pfn + nr_pages;
9259 
9260 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9261 				  gfp_mask);
9262 }
9263 
9264 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9265 				   unsigned long nr_pages)
9266 {
9267 	unsigned long i, end_pfn = start_pfn + nr_pages;
9268 	struct page *page;
9269 
9270 	for (i = start_pfn; i < end_pfn; i++) {
9271 		page = pfn_to_online_page(i);
9272 		if (!page)
9273 			return false;
9274 
9275 		if (page_zone(page) != z)
9276 			return false;
9277 
9278 		if (PageReserved(page))
9279 			return false;
9280 	}
9281 	return true;
9282 }
9283 
9284 static bool zone_spans_last_pfn(const struct zone *zone,
9285 				unsigned long start_pfn, unsigned long nr_pages)
9286 {
9287 	unsigned long last_pfn = start_pfn + nr_pages - 1;
9288 
9289 	return zone_spans_pfn(zone, last_pfn);
9290 }
9291 
9292 /**
9293  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9294  * @nr_pages:	Number of contiguous pages to allocate
9295  * @gfp_mask:	GFP mask to limit search and used during compaction
9296  * @nid:	Target node
9297  * @nodemask:	Mask for other possible nodes
9298  *
9299  * This routine is a wrapper around alloc_contig_range(). It scans over zones
9300  * on an applicable zonelist to find a contiguous pfn range which can then be
9301  * tried for allocation with alloc_contig_range(). This routine is intended
9302  * for allocation requests which can not be fulfilled with the buddy allocator.
9303  *
9304  * The allocated memory is always aligned to a page boundary. If nr_pages is a
9305  * power of two, then allocated range is also guaranteed to be aligned to same
9306  * nr_pages (e.g. 1GB request would be aligned to 1GB).
9307  *
9308  * Allocated pages can be freed with free_contig_range() or by manually calling
9309  * __free_page() on each allocated page.
9310  *
9311  * Return: pointer to contiguous pages on success, or NULL if not successful.
9312  */
9313 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9314 				int nid, nodemask_t *nodemask)
9315 {
9316 	unsigned long ret, pfn, flags;
9317 	struct zonelist *zonelist;
9318 	struct zone *zone;
9319 	struct zoneref *z;
9320 
9321 	zonelist = node_zonelist(nid, gfp_mask);
9322 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
9323 					gfp_zone(gfp_mask), nodemask) {
9324 		spin_lock_irqsave(&zone->lock, flags);
9325 
9326 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9327 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9328 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9329 				/*
9330 				 * We release the zone lock here because
9331 				 * alloc_contig_range() will also lock the zone
9332 				 * at some point. If there's an allocation
9333 				 * spinning on this lock, it may win the race
9334 				 * and cause alloc_contig_range() to fail...
9335 				 */
9336 				spin_unlock_irqrestore(&zone->lock, flags);
9337 				ret = __alloc_contig_pages(pfn, nr_pages,
9338 							gfp_mask);
9339 				if (!ret)
9340 					return pfn_to_page(pfn);
9341 				spin_lock_irqsave(&zone->lock, flags);
9342 			}
9343 			pfn += nr_pages;
9344 		}
9345 		spin_unlock_irqrestore(&zone->lock, flags);
9346 	}
9347 	return NULL;
9348 }
9349 #endif /* CONFIG_CONTIG_ALLOC */
9350 
9351 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9352 {
9353 	unsigned long count = 0;
9354 
9355 	for (; nr_pages--; pfn++) {
9356 		struct page *page = pfn_to_page(pfn);
9357 
9358 		count += page_count(page) != 1;
9359 		__free_page(page);
9360 	}
9361 	WARN(count != 0, "%lu pages are still in use!\n", count);
9362 }
9363 EXPORT_SYMBOL(free_contig_range);
9364 
9365 /*
9366  * The zone indicated has a new number of managed_pages; batch sizes and percpu
9367  * page high values need to be recalculated.
9368  */
9369 void zone_pcp_update(struct zone *zone, int cpu_online)
9370 {
9371 	mutex_lock(&pcp_batch_high_lock);
9372 	zone_set_pageset_high_and_batch(zone, cpu_online);
9373 	mutex_unlock(&pcp_batch_high_lock);
9374 }
9375 
9376 /*
9377  * Effectively disable pcplists for the zone by setting the high limit to 0
9378  * and draining all cpus. A concurrent page freeing on another CPU that's about
9379  * to put the page on pcplist will either finish before the drain and the page
9380  * will be drained, or observe the new high limit and skip the pcplist.
9381  *
9382  * Must be paired with a call to zone_pcp_enable().
9383  */
9384 void zone_pcp_disable(struct zone *zone)
9385 {
9386 	mutex_lock(&pcp_batch_high_lock);
9387 	__zone_set_pageset_high_and_batch(zone, 0, 1);
9388 	__drain_all_pages(zone, true);
9389 }
9390 
9391 void zone_pcp_enable(struct zone *zone)
9392 {
9393 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9394 	mutex_unlock(&pcp_batch_high_lock);
9395 }
9396 
9397 void zone_pcp_reset(struct zone *zone)
9398 {
9399 	int cpu;
9400 	struct per_cpu_zonestat *pzstats;
9401 
9402 	if (zone->per_cpu_pageset != &boot_pageset) {
9403 		for_each_online_cpu(cpu) {
9404 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9405 			drain_zonestat(zone, pzstats);
9406 		}
9407 		free_percpu(zone->per_cpu_pageset);
9408 		free_percpu(zone->per_cpu_zonestats);
9409 		zone->per_cpu_pageset = &boot_pageset;
9410 		zone->per_cpu_zonestats = &boot_zonestats;
9411 	}
9412 }
9413 
9414 #ifdef CONFIG_MEMORY_HOTREMOVE
9415 /*
9416  * All pages in the range must be in a single zone, must not contain holes,
9417  * must span full sections, and must be isolated before calling this function.
9418  */
9419 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9420 {
9421 	unsigned long pfn = start_pfn;
9422 	struct page *page;
9423 	struct zone *zone;
9424 	unsigned int order;
9425 	unsigned long flags;
9426 
9427 	offline_mem_sections(pfn, end_pfn);
9428 	zone = page_zone(pfn_to_page(pfn));
9429 	spin_lock_irqsave(&zone->lock, flags);
9430 	while (pfn < end_pfn) {
9431 		page = pfn_to_page(pfn);
9432 		/*
9433 		 * The HWPoisoned page may be not in buddy system, and
9434 		 * page_count() is not 0.
9435 		 */
9436 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9437 			pfn++;
9438 			continue;
9439 		}
9440 		/*
9441 		 * At this point all remaining PageOffline() pages have a
9442 		 * reference count of 0 and can simply be skipped.
9443 		 */
9444 		if (PageOffline(page)) {
9445 			BUG_ON(page_count(page));
9446 			BUG_ON(PageBuddy(page));
9447 			pfn++;
9448 			continue;
9449 		}
9450 
9451 		BUG_ON(page_count(page));
9452 		BUG_ON(!PageBuddy(page));
9453 		order = buddy_order(page);
9454 		del_page_from_free_list(page, zone, order);
9455 		pfn += (1 << order);
9456 	}
9457 	spin_unlock_irqrestore(&zone->lock, flags);
9458 }
9459 #endif
9460 
9461 /*
9462  * This function returns a stable result only if called under zone lock.
9463  */
9464 bool is_free_buddy_page(struct page *page)
9465 {
9466 	unsigned long pfn = page_to_pfn(page);
9467 	unsigned int order;
9468 
9469 	for (order = 0; order < MAX_ORDER; order++) {
9470 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9471 
9472 		if (PageBuddy(page_head) &&
9473 		    buddy_order_unsafe(page_head) >= order)
9474 			break;
9475 	}
9476 
9477 	return order < MAX_ORDER;
9478 }
9479 EXPORT_SYMBOL(is_free_buddy_page);
9480 
9481 #ifdef CONFIG_MEMORY_FAILURE
9482 /*
9483  * Break down a higher-order page in sub-pages, and keep our target out of
9484  * buddy allocator.
9485  */
9486 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9487 				   struct page *target, int low, int high,
9488 				   int migratetype)
9489 {
9490 	unsigned long size = 1 << high;
9491 	struct page *current_buddy, *next_page;
9492 
9493 	while (high > low) {
9494 		high--;
9495 		size >>= 1;
9496 
9497 		if (target >= &page[size]) {
9498 			next_page = page + size;
9499 			current_buddy = page;
9500 		} else {
9501 			next_page = page;
9502 			current_buddy = page + size;
9503 		}
9504 
9505 		if (set_page_guard(zone, current_buddy, high, migratetype))
9506 			continue;
9507 
9508 		if (current_buddy != target) {
9509 			add_to_free_list(current_buddy, zone, high, migratetype);
9510 			set_buddy_order(current_buddy, high);
9511 			page = next_page;
9512 		}
9513 	}
9514 }
9515 
9516 /*
9517  * Take a page that will be marked as poisoned off the buddy allocator.
9518  */
9519 bool take_page_off_buddy(struct page *page)
9520 {
9521 	struct zone *zone = page_zone(page);
9522 	unsigned long pfn = page_to_pfn(page);
9523 	unsigned long flags;
9524 	unsigned int order;
9525 	bool ret = false;
9526 
9527 	spin_lock_irqsave(&zone->lock, flags);
9528 	for (order = 0; order < MAX_ORDER; order++) {
9529 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9530 		int page_order = buddy_order(page_head);
9531 
9532 		if (PageBuddy(page_head) && page_order >= order) {
9533 			unsigned long pfn_head = page_to_pfn(page_head);
9534 			int migratetype = get_pfnblock_migratetype(page_head,
9535 								   pfn_head);
9536 
9537 			del_page_from_free_list(page_head, zone, page_order);
9538 			break_down_buddy_pages(zone, page_head, page, 0,
9539 						page_order, migratetype);
9540 			SetPageHWPoisonTakenOff(page);
9541 			if (!is_migrate_isolate(migratetype))
9542 				__mod_zone_freepage_state(zone, -1, migratetype);
9543 			ret = true;
9544 			break;
9545 		}
9546 		if (page_count(page_head) > 0)
9547 			break;
9548 	}
9549 	spin_unlock_irqrestore(&zone->lock, flags);
9550 	return ret;
9551 }
9552 
9553 /*
9554  * Cancel takeoff done by take_page_off_buddy().
9555  */
9556 bool put_page_back_buddy(struct page *page)
9557 {
9558 	struct zone *zone = page_zone(page);
9559 	unsigned long pfn = page_to_pfn(page);
9560 	unsigned long flags;
9561 	int migratetype = get_pfnblock_migratetype(page, pfn);
9562 	bool ret = false;
9563 
9564 	spin_lock_irqsave(&zone->lock, flags);
9565 	if (put_page_testzero(page)) {
9566 		ClearPageHWPoisonTakenOff(page);
9567 		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
9568 		if (TestClearPageHWPoison(page)) {
9569 			num_poisoned_pages_dec();
9570 			ret = true;
9571 		}
9572 	}
9573 	spin_unlock_irqrestore(&zone->lock, flags);
9574 
9575 	return ret;
9576 }
9577 #endif
9578 
9579 #ifdef CONFIG_ZONE_DMA
9580 bool has_managed_dma(void)
9581 {
9582 	struct pglist_data *pgdat;
9583 
9584 	for_each_online_pgdat(pgdat) {
9585 		struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9586 
9587 		if (managed_zone(zone))
9588 			return true;
9589 	}
9590 	return false;
9591 }
9592 #endif /* CONFIG_ZONE_DMA */
9593