xref: /openbmc/linux/mm/page_alloc.c (revision d3402925)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list; the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/swapops.h>
23 #include <linux/interrupt.h>
24 #include <linux/pagemap.h>
25 #include <linux/jiffies.h>
26 #include <linux/memblock.h>
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/kasan.h>
30 #include <linux/kmsan.h>
31 #include <linux/module.h>
32 #include <linux/suspend.h>
33 #include <linux/pagevec.h>
34 #include <linux/blkdev.h>
35 #include <linux/slab.h>
36 #include <linux/ratelimit.h>
37 #include <linux/oom.h>
38 #include <linux/topology.h>
39 #include <linux/sysctl.h>
40 #include <linux/cpu.h>
41 #include <linux/cpuset.h>
42 #include <linux/memory_hotplug.h>
43 #include <linux/nodemask.h>
44 #include <linux/vmalloc.h>
45 #include <linux/vmstat.h>
46 #include <linux/mempolicy.h>
47 #include <linux/memremap.h>
48 #include <linux/stop_machine.h>
49 #include <linux/random.h>
50 #include <linux/sort.h>
51 #include <linux/pfn.h>
52 #include <linux/backing-dev.h>
53 #include <linux/fault-inject.h>
54 #include <linux/page-isolation.h>
55 #include <linux/debugobjects.h>
56 #include <linux/kmemleak.h>
57 #include <linux/compaction.h>
58 #include <trace/events/kmem.h>
59 #include <trace/events/oom.h>
60 #include <linux/prefetch.h>
61 #include <linux/mm_inline.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/migrate.h>
64 #include <linux/hugetlb.h>
65 #include <linux/sched/rt.h>
66 #include <linux/sched/mm.h>
67 #include <linux/page_owner.h>
68 #include <linux/page_table_check.h>
69 #include <linux/kthread.h>
70 #include <linux/memcontrol.h>
71 #include <linux/ftrace.h>
72 #include <linux/lockdep.h>
73 #include <linux/nmi.h>
74 #include <linux/psi.h>
75 #include <linux/padata.h>
76 #include <linux/khugepaged.h>
77 #include <linux/buffer_head.h>
78 #include <linux/delayacct.h>
79 #include <asm/sections.h>
80 #include <asm/tlbflush.h>
81 #include <asm/div64.h>
82 #include "internal.h"
83 #include "shuffle.h"
84 #include "page_reporting.h"
85 #include "swap.h"
86 
87 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
88 typedef int __bitwise fpi_t;
89 
90 /* No special request */
91 #define FPI_NONE		((__force fpi_t)0)
92 
93 /*
94  * Skip free page reporting notification for the (possibly merged) page.
95  * This does not hinder free page reporting from grabbing the page,
96  * reporting it and marking it "reported" - it only skips notifying
97  * the free page reporting infrastructure about a newly freed page. For
98  * example, used when temporarily pulling a page from a freelist and
99  * putting it back unmodified.
100  */
101 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
102 
103 /*
104  * Place the (possibly merged) page to the tail of the freelist. Will ignore
105  * page shuffling (relevant code - e.g., memory onlining - is expected to
106  * shuffle the whole zone).
107  *
108  * Note: No code should rely on this flag for correctness - it's purely
109  *       to allow for optimizations when handing back either fresh pages
110  *       (memory onlining) or untouched pages (page isolation, free page
111  *       reporting).
112  */
113 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
114 
115 /*
116  * Don't poison memory with KASAN (only for the tag-based modes).
117  * During boot, all non-reserved memblock memory is exposed to page_alloc.
118  * Poisoning all that memory lengthens boot time, especially on systems with
119  * large amount of RAM. This flag is used to skip that poisoning.
120  * This is only done for the tag-based KASAN modes, as those are able to
121  * detect memory corruptions with the memory tags assigned by default.
122  * All memory allocated normally after boot gets poisoned as usual.
123  */
124 #define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
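
/*
 * Illustrative combination (a sketch; __free_pages_core() below does the
 * same): fpi_t flags may be OR-ed together, e.g.
 *
 *	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
 *
 * to put fresh pages at the freelist tail and skip KASAN poisoning.
 */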
125 
126 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
127 static DEFINE_MUTEX(pcp_batch_high_lock);
128 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
129 
130 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
131 /*
132  * On SMP, spin_trylock is sufficient protection.
133  * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
134  */
135 #define pcp_trylock_prepare(flags)	do { } while (0)
136 #define pcp_trylock_finish(flag)	do { } while (0)
137 #else
138 
139 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
140 #define pcp_trylock_prepare(flags)	local_irq_save(flags)
141 #define pcp_trylock_finish(flags)	local_irq_restore(flags)
142 #endif
143 
144 /*
145  * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
146  * a migration causing the wrong PCP to be locked and remote memory being
147  * potentially allocated, pin the task to the CPU for the lookup+lock.
148  * preempt_disable is used on !RT because it is faster than migrate_disable.
149  * migrate_disable is used on RT because otherwise RT spinlock usage is
150  * interfered with and a high priority task cannot preempt the allocator.
151  */
152 #ifndef CONFIG_PREEMPT_RT
153 #define pcpu_task_pin()		preempt_disable()
154 #define pcpu_task_unpin()	preempt_enable()
155 #else
156 #define pcpu_task_pin()		migrate_disable()
157 #define pcpu_task_unpin()	migrate_enable()
158 #endif
159 
160 /*
161  * Generic helper to look up and lock a per-cpu variable with an embedded
162  * spinlock. The return value should be used with the equivalent unlock helper.
163  */
164 #define pcpu_spin_lock(type, member, ptr)				\
165 ({									\
166 	type *_ret;							\
167 	pcpu_task_pin();						\
168 	_ret = this_cpu_ptr(ptr);					\
169 	spin_lock(&_ret->member);					\
170 	_ret;								\
171 })
172 
173 #define pcpu_spin_trylock(type, member, ptr)				\
174 ({									\
175 	type *_ret;							\
176 	pcpu_task_pin();						\
177 	_ret = this_cpu_ptr(ptr);					\
178 	if (!spin_trylock(&_ret->member)) {				\
179 		pcpu_task_unpin();					\
180 		_ret = NULL;						\
181 	}								\
182 	_ret;								\
183 })
184 
185 #define pcpu_spin_unlock(member, ptr)					\
186 ({									\
187 	spin_unlock(&ptr->member);					\
188 	pcpu_task_unpin();						\
189 })
190 
191 /* struct per_cpu_pages specific helpers. */
192 #define pcp_spin_lock(ptr)						\
193 	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
194 
195 #define pcp_spin_trylock(ptr)						\
196 	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
197 
198 #define pcp_spin_unlock(ptr)						\
199 	pcpu_spin_unlock(lock, ptr)
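
/*
 * Usage sketch (illustrative, mirroring callers such as free_unref_page()):
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		... operate on pcp->lists ...
 *		pcp_spin_unlock(pcp);
 *	}
 *
 * The task stays pinned from the lookup until the unlock, so the pcp
 * pointer cannot be made stale by migration to another CPU.
 */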
200 
201 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
202 DEFINE_PER_CPU(int, numa_node);
203 EXPORT_PER_CPU_SYMBOL(numa_node);
204 #endif
205 
206 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
207 
208 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
209 /*
210  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
211  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
212  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
213  * defined in <linux/topology.h>.
214  */
215 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
216 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
217 #endif
218 
219 static DEFINE_MUTEX(pcpu_drain_mutex);
220 
221 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
222 volatile unsigned long latent_entropy __latent_entropy;
223 EXPORT_SYMBOL(latent_entropy);
224 #endif
225 
226 /*
227  * Array of node states.
228  */
229 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
230 	[N_POSSIBLE] = NODE_MASK_ALL,
231 	[N_ONLINE] = { { [0] = 1UL } },
232 #ifndef CONFIG_NUMA
233 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
234 #ifdef CONFIG_HIGHMEM
235 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
236 #endif
237 	[N_MEMORY] = { { [0] = 1UL } },
238 	[N_CPU] = { { [0] = 1UL } },
239 #endif	/* NUMA */
240 };
241 EXPORT_SYMBOL(node_states);
242 
243 atomic_long_t _totalram_pages __read_mostly;
244 EXPORT_SYMBOL(_totalram_pages);
245 unsigned long totalreserve_pages __read_mostly;
246 unsigned long totalcma_pages __read_mostly;
247 
248 int percpu_pagelist_high_fraction;
249 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
250 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
251 EXPORT_SYMBOL(init_on_alloc);
252 
253 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
254 EXPORT_SYMBOL(init_on_free);
255 
256 /* perform sanity checks on struct pages being allocated or freed */
257 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
258 
259 static bool _init_on_alloc_enabled_early __read_mostly
260 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
261 static int __init early_init_on_alloc(char *buf)
262 {
263 
264 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
265 }
266 early_param("init_on_alloc", early_init_on_alloc);
267 
268 static bool _init_on_free_enabled_early __read_mostly
269 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
270 static int __init early_init_on_free(char *buf)
271 {
272 	return kstrtobool(buf, &_init_on_free_enabled_early);
273 }
274 early_param("init_on_free", early_init_on_free);
275 
276 /*
277  * A cached value of the page's pageblock's migratetype, used when the page is
278  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
279  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
280  * Also the migratetype set in the page does not necessarily match the pcplist
281  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
282  * other index - this ensures that it will be put on the correct CMA freelist.
283  */
284 static inline int get_pcppage_migratetype(struct page *page)
285 {
286 	return page->index;
287 }
288 
289 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
290 {
291 	page->index = migratetype;
292 }
293 
294 #ifdef CONFIG_PM_SLEEP
295 /*
296  * The following functions are used by the suspend/hibernate code to temporarily
297  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
298  * while devices are suspended.  To avoid races with the suspend/hibernate code,
299  * they should always be called with system_transition_mutex held
300  * (gfp_allowed_mask also should only be modified with system_transition_mutex
301  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
302  * with that modification).
303  */
304 
305 static gfp_t saved_gfp_mask;
306 
307 void pm_restore_gfp_mask(void)
308 {
309 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
310 	if (saved_gfp_mask) {
311 		gfp_allowed_mask = saved_gfp_mask;
312 		saved_gfp_mask = 0;
313 	}
314 }
315 
316 void pm_restrict_gfp_mask(void)
317 {
318 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
319 	WARN_ON(saved_gfp_mask);
320 	saved_gfp_mask = gfp_allowed_mask;
321 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
322 }
323 
324 bool pm_suspended_storage(void)
325 {
326 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
327 		return false;
328 	return true;
329 }
330 #endif /* CONFIG_PM_SLEEP */
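
/*
 * Illustrative call sequence (a sketch, not verbatim hibernation code):
 * the suspend/hibernate core brackets the phase that must not issue I/O
 * roughly as
 *
 *	mutex_lock(&system_transition_mutex);
 *	pm_restrict_gfp_mask();		(clears __GFP_IO | __GFP_FS)
 *	... suspend devices, write the hibernation image ...
 *	pm_restore_gfp_mask();
 *	mutex_unlock(&system_transition_mutex);
 */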
331 
332 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
333 unsigned int pageblock_order __read_mostly;
334 #endif
335 
336 static void __free_pages_ok(struct page *page, unsigned int order,
337 			    fpi_t fpi_flags);
338 
339 /*
340  * Results with 256, 32 in the lowmem_reserve sysctl:
341  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
342  *	which evaluates to (16M dma, 784M normal, 224M high)
343  *	NORMAL allocation will leave 784M/256 of RAM reserved in ZONE_DMA
344  *	HIGHMEM allocation will leave 224M/32 of RAM reserved in ZONE_NORMAL
345  *	HIGHMEM allocation will leave (224M+784M)/256 of RAM reserved in ZONE_DMA
346  *
347  * TBD: should special case ZONE_DMA32 machines here - in those we normally
348  * don't need any ZONE_NORMAL reservation
349  */
350 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
351 #ifdef CONFIG_ZONE_DMA
352 	[ZONE_DMA] = 256,
353 #endif
354 #ifdef CONFIG_ZONE_DMA32
355 	[ZONE_DMA32] = 256,
356 #endif
357 	[ZONE_NORMAL] = 32,
358 #ifdef CONFIG_HIGHMEM
359 	[ZONE_HIGHMEM] = 0,
360 #endif
361 	[ZONE_MOVABLE] = 0,
362 };
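
/*
 * Worked example (illustrative): the protection of a lower zone is the
 * number of managed pages in the zones above it divided by its ratio.
 * With the defaults above, 784M of ZONE_NORMAL sitting above ZONE_DMA
 * keeps roughly 784M/256 ~= 3M of ZONE_DMA out of reach of GFP_KERNEL
 * allocations; a ratio of 1 would protect the whole zone.
 */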
363 
364 static char * const zone_names[MAX_NR_ZONES] = {
365 #ifdef CONFIG_ZONE_DMA
366 	 "DMA",
367 #endif
368 #ifdef CONFIG_ZONE_DMA32
369 	 "DMA32",
370 #endif
371 	 "Normal",
372 #ifdef CONFIG_HIGHMEM
373 	 "HighMem",
374 #endif
375 	 "Movable",
376 #ifdef CONFIG_ZONE_DEVICE
377 	 "Device",
378 #endif
379 };
380 
381 const char * const migratetype_names[MIGRATE_TYPES] = {
382 	"Unmovable",
383 	"Movable",
384 	"Reclaimable",
385 	"HighAtomic",
386 #ifdef CONFIG_CMA
387 	"CMA",
388 #endif
389 #ifdef CONFIG_MEMORY_ISOLATION
390 	"Isolate",
391 #endif
392 };
393 
394 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
395 	[NULL_COMPOUND_DTOR] = NULL,
396 	[COMPOUND_PAGE_DTOR] = free_compound_page,
397 #ifdef CONFIG_HUGETLB_PAGE
398 	[HUGETLB_PAGE_DTOR] = free_huge_page,
399 #endif
400 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
401 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
402 #endif
403 };
404 
405 int min_free_kbytes = 1024;
406 int user_min_free_kbytes = -1;
407 int watermark_boost_factor __read_mostly = 15000;
408 int watermark_scale_factor = 10;
409 
410 static unsigned long nr_kernel_pages __initdata;
411 static unsigned long nr_all_pages __initdata;
412 static unsigned long dma_reserve __initdata;
413 
414 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
415 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
416 static unsigned long required_kernelcore __initdata;
417 static unsigned long required_kernelcore_percent __initdata;
418 static unsigned long required_movablecore __initdata;
419 static unsigned long required_movablecore_percent __initdata;
420 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
421 bool mirrored_kernelcore __initdata_memblock;
422 
423 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
424 int movable_zone;
425 EXPORT_SYMBOL(movable_zone);
426 
427 #if MAX_NUMNODES > 1
428 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
429 unsigned int nr_online_nodes __read_mostly = 1;
430 EXPORT_SYMBOL(nr_node_ids);
431 EXPORT_SYMBOL(nr_online_nodes);
432 #endif
433 
434 int page_group_by_mobility_disabled __read_mostly;
435 
436 bool deferred_struct_pages __meminitdata;
437 
438 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
439 /*
440  * During boot we initialize deferred pages on-demand, as needed, but once
441  * page_alloc_init_late() has finished, the deferred pages are all initialized,
442  * and we can permanently disable that path.
443  */
444 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
445 
446 static inline bool deferred_pages_enabled(void)
447 {
448 	return static_branch_unlikely(&deferred_pages);
449 }
450 
451 /* Returns true if the struct page for the pfn is initialised */
452 static inline bool __meminit early_page_initialised(unsigned long pfn)
453 {
454 	int nid = early_pfn_to_nid(pfn);
455 
456 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
457 		return false;
458 
459 	return true;
460 }
461 
462 /*
463  * Returns true when the remaining initialisation should be deferred until
464  * later in the boot cycle when it can be parallelised.
465  */
466 static bool __meminit
467 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
468 {
469 	static unsigned long prev_end_pfn, nr_initialised;
470 
471 	if (early_page_ext_enabled())
472 		return false;
473 	/*
474 	 * The prev_end_pfn static contains the end of the previous zone.
475 	 * No locking is needed: this is called very early in boot, before smp_init.
476 	 */
477 	if (prev_end_pfn != end_pfn) {
478 		prev_end_pfn = end_pfn;
479 		nr_initialised = 0;
480 	}
481 
482 	/* Always populate low zones for address-constrained allocations */
483 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
484 		return false;
485 
486 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
487 		return true;
488 	/*
489 	 * We start only with one section of pages, more pages are added as
490 	 * needed until the rest of deferred pages are initialized.
491 	 */
492 	nr_initialised++;
493 	if ((nr_initialised > PAGES_PER_SECTION) &&
494 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
495 		NODE_DATA(nid)->first_deferred_pfn = pfn;
496 		return true;
497 	}
498 	return false;
499 }
500 #else
501 static inline bool deferred_pages_enabled(void)
502 {
503 	return false;
504 }
505 
506 static inline bool early_page_initialised(unsigned long pfn)
507 {
508 	return true;
509 }
510 
511 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
512 {
513 	return false;
514 }
515 #endif
516 
517 /* Return a pointer to the bitmap storing bits affecting a block of pages */
518 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
519 							unsigned long pfn)
520 {
521 #ifdef CONFIG_SPARSEMEM
522 	return section_to_usemap(__pfn_to_section(pfn));
523 #else
524 	return page_zone(page)->pageblock_flags;
525 #endif /* CONFIG_SPARSEMEM */
526 }
527 
528 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
529 {
530 #ifdef CONFIG_SPARSEMEM
531 	pfn &= (PAGES_PER_SECTION-1);
532 #else
533 	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
534 #endif /* CONFIG_SPARSEMEM */
535 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
536 }
537 
538 static __always_inline
539 unsigned long __get_pfnblock_flags_mask(const struct page *page,
540 					unsigned long pfn,
541 					unsigned long mask)
542 {
543 	unsigned long *bitmap;
544 	unsigned long bitidx, word_bitidx;
545 	unsigned long word;
546 
547 	bitmap = get_pageblock_bitmap(page, pfn);
548 	bitidx = pfn_to_bitidx(page, pfn);
549 	word_bitidx = bitidx / BITS_PER_LONG;
550 	bitidx &= (BITS_PER_LONG-1);
551 	/*
552 	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
553 	 * a consistent read of the memory array, so that results, even though
554 	 * racy, are not corrupted.
555 	 */
556 	word = READ_ONCE(bitmap[word_bitidx]);
557 	return (word >> bitidx) & mask;
558 }
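
/*
 * Worked example (illustrative, assuming pageblock_order == 9 and 64-bit
 * longs): a pfn at offset 0x2400 within its section gives
 * bitidx = (0x2400 >> 9) * NR_PAGEBLOCK_BITS = 18 * 4 = 72, hence
 * word_bitidx = 1 and bitidx = 8, so the migratetype is read from the
 * low bits of (bitmap[1] >> 8).
 */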
559 
560 /**
561  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
562  * @page: The page within the block of interest
563  * @pfn: The target page frame number
564  * @mask: mask of bits that the caller is interested in
565  *
566  * Return: pageblock_bits flags
567  */
568 unsigned long get_pfnblock_flags_mask(const struct page *page,
569 					unsigned long pfn, unsigned long mask)
570 {
571 	return __get_pfnblock_flags_mask(page, pfn, mask);
572 }
573 
574 static __always_inline int get_pfnblock_migratetype(const struct page *page,
575 					unsigned long pfn)
576 {
577 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
578 }
579 
580 /**
581  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
582  * @page: The page within the block of interest
583  * @flags: The flags to set
584  * @pfn: The target page frame number
585  * @mask: mask of bits that the caller is interested in
586  */
587 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
588 					unsigned long pfn,
589 					unsigned long mask)
590 {
591 	unsigned long *bitmap;
592 	unsigned long bitidx, word_bitidx;
593 	unsigned long word;
594 
595 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
596 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
597 
598 	bitmap = get_pageblock_bitmap(page, pfn);
599 	bitidx = pfn_to_bitidx(page, pfn);
600 	word_bitidx = bitidx / BITS_PER_LONG;
601 	bitidx &= (BITS_PER_LONG-1);
602 
603 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
604 
605 	mask <<= bitidx;
606 	flags <<= bitidx;
607 
608 	word = READ_ONCE(bitmap[word_bitidx]);
609 	do {
610 	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
611 }
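
/*
 * Illustrative expansion (a sketch, assuming the 3-bit migratetype mask):
 * storing MIGRATE_MOVABLE (1) at bitidx 8 becomes an atomic
 * read-modify-write of a single bitmap word,
 *
 *	new = (old & ~(0x7UL << 8)) | (1UL << 8);
 *
 * retried by try_cmpxchg() until no concurrent updater has changed the
 * word underneath us.
 */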
612 
613 void set_pageblock_migratetype(struct page *page, int migratetype)
614 {
615 	if (unlikely(page_group_by_mobility_disabled &&
616 		     migratetype < MIGRATE_PCPTYPES))
617 		migratetype = MIGRATE_UNMOVABLE;
618 
619 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
620 				page_to_pfn(page), MIGRATETYPE_MASK);
621 }
622 
623 #ifdef CONFIG_DEBUG_VM
624 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
625 {
626 	int ret = 0;
627 	unsigned seq;
628 	unsigned long pfn = page_to_pfn(page);
629 	unsigned long sp, start_pfn;
630 
631 	do {
632 		seq = zone_span_seqbegin(zone);
633 		start_pfn = zone->zone_start_pfn;
634 		sp = zone->spanned_pages;
635 		if (!zone_spans_pfn(zone, pfn))
636 			ret = 1;
637 	} while (zone_span_seqretry(zone, seq));
638 
639 	if (ret)
640 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
641 			pfn, zone_to_nid(zone), zone->name,
642 			start_pfn, start_pfn + sp);
643 
644 	return ret;
645 }
646 
647 static int page_is_consistent(struct zone *zone, struct page *page)
648 {
649 	if (zone != page_zone(page))
650 		return 0;
651 
652 	return 1;
653 }
654 /*
655  * Temporary debugging check for pages not lying within a given zone.
656  */
657 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
658 {
659 	if (page_outside_zone_boundaries(zone, page))
660 		return 1;
661 	if (!page_is_consistent(zone, page))
662 		return 1;
663 
664 	return 0;
665 }
666 #else
667 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
668 {
669 	return 0;
670 }
671 #endif
672 
673 static void bad_page(struct page *page, const char *reason)
674 {
675 	static unsigned long resume;
676 	static unsigned long nr_shown;
677 	static unsigned long nr_unshown;
678 
679 	/*
680 	 * Allow a burst of 60 reports, then keep quiet for that minute;
681 	 * or allow a steady drip of one report per second.
682 	 */
683 	if (nr_shown == 60) {
684 		if (time_before(jiffies, resume)) {
685 			nr_unshown++;
686 			goto out;
687 		}
688 		if (nr_unshown) {
689 			pr_alert(
690 			      "BUG: Bad page state: %lu messages suppressed\n",
691 				nr_unshown);
692 			nr_unshown = 0;
693 		}
694 		nr_shown = 0;
695 	}
696 	if (nr_shown++ == 0)
697 		resume = jiffies + 60 * HZ;
698 
699 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
700 		current->comm, page_to_pfn(page));
701 	dump_page(page, reason);
702 
703 	print_modules();
704 	dump_stack();
705 out:
706 	/* Leave bad fields for debug, except PageBuddy could make trouble */
707 	page_mapcount_reset(page); /* remove PageBuddy */
708 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
709 }
710 
711 static inline unsigned int order_to_pindex(int migratetype, int order)
712 {
713 	int base = order;
714 
715 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
716 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
717 		VM_BUG_ON(order != pageblock_order);
718 		return NR_LOWORDER_PCP_LISTS;
719 	}
720 #else
721 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
722 #endif
723 
724 	return (MIGRATE_PCPTYPES * base) + migratetype;
725 }
726 
727 static inline int pindex_to_order(unsigned int pindex)
728 {
729 	int order = pindex / MIGRATE_PCPTYPES;
730 
731 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
732 	if (pindex == NR_LOWORDER_PCP_LISTS)
733 		order = pageblock_order;
734 #else
735 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
736 #endif
737 
738 	return order;
739 }
740 
741 static inline bool pcp_allowed_order(unsigned int order)
742 {
743 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
744 		return true;
745 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
746 	if (order == pageblock_order)
747 		return true;
748 #endif
749 	return false;
750 }
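
/*
 * Worked example (illustrative, assuming MIGRATE_PCPTYPES == 3): an
 * order-2 MIGRATE_MOVABLE (1) page maps to pindex 3 * 2 + 1 = 7, and
 * pindex_to_order(7) recovers order 7 / 3 = 2. THP-sized pages instead
 * all share the single NR_LOWORDER_PCP_LISTS slot.
 */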
751 
752 static inline void free_the_page(struct page *page, unsigned int order)
753 {
754 	if (pcp_allowed_order(order))		/* Via pcp? */
755 		free_unref_page(page, order);
756 	else
757 		__free_pages_ok(page, order, FPI_NONE);
758 }
759 
760 /*
761  * Higher-order pages are called "compound pages".  They are structured thusly:
762  *
763  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
764  *
765  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
766  * in bit 0 of page->compound_head. The rest of the bits point to the head page.
767  *
768  * The first tail page's ->compound_dtor holds the offset into the array of compound
769  * page destructors. See compound_page_dtors.
770  *
771  * The first tail page's ->compound_order holds the order of allocation.
772  * This usage means that zero-order pages may not be compound.
773  */
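
/*
 * Illustrative layout (a sketch): for an order-2 compound page "head",
 * each of head[1..3] stores
 *
 *	compound_head == (unsigned long)head | 1
 *
 * so PageTail() only tests bit 0, and compound_head() masks that bit
 * off to recover the head page.
 */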
774 
775 void free_compound_page(struct page *page)
776 {
777 	mem_cgroup_uncharge(page_folio(page));
778 	free_the_page(page, compound_order(page));
779 }
780 
781 static void prep_compound_head(struct page *page, unsigned int order)
782 {
783 	struct folio *folio = (struct folio *)page;
784 
785 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
786 	set_compound_order(page, order);
787 	atomic_set(&folio->_entire_mapcount, -1);
788 	atomic_set(&folio->_nr_pages_mapped, 0);
789 	atomic_set(&folio->_pincount, 0);
790 }
791 
792 static void prep_compound_tail(struct page *head, int tail_idx)
793 {
794 	struct page *p = head + tail_idx;
795 
796 	p->mapping = TAIL_MAPPING;
797 	set_compound_head(p, head);
798 	set_page_private(p, 0);
799 }
800 
801 void prep_compound_page(struct page *page, unsigned int order)
802 {
803 	int i;
804 	int nr_pages = 1 << order;
805 
806 	__SetPageHead(page);
807 	for (i = 1; i < nr_pages; i++)
808 		prep_compound_tail(page, i);
809 
810 	prep_compound_head(page, order);
811 }
812 
813 void destroy_large_folio(struct folio *folio)
814 {
815 	enum compound_dtor_id dtor = folio->_folio_dtor;
816 
817 	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
818 	compound_page_dtors[dtor](&folio->page);
819 }
820 
821 #ifdef CONFIG_DEBUG_PAGEALLOC
822 unsigned int _debug_guardpage_minorder;
823 
824 bool _debug_pagealloc_enabled_early __read_mostly
825 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
826 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
827 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
828 EXPORT_SYMBOL(_debug_pagealloc_enabled);
829 
830 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
831 
832 static int __init early_debug_pagealloc(char *buf)
833 {
834 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
835 }
836 early_param("debug_pagealloc", early_debug_pagealloc);
837 
838 static int __init debug_guardpage_minorder_setup(char *buf)
839 {
840 	unsigned long res;
841 
842 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
843 		pr_err("Bad debug_guardpage_minorder value\n");
844 		return 0;
845 	}
846 	_debug_guardpage_minorder = res;
847 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
848 	return 0;
849 }
850 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
851 
852 static inline bool set_page_guard(struct zone *zone, struct page *page,
853 				unsigned int order, int migratetype)
854 {
855 	if (!debug_guardpage_enabled())
856 		return false;
857 
858 	if (order >= debug_guardpage_minorder())
859 		return false;
860 
861 	__SetPageGuard(page);
862 	INIT_LIST_HEAD(&page->buddy_list);
863 	set_page_private(page, order);
864 	/* Guard pages are not available for any usage */
865 	if (!is_migrate_isolate(migratetype))
866 		__mod_zone_freepage_state(zone, -(1 << order), migratetype);
867 
868 	return true;
869 }
870 
871 static inline void clear_page_guard(struct zone *zone, struct page *page,
872 				unsigned int order, int migratetype)
873 {
874 	if (!debug_guardpage_enabled())
875 		return;
876 
877 	__ClearPageGuard(page);
878 
879 	set_page_private(page, 0);
880 	if (!is_migrate_isolate(migratetype))
881 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
882 }
883 #else
884 static inline bool set_page_guard(struct zone *zone, struct page *page,
885 			unsigned int order, int migratetype) { return false; }
886 static inline void clear_page_guard(struct zone *zone, struct page *page,
887 				unsigned int order, int migratetype) {}
888 #endif
889 
890 /*
891  * Enable static keys related to various memory debugging and hardening options.
892  * Some override others, and depend on early params that are evaluated in the
893  * order of appearance. So we need to first gather the full picture of what was
894  * enabled, and then make decisions.
895  */
896 void __init init_mem_debugging_and_hardening(void)
897 {
898 	bool page_poisoning_requested = false;
899 	bool want_check_pages = false;
900 
901 #ifdef CONFIG_PAGE_POISONING
902 	/*
903 	 * Page poisoning takes the place of debug page alloc on some arches.
904 	 * If either of those options is enabled, enable poisoning.
905 	 */
906 	if (page_poisoning_enabled() ||
907 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
908 	      debug_pagealloc_enabled())) {
909 		static_branch_enable(&_page_poisoning_enabled);
910 		page_poisoning_requested = true;
911 		want_check_pages = true;
912 	}
913 #endif
914 
915 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
916 	    page_poisoning_requested) {
917 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
918 			"will take precedence over init_on_alloc and init_on_free\n");
919 		_init_on_alloc_enabled_early = false;
920 		_init_on_free_enabled_early = false;
921 	}
922 
923 	if (_init_on_alloc_enabled_early) {
924 		want_check_pages = true;
925 		static_branch_enable(&init_on_alloc);
926 	} else {
927 		static_branch_disable(&init_on_alloc);
928 	}
929 
930 	if (_init_on_free_enabled_early) {
931 		want_check_pages = true;
932 		static_branch_enable(&init_on_free);
933 	} else {
934 		static_branch_disable(&init_on_free);
935 	}
936 
937 	if (IS_ENABLED(CONFIG_KMSAN) &&
938 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
939 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
940 
941 #ifdef CONFIG_DEBUG_PAGEALLOC
942 	if (debug_pagealloc_enabled()) {
943 		want_check_pages = true;
944 		static_branch_enable(&_debug_pagealloc_enabled);
945 
946 		if (debug_guardpage_minorder())
947 			static_branch_enable(&_debug_guardpage_enabled);
948 	}
949 #endif
950 
951 	/*
952 	 * Any page debugging or hardening option also enables sanity checking
953 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
954 	 * enabled already.
955 	 */
956 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
957 		static_branch_enable(&check_pages_enabled);
958 }
959 
960 static inline void set_buddy_order(struct page *page, unsigned int order)
961 {
962 	set_page_private(page, order);
963 	__SetPageBuddy(page);
964 }
965 
966 #ifdef CONFIG_COMPACTION
967 static inline struct capture_control *task_capc(struct zone *zone)
968 {
969 	struct capture_control *capc = current->capture_control;
970 
971 	return unlikely(capc) &&
972 		!(current->flags & PF_KTHREAD) &&
973 		!capc->page &&
974 		capc->cc->zone == zone ? capc : NULL;
975 }
976 
977 static inline bool
978 compaction_capture(struct capture_control *capc, struct page *page,
979 		   int order, int migratetype)
980 {
981 	if (!capc || order != capc->cc->order)
982 		return false;
983 
984 	/* Do not accidentally pollute CMA or isolated regions */
985 	if (is_migrate_cma(migratetype) ||
986 	    is_migrate_isolate(migratetype))
987 		return false;
988 
989 	/*
990 	 * Do not let lower order allocations pollute a movable pageblock.
991 	 * This might let an unmovable request use a reclaimable pageblock
992 	 * and vice-versa but no more than normal fallback logic which can
993 	 * have trouble finding a high-order free page.
994 	 */
995 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
996 		return false;
997 
998 	capc->page = page;
999 	return true;
1000 }
1001 
1002 #else
1003 static inline struct capture_control *task_capc(struct zone *zone)
1004 {
1005 	return NULL;
1006 }
1007 
1008 static inline bool
1009 compaction_capture(struct capture_control *capc, struct page *page,
1010 		   int order, int migratetype)
1011 {
1012 	return false;
1013 }
1014 #endif /* CONFIG_COMPACTION */
1015 
1016 /* Used for pages not on another list */
1017 static inline void add_to_free_list(struct page *page, struct zone *zone,
1018 				    unsigned int order, int migratetype)
1019 {
1020 	struct free_area *area = &zone->free_area[order];
1021 
1022 	list_add(&page->buddy_list, &area->free_list[migratetype]);
1023 	area->nr_free++;
1024 }
1025 
1026 /* Used for pages not on another list */
1027 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
1028 					 unsigned int order, int migratetype)
1029 {
1030 	struct free_area *area = &zone->free_area[order];
1031 
1032 	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
1033 	area->nr_free++;
1034 }
1035 
1036 /*
1037  * Used for pages which are on another list. Move the pages to the tail
1038  * of the list - so the moved pages won't immediately be considered for
1039  * allocation again (e.g., optimization for memory onlining).
1040  */
1041 static inline void move_to_free_list(struct page *page, struct zone *zone,
1042 				     unsigned int order, int migratetype)
1043 {
1044 	struct free_area *area = &zone->free_area[order];
1045 
1046 	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
1047 }
1048 
1049 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
1050 					   unsigned int order)
1051 {
1052 	/* clear reported state and update reported page count */
1053 	if (page_reported(page))
1054 		__ClearPageReported(page);
1055 
1056 	list_del(&page->buddy_list);
1057 	__ClearPageBuddy(page);
1058 	set_page_private(page, 0);
1059 	zone->free_area[order].nr_free--;
1060 }
1061 
1062 /*
1063  * If this is not the largest possible page, check if the buddy
1064  * of the next-highest order is free. If it is, it's possible
1065  * that pages are being freed that will coalesce soon. In case that is
1066  * happening, add the free page to the tail of the list so it's less
1067  * likely to be used soon and more likely to be merged as a higher
1068  * order page.
1069  */
1070 static inline bool
1071 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1072 		   struct page *page, unsigned int order)
1073 {
1074 	unsigned long higher_page_pfn;
1075 	struct page *higher_page;
1076 
1077 	if (order >= MAX_ORDER - 2)
1078 		return false;
1079 
1080 	higher_page_pfn = buddy_pfn & pfn;
1081 	higher_page = page + (higher_page_pfn - pfn);
1082 
1083 	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
1084 			NULL) != NULL;
1085 }
1086 
1087 /*
1088  * Freeing function for a buddy system allocator.
1089  *
1090  * The concept of a buddy system is to maintain a direct-mapped table
1091  * (containing bit values) for memory blocks of various "orders".
1092  * The bottom level table contains the map for the smallest allocatable
1093  * units of memory (here, pages), and each level above it describes
1094  * pairs of units from the levels below, hence, "buddies".
1095  * At a high level, all that happens here is marking the table entry
1096  * at the bottom level available, and propagating the changes upward
1097  * as necessary, plus some accounting needed to play nicely with other
1098  * parts of the VM system.
1099  * At each level, we keep a list of pages, which are heads of contiguous
1100  * runs of free pages of length (1 << order), marked with PageBuddy.
1101  * A page's order is recorded in the page_private(page) field.
1102  * So when we are allocating or freeing one, we can derive the state of the
1103  * other.  That is, if we allocate a small block, and both were
1104  * free, the remainder of the region must be split into blocks.
1105  * If a block is freed, and its buddy is also free, then this
1106  * triggers coalescing into a block of larger size.
1107  *
1108  * -- nyc
1109  */
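
/*
 * Worked example (illustrative): freeing order-0 pfn 0x1401 checks its
 * buddy at pfn 0x1401 ^ (1 << 0) = 0x1400. If that buddy is free, the
 * pair merges into an order-1 block at combined_pfn = 0x1400 & 0x1401
 * = 0x1400, whose own buddy is then 0x1400 ^ (1 << 1) = 0x1402, and so
 * on up the orders.
 */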
1110 
1111 static inline void __free_one_page(struct page *page,
1112 		unsigned long pfn,
1113 		struct zone *zone, unsigned int order,
1114 		int migratetype, fpi_t fpi_flags)
1115 {
1116 	struct capture_control *capc = task_capc(zone);
1117 	unsigned long buddy_pfn = 0;
1118 	unsigned long combined_pfn;
1119 	struct page *buddy;
1120 	bool to_tail;
1121 
1122 	VM_BUG_ON(!zone_is_initialized(zone));
1123 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1124 
1125 	VM_BUG_ON(migratetype == -1);
1126 	if (likely(!is_migrate_isolate(migratetype)))
1127 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1128 
1129 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1130 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1131 
1132 	while (order < MAX_ORDER - 1) {
1133 		if (compaction_capture(capc, page, order, migratetype)) {
1134 			__mod_zone_freepage_state(zone, -(1 << order),
1135 								migratetype);
1136 			return;
1137 		}
1138 
1139 		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
1140 		if (!buddy)
1141 			goto done_merging;
1142 
1143 		if (unlikely(order >= pageblock_order)) {
1144 			/*
1145 			 * We want to prevent merging between freepages in a pageblock
1146 			 * without fallbacks and a normal pageblock. Without this,
1147 			 * pageblock isolation could cause incorrect freepage or CMA
1148 			 * accounting or HIGHATOMIC accounting.
1149 			 */
1150 			int buddy_mt = get_pageblock_migratetype(buddy);
1151 
1152 			if (migratetype != buddy_mt
1153 					&& (!migratetype_is_mergeable(migratetype) ||
1154 						!migratetype_is_mergeable(buddy_mt)))
1155 				goto done_merging;
1156 		}
1157 
1158 		/*
1159 		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
1160 		 * merge with it and move up one order.
1161 		 */
1162 		if (page_is_guard(buddy))
1163 			clear_page_guard(zone, buddy, order, migratetype);
1164 		else
1165 			del_page_from_free_list(buddy, zone, order);
1166 		combined_pfn = buddy_pfn & pfn;
1167 		page = page + (combined_pfn - pfn);
1168 		pfn = combined_pfn;
1169 		order++;
1170 	}
1171 
1172 done_merging:
1173 	set_buddy_order(page, order);
1174 
1175 	if (fpi_flags & FPI_TO_TAIL)
1176 		to_tail = true;
1177 	else if (is_shuffle_order(order))
1178 		to_tail = shuffle_pick_tail();
1179 	else
1180 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1181 
1182 	if (to_tail)
1183 		add_to_free_list_tail(page, zone, order, migratetype);
1184 	else
1185 		add_to_free_list(page, zone, order, migratetype);
1186 
1187 	/* Notify page reporting subsystem of freed page */
1188 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1189 		page_reporting_notify_free(order);
1190 }
1191 
1192 /**
1193  * split_free_page() -- split a free page at split_pfn_offset
1194  * @free_page:		the original free page
1195  * @order:		the order of the page
1196  * @split_pfn_offset:	split offset within the page
1197  *
1198  * Return: -ENOENT if the free page is changed, otherwise 0.
1199  *
1200  * It is used when the free page crosses two pageblocks with different migratetypes
1201  * at split_pfn_offset within the page. The split free page will be put into
1202  * separate migratetype lists afterwards. Otherwise, the function achieves
1203  * nothing.
1204  */
1205 int split_free_page(struct page *free_page,
1206 			unsigned int order, unsigned long split_pfn_offset)
1207 {
1208 	struct zone *zone = page_zone(free_page);
1209 	unsigned long free_page_pfn = page_to_pfn(free_page);
1210 	unsigned long pfn;
1211 	unsigned long flags;
1212 	int free_page_order;
1213 	int mt;
1214 	int ret = 0;
1215 
1216 	if (split_pfn_offset == 0)
1217 		return ret;
1218 
1219 	spin_lock_irqsave(&zone->lock, flags);
1220 
1221 	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
1222 		ret = -ENOENT;
1223 		goto out;
1224 	}
1225 
1226 	mt = get_pageblock_migratetype(free_page);
1227 	if (likely(!is_migrate_isolate(mt)))
1228 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
1229 
1230 	del_page_from_free_list(free_page, zone, order);
1231 	for (pfn = free_page_pfn;
1232 	     pfn < free_page_pfn + (1UL << order);) {
1233 		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);
1234 
1235 		free_page_order = min_t(unsigned int,
1236 					pfn ? __ffs(pfn) : order,
1237 					__fls(split_pfn_offset));
1238 		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
1239 				mt, FPI_NONE);
1240 		pfn += 1UL << free_page_order;
1241 		split_pfn_offset -= (1UL << free_page_order);
1242 		/* we have done the first part, now switch to second part */
1243 		if (split_pfn_offset == 0)
1244 			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
1245 	}
1246 out:
1247 	spin_unlock_irqrestore(&zone->lock, flags);
1248 	return ret;
1249 }
1250 /*
1251  * A bad page could be due to a number of fields. Instead of multiple branches,
1252  * try to check multiple fields with a single branch. The caller must do a detailed
1253  * check if necessary.
1254  */
1255 static inline bool page_expected_state(struct page *page,
1256 					unsigned long check_flags)
1257 {
1258 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1259 		return false;
1260 
1261 	if (unlikely((unsigned long)page->mapping |
1262 			page_ref_count(page) |
1263 #ifdef CONFIG_MEMCG
1264 			page->memcg_data |
1265 #endif
1266 			(page->flags & check_flags)))
1267 		return false;
1268 
1269 	return true;
1270 }
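
/*
 * Illustrative note: the OR in page_expected_state() works because the
 * combined word is zero only when page->mapping, the refcount, the
 * memcg_data and all checked flag bits are individually zero; any
 * unexpected value makes it non-zero and fails the fast check.
 */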
1271 
1272 static const char *page_bad_reason(struct page *page, unsigned long flags)
1273 {
1274 	const char *bad_reason = NULL;
1275 
1276 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1277 		bad_reason = "nonzero mapcount";
1278 	if (unlikely(page->mapping != NULL))
1279 		bad_reason = "non-NULL mapping";
1280 	if (unlikely(page_ref_count(page) != 0))
1281 		bad_reason = "nonzero _refcount";
1282 	if (unlikely(page->flags & flags)) {
1283 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1284 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1285 		else
1286 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1287 	}
1288 #ifdef CONFIG_MEMCG
1289 	if (unlikely(page->memcg_data))
1290 		bad_reason = "page still charged to cgroup";
1291 #endif
1292 	return bad_reason;
1293 }
1294 
1295 static void free_page_is_bad_report(struct page *page)
1296 {
1297 	bad_page(page,
1298 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1299 }
1300 
1301 static inline bool free_page_is_bad(struct page *page)
1302 {
1303 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1304 		return false;
1305 
1306 	/* Something has gone sideways, find it */
1307 	free_page_is_bad_report(page);
1308 	return true;
1309 }
1310 
1311 static int free_tail_pages_check(struct page *head_page, struct page *page)
1312 {
1313 	struct folio *folio = (struct folio *)head_page;
1314 	int ret = 1;
1315 
1316 	/*
1317 	 * We rely on page->lru.next never having bit 0 set, unless the page
1318 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1319 	 */
1320 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1321 
1322 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1323 		ret = 0;
1324 		goto out;
1325 	}
1326 	switch (page - head_page) {
1327 	case 1:
1328 		/* the first tail page: these may be in place of ->mapping */
1329 		if (unlikely(folio_entire_mapcount(folio))) {
1330 			bad_page(page, "nonzero entire_mapcount");
1331 			goto out;
1332 		}
1333 		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
1334 			bad_page(page, "nonzero nr_pages_mapped");
1335 			goto out;
1336 		}
1337 		if (unlikely(atomic_read(&folio->_pincount))) {
1338 			bad_page(page, "nonzero pincount");
1339 			goto out;
1340 		}
1341 		break;
1342 	case 2:
1343 		/*
1344 		 * the second tail page: ->mapping is
1345 		 * deferred_list.next -- ignore value.
1346 		 */
1347 		break;
1348 	default:
1349 		if (page->mapping != TAIL_MAPPING) {
1350 			bad_page(page, "corrupted mapping in tail page");
1351 			goto out;
1352 		}
1353 		break;
1354 	}
1355 	if (unlikely(!PageTail(page))) {
1356 		bad_page(page, "PageTail not set");
1357 		goto out;
1358 	}
1359 	if (unlikely(compound_head(page) != head_page)) {
1360 		bad_page(page, "compound_head not consistent");
1361 		goto out;
1362 	}
1363 	ret = 0;
1364 out:
1365 	page->mapping = NULL;
1366 	clear_compound_head(page);
1367 	return ret;
1368 }
1369 
1370 /*
1371  * Skip KASAN memory poisoning when either:
1372  *
1373  * 1. Deferred memory initialization has not yet completed,
1374  *    see the explanation below.
1375  * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
1376  *    see the comment next to it.
1377  * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
1378  *    see the comment next to it.
1379  * 4. The allocation is excluded from being checked due to sampling,
1380  *    see the call to kasan_unpoison_pages.
1381  *
1382  * Poisoning pages during deferred memory init will greatly lengthen the
1383  * process and cause problems in large memory systems as the deferred pages
1384  * initialization is done with interrupts disabled.
1385  *
1386  * Assuming that there will be no reference to those newly initialized
1387  * pages before they are ever allocated, this should have no effect on
1388  * KASAN memory tracking as the poison will be properly inserted at page
1389  * allocation time. The only corner case is when pages are allocated by
1390  * on-demand allocation and then freed again before the deferred pages
1391  * initialization is done, but this is not likely to happen.
1392  */
1393 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
1394 {
1395 	return deferred_pages_enabled() ||
1396 	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
1397 		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
1398 	       PageSkipKASanPoison(page);
1399 }
1400 
1401 static void kernel_init_pages(struct page *page, int numpages)
1402 {
1403 	int i;
1404 
1405 	/* s390's use of memset() could override KASAN redzones. */
1406 	kasan_disable_current();
1407 	for (i = 0; i < numpages; i++)
1408 		clear_highpage_kasan_tagged(page + i);
1409 	kasan_enable_current();
1410 }
1411 
1412 static __always_inline bool free_pages_prepare(struct page *page,
1413 			unsigned int order, fpi_t fpi_flags)
1414 {
1415 	int bad = 0;
1416 	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1417 	bool init = want_init_on_free();
1418 
1419 	VM_BUG_ON_PAGE(PageTail(page), page);
1420 
1421 	trace_mm_page_free(page, order);
1422 	kmsan_free_page(page, order);
1423 
1424 	if (unlikely(PageHWPoison(page)) && !order) {
1425 		/*
1426 		 * Do not let hwpoison pages hit pcplists/buddy
1427 		 * Untie memcg state and reset page's owner
1428 		 */
1429 		if (memcg_kmem_online() && PageMemcgKmem(page))
1430 			__memcg_kmem_uncharge_page(page, order);
1431 		reset_page_owner(page, order);
1432 		page_table_check_free(page, order);
1433 		return false;
1434 	}
1435 
1436 	/*
1437 	 * Check tail pages before head page information is cleared to
1438 	 * avoid checking PageCompound for order-0 pages.
1439 	 */
1440 	if (unlikely(order)) {
1441 		bool compound = PageCompound(page);
1442 		int i;
1443 
1444 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1445 
1446 		if (compound)
1447 			ClearPageHasHWPoisoned(page);
1448 		for (i = 1; i < (1 << order); i++) {
1449 			if (compound)
1450 				bad += free_tail_pages_check(page, page + i);
1451 			if (static_branch_unlikely(&check_pages_enabled)) {
1452 				if (unlikely(free_page_is_bad(page + i))) {
1453 					bad++;
1454 					continue;
1455 				}
1456 			}
1457 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1458 		}
1459 	}
1460 	if (PageMappingFlags(page))
1461 		page->mapping = NULL;
1462 	if (memcg_kmem_online() && PageMemcgKmem(page))
1463 		__memcg_kmem_uncharge_page(page, order);
1464 	if (static_branch_unlikely(&check_pages_enabled)) {
1465 		if (free_page_is_bad(page))
1466 			bad++;
1467 		if (bad)
1468 			return false;
1469 	}
1470 
1471 	page_cpupid_reset_last(page);
1472 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1473 	reset_page_owner(page, order);
1474 	page_table_check_free(page, order);
1475 
1476 	if (!PageHighMem(page)) {
1477 		debug_check_no_locks_freed(page_address(page),
1478 					   PAGE_SIZE << order);
1479 		debug_check_no_obj_freed(page_address(page),
1480 					   PAGE_SIZE << order);
1481 	}
1482 
1483 	kernel_poison_pages(page, 1 << order);
1484 
1485 	/*
1486 	 * As memory initialization might be integrated into KASAN,
1487 	 * KASAN poisoning and memory initialization code must be
1488 	 * kept together to avoid discrepancies in behavior.
1489 	 *
1490 	 * With hardware tag-based KASAN, memory tags must be set before the
1491 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
1492 	 */
1493 	if (!skip_kasan_poison) {
1494 		kasan_poison_pages(page, order, init);
1495 
1496 		/* Memory is already initialized if KASAN did it internally. */
1497 		if (kasan_has_integrated_init())
1498 			init = false;
1499 	}
1500 	if (init)
1501 		kernel_init_pages(page, 1 << order);
1502 
1503 	/*
1504 	 * arch_free_page() can make the page's contents inaccessible.  s390
1505 	 * does this.  So nothing which can access the page's contents should
1506 	 * happen after this.
1507 	 */
1508 	arch_free_page(page, order);
1509 
1510 	debug_pagealloc_unmap_pages(page, 1 << order);
1511 
1512 	return true;
1513 }
1514 
1515 /*
1516  * Frees a number of pages from the PCP lists.
1517  * Assumes all pages on the list are in the same zone.
1518  * count is the number of pages to free.
1519  */
1520 static void free_pcppages_bulk(struct zone *zone, int count,
1521 					struct per_cpu_pages *pcp,
1522 					int pindex)
1523 {
1524 	unsigned long flags;
1525 	int min_pindex = 0;
1526 	int max_pindex = NR_PCP_LISTS - 1;
1527 	unsigned int order;
1528 	bool isolated_pageblocks;
1529 	struct page *page;
1530 
1531 	/*
1532 	 * Ensure a proper count is passed; otherwise we would get stuck in
1533 	 * the while (list_empty(list)) loop below.
1534 	 */
1535 	count = min(pcp->count, count);
1536 
1537 	/* Ensure requested pindex is drained first. */
1538 	pindex = pindex - 1;
1539 
1540 	spin_lock_irqsave(&zone->lock, flags);
1541 	isolated_pageblocks = has_isolate_pageblock(zone);
1542 
1543 	while (count > 0) {
1544 		struct list_head *list;
1545 		int nr_pages;
1546 
1547 		/* Remove pages from lists in a round-robin fashion. */
1548 		do {
1549 			if (++pindex > max_pindex)
1550 				pindex = min_pindex;
1551 			list = &pcp->lists[pindex];
1552 			if (!list_empty(list))
1553 				break;
1554 
1555 			if (pindex == max_pindex)
1556 				max_pindex--;
1557 			if (pindex == min_pindex)
1558 				min_pindex++;
1559 		} while (1);
1560 
1561 		order = pindex_to_order(pindex);
1562 		nr_pages = 1 << order;
1563 		do {
1564 			int mt;
1565 
1566 			page = list_last_entry(list, struct page, pcp_list);
1567 			mt = get_pcppage_migratetype(page);
1568 
1569 			/* must delete to avoid corrupting pcp list */
1570 			list_del(&page->pcp_list);
1571 			count -= nr_pages;
1572 			pcp->count -= nr_pages;
1573 
1574 			/* MIGRATE_ISOLATE page should not go to pcplists */
1575 			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1576 			/* Pageblock could have been isolated meanwhile */
1577 			if (unlikely(isolated_pageblocks))
1578 				mt = get_pageblock_migratetype(page);
1579 
1580 			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1581 			trace_mm_page_pcpu_drain(page, order, mt);
1582 		} while (count > 0 && !list_empty(list));
1583 	}
1584 
1585 	spin_unlock_irqrestore(&zone->lock, flags);
1586 }
1587 
1588 static void free_one_page(struct zone *zone,
1589 				struct page *page, unsigned long pfn,
1590 				unsigned int order,
1591 				int migratetype, fpi_t fpi_flags)
1592 {
1593 	unsigned long flags;
1594 
1595 	spin_lock_irqsave(&zone->lock, flags);
1596 	if (unlikely(has_isolate_pageblock(zone) ||
1597 		is_migrate_isolate(migratetype))) {
1598 		migratetype = get_pfnblock_migratetype(page, pfn);
1599 	}
1600 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1601 	spin_unlock_irqrestore(&zone->lock, flags);
1602 }
1603 
1604 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1605 				unsigned long zone, int nid)
1606 {
1607 	mm_zero_struct_page(page);
1608 	set_page_links(page, zone, nid, pfn);
1609 	init_page_count(page);
1610 	page_mapcount_reset(page);
1611 	page_cpupid_reset_last(page);
1612 	page_kasan_tag_reset(page);
1613 
1614 	INIT_LIST_HEAD(&page->lru);
1615 #ifdef WANT_PAGE_VIRTUAL
1616 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1617 	if (!is_highmem_idx(zone))
1618 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1619 #endif
1620 }
1621 
1622 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1623 static void __meminit init_reserved_page(unsigned long pfn)
1624 {
1625 	pg_data_t *pgdat;
1626 	int nid, zid;
1627 
1628 	if (early_page_initialised(pfn))
1629 		return;
1630 
1631 	nid = early_pfn_to_nid(pfn);
1632 	pgdat = NODE_DATA(nid);
1633 
1634 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1635 		struct zone *zone = &pgdat->node_zones[zid];
1636 
1637 		if (zone_spans_pfn(zone, pfn))
1638 			break;
1639 	}
1640 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1641 }
1642 #else
1643 static inline void init_reserved_page(unsigned long pfn)
1644 {
1645 }
1646 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1647 
1648 /*
1649  * Initialised pages do not have PageReserved set. This function is
1650  * called for each range allocated by the bootmem allocator and
1651  * marks the pages PageReserved. The remaining valid pages are later
1652  * sent to the buddy page allocator.
1653  */
1654 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1655 {
1656 	unsigned long start_pfn = PFN_DOWN(start);
1657 	unsigned long end_pfn = PFN_UP(end);
1658 
1659 	for (; start_pfn < end_pfn; start_pfn++) {
1660 		if (pfn_valid(start_pfn)) {
1661 			struct page *page = pfn_to_page(start_pfn);
1662 
1663 			init_reserved_page(start_pfn);
1664 
1665 			/* Avoid false-positive PageTail() */
1666 			INIT_LIST_HEAD(&page->lru);
1667 
1668 			/*
1669 			 * no need for atomic set_bit because the struct
1670 			 * page is not visible yet so nobody should
1671 			 * access it yet.
1672 			 */
1673 			__SetPageReserved(page);
1674 		}
1675 	}
1676 }
1677 
1678 static void __free_pages_ok(struct page *page, unsigned int order,
1679 			    fpi_t fpi_flags)
1680 {
1681 	unsigned long flags;
1682 	int migratetype;
1683 	unsigned long pfn = page_to_pfn(page);
1684 	struct zone *zone = page_zone(page);
1685 
1686 	if (!free_pages_prepare(page, order, fpi_flags))
1687 		return;
1688 
1689 	/*
1690 	 * get_pfnblock_migratetype() is called without spin_lock_irqsave() here
1691 	 * so that the migratetype lookup does not happen under the zone lock.
1692 	 * This reduces the lock holding time.
1693 	 */
1694 	migratetype = get_pfnblock_migratetype(page, pfn);
1695 
1696 	spin_lock_irqsave(&zone->lock, flags);
1697 	if (unlikely(has_isolate_pageblock(zone) ||
1698 		is_migrate_isolate(migratetype))) {
1699 		migratetype = get_pfnblock_migratetype(page, pfn);
1700 	}
1701 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1702 	spin_unlock_irqrestore(&zone->lock, flags);
1703 
1704 	__count_vm_events(PGFREE, 1 << order);
1705 }
1706 
1707 void __free_pages_core(struct page *page, unsigned int order)
1708 {
1709 	unsigned int nr_pages = 1 << order;
1710 	struct page *p = page;
1711 	unsigned int loop;
1712 
1713 	/*
1714 	 * When initializing the memmap, __init_single_page() sets the refcount
1715 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1716 	 * refcount of all involved pages to 0.
1717 	 */
1718 	prefetchw(p);
1719 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1720 		prefetchw(p + 1);
1721 		__ClearPageReserved(p);
1722 		set_page_count(p, 0);
1723 	}
1724 	__ClearPageReserved(p);
1725 	set_page_count(p, 0);
1726 
1727 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1728 
1729 	/*
1730 	 * Bypass PCP and place fresh pages right to the tail, primarily
1731 	 * relevant for memory onlining.
1732 	 */
1733 	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1734 }
1735 
1736 #ifdef CONFIG_NUMA
1737 
1738 /*
1739  * During memory init, memblock maps pfns to nids. The search is expensive,
1740  * so this caches the most recent lookup. The implementation of
1741  * __early_pfn_to_nid treats start/end as pfns.
1742  */
1743 struct mminit_pfnnid_cache {
1744 	unsigned long last_start;
1745 	unsigned long last_end;
1746 	int last_nid;
1747 };
1748 
1749 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1750 
1751 /*
1752  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1753  */
1754 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1755 					struct mminit_pfnnid_cache *state)
1756 {
1757 	unsigned long start_pfn, end_pfn;
1758 	int nid;
1759 
1760 	if (state->last_start <= pfn && pfn < state->last_end)
1761 		return state->last_nid;
1762 
1763 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1764 	if (nid != NUMA_NO_NODE) {
1765 		state->last_start = start_pfn;
1766 		state->last_end = end_pfn;
1767 		state->last_nid = nid;
1768 	}
1769 
1770 	return nid;
1771 }
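
/*
 * Illustrative sketch of the cache behaviour (pfn values are made up):
 * a miss on pfn 0x40000 that resolves to nid 1 within the memblock
 * region [0x40000, 0x80000) records last_start/last_end/last_nid, so a
 * later lookup of pfn 0x7ffff returns nid 1 without another memblock
 * search; pfn 0x80000 misses and repopulates the cache.
 */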
1772 
1773 int __meminit early_pfn_to_nid(unsigned long pfn)
1774 {
1775 	static DEFINE_SPINLOCK(early_pfn_lock);
1776 	int nid;
1777 
1778 	spin_lock(&early_pfn_lock);
1779 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1780 	if (nid < 0)
1781 		nid = first_online_node;
1782 	spin_unlock(&early_pfn_lock);
1783 
1784 	return nid;
1785 }
1786 #endif /* CONFIG_NUMA */
1787 
1788 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1789 							unsigned int order)
1790 {
1791 	if (!early_page_initialised(pfn))
1792 		return;
1793 	if (!kmsan_memblock_free_pages(page, order)) {
1794 		/* KMSAN will take care of these pages. */
1795 		return;
1796 	}
1797 	__free_pages_core(page, order);
1798 }
1799 
1800 /*
1801  * Check that the whole pageblock (or a subset of it) given by the interval
1802  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1803  * with compaction's migration or free scanner.
1804  *
1805  * Return the struct page pointer of start_pfn, or NULL if the checks fail.
1806  *
1807  * It's possible on some configurations to have a setup like node0 node1 node0
1808  * i.e. it's possible that not all pages within a zone's range of pages
1809  * belong to a single zone. We assume that a border between node0 and node1
1810  * can occur within a single pageblock, but not a node0 node1 node0
1811  * interleaving within a single pageblock. It is therefore sufficient to check
1812  * the first and last page of a pageblock and avoid checking each individual
1813  * page in a pageblock.
1814  */
1815 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1816 				     unsigned long end_pfn, struct zone *zone)
1817 {
1818 	struct page *start_page;
1819 	struct page *end_page;
1820 
1821 	/* end_pfn is one past the range we are checking */
1822 	end_pfn--;
1823 
1824 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1825 		return NULL;
1826 
1827 	start_page = pfn_to_online_page(start_pfn);
1828 	if (!start_page)
1829 		return NULL;
1830 
1831 	if (page_zone(start_page) != zone)
1832 		return NULL;
1833 
1834 	end_page = pfn_to_page(end_pfn);
1835 
1836 	/* This gives a shorter code than deriving page_zone(end_page) */
1837 	if (page_zone_id(start_page) != page_zone_id(end_page))
1838 		return NULL;
1839 
1840 	return start_page;
1841 }
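
/*
 * Typical use (sketch; the calling context is illustrative): compaction
 * validates each pageblock before scanning it, e.g.
 *
 *	page = __pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 *				       zone);
 *	if (!page)
 *		continue;	// hole or foreign zone: skip this block
 *
 * which is also what set_zone_contiguous() below relies on.
 */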
1842 
1843 void set_zone_contiguous(struct zone *zone)
1844 {
1845 	unsigned long block_start_pfn = zone->zone_start_pfn;
1846 	unsigned long block_end_pfn;
1847 
1848 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
1849 	for (; block_start_pfn < zone_end_pfn(zone);
1850 			block_start_pfn = block_end_pfn,
1851 			 block_end_pfn += pageblock_nr_pages) {
1852 
1853 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1854 
1855 		if (!__pageblock_pfn_to_page(block_start_pfn,
1856 					     block_end_pfn, zone))
1857 			return;
1858 		cond_resched();
1859 	}
1860 
1861 	/* We confirm that there is no hole */
1862 	zone->contiguous = true;
1863 }
1864 
1865 void clear_zone_contiguous(struct zone *zone)
1866 {
1867 	zone->contiguous = false;
1868 }
1869 
1870 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1871 static void __init deferred_free_range(unsigned long pfn,
1872 				       unsigned long nr_pages)
1873 {
1874 	struct page *page;
1875 	unsigned long i;
1876 
1877 	if (!nr_pages)
1878 		return;
1879 
1880 	page = pfn_to_page(pfn);
1881 
1882 	/* Free a large naturally-aligned chunk if possible */
1883 	if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
1884 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1885 		__free_pages_core(page, pageblock_order);
1886 		return;
1887 	}
1888 
1889 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1890 		if (pageblock_aligned(pfn))
1891 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1892 		__free_pages_core(page, 0);
1893 	}
1894 }
1895 
1896 /* Completion tracking for deferred_init_memmap() threads */
1897 static atomic_t pgdat_init_n_undone __initdata;
1898 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1899 
1900 static inline void __init pgdat_init_report_one_done(void)
1901 {
1902 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1903 		complete(&pgdat_init_all_done_comp);
1904 }
1905 
1906 /*
1907  * Returns true if the page needs to be initialized or freed to the buddy
1908  * allocator.
1909  *
1910  * We check whether a large page is valid by checking only its head pfn.
1911  */
1912 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1913 {
1914 	if (pageblock_aligned(pfn) && !pfn_valid(pfn))
1915 		return false;
1916 	return true;
1917 }
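
/*
 * Example (illustrative, assuming pageblock_nr_pages == 512): once the
 * aligned head pfn 0x200 passes pfn_valid(), pfns 0x201..0x3ff are
 * accepted without further checks; the next pfn_valid() call only
 * happens at the next aligned pfn, 0x400.
 */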
1918 
1919 /*
1920  * Free pages to the buddy allocator. Try to free aligned pages in
1921  * pageblock_nr_pages-sized chunks.
1922  */
1923 static void __init deferred_free_pages(unsigned long pfn,
1924 				       unsigned long end_pfn)
1925 {
1926 	unsigned long nr_free = 0;
1927 
1928 	for (; pfn < end_pfn; pfn++) {
1929 		if (!deferred_pfn_valid(pfn)) {
1930 			deferred_free_range(pfn - nr_free, nr_free);
1931 			nr_free = 0;
1932 		} else if (pageblock_aligned(pfn)) {
1933 			deferred_free_range(pfn - nr_free, nr_free);
1934 			nr_free = 1;
1935 		} else {
1936 			nr_free++;
1937 		}
1938 	}
1939 	/* Free the last block of pages to allocator */
1940 	deferred_free_range(pfn - nr_free, nr_free);
1941 }
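
/*
 * Worked example (illustrative, pageblock_nr_pages == 512 == 0x200):
 * scanning [0x3f0, 0x610) with no invalid pfns, the aligned pfn 0x400
 * flushes the 16 accumulated pfns 0x3f0..0x3ff as order-0 pages, the
 * aligned pfn 0x600 flushes 0x400..0x5ff as one naturally-aligned
 * pageblock, and the final call flushes the tail 0x600..0x60f.
 */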
1942 
1943 /*
1944  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1945  * by performing them only once every pageblock_nr_pages.
1946  * Return the number of pages initialized.
1947  */
1948 static unsigned long  __init deferred_init_pages(struct zone *zone,
1949 						 unsigned long pfn,
1950 						 unsigned long end_pfn)
1951 {
1952 	int nid = zone_to_nid(zone);
1953 	unsigned long nr_pages = 0;
1954 	int zid = zone_idx(zone);
1955 	struct page *page = NULL;
1956 
1957 	for (; pfn < end_pfn; pfn++) {
1958 		if (!deferred_pfn_valid(pfn)) {
1959 			page = NULL;
1960 			continue;
1961 		} else if (!page || pageblock_aligned(pfn)) {
1962 			page = pfn_to_page(pfn);
1963 		} else {
1964 			page++;
1965 		}
1966 		__init_single_page(page, pfn, zid, nid);
1967 		nr_pages++;
1968 	}
1969 	return nr_pages;
1970 }
1971 
1972 /*
1973  * This function is meant to pre-load the iterator for the zone init.
1974  * Specifically, it walks through the ranges until we are caught up to the
1975  * first_init_pfn value and exits there. If we never encounter the value, we
1976  * return false, indicating there are no valid ranges left.
1977  */
1978 static bool __init
1979 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1980 				    unsigned long *spfn, unsigned long *epfn,
1981 				    unsigned long first_init_pfn)
1982 {
1983 	u64 j;
1984 
1985 	/*
1986 	 * Start out by walking through the ranges in this zone that have
1987 	 * already been initialized. We don't need to do anything with them
1988 	 * so we just need to flush them out of the system.
1989 	 */
1990 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1991 		if (*epfn <= first_init_pfn)
1992 			continue;
1993 		if (*spfn < first_init_pfn)
1994 			*spfn = first_init_pfn;
1995 		*i = j;
1996 		return true;
1997 	}
1998 
1999 	return false;
2000 }
2001 
2002 /*
2003  * Initialize and free pages. We do it in two loops: first we initialize
2004  * the struct pages, then we free them to the buddy allocator, because
2005  * while we are freeing pages we can access pages that are ahead
2006  * (computing the buddy page in __free_one_page()).
2007  *
2008  * In order to try and keep some memory in the cache we have the loop
2009  * broken along max page order boundaries. This way we will not cause
2010  * any issues with the buddy page computation.
2011  */
2012 static unsigned long __init
2013 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2014 		       unsigned long *end_pfn)
2015 {
2016 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2017 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2018 	unsigned long nr_pages = 0;
2019 	u64 j = *i;
2020 
2021 	/* First we loop through and initialize the page values */
2022 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2023 		unsigned long t;
2024 
2025 		if (mo_pfn <= *start_pfn)
2026 			break;
2027 
2028 		t = min(mo_pfn, *end_pfn);
2029 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2030 
2031 		if (mo_pfn < *end_pfn) {
2032 			*start_pfn = mo_pfn;
2033 			break;
2034 		}
2035 	}
2036 
2037 	/* Reset values and now loop through freeing pages as needed */
2038 	swap(j, *i);
2039 
2040 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2041 		unsigned long t;
2042 
2043 		if (mo_pfn <= spfn)
2044 			break;
2045 
2046 		t = min(mo_pfn, epfn);
2047 		deferred_free_pages(spfn, t);
2048 
2049 		if (mo_pfn <= epfn)
2050 			break;
2051 	}
2052 
2053 	return nr_pages;
2054 }
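
/*
 * Example (illustrative, assuming MAX_ORDER_NR_PAGES == 1024 == 0x400):
 * entering with *start_pfn == 0x10400 gives mo_pfn == 0x10800; the two
 * loops initialize and then free pfns up to 0x10800 (clipped to the
 * zone's free ranges), and if the current range extends past mo_pfn,
 * *start_pfn is left at 0x10800 for the next iteration.
 */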
2055 
2056 static void __init
2057 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2058 			   void *arg)
2059 {
2060 	unsigned long spfn, epfn;
2061 	struct zone *zone = arg;
2062 	u64 i;
2063 
2064 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2065 
2066 	/*
2067 	 * Initialize and free pages in MAX_ORDER sized increments so that we
2068 	 * can avoid introducing any issues with the buddy allocator.
2069 	 */
2070 	while (spfn < end_pfn) {
2071 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2072 		cond_resched();
2073 	}
2074 }
2075 
2076 /* An arch may override for more concurrency. */
2077 __weak int __init
2078 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2079 {
2080 	return 1;
2081 }
2082 
2083 /* Initialise remaining memory on a node */
2084 static int __init deferred_init_memmap(void *data)
2085 {
2086 	pg_data_t *pgdat = data;
2087 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2088 	unsigned long spfn = 0, epfn = 0;
2089 	unsigned long first_init_pfn, flags;
2090 	unsigned long start = jiffies;
2091 	struct zone *zone;
2092 	int zid, max_threads;
2093 	u64 i;
2094 
2095 	/* Bind memory initialisation thread to a local node if possible */
2096 	if (!cpumask_empty(cpumask))
2097 		set_cpus_allowed_ptr(current, cpumask);
2098 
2099 	pgdat_resize_lock(pgdat, &flags);
2100 	first_init_pfn = pgdat->first_deferred_pfn;
2101 	if (first_init_pfn == ULONG_MAX) {
2102 		pgdat_resize_unlock(pgdat, &flags);
2103 		pgdat_init_report_one_done();
2104 		return 0;
2105 	}
2106 
2107 	/* Sanity check boundaries */
2108 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2109 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2110 	pgdat->first_deferred_pfn = ULONG_MAX;
2111 
2112 	/*
2113 	 * Once we unlock here, the zone cannot be grown anymore. Thus, if an
2114 	 * interrupt thread must allocate this early in boot, the zone must be
2115 	 * pre-grown prior to the start of deferred page initialization.
2116 	 */
2117 	pgdat_resize_unlock(pgdat, &flags);
2118 
2119 	/* Only the highest zone is deferred so find it */
2120 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2121 		zone = pgdat->node_zones + zid;
2122 		if (first_init_pfn < zone_end_pfn(zone))
2123 			break;
2124 	}
2125 
2126 	/* If the zone is empty, somebody else may have cleared out the zone */
2127 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2128 						 first_init_pfn))
2129 		goto zone_empty;
2130 
2131 	max_threads = deferred_page_init_max_threads(cpumask);
2132 
2133 	while (spfn < epfn) {
2134 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2135 		struct padata_mt_job job = {
2136 			.thread_fn   = deferred_init_memmap_chunk,
2137 			.fn_arg      = zone,
2138 			.start       = spfn,
2139 			.size        = epfn_align - spfn,
2140 			.align       = PAGES_PER_SECTION,
2141 			.min_chunk   = PAGES_PER_SECTION,
2142 			.max_threads = max_threads,
2143 		};
2144 
2145 		padata_do_multithreaded(&job);
2146 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2147 						    epfn_align);
2148 	}
2149 zone_empty:
2150 	/* Sanity check that the next zone really is unpopulated */
2151 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2152 
2153 	pr_info("node %d deferred pages initialised in %ums\n",
2154 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2155 
2156 	pgdat_init_report_one_done();
2157 	return 0;
2158 }
2159 
2160 /*
2161  * If this zone has deferred pages, try to grow it by initializing enough
2162  * deferred pages to satisfy the allocation specified by order, rounded up to
2163  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2164  * of SECTION_SIZE bytes by initializing struct pages in increments of
2165  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2166  *
2167  * Return true when zone was grown, otherwise return false. We return true even
2168  * when we grow less than requested, to let the caller decide if there are
2169  * enough pages to satisfy the allocation.
2170  *
2171  * Note: We use noinline because this function is needed only during boot, and
2172  * it is called from a __ref function _deferred_grow_zone. This way we are
2173  * making sure that it is not inlined into the permanent text section.
2174  */
2175 static noinline bool __init
2176 deferred_grow_zone(struct zone *zone, unsigned int order)
2177 {
2178 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2179 	pg_data_t *pgdat = zone->zone_pgdat;
2180 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2181 	unsigned long spfn, epfn, flags;
2182 	unsigned long nr_pages = 0;
2183 	u64 i;
2184 
2185 	/* Only the last zone may have deferred pages */
2186 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2187 		return false;
2188 
2189 	pgdat_resize_lock(pgdat, &flags);
2190 
2191 	/*
2192 	 * If someone grew this zone while we were waiting for spinlock, return
2193 	 * true, as there might be enough pages already.
2194 	 */
2195 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2196 		pgdat_resize_unlock(pgdat, &flags);
2197 		return true;
2198 	}
2199 
2200 	/* If the zone is empty, somebody else may have cleared out the zone */
2201 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2202 						 first_deferred_pfn)) {
2203 		pgdat->first_deferred_pfn = ULONG_MAX;
2204 		pgdat_resize_unlock(pgdat, &flags);
2205 		/* Retry only once. */
2206 		return first_deferred_pfn != ULONG_MAX;
2207 	}
2208 
2209 	/*
2210 	 * Initialize and free pages in MAX_ORDER sized increments so
2211 	 * that we can avoid introducing any issues with the buddy
2212 	 * allocator.
2213 	 */
2214 	while (spfn < epfn) {
2215 		/* update our first deferred PFN for this section */
2216 		first_deferred_pfn = spfn;
2217 
2218 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2219 		touch_nmi_watchdog();
2220 
2221 		/* We should only stop along section boundaries */
2222 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2223 			continue;
2224 
2225 		/* If our quota has been met we can stop here */
2226 		if (nr_pages >= nr_pages_needed)
2227 			break;
2228 	}
2229 
2230 	pgdat->first_deferred_pfn = spfn;
2231 	pgdat_resize_unlock(pgdat, &flags);
2232 
2233 	return nr_pages > 0;
2234 }
2235 
2236 /*
2237  * deferred_grow_zone() is __init, but it is called from
2238  * get_page_from_freelist() during early boot until deferred_pages permanently
2239  * disables this call. This is why we have a __ref wrapper to avoid a
2240  * section-mismatch warning and to ensure that the function body gets unloaded.
2241  */
2242 static bool __ref
2243 _deferred_grow_zone(struct zone *zone, unsigned int order)
2244 {
2245 	return deferred_grow_zone(zone, order);
2246 }
2247 
2248 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2249 
2250 void __init page_alloc_init_late(void)
2251 {
2252 	struct zone *zone;
2253 	int nid;
2254 
2255 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2256 
2257 	/* There will be num_node_state(N_MEMORY) threads */
2258 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2259 	for_each_node_state(nid, N_MEMORY) {
2260 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2261 	}
2262 
2263 	/* Block until all are initialised */
2264 	wait_for_completion(&pgdat_init_all_done_comp);
2265 
2266 	/*
2267 	 * We initialized the rest of the deferred pages.  Permanently disable
2268 	 * on-demand struct page initialization.
2269 	 */
2270 	static_branch_disable(&deferred_pages);
2271 
2272 	/* Reinit limits that are based on free pages after the kernel is up */
2273 	files_maxfiles_init();
2274 #endif
2275 
2276 	buffer_init();
2277 
2278 	/* Discard memblock private memory */
2279 	memblock_discard();
2280 
2281 	for_each_node_state(nid, N_MEMORY)
2282 		shuffle_free_memory(NODE_DATA(nid));
2283 
2284 	for_each_populated_zone(zone)
2285 		set_zone_contiguous(zone);
2286 }
2287 
2288 #ifdef CONFIG_CMA
2289 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2290 void __init init_cma_reserved_pageblock(struct page *page)
2291 {
2292 	unsigned i = pageblock_nr_pages;
2293 	struct page *p = page;
2294 
2295 	do {
2296 		__ClearPageReserved(p);
2297 		set_page_count(p, 0);
2298 	} while (++p, --i);
2299 
2300 	set_pageblock_migratetype(page, MIGRATE_CMA);
2301 	set_page_refcounted(page);
2302 	__free_pages(page, pageblock_order);
2303 
2304 	adjust_managed_page_count(page, pageblock_nr_pages);
2305 	page_zone(page)->cma_pages += pageblock_nr_pages;
2306 }
2307 #endif
2308 
2309 /*
2310  * The order of subdivision here is critical for the IO subsystem.
2311  * Please do not alter this order without good reasons and regression
2312  * testing. Specifically, as large blocks of memory are subdivided,
2313  * the order in which smaller blocks are delivered depends on the order
2314  * they're subdivided in this function. This is the primary factor
2315  * influencing the order in which pages are delivered to the IO
2316  * subsystem according to empirical testing, and this is also justified
2317  * by considering the behavior of a buddy system containing a single
2318  * large block of memory acted on by a series of small allocations.
2319  * This behavior is a critical factor in sglist merging's success.
2320  *
2321  * -- nyc
2322  */
2323 static inline void expand(struct zone *zone, struct page *page,
2324 	int low, int high, int migratetype)
2325 {
2326 	unsigned long size = 1 << high;
2327 
2328 	while (high > low) {
2329 		high--;
2330 		size >>= 1;
2331 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2332 
2333 		/*
2334 		 * Mark as guard pages (or page), which allows merging
2335 		 * back to the allocator when the buddy is freed. The
2336 		 * corresponding page table entries are not touched; the
2337 		 * pages stay not-present in the virtual address space.
2338 		 */
2339 		if (set_page_guard(zone, &page[size], high, migratetype))
2340 			continue;
2341 
2342 		add_to_free_list(&page[size], zone, high, migratetype);
2343 		set_buddy_order(&page[size], high);
2344 	}
2345 }
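
/*
 * Worked example (illustrative): satisfying an order-2 request
 * (low == 2) from an order-5 block (high == 5) frees the upper halves
 * back to the zone: page[16] as order 4, page[8] as order 3, page[4]
 * as order 2 (unless a half becomes a guard page), leaving page[0..3]
 * for the caller.
 */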
2346 
2347 static void check_new_page_bad(struct page *page)
2348 {
2349 	if (unlikely(page->flags & __PG_HWPOISON)) {
2350 		/* Don't complain about hwpoisoned pages */
2351 		page_mapcount_reset(page); /* remove PageBuddy */
2352 		return;
2353 	}
2354 
2355 	bad_page(page,
2356 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2357 }
2358 
2359 /*
2360  * This page is about to be returned from the page allocator
2361  */
2362 static int check_new_page(struct page *page)
2363 {
2364 	if (likely(page_expected_state(page,
2365 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2366 		return 0;
2367 
2368 	check_new_page_bad(page);
2369 	return 1;
2370 }
2371 
2372 static inline bool check_new_pages(struct page *page, unsigned int order)
2373 {
2374 	if (static_branch_unlikely(&check_pages_enabled)) {
2375 		for (int i = 0; i < (1 << order); i++) {
2376 			struct page *p = page + i;
2377 
2378 			if (unlikely(check_new_page(p)))
2379 				return true;
2380 		}
2381 	}
2382 
2383 	return false;
2384 }
2385 
2386 static inline bool should_skip_kasan_unpoison(gfp_t flags)
2387 {
2388 	/* Don't skip if a software KASAN mode is enabled. */
2389 	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
2390 	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
2391 		return false;
2392 
2393 	/* Skip, if hardware tag-based KASAN is not enabled. */
2394 	if (!kasan_hw_tags_enabled())
2395 		return true;
2396 
2397 	/*
2398 	 * With hardware tag-based KASAN enabled, skip if this has been
2399 	 * requested via __GFP_SKIP_KASAN_UNPOISON.
2400 	 */
2401 	return flags & __GFP_SKIP_KASAN_UNPOISON;
2402 }
2403 
2404 static inline bool should_skip_init(gfp_t flags)
2405 {
2406 	/* Don't skip, if hardware tag-based KASAN is not enabled. */
2407 	if (!kasan_hw_tags_enabled())
2408 		return false;
2409 
2410 	/* For hardware tag-based KASAN, skip if requested. */
2411 	return (flags & __GFP_SKIP_ZERO);
2412 }
2413 
2414 inline void post_alloc_hook(struct page *page, unsigned int order,
2415 				gfp_t gfp_flags)
2416 {
2417 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
2418 			!should_skip_init(gfp_flags);
2419 	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
2420 	bool reset_tags = true;
2421 	int i;
2422 
2423 	set_page_private(page, 0);
2424 	set_page_refcounted(page);
2425 
2426 	arch_alloc_page(page, order);
2427 	debug_pagealloc_map_pages(page, 1 << order);
2428 
2429 	/*
2430 	 * Page unpoisoning must happen before memory initialization.
2431 	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2432 	 * allocations and the page unpoisoning code will complain.
2433 	 */
2434 	kernel_unpoison_pages(page, 1 << order);
2435 
2436 	/*
2437 	 * As memory initialization might be integrated into KASAN,
2438 	 * KASAN unpoisoning and memory initialization code must be
2439 	 * kept together to avoid discrepancies in behavior.
2440 	 */
2441 
2442 	/*
2443 	 * Check whether memory tags should be zeroed (which happens only
2444 	 * when memory should be initialized as well).
2445 	 */
2446 	if (zero_tags) {
2447 		/* Initialize both memory and memory tags. */
2448 		for (i = 0; i != 1 << order; ++i)
2449 			tag_clear_highpage(page + i);
2450 
2451 		/* Take note that memory was initialized by the loop above. */
2452 		init = false;
2453 	}
2454 	if (!should_skip_kasan_unpoison(gfp_flags)) {
2455 		/* Try unpoisoning (or setting tags) and initializing memory. */
2456 		if (kasan_unpoison_pages(page, order, init)) {
2457 			/* Take note that memory was initialized by KASAN. */
2458 			if (kasan_has_integrated_init())
2459 				init = false;
2460 			/* Take note that memory tags were set by KASAN. */
2461 			reset_tags = false;
2462 		} else {
2463 			/*
2464 			 * KASAN decided to exclude this allocation from being
2465 			 * (un)poisoned due to sampling. Make KASAN skip
2466 			 * poisoning when the allocation is freed.
2467 			 */
2468 			SetPageSkipKASanPoison(page);
2469 		}
2470 	}
2471 	/*
2472 	 * If memory tags have not been set by KASAN, reset the page tags to
2473 	 * ensure page_address() dereferencing does not fault.
2474 	 */
2475 	if (reset_tags) {
2476 		for (i = 0; i != 1 << order; ++i)
2477 			page_kasan_tag_reset(page + i);
2478 	}
2479 	/* If memory is still not initialized, initialize it now. */
2480 	if (init)
2481 		kernel_init_pages(page, 1 << order);
2482 	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
2483 	if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
2484 		SetPageSkipKASanPoison(page);
2485 
2486 	set_page_owner(page, order, gfp_flags);
2487 	page_table_check_alloc(page, order);
2488 }
2489 
2490 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2491 							unsigned int alloc_flags)
2492 {
2493 	post_alloc_hook(page, order, gfp_flags);
2494 
2495 	if (order && (gfp_flags & __GFP_COMP))
2496 		prep_compound_page(page, order);
2497 
2498 	/*
2499 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2500 	 * allocate the page. The expectation is that the caller is taking
2501 	 * steps that will free more memory. The caller should avoid the page
2502 	 * being used for !PFMEMALLOC purposes.
2503 	 */
2504 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2505 		set_page_pfmemalloc(page);
2506 	else
2507 		clear_page_pfmemalloc(page);
2508 }
2509 
2510 /*
2511  * Go through the free lists for the given migratetype and remove
2512  * the smallest available page from the freelists
2513  */
2514 static __always_inline
2515 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2516 						int migratetype)
2517 {
2518 	unsigned int current_order;
2519 	struct free_area *area;
2520 	struct page *page;
2521 
2522 	/* Find a page of the appropriate size in the preferred list */
2523 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2524 		area = &(zone->free_area[current_order]);
2525 		page = get_page_from_free_area(area, migratetype);
2526 		if (!page)
2527 			continue;
2528 		del_page_from_free_list(page, zone, current_order);
2529 		expand(zone, page, order, current_order, migratetype);
2530 		set_pcppage_migratetype(page, migratetype);
2531 		trace_mm_page_alloc_zone_locked(page, order, migratetype,
2532 				pcp_allowed_order(order) &&
2533 				migratetype < MIGRATE_PCPTYPES);
2534 		return page;
2535 	}
2536 
2537 	return NULL;
2538 }
2539 
2540 
2541 /*
2542  * This array describes the order in which free lists are fallen back to
2543  * when the free lists for the desired migratetype are depleted.
2544  *
2545  * The other migratetypes do not have fallbacks.
2546  */
2547 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
2548 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
2549 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
2550 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
2551 };
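
/*
 * Example (illustrative): when the MIGRATE_UNMOVABLE free lists are
 * empty, __rmqueue_fallback() below tries MIGRATE_RECLAIMABLE first and
 * MIGRATE_MOVABLE second, per fallbacks[MIGRATE_UNMOVABLE] above.
 */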
2552 
2553 #ifdef CONFIG_CMA
2554 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2555 					unsigned int order)
2556 {
2557 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2558 }
2559 #else
2560 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2561 					unsigned int order) { return NULL; }
2562 #endif
2563 
2564 /*
2565  * Move the free pages in a range to the freelist tail of the requested type.
2566  * Note that start_pfn and end_pfn are not required to be aligned on a
2567  * pageblock boundary. If alignment is required, use move_freepages_block().
2568  */
2569 static int move_freepages(struct zone *zone,
2570 			  unsigned long start_pfn, unsigned long end_pfn,
2571 			  int migratetype, int *num_movable)
2572 {
2573 	struct page *page;
2574 	unsigned long pfn;
2575 	unsigned int order;
2576 	int pages_moved = 0;
2577 
2578 	for (pfn = start_pfn; pfn <= end_pfn;) {
2579 		page = pfn_to_page(pfn);
2580 		if (!PageBuddy(page)) {
2581 			/*
2582 			 * We assume that pages that could be isolated for
2583 			 * migration are movable. But we don't actually try
2584 			 * isolating, as that would be expensive.
2585 			 */
2586 			if (num_movable &&
2587 					(PageLRU(page) || __PageMovable(page)))
2588 				(*num_movable)++;
2589 			pfn++;
2590 			continue;
2591 		}
2592 
2593 		/* Make sure we are not inadvertently changing nodes */
2594 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2595 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2596 
2597 		order = buddy_order(page);
2598 		move_to_free_list(page, zone, order, migratetype);
2599 		pfn += 1 << order;
2600 		pages_moved += 1 << order;
2601 	}
2602 
2603 	return pages_moved;
2604 }
2605 
2606 int move_freepages_block(struct zone *zone, struct page *page,
2607 				int migratetype, int *num_movable)
2608 {
2609 	unsigned long start_pfn, end_pfn, pfn;
2610 
2611 	if (num_movable)
2612 		*num_movable = 0;
2613 
2614 	pfn = page_to_pfn(page);
2615 	start_pfn = pageblock_start_pfn(pfn);
2616 	end_pfn = pageblock_end_pfn(pfn) - 1;
2617 
2618 	/* Do not cross zone boundaries */
2619 	if (!zone_spans_pfn(zone, start_pfn))
2620 		start_pfn = pfn;
2621 	if (!zone_spans_pfn(zone, end_pfn))
2622 		return 0;
2623 
2624 	return move_freepages(zone, start_pfn, end_pfn, migratetype,
2625 								num_movable);
2626 }
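
/*
 * Worked example (illustrative, pageblock_nr_pages == 512): a page at
 * pfn 0x41234 yields start_pfn = 0x41200 and end_pfn = 0x413ff. If the
 * zone starts mid-block, start_pfn is clamped up to the page's own pfn;
 * if the block's last pfn falls outside the zone, nothing is moved.
 */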
2627 
2628 static void change_pageblock_range(struct page *pageblock_page,
2629 					int start_order, int migratetype)
2630 {
2631 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2632 
2633 	while (nr_pageblocks--) {
2634 		set_pageblock_migratetype(pageblock_page, migratetype);
2635 		pageblock_page += pageblock_nr_pages;
2636 	}
2637 }
2638 
2639 /*
2640  * When we are falling back to another migratetype during allocation, try to
2641  * steal extra free pages from the same pageblocks to satisfy further
2642  * allocations, instead of polluting multiple pageblocks.
2643  *
2644  * If we are stealing a relatively large buddy page, it is likely there will
2645  * be more free pages in the pageblock, so try to steal them all. For
2646  * reclaimable and unmovable allocations, we steal regardless of page size,
2647  * as fragmentation caused by those allocations polluting movable pageblocks
2648  * is worse than movable allocations stealing from unmovable and reclaimable
2649  * pageblocks.
2650  */
2651 static bool can_steal_fallback(unsigned int order, int start_mt)
2652 {
2653 	/*
2654 	 * Leaving this order check is intended, although there is a
2655 	 * relaxed order check in the next check. The reason is that
2656 	 * we can actually steal the whole pageblock if this condition is
2657 	 * met, but the check below doesn't guarantee it; that is just a
2658 	 * heuristic and so could be changed at any time.
2659 	 */
2660 	if (order >= pageblock_order)
2661 		return true;
2662 
2663 	if (order >= pageblock_order / 2 ||
2664 		start_mt == MIGRATE_RECLAIMABLE ||
2665 		start_mt == MIGRATE_UNMOVABLE ||
2666 		page_group_by_mobility_disabled)
2667 		return true;
2668 
2669 	return false;
2670 }
2671 
2672 static inline bool boost_watermark(struct zone *zone)
2673 {
2674 	unsigned long max_boost;
2675 
2676 	if (!watermark_boost_factor)
2677 		return false;
2678 	/*
2679 	 * Don't bother in zones that are unlikely to produce results.
2680 	 * On small machines, including kdump capture kernels running
2681 	 * in a small area, boosting the watermark can cause an out of
2682 	 * memory situation immediately.
2683 	 */
2684 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2685 		return false;
2686 
2687 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2688 			watermark_boost_factor, 10000);
2689 
2690 	/*
2691 	 * The high watermark may be uninitialised if fragmentation occurs
2692 	 * very early in boot, so do not boost. We do not fall
2693 	 * through and boost by pageblock_nr_pages because failing
2694 	 * allocations that early means that reclaim is not going
2695 	 * to help, and it may even be impossible to reclaim the
2696 	 * boosted watermark, resulting in a hang.
2697 	 */
2698 	if (!max_boost)
2699 		return false;
2700 
2701 	max_boost = max(pageblock_nr_pages, max_boost);
2702 
2703 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2704 		max_boost);
2705 
2706 	return true;
2707 }
2708 
2709 /*
2710  * This function implements the actual steal behaviour. If the order is large
2711  * enough, we can steal the whole pageblock. If not, we first move freepages
2712  * in this pageblock to our migratetype and determine how many already-
2713  * allocated pages in the pageblock have a compatible migratetype. If at least
2714  * half of the pages are free or compatible, we can change the migratetype of
2715  * the pageblock itself, so pages freed in the future go on the correct list.
2716  */
2717 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2718 		unsigned int alloc_flags, int start_type, bool whole_block)
2719 {
2720 	unsigned int current_order = buddy_order(page);
2721 	int free_pages, movable_pages, alike_pages;
2722 	int old_block_type;
2723 
2724 	old_block_type = get_pageblock_migratetype(page);
2725 
2726 	/*
2727 	 * This can happen due to races and we want to prevent broken
2728 	 * highatomic accounting.
2729 	 */
2730 	if (is_migrate_highatomic(old_block_type))
2731 		goto single_page;
2732 
2733 	/* Take ownership for orders >= pageblock_order */
2734 	if (current_order >= pageblock_order) {
2735 		change_pageblock_range(page, current_order, start_type);
2736 		goto single_page;
2737 	}
2738 
2739 	/*
2740 	 * Boost watermarks to increase reclaim pressure to reduce the
2741 	 * likelihood of future fallbacks. Wake kswapd now as the node
2742 	 * may be balanced overall and kswapd will not wake naturally.
2743 	 */
2744 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2745 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2746 
2747 	/* We are not allowed to try stealing from the whole block */
2748 	if (!whole_block)
2749 		goto single_page;
2750 
2751 	free_pages = move_freepages_block(zone, page, start_type,
2752 						&movable_pages);
2753 	/*
2754 	 * Determine how many pages are compatible with our allocation.
2755 	 * For movable allocation, it's the number of movable pages which
2756 	 * we just obtained. For other types it's a bit more tricky.
2757 	 */
2758 	if (start_type == MIGRATE_MOVABLE) {
2759 		alike_pages = movable_pages;
2760 	} else {
2761 		/*
2762 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2763 		 * to MOVABLE pageblock, consider all non-movable pages as
2764 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2765 		 * vice versa, be conservative since we can't distinguish the
2766 		 * exact migratetype of non-movable pages.
2767 		 */
2768 		if (old_block_type == MIGRATE_MOVABLE)
2769 			alike_pages = pageblock_nr_pages
2770 						- (free_pages + movable_pages);
2771 		else
2772 			alike_pages = 0;
2773 	}
2774 
2775 	/* moving whole block can fail due to zone boundary conditions */
2776 	if (!free_pages)
2777 		goto single_page;
2778 
2779 	/*
2780 	 * If a sufficient number of pages in the block are either free or of
2781 	 * comparable migratability as our allocation, claim the whole block.
2782 	 */
2783 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2784 			page_group_by_mobility_disabled)
2785 		set_pageblock_migratetype(page, start_type);
2786 
2787 	return;
2788 
2789 single_page:
2790 	move_to_free_list(page, zone, current_order, start_type);
2791 }
2792 
2793 /*
2794  * Check whether there is a suitable fallback freepage with the requested
2795  * order. If only_stealable is true, this function returns fallback_mt only
2796  * if we can steal the other freepages all together. This helps to reduce
2797  * fragmentation due to mixed-migratetype pages in one pageblock.
2798  */
2799 int find_suitable_fallback(struct free_area *area, unsigned int order,
2800 			int migratetype, bool only_stealable, bool *can_steal)
2801 {
2802 	int i;
2803 	int fallback_mt;
2804 
2805 	if (area->nr_free == 0)
2806 		return -1;
2807 
2808 	*can_steal = false;
2809 	for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
2810 		fallback_mt = fallbacks[migratetype][i];
2811 		if (free_area_empty(area, fallback_mt))
2812 			continue;
2813 
2814 		if (can_steal_fallback(order, migratetype))
2815 			*can_steal = true;
2816 
2817 		if (!only_stealable)
2818 			return fallback_mt;
2819 
2820 		if (*can_steal)
2821 			return fallback_mt;
2822 	}
2823 
2824 	return -1;
2825 }
2826 
2827 /*
2828  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2829  * there are no empty page blocks that contain a page with a suitable order
2830  */
2831 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2832 				unsigned int alloc_order)
2833 {
2834 	int mt;
2835 	unsigned long max_managed, flags;
2836 
2837 	/*
2838 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2839 	 * The check is race-prone but harmless.
2840 	 */
2841 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2842 	if (zone->nr_reserved_highatomic >= max_managed)
2843 		return;
2844 
2845 	spin_lock_irqsave(&zone->lock, flags);
2846 
2847 	/* Recheck the nr_reserved_highatomic limit under the lock */
2848 	if (zone->nr_reserved_highatomic >= max_managed)
2849 		goto out_unlock;
2850 
2851 	/* Yoink! */
2852 	mt = get_pageblock_migratetype(page);
2853 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
2854 	if (migratetype_is_mergeable(mt)) {
2855 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2856 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2857 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2858 	}
2859 
2860 out_unlock:
2861 	spin_unlock_irqrestore(&zone->lock, flags);
2862 }
2863 
2864 /*
2865  * Used when an allocation is about to fail under memory pressure. This
2866  * potentially hurts the reliability of high-order allocations when under
2867  * intense memory pressure but failed atomic allocations should be easier
2868  * to recover from than an OOM.
2869  *
2870  * If @force is true, try to unreserve a pageblock even though highatomic
2871  * pageblock is exhausted.
2872  */
2873 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2874 						bool force)
2875 {
2876 	struct zonelist *zonelist = ac->zonelist;
2877 	unsigned long flags;
2878 	struct zoneref *z;
2879 	struct zone *zone;
2880 	struct page *page;
2881 	int order;
2882 	bool ret;
2883 
2884 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2885 								ac->nodemask) {
2886 		/*
2887 		 * Preserve at least one pageblock unless memory pressure
2888 		 * is really high.
2889 		 */
2890 		if (!force && zone->nr_reserved_highatomic <=
2891 					pageblock_nr_pages)
2892 			continue;
2893 
2894 		spin_lock_irqsave(&zone->lock, flags);
2895 		for (order = 0; order < MAX_ORDER; order++) {
2896 			struct free_area *area = &(zone->free_area[order]);
2897 
2898 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2899 			if (!page)
2900 				continue;
2901 
2902 			/*
2903 			 * In the page freeing path, the migratetype change is
2904 			 * racy, so we can encounter several free pages in a
2905 			 * pageblock in this loop although we changed the
2906 			 * pageblock type from highatomic to ac->migratetype.
2907 			 * So we should adjust the count only once.
2908 			 */
2909 			if (is_migrate_highatomic_page(page)) {
2910 				/*
2911 				 * It should never happen but changes to
2912 				 * locking could inadvertently allow a per-cpu
2913 				 * drain to add pages to MIGRATE_HIGHATOMIC
2914 				 * while unreserving so be safe and watch for
2915 				 * underflows.
2916 				 */
2917 				zone->nr_reserved_highatomic -= min(
2918 						pageblock_nr_pages,
2919 						zone->nr_reserved_highatomic);
2920 			}
2921 
2922 			/*
2923 			 * Convert to ac->migratetype and avoid the normal
2924 			 * pageblock stealing heuristics. Minimally, the caller
2925 			 * is doing the work and needs the pages. More
2926 			 * importantly, if the block was always converted to
2927 			 * MIGRATE_UNMOVABLE or another type then the number
2928 			 * of pageblocks that cannot be completely freed
2929 			 * may increase.
2930 			 */
2931 			set_pageblock_migratetype(page, ac->migratetype);
2932 			ret = move_freepages_block(zone, page, ac->migratetype,
2933 									NULL);
2934 			if (ret) {
2935 				spin_unlock_irqrestore(&zone->lock, flags);
2936 				return ret;
2937 			}
2938 		}
2939 		spin_unlock_irqrestore(&zone->lock, flags);
2940 	}
2941 
2942 	return false;
2943 }
2944 
2945 /*
2946  * Try finding a free buddy page on the fallback list and put it on the free
2947  * list of requested migratetype, possibly along with other pages from the same
2948  * block, depending on fragmentation avoidance heuristics. Returns true if
2949  * fallback was found so that __rmqueue_smallest() can grab it.
2950  *
2951  * The use of signed ints for order and current_order is a deliberate
2952  * deviation from the rest of this file, to make the for loop
2953  * condition simpler.
2954  */
2955 static __always_inline bool
2956 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2957 						unsigned int alloc_flags)
2958 {
2959 	struct free_area *area;
2960 	int current_order;
2961 	int min_order = order;
2962 	struct page *page;
2963 	int fallback_mt;
2964 	bool can_steal;
2965 
2966 	/*
2967 	 * Do not steal pages from freelists belonging to other pageblocks
2968 	 * i.e. orders < pageblock_order. If there are no local zones free,
2969 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2970 	 */
2971 	if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2972 		min_order = pageblock_order;
2973 
2974 	/*
2975 	 * Find the largest available free page in the other list. This roughly
2976 	 * approximates finding the pageblock with the most free pages, which
2977 	 * would be too costly to do exactly.
2978 	 */
2979 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2980 				--current_order) {
2981 		area = &(zone->free_area[current_order]);
2982 		fallback_mt = find_suitable_fallback(area, current_order,
2983 				start_migratetype, false, &can_steal);
2984 		if (fallback_mt == -1)
2985 			continue;
2986 
2987 		/*
2988 		 * If we cannot steal all free pages from the pageblock and the
2989 		 * requested migratetype is movable, it's better to
2990 		 * steal and split the smallest available page instead of the
2991 		 * largest available page, because even if the next movable
2992 		 * allocation falls back into a different pageblock than this
2993 		 * one, it won't cause permanent fragmentation.
2994 		 */
2995 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2996 					&& current_order > order)
2997 			goto find_smallest;
2998 
2999 		goto do_steal;
3000 	}
3001 
3002 	return false;
3003 
3004 find_smallest:
3005 	for (current_order = order; current_order < MAX_ORDER;
3006 							current_order++) {
3007 		area = &(zone->free_area[current_order]);
3008 		fallback_mt = find_suitable_fallback(area, current_order,
3009 				start_migratetype, false, &can_steal);
3010 		if (fallback_mt != -1)
3011 			break;
3012 	}
3013 
3014 	/*
3015 	 * This should not happen - we already found a suitable fallback
3016 	 * when looking for the largest page.
3017 	 */
3018 	VM_BUG_ON(current_order == MAX_ORDER);
3019 
3020 do_steal:
3021 	page = get_page_from_free_area(area, fallback_mt);
3022 
3023 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
3024 								can_steal);
3025 
3026 	trace_mm_page_alloc_extfrag(page, order, current_order,
3027 		start_migratetype, fallback_mt);
3028 
3029 	return true;
3030 
3031 }
3032 
3033 /*
3034  * Do the hard work of removing an element from the buddy allocator.
3035  * Call me with the zone->lock already held.
3036  */
3037 static __always_inline struct page *
3038 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
3039 						unsigned int alloc_flags)
3040 {
3041 	struct page *page;
3042 
3043 	if (IS_ENABLED(CONFIG_CMA)) {
3044 		/*
3045 		 * Balance movable allocations between regular and CMA areas by
3046 		 * allocating from CMA when over half of the zone's free memory
3047 		 * is in the CMA area.
3048 		 */
3049 		if (alloc_flags & ALLOC_CMA &&
3050 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
3051 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
3052 			page = __rmqueue_cma_fallback(zone, order);
3053 			if (page)
3054 				return page;
3055 		}
3056 	}
3057 retry:
3058 	page = __rmqueue_smallest(zone, order, migratetype);
3059 	if (unlikely(!page)) {
3060 		if (alloc_flags & ALLOC_CMA)
3061 			page = __rmqueue_cma_fallback(zone, order);
3062 
3063 		if (!page && __rmqueue_fallback(zone, order, migratetype,
3064 								alloc_flags))
3065 			goto retry;
3066 	}
3067 	return page;
3068 }
3069 
3070 /*
3071  * Obtain a specified number of elements from the buddy allocator, all under
3072  * a single hold of the lock, for efficiency.  Add them to the supplied list.
3073  * Returns the number of new pages which were placed on *list.
3074  */
3075 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3076 			unsigned long count, struct list_head *list,
3077 			int migratetype, unsigned int alloc_flags)
3078 {
3079 	unsigned long flags;
3080 	int i;
3081 
3082 	spin_lock_irqsave(&zone->lock, flags);
3083 	for (i = 0; i < count; ++i) {
3084 		struct page *page = __rmqueue(zone, order, migratetype,
3085 								alloc_flags);
3086 		if (unlikely(page == NULL))
3087 			break;
3088 
3089 		/*
3090 		 * Split buddy pages returned by expand() are received here in
3091 		 * physical page order. The page is added to the tail of the
3092 		 * caller's list. From the caller's perspective, the linked
3093 		 * list is ordered by page number under some conditions. This
3094 		 * is useful for IO devices that can traverse forward from the
3095 		 * head, and thus also in physical page order; such devices
3096 		 * can merge IO requests if the physical pages are ordered
3097 		 * properly.
3098 		 */
3099 		list_add_tail(&page->pcp_list, list);
3100 		if (is_migrate_cma(get_pcppage_migratetype(page)))
3101 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3102 					      -(1 << order));
3103 	}
3104 
3105 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3106 	spin_unlock_irqrestore(&zone->lock, flags);
3107 
3108 	return i;
3109 }
3110 
3111 #ifdef CONFIG_NUMA
3112 /*
3113  * Called from the vmstat counter updater to drain pagesets of this
3114  * currently executing processor on remote nodes after they have
3115  * expired.
3116  */
3117 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3118 {
3119 	int to_drain, batch;
3120 
3121 	batch = READ_ONCE(pcp->batch);
3122 	to_drain = min(pcp->count, batch);
3123 	if (to_drain > 0) {
3124 		spin_lock(&pcp->lock);
3125 		free_pcppages_bulk(zone, to_drain, pcp, 0);
3126 		spin_unlock(&pcp->lock);
3127 	}
3128 }
3129 #endif
3130 
3131 /*
3132  * Drain pcplists of the indicated processor and zone.
3133  */
3134 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3135 {
3136 	struct per_cpu_pages *pcp;
3137 
3138 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3139 	if (pcp->count) {
3140 		spin_lock(&pcp->lock);
3141 		free_pcppages_bulk(zone, pcp->count, pcp, 0);
3142 		spin_unlock(&pcp->lock);
3143 	}
3144 }
3145 
3146 /*
3147  * Drain pcplists of all zones on the indicated processor.
3148  */
3149 static void drain_pages(unsigned int cpu)
3150 {
3151 	struct zone *zone;
3152 
3153 	for_each_populated_zone(zone) {
3154 		drain_pages_zone(cpu, zone);
3155 	}
3156 }
3157 
3158 /*
3159  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3160  */
3161 void drain_local_pages(struct zone *zone)
3162 {
3163 	int cpu = smp_processor_id();
3164 
3165 	if (zone)
3166 		drain_pages_zone(cpu, zone);
3167 	else
3168 		drain_pages(cpu);
3169 }
3170 
3171 /*
3172  * The implementation of drain_all_pages(), exposing an extra parameter to
3173  * drain on all cpus.
3174  *
3175  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3176  * not empty. The check for non-emptiness can however race with a free to
3177  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3178  * that need the guarantee that every CPU has drained can disable the
3179  * optimizing racy check.
3180  */
3181 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3182 {
3183 	int cpu;
3184 
3185 	/*
3186 	 * Allocate in the BSS so we won't require allocation in
3187 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3188 	 */
3189 	static cpumask_t cpus_with_pcps;
3190 
3191 	/*
3192 	 * Do not drain if one is already in progress unless it's specific to
3193 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3194 	 * the drain to be complete when the call returns.
3195 	 */
3196 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3197 		if (!zone)
3198 			return;
3199 		mutex_lock(&pcpu_drain_mutex);
3200 	}
3201 
3202 	/*
3203 	 * We don't care about racing with a CPU hotplug event,
3204 	 * as the offline notification will cause the notified
3205 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
3206 	 * disables preemption as part of its processing.
3207 	 */
3208 	for_each_online_cpu(cpu) {
3209 		struct per_cpu_pages *pcp;
3210 		struct zone *z;
3211 		bool has_pcps = false;
3212 
3213 		if (force_all_cpus) {
3214 			/*
3215 			 * The pcp.count check is racy, some callers need a
3216 			 * guarantee that no cpu is missed.
3217 			 */
3218 			has_pcps = true;
3219 		} else if (zone) {
3220 			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3221 			if (pcp->count)
3222 				has_pcps = true;
3223 		} else {
3224 			for_each_populated_zone(z) {
3225 				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3226 				if (pcp->count) {
3227 					has_pcps = true;
3228 					break;
3229 				}
3230 			}
3231 		}
3232 
3233 		if (has_pcps)
3234 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3235 		else
3236 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3237 	}
3238 
3239 	for_each_cpu(cpu, &cpus_with_pcps) {
3240 		if (zone)
3241 			drain_pages_zone(cpu, zone);
3242 		else
3243 			drain_pages(cpu);
3244 	}
3245 
3246 	mutex_unlock(&pcpu_drain_mutex);
3247 }
3248 
3249 /*
3250  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3251  *
3252  * When zone parameter is non-NULL, spill just the single zone's pages.
3253  */
3254 void drain_all_pages(struct zone *zone)
3255 {
3256 	__drain_all_pages(zone, false);
3257 }
3258 
3259 #ifdef CONFIG_HIBERNATION
3260 
3261 /*
3262  * Touch the watchdog for every WD_PAGE_COUNT pages.
3263  */
3264 #define WD_PAGE_COUNT	(128*1024)
3265 
3266 void mark_free_pages(struct zone *zone)
3267 {
3268 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3269 	unsigned long flags;
3270 	unsigned int order, t;
3271 	struct page *page;
3272 
3273 	if (zone_is_empty(zone))
3274 		return;
3275 
3276 	spin_lock_irqsave(&zone->lock, flags);
3277 
3278 	max_zone_pfn = zone_end_pfn(zone);
3279 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3280 		if (pfn_valid(pfn)) {
3281 			page = pfn_to_page(pfn);
3282 
3283 			if (!--page_count) {
3284 				touch_nmi_watchdog();
3285 				page_count = WD_PAGE_COUNT;
3286 			}
3287 
3288 			if (page_zone(page) != zone)
3289 				continue;
3290 
3291 			if (!swsusp_page_is_forbidden(page))
3292 				swsusp_unset_page_free(page);
3293 		}
3294 
3295 	for_each_migratetype_order(order, t) {
3296 		list_for_each_entry(page,
3297 				&zone->free_area[order].free_list[t], buddy_list) {
3298 			unsigned long i;
3299 
3300 			pfn = page_to_pfn(page);
3301 			for (i = 0; i < (1UL << order); i++) {
3302 				if (!--page_count) {
3303 					touch_nmi_watchdog();
3304 					page_count = WD_PAGE_COUNT;
3305 				}
3306 				swsusp_set_page_free(pfn_to_page(pfn + i));
3307 			}
3308 		}
3309 	}
3310 	spin_unlock_irqrestore(&zone->lock, flags);
3311 }
3312 #endif /* CONFIG_HIBERNATION */
3313 
3314 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3315 							unsigned int order)
3316 {
3317 	int migratetype;
3318 
3319 	if (!free_pages_prepare(page, order, FPI_NONE))
3320 		return false;
3321 
3322 	migratetype = get_pfnblock_migratetype(page, pfn);
3323 	set_pcppage_migratetype(page, migratetype);
3324 	return true;
3325 }
3326 
3327 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
3328 		       bool free_high)
3329 {
3330 	int min_nr_free, max_nr_free;
3331 
3332 	/* Free everything if batch freeing high-order pages. */
3333 	if (unlikely(free_high))
3334 		return pcp->count;
3335 
3336 	/* Check for PCP disabled or boot pageset */
3337 	if (unlikely(high < batch))
3338 		return 1;
3339 
3340 	/* Leave at least pcp->batch pages on the list */
3341 	min_nr_free = batch;
3342 	max_nr_free = high - batch;
3343 
3344 	/*
3345 	 * Double the number of pages freed each time there is subsequent
3346 	 * freeing of pages without any allocation.
3347 	 */
3348 	batch <<= pcp->free_factor;
3349 	if (batch < max_nr_free)
3350 		pcp->free_factor++;
3351 	batch = clamp(batch, min_nr_free, max_nr_free);
3352 
3353 	return batch;
3354 }
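
/*
 * Worked example (illustrative): with high = 200, batch = 32 and
 * pcp->free_factor = 2, min_nr_free = 32 and max_nr_free = 168;
 * batch <<= 2 gives 128, which is below 168 so free_factor is bumped
 * to 3, and clamp(128, 32, 168) leaves 128 pages to free this round.
 */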
3355 
3356 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
3357 		       bool free_high)
3358 {
3359 	int high = READ_ONCE(pcp->high);
3360 
3361 	if (unlikely(!high || free_high))
3362 		return 0;
3363 
3364 	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3365 		return high;
3366 
3367 	/*
3368 	 * If reclaim is active, limit the number of pages that can be
3369 	 * stored on pcp lists.
3370 	 */
3371 	return min(READ_ONCE(pcp->batch) << 2, high);
3372 }
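
/*
 * Example (illustrative): with pcp->high = 512 and pcp->batch = 64, a
 * zone with ZONE_RECLAIM_ACTIVE set caps the pcp list at
 * min(64 << 2, 512) = 256 pages instead of the full 512.
 */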
3373 
3374 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
3375 				   struct page *page, int migratetype,
3376 				   unsigned int order)
3377 {
3378 	int high;
3379 	int pindex;
3380 	bool free_high;
3381 
3382 	__count_vm_events(PGFREE, 1 << order);
3383 	pindex = order_to_pindex(migratetype, order);
3384 	list_add(&page->pcp_list, &pcp->lists[pindex]);
3385 	pcp->count += 1 << order;
3386 
3387 	/*
3388 	 * As high-order pages other than THPs stored on PCP can contribute
3389 	 * to fragmentation, limit the number stored when PCP is heavily
3390 	 * freeing without allocation. The remainder after bulk freeing
3391 	 * stops will be drained from vmstat refresh context.
3392 	 */
3393 	free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
3394 
3395 	high = nr_pcp_high(pcp, zone, free_high);
3396 	if (pcp->count >= high) {
3397 		int batch = READ_ONCE(pcp->batch);
3398 
3399 		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
3400 	}
3401 }
3402 
3403 /*
3404  * Free a pcp page
3405  */
3406 void free_unref_page(struct page *page, unsigned int order)
3407 {
3408 	unsigned long __maybe_unused UP_flags;
3409 	struct per_cpu_pages *pcp;
3410 	struct zone *zone;
3411 	unsigned long pfn = page_to_pfn(page);
3412 	int migratetype;
3413 
3414 	if (!free_unref_page_prepare(page, pfn, order))
3415 		return;
3416 
3417 	/*
3418 	 * We only track unmovable, reclaimable and movable on pcp lists.
3419 	 * Place ISOLATE pages on the isolated list because they are being
3420 	 * offlined, but treat HIGHATOMIC as movable pages so we can get those
3421 	 * areas back if necessary. Otherwise, we may have to free
3422 	 * excessively into the page allocator.
3423 	 */
3424 	migratetype = get_pcppage_migratetype(page);
3425 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3426 		if (unlikely(is_migrate_isolate(migratetype))) {
3427 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3428 			return;
3429 		}
3430 		migratetype = MIGRATE_MOVABLE;
3431 	}
3432 
3433 	zone = page_zone(page);
3434 	pcp_trylock_prepare(UP_flags);
3435 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3436 	if (pcp) {
3437 		free_unref_page_commit(zone, pcp, page, migratetype, order);
3438 		pcp_spin_unlock(pcp);
3439 	} else {
3440 		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
3441 	}
3442 	pcp_trylock_finish(UP_flags);
3443 }
3444 
3445 /*
3446  * Free a list of 0-order pages
3447  */
3448 void free_unref_page_list(struct list_head *list)
3449 {
3450 	unsigned long __maybe_unused UP_flags;
3451 	struct page *page, *next;
3452 	struct per_cpu_pages *pcp = NULL;
3453 	struct zone *locked_zone = NULL;
3454 	int batch_count = 0;
3455 	int migratetype;
3456 
3457 	/* Prepare pages for freeing */
3458 	list_for_each_entry_safe(page, next, list, lru) {
3459 		unsigned long pfn = page_to_pfn(page);
3460 		if (!free_unref_page_prepare(page, pfn, 0)) {
3461 			list_del(&page->lru);
3462 			continue;
3463 		}
3464 
3465 		/*
3466 		 * Free isolated pages directly to the allocator, see
3467 		 * comment in free_unref_page.
3468 		 */
3469 		migratetype = get_pcppage_migratetype(page);
3470 		if (unlikely(is_migrate_isolate(migratetype))) {
3471 			list_del(&page->lru);
3472 			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3473 			continue;
3474 		}
3475 	}
3476 
3477 	list_for_each_entry_safe(page, next, list, lru) {
3478 		struct zone *zone = page_zone(page);
3479 
3480 		list_del(&page->lru);
3481 		migratetype = get_pcppage_migratetype(page);
3482 
3483 		/*
3484 		 * Either different zone requiring a different pcp lock or
3485 		 * excessive lock hold times when freeing a large list of
3486 		 * pages.
3487 		 */
3488 		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
3489 			if (pcp) {
3490 				pcp_spin_unlock(pcp);
3491 				pcp_trylock_finish(UP_flags);
3492 			}
3493 
3494 			batch_count = 0;
3495 
3496 			/*
3497 			 * trylock is necessary as pages may be getting freed
3498 			 * from IRQ or SoftIRQ context after an IO completion.
3499 			 */
3500 			pcp_trylock_prepare(UP_flags);
3501 			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3502 			if (unlikely(!pcp)) {
3503 				pcp_trylock_finish(UP_flags);
3504 				free_one_page(zone, page, page_to_pfn(page),
3505 					      0, migratetype, FPI_NONE);
3506 				locked_zone = NULL;
3507 				continue;
3508 			}
3509 			locked_zone = zone;
3510 		}
3511 
3512 		/*
3513 		 * Non-isolated types over MIGRATE_PCPTYPES get added
3514 		 * to the MIGRATE_MOVABLE pcp list.
3515 		 */
3516 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3517 			migratetype = MIGRATE_MOVABLE;
3518 
3519 		trace_mm_page_free_batched(page);
3520 		free_unref_page_commit(zone, pcp, page, migratetype, 0);
3521 		batch_count++;
3522 	}
3523 
3524 	if (pcp) {
3525 		pcp_spin_unlock(pcp);
3526 		pcp_trylock_finish(UP_flags);
3527 	}
3528 }
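
/*
 * Minimal usage sketch (illustrative): callers such as release_pages() batch
 * order-0 pages on a list linked through page->lru:
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	list_add(&page->lru, &pages_to_free);
 *	free_unref_page_list(&pages_to_free);
 */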
3529 
3530 /*
3531  * split_page takes a non-compound higher-order page, and splits it into
3532  * n (1<<order) sub-pages: page[0..n-1]
3533  * Each sub-page must be freed individually.
3534  *
3535  * Note: this is probably too low level an operation for use in drivers.
3536  * Please consult with lkml before using this in your driver.
3537  */
3538 void split_page(struct page *page, unsigned int order)
3539 {
3540 	int i;
3541 
3542 	VM_BUG_ON_PAGE(PageCompound(page), page);
3543 	VM_BUG_ON_PAGE(!page_count(page), page);
3544 
3545 	for (i = 1; i < (1 << order); i++)
3546 		set_page_refcounted(page + i);
3547 	split_page_owner(page, 1 << order);
3548 	split_page_memcg(page, 1 << order);
3549 }
3550 EXPORT_SYMBOL_GPL(split_page);
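
/*
 * Usage sketch (illustrative driver-style code, see the warning above):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		int i;
 *
 *		split_page(page, 2);
 *		for (i = 0; i < (1 << 2); i++)
 *			__free_page(page + i);
 *	}
 */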
3551 
3552 int __isolate_free_page(struct page *page, unsigned int order)
3553 {
3554 	struct zone *zone = page_zone(page);
3555 	int mt = get_pageblock_migratetype(page);
3556 
3557 	if (!is_migrate_isolate(mt)) {
3558 		unsigned long watermark;
3559 		/*
3560 		 * Obey watermarks as if the page was being allocated. We can
3561 		 * emulate a high-order watermark check with a raised order-0
3562 		 * watermark, because we already know our high-order page
3563 		 * exists.
3564 		 */
3565 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3566 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3567 			return 0;
3568 
3569 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3570 	}
3571 
3572 	del_page_from_free_list(page, zone, order);
3573 
3574 	/*
3575 	 * Set the pageblock's migratetype to MIGRATE_MOVABLE if the isolated
3576 	 * page spans at least half of a pageblock.
3577 	 */
3578 	if (order >= pageblock_order - 1) {
3579 		struct page *endpage = page + (1 << order) - 1;
3580 		for (; page < endpage; page += pageblock_nr_pages) {
3581 			int mt = get_pageblock_migratetype(page);
3582 			/*
3583 			 * Only change normal pageblocks (i.e., they can merge
3584 			 * with others)
3585 			 */
3586 			if (migratetype_is_mergeable(mt))
3587 				set_pageblock_migratetype(page,
3588 							  MIGRATE_MOVABLE);
3589 		}
3590 	}
3591 
3592 	return 1UL << order;
3593 }
3594 
3595 /**
3596  * __putback_isolated_page - Return a now-isolated page back where we got it
3597  * @page: Page that was isolated
3598  * @order: Order of the isolated page
3599  * @mt: The page's pageblock's migratetype
3600  *
3601  * This function is meant to return a page pulled from the free lists via
3602  * __isolate_free_page back to the free list it was pulled from.
3603  */
3604 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3605 {
3606 	struct zone *zone = page_zone(page);
3607 
3608 	/* zone lock should be held when this function is called */
3609 	lockdep_assert_held(&zone->lock);
3610 
3611 	/* Return isolated page to tail of freelist. */
3612 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3613 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3614 }
3615 
3616 /*
3617  * Update NUMA hit/miss statistics
3618  */
3619 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3620 				   long nr_account)
3621 {
3622 #ifdef CONFIG_NUMA
3623 	enum numa_stat_item local_stat = NUMA_LOCAL;
3624 
3625 	/* Skip NUMA counter updates if NUMA stats are disabled */
3626 	if (!static_branch_likely(&vm_numa_stat_key))
3627 		return;
3628 
3629 	if (zone_to_nid(z) != numa_node_id())
3630 		local_stat = NUMA_OTHER;
3631 
3632 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3633 		__count_numa_events(z, NUMA_HIT, nr_account);
3634 	else {
3635 		__count_numa_events(z, NUMA_MISS, nr_account);
3636 		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3637 	}
3638 	__count_numa_events(z, local_stat, nr_account);
3639 #endif
3640 }
3641 
3642 static __always_inline
3643 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3644 			   unsigned int order, unsigned int alloc_flags,
3645 			   int migratetype)
3646 {
3647 	struct page *page;
3648 	unsigned long flags;
3649 
3650 	do {
3651 		page = NULL;
3652 		spin_lock_irqsave(&zone->lock, flags);
3653 		/*
3654 		 * An order-0 request can reach here when the pcplist is skipped
3655 		 * due to a non-CMA allocation context. The HIGHATOMIC area is
3656 		 * reserved for high-order atomic allocations, so an order-0
3657 		 * request should skip it.
3658 		 */
3659 		if (alloc_flags & ALLOC_HIGHATOMIC)
3660 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3661 		if (!page) {
3662 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3663 
3664 			/*
3665 			 * If the allocation fails, allow OOM handling access
3666 			 * to HIGHATOMIC reserves as failing now is worse than
3667 			 * failing a high-order atomic allocation in the
3668 			 * future.
3669 			 */
3670 			if (!page && (alloc_flags & ALLOC_OOM))
3671 				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3672 
3673 			if (!page) {
3674 				spin_unlock_irqrestore(&zone->lock, flags);
3675 				return NULL;
3676 			}
3677 		}
3678 		__mod_zone_freepage_state(zone, -(1 << order),
3679 					  get_pcppage_migratetype(page));
3680 		spin_unlock_irqrestore(&zone->lock, flags);
3681 	} while (check_new_pages(page, order));
3682 
3683 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3684 	zone_statistics(preferred_zone, zone, 1);
3685 
3686 	return page;
3687 }
3688 
3689 /* Remove page from the per-cpu list, caller must protect the list */
3690 static inline
3691 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3692 			int migratetype,
3693 			unsigned int alloc_flags,
3694 			struct per_cpu_pages *pcp,
3695 			struct list_head *list)
3696 {
3697 	struct page *page;
3698 
3699 	do {
3700 		if (list_empty(list)) {
3701 			int batch = READ_ONCE(pcp->batch);
3702 			int alloced;
3703 
3704 			/*
3705 			 * Scale batch relative to order if batch implies
3706 			 * free pages can be stored on the PCP. Batch can
3707 			 * be 1 for small zones or for boot pagesets which
3708 			 * should never store free pages as the pages may
3709 			 * belong to arbitrary zones.
3710 			 */
3711 			if (batch > 1)
3712 				batch = max(batch >> order, 2);
3713 			alloced = rmqueue_bulk(zone, order,
3714 					batch, list,
3715 					migratetype, alloc_flags);
3716 
3717 			pcp->count += alloced << order;
3718 			if (unlikely(list_empty(list)))
3719 				return NULL;
3720 		}
3721 
3722 		page = list_first_entry(list, struct page, pcp_list);
3723 		list_del(&page->pcp_list);
3724 		pcp->count -= 1 << order;
3725 	} while (check_new_pages(page, order));
3726 
3727 	return page;
3728 }
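
/*
 * Refill example with illustrative numbers: for pcp->batch == 64 and an
 * order-3 request, the bulk refill asks for max(64 >> 3, 2) == 8 order-3
 * pages, growing pcp->count by 8 << 3 == 64 base pages.
 */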
3729 
3730 /* Lock and remove page from the per-cpu list */
3731 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3732 			struct zone *zone, unsigned int order,
3733 			int migratetype, unsigned int alloc_flags)
3734 {
3735 	struct per_cpu_pages *pcp;
3736 	struct list_head *list;
3737 	struct page *page;
3738 	unsigned long __maybe_unused UP_flags;
3739 
3740 	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
3741 	pcp_trylock_prepare(UP_flags);
3742 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3743 	if (!pcp) {
3744 		pcp_trylock_finish(UP_flags);
3745 		return NULL;
3746 	}
3747 
3748 	/*
3749 	 * On allocation, reduce the number of pages that are batch freed.
3750 	 * See nr_pcp_free() where free_factor is increased for subsequent
3751 	 * frees.
3752 	 */
3753 	pcp->free_factor >>= 1;
3754 	list = &pcp->lists[order_to_pindex(migratetype, order)];
3755 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3756 	pcp_spin_unlock(pcp);
3757 	pcp_trylock_finish(UP_flags);
3758 	if (page) {
3759 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3760 		zone_statistics(preferred_zone, zone, 1);
3761 	}
3762 	return page;
3763 }
3764 
3765 /*
3766  * Allocate a page from the given zone.
3767  * Use pcplists for THP or "cheap" high-order allocations.
3768  */
3769 
3770 /*
3771  * Do not instrument rmqueue() with KMSAN. This function may call
3772  * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3773  * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3774  * may call rmqueue() again, which will result in a deadlock.
3775  */
3776 __no_sanitize_memory
3777 static inline
3778 struct page *rmqueue(struct zone *preferred_zone,
3779 			struct zone *zone, unsigned int order,
3780 			gfp_t gfp_flags, unsigned int alloc_flags,
3781 			int migratetype)
3782 {
3783 	struct page *page;
3784 
3785 	/*
3786 	 * We most definitely don't want callers attempting to
3787 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3788 	 */
3789 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3790 
3791 	if (likely(pcp_allowed_order(order))) {
3792 		/*
3793 		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3794 		 * we need to skip it when CMA area isn't allowed.
3795 		 */
3796 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3797 				migratetype != MIGRATE_MOVABLE) {
3798 			page = rmqueue_pcplist(preferred_zone, zone, order,
3799 					migratetype, alloc_flags);
3800 			if (likely(page))
3801 				goto out;
3802 		}
3803 	}
3804 
3805 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3806 							migratetype);
3807 
3808 out:
3809 	/* Separate test+clear to avoid unnecessary atomics */
3810 	if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3811 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3812 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3813 	}
3814 
3815 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3816 	return page;
3817 }
3818 
3819 #ifdef CONFIG_FAIL_PAGE_ALLOC
3820 
3821 static struct {
3822 	struct fault_attr attr;
3823 
3824 	bool ignore_gfp_highmem;
3825 	bool ignore_gfp_reclaim;
3826 	u32 min_order;
3827 } fail_page_alloc = {
3828 	.attr = FAULT_ATTR_INITIALIZER,
3829 	.ignore_gfp_reclaim = true,
3830 	.ignore_gfp_highmem = true,
3831 	.min_order = 1,
3832 };
3833 
3834 static int __init setup_fail_page_alloc(char *str)
3835 {
3836 	return setup_fault_attr(&fail_page_alloc.attr, str);
3837 }
3838 __setup("fail_page_alloc=", setup_fail_page_alloc);
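
/*
 * The parameter follows the common fault_attr format,
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * so e.g. fail_page_alloc=1,10,0,-1 would fail roughly 10% of eligible
 * allocations with no bound on the total count (illustrative values).
 */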
3839 
3840 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3841 {
3842 	int flags = 0;
3843 
3844 	if (order < fail_page_alloc.min_order)
3845 		return false;
3846 	if (gfp_mask & __GFP_NOFAIL)
3847 		return false;
3848 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3849 		return false;
3850 	if (fail_page_alloc.ignore_gfp_reclaim &&
3851 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3852 		return false;
3853 
3854 	/* See comment in __should_failslab() */
3855 	if (gfp_mask & __GFP_NOWARN)
3856 		flags |= FAULT_NOWARN;
3857 
3858 	return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags);
3859 }
3860 
3861 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3862 
3863 static int __init fail_page_alloc_debugfs(void)
3864 {
3865 	umode_t mode = S_IFREG | 0600;
3866 	struct dentry *dir;
3867 
3868 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3869 					&fail_page_alloc.attr);
3870 
3871 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3872 			    &fail_page_alloc.ignore_gfp_reclaim);
3873 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3874 			    &fail_page_alloc.ignore_gfp_highmem);
3875 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3876 
3877 	return 0;
3878 }
3879 
3880 late_initcall(fail_page_alloc_debugfs);
3881 
3882 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3883 
3884 #else /* CONFIG_FAIL_PAGE_ALLOC */
3885 
3886 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3887 {
3888 	return false;
3889 }
3890 
3891 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3892 
3893 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3894 {
3895 	return __should_fail_alloc_page(gfp_mask, order);
3896 }
3897 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
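
/*
 * Because of ALLOW_ERROR_INJECTION above, should_fail_alloc_page() can also
 * be forced to return true at runtime via the function error injection
 * framework (e.g. a BPF program using bpf_override_return), independent of
 * the fail_page_alloc fault_attr configuration.
 */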
3898 
3899 static inline long __zone_watermark_unusable_free(struct zone *z,
3900 				unsigned int order, unsigned int alloc_flags)
3901 {
3902 	long unusable_free = (1 << order) - 1;
3903 
3904 	/*
3905 	 * If the caller does not have rights to reserves below the min
3906 	 * watermark then subtract the high-atomic reserves. This will
3907 	 * over-estimate the size of the atomic reserve but it avoids a search.
3908 	 */
3909 	if (likely(!(alloc_flags & ALLOC_RESERVES)))
3910 		unusable_free += z->nr_reserved_highatomic;
3911 
3912 #ifdef CONFIG_CMA
3913 	/* If allocation can't use CMA areas don't use free CMA pages */
3914 	if (!(alloc_flags & ALLOC_CMA))
3915 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3916 #endif
3917 
3918 	return unusable_free;
3919 }
3920 
3921 /*
3922  * Return true if free base pages are above 'mark'. For high-order checks it
3923  * will return true if the order-0 watermark is reached and there is at least
3924  * one free page of a suitable size. Checking now avoids taking the zone lock
3925  * to check in the allocation paths if no pages are free.
3926  */
3927 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3928 			 int highest_zoneidx, unsigned int alloc_flags,
3929 			 long free_pages)
3930 {
3931 	long min = mark;
3932 	int o;
3933 
3934 	/* free_pages may go negative - that's OK */
3935 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3936 
3937 	if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3938 		/*
3939 		 * __GFP_HIGH allows access to 50% of the min reserve as well
3940 		 * as OOM.
3941 		 */
3942 		if (alloc_flags & ALLOC_MIN_RESERVE) {
3943 			min -= min / 2;
3944 
3945 			/*
3946 			 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3947 			 * access more reserves than just __GFP_HIGH. Other
3948 			 * non-blocking allocations requests such as GFP_NOWAIT
3949 			 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3950 			 * access to the min reserve.
3951 			 */
3952 			if (alloc_flags & ALLOC_NON_BLOCK)
3953 				min -= min / 4;
3954 		}
3955 
3956 		/*
3957 		 * OOM victims can try even harder than the normal reserve
3958 		 * users on the grounds that it's definitely going to be in
3959 		 * the exit path shortly and free memory. Any allocation it
3960 		 * makes during the free path will be small and short-lived.
3961 		 */
3962 		if (alloc_flags & ALLOC_OOM)
3963 			min -= min / 2;
3964 	}
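
	/*
	 * Worked example with illustrative numbers: for mark == 1024,
	 * ALLOC_MIN_RESERVE alone leaves min == 512, adding ALLOC_NON_BLOCK
	 * (e.g. GFP_ATOMIC) drops it to 384, and an ALLOC_OOM victim halves
	 * whatever remains.
	 */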
3965 
3966 	/*
3967 	 * Check watermarks for an order-0 allocation request. If these
3968 	 * are not met, then a high-order request also cannot go ahead
3969 	 * even if a suitable page happened to be free.
3970 	 */
3971 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3972 		return false;
3973 
3974 	/* If this is an order-0 request then the watermark is fine */
3975 	if (!order)
3976 		return true;
3977 
3978 	/* For a high-order request, check at least one suitable page is free */
3979 	for (o = order; o < MAX_ORDER; o++) {
3980 		struct free_area *area = &z->free_area[o];
3981 		int mt;
3982 
3983 		if (!area->nr_free)
3984 			continue;
3985 
3986 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3987 			if (!free_area_empty(area, mt))
3988 				return true;
3989 		}
3990 
3991 #ifdef CONFIG_CMA
3992 		if ((alloc_flags & ALLOC_CMA) &&
3993 		    !free_area_empty(area, MIGRATE_CMA)) {
3994 			return true;
3995 		}
3996 #endif
3997 		if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3998 		    !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3999 			return true;
4000 		}
4001 	}
4002 	return false;
4003 }
4004 
4005 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4006 		      int highest_zoneidx, unsigned int alloc_flags)
4007 {
4008 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4009 					zone_page_state(z, NR_FREE_PAGES));
4010 }
4011 
4012 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
4013 				unsigned long mark, int highest_zoneidx,
4014 				unsigned int alloc_flags, gfp_t gfp_mask)
4015 {
4016 	long free_pages;
4017 
4018 	free_pages = zone_page_state(z, NR_FREE_PAGES);
4019 
4020 	/*
4021 	 * Fast check for order-0 only. If this fails then the reserves
4022 	 * need to be calculated.
4023 	 */
4024 	if (!order) {
4025 		long usable_free;
4026 		long reserved;
4027 
4028 		usable_free = free_pages;
4029 		reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
4030 
4031 		/* reserved may overestimate high-atomic reserves. */
4032 		usable_free -= min(usable_free, reserved);
4033 		if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
4034 			return true;
4035 	}
4036 
4037 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4038 					free_pages))
4039 		return true;
4040 
4041 	/*
4042 	 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
4043 	 * when checking the min watermark. The min watermark is the
4044 	 * point where boosting is ignored so that kswapd is woken up
4045 	 * when below the low watermark.
4046 	 */
4047 	if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
4048 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
4049 		mark = z->_watermark[WMARK_MIN];
4050 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
4051 					alloc_flags, free_pages);
4052 	}
4053 
4054 	return false;
4055 }
4056 
4057 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
4058 			unsigned long mark, int highest_zoneidx)
4059 {
4060 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
4061 
4062 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
4063 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
4064 
4065 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
4066 								free_pages);
4067 }
4068 
4069 #ifdef CONFIG_NUMA
4070 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
4071 
4072 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4073 {
4074 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
4075 				node_reclaim_distance;
4076 }
4077 #else	/* CONFIG_NUMA */
4078 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4079 {
4080 	return true;
4081 }
4082 #endif	/* CONFIG_NUMA */
4083 
4084 /*
4085  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
4086  * fragmentation is subtle. If the preferred zone was HIGHMEM then
4087  * premature use of a lower zone may cause lowmem pressure problems that
4088  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
4089  * probably too small. It only makes sense to spread allocations to avoid
4090  * fragmentation between the Normal and DMA32 zones.
4091  */
4092 static inline unsigned int
4093 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4094 {
4095 	unsigned int alloc_flags;
4096 
4097 	/*
4098 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4099 	 * to save a branch.
4100 	 */
4101 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4102 
4103 #ifdef CONFIG_ZONE_DMA32
4104 	if (!zone)
4105 		return alloc_flags;
4106 
4107 	if (zone_idx(zone) != ZONE_NORMAL)
4108 		return alloc_flags;
4109 
4110 	/*
4111 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4112 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4113 	 * on UMA that if Normal is populated then so is DMA32.
4114 	 */
4115 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4116 	if (nr_online_nodes > 1 && !populated_zone(--zone))
4117 		return alloc_flags;
4118 
4119 	alloc_flags |= ALLOC_NOFRAGMENT;
4120 #endif /* CONFIG_ZONE_DMA32 */
4121 	return alloc_flags;
4122 }
4123 
4124 /* Must be called after current_gfp_context() which can change gfp_mask */
4125 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4126 						  unsigned int alloc_flags)
4127 {
4128 #ifdef CONFIG_CMA
4129 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4130 		alloc_flags |= ALLOC_CMA;
4131 #endif
4132 	return alloc_flags;
4133 }
4134 
4135 /*
4136  * get_page_from_freelist goes through the zonelist trying to allocate
4137  * a page.
4138  */
4139 static struct page *
4140 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4141 						const struct alloc_context *ac)
4142 {
4143 	struct zoneref *z;
4144 	struct zone *zone;
4145 	struct pglist_data *last_pgdat = NULL;
4146 	bool last_pgdat_dirty_ok = false;
4147 	bool no_fallback;
4148 
4149 retry:
4150 	/*
4151 	 * Scan the zonelist, looking for a zone with enough free pages.
4152 	 * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
4153 	 */
4154 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4155 	z = ac->preferred_zoneref;
4156 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4157 					ac->nodemask) {
4158 		struct page *page;
4159 		unsigned long mark;
4160 
4161 		if (cpusets_enabled() &&
4162 			(alloc_flags & ALLOC_CPUSET) &&
4163 			!__cpuset_zone_allowed(zone, gfp_mask))
4164 				continue;
4165 		/*
4166 		 * When allocating a page cache page for writing, we
4167 		 * want to get it from a node that is within its dirty
4168 		 * limit, such that no single node holds more than its
4169 		 * proportional share of globally allowed dirty pages.
4170 		 * The dirty limits take into account the node's
4171 		 * lowmem reserves and high watermark so that kswapd
4172 		 * should be able to balance it without having to
4173 		 * write pages from its LRU list.
4174 		 *
4175 		 * XXX: For now, allow allocations to potentially
4176 		 * exceed the per-node dirty limit in the slowpath
4177 		 * (spread_dirty_pages unset) before going into reclaim,
4178 		 * which is important when on a NUMA setup the allowed
4179 		 * nodes are together not big enough to reach the
4180 		 * global limit.  The proper fix for these situations
4181 		 * will require awareness of nodes in the
4182 		 * dirty-throttling and the flusher threads.
4183 		 */
4184 		if (ac->spread_dirty_pages) {
4185 			if (last_pgdat != zone->zone_pgdat) {
4186 				last_pgdat = zone->zone_pgdat;
4187 				last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
4188 			}
4189 
4190 			if (!last_pgdat_dirty_ok)
4191 				continue;
4192 		}
4193 
4194 		if (no_fallback && nr_online_nodes > 1 &&
4195 		    zone != ac->preferred_zoneref->zone) {
4196 			int local_nid;
4197 
4198 			/*
4199 			 * If moving to a remote node, retry but allow
4200 			 * fragmenting fallbacks. Locality is more important
4201 			 * than fragmentation avoidance.
4202 			 */
4203 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4204 			if (zone_to_nid(zone) != local_nid) {
4205 				alloc_flags &= ~ALLOC_NOFRAGMENT;
4206 				goto retry;
4207 			}
4208 		}
4209 
4210 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4211 		if (!zone_watermark_fast(zone, order, mark,
4212 				       ac->highest_zoneidx, alloc_flags,
4213 				       gfp_mask)) {
4214 			int ret;
4215 
4216 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4217 			/*
4218 			 * Watermark failed for this zone, but see if we can
4219 			 * grow this zone if it contains deferred pages.
4220 			 */
4221 			if (deferred_pages_enabled()) {
4222 				if (_deferred_grow_zone(zone, order))
4223 					goto try_this_zone;
4224 			}
4225 #endif
4226 			/* Checked here to keep the fast path fast */
4227 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4228 			if (alloc_flags & ALLOC_NO_WATERMARKS)
4229 				goto try_this_zone;
4230 
4231 			if (!node_reclaim_enabled() ||
4232 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4233 				continue;
4234 
4235 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4236 			switch (ret) {
4237 			case NODE_RECLAIM_NOSCAN:
4238 				/* did not scan */
4239 				continue;
4240 			case NODE_RECLAIM_FULL:
4241 				/* scanned but unreclaimable */
4242 				continue;
4243 			default:
4244 				/* did we reclaim enough */
4245 				if (zone_watermark_ok(zone, order, mark,
4246 					ac->highest_zoneidx, alloc_flags))
4247 					goto try_this_zone;
4248 
4249 				continue;
4250 			}
4251 		}
4252 
4253 try_this_zone:
4254 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4255 				gfp_mask, alloc_flags, ac->migratetype);
4256 		if (page) {
4257 			prep_new_page(page, order, gfp_mask, alloc_flags);
4258 
4259 			/*
4260 			 * If this is a high-order atomic allocation then check
4261 			 * if the pageblock should be reserved for the future
4262 			 */
4263 			if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
4264 				reserve_highatomic_pageblock(page, zone, order);
4265 
4266 			return page;
4267 		} else {
4268 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4269 			/* Try again if zone has deferred pages */
4270 			if (deferred_pages_enabled()) {
4271 				if (_deferred_grow_zone(zone, order))
4272 					goto try_this_zone;
4273 			}
4274 #endif
4275 		}
4276 	}
4277 
4278 	/*
4279 	 * It's possible on a UMA machine to get through all zones that are
4280 	 * fragmented. If avoiding fragmentation, reset and try again.
4281 	 */
4282 	if (no_fallback) {
4283 		alloc_flags &= ~ALLOC_NOFRAGMENT;
4284 		goto retry;
4285 	}
4286 
4287 	return NULL;
4288 }
4289 
4290 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4291 {
4292 	unsigned int filter = SHOW_MEM_FILTER_NODES;
4293 
4294 	/*
4295 	 * This documents exceptions given to allocations in certain
4296 	 * contexts that are allowed to allocate outside current's set
4297 	 * of allowed nodes.
4298 	 */
4299 	if (!(gfp_mask & __GFP_NOMEMALLOC))
4300 		if (tsk_is_oom_victim(current) ||
4301 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
4302 			filter &= ~SHOW_MEM_FILTER_NODES;
4303 	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4304 		filter &= ~SHOW_MEM_FILTER_NODES;
4305 
4306 	__show_mem(filter, nodemask, gfp_zone(gfp_mask));
4307 }
4308 
4309 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4310 {
4311 	struct va_format vaf;
4312 	va_list args;
4313 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4314 
4315 	if ((gfp_mask & __GFP_NOWARN) ||
4316 	     !__ratelimit(&nopage_rs) ||
4317 	     ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4318 		return;
4319 
4320 	va_start(args, fmt);
4321 	vaf.fmt = fmt;
4322 	vaf.va = &args;
4323 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4324 			current->comm, &vaf, gfp_mask, &gfp_mask,
4325 			nodemask_pr_args(nodemask));
4326 	va_end(args);
4327 
4328 	cpuset_print_current_mems_allowed();
4329 	pr_cont("\n");
4330 	dump_stack();
4331 	warn_alloc_show_mem(gfp_mask, nodemask);
4332 }
4333 
4334 static inline struct page *
4335 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4336 			      unsigned int alloc_flags,
4337 			      const struct alloc_context *ac)
4338 {
4339 	struct page *page;
4340 
4341 	page = get_page_from_freelist(gfp_mask, order,
4342 			alloc_flags|ALLOC_CPUSET, ac);
4343 	/*
4344 	 * Fall back to ignoring the cpuset restriction if our
4345 	 * nodes are depleted.
4346 	 */
4347 	if (!page)
4348 		page = get_page_from_freelist(gfp_mask, order,
4349 				alloc_flags, ac);
4350 
4351 	return page;
4352 }
4353 
4354 static inline struct page *
4355 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4356 	const struct alloc_context *ac, unsigned long *did_some_progress)
4357 {
4358 	struct oom_control oc = {
4359 		.zonelist = ac->zonelist,
4360 		.nodemask = ac->nodemask,
4361 		.memcg = NULL,
4362 		.gfp_mask = gfp_mask,
4363 		.order = order,
4364 	};
4365 	struct page *page;
4366 
4367 	*did_some_progress = 0;
4368 
4369 	/*
4370 	 * Acquire the oom lock.  If that fails, somebody else is
4371 	 * making progress for us.
4372 	 */
4373 	if (!mutex_trylock(&oom_lock)) {
4374 		*did_some_progress = 1;
4375 		schedule_timeout_uninterruptible(1);
4376 		return NULL;
4377 	}
4378 
4379 	/*
4380 	 * Go through the zonelist yet one more time, keeping a very high
4381 	 * watermark here; this only catches a parallel oom killing, and we
4382 	 * must fail if we're still under heavy pressure. Also make sure this
4383 	 * reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM &&
4384 	 * !__GFP_NORETRY allocation, which would never fail while oom_lock is held.
4385 	 */
4386 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4387 				      ~__GFP_DIRECT_RECLAIM, order,
4388 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4389 	if (page)
4390 		goto out;
4391 
4392 	/* Coredumps can quickly deplete all memory reserves */
4393 	if (current->flags & PF_DUMPCORE)
4394 		goto out;
4395 	/* The OOM killer will not help higher order allocs */
4396 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4397 		goto out;
4398 	/*
4399 	 * We have already exhausted all our reclaim opportunities without any
4400 	 * success so it is time to admit defeat. We will skip the OOM killer
4401 	 * because it is very likely that the caller has a more reasonable
4402 	 * fallback than shooting a random task.
4403 	 *
4404 	 * The OOM killer may not free memory on a specific node.
4405 	 */
4406 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4407 		goto out;
4408 	/* The OOM killer does not needlessly kill tasks for lowmem */
4409 	if (ac->highest_zoneidx < ZONE_NORMAL)
4410 		goto out;
4411 	if (pm_suspended_storage())
4412 		goto out;
4413 	/*
4414 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4415 	 * other requests to make forward progress.
4416 	 * We are in an unfortunate situation where out_of_memory cannot
4417 	 * do much for this context but let's try it to at least get
4418 	 * access to memory reserved if the current task is killed (see
4419 	 * out_of_memory). Once filesystems are ready to handle allocation
4420 	 * failures more gracefully we should just bail out here.
4421 	 */
4422 
4423 	/* Exhausted what can be done so it's blame time */
4424 	if (out_of_memory(&oc) ||
4425 	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4426 		*did_some_progress = 1;
4427 
4428 		/*
4429 		 * Help non-failing allocations by giving them access to memory
4430 		 * reserves
4431 		 */
4432 		if (gfp_mask & __GFP_NOFAIL)
4433 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4434 					ALLOC_NO_WATERMARKS, ac);
4435 	}
4436 out:
4437 	mutex_unlock(&oom_lock);
4438 	return page;
4439 }
4440 
4441 /*
4442  * Maximum number of compaction retries with progress before the OOM
4443  * killer is considered the only way to move forward.
4444  */
4445 #define MAX_COMPACT_RETRIES 16
4446 
4447 #ifdef CONFIG_COMPACTION
4448 /* Try memory compaction for high-order allocations before reclaim */
4449 static struct page *
4450 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4451 		unsigned int alloc_flags, const struct alloc_context *ac,
4452 		enum compact_priority prio, enum compact_result *compact_result)
4453 {
4454 	struct page *page = NULL;
4455 	unsigned long pflags;
4456 	unsigned int noreclaim_flag;
4457 
4458 	if (!order)
4459 		return NULL;
4460 
4461 	psi_memstall_enter(&pflags);
4462 	delayacct_compact_start();
4463 	noreclaim_flag = memalloc_noreclaim_save();
4464 
4465 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4466 								prio, &page);
4467 
4468 	memalloc_noreclaim_restore(noreclaim_flag);
4469 	psi_memstall_leave(&pflags);
4470 	delayacct_compact_end();
4471 
4472 	if (*compact_result == COMPACT_SKIPPED)
4473 		return NULL;
4474 	/*
4475 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4476 	 * count a compaction stall
4477 	 */
4478 	count_vm_event(COMPACTSTALL);
4479 
4480 	/* Prep a captured page if available */
4481 	if (page)
4482 		prep_new_page(page, order, gfp_mask, alloc_flags);
4483 
4484 	/* Try to get a page from the freelist if available */
4485 	if (!page)
4486 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4487 
4488 	if (page) {
4489 		struct zone *zone = page_zone(page);
4490 
4491 		zone->compact_blockskip_flush = false;
4492 		compaction_defer_reset(zone, order, true);
4493 		count_vm_event(COMPACTSUCCESS);
4494 		return page;
4495 	}
4496 
4497 	/*
4498 	 * It's bad if a compaction run occurs and fails. The most likely reason
4499 	 * is that pages exist, but not enough to satisfy watermarks.
4500 	 */
4501 	count_vm_event(COMPACTFAIL);
4502 
4503 	cond_resched();
4504 
4505 	return NULL;
4506 }
4507 
4508 static inline bool
4509 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4510 		     enum compact_result compact_result,
4511 		     enum compact_priority *compact_priority,
4512 		     int *compaction_retries)
4513 {
4514 	int max_retries = MAX_COMPACT_RETRIES;
4515 	int min_priority;
4516 	bool ret = false;
4517 	int retries = *compaction_retries;
4518 	enum compact_priority priority = *compact_priority;
4519 
4520 	if (!order)
4521 		return false;
4522 
4523 	if (fatal_signal_pending(current))
4524 		return false;
4525 
4526 	if (compaction_made_progress(compact_result))
4527 		(*compaction_retries)++;
4528 
4529 	/*
4530 	 * compaction considers all the zones as desperately out of memory
4531 	 * so it doesn't really make much sense to retry except when the
4532 	 * failure could be caused by insufficient priority.
4533 	 */
4534 	if (compaction_failed(compact_result))
4535 		goto check_priority;
4536 
4537 	/*
4538 	 * compaction was skipped because there are not enough order-0 pages
4539 	 * to work with, so we retry only if it looks like reclaim can help.
4540 	 */
4541 	if (compaction_needs_reclaim(compact_result)) {
4542 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4543 		goto out;
4544 	}
4545 
4546 	/*
4547 	 * Make sure the compaction wasn't deferred and didn't bail out early
4548 	 * due to lock contention before we declare that we should give up.
4549 	 * But the next retry should use a higher priority if allowed, so
4550 	 * we don't just keep bailing out endlessly.
4551 	 */
4552 	if (compaction_withdrawn(compact_result))
4553 		goto check_priority;
4555 
4556 	/*
4557 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4558 	 * costly ones because they are de facto nofail and invoke the OOM
4559 	 * killer to move on while costly ones can fail and users are ready
4560 	 * to cope with that. The 1/4 retry ratio is rather arbitrary but we
4561 	 * would need much more detailed feedback from compaction to
4562 	 * make a better decision.
4563 	 */
4564 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4565 		max_retries /= 4;
4566 	if (*compaction_retries <= max_retries) {
4567 		ret = true;
4568 		goto out;
4569 	}
4570 
4571 	/*
4572 	 * Make sure there are attempts at the highest priority if we exhausted
4573 	 * all retries or failed at the lower priorities.
4574 	 */
4575 check_priority:
4576 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4577 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4578 
4579 	if (*compact_priority > min_priority) {
4580 		(*compact_priority)--;
4581 		*compaction_retries = 0;
4582 		ret = true;
4583 	}
4584 out:
4585 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4586 	return ret;
4587 }
4588 #else
4589 static inline struct page *
4590 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4591 		unsigned int alloc_flags, const struct alloc_context *ac,
4592 		enum compact_priority prio, enum compact_result *compact_result)
4593 {
4594 	*compact_result = COMPACT_SKIPPED;
4595 	return NULL;
4596 }
4597 
4598 static inline bool
4599 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4600 		     enum compact_result compact_result,
4601 		     enum compact_priority *compact_priority,
4602 		     int *compaction_retries)
4603 {
4604 	struct zone *zone;
4605 	struct zoneref *z;
4606 
4607 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4608 		return false;
4609 
4610 	/*
4611 	 * There are setups with compaction disabled which would prefer to loop
4612 	 * inside the allocator rather than hit the oom killer prematurely.
4613 	 * Let's give them some hope and keep retrying while the order-0
4614 	 * watermarks are OK.
4615 	 */
4616 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4617 				ac->highest_zoneidx, ac->nodemask) {
4618 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4619 					ac->highest_zoneidx, alloc_flags))
4620 			return true;
4621 	}
4622 	return false;
4623 }
4624 #endif /* CONFIG_COMPACTION */
4625 
4626 #ifdef CONFIG_LOCKDEP
4627 static struct lockdep_map __fs_reclaim_map =
4628 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4629 
4630 static bool __need_reclaim(gfp_t gfp_mask)
4631 {
4632 	/* no reclaim without waiting on it */
4633 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4634 		return false;
4635 
4636 	/* this guy won't enter reclaim */
4637 	if (current->flags & PF_MEMALLOC)
4638 		return false;
4639 
4640 	if (gfp_mask & __GFP_NOLOCKDEP)
4641 		return false;
4642 
4643 	return true;
4644 }
4645 
4646 void __fs_reclaim_acquire(unsigned long ip)
4647 {
4648 	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4649 }
4650 
4651 void __fs_reclaim_release(unsigned long ip)
4652 {
4653 	lock_release(&__fs_reclaim_map, ip);
4654 }
4655 
4656 void fs_reclaim_acquire(gfp_t gfp_mask)
4657 {
4658 	gfp_mask = current_gfp_context(gfp_mask);
4659 
4660 	if (__need_reclaim(gfp_mask)) {
4661 		if (gfp_mask & __GFP_FS)
4662 			__fs_reclaim_acquire(_RET_IP_);
4663 
4664 #ifdef CONFIG_MMU_NOTIFIER
4665 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4666 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4667 #endif
4668 
4669 	}
4670 }
4671 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4672 
4673 void fs_reclaim_release(gfp_t gfp_mask)
4674 {
4675 	gfp_mask = current_gfp_context(gfp_mask);
4676 
4677 	if (__need_reclaim(gfp_mask)) {
4678 		if (gfp_mask & __GFP_FS)
4679 			__fs_reclaim_release(_RET_IP_);
4680 	}
4681 }
4682 EXPORT_SYMBOL_GPL(fs_reclaim_release);
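
/*
 * These annotations bracket actual reclaim; see __perform_reclaim() below
 * for the canonical pattern, sketched here:
 *
 *	fs_reclaim_acquire(gfp_mask);
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 *	fs_reclaim_release(gfp_mask);
 */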
4683 #endif
4684 
4685 /*
4686  * Zonelists may change due to hotplug during allocation. Detect when zonelists
4687  * have been rebuilt so the allocation can be retried. The reader side does
4688  * not lock and retries the allocation if the zonelist changes. The writer
4689  * side is protected by the embedded spin_lock.
4690  */
4691 static DEFINE_SEQLOCK(zonelist_update_seq);
4692 
4693 static unsigned int zonelist_iter_begin(void)
4694 {
4695 	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4696 		return read_seqbegin(&zonelist_update_seq);
4697 
4698 	return 0;
4699 }
4700 
4701 static unsigned int check_retry_zonelist(unsigned int seq)
4702 {
4703 	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4704 		return read_seqretry(&zonelist_update_seq, seq);
4705 
4706 	return seq;
4707 }
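
/*
 * The two helpers above are used as a read-side seqlock pair; a sketch of
 * the pattern in __alloc_pages_slowpath():
 *
 *	cookie = zonelist_iter_begin();
 *	...allocation attempts...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;
 */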
4708 
4709 /* Perform direct synchronous page reclaim */
4710 static unsigned long
4711 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4712 					const struct alloc_context *ac)
4713 {
4714 	unsigned int noreclaim_flag;
4715 	unsigned long progress;
4716 
4717 	cond_resched();
4718 
4719 	/* We now go into synchronous reclaim */
4720 	cpuset_memory_pressure_bump();
4721 	fs_reclaim_acquire(gfp_mask);
4722 	noreclaim_flag = memalloc_noreclaim_save();
4723 
4724 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4725 								ac->nodemask);
4726 
4727 	memalloc_noreclaim_restore(noreclaim_flag);
4728 	fs_reclaim_release(gfp_mask);
4729 
4730 	cond_resched();
4731 
4732 	return progress;
4733 }
4734 
4735 /* The really slow allocator path where we enter direct reclaim */
4736 static inline struct page *
4737 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4738 		unsigned int alloc_flags, const struct alloc_context *ac,
4739 		unsigned long *did_some_progress)
4740 {
4741 	struct page *page = NULL;
4742 	unsigned long pflags;
4743 	bool drained = false;
4744 
4745 	psi_memstall_enter(&pflags);
4746 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4747 	if (unlikely(!(*did_some_progress)))
4748 		goto out;
4749 
4750 retry:
4751 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4752 
4753 	/*
4754 	 * If an allocation failed after direct reclaim, it could be because
4755 	 * pages are pinned on the per-cpu lists or in highatomic reserves.
4756 	 * Shrink them and try again.
4757 	 */
4758 	if (!page && !drained) {
4759 		unreserve_highatomic_pageblock(ac, false);
4760 		drain_all_pages(NULL);
4761 		drained = true;
4762 		goto retry;
4763 	}
4764 out:
4765 	psi_memstall_leave(&pflags);
4766 
4767 	return page;
4768 }
4769 
4770 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4771 			     const struct alloc_context *ac)
4772 {
4773 	struct zoneref *z;
4774 	struct zone *zone;
4775 	pg_data_t *last_pgdat = NULL;
4776 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4777 
4778 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4779 					ac->nodemask) {
4780 		if (!managed_zone(zone))
4781 			continue;
4782 		if (last_pgdat != zone->zone_pgdat) {
4783 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4784 			last_pgdat = zone->zone_pgdat;
4785 		}
4786 	}
4787 }
4788 
4789 static inline unsigned int
4790 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
4791 {
4792 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4793 
4794 	/*
4795 	 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
4796 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4797 	 * to save two branches.
4798 	 */
4799 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
4800 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4801 
4802 	/*
4803 	 * The caller may dip into page reserves a bit more if the caller
4804 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4805 	 * cannot run direct reclaim, or if the caller has a realtime scheduling
4806 	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
4807 	 */
4808 	alloc_flags |= (__force int)
4809 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4810 
4811 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4812 		/*
4813 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4814 		 * if it can't schedule.
4815 		 */
4816 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4817 			alloc_flags |= ALLOC_NON_BLOCK;
4818 
4819 			if (order > 0)
4820 				alloc_flags |= ALLOC_HIGHATOMIC;
4821 		}
4822 
4823 		/*
4824 		 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4825 		 * GFP_ATOMIC) rather than fail, see the comment for
4826 		 * __cpuset_node_allowed().
4827 		 */
4828 		if (alloc_flags & ALLOC_MIN_RESERVE)
4829 			alloc_flags &= ~ALLOC_CPUSET;
4830 	} else if (unlikely(rt_task(current)) && in_task())
4831 		alloc_flags |= ALLOC_MIN_RESERVE;
4832 
4833 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4834 
4835 	return alloc_flags;
4836 }
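
/*
 * Worked example (per the comment above): a GFP_ATOMIC (__GFP_HIGH |
 * __GFP_KSWAPD_RECLAIM) request of order > 0 ends up with ALLOC_WMARK_MIN |
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD | ALLOC_NON_BLOCK | ALLOC_HIGHATOMIC,
 * with ALLOC_CPUSET cleared (plus ALLOC_CMA for movable requests when CMA
 * is enabled).
 */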
4837 
4838 static bool oom_reserves_allowed(struct task_struct *tsk)
4839 {
4840 	if (!tsk_is_oom_victim(tsk))
4841 		return false;
4842 
4843 	/*
4844 	 * !MMU configurations don't have the oom reaper, so give access to
4845 	 * memory reserves only to the thread with TIF_MEMDIE set.
4846 	 */
4847 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4848 		return false;
4849 
4850 	return true;
4851 }
4852 
4853 /*
4854  * Distinguish requests which really need access to full memory
4855  * reserves from oom victims which can live with a portion of it
4856  * reserves from oom victims which can live with a portion of them
4857 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4858 {
4859 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4860 		return 0;
4861 	if (gfp_mask & __GFP_MEMALLOC)
4862 		return ALLOC_NO_WATERMARKS;
4863 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4864 		return ALLOC_NO_WATERMARKS;
4865 	if (!in_interrupt()) {
4866 		if (current->flags & PF_MEMALLOC)
4867 			return ALLOC_NO_WATERMARKS;
4868 		else if (oom_reserves_allowed(current))
4869 			return ALLOC_OOM;
4870 	}
4871 
4872 	return 0;
4873 }
4874 
4875 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4876 {
4877 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4878 }
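
/*
 * To illustrate the ordering above: __GFP_NOMEMALLOC always wins and gets
 * nothing, __GFP_MEMALLOC and a PF_MEMALLOC task in process context get full
 * ALLOC_NO_WATERMARKS access, while a mere oom victim is limited to the
 * smaller ALLOC_OOM share of the reserves.
 */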
4879 
4880 /*
4881  * Checks whether it makes sense to retry the reclaim to make a forward progress
4882  * for the given allocation request.
4883  *
4884  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4885  * without success, or when we couldn't even meet the watermark if we
4886  * reclaimed all remaining pages on the LRU lists.
4887  *
4888  * Returns true if a retry is viable or false to enter the oom path.
4889  */
4890 static inline bool
4891 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4892 		     struct alloc_context *ac, int alloc_flags,
4893 		     bool did_some_progress, int *no_progress_loops)
4894 {
4895 	struct zone *zone;
4896 	struct zoneref *z;
4897 	bool ret = false;
4898 
4899 	/*
4900 	 * Costly allocations might have made progress, but that doesn't mean
4901 	 * their order will become available due to high fragmentation, so
4902 	 * always increment the no-progress counter for them.
4903 	 */
4904 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4905 		*no_progress_loops = 0;
4906 	else
4907 		(*no_progress_loops)++;
4908 
4909 	/*
4910 	 * Make sure we converge to OOM if we cannot make any progress
4911 	 * several times in a row.
4912 	 */
4913 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4914 		/* Before OOM, exhaust highatomic_reserve */
4915 		return unreserve_highatomic_pageblock(ac, true);
4916 	}
4917 
4918 	/*
4919 	 * Keep reclaiming pages while there is a chance this will lead
4920 	 * somewhere.  If none of the target zones can satisfy our allocation
4921 	 * request even if all reclaimable pages are considered then we are
4922 	 * screwed and have to go OOM.
4923 	 */
4924 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4925 				ac->highest_zoneidx, ac->nodemask) {
4926 		unsigned long available;
4927 		unsigned long reclaimable;
4928 		unsigned long min_wmark = min_wmark_pages(zone);
4929 		bool wmark;
4930 
4931 		available = reclaimable = zone_reclaimable_pages(zone);
4932 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4933 
4934 		/*
4935 		 * Would the allocation succeed if we reclaimed all
4936 		 * reclaimable pages?
4937 		 */
4938 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4939 				ac->highest_zoneidx, alloc_flags, available);
4940 		trace_reclaim_retry_zone(z, order, reclaimable,
4941 				available, min_wmark, *no_progress_loops, wmark);
4942 		if (wmark) {
4943 			ret = true;
4944 			break;
4945 		}
4946 	}
4947 
4948 	/*
4949 	 * Memory allocation/reclaim might be called from a WQ context and the
4950 	 * current implementation of the WQ concurrency control doesn't
4951 	 * recognize that a particular WQ is congested if the worker thread is
4952 	 * looping without ever sleeping. Therefore we have to do a short sleep
4953 	 * here rather than calling cond_resched().
4954 	 */
4955 	if (current->flags & PF_WQ_WORKER)
4956 		schedule_timeout_uninterruptible(1);
4957 	else
4958 		cond_resched();
4959 	return ret;
4960 }
4961 
4962 static inline bool
4963 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4964 {
4965 	/*
4966 	 * It's possible that cpuset's mems_allowed and the nodemask from
4967 	 * mempolicy don't intersect. This should be normally dealt with by
4968 	 * policy_nodemask(), but it's possible to race with cpuset update in
4969 	 * such a way that the check therein was true, and then it became false
4970 	 * before we got our cpuset_mems_cookie here.
4971 	 * This assumes that for all allocations, ac->nodemask can come only
4972 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4973 	 * when it does not intersect with the cpuset restrictions) or the
4974 	 * caller can deal with a violated nodemask.
4975 	 */
4976 	if (cpusets_enabled() && ac->nodemask &&
4977 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4978 		ac->nodemask = NULL;
4979 		return true;
4980 	}
4981 
4982 	/*
4983 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4984 	 * possible to race with parallel threads in such a way that our
4985 	 * allocation can fail while the mask is being updated. If we are about
4986 	 * to fail, check if the cpuset changed during allocation and if so,
4987 	 * retry.
4988 	 */
4989 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4990 		return true;
4991 
4992 	return false;
4993 }
4994 
4995 static inline struct page *
4996 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4997 						struct alloc_context *ac)
4998 {
4999 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
5000 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
5001 	struct page *page = NULL;
5002 	unsigned int alloc_flags;
5003 	unsigned long did_some_progress;
5004 	enum compact_priority compact_priority;
5005 	enum compact_result compact_result;
5006 	int compaction_retries;
5007 	int no_progress_loops;
5008 	unsigned int cpuset_mems_cookie;
5009 	unsigned int zonelist_iter_cookie;
5010 	int reserve_flags;
5011 
5012 restart:
5013 	compaction_retries = 0;
5014 	no_progress_loops = 0;
5015 	compact_priority = DEF_COMPACT_PRIORITY;
5016 	cpuset_mems_cookie = read_mems_allowed_begin();
5017 	zonelist_iter_cookie = zonelist_iter_begin();
5018 
5019 	/*
5020 	 * The fast path uses conservative alloc_flags to succeed only until
5021 	 * kswapd needs to be woken up, and to avoid the cost of setting up
5022 	 * alloc_flags precisely. So we do that now.
5023 	 */
5024 	alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
5025 
5026 	/*
5027 	 * We need to recalculate the starting point for the zonelist iterator
5028 	 * because we might have used different nodemask in the fast path, or
5029 	 * there was a cpuset modification and we are retrying - otherwise we
5030 	 * could end up iterating over non-eligible zones endlessly.
5031 	 */
5032 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5033 					ac->highest_zoneidx, ac->nodemask);
5034 	if (!ac->preferred_zoneref->zone)
5035 		goto nopage;
5036 
5037 	/*
5038 	 * Check for insane configurations where the cpuset doesn't contain
5039 	 * any suitable zone to satisfy the request - e.g. non-movable
5040 	 * GFP_HIGHUSER allocations from MOVABLE nodes only.
5041 	 */
5042 	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
5043 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
5044 					ac->highest_zoneidx,
5045 					&cpuset_current_mems_allowed);
5046 		if (!z->zone)
5047 			goto nopage;
5048 	}
5049 
5050 	if (alloc_flags & ALLOC_KSWAPD)
5051 		wake_all_kswapds(order, gfp_mask, ac);
5052 
5053 	/*
5054 	 * The adjusted alloc_flags might result in immediate success, so try
5055 	 * that first
5056 	 */
5057 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5058 	if (page)
5059 		goto got_pg;
5060 
5061 	/*
5062 	 * For costly allocations, try direct compaction first, as it's likely
5063 	 * that we have enough base pages and don't need to reclaim. For non-
5064 	 * movable high-order allocations, do that as well, as compaction will
5065 	 * try to prevent permanent fragmentation by migrating from blocks of the
5066 	 * same migratetype.
5067 	 * Don't try this for allocations that are allowed to ignore
5068 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
5069 	 */
5070 	if (can_direct_reclaim &&
5071 			(costly_order ||
5072 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
5073 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
5074 		page = __alloc_pages_direct_compact(gfp_mask, order,
5075 						alloc_flags, ac,
5076 						INIT_COMPACT_PRIORITY,
5077 						&compact_result);
5078 		if (page)
5079 			goto got_pg;
5080 
5081 		/*
5082 		 * Checks for costly allocations with __GFP_NORETRY, which
5083 		 * includes some THP page fault allocations
5084 		 */
5085 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
5086 			/*
5087 			 * If allocating entire pageblock(s) and compaction
5088 			 * failed because all zones are below low watermarks
5089 			 * or is prohibited because it recently failed at this
5090 			 * order, fail immediately unless the allocator has
5091 			 * requested compaction and reclaim retry.
5092 			 *
5093 			 * Reclaim is
5094 			 *  - potentially very expensive because zones are far
5095 			 *    below their low watermarks or this is part of very
5096 			 *    bursty high order allocations,
5097 			 *  - not guaranteed to help because isolate_freepages()
5098 			 *    may not iterate over freed pages as part of its
5099 			 *    linear scan, and
5100 			 *  - unlikely to make entire pageblocks free on its
5101 			 *    own.
5102 			 */
5103 			if (compact_result == COMPACT_SKIPPED ||
5104 			    compact_result == COMPACT_DEFERRED)
5105 				goto nopage;
5106 
5107 			/*
5108 			 * Looks like reclaim/compaction is worth trying, but
5109 			 * sync compaction could be very expensive, so keep
5110 			 * using async compaction.
5111 			 */
5112 			compact_priority = INIT_COMPACT_PRIORITY;
5113 		}
5114 	}
5115 
5116 retry:
5117 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
5118 	if (alloc_flags & ALLOC_KSWAPD)
5119 		wake_all_kswapds(order, gfp_mask, ac);
5120 
5121 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5122 	if (reserve_flags)
5123 		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
5124 					  (alloc_flags & ALLOC_KSWAPD);
5125 
5126 	/*
5127 	 * Reset the nodemask and zonelist iterators if memory policies can be
5128 	 * ignored. These allocations are high priority and system rather than
5129 	 * user oriented.
5130 	 */
5131 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5132 		ac->nodemask = NULL;
5133 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5134 					ac->highest_zoneidx, ac->nodemask);
5135 	}
5136 
5137 	/* Attempt with potentially adjusted zonelist and alloc_flags */
5138 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5139 	if (page)
5140 		goto got_pg;
5141 
5142 	/* Caller is not willing to reclaim, we can't balance anything */
5143 	if (!can_direct_reclaim)
5144 		goto nopage;
5145 
5146 	/* Avoid recursion of direct reclaim */
5147 	if (current->flags & PF_MEMALLOC)
5148 		goto nopage;
5149 
5150 	/* Try direct reclaim and then allocating */
5151 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5152 							&did_some_progress);
5153 	if (page)
5154 		goto got_pg;
5155 
5156 	/* Try direct compaction and then allocating */
5157 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5158 					compact_priority, &compact_result);
5159 	if (page)
5160 		goto got_pg;
5161 
5162 	/* Do not loop if specifically requested */
5163 	if (gfp_mask & __GFP_NORETRY)
5164 		goto nopage;
5165 
5166 	/*
5167 	 * Do not retry costly high order allocations unless they are
5168 	 * __GFP_RETRY_MAYFAIL
5169 	 */
5170 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5171 		goto nopage;
5172 
5173 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5174 				 did_some_progress > 0, &no_progress_loops))
5175 		goto retry;
5176 
5177 	/*
5178 	 * It doesn't make any sense to retry compaction if order-0
5179 	 * reclaim is not able to make any progress because the current
5180 	 * implementation of compaction depends on a sufficient amount
5181 	 * of free memory (see __compaction_suitable).
5182 	 */
5183 	if (did_some_progress > 0 &&
5184 			should_compact_retry(ac, order, alloc_flags,
5185 				compact_result, &compact_priority,
5186 				&compaction_retries))
5187 		goto retry;
5188 
5189 
5190 	/*
5191 	 * Deal with possible cpuset update races or zonelist updates to avoid
5192 	 * an unnecessary OOM kill.
5193 	 */
5194 	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5195 	    check_retry_zonelist(zonelist_iter_cookie))
5196 		goto restart;
5197 
5198 	/* Reclaim has failed us, start killing things */
5199 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5200 	if (page)
5201 		goto got_pg;
5202 
5203 	/* Avoid allocations with no watermarks from looping endlessly */
5204 	if (tsk_is_oom_victim(current) &&
5205 	    (alloc_flags & ALLOC_OOM ||
5206 	     (gfp_mask & __GFP_NOMEMALLOC)))
5207 		goto nopage;
5208 
5209 	/* Retry as long as the OOM killer is making progress */
5210 	if (did_some_progress) {
5211 		no_progress_loops = 0;
5212 		goto retry;
5213 	}
5214 
5215 nopage:
5216 	/*
5217 	 * Deal with possible cpuset update races or zonelist updates to avoid
5218 	 * an unnecessary OOM kill.
5219 	 */
5220 	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5221 	    check_retry_zonelist(zonelist_iter_cookie))
5222 		goto restart;
5223 
5224 	/*
5225 	 * Make sure that a __GFP_NOFAIL request doesn't leak out and that
5226 	 * we always retry.
5227 	 */
5228 	if (gfp_mask & __GFP_NOFAIL) {
5229 		/*
5230 		 * All existing users of __GFP_NOFAIL are blockable, so warn
5231 		 * about any new users that actually require GFP_NOWAIT
5232 		 */
5233 		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
5234 			goto fail;
5235 
5236 		/*
5237 		 * PF_MEMALLOC request from this context is rather bizarre
5238 		 * because we cannot reclaim anything and can only loop waiting
5239 		 * for somebody to do the work for us
5240 		 */
5241 		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
5242 
5243 		/*
5244 		 * Non-failing costly orders are a hard requirement which we
5245 		 * are not well prepared for, so warn about these users so
5246 		 * that we can identify them and convert them to something
5247 		 * else.
5248 		 */
5249 		WARN_ON_ONCE_GFP(costly_order, gfp_mask);
5250 
5251 		/*
5252 		 * Help non-failing allocations by giving some access to memory
5253 		 * reserves normally used for high priority non-blocking
5254 		 * allocations but do not use ALLOC_NO_WATERMARKS because this
5255 		 * could deplete whole memory reserves which would just make
5256 		 * the situation worse.
5257 		 */
5258 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
5259 		if (page)
5260 			goto got_pg;
5261 
5262 		cond_resched();
5263 		goto retry;
5264 	}
5265 fail:
5266 	warn_alloc(gfp_mask, ac->nodemask,
5267 			"page allocation failure: order:%u", order);
5268 got_pg:
5269 	return page;
5270 }
5271 
5272 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5273 		int preferred_nid, nodemask_t *nodemask,
5274 		struct alloc_context *ac, gfp_t *alloc_gfp,
5275 		unsigned int *alloc_flags)
5276 {
5277 	ac->highest_zoneidx = gfp_zone(gfp_mask);
5278 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5279 	ac->nodemask = nodemask;
5280 	ac->migratetype = gfp_migratetype(gfp_mask);
5281 
5282 	if (cpusets_enabled()) {
5283 		*alloc_gfp |= __GFP_HARDWALL;
5284 		/*
5285 		 * When we are in interrupt context, the allocation is
5286 		 * irrelevant to the current task context: any node is OK.
5287 		 */
5288 		if (in_task() && !ac->nodemask)
5289 			ac->nodemask = &cpuset_current_mems_allowed;
5290 		else
5291 			*alloc_flags |= ALLOC_CPUSET;
5292 	}
5293 
5294 	might_alloc(gfp_mask);
5295 
5296 	if (should_fail_alloc_page(gfp_mask, order))
5297 		return false;
5298 
5299 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5300 
5301 	/* Dirty zone balancing only done in the fast path */
5302 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5303 
5304 	/*
5305 	 * The preferred zone is used for statistics but crucially it is
5306 	 * also used as the starting point for the zonelist iterator. It
5307 	 * may get reset for allocations that ignore memory policies.
5308 	 */
5309 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5310 					ac->highest_zoneidx, ac->nodemask);
5311 
5312 	return true;
5313 }
5314 
5315 /*
5316  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5317  * @gfp: GFP flags for the allocation
5318  * @preferred_nid: The preferred NUMA node ID to allocate from
5319  * @nodemask: Set of nodes to allocate from, may be NULL
5320  * @nr_pages: The number of pages desired on the list or array
5321  * @page_list: Optional list to store the allocated pages
5322  * @page_array: Optional array to store the pages
5323  *
5324  * This is a batched version of the page allocator that attempts to
5325  * allocate nr_pages quickly. Pages are added to page_list if page_list
5326  * is not NULL, otherwise it is assumed that the page_array is valid.
5327  *
5328  * For lists, nr_pages is the number of pages that should be allocated.
5329  *
5330  * For arrays, only NULL elements are populated with pages and nr_pages
5331  * is the maximum number of pages that will be stored in the array.
5332  *
5333  * Returns the number of pages on the list or array.
5334  */
5335 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5336 			nodemask_t *nodemask, int nr_pages,
5337 			struct list_head *page_list,
5338 			struct page **page_array)
5339 {
5340 	struct page *page;
5341 	unsigned long __maybe_unused UP_flags;
5342 	struct zone *zone;
5343 	struct zoneref *z;
5344 	struct per_cpu_pages *pcp;
5345 	struct list_head *pcp_list;
5346 	struct alloc_context ac;
5347 	gfp_t alloc_gfp;
5348 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5349 	int nr_populated = 0, nr_account = 0;
5350 
5351 	/*
5352 	 * Skip populated array elements to determine if any pages need
5353 	 * to be allocated before disabling IRQs.
5354 	 */
5355 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5356 		nr_populated++;
5357 
5358 	/* No pages requested? */
5359 	if (unlikely(nr_pages <= 0))
5360 		goto out;
5361 
5362 	/* Already populated array? */
5363 	if (unlikely(page_array && nr_pages - nr_populated == 0))
5364 		goto out;
5365 
5366 	/* Bulk allocator does not support memcg accounting. */
5367 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
5368 		goto failed;
5369 
5370 	/* Use the single page allocator for one page. */
5371 	if (nr_pages - nr_populated == 1)
5372 		goto failed;
5373 
5374 #ifdef CONFIG_PAGE_OWNER
5375 	/*
5376 	 * PAGE_OWNER may recurse into the allocator to allocate space to
5377 	 * save the stack with pagesets.lock held. Releasing/reacquiring
5378 	 * removes much of the performance benefit of bulk allocation so
5379 	 * force the caller to allocate one page at a time, as that has
5380 	 * similar performance to adding the complexity to the bulk allocator.
5381 	 */
5382 	if (static_branch_unlikely(&page_owner_inited))
5383 		goto failed;
5384 #endif
5385 
5386 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5387 	gfp &= gfp_allowed_mask;
5388 	alloc_gfp = gfp;
5389 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5390 		goto out;
5391 	gfp = alloc_gfp;
5392 
5393 	/* Find an allowed local zone that meets the low watermark. */
5394 	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5395 		unsigned long mark;
5396 
5397 		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5398 		    !__cpuset_zone_allowed(zone, gfp)) {
5399 			continue;
5400 		}
5401 
5402 		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5403 		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5404 			goto failed;
5405 		}
5406 
5407 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5408 		if (zone_watermark_fast(zone, 0,  mark,
5409 				zonelist_zone_idx(ac.preferred_zoneref),
5410 				alloc_flags, gfp)) {
5411 			break;
5412 		}
5413 	}
5414 
5415 	/*
5416 	 * If there are no allowed local zones that meet the watermarks then
5417 	 * try to allocate a single page and reclaim if necessary.
5418 	 */
5419 	if (unlikely(!zone))
5420 		goto failed;
5421 
5422 	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
5423 	pcp_trylock_prepare(UP_flags);
5424 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
5425 	if (!pcp)
5426 		goto failed_irq;
5427 
5428 	/* Attempt the batch allocation */
5429 	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5430 	while (nr_populated < nr_pages) {
5431 
5432 		/* Skip existing pages */
5433 		if (page_array && page_array[nr_populated]) {
5434 			nr_populated++;
5435 			continue;
5436 		}
5437 
5438 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5439 								pcp, pcp_list);
5440 		if (unlikely(!page)) {
5441 			/* Try and allocate at least one page */
5442 			if (!nr_account) {
5443 				pcp_spin_unlock(pcp);
5444 				goto failed_irq;
5445 			}
5446 			break;
5447 		}
5448 		nr_account++;
5449 
5450 		prep_new_page(page, 0, gfp, 0);
5451 		if (page_list)
5452 			list_add(&page->lru, page_list);
5453 		else
5454 			page_array[nr_populated] = page;
5455 		nr_populated++;
5456 	}
5457 
5458 	pcp_spin_unlock(pcp);
5459 	pcp_trylock_finish(UP_flags);
5460 
5461 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5462 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5463 
5464 out:
5465 	return nr_populated;
5466 
5467 failed_irq:
5468 	pcp_trylock_finish(UP_flags);
5469 
5470 failed:
5471 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5472 	if (page) {
5473 		if (page_list)
5474 			list_add(&page->lru, page_list);
5475 		else
5476 			page_array[nr_populated] = page;
5477 		nr_populated++;
5478 	}
5479 
5480 	goto out;
5481 }
5482 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
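
/*
 * Illustrative sketch, not part of this file: one way a caller might use
 * the array variant of __alloc_pages_bulk(). The function name is
 * hypothetical. Only NULL slots of the array are populated, and the
 * return value is the total number of populated slots, which may be less
 * than requested, so callers must be prepared for partial success.
 */
static int example_bulk_refill(struct page **pages, int want)
{
	int filled;

	/* Batched attempt to populate every NULL slot in the array. */
	filled = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
				    want, NULL, pages);

	/* Partial success is normal under memory pressure. */
	return filled == want ? 0 : -ENOMEM;
}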
5483 
5484 /*
5485  * This is the 'heart' of the zoned buddy allocator.
5486  */
5487 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5488 							nodemask_t *nodemask)
5489 {
5490 	struct page *page;
5491 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5492 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5493 	struct alloc_context ac = { };
5494 
5495 	/*
5496 	 * There are several places where we assume that the order value is sane
5497 	 * so bail out early if the request is out of bounds.
5498 	 */
5499 	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
5500 		return NULL;
5501 
5502 	gfp &= gfp_allowed_mask;
5503 	/*
5504 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5505 	 * and GFP_NOIO, which have to be inherited for all allocation requests
5506 	 * from a particular context which has been marked by
5507 	 * memalloc_no{fs,io}_{save,restore}, and PF_MEMALLOC_PIN, which
5508 	 * ensures movable zones are not used during allocation.
5509 	 */
5510 	gfp = current_gfp_context(gfp);
5511 	alloc_gfp = gfp;
5512 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5513 			&alloc_gfp, &alloc_flags))
5514 		return NULL;
5515 
5516 	/*
5517 	 * Forbid the first pass from falling back to types that fragment
5518 	 * memory until all local zones are considered.
5519 	 */
5520 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5521 
5522 	/* First allocation attempt */
5523 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5524 	if (likely(page))
5525 		goto out;
5526 
5527 	alloc_gfp = gfp;
5528 	ac.spread_dirty_pages = false;
5529 
5530 	/*
5531 	 * Restore the original nodemask if it was potentially replaced with
5532 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5533 	 */
5534 	ac.nodemask = nodemask;
5535 
5536 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5537 
5538 out:
5539 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
5540 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5541 		__free_pages(page, order);
5542 		page = NULL;
5543 	}
5544 
5545 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5546 	kmsan_alloc_page(page, order, alloc_gfp);
5547 
5548 	return page;
5549 }
5550 EXPORT_SYMBOL(__alloc_pages);
5551 
5552 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5553 		nodemask_t *nodemask)
5554 {
5555 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5556 			preferred_nid, nodemask);
5557 
5558 	if (page && order > 1)
5559 		prep_transhuge_page(page);
5560 	return (struct folio *)page;
5561 }
5562 EXPORT_SYMBOL(__folio_alloc);
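
/*
 * Illustrative sketch, not part of this file: allocating and releasing an
 * order-2 folio via __folio_alloc(). The function name is hypothetical;
 * folio_put() drops the final reference and frees all four pages.
 */
static int example_folio_user(void)
{
	struct folio *folio = __folio_alloc(GFP_KERNEL, 2, numa_mem_id(), NULL);

	if (!folio)
		return -ENOMEM;

	/* ... the folio spans 1 << 2 == 4 contiguous pages ... */

	folio_put(folio);
	return 0;
}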
5563 
5564 /*
5565  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5566  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5567  * you need to access high mem.
5568  */
5569 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5570 {
5571 	struct page *page;
5572 
5573 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5574 	if (!page)
5575 		return 0;
5576 	return (unsigned long) page_address(page);
5577 }
5578 EXPORT_SYMBOL(__get_free_pages);
5579 
5580 unsigned long get_zeroed_page(gfp_t gfp_mask)
5581 {
5582 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5583 }
5584 EXPORT_SYMBOL(get_zeroed_page);
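
/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * __get_free_pages() with free_pages(). The function name and order are
 * hypothetical; note the order passed to free_pages() must match the
 * order used at allocation time.
 */
static int example_scratch_buffer(void)
{
	/* Two contiguous, zeroed, page-aligned pages of lowmem. */
	unsigned long buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);

	if (!buf)
		return -ENOMEM;

	/* ... use the 8 KiB buffer at 'buf' (with 4 KiB pages) ... */

	free_pages(buf, 1);
	return 0;
}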
5585 
5586 /**
5587  * __free_pages - Free pages allocated with alloc_pages().
5588  * @page: The page pointer returned from alloc_pages().
5589  * @order: The order of the allocation.
5590  *
5591  * This function can free multi-page allocations that are not compound
5592  * pages.  It does not check that the @order passed in matches that of
5593  * the allocation, so it is easy to leak memory.  Freeing more memory
5594  * than was allocated will probably emit a warning.
5595  *
5596  * If the last reference to this page is speculative, it will be released
5597  * by put_page() which only frees the first page of a non-compound
5598  * allocation.  To prevent the remaining pages from being leaked, we free
5599  * the subsequent pages here.  If you want to use the page's reference
5600  * count to decide when to free the allocation, you should allocate a
5601  * compound page, and use put_page() instead of __free_pages().
5602  *
5603  * Context: May be called in interrupt context or while holding a normal
5604  * spinlock, but not in NMI context or while holding a raw spinlock.
5605  */
5606 void __free_pages(struct page *page, unsigned int order)
5607 {
5608 	/* get PageHead before we drop reference */
5609 	int head = PageHead(page);
5610 
5611 	if (put_page_testzero(page))
5612 		free_the_page(page, order);
5613 	else if (!head)
5614 		while (order-- > 0)
5615 			free_the_page(page + (1 << order), order);
5616 }
5617 EXPORT_SYMBOL(__free_pages);
5618 
5619 void free_pages(unsigned long addr, unsigned int order)
5620 {
5621 	if (addr != 0) {
5622 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5623 		__free_pages(virt_to_page((void *)addr), order);
5624 	}
5625 }
5626 
5627 EXPORT_SYMBOL(free_pages);
5628 
5629 /*
5630  * Page Fragment:
5631  *  An arbitrary-length arbitrary-offset area of memory which resides
5632  *  within a 0 or higher order page.  Multiple fragments within that page
5633  *  are individually refcounted, in the page's reference counter.
5634  *
5635  * The page_frag functions below provide a simple allocation framework for
5636  * page fragments.  This is used by the network stack and network device
5637  * drivers to provide a backing region of memory for use as either an
5638  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5639  */
5640 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5641 					     gfp_t gfp_mask)
5642 {
5643 	struct page *page = NULL;
5644 	gfp_t gfp = gfp_mask;
5645 
5646 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5647 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5648 		    __GFP_NOMEMALLOC;
5649 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5650 				PAGE_FRAG_CACHE_MAX_ORDER);
5651 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5652 #endif
5653 	if (unlikely(!page))
5654 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5655 
5656 	nc->va = page ? page_address(page) : NULL;
5657 
5658 	return page;
5659 }
5660 
5661 void __page_frag_cache_drain(struct page *page, unsigned int count)
5662 {
5663 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5664 
5665 	if (page_ref_sub_and_test(page, count))
5666 		free_the_page(page, compound_order(page));
5667 }
5668 EXPORT_SYMBOL(__page_frag_cache_drain);
5669 
5670 void *page_frag_alloc_align(struct page_frag_cache *nc,
5671 		      unsigned int fragsz, gfp_t gfp_mask,
5672 		      unsigned int align_mask)
5673 {
5674 	unsigned int size = PAGE_SIZE;
5675 	struct page *page;
5676 	int offset;
5677 
5678 	if (unlikely(!nc->va)) {
5679 refill:
5680 		page = __page_frag_cache_refill(nc, gfp_mask);
5681 		if (!page)
5682 			return NULL;
5683 
5684 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5685 		/* if size can vary use size else just use PAGE_SIZE */
5686 		size = nc->size;
5687 #endif
5688 		/* Even if we own the page, we do not use atomic_set().
5689 		 * This would break get_page_unless_zero() users.
5690 		 */
5691 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5692 
5693 		/* reset page count bias and offset to start of new frag */
5694 		nc->pfmemalloc = page_is_pfmemalloc(page);
5695 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5696 		nc->offset = size;
5697 	}
5698 
5699 	offset = nc->offset - fragsz;
5700 	if (unlikely(offset < 0)) {
5701 		page = virt_to_page(nc->va);
5702 
5703 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5704 			goto refill;
5705 
5706 		if (unlikely(nc->pfmemalloc)) {
5707 			free_the_page(page, compound_order(page));
5708 			goto refill;
5709 		}
5710 
5711 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5712 		/* if size can vary use size else just use PAGE_SIZE */
5713 		size = nc->size;
5714 #endif
5715 		/* OK, page count is 0, we can safely set it */
5716 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5717 
5718 		/* reset page count bias and offset to start of new frag */
5719 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5720 		offset = size - fragsz;
5721 		if (unlikely(offset < 0)) {
5722 			/*
5723 			 * The caller is trying to allocate a fragment
5724 			 * with fragsz > PAGE_SIZE but the cache isn't big
5725 			 * enough to satisfy the request; this may
5726 			 * happen in low memory conditions.
5727 			 * We don't release the cache page because
5728 			 * that could make memory pressure worse,
5729 			 * so we simply return NULL here.
5730 			 */
5731 			return NULL;
5732 		}
5733 	}
5734 
5735 	nc->pagecnt_bias--;
5736 	offset &= align_mask;
5737 	nc->offset = offset;
5738 
5739 	return nc->va + offset;
5740 }
5741 EXPORT_SYMBOL(page_frag_alloc_align);
5742 
5743 /*
5744  * Frees a page fragment allocated out of either a compound or order 0 page.
5745  */
5746 void page_frag_free(void *addr)
5747 {
5748 	struct page *page = virt_to_head_page(addr);
5749 
5750 	if (unlikely(put_page_testzero(page)))
5751 		free_the_page(page, compound_order(page));
5752 }
5753 EXPORT_SYMBOL(page_frag_free);
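
/*
 * Illustrative sketch, not part of this file: a driver-style user of the
 * page_frag API; the cache and function names are hypothetical. Real users
 * (e.g. the networking stack) typically keep a per-cpu cache, and must
 * drain it (see __page_frag_cache_drain()) on teardown.
 */
static struct page_frag_cache example_frag_cache;

static void *example_frag_get(unsigned int len)
{
	/* align_mask is ~(align - 1); here: SMP_CACHE_BYTES alignment. */
	return page_frag_alloc_align(&example_frag_cache, len, GFP_ATOMIC,
				     ~(SMP_CACHE_BYTES - 1));
}

static void example_frag_put(void *data)
{
	/* Drops this fragment's reference on the backing page. */
	page_frag_free(data);
}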
5754 
5755 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5756 		size_t size)
5757 {
5758 	if (addr) {
5759 		unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5760 		struct page *page = virt_to_page((void *)addr);
5761 		struct page *last = page + nr;
5762 
5763 		split_page_owner(page, 1 << order);
5764 		split_page_memcg(page, 1 << order);
5765 		while (page < --last)
5766 			set_page_refcounted(last);
5767 
5768 		last = page + (1UL << order);
5769 		for (page += nr; page < last; page++)
5770 			__free_pages_ok(page, 0, FPI_TO_TAIL);
5771 	}
5772 	return (void *)addr;
5773 }
5774 
5775 /**
5776  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5777  * @size: the number of bytes to allocate
5778  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5779  *
5780  * This function is similar to alloc_pages(), except that it allocates the
5781  * minimum number of pages to satisfy the request.  alloc_pages() can only
5782  * allocate memory in power-of-two pages.
5783  *
5784  * This function is also limited by MAX_ORDER.
5785  *
5786  * Memory allocated by this function must be released by free_pages_exact().
5787  *
5788  * Return: pointer to the allocated area or %NULL in case of error.
5789  */
5790 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5791 {
5792 	unsigned int order = get_order(size);
5793 	unsigned long addr;
5794 
5795 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5796 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5797 
5798 	addr = __get_free_pages(gfp_mask, order);
5799 	return make_alloc_exact(addr, order, size);
5800 }
5801 EXPORT_SYMBOL(alloc_pages_exact);
5802 
5803 /**
5804  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5805  *			   pages on a node.
5806  * @nid: the preferred node ID where memory should be allocated
5807  * @size: the number of bytes to allocate
5808  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5809  *
5810  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5811  * back.
5812  *
5813  * Return: pointer to the allocated area or %NULL in case of error.
5814  */
5815 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5816 {
5817 	unsigned int order = get_order(size);
5818 	struct page *p;
5819 
5820 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5821 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5822 
5823 	p = alloc_pages_node(nid, gfp_mask, order);
5824 	if (!p)
5825 		return NULL;
5826 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5827 }
5828 
5829 /**
5830  * free_pages_exact - release memory allocated via alloc_pages_exact()
5831  * @virt: the value returned by alloc_pages_exact.
5832  * @size: size of allocation, same value as passed to alloc_pages_exact().
5833  *
5834  * Release the memory allocated by a previous call to alloc_pages_exact.
5835  */
5836 void free_pages_exact(void *virt, size_t size)
5837 {
5838 	unsigned long addr = (unsigned long)virt;
5839 	unsigned long end = addr + PAGE_ALIGN(size);
5840 
5841 	while (addr < end) {
5842 		free_page(addr);
5843 		addr += PAGE_SIZE;
5844 	}
5845 }
5846 EXPORT_SYMBOL(free_pages_exact);
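
/*
 * Illustrative sketch, not part of this file: alloc_pages_exact() trims the
 * rounded-up allocation back to the requested size. Asking for 5 pages
 * consumes exactly 5 pages here, whereas alloc_pages() would round up to
 * order-3 (8 pages). The function name and size are hypothetical.
 */
static int example_exact_user(void)
{
	size_t size = 5 * PAGE_SIZE;
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;

	/* ... use the physically contiguous buffer ... */

	free_pages_exact(buf, size);	/* must pass the same size back */
	return 0;
}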
5847 
5848 /**
5849  * nr_free_zone_pages - count number of pages beyond high watermark
5850  * @offset: The zone index of the highest zone
5851  *
5852  * nr_free_zone_pages() counts the number of pages which are beyond the
5853  * high watermark within all zones at or below a given zone index.  For each
5854  * zone, the number of pages is calculated as:
5855  *
5856  *     nr_free_zone_pages = managed_pages - high_pages
5857  *
5858  * Return: number of pages beyond high watermark.
5859  */
5860 static unsigned long nr_free_zone_pages(int offset)
5861 {
5862 	struct zoneref *z;
5863 	struct zone *zone;
5864 
5865 	/* Just pick one node, since fallback list is circular */
5866 	unsigned long sum = 0;
5867 
5868 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5869 
5870 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5871 		unsigned long size = zone_managed_pages(zone);
5872 		unsigned long high = high_wmark_pages(zone);
5873 		if (size > high)
5874 			sum += size - high;
5875 	}
5876 
5877 	return sum;
5878 }
5879 
5880 /**
5881  * nr_free_buffer_pages - count number of pages beyond high watermark
5882  *
5883  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5884  * watermark within ZONE_DMA and ZONE_NORMAL.
5885  *
5886  * Return: number of pages beyond high watermark within ZONE_DMA and
5887  * ZONE_NORMAL.
5888  */
5889 unsigned long nr_free_buffer_pages(void)
5890 {
5891 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5892 }
5893 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5894 
5895 static inline void show_node(struct zone *zone)
5896 {
5897 	if (IS_ENABLED(CONFIG_NUMA))
5898 		printk("Node %d ", zone_to_nid(zone));
5899 }
5900 
5901 long si_mem_available(void)
5902 {
5903 	long available;
5904 	unsigned long pagecache;
5905 	unsigned long wmark_low = 0;
5906 	unsigned long pages[NR_LRU_LISTS];
5907 	unsigned long reclaimable;
5908 	struct zone *zone;
5909 	int lru;
5910 
5911 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5912 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5913 
5914 	for_each_zone(zone)
5915 		wmark_low += low_wmark_pages(zone);
5916 
5917 	/*
5918 	 * Estimate the amount of memory available for userspace allocations,
5919 	 * without causing swapping or OOM.
5920 	 */
5921 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5922 
5923 	/*
5924 	 * Not all the page cache can be freed, otherwise the system will
5925 	 * start swapping or thrashing. Assume at least half of the page
5926 	 * cache, or the low watermark worth of cache, needs to stay.
5927 	 */
5928 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5929 	pagecache -= min(pagecache / 2, wmark_low);
5930 	available += pagecache;
5931 
5932 	/*
5933 	 * Part of the reclaimable slab and other kernel memory consists of
5934 	 * items that are in use, and cannot be freed. Cap this estimate at the
5935 	 * low watermark.
5936 	 */
5937 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5938 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5939 	available += reclaimable - min(reclaimable / 2, wmark_low);
5940 
5941 	if (available < 0)
5942 		available = 0;
5943 	return available;
5944 }
5945 EXPORT_SYMBOL_GPL(si_mem_available);
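
/*
 * Worked example for si_mem_available() with hypothetical numbers:
 * free = 100000 pages, totalreserve_pages = 20000, file LRU = 50000,
 * summed low watermarks = 10000, reclaimable kernel pages = 30000:
 *
 *   available = (100000 - 20000)
 *             + (50000 - min(50000 / 2, 10000))	// +40000
 *             + (30000 - min(30000 / 2, 10000))	// +20000
 *             = 140000 pages
 *
 * This is the estimate reported as "MemAvailable" in /proc/meminfo.
 */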
5946 
5947 void si_meminfo(struct sysinfo *val)
5948 {
5949 	val->totalram = totalram_pages();
5950 	val->sharedram = global_node_page_state(NR_SHMEM);
5951 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5952 	val->bufferram = nr_blockdev_pages();
5953 	val->totalhigh = totalhigh_pages();
5954 	val->freehigh = nr_free_highpages();
5955 	val->mem_unit = PAGE_SIZE;
5956 }
5957 
5958 EXPORT_SYMBOL(si_meminfo);
5959 
5960 #ifdef CONFIG_NUMA
5961 void si_meminfo_node(struct sysinfo *val, int nid)
5962 {
5963 	int zone_type;		/* needs to be signed */
5964 	unsigned long managed_pages = 0;
5965 	unsigned long managed_highpages = 0;
5966 	unsigned long free_highpages = 0;
5967 	pg_data_t *pgdat = NODE_DATA(nid);
5968 
5969 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5970 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5971 	val->totalram = managed_pages;
5972 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5973 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5974 #ifdef CONFIG_HIGHMEM
5975 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5976 		struct zone *zone = &pgdat->node_zones[zone_type];
5977 
5978 		if (is_highmem(zone)) {
5979 			managed_highpages += zone_managed_pages(zone);
5980 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5981 		}
5982 	}
5983 	val->totalhigh = managed_highpages;
5984 	val->freehigh = free_highpages;
5985 #else
5986 	val->totalhigh = managed_highpages;
5987 	val->freehigh = free_highpages;
5988 #endif
5989 	val->mem_unit = PAGE_SIZE;
5990 }
5991 #endif
5992 
5993 /*
5994  * Determine whether the node should be displayed or not, depending on whether
5995  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5996  */
5997 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5998 {
5999 	if (!(flags & SHOW_MEM_FILTER_NODES))
6000 		return false;
6001 
6002 	/*
6003 	 * no node mask - aka implicit memory numa policy. Do not bother with
6004 	 * the synchronization - read_mems_allowed_begin - because we do not
6005 	 * have to be precise here.
6006 	 */
6007 	if (!nodemask)
6008 		nodemask = &cpuset_current_mems_allowed;
6009 
6010 	return !node_isset(nid, *nodemask);
6011 }
6012 
6013 #define K(x) ((x) << (PAGE_SHIFT-10))
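/* For example, with PAGE_SHIFT == 12 (4 KiB pages), K(x) == x * 4, i.e. pages -> KiB. */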
6014 
6015 static void show_migration_types(unsigned char type)
6016 {
6017 	static const char types[MIGRATE_TYPES] = {
6018 		[MIGRATE_UNMOVABLE]	= 'U',
6019 		[MIGRATE_MOVABLE]	= 'M',
6020 		[MIGRATE_RECLAIMABLE]	= 'E',
6021 		[MIGRATE_HIGHATOMIC]	= 'H',
6022 #ifdef CONFIG_CMA
6023 		[MIGRATE_CMA]		= 'C',
6024 #endif
6025 #ifdef CONFIG_MEMORY_ISOLATION
6026 		[MIGRATE_ISOLATE]	= 'I',
6027 #endif
6028 	};
6029 	char tmp[MIGRATE_TYPES + 1];
6030 	char *p = tmp;
6031 	int i;
6032 
6033 	for (i = 0; i < MIGRATE_TYPES; i++) {
6034 		if (type & (1 << i))
6035 			*p++ = types[i];
6036 	}
6037 
6038 	*p = '\0';
6039 	printk(KERN_CONT "(%s) ", tmp);
6040 }
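
/*
 * Illustrative output: for a free_area whose lists hold unmovable and
 * movable pages (bits MIGRATE_UNMOVABLE and MIGRATE_MOVABLE set in
 * @type), the function prints "(UM) ".
 */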
6041 
6042 static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
6043 {
6044 	int zone_idx;
6045 	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
6046 		if (zone_managed_pages(pgdat->node_zones + zone_idx))
6047 			return true;
6048 	return false;
6049 }
6050 
6051 /*
6052  * Show free area list (used inside shift_scroll-lock stuff)
6053  * We also calculate the percentage fragmentation. We do this by counting the
6054  * memory on each free list with the exception of the first item on the list.
6055  *
6056  * Bits in @filter:
6057  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
6058  *   cpuset.
6059  */
6060 void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
6061 {
6062 	unsigned long free_pcp = 0;
6063 	int cpu, nid;
6064 	struct zone *zone;
6065 	pg_data_t *pgdat;
6066 
6067 	for_each_populated_zone(zone) {
6068 		if (zone_idx(zone) > max_zone_idx)
6069 			continue;
6070 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6071 			continue;
6072 
6073 		for_each_online_cpu(cpu)
6074 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6075 	}
6076 
6077 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
6078 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
6079 		" unevictable:%lu dirty:%lu writeback:%lu\n"
6080 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
6081 		" mapped:%lu shmem:%lu pagetables:%lu\n"
6082 		" sec_pagetables:%lu bounce:%lu\n"
6083 		" kernel_misc_reclaimable:%lu\n"
6084 		" free:%lu free_pcp:%lu free_cma:%lu\n",
6085 		global_node_page_state(NR_ACTIVE_ANON),
6086 		global_node_page_state(NR_INACTIVE_ANON),
6087 		global_node_page_state(NR_ISOLATED_ANON),
6088 		global_node_page_state(NR_ACTIVE_FILE),
6089 		global_node_page_state(NR_INACTIVE_FILE),
6090 		global_node_page_state(NR_ISOLATED_FILE),
6091 		global_node_page_state(NR_UNEVICTABLE),
6092 		global_node_page_state(NR_FILE_DIRTY),
6093 		global_node_page_state(NR_WRITEBACK),
6094 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
6095 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
6096 		global_node_page_state(NR_FILE_MAPPED),
6097 		global_node_page_state(NR_SHMEM),
6098 		global_node_page_state(NR_PAGETABLE),
6099 		global_node_page_state(NR_SECONDARY_PAGETABLE),
6100 		global_zone_page_state(NR_BOUNCE),
6101 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
6102 		global_zone_page_state(NR_FREE_PAGES),
6103 		free_pcp,
6104 		global_zone_page_state(NR_FREE_CMA_PAGES));
6105 
6106 	for_each_online_pgdat(pgdat) {
6107 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
6108 			continue;
6109 		if (!node_has_managed_zones(pgdat, max_zone_idx))
6110 			continue;
6111 
6112 		printk("Node %d"
6113 			" active_anon:%lukB"
6114 			" inactive_anon:%lukB"
6115 			" active_file:%lukB"
6116 			" inactive_file:%lukB"
6117 			" unevictable:%lukB"
6118 			" isolated(anon):%lukB"
6119 			" isolated(file):%lukB"
6120 			" mapped:%lukB"
6121 			" dirty:%lukB"
6122 			" writeback:%lukB"
6123 			" shmem:%lukB"
6124 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6125 			" shmem_thp: %lukB"
6126 			" shmem_pmdmapped: %lukB"
6127 			" anon_thp: %lukB"
6128 #endif
6129 			" writeback_tmp:%lukB"
6130 			" kernel_stack:%lukB"
6131 #ifdef CONFIG_SHADOW_CALL_STACK
6132 			" shadow_call_stack:%lukB"
6133 #endif
6134 			" pagetables:%lukB"
6135 			" sec_pagetables:%lukB"
6136 			" all_unreclaimable? %s"
6137 			"\n",
6138 			pgdat->node_id,
6139 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
6140 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
6141 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
6142 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
6143 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
6144 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
6145 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
6146 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
6147 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
6148 			K(node_page_state(pgdat, NR_WRITEBACK)),
6149 			K(node_page_state(pgdat, NR_SHMEM)),
6150 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6151 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
6152 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
6153 			K(node_page_state(pgdat, NR_ANON_THPS)),
6154 #endif
6155 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
6156 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
6157 #ifdef CONFIG_SHADOW_CALL_STACK
6158 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
6159 #endif
6160 			K(node_page_state(pgdat, NR_PAGETABLE)),
6161 			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
6162 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
6163 				"yes" : "no");
6164 	}
6165 
6166 	for_each_populated_zone(zone) {
6167 		int i;
6168 
6169 		if (zone_idx(zone) > max_zone_idx)
6170 			continue;
6171 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6172 			continue;
6173 
6174 		free_pcp = 0;
6175 		for_each_online_cpu(cpu)
6176 			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6177 
6178 		show_node(zone);
6179 		printk(KERN_CONT
6180 			"%s"
6181 			" free:%lukB"
6182 			" boost:%lukB"
6183 			" min:%lukB"
6184 			" low:%lukB"
6185 			" high:%lukB"
6186 			" reserved_highatomic:%luKB"
6187 			" active_anon:%lukB"
6188 			" inactive_anon:%lukB"
6189 			" active_file:%lukB"
6190 			" inactive_file:%lukB"
6191 			" unevictable:%lukB"
6192 			" writepending:%lukB"
6193 			" present:%lukB"
6194 			" managed:%lukB"
6195 			" mlocked:%lukB"
6196 			" bounce:%lukB"
6197 			" free_pcp:%lukB"
6198 			" local_pcp:%ukB"
6199 			" free_cma:%lukB"
6200 			"\n",
6201 			zone->name,
6202 			K(zone_page_state(zone, NR_FREE_PAGES)),
6203 			K(zone->watermark_boost),
6204 			K(min_wmark_pages(zone)),
6205 			K(low_wmark_pages(zone)),
6206 			K(high_wmark_pages(zone)),
6207 			K(zone->nr_reserved_highatomic),
6208 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6209 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6210 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6211 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6212 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6213 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6214 			K(zone->present_pages),
6215 			K(zone_managed_pages(zone)),
6216 			K(zone_page_state(zone, NR_MLOCK)),
6217 			K(zone_page_state(zone, NR_BOUNCE)),
6218 			K(free_pcp),
6219 			K(this_cpu_read(zone->per_cpu_pageset->count)),
6220 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6221 		printk("lowmem_reserve[]:");
6222 		for (i = 0; i < MAX_NR_ZONES; i++)
6223 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6224 		printk(KERN_CONT "\n");
6225 	}
6226 
6227 	for_each_populated_zone(zone) {
6228 		unsigned int order;
6229 		unsigned long nr[MAX_ORDER], flags, total = 0;
6230 		unsigned char types[MAX_ORDER];
6231 
6232 		if (zone_idx(zone) > max_zone_idx)
6233 			continue;
6234 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6235 			continue;
6236 		show_node(zone);
6237 		printk(KERN_CONT "%s: ", zone->name);
6238 
6239 		spin_lock_irqsave(&zone->lock, flags);
6240 		for (order = 0; order < MAX_ORDER; order++) {
6241 			struct free_area *area = &zone->free_area[order];
6242 			int type;
6243 
6244 			nr[order] = area->nr_free;
6245 			total += nr[order] << order;
6246 
6247 			types[order] = 0;
6248 			for (type = 0; type < MIGRATE_TYPES; type++) {
6249 				if (!free_area_empty(area, type))
6250 					types[order] |= 1 << type;
6251 			}
6252 		}
6253 		spin_unlock_irqrestore(&zone->lock, flags);
6254 		for (order = 0; order < MAX_ORDER; order++) {
6255 			printk(KERN_CONT "%lu*%lukB ",
6256 			       nr[order], K(1UL) << order);
6257 			if (nr[order])
6258 				show_migration_types(types[order]);
6259 		}
6260 		printk(KERN_CONT "= %lukB\n", K(total));
6261 	}
6262 
6263 	for_each_online_node(nid) {
6264 		if (show_mem_node_skip(filter, nid, nodemask))
6265 			continue;
6266 		hugetlb_show_meminfo_node(nid);
6267 	}
6268 
6269 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6270 
6271 	show_swap_cache_info();
6272 }
6273 
6274 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6275 {
6276 	zoneref->zone = zone;
6277 	zoneref->zone_idx = zone_idx(zone);
6278 }
6279 
6280 /*
6281  * Builds allocation fallback zone lists.
6282  *
6283  * Add all populated zones of a node to the zonelist.
6284  */
6285 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6286 {
6287 	struct zone *zone;
6288 	enum zone_type zone_type = MAX_NR_ZONES;
6289 	int nr_zones = 0;
6290 
6291 	do {
6292 		zone_type--;
6293 		zone = pgdat->node_zones + zone_type;
6294 		if (populated_zone(zone)) {
6295 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6296 			check_highest_zone(zone_type);
6297 		}
6298 	} while (zone_type);
6299 
6300 	return nr_zones;
6301 }
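
/*
 * Illustrative result: on a node with populated Normal and DMA32 zones,
 * the zonerefs are filled highest zone first, i.e. { Normal, DMA32 }, so
 * the zonelist prefers Normal and only falls back to DMA32.
 */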
6302 
6303 #ifdef CONFIG_NUMA
6304 
6305 static int __parse_numa_zonelist_order(char *s)
6306 {
6307 	/*
6308 	 * We used to support different zonelist modes but they turned
6309 	 * out to be just not useful. Let's keep the warning in place
6310 	 * if somebody still uses the cmd line parameter so that we do
6311 	 * not fail it silently.
6312 	 */
6313 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6314 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
6315 		return -EINVAL;
6316 	}
6317 	return 0;
6318 }
6319 
6320 char numa_zonelist_order[] = "Node";
6321 
6322 /*
6323  * sysctl handler for numa_zonelist_order
6324  */
6325 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6326 		void *buffer, size_t *length, loff_t *ppos)
6327 {
6328 	if (write)
6329 		return __parse_numa_zonelist_order(buffer);
6330 	return proc_dostring(table, write, buffer, length, ppos);
6331 }
6332 
6333 
6334 static int node_load[MAX_NUMNODES];
6335 
6336 /**
6337  * find_next_best_node - find the next node that should appear in a given node's fallback list
6338  * @node: node whose fallback list we're appending
6339  * @used_node_mask: nodemask_t of already used nodes
6340  *
6341  * We use a number of factors to determine which is the next node that should
6342  * appear on a given node's fallback list.  The node should not have appeared
6343  * already in @node's fallback list, and it should be the next closest node
6344  * according to the distance array (which contains arbitrary distance values
6345  * from each node to each node in the system), and should also prefer nodes
6346  * with no CPUs, since presumably they'll have very little allocation pressure
6347  * on them otherwise.
6348  *
6349  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6350  */
6351 int find_next_best_node(int node, nodemask_t *used_node_mask)
6352 {
6353 	int n, val;
6354 	int min_val = INT_MAX;
6355 	int best_node = NUMA_NO_NODE;
6356 
6357 	/* Use the local node if we haven't already */
6358 	if (!node_isset(node, *used_node_mask)) {
6359 		node_set(node, *used_node_mask);
6360 		return node;
6361 	}
6362 
6363 	for_each_node_state(n, N_MEMORY) {
6364 
6365 		/* Don't want a node to appear more than once */
6366 		if (node_isset(n, *used_node_mask))
6367 			continue;
6368 
6369 		/* Use the distance array to find the distance */
6370 		val = node_distance(node, n);
6371 
6372 		/* Penalize nodes under us ("prefer the next node") */
6373 		val += (n < node);
6374 
6375 		/* Give preference to headless and unused nodes */
6376 		if (!cpumask_empty(cpumask_of_node(n)))
6377 			val += PENALTY_FOR_NODE_WITH_CPUS;
6378 
6379 		/* Slight preference for less loaded node */
6380 		val *= MAX_NUMNODES;
6381 		val += node_load[n];
6382 
6383 		if (val < min_val) {
6384 			min_val = val;
6385 			best_node = n;
6386 		}
6387 	}
6388 
6389 	if (best_node >= 0)
6390 		node_set(best_node, *used_node_mask);
6391 
6392 	return best_node;
6393 }
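
/*
 * Worked example with hypothetical values: from node 0, candidate node 1
 * at distance 20, with CPUs attached:
 *
 *   val  = 20;				// node_distance(0, 1)
 *   val += (1 < 0);			// +0: node 1 is not "under" node 0
 *   val += PENALTY_FOR_NODE_WITH_CPUS;	// headless nodes are preferred
 *   val  = val * MAX_NUMNODES + node_load[1];
 *
 * The candidate with the smallest val wins, so distance dominates and
 * node_load only breaks ties within a distance group.
 */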
6394 
6395 
6396 /*
6397  * Build zonelists ordered by node and zones within node.
6398  * This results in maximum locality--normal zone overflows into local
6399  * DMA zone, if any--but risks exhausting DMA zone.
6400  */
6401 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6402 		unsigned nr_nodes)
6403 {
6404 	struct zoneref *zonerefs;
6405 	int i;
6406 
6407 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6408 
6409 	for (i = 0; i < nr_nodes; i++) {
6410 		int nr_zones;
6411 
6412 		pg_data_t *node = NODE_DATA(node_order[i]);
6413 
6414 		nr_zones = build_zonerefs_node(node, zonerefs);
6415 		zonerefs += nr_zones;
6416 	}
6417 	zonerefs->zone = NULL;
6418 	zonerefs->zone_idx = 0;
6419 }
6420 
6421 /*
6422  * Build gfp_thisnode zonelists
6423  */
6424 static void build_thisnode_zonelists(pg_data_t *pgdat)
6425 {
6426 	struct zoneref *zonerefs;
6427 	int nr_zones;
6428 
6429 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6430 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6431 	zonerefs += nr_zones;
6432 	zonerefs->zone = NULL;
6433 	zonerefs->zone_idx = 0;
6434 }
6435 
6436 /*
6437  * Build zonelists ordered by zone and nodes within zones.
6438  * This results in conserving DMA zone[s] until all Normal memory is
6439  * exhausted, but results in overflowing to remote node while memory
6440  * may still exist in local DMA zone.
6441  */
6442 
6443 static void build_zonelists(pg_data_t *pgdat)
6444 {
6445 	static int node_order[MAX_NUMNODES];
6446 	int node, nr_nodes = 0;
6447 	nodemask_t used_mask = NODE_MASK_NONE;
6448 	int local_node, prev_node;
6449 
6450 	/* NUMA-aware ordering of nodes */
6451 	local_node = pgdat->node_id;
6452 	prev_node = local_node;
6453 
6454 	memset(node_order, 0, sizeof(node_order));
6455 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6456 		/*
6457 		 * We don't want to pressure a particular node.
6458 		 * So we add a penalty to the first node in the same
6459 		 * distance group to make the selection round-robin.
6460 		 */
6461 		if (node_distance(local_node, node) !=
6462 		    node_distance(local_node, prev_node))
6463 			node_load[node] += 1;
6464 
6465 		node_order[nr_nodes++] = node;
6466 		prev_node = node;
6467 	}
6468 
6469 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6470 	build_thisnode_zonelists(pgdat);
6471 	pr_info("Fallback order for Node %d: ", local_node);
6472 	for (node = 0; node < nr_nodes; node++)
6473 		pr_cont("%d ", node_order[node]);
6474 	pr_cont("\n");
6475 }
6476 
6477 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6478 /*
6479  * Return node id of node used for "local" allocations.
6480  * I.e., first node id of first zone in arg node's generic zonelist.
6481  * Used for initializing percpu 'numa_mem', which is used primarily
6482  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6483  */
6484 int local_memory_node(int node)
6485 {
6486 	struct zoneref *z;
6487 
6488 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6489 				   gfp_zone(GFP_KERNEL),
6490 				   NULL);
6491 	return zone_to_nid(z->zone);
6492 }
6493 #endif
6494 
6495 static void setup_min_unmapped_ratio(void);
6496 static void setup_min_slab_ratio(void);
6497 #else	/* CONFIG_NUMA */
6498 
6499 static void build_zonelists(pg_data_t *pgdat)
6500 {
6501 	int node, local_node;
6502 	struct zoneref *zonerefs;
6503 	int nr_zones;
6504 
6505 	local_node = pgdat->node_id;
6506 
6507 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6508 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
6509 	zonerefs += nr_zones;
6510 
6511 	/*
6512 	 * Now we build the zonelist so that it contains the zones
6513 	 * of all the other nodes.
6514 	 * We don't want to pressure a particular node, so when
6515 	 * building the zones for node N, we make sure that the
6516 	 * zones coming right after the local ones are those from
6517 	 * node N+1 (wrapping around modulo the number of nodes)
6518 	 */
6519 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6520 		if (!node_online(node))
6521 			continue;
6522 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6523 		zonerefs += nr_zones;
6524 	}
6525 	for (node = 0; node < local_node; node++) {
6526 		if (!node_online(node))
6527 			continue;
6528 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6529 		zonerefs += nr_zones;
6530 	}
6531 
6532 	zonerefs->zone = NULL;
6533 	zonerefs->zone_idx = 0;
6534 }
6535 
6536 #endif	/* CONFIG_NUMA */
6537 
6538 /*
6539  * Boot pageset table. One per cpu which is going to be used for all
6540  * zones and all nodes. The parameters will be set in such a way
6541  * that an item put on a list will immediately be handed over to
6542  * the buddy list. This is safe since pageset manipulation is done
6543  * with interrupts disabled.
6544  *
6545  * The boot_pagesets must be kept even after bootup is complete for
6546  * unused processors and/or zones. They do play a role for bootstrapping
6547  * hotplugged processors.
6548  *
6549  * zoneinfo_show() and maybe other functions do
6550  * not check if the processor is online before following the pageset pointer.
6551  * Other parts of the kernel may not check if the zone is available.
6552  */
6553 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6554 /* These effectively disable the pcplists in the boot pageset completely */
6555 #define BOOT_PAGESET_HIGH	0
6556 #define BOOT_PAGESET_BATCH	1
6557 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6558 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6559 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6560 
6561 static void __build_all_zonelists(void *data)
6562 {
6563 	int nid;
6564 	int __maybe_unused cpu;
6565 	pg_data_t *self = data;
6566 
6567 	write_seqlock(&zonelist_update_seq);
6568 
6569 #ifdef CONFIG_NUMA
6570 	memset(node_load, 0, sizeof(node_load));
6571 #endif
6572 
6573 	/*
6574 	 * This node is hotadded and no memory is yet present. So just
6575 	 * building zonelists is fine - no need to touch other nodes.
6576 	 */
6577 	if (self && !node_online(self->node_id)) {
6578 		build_zonelists(self);
6579 	} else {
6580 		/*
6581 		 * All possible nodes have pgdat preallocated
6582 		 * in free_area_init
6583 		 */
6584 		for_each_node(nid) {
6585 			pg_data_t *pgdat = NODE_DATA(nid);
6586 
6587 			build_zonelists(pgdat);
6588 		}
6589 
6590 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6591 		/*
6592 		 * We now know the "local memory node" for each node--
6593 		 * i.e., the node of the first zone in the generic zonelist.
6594 		 * Set up numa_mem percpu variable for on-line cpus.  During
6595 		 * boot, only the boot cpu should be on-line;  we'll init the
6596 		 * secondary cpus' numa_mem as they come on-line.  During
6597 		 * node/memory hotplug, we'll fixup all on-line cpus.
6598 		 */
6599 		for_each_online_cpu(cpu)
6600 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6601 #endif
6602 	}
6603 
6604 	write_sequnlock(&zonelist_update_seq);
6605 }
6606 
6607 static noinline void __init
6608 build_all_zonelists_init(void)
6609 {
6610 	int cpu;
6611 
6612 	__build_all_zonelists(NULL);
6613 
6614 	/*
6615 	 * Initialize the boot_pagesets that are going to be used
6616 	 * for bootstrapping processors. The real pagesets for
6617 	 * each zone will be allocated later when the per cpu
6618 	 * allocator is available.
6619 	 *
6620 	 * boot_pagesets are used also for bootstrapping offline
6621 	 * cpus if the system is already booted because the pagesets
6622 	 * are needed to initialize allocators on a specific cpu too.
6623 	 * E.g. the percpu allocator needs the page allocator which
6624 	 * needs the percpu allocator in order to allocate its pagesets
6625 	 * (a chicken-egg dilemma).
6626 	 */
6627 	for_each_possible_cpu(cpu)
6628 		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6629 
6630 	mminit_verify_zonelist();
6631 	cpuset_init_current_mems_allowed();
6632 }
6633 
6634 /*
6635  * build_all_zonelists_init() may only be used while system_state == SYSTEM_BOOTING.
6636  *
6637  * __ref due to call of __init annotated helper build_all_zonelists_init
6638  * [protected by SYSTEM_BOOTING].
6639  */
6640 void __ref build_all_zonelists(pg_data_t *pgdat)
6641 {
6642 	unsigned long vm_total_pages;
6643 
6644 	if (system_state == SYSTEM_BOOTING) {
6645 		build_all_zonelists_init();
6646 	} else {
6647 		__build_all_zonelists(pgdat);
6648 		/* cpuset refresh routine should be here */
6649 	}
6650 	/* Get the number of free pages beyond high watermark in all zones. */
6651 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6652 	/*
6653 	 * Disable grouping by mobility if the number of pages in the
6654 	 * system is too low to allow the mechanism to work. It would be
6655 	 * more accurate, but expensive to check per-zone. This check is
6656 	 * made on memory-hotadd so a system can start with mobility
6657 	 * disabled and enable it later
6658 	 */
6659 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6660 		page_group_by_mobility_disabled = 1;
6661 	else
6662 		page_group_by_mobility_disabled = 0;
6663 
6664 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6665 		nr_online_nodes,
6666 		page_group_by_mobility_disabled ? "off" : "on",
6667 		vm_total_pages);
6668 #ifdef CONFIG_NUMA
6669 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6670 #endif
6671 }
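
/*
 * Illustrative boot log emitted by the pr_info() calls above (values are
 * hypothetical):
 *
 *   Built 2 zonelists, mobility grouping on.  Total pages: 2041480
 *   Policy zone: Normal
 */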
6672 
6673 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6674 static bool __meminit
6675 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6676 {
6677 	static struct memblock_region *r;
6678 
6679 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6680 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6681 			for_each_mem_region(r) {
6682 				if (*pfn < memblock_region_memory_end_pfn(r))
6683 					break;
6684 			}
6685 		}
6686 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6687 		    memblock_is_mirror(r)) {
6688 			*pfn = memblock_region_memory_end_pfn(r);
6689 			return true;
6690 		}
6691 	}
6692 	return false;
6693 }
6694 
6695 /*
6696  * Initially all pages are reserved - free ones are freed
6697  * up by memblock_free_all() once the early boot process is
6698  * done. Non-atomic initialization, single-pass.
6699  *
6700  * All aligned pageblocks are initialized to the specified migratetype
6701  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6702  * zone stats (e.g., nr_isolate_pageblock) are touched.
6703  */
6704 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6705 		unsigned long start_pfn, unsigned long zone_end_pfn,
6706 		enum meminit_context context,
6707 		struct vmem_altmap *altmap, int migratetype)
6708 {
6709 	unsigned long pfn, end_pfn = start_pfn + size;
6710 	struct page *page;
6711 
6712 	if (highest_memmap_pfn < end_pfn - 1)
6713 		highest_memmap_pfn = end_pfn - 1;
6714 
6715 #ifdef CONFIG_ZONE_DEVICE
6716 	/*
6717 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6718 	 * memory. We limit the total number of pages to initialize to just
6719 	 * those that might contain the memory mapping. We will defer the
6720 	 * ZONE_DEVICE page initialization until after we have released
6721 	 * the hotplug lock.
6722 	 */
6723 	if (zone == ZONE_DEVICE) {
6724 		if (!altmap)
6725 			return;
6726 
6727 		if (start_pfn == altmap->base_pfn)
6728 			start_pfn += altmap->reserve;
6729 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6730 	}
6731 #endif
6732 
6733 	for (pfn = start_pfn; pfn < end_pfn; ) {
6734 		/*
6735 		 * There can be holes in boot-time mem_map[]s handed to this
6736 		 * function.  They do not exist on hotplugged memory.
6737 		 */
6738 		if (context == MEMINIT_EARLY) {
6739 			if (overlap_memmap_init(zone, &pfn))
6740 				continue;
6741 			if (defer_init(nid, pfn, zone_end_pfn)) {
6742 				deferred_struct_pages = true;
6743 				break;
6744 			}
6745 		}
6746 
6747 		page = pfn_to_page(pfn);
6748 		__init_single_page(page, pfn, zone, nid);
6749 		if (context == MEMINIT_HOTPLUG)
6750 			__SetPageReserved(page);
6751 
6752 		/*
6753 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6754 		 * such that unmovable allocations won't be scattered all
6755 		 * over the place during system boot.
6756 		 */
6757 		if (pageblock_aligned(pfn)) {
6758 			set_pageblock_migratetype(page, migratetype);
6759 			cond_resched();
6760 		}
6761 		pfn++;
6762 	}
6763 }
6764 
6765 #ifdef CONFIG_ZONE_DEVICE
6766 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
6767 					  unsigned long zone_idx, int nid,
6768 					  struct dev_pagemap *pgmap)
6769 {
6770 
6771 	__init_single_page(page, pfn, zone_idx, nid);
6772 
6773 	/*
6774 	 * Mark the page reserved as it will need to wait for the
6775 	 * onlining phase for it to be fully associated with a zone.
6776 	 *
6777 	 * We can use the non-atomic __set_bit operation for setting
6778 	 * the flag as we are still initializing the pages.
6779 	 */
6780 	__SetPageReserved(page);
6781 
6782 	/*
6783 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6784 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6785 	 * ever freed or placed on a driver-private list.
6786 	 */
6787 	page->pgmap = pgmap;
6788 	page->zone_device_data = NULL;
6789 
6790 	/*
6791 	 * Mark the block movable so that blocks are reserved for
6792 	 * movable at startup. This will force kernel allocations
6793 	 * to reserve their blocks rather than leaking throughout
6794 	 * the address space during boot when many long-lived
6795 	 * kernel allocations are made.
6796 	 *
6797 	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6798 	 * because this is done early in section_activate()
6799 	 */
6800 	if (pageblock_aligned(pfn)) {
6801 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6802 		cond_resched();
6803 	}
6804 
6805 	/*
6806 	 * ZONE_DEVICE pages are released directly to the driver page allocator
6807 	 * which will set the page count to 1 when allocating the page.
6808 	 */
6809 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
6810 	    pgmap->type == MEMORY_DEVICE_COHERENT)
6811 		set_page_count(page, 0);
6812 }
6813 
6814 /*
6815  * With compound page geometry and when struct pages are stored in RAM, most
6816  * tail pages are reused. Consequently, the amount of unique struct pages to
6817  * initialize is a lot smaller than the total amount of struct pages being
6818  * mapped. This is a paired / mild layering violation with explicit knowledge
6819  * of how the sparse_vmemmap internals handle compound pages in the absence
6820  * of an altmap. See vmemmap_populate_compound_pages().
6821  */
6822 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
6823 					      unsigned long nr_pages)
6824 {
6825 	return is_power_of_2(sizeof(struct page)) &&
6826 		!altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
6827 }
6828 
6829 static void __ref memmap_init_compound(struct page *head,
6830 				       unsigned long head_pfn,
6831 				       unsigned long zone_idx, int nid,
6832 				       struct dev_pagemap *pgmap,
6833 				       unsigned long nr_pages)
6834 {
6835 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
6836 	unsigned int order = pgmap->vmemmap_shift;
6837 
6838 	__SetPageHead(head);
6839 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
6840 		struct page *page = pfn_to_page(pfn);
6841 
6842 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6843 		prep_compound_tail(head, pfn - head_pfn);
6844 		set_page_count(page, 0);
6845 
6846 		/*
6847 		 * The first tail page stores important compound page info.
6848 		 * Call prep_compound_head() after the first tail page has
6849 		 * been initialized, to not have the data overwritten.
6850 		 */
6851 		if (pfn == head_pfn + 1)
6852 			prep_compound_head(head, order);
6853 	}
6854 }
6855 
6856 void __ref memmap_init_zone_device(struct zone *zone,
6857 				   unsigned long start_pfn,
6858 				   unsigned long nr_pages,
6859 				   struct dev_pagemap *pgmap)
6860 {
6861 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6862 	struct pglist_data *pgdat = zone->zone_pgdat;
6863 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6864 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
6865 	unsigned long zone_idx = zone_idx(zone);
6866 	unsigned long start = jiffies;
6867 	int nid = pgdat->node_id;
6868 
6869 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
6870 		return;
6871 
6872 	/*
6873 	 * The call to memmap_init should have already taken care
6874 	 * of the pages reserved for the memmap, so we can just jump to
6875 	 * the end of that region and start processing the device pages.
6876 	 */
6877 	if (altmap) {
6878 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6879 		nr_pages = end_pfn - start_pfn;
6880 	}
6881 
6882 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
6883 		struct page *page = pfn_to_page(pfn);
6884 
6885 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6886 
6887 		if (pfns_per_compound == 1)
6888 			continue;
6889 
6890 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
6891 				     compound_nr_pages(altmap, pfns_per_compound));
6892 	}
6893 
6894 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6895 		nr_pages, jiffies_to_msecs(jiffies - start));
6896 }
6897 
6898 #endif
6899 static void __meminit zone_init_free_lists(struct zone *zone)
6900 {
6901 	unsigned int order, t;
6902 	for_each_migratetype_order(order, t) {
6903 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6904 		zone->free_area[order].nr_free = 0;
6905 	}
6906 }
6907 
6908 /*
6909  * Only struct pages that correspond to ranges defined by memblock.memory
6910  * are zeroed and initialized by going through __init_single_page() during
6911  * memmap_init_zone_range().
6912  *
6913  * But, there could be struct pages that correspond to holes in
6914  * memblock.memory. This can happen because of the following reasons:
6915  * - physical memory bank size is not necessarily the exact multiple of the
6916  *   arbitrary section size
6917  * - early reserved memory may not be listed in memblock.memory
6918  * - memory layouts defined with memmap= kernel parameter may not align
6919  *   nicely with memmap sections
6920  *
6921  * Explicitly initialize those struct pages so that:
6922  * - PG_Reserved is set
6923  * - zone and node links point to zone and node that span the page if the
6924  *   hole is in the middle of a zone
6925  * - zone and node links point to adjacent zone/node if the hole falls on
6926  *   the zone boundary; the pages in such holes will be prepended to the
6927  *   zone/node above the hole except for the trailing pages in the last
6928  *   section that will be appended to the zone/node below.
6929  */
6930 static void __init init_unavailable_range(unsigned long spfn,
6931 					  unsigned long epfn,
6932 					  int zone, int node)
6933 {
6934 	unsigned long pfn;
6935 	u64 pgcnt = 0;
6936 
6937 	for (pfn = spfn; pfn < epfn; pfn++) {
6938 		if (!pfn_valid(pageblock_start_pfn(pfn))) {
6939 			pfn = pageblock_end_pfn(pfn) - 1;
6940 			continue;
6941 		}
6942 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
6943 		__SetPageReserved(pfn_to_page(pfn));
6944 		pgcnt++;
6945 	}
6946 
6947 	if (pgcnt)
6948 		pr_info("On node %d, zone %s: %llu pages in unavailable ranges\n",
6949 			node, zone_names[zone], pgcnt);
6950 }
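
/*
 * Example (illustrative): with 128 MiB sections, a memory bank that ends at
 * 100 MiB leaves the struct pages for the 100-128 MiB hole untouched by
 * memmap_init_zone_range(). init_unavailable_range() initializes them as
 * reserved and links them to the adjacent zone/node as described above.
 */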
6951 
6952 static void __init memmap_init_zone_range(struct zone *zone,
6953 					  unsigned long start_pfn,
6954 					  unsigned long end_pfn,
6955 					  unsigned long *hole_pfn)
6956 {
6957 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6958 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6959 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6960 
6961 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6962 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6963 
6964 	if (start_pfn >= end_pfn)
6965 		return;
6966 
6967 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6968 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6969 
6970 	if (*hole_pfn < start_pfn)
6971 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6972 
6973 	*hole_pfn = end_pfn;
6974 }
6975 
6976 static void __init memmap_init(void)
6977 {
6978 	unsigned long start_pfn, end_pfn;
6979 	unsigned long hole_pfn = 0;
6980 	int i, j, zone_id = 0, nid;
6981 
6982 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6983 		struct pglist_data *node = NODE_DATA(nid);
6984 
6985 		for (j = 0; j < MAX_NR_ZONES; j++) {
6986 			struct zone *zone = node->node_zones + j;
6987 
6988 			if (!populated_zone(zone))
6989 				continue;
6990 
6991 			memmap_init_zone_range(zone, start_pfn, end_pfn,
6992 					       &hole_pfn);
6993 			zone_id = j;
6994 		}
6995 	}
6996 
6997 #ifdef CONFIG_SPARSEMEM
6998 	/*
6999 	 * Initialize the memory map for hole in the range [memory_end,
7000 	 * section_end].
7001 	 * Append the pages in this hole to the highest zone in the last
7002 	 * node.
7003 	 * The call to init_unavailable_range() is outside the ifdef to
7004 	 * silence the compiler warning about zone_id being set but not used;
7005 	 * for FLATMEM it is a nop anyway.
7006 	 */
7007 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
7008 	if (hole_pfn < end_pfn)
7009 #endif
7010 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
7011 }
7012 
7013 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
7014 			  phys_addr_t min_addr, int nid, bool exact_nid)
7015 {
7016 	void *ptr;
7017 
7018 	if (exact_nid)
7019 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
7020 						   MEMBLOCK_ALLOC_ACCESSIBLE,
7021 						   nid);
7022 	else
7023 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
7024 						 MEMBLOCK_ALLOC_ACCESSIBLE,
7025 						 nid);
7026 
7027 	if (ptr && size > 0)
7028 		page_init_poison(ptr, size);
7029 
7030 	return ptr;
7031 }
7032 
7033 static int zone_batchsize(struct zone *zone)
7034 {
7035 #ifdef CONFIG_MMU
7036 	int batch;
7037 
7038 	/*
7039 	 * The number of pages to batch allocate is either ~0.1%
7040 	 * of the zone or 1MB, whichever is smaller. The batch
7041 	 * size is striking a balance between allocation latency
7042 	 * and zone lock contention.
7043 	 */
7044 	batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
7045 	batch /= 4;		/* We effectively *= 4 below */
7046 	if (batch < 1)
7047 		batch = 1;
7048 
7049 	/*
7050 	 * Clamp the batch to a 2^n - 1 value. Having a power
7051 	 * of 2 value was found to be more likely to have
7052 	 * suboptimal cache aliasing properties in some cases.
7053 	 *
7054 	 * For example if 2 tasks are alternately allocating
7055 	 * batches of pages, one task can end up with a lot
7056 	 * of pages of one half of the possible page colors
7057 	 * and the other with pages of the other colors.
7058 	 */
7059 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
7060 
7061 	return batch;
7062 
7063 #else
7064 	/* The deferral and batching of frees should be suppressed under NOMMU
7065 	 * conditions.
7066 	 *
7067 	 * The problem is that NOMMU needs to be able to allocate large chunks
7068 	 * of contiguous memory as there's no hardware page translation to
7069 	 * assemble apparent contiguous memory from discontiguous pages.
7070 	 *
7071 	 * Queueing large contiguous runs of pages for batching, however,
7072 	 * causes the pages to actually be freed in smaller chunks.  As there
7073 	 * can be a significant delay between the individual batches being
7074 	 * recycled, this leads to the once large chunks of space being
7075 	 * fragmented and becoming unavailable for high-order allocations.
7076 	 */
7077 	return 0;
7078 #endif
7079 }
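
/*
 * Worked example (illustrative): for a zone managing 1 GiB with 4 KiB
 * pages, zone_managed_pages() is 262144, so 262144 >> 10 = 256 and
 * SZ_1M / PAGE_SIZE = 256. The batch starts at min(256, 256) = 256,
 * becomes 64 after the division by 4, and rounddown_pow_of_two(64 + 32) - 1
 * yields a final batch of 63.
 */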
7080 
7081 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
7082 {
7083 #ifdef CONFIG_MMU
7084 	int high;
7085 	int nr_split_cpus;
7086 	unsigned long total_pages;
7087 
7088 	if (!percpu_pagelist_high_fraction) {
7089 		/*
7090 		 * By default, the high value of the pcp is based on the zone
7091 		 * low watermark so that if they are full then background
7092 		 * reclaim will not be started prematurely.
7093 		 */
7094 		total_pages = low_wmark_pages(zone);
7095 	} else {
7096 		/*
7097 		 * If percpu_pagelist_high_fraction is configured, the high
7098 		 * value is based on a fraction of the managed pages in the
7099 		 * zone.
7100 		 */
7101 		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
7102 	}
7103 
7104 	/*
7105 	 * Split the high value across all online CPUs local to the zone. Note
7106 	 * that early in boot that CPUs may not be online yet and that during
7107 	 * that early in boot CPUs may not be online yet and that during
7108 	 * CPU hotplug the cpumask is not yet updated when a CPU is being
7109 	 * all online CPUs to mitigate the risk that reclaim is triggered
7110 	 * prematurely due to pages stored on pcp lists.
7111 	 */
7112 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
7113 	if (!nr_split_cpus)
7114 		nr_split_cpus = num_online_cpus();
7115 	high = total_pages / nr_split_cpus;
7116 
7117 	/*
7118 	 * Ensure high is at least batch*4. The multiple is based on the
7119 	 * historical relationship between high and batch.
7120 	 */
7121 	high = max(high, batch << 2);
7122 
7123 	return high;
7124 #else
7125 	return 0;
7126 #endif
7127 }
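
/*
 * Worked example (illustrative, with made-up watermark numbers): if the
 * zone's low watermark is 4096 pages, percpu_pagelist_high_fraction is
 * unset and the node has 4 online CPUs, then high = 4096 / 4 = 1024. That
 * already exceeds batch * 4 (252 for the batch of 63 computed above), so
 * 1024 is returned.
 */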
7128 
7129 /*
7130  * pcp->high and pcp->batch values are related and generally batch is lower
7131  * than high. They are also related to pcp->count such that count is lower
7132  * than high, and as soon as it reaches high, the pcplist is flushed.
7133  *
7134  * However, guaranteeing these relations at all times would require e.g. write
7135  * barriers here but also careful usage of read barriers at the read side, and
7136  * thus be prone to error and bad for performance. So the update only prevents
7137  * store tearing. Any new users of pcp->batch and pcp->high should ensure they
7138  * can cope with those fields changing asynchronously, and fully trust only the
7139  * pcp->count field on the local CPU with interrupts disabled.
7140  *
7141  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
7142  * outside of boot time (or some other assurance that no concurrent updaters
7143  * exist).
7144  */
7145 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
7146 		unsigned long batch)
7147 {
7148 	WRITE_ONCE(pcp->batch, batch);
7149 	WRITE_ONCE(pcp->high, high);
7150 }
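
/*
 * A minimal reader-side sketch of the contract described above
 * (illustrative only; pcp_read_batch() is a hypothetical helper, not part
 * of this file). Readers must use READ_ONCE() and tolerate the value
 * changing between reads:
 */
#if 0
static inline unsigned long pcp_read_batch(struct per_cpu_pages *pcp)
{
	/* Pairs with the WRITE_ONCE() in pageset_update(). */
	return READ_ONCE(pcp->batch);
}
#endif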
7151 
7152 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
7153 {
7154 	int pindex;
7155 
7156 	memset(pcp, 0, sizeof(*pcp));
7157 	memset(pzstats, 0, sizeof(*pzstats));
7158 
7159 	spin_lock_init(&pcp->lock);
7160 	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
7161 		INIT_LIST_HEAD(&pcp->lists[pindex]);
7162 
7163 	/*
7164 	 * Set batch and high values safe for a boot pageset. A true percpu
7165 	 * pageset's initialization will update them subsequently. Here we don't
7166 	 * need to be as careful as pageset_update() as nobody can access the
7167 	 * pageset yet.
7168 	 */
7169 	pcp->high = BOOT_PAGESET_HIGH;
7170 	pcp->batch = BOOT_PAGESET_BATCH;
7171 	pcp->free_factor = 0;
7172 }
7173 
7174 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
7175 		unsigned long batch)
7176 {
7177 	struct per_cpu_pages *pcp;
7178 	int cpu;
7179 
7180 	for_each_possible_cpu(cpu) {
7181 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7182 		pageset_update(pcp, high, batch);
7183 	}
7184 }
7185 
7186 /*
7187  * Calculate and set new high and batch values for all per-cpu pagesets of a
7188  * zone based on the zone's size.
7189  */
7190 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
7191 {
7192 	int new_high, new_batch;
7193 
7194 	new_batch = max(1, zone_batchsize(zone));
7195 	new_high = zone_highsize(zone, new_batch, cpu_online);
7196 
7197 	if (zone->pageset_high == new_high &&
7198 	    zone->pageset_batch == new_batch)
7199 		return;
7200 
7201 	zone->pageset_high = new_high;
7202 	zone->pageset_batch = new_batch;
7203 
7204 	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
7205 }
7206 
7207 void __meminit setup_zone_pageset(struct zone *zone)
7208 {
7209 	int cpu;
7210 
7211 	/* Size may be 0 on !SMP && !NUMA */
7212 	if (sizeof(struct per_cpu_zonestat) > 0)
7213 		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
7214 
7215 	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
7216 	for_each_possible_cpu(cpu) {
7217 		struct per_cpu_pages *pcp;
7218 		struct per_cpu_zonestat *pzstats;
7219 
7220 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7221 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7222 		per_cpu_pages_init(pcp, pzstats);
7223 	}
7224 
7225 	zone_set_pageset_high_and_batch(zone, 0);
7226 }
7227 
7228 /*
7229  * The zone indicated has a new number of managed_pages; batch sizes and percpu
7230  * page high values need to be recalculated.
7231  */
7232 static void zone_pcp_update(struct zone *zone, int cpu_online)
7233 {
7234 	mutex_lock(&pcp_batch_high_lock);
7235 	zone_set_pageset_high_and_batch(zone, cpu_online);
7236 	mutex_unlock(&pcp_batch_high_lock);
7237 }
7238 
7239 /*
7240  * Allocate per cpu pagesets and initialize them.
7241  * Before this call only boot pagesets were available.
7242  */
7243 void __init setup_per_cpu_pageset(void)
7244 {
7245 	struct pglist_data *pgdat;
7246 	struct zone *zone;
7247 	int __maybe_unused cpu;
7248 
7249 	for_each_populated_zone(zone)
7250 		setup_zone_pageset(zone);
7251 
7252 #ifdef CONFIG_NUMA
7253 	/*
7254 	 * Unpopulated zones continue using the boot pagesets.
7255 	 * The numa stats for these pagesets need to be reset.
7256 	 * Otherwise, they will end up skewing the stats of
7257 	 * the nodes these zones are associated with.
7258 	 */
7259 	for_each_possible_cpu(cpu) {
7260 		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
7261 		memset(pzstats->vm_numa_event, 0,
7262 		       sizeof(pzstats->vm_numa_event));
7263 	}
7264 #endif
7265 
7266 	for_each_online_pgdat(pgdat)
7267 		pgdat->per_cpu_nodestats =
7268 			alloc_percpu(struct per_cpu_nodestat);
7269 }
7270 
7271 static __meminit void zone_pcp_init(struct zone *zone)
7272 {
7273 	/*
7274 	 * per cpu subsystem is not up at this point. The following code
7275 	 * relies on the ability of the linker to provide the
7276 	 * offset of a (static) per cpu variable into the per cpu area.
7277 	 */
7278 	zone->per_cpu_pageset = &boot_pageset;
7279 	zone->per_cpu_zonestats = &boot_zonestats;
7280 	zone->pageset_high = BOOT_PAGESET_HIGH;
7281 	zone->pageset_batch = BOOT_PAGESET_BATCH;
7282 
7283 	if (populated_zone(zone))
7284 		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7285 			 zone->present_pages, zone_batchsize(zone));
7286 }
7287 
7288 void __meminit init_currently_empty_zone(struct zone *zone,
7289 					unsigned long zone_start_pfn,
7290 					unsigned long size)
7291 {
7292 	struct pglist_data *pgdat = zone->zone_pgdat;
7293 	int zone_idx = zone_idx(zone) + 1;
7294 
7295 	if (zone_idx > pgdat->nr_zones)
7296 		pgdat->nr_zones = zone_idx;
7297 
7298 	zone->zone_start_pfn = zone_start_pfn;
7299 
7300 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
7301 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
7302 			pgdat->node_id,
7303 			(unsigned long)zone_idx(zone),
7304 			zone_start_pfn, (zone_start_pfn + size));
7305 
7306 	zone_init_free_lists(zone);
7307 	zone->initialized = 1;
7308 }
7309 
7310 /**
7311  * get_pfn_range_for_nid - Return the start and end page frames for a node
7312  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7313  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7314  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7315  *
7316  * It returns the start and end page frame of a node based on information
7317  * provided by memblock_set_node(). If called for a node
7318  * with no available memory, a warning is printed and the start and end
7319  * PFNs will be 0.
7320  */
7321 void __init get_pfn_range_for_nid(unsigned int nid,
7322 			unsigned long *start_pfn, unsigned long *end_pfn)
7323 {
7324 	unsigned long this_start_pfn, this_end_pfn;
7325 	int i;
7326 
7327 	*start_pfn = -1UL;
7328 	*end_pfn = 0;
7329 
7330 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7331 		*start_pfn = min(*start_pfn, this_start_pfn);
7332 		*end_pfn = max(*end_pfn, this_end_pfn);
7333 	}
7334 
7335 	if (*start_pfn == -1UL)
7336 		*start_pfn = 0;
7337 }
7338 
7339 /*
7340  * This finds a zone that can be used for ZONE_MOVABLE pages. The
7341  * assumption is made that zones within a node are ordered in monotonic
7342  * assumption is made that zones within a node are ordered by monotonically
7343  * increasing memory addresses, so that the "highest" populated zone is used.
7344 static void __init find_usable_zone_for_movable(void)
7345 {
7346 	int zone_index;
7347 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7348 		if (zone_index == ZONE_MOVABLE)
7349 			continue;
7350 
7351 		if (arch_zone_highest_possible_pfn[zone_index] >
7352 				arch_zone_lowest_possible_pfn[zone_index])
7353 			break;
7354 	}
7355 
7356 	VM_BUG_ON(zone_index == -1);
7357 	movable_zone = zone_index;
7358 }
7359 
7360 /*
7361  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7362  * because it is sized independent of architecture. Unlike the other zones,
7363  * the starting point for ZONE_MOVABLE is not fixed. It may be different
7364  * in each node depending on the size of each node and how evenly kernelcore
7365  * is distributed. This helper function adjusts the zone ranges
7366  * provided by the architecture for a given node by using the end of the
7367  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7368  * zones within a node are ordered by monotonically increasing memory addresses.
7369  */
7370 static void __init adjust_zone_range_for_zone_movable(int nid,
7371 					unsigned long zone_type,
7372 					unsigned long node_start_pfn,
7373 					unsigned long node_end_pfn,
7374 					unsigned long *zone_start_pfn,
7375 					unsigned long *zone_end_pfn)
7376 {
7377 	/* Only adjust if ZONE_MOVABLE is on this node */
7378 	if (zone_movable_pfn[nid]) {
7379 		/* Size ZONE_MOVABLE */
7380 		if (zone_type == ZONE_MOVABLE) {
7381 			*zone_start_pfn = zone_movable_pfn[nid];
7382 			*zone_end_pfn = min(node_end_pfn,
7383 				arch_zone_highest_possible_pfn[movable_zone]);
7384 
7385 		/* Adjust for ZONE_MOVABLE starting within this range */
7386 		} else if (!mirrored_kernelcore &&
7387 			*zone_start_pfn < zone_movable_pfn[nid] &&
7388 			*zone_end_pfn > zone_movable_pfn[nid]) {
7389 			*zone_end_pfn = zone_movable_pfn[nid];
7390 
7391 		/* Check if this whole range is within ZONE_MOVABLE */
7392 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
7393 			*zone_start_pfn = *zone_end_pfn;
7394 	}
7395 }
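
/*
 * Example (illustrative): on a node spanning [0, 4 GiB) where
 * zone_movable_pfn[nid] sits at 3 GiB, ZONE_NORMAL's end is clamped down
 * to 3 GiB while ZONE_MOVABLE is sized to cover [3 GiB, 4 GiB). A zone
 * lying entirely above 3 GiB collapses to an empty range.
 */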
7396 
7397 /*
7398  * Return the number of pages a zone spans in a node, including holes.
7399  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7400  */
7401 static unsigned long __init zone_spanned_pages_in_node(int nid,
7402 					unsigned long zone_type,
7403 					unsigned long node_start_pfn,
7404 					unsigned long node_end_pfn,
7405 					unsigned long *zone_start_pfn,
7406 					unsigned long *zone_end_pfn)
7407 {
7408 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7409 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7410 	/* When hotadding a new node from cpu_up(), the node should be empty */
7411 	if (!node_start_pfn && !node_end_pfn)
7412 		return 0;
7413 
7414 	/* Get the start and end of the zone */
7415 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7416 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7417 	adjust_zone_range_for_zone_movable(nid, zone_type,
7418 				node_start_pfn, node_end_pfn,
7419 				zone_start_pfn, zone_end_pfn);
7420 
7421 	/* Check that this node has pages within the zone's required range */
7422 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7423 		return 0;
7424 
7425 	/* Move the zone boundaries inside the node if necessary */
7426 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7427 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7428 
7429 	/* Return the spanned pages */
7430 	return *zone_end_pfn - *zone_start_pfn;
7431 }
7432 
7433 /*
7434  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7435  * then all holes in the requested range will be accounted for.
7436  */
7437 unsigned long __init __absent_pages_in_range(int nid,
7438 				unsigned long range_start_pfn,
7439 				unsigned long range_end_pfn)
7440 {
7441 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
7442 	unsigned long start_pfn, end_pfn;
7443 	int i;
7444 
7445 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7446 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7447 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7448 		nr_absent -= end_pfn - start_pfn;
7449 	}
7450 	return nr_absent;
7451 }
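
/*
 * Worked example (illustrative): for a requested pfn range [0, 1000) with
 * memblock regions covering [0, 400) and [600, 1000), nr_absent starts at
 * 1000 and shrinks by 400 twice, so a hole of 200 pages is reported.
 */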
7452 
7453 /**
7454  * absent_pages_in_range - Return number of page frames in holes within a range
7455  * @start_pfn: The start PFN to start searching for holes
7456  * @end_pfn: The end PFN to stop searching for holes
7457  *
7458  * Return: the number of page frames in memory holes within a range.
7459  */
7460 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7461 							unsigned long end_pfn)
7462 {
7463 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7464 }
7465 
7466 /* Return the number of page frames in holes in a zone on a node */
7467 static unsigned long __init zone_absent_pages_in_node(int nid,
7468 					unsigned long zone_type,
7469 					unsigned long node_start_pfn,
7470 					unsigned long node_end_pfn)
7471 {
7472 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7473 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7474 	unsigned long zone_start_pfn, zone_end_pfn;
7475 	unsigned long nr_absent;
7476 
7477 	/* When hotadding a new node from cpu_up(), the node should be empty */
7478 	if (!node_start_pfn && !node_end_pfn)
7479 		return 0;
7480 
7481 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7482 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7483 
7484 	adjust_zone_range_for_zone_movable(nid, zone_type,
7485 			node_start_pfn, node_end_pfn,
7486 			&zone_start_pfn, &zone_end_pfn);
7487 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7488 
7489 	/*
7490 	 * ZONE_MOVABLE handling.
7491 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7492 	 * and vice versa.
7493 	 */
7494 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7495 		unsigned long start_pfn, end_pfn;
7496 		struct memblock_region *r;
7497 
7498 		for_each_mem_region(r) {
7499 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
7500 					  zone_start_pfn, zone_end_pfn);
7501 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
7502 					zone_start_pfn, zone_end_pfn);
7503 
7504 			if (zone_type == ZONE_MOVABLE &&
7505 			    memblock_is_mirror(r))
7506 				nr_absent += end_pfn - start_pfn;
7507 
7508 			if (zone_type == ZONE_NORMAL &&
7509 			    !memblock_is_mirror(r))
7510 				nr_absent += end_pfn - start_pfn;
7511 		}
7512 	}
7513 
7514 	return nr_absent;
7515 }
7516 
7517 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7518 						unsigned long node_start_pfn,
7519 						unsigned long node_end_pfn)
7520 {
7521 	unsigned long realtotalpages = 0, totalpages = 0;
7522 	enum zone_type i;
7523 
7524 	for (i = 0; i < MAX_NR_ZONES; i++) {
7525 		struct zone *zone = pgdat->node_zones + i;
7526 		unsigned long zone_start_pfn, zone_end_pfn;
7527 		unsigned long spanned, absent;
7528 		unsigned long size, real_size;
7529 
7530 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7531 						     node_start_pfn,
7532 						     node_end_pfn,
7533 						     &zone_start_pfn,
7534 						     &zone_end_pfn);
7535 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
7536 						   node_start_pfn,
7537 						   node_end_pfn);
7538 
7539 		size = spanned;
7540 		real_size = size - absent;
7541 
7542 		if (size)
7543 			zone->zone_start_pfn = zone_start_pfn;
7544 		else
7545 			zone->zone_start_pfn = 0;
7546 		zone->spanned_pages = size;
7547 		zone->present_pages = real_size;
7548 #if defined(CONFIG_MEMORY_HOTPLUG)
7549 		zone->present_early_pages = real_size;
7550 #endif
7551 
7552 		totalpages += size;
7553 		realtotalpages += real_size;
7554 	}
7555 
7556 	pgdat->node_spanned_pages = totalpages;
7557 	pgdat->node_present_pages = realtotalpages;
7558 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7559 }
7560 
7561 #ifndef CONFIG_SPARSEMEM
7562 /*
7563  * Calculate the size of the zone->blockflags bitmap, rounded up to an
7564  * unsigned long. Start by making sure zonesize is a multiple of
7565  * pageblock_order by rounding up. Then use NR_PAGEBLOCK_BITS worth of bits
7566  * per pageblock, round the result up to the nearest long in bits, and
7567  * return it in bytes.
7568  */
7569 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7570 {
7571 	unsigned long usemapsize;
7572 
7573 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7574 	usemapsize = roundup(zonesize, pageblock_nr_pages);
7575 	usemapsize = usemapsize >> pageblock_order;
7576 	usemapsize *= NR_PAGEBLOCK_BITS;
7577 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7578 
7579 	return usemapsize / 8;
7580 }
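
/*
 * Worked example (illustrative, assuming a pageblock-aligned zone start):
 * a 1 GiB zone with 4 KiB pages and pageblock_order 9 spans 262144 pages,
 * i.e. 512 pageblocks. With NR_PAGEBLOCK_BITS = 4 that is 2048 bits,
 * already a multiple of a 64-bit long, so usemap_size() returns
 * 2048 / 8 = 256 bytes.
 */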
7581 
7582 static void __ref setup_usemap(struct zone *zone)
7583 {
7584 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7585 					       zone->spanned_pages);
7586 	zone->pageblock_flags = NULL;
7587 	if (usemapsize) {
7588 		zone->pageblock_flags =
7589 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7590 					    zone_to_nid(zone));
7591 		if (!zone->pageblock_flags)
7592 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7593 			      usemapsize, zone->name, zone_to_nid(zone));
7594 	}
7595 }
7596 #else
7597 static inline void setup_usemap(struct zone *zone) {}
7598 #endif /* CONFIG_SPARSEMEM */
7599 
7600 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7601 
7602 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7603 void __init set_pageblock_order(void)
7604 {
7605 	unsigned int order = MAX_ORDER - 1;
7606 
7607 	/* Check that pageblock_nr_pages has not already been setup */
7608 	if (pageblock_order)
7609 		return;
7610 
7611 	/* Don't let pageblocks exceed the maximum allocation granularity. */
7612 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
7613 		order = HUGETLB_PAGE_ORDER;
7614 
7615 	/*
7616 	 * Assume the largest contiguous order of interest is a huge page.
7617 	 * This value may be variable depending on boot parameters on IA64 and
7618 	 * powerpc.
7619 	 */
7620 	pageblock_order = order;
7621 }
7622 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7623 
7624 /*
7625  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7626  * is unused as pageblock_order is set at compile-time. See
7627  * include/linux/pageblock-flags.h for the values of pageblock_order based on
7628  * the kernel config
7629  */
7630 void __init set_pageblock_order(void)
7631 {
7632 }
7633 
7634 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7635 
7636 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7637 						unsigned long present_pages)
7638 {
7639 	unsigned long pages = spanned_pages;
7640 
7641 	/*
7642 	 * Provide a more accurate estimation if there are holes within
7643 	 * the zone and SPARSEMEM is in use. If there are holes within the
7644 	 * zone, each populated memory region may cost us one or two extra
7645 	 * memmap pages due to alignment because memmap pages for each
7646 	 * populated regions may not be naturally aligned on page boundary.
7647 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7648 	 */
7649 	if (spanned_pages > present_pages + (present_pages >> 4) &&
7650 	    IS_ENABLED(CONFIG_SPARSEMEM))
7651 		pages = present_pages;
7652 
7653 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7654 }
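
/*
 * Worked example (illustrative): for a fully populated 1 GiB zone
 * (spanned == present == 262144 pages) and a 64-byte struct page, the
 * memmap costs 262144 * 64 bytes = 16 MiB, so 4096 pages are returned.
 */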
7655 
7656 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7657 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7658 {
7659 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7660 
7661 	spin_lock_init(&ds_queue->split_queue_lock);
7662 	INIT_LIST_HEAD(&ds_queue->split_queue);
7663 	ds_queue->split_queue_len = 0;
7664 }
7665 #else
7666 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7667 #endif
7668 
7669 #ifdef CONFIG_COMPACTION
7670 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7671 {
7672 	init_waitqueue_head(&pgdat->kcompactd_wait);
7673 }
7674 #else
7675 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7676 #endif
7677 
7678 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7679 {
7680 	int i;
7681 
7682 	pgdat_resize_init(pgdat);
7683 	pgdat_kswapd_lock_init(pgdat);
7684 
7685 	pgdat_init_split_queue(pgdat);
7686 	pgdat_init_kcompactd(pgdat);
7687 
7688 	init_waitqueue_head(&pgdat->kswapd_wait);
7689 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
7690 
7691 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
7692 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
7693 
7694 	pgdat_page_ext_init(pgdat);
7695 	lruvec_init(&pgdat->__lruvec);
7696 }
7697 
7698 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7699 							unsigned long remaining_pages)
7700 {
7701 	atomic_long_set(&zone->managed_pages, remaining_pages);
7702 	zone_set_nid(zone, nid);
7703 	zone->name = zone_names[idx];
7704 	zone->zone_pgdat = NODE_DATA(nid);
7705 	spin_lock_init(&zone->lock);
7706 	zone_seqlock_init(zone);
7707 	zone_pcp_init(zone);
7708 }
7709 
7710 /*
7711  * Set up the zone data structures
7712  * - init pgdat internals
7713  * - init all zones belonging to this node
7714  *
7715  * NOTE: this function is only called during memory hotplug
7716  */
7717 #ifdef CONFIG_MEMORY_HOTPLUG
7718 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
7719 {
7720 	int nid = pgdat->node_id;
7721 	enum zone_type z;
7722 	int cpu;
7723 
7724 	pgdat_init_internals(pgdat);
7725 
7726 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
7727 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
7728 
7729 	/*
7730 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
7731 	 * Note that kswapd will init kswapd_highest_zoneidx properly
7732 	 * when it starts in the near future.
7733 	 */
7734 	pgdat->nr_zones = 0;
7735 	pgdat->kswapd_order = 0;
7736 	pgdat->kswapd_highest_zoneidx = 0;
7737 	pgdat->node_start_pfn = 0;
7738 	for_each_online_cpu(cpu) {
7739 		struct per_cpu_nodestat *p;
7740 
7741 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
7742 		memset(p, 0, sizeof(*p));
7743 	}
7744 
7745 	for (z = 0; z < MAX_NR_ZONES; z++)
7746 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7747 }
7748 #endif
7749 
7750 /*
7751  * Set up the zone data structures:
7752  *   - mark all pages reserved
7753  *   - mark all memory queues empty
7754  *   - clear the memory bitmaps
7755  *
7756  * NOTE: pgdat should get zeroed by caller.
7757  * NOTE: this function is only called during early init.
7758  */
7759 static void __init free_area_init_core(struct pglist_data *pgdat)
7760 {
7761 	enum zone_type j;
7762 	int nid = pgdat->node_id;
7763 
7764 	pgdat_init_internals(pgdat);
7765 	pgdat->per_cpu_nodestats = &boot_nodestats;
7766 
7767 	for (j = 0; j < MAX_NR_ZONES; j++) {
7768 		struct zone *zone = pgdat->node_zones + j;
7769 		unsigned long size, freesize, memmap_pages;
7770 
7771 		size = zone->spanned_pages;
7772 		freesize = zone->present_pages;
7773 
7774 		/*
7775 		 * Adjust freesize so that it accounts for how much memory
7776 		 * is used by this zone for memmap. This affects the watermark
7777 		 * and per-cpu initialisations
7778 		 */
7779 		memmap_pages = calc_memmap_size(size, freesize);
7780 		if (!is_highmem_idx(j)) {
7781 			if (freesize >= memmap_pages) {
7782 				freesize -= memmap_pages;
7783 				if (memmap_pages)
7784 					pr_debug("  %s zone: %lu pages used for memmap\n",
7785 						 zone_names[j], memmap_pages);
7786 			} else
7787 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
7788 					zone_names[j], memmap_pages, freesize);
7789 		}
7790 
7791 		/* Account for reserved pages */
7792 		if (j == 0 && freesize > dma_reserve) {
7793 			freesize -= dma_reserve;
7794 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7795 		}
7796 
7797 		if (!is_highmem_idx(j))
7798 			nr_kernel_pages += freesize;
7799 		/* Charge for highmem memmap if there are enough kernel pages */
7800 		else if (nr_kernel_pages > memmap_pages * 2)
7801 			nr_kernel_pages -= memmap_pages;
7802 		nr_all_pages += freesize;
7803 
7804 		/*
7805 		 * Set an approximate value for lowmem here, it will be adjusted
7806 		 * when the bootmem allocator frees pages into the buddy system.
7807 		 * And all highmem pages will be managed by the buddy system.
7808 		 */
7809 		zone_init_internals(zone, j, nid, freesize);
7810 
7811 		if (!size)
7812 			continue;
7813 
7814 		set_pageblock_order();
7815 		setup_usemap(zone);
7816 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7817 	}
7818 }
7819 
7820 #ifdef CONFIG_FLATMEM
7821 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7822 {
7823 	unsigned long __maybe_unused start = 0;
7824 	unsigned long __maybe_unused offset = 0;
7825 
7826 	/* Skip empty nodes */
7827 	if (!pgdat->node_spanned_pages)
7828 		return;
7829 
7830 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7831 	offset = pgdat->node_start_pfn - start;
7832 	/* ia64 gets its own node_mem_map, before this, without bootmem */
7833 	if (!pgdat->node_mem_map) {
7834 		unsigned long size, end;
7835 		struct page *map;
7836 
7837 		/*
7838 		 * The zone's endpoints aren't required to be MAX_ORDER
7839 		 * aligned, but the node_mem_map endpoints must be, in order
7840 		 * for the buddy allocator to function correctly.
7841 		 */
7842 		end = pgdat_end_pfn(pgdat);
7843 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7844 		size =  (end - start) * sizeof(struct page);
7845 		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7846 				   pgdat->node_id, false);
7847 		if (!map)
7848 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7849 			      size, pgdat->node_id);
7850 		pgdat->node_mem_map = map + offset;
7851 	}
7852 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7853 				__func__, pgdat->node_id, (unsigned long)pgdat,
7854 				(unsigned long)pgdat->node_mem_map);
7855 #ifndef CONFIG_NUMA
7856 	/*
7857 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7858 	 */
7859 	if (pgdat == NODE_DATA(0)) {
7860 		mem_map = NODE_DATA(0)->node_mem_map;
7861 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7862 			mem_map -= offset;
7863 	}
7864 #endif
7865 }
7866 #else
7867 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7868 #endif /* CONFIG_FLATMEM */
7869 
7870 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7871 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7872 {
7873 	pgdat->first_deferred_pfn = ULONG_MAX;
7874 }
7875 #else
7876 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7877 #endif
7878 
7879 static void __init free_area_init_node(int nid)
7880 {
7881 	pg_data_t *pgdat = NODE_DATA(nid);
7882 	unsigned long start_pfn = 0;
7883 	unsigned long end_pfn = 0;
7884 
7885 	/* pg_data_t should be reset to zero when it's allocated */
7886 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7887 
7888 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7889 
7890 	pgdat->node_id = nid;
7891 	pgdat->node_start_pfn = start_pfn;
7892 	pgdat->per_cpu_nodestats = NULL;
7893 
7894 	if (start_pfn != end_pfn) {
7895 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7896 			(u64)start_pfn << PAGE_SHIFT,
7897 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7898 	} else {
7899 		pr_info("Initmem setup node %d as memoryless\n", nid);
7900 	}
7901 
7902 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7903 
7904 	alloc_node_mem_map(pgdat);
7905 	pgdat_set_deferred_range(pgdat);
7906 
7907 	free_area_init_core(pgdat);
7908 	lru_gen_init_pgdat(pgdat);
7909 }
7910 
7911 static void __init free_area_init_memoryless_node(int nid)
7912 {
7913 	free_area_init_node(nid);
7914 }
7915 
7916 #if MAX_NUMNODES > 1
7917 /*
7918  * Figure out the number of possible node ids.
7919  */
7920 void __init setup_nr_node_ids(void)
7921 {
7922 	unsigned int highest;
7923 
7924 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7925 	nr_node_ids = highest + 1;
7926 }
7927 #endif
7928 
7929 /**
7930  * node_map_pfn_alignment - determine the maximum internode alignment
7931  *
7932  * This function should be called after node map is populated and sorted.
7933  * It calculates the maximum power of two alignment which can distinguish
7934  * all the nodes.
7935  *
7936  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7937  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7938  * nodes are shifted by 256MiB, it would indicate 256MiB.  Note that if only
7939  * the last node is shifted, 1GiB is enough and this function will indicate so.
7940  *
7941  * This is used to test whether pfn -> nid mapping of the chosen memory
7942  * model has fine enough granularity to avoid incorrect mapping for the
7943  * populated node map.
7944  *
7945  * Return: the determined alignment in pfn's.  0 if there is no alignment
7946  * requirement (single node).
7947  */
7948 unsigned long __init node_map_pfn_alignment(void)
7949 {
7950 	unsigned long accl_mask = 0, last_end = 0;
7951 	unsigned long start, end, mask;
7952 	int last_nid = NUMA_NO_NODE;
7953 	int i, nid;
7954 
7955 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7956 		if (!start || last_nid < 0 || last_nid == nid) {
7957 			last_nid = nid;
7958 			last_end = end;
7959 			continue;
7960 		}
7961 
7962 		/*
7963 		 * Start with a mask granular enough to pin-point to the
7964 		 * start pfn and tick off bits one-by-one until it becomes
7965 		 * too coarse to separate the current node from the last.
7966 		 */
7967 		mask = ~((1 << __ffs(start)) - 1);
7968 		while (mask && last_end <= (start & (mask << 1)))
7969 			mask <<= 1;
7970 
7971 		/* accumulate all internode masks */
7972 		accl_mask |= mask;
7973 	}
7974 
7975 	/* convert mask to number of pages */
7976 	return ~accl_mask + 1;
7977 }
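
/*
 * Worked example (illustrative, 4 KiB pages): for two nodes covering
 * [0, 256 MiB) and [256 MiB, 512 MiB), the second range starts at pfn
 * 0x10000, so the initial mask is ~0xffff. The widening loop exits
 * immediately because start & (mask << 1) is 0, and the accumulated mask
 * yields an alignment of 0x10000 pfns, i.e. 256 MiB.
 */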
7978 
7979 /*
7980  * early_calculate_totalpages()
7981  * Sum pages in active regions for movable zone.
7982  * Populate N_MEMORY for calculating usable_nodes.
7983  */
7984 static unsigned long __init early_calculate_totalpages(void)
7985 {
7986 	unsigned long totalpages = 0;
7987 	unsigned long start_pfn, end_pfn;
7988 	int i, nid;
7989 
7990 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7991 		unsigned long pages = end_pfn - start_pfn;
7992 
7993 		totalpages += pages;
7994 		if (pages)
7995 			node_set_state(nid, N_MEMORY);
7996 	}
7997 	return totalpages;
7998 }
7999 
8000 /*
8001  * Find the PFN the Movable zone begins in each node. Kernel memory
8002  * is spread evenly between nodes as long as the nodes have enough
8003  * memory. When they don't, some nodes will have more kernelcore than
8004  * others.
8005  */
8006 static void __init find_zone_movable_pfns_for_nodes(void)
8007 {
8008 	int i, nid;
8009 	unsigned long usable_startpfn;
8010 	unsigned long kernelcore_node, kernelcore_remaining;
8011 	/* save the state before borrow the nodemask */
8012 	nodemask_t saved_node_state = node_states[N_MEMORY];
8013 	unsigned long totalpages = early_calculate_totalpages();
8014 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
8015 	struct memblock_region *r;
8016 
8017 	/* Need to find movable_zone earlier when movable_node is specified. */
8018 	find_usable_zone_for_movable();
8019 
8020 	/*
8021 	 * If movable_node is specified, ignore kernelcore and movablecore
8022 	 * options.
8023 	 */
8024 	if (movable_node_is_enabled()) {
8025 		for_each_mem_region(r) {
8026 			if (!memblock_is_hotpluggable(r))
8027 				continue;
8028 
8029 			nid = memblock_get_region_node(r);
8030 
8031 			usable_startpfn = PFN_DOWN(r->base);
8032 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8033 				min(usable_startpfn, zone_movable_pfn[nid]) :
8034 				usable_startpfn;
8035 		}
8036 
8037 		goto out2;
8038 	}
8039 
8040 	/*
8041 	 * If kernelcore=mirror is specified, ignore movablecore option
8042 	 */
8043 	if (mirrored_kernelcore) {
8044 		bool mem_below_4gb_not_mirrored = false;
8045 
8046 		for_each_mem_region(r) {
8047 			if (memblock_is_mirror(r))
8048 				continue;
8049 
8050 			nid = memblock_get_region_node(r);
8051 
8052 			usable_startpfn = memblock_region_memory_base_pfn(r);
8053 
8054 			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
8055 				mem_below_4gb_not_mirrored = true;
8056 				continue;
8057 			}
8058 
8059 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8060 				min(usable_startpfn, zone_movable_pfn[nid]) :
8061 				usable_startpfn;
8062 		}
8063 
8064 		if (mem_below_4gb_not_mirrored)
8065 			pr_warn("This configuration results in unmirrored kernel memory.\n");
8066 
8067 		goto out2;
8068 	}
8069 
8070 	/*
8071 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
8072 	 * amount of necessary memory.
8073 	 */
8074 	if (required_kernelcore_percent)
8075 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
8076 				       10000UL;
8077 	if (required_movablecore_percent)
8078 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
8079 					10000UL;
8080 
8081 	/*
8082 	 * If movablecore= was specified, calculate what size of
8083 	 * kernelcore that corresponds so that memory usable for
8084 	 * any allocation type is evenly spread. If both kernelcore
8085 	 * and movablecore are specified, then the value of kernelcore
8086 	 * will be used for required_kernelcore if it's greater than
8087 	 * what movablecore would have allowed.
8088 	 */
8089 	if (required_movablecore) {
8090 		unsigned long corepages;
8091 
8092 		/*
8093 		 * Round up so that ZONE_MOVABLE is at least as large as what
8094 		 * was requested by the user
8095 		 */
8096 		required_movablecore =
8097 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
8098 		required_movablecore = min(totalpages, required_movablecore);
8099 		corepages = totalpages - required_movablecore;
8100 
8101 		required_kernelcore = max(required_kernelcore, corepages);
8102 	}
8103 
8104 	/*
8105 	 * If kernelcore was not specified or kernelcore size is larger
8106 	 * than totalpages, there is no ZONE_MOVABLE.
8107 	 */
8108 	if (!required_kernelcore || required_kernelcore >= totalpages)
8109 		goto out;
8110 
8111 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
8112 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
8113 
8114 restart:
8115 	/* Spread kernelcore memory as evenly as possible throughout nodes */
8116 	kernelcore_node = required_kernelcore / usable_nodes;
8117 	for_each_node_state(nid, N_MEMORY) {
8118 		unsigned long start_pfn, end_pfn;
8119 
8120 		/*
8121 		 * Recalculate kernelcore_node if the division per node
8122 		 * now exceeds what is necessary to satisfy the requested
8123 		 * amount of memory for the kernel
8124 		 */
8125 		if (required_kernelcore < kernelcore_node)
8126 			kernelcore_node = required_kernelcore / usable_nodes;
8127 
8128 		/*
8129 		 * As the map is walked, we track how much memory is usable
8130 		 * by the kernel using kernelcore_remaining. When it is
8131 		 * 0, the rest of the node is usable by ZONE_MOVABLE
8132 		 */
8133 		kernelcore_remaining = kernelcore_node;
8134 
8135 		/* Go through each range of PFNs within this node */
8136 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
8137 			unsigned long size_pages;
8138 
8139 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
8140 			if (start_pfn >= end_pfn)
8141 				continue;
8142 
8143 			/* Account for what is only usable for kernelcore */
8144 			if (start_pfn < usable_startpfn) {
8145 				unsigned long kernel_pages;
8146 				kernel_pages = min(end_pfn, usable_startpfn)
8147 								- start_pfn;
8148 
8149 				kernelcore_remaining -= min(kernel_pages,
8150 							kernelcore_remaining);
8151 				required_kernelcore -= min(kernel_pages,
8152 							required_kernelcore);
8153 
8154 				/* Continue if range is now fully accounted */
8155 				if (end_pfn <= usable_startpfn) {
8156 
8157 					/*
8158 					 * Push zone_movable_pfn to the end so
8159 					 * that if we have to rebalance
8160 					 * kernelcore across nodes, we will
8161 					 * not double account here
8162 					 */
8163 					zone_movable_pfn[nid] = end_pfn;
8164 					continue;
8165 				}
8166 				start_pfn = usable_startpfn;
8167 			}
8168 
8169 			/*
8170 			 * The usable PFN range for ZONE_MOVABLE is from
8171 			 * start_pfn->end_pfn. Calculate size_pages as the
8172 			 * number of pages used as kernelcore
8173 			 */
8174 			size_pages = end_pfn - start_pfn;
8175 			if (size_pages > kernelcore_remaining)
8176 				size_pages = kernelcore_remaining;
8177 			zone_movable_pfn[nid] = start_pfn + size_pages;
8178 
8179 			/*
8180 			 * Some kernelcore has been met, update counts and
8181 			 * break if the kernelcore for this node has been
8182 			 * satisfied
8183 			 */
8184 			required_kernelcore -= min(required_kernelcore,
8185 								size_pages);
8186 			kernelcore_remaining -= size_pages;
8187 			if (!kernelcore_remaining)
8188 				break;
8189 		}
8190 	}
8191 
8192 	/*
8193 	 * If there is still required_kernelcore, we do another pass with one
8194 	 * less node in the count. This will push zone_movable_pfn[nid] further
8195 	 * along on the nodes that still have memory until kernelcore is
8196 	 * satisfied
8197 	 */
8198 	usable_nodes--;
8199 	if (usable_nodes && required_kernelcore > usable_nodes)
8200 		goto restart;
8201 
8202 out2:
8203 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
8204 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
8205 		unsigned long start_pfn, end_pfn;
8206 
8207 		zone_movable_pfn[nid] =
8208 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
8209 
8210 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8211 		if (zone_movable_pfn[nid] >= end_pfn)
8212 			zone_movable_pfn[nid] = 0;
8213 	}
8214 
8215 out:
8216 	/* restore the node_state */
8217 	node_states[N_MEMORY] = saved_node_state;
8218 }
8219 
8220 /* Any regular or high memory on that node ? */
8221 static void check_for_memory(pg_data_t *pgdat, int nid)
8222 {
8223 	enum zone_type zone_type;
8224 
8225 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
8226 		struct zone *zone = &pgdat->node_zones[zone_type];
8227 		if (populated_zone(zone)) {
8228 			if (IS_ENABLED(CONFIG_HIGHMEM))
8229 				node_set_state(nid, N_HIGH_MEMORY);
8230 			if (zone_type <= ZONE_NORMAL)
8231 				node_set_state(nid, N_NORMAL_MEMORY);
8232 			break;
8233 		}
8234 	}
8235 }
8236 
8237 /*
8238  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
8239  * such cases we allow max_zone_pfn to be sorted in descending order.
8240  */
8241 bool __weak arch_has_descending_max_zone_pfns(void)
8242 {
8243 	return false;
8244 }
8245 
8246 /**
8247  * free_area_init - Initialise all pg_data_t and zone data
8248  * @max_zone_pfn: an array of max PFNs for each zone
8249  *
8250  * This will call free_area_init_node() for each active node in the system.
8251  * Using the page ranges provided by memblock_set_node(), the size of each
8252  * zone in each node and their holes is calculated. If the maximum PFN
8253  * between two adjacent zones match, it is assumed that the zone is empty.
8254  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8255  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8256  * starts where the previous one ended. For example, ZONE_DMA32 starts
8257  * at arch_max_dma_pfn.
8258  */
8259 void __init free_area_init(unsigned long *max_zone_pfn)
8260 {
8261 	unsigned long start_pfn, end_pfn;
8262 	int i, nid, zone;
8263 	bool descending;
8264 
8265 	/* Record where the zone boundaries are */
8266 	memset(arch_zone_lowest_possible_pfn, 0,
8267 				sizeof(arch_zone_lowest_possible_pfn));
8268 	memset(arch_zone_highest_possible_pfn, 0,
8269 				sizeof(arch_zone_highest_possible_pfn));
8270 
8271 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
8272 	descending = arch_has_descending_max_zone_pfns();
8273 
8274 	for (i = 0; i < MAX_NR_ZONES; i++) {
8275 		if (descending)
8276 			zone = MAX_NR_ZONES - i - 1;
8277 		else
8278 			zone = i;
8279 
8280 		if (zone == ZONE_MOVABLE)
8281 			continue;
8282 
8283 		end_pfn = max(max_zone_pfn[zone], start_pfn);
8284 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
8285 		arch_zone_highest_possible_pfn[zone] = end_pfn;
8286 
8287 		start_pfn = end_pfn;
8288 	}
8289 
8290 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
8291 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
8292 	find_zone_movable_pfns_for_nodes();
8293 
8294 	/* Print out the zone ranges */
8295 	pr_info("Zone ranges:\n");
8296 	for (i = 0; i < MAX_NR_ZONES; i++) {
8297 		if (i == ZONE_MOVABLE)
8298 			continue;
8299 		pr_info("  %-8s ", zone_names[i]);
8300 		if (arch_zone_lowest_possible_pfn[i] ==
8301 				arch_zone_highest_possible_pfn[i])
8302 			pr_cont("empty\n");
8303 		else
8304 			pr_cont("[mem %#018Lx-%#018Lx]\n",
8305 				(u64)arch_zone_lowest_possible_pfn[i]
8306 					<< PAGE_SHIFT,
8307 				((u64)arch_zone_highest_possible_pfn[i]
8308 					<< PAGE_SHIFT) - 1);
8309 	}
8310 
8311 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
8312 	pr_info("Movable zone start for each node\n");
8313 	for (i = 0; i < MAX_NUMNODES; i++) {
8314 		if (zone_movable_pfn[i])
8315 			pr_info("  Node %d: %#018Lx\n", i,
8316 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8317 	}
8318 
8319 	/*
8320 	 * Print out the early node map, and initialize the
8321 	 * subsection-map relative to active online memory ranges to
8322 	 * enable future "sub-section" extensions of the memory map.
8323 	 */
8324 	pr_info("Early memory node ranges\n");
8325 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8326 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8327 			(u64)start_pfn << PAGE_SHIFT,
8328 			((u64)end_pfn << PAGE_SHIFT) - 1);
8329 		subsection_map_init(start_pfn, end_pfn - start_pfn);
8330 	}
8331 
8332 	/* Initialise every node */
8333 	mminit_verify_pageflags_layout();
8334 	setup_nr_node_ids();
8335 	for_each_node(nid) {
8336 		pg_data_t *pgdat;
8337 
8338 		if (!node_online(nid)) {
8339 			pr_info("Initializing node %d as memoryless\n", nid);
8340 
8341 			/* Allocator not initialized yet */
8342 			pgdat = arch_alloc_nodedata(nid);
8343 			if (!pgdat)
8344 				panic("Cannot allocate %zuB for node %d.\n",
8345 				       sizeof(*pgdat), nid);
8346 			arch_refresh_nodedata(nid, pgdat);
8347 			free_area_init_memoryless_node(nid);
8348 
8349 			/*
8350 			 * We do not want to confuse userspace by sysfs
8351 			 * files/directories for a node without any memory
8352 			 * attached to it, so this node is not marked as
8353 			 * N_MEMORY and not marked online so that no sysfs
8354 			 * hierarchy will be created via register_one_node for
8355 			 * it. The pgdat will get fully initialized by
8356 			 * hotadd_init_pgdat() when memory is hotplugged into
8357 			 * this node.
8358 			 */
8359 			continue;
8360 		}
8361 
8362 		pgdat = NODE_DATA(nid);
8363 		free_area_init_node(nid);
8364 
8365 		/* Any memory on that node */
8366 		if (pgdat->node_present_pages)
8367 			node_set_state(nid, N_MEMORY);
8368 		check_for_memory(pgdat, nid);
8369 	}
8370 
8371 	memmap_init();
8372 }
8373 
8374 static int __init cmdline_parse_core(char *p, unsigned long *core,
8375 				     unsigned long *percent)
8376 {
8377 	unsigned long long coremem;
8378 	char *endptr;
8379 
8380 	if (!p)
8381 		return -EINVAL;
8382 
8383 	/* Value may be a percentage of total memory, otherwise bytes */
8384 	coremem = simple_strtoull(p, &endptr, 0);
8385 	if (*endptr == '%') {
8386 		/* Paranoid check for percent values greater than 100 */
8387 		WARN_ON(coremem > 100);
8388 
8389 		*percent = coremem;
8390 	} else {
8391 		coremem = memparse(p, &p);
8392 		/* Paranoid check that UL is enough for the coremem value */
8393 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8394 
8395 		*core = coremem >> PAGE_SHIFT;
8396 		*percent = 0UL;
8397 	}
8398 	return 0;
8399 }
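
/*
 * Usage example (illustrative, 4 KiB pages): "kernelcore=512M" sets *core
 * to 131072 pages and *percent to 0, while "kernelcore=30%" sets *percent
 * to 30 and leaves *core untouched.
 */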
8400 
8401 /*
8402  * kernelcore=size sets the amount of memory for use for allocations that
8403  * cannot be reclaimed or migrated.
8404  */
8405 static int __init cmdline_parse_kernelcore(char *p)
8406 {
8407 	/* parse kernelcore=mirror */
8408 	if (parse_option_str(p, "mirror")) {
8409 		mirrored_kernelcore = true;
8410 		return 0;
8411 	}
8412 
8413 	return cmdline_parse_core(p, &required_kernelcore,
8414 				  &required_kernelcore_percent);
8415 }
8416 
8417 /*
8418  * movablecore=size sets the amount of memory for use for allocations that
8419  * can be reclaimed or migrated.
8420  */
8421 static int __init cmdline_parse_movablecore(char *p)
8422 {
8423 	return cmdline_parse_core(p, &required_movablecore,
8424 				  &required_movablecore_percent);
8425 }
8426 
8427 early_param("kernelcore", cmdline_parse_kernelcore);
8428 early_param("movablecore", cmdline_parse_movablecore);
8429 
8430 void adjust_managed_page_count(struct page *page, long count)
8431 {
8432 	atomic_long_add(count, &page_zone(page)->managed_pages);
8433 	totalram_pages_add(count);
8434 #ifdef CONFIG_HIGHMEM
8435 	if (PageHighMem(page))
8436 		totalhigh_pages_add(count);
8437 #endif
8438 }
8439 EXPORT_SYMBOL(adjust_managed_page_count);
8440 
8441 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8442 {
8443 	void *pos;
8444 	unsigned long pages = 0;
8445 
8446 	start = (void *)PAGE_ALIGN((unsigned long)start);
8447 	end = (void *)((unsigned long)end & PAGE_MASK);
8448 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8449 		struct page *page = virt_to_page(pos);
8450 		void *direct_map_addr;
8451 
8452 		/*
8453 		 * 'direct_map_addr' might be different from 'pos'
8454 		 * because some architectures' virt_to_page()
8455 		 * work with aliases.  Getting the direct map
8456 		 * address ensures that we get a _writeable_
8457 		 * alias for the memset().
8458 		 */
8459 		direct_map_addr = page_address(page);
8460 		/*
8461 		 * Perform a kasan-unchecked memset() since this memory
8462 		 * has not been initialized.
8463 		 */
8464 		direct_map_addr = kasan_reset_tag(direct_map_addr);
8465 		if ((unsigned int)poison <= 0xFF)
8466 			memset(direct_map_addr, poison, PAGE_SIZE);
8467 
8468 		free_reserved_page(page);
8469 	}
8470 
8471 	if (pages && s)
8472 		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
8473 
8474 	return pages;
8475 }
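
/*
 * Typical usage (a hedged sketch, cf. free_initmem_default() in
 * include/linux/mm.h): release the kernel's init sections back to the buddy
 * allocator once boot is complete:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 */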
8476 
8477 void __init mem_init_print_info(void)
8478 {
8479 	unsigned long physpages, codesize, datasize, rosize, bss_size;
8480 	unsigned long init_code_size, init_data_size;
8481 
8482 	physpages = get_num_physpages();
8483 	codesize = _etext - _stext;
8484 	datasize = _edata - _sdata;
8485 	rosize = __end_rodata - __start_rodata;
8486 	bss_size = __bss_stop - __bss_start;
8487 	init_data_size = __init_end - __init_begin;
8488 	init_code_size = _einittext - _sinittext;
8489 
8490 	/*
8491 	 * Detect special cases and adjust section sizes accordingly:
8492 	 * 1) .init.* may be embedded into .data sections
8493 	 * 2) .init.text.* may lie outside [__init_begin, __init_end],
8494 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
8495 	 * 3) .rodata.* may be embedded into .text or .data sections.
8496 	 */
8497 #define adj_init_size(start, end, size, pos, adj) \
8498 	do { \
8499 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
8500 			size -= adj; \
8501 	} while (0)
8502 
8503 	adj_init_size(__init_begin, __init_end, init_data_size,
8504 		     _sinittext, init_code_size);
8505 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8506 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8507 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8508 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8509 
8510 #undef	adj_init_size
8511 
8512 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8513 #ifdef	CONFIG_HIGHMEM
8514 		", %luK highmem"
8515 #endif
8516 		")\n",
8517 		K(nr_free_pages()), K(physpages),
8518 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
8519 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
8520 		K(physpages - totalram_pages() - totalcma_pages),
8521 		K(totalcma_pages)
8522 #ifdef	CONFIG_HIGHMEM
8523 		, K(totalhigh_pages())
8524 #endif
8525 		);
8526 }
8527 
8528 /**
8529  * set_dma_reserve - set the specified number of pages reserved in the first zone
8530  * @new_dma_reserve: The number of pages to mark reserved
8531  *
8532  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8533  * In the DMA zone, a significant percentage may be consumed by kernel image
8534  * and other unfreeable allocations which can skew the watermarks badly. This
8535  * function may optionally be used to account for unfreeable pages in the
8536  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8537  * smaller per-cpu batchsize.
8538  */
8539 void __init set_dma_reserve(unsigned long new_dma_reserve)
8540 {
8541 	dma_reserve = new_dma_reserve;
8542 }
8543 
8544 static int page_alloc_cpu_dead(unsigned int cpu)
8545 {
8546 	struct zone *zone;
8547 
8548 	lru_add_drain_cpu(cpu);
8549 	mlock_drain_remote(cpu);
8550 	drain_pages(cpu);
8551 
8552 	/*
8553 	 * Spill the event counters of the dead processor
8554 	 * into the current processor's event counters.
8555 	 * This artificially elevates the count of the current
8556 	 * processor.
8557 	 */
8558 	vm_events_fold_cpu(cpu);
8559 
8560 	/*
8561 	 * Zero the differential counters of the dead processor
8562 	 * so that the vm statistics are consistent.
8563 	 *
8564 	 * This is only okay since the processor is dead and cannot
8565 	 * race with what we are doing.
8566 	 */
8567 	cpu_vm_stats_fold(cpu);
8568 
8569 	for_each_populated_zone(zone)
8570 		zone_pcp_update(zone, 0);
8571 
8572 	return 0;
8573 }
8574 
8575 static int page_alloc_cpu_online(unsigned int cpu)
8576 {
8577 	struct zone *zone;
8578 
8579 	for_each_populated_zone(zone)
8580 		zone_pcp_update(zone, 1);
8581 	return 0;
8582 }
8583 
8584 #ifdef CONFIG_NUMA
8585 int hashdist = HASHDIST_DEFAULT;
8586 
8587 static int __init set_hashdist(char *str)
8588 {
8589 	if (!str)
8590 		return 0;
8591 	hashdist = simple_strtoul(str, &str, 0);
8592 	return 1;
8593 }
8594 __setup("hashdist=", set_hashdist);
8595 #endif
8596 
8597 void __init page_alloc_init(void)
8598 {
8599 	int ret;
8600 
8601 #ifdef CONFIG_NUMA
8602 	if (num_node_state(N_MEMORY) == 1)
8603 		hashdist = 0;
8604 #endif
8605 
8606 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8607 					"mm/page_alloc:pcp",
8608 					page_alloc_cpu_online,
8609 					page_alloc_cpu_dead);
8610 	WARN_ON(ret < 0);
8611 }
8612 
8613 /*
8614  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8615  *	or min_free_kbytes changes.
8616  */
8617 static void calculate_totalreserve_pages(void)
8618 {
8619 	struct pglist_data *pgdat;
8620 	unsigned long reserve_pages = 0;
8621 	enum zone_type i, j;
8622 
8623 	for_each_online_pgdat(pgdat) {
8624 
8625 		pgdat->totalreserve_pages = 0;
8626 
8627 		for (i = 0; i < MAX_NR_ZONES; i++) {
8628 			struct zone *zone = pgdat->node_zones + i;
8629 			long max = 0;
8630 			unsigned long managed_pages = zone_managed_pages(zone);
8631 
8632 			/* Find valid and maximum lowmem_reserve in the zone */
8633 			for (j = i; j < MAX_NR_ZONES; j++) {
8634 				if (zone->lowmem_reserve[j] > max)
8635 					max = zone->lowmem_reserve[j];
8636 			}
8637 
8638 			/* we treat the high watermark as reserved pages. */
8639 			max += high_wmark_pages(zone);
8640 
8641 			if (max > managed_pages)
8642 				max = managed_pages;
8643 
8644 			pgdat->totalreserve_pages += max;
8645 
8646 			reserve_pages += max;
8647 		}
8648 	}
8649 	totalreserve_pages = reserve_pages;
8650 }
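
/*
 * Worked example (all numbers assumed): a zone whose largest
 * lowmem_reserve[] entry is 36000 pages and whose high watermark is 4000
 * pages contributes min(36000 + 4000, managed_pages) = 40000 pages to
 * its node's totalreserve_pages.
 */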
8651 
8652 /*
8653  * setup_per_zone_lowmem_reserve - called whenever
8654  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8655  *	has a correct lowmem_reserve value, so an adequate number of
8656  *	pages are left in the zone after a successful __alloc_pages().
8657  */
8658 static void setup_per_zone_lowmem_reserve(void)
8659 {
8660 	struct pglist_data *pgdat;
8661 	enum zone_type i, j;
8662 
8663 	for_each_online_pgdat(pgdat) {
8664 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8665 			struct zone *zone = &pgdat->node_zones[i];
8666 			int ratio = sysctl_lowmem_reserve_ratio[i];
8667 			bool clear = !ratio || !zone_managed_pages(zone);
8668 			unsigned long managed_pages = 0;
8669 
8670 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
8671 				struct zone *upper_zone = &pgdat->node_zones[j];
8672 
8673 				managed_pages += zone_managed_pages(upper_zone);
8674 
8675 				if (clear)
8676 					zone->lowmem_reserve[j] = 0;
8677 				else
8678 					zone->lowmem_reserve[j] = managed_pages / ratio;
8679 			}
8680 		}
8681 	}
8682 
8683 	/* update totalreserve_pages */
8684 	calculate_totalreserve_pages();
8685 }
8686 
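
/*
 * Worked example (zone sizes assumed): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] of 256 and 1,048,576 managed
 * pages in the zones above ZONE_DMA, ZONE_DMA gets
 * lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages, i.e. an
 * allocation that could have used ZONE_NORMAL but falls back to ZONE_DMA
 * must leave roughly 4096 extra free pages there.
 */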
8687 static void __setup_per_zone_wmarks(void)
8688 {
8689 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8690 	unsigned long lowmem_pages = 0;
8691 	struct zone *zone;
8692 	unsigned long flags;
8693 
8694 	/* Calculate total number of !ZONE_HIGHMEM pages */
8695 	for_each_zone(zone) {
8696 		if (!is_highmem(zone))
8697 			lowmem_pages += zone_managed_pages(zone);
8698 	}
8699 
8700 	for_each_zone(zone) {
8701 		u64 tmp;
8702 
8703 		spin_lock_irqsave(&zone->lock, flags);
8704 		tmp = (u64)pages_min * zone_managed_pages(zone);
8705 		do_div(tmp, lowmem_pages);
8706 		if (is_highmem(zone)) {
8707 			/*
8708 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8709 			 * need highmem pages, so cap pages_min to a small
8710 			 * value here.
8711 			 *
8712 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8713 			 * deltas control async page reclaim, and so should
8714 			 * not be capped for highmem.
8715 			 */
8716 			unsigned long min_pages;
8717 
8718 			min_pages = zone_managed_pages(zone) / 1024;
8719 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8720 			zone->_watermark[WMARK_MIN] = min_pages;
8721 		} else {
8722 			/*
8723 			 * If it's a lowmem zone, reserve a number of pages
8724 			 * proportionate to the zone's size.
8725 			 */
8726 			zone->_watermark[WMARK_MIN] = tmp;
8727 		}
8728 
8729 		/*
8730 		 * Set the kswapd watermarks distance according to the
8731 		 * scale factor in proportion to available memory, but
8732 		 * ensure a minimum size on small systems.
8733 		 */
8734 		tmp = max_t(u64, tmp >> 2,
8735 			    mult_frac(zone_managed_pages(zone),
8736 				      watermark_scale_factor, 10000));
8737 
8738 		zone->watermark_boost = 0;
8739 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
8740 		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8741 		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
8742 
8743 		spin_unlock_irqrestore(&zone->lock, flags);
8744 	}
8745 
8746 	/* update totalreserve_pages */
8747 	calculate_totalreserve_pages();
8748 }
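
/*
 * Worked arithmetic (all values assumed): with min_free_kbytes = 65536 on
 * a 4K-page system, pages_min = 65536 >> 2 = 16384 pages.  A lowmem zone
 * holding half of all lowmem gets WMARK_MIN = 8192 pages.  With the
 * default watermark_scale_factor of 10 and 2,097,152 managed pages,
 * tmp = max(8192 >> 2, 2097152 * 10 / 10000) = max(2048, 2097) = 2097,
 * so WMARK_LOW = 8192 + 2097 = 10289 and WMARK_HIGH = 10289 + 2097 =
 * 12386 pages.
 */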
8749 
8750 /**
8751  * setup_per_zone_wmarks - called when min_free_kbytes changes
8752  * or when memory is hot-{added|removed}
8753  *
8754  * Ensures that the watermark[min,low,high] values for each zone are set
8755  * correctly with respect to min_free_kbytes.
8756  */
8757 void setup_per_zone_wmarks(void)
8758 {
8759 	struct zone *zone;
8760 	static DEFINE_SPINLOCK(lock);
8761 
8762 	spin_lock(&lock);
8763 	__setup_per_zone_wmarks();
8764 	spin_unlock(&lock);
8765 
8766 	/*
8767 	 * The watermark sizes have changed, so update the pcp batch
8768 	 * and high limits, or the limits may be inappropriate.
8769 	 */
8770 	for_each_zone(zone)
8771 		zone_pcp_update(zone, 0);
8772 }
8773 
8774 /*
8775  * Initialise min_free_kbytes.
8776  *
8777  * For small machines we want it small (128k min).  For large machines
8778  * we want it large (256MB max).  But it is not linear, because network
8779  * bandwidth does not increase linearly with machine size.  We use
8780  *
8781  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8782  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
8783  *
8784  * which yields
8785  *
8786  * 16MB:	512k
8787  * 32MB:	724k
8788  * 64MB:	1024k
8789  * 128MB:	1448k
8790  * 256MB:	2048k
8791  * 512MB:	2896k
8792  * 1024MB:	4096k
8793  * 2048MB:	5792k
8794  * 4096MB:	8192k
8795  * 8192MB:	11584k
8796  * 16384MB:	16384k
8797  */
8798 void calculate_min_free_kbytes(void)
8799 {
8800 	unsigned long lowmem_kbytes;
8801 	int new_min_free_kbytes;
8802 
8803 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8804 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8805 
8806 	if (new_min_free_kbytes > user_min_free_kbytes)
8807 		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
8808 	else
8809 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8810 				new_min_free_kbytes, user_min_free_kbytes);
8811 
8812 }
8813 
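
/*
 * Worked example (8GB of lowmem assumed): lowmem_kbytes = 8388608, so
 * new_min_free_kbytes = int_sqrt(8388608 * 16) = int_sqrt(134217728)
 * ~= 11585, matching the ~11584k row of the table above, and well inside
 * the [128, 262144] clamp.
 */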
8814 int __meminit init_per_zone_wmark_min(void)
8815 {
8816 	calculate_min_free_kbytes();
8817 	setup_per_zone_wmarks();
8818 	refresh_zone_stat_thresholds();
8819 	setup_per_zone_lowmem_reserve();
8820 
8821 #ifdef CONFIG_NUMA
8822 	setup_min_unmapped_ratio();
8823 	setup_min_slab_ratio();
8824 #endif
8825 
8826 	khugepaged_min_free_kbytes_update();
8827 
8828 	return 0;
8829 }
8830 postcore_initcall(init_per_zone_wmark_min)
8831 
8832 /*
8833  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
8834  *	so that we can record the user-supplied value and update the zone
8835  *	watermarks whenever min_free_kbytes changes.
8836  */
8837 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8838 		void *buffer, size_t *length, loff_t *ppos)
8839 {
8840 	int rc;
8841 
8842 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8843 	if (rc)
8844 		return rc;
8845 
8846 	if (write) {
8847 		user_min_free_kbytes = min_free_kbytes;
8848 		setup_per_zone_wmarks();
8849 	}
8850 	return 0;
8851 }
8852 
8853 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8854 		void *buffer, size_t *length, loff_t *ppos)
8855 {
8856 	int rc;
8857 
8858 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8859 	if (rc)
8860 		return rc;
8861 
8862 	if (write)
8863 		setup_per_zone_wmarks();
8864 
8865 	return 0;
8866 }
8867 
8868 #ifdef CONFIG_NUMA
8869 static void setup_min_unmapped_ratio(void)
8870 {
8871 	pg_data_t *pgdat;
8872 	struct zone *zone;
8873 
8874 	for_each_online_pgdat(pgdat)
8875 		pgdat->min_unmapped_pages = 0;
8876 
8877 	for_each_zone(zone)
8878 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8879 						         sysctl_min_unmapped_ratio) / 100;
8880 }
8881 
8882 
8883 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8884 		void *buffer, size_t *length, loff_t *ppos)
8885 {
8886 	int rc;
8887 
8888 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8889 	if (rc)
8890 		return rc;
8891 
8892 	setup_min_unmapped_ratio();
8893 
8894 	return 0;
8895 }
8896 
8897 static void setup_min_slab_ratio(void)
8898 {
8899 	pg_data_t *pgdat;
8900 	struct zone *zone;
8901 
8902 	for_each_online_pgdat(pgdat)
8903 		pgdat->min_slab_pages = 0;
8904 
8905 	for_each_zone(zone)
8906 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8907 						     sysctl_min_slab_ratio) / 100;
8908 }
8909 
8910 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8911 		void *buffer, size_t *length, loff_t *ppos)
8912 {
8913 	int rc;
8914 
8915 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8916 	if (rc)
8917 		return rc;
8918 
8919 	setup_min_slab_ratio();
8920 
8921 	return 0;
8922 }
8923 #endif
8924 
8925 /*
8926  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8927  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8928  *	proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
8929  *
8930  * The reserve ratio obviously has absolutely no relation with the
8931  * The reserve ratio has no relation to the minimum watermarks. The
8932  * lowmem reserve ratio is only meaningful in relation to the boot-time
8933  * zone sizes.
8934 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8935 		void *buffer, size_t *length, loff_t *ppos)
8936 {
8937 	int i;
8938 
8939 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8940 
8941 	for (i = 0; i < MAX_NR_ZONES; i++) {
8942 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8943 			sysctl_lowmem_reserve_ratio[i] = 0;
8944 	}
8945 
8946 	setup_per_zone_lowmem_reserve();
8947 	return 0;
8948 }
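
/*
 * For illustration, the ratios can be inspected and changed at run time
 * via procfs (zone count and values assumed):
 *
 *	# cat /proc/sys/vm/lowmem_reserve_ratio
 *	256     32      0       0
 *	# echo "256 48 0 0" > /proc/sys/vm/lowmem_reserve_ratio
 *
 * A value below 1 is rewritten to 0 above, which disables the reserve for
 * that zone (see the "clear" handling in setup_per_zone_lowmem_reserve()).
 */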
8949 
8950 /*
8951  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8952  * cpu. It is the fraction of total pages in each zone that a hot per cpu
8953  * cpu. It is the fraction of total pages in each zone that a hot per-cpu
8954  * pagelist can hold before it gets flushed back to the buddy allocator.
8955 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8956 		int write, void *buffer, size_t *length, loff_t *ppos)
8957 {
8958 	struct zone *zone;
8959 	int old_percpu_pagelist_high_fraction;
8960 	int ret;
8961 
8962 	mutex_lock(&pcp_batch_high_lock);
8963 	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8964 
8965 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8966 	if (!write || ret < 0)
8967 		goto out;
8968 
8969 	/* Sanity checking to avoid pcp imbalance */
8970 	if (percpu_pagelist_high_fraction &&
8971 	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8972 		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8973 		ret = -EINVAL;
8974 		goto out;
8975 	}
8976 
8977 	/* No change? */
8978 	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8979 		goto out;
8980 
8981 	for_each_populated_zone(zone)
8982 		zone_set_pageset_high_and_batch(zone, 0);
8983 out:
8984 	mutex_unlock(&pcp_batch_high_lock);
8985 	return ret;
8986 }
8987 
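
/*
 * For illustration (sizes assumed): writing 8, the smallest accepted
 * non-zero value, to /proc/sys/vm/percpu_pagelist_high_fraction on a
 * zone with 1,048,576 managed pages lets that zone's pcplists together
 * cache up to roughly 1048576 / 8 = 131072 pages, split across the local
 * CPUs.  Writing 0 restores the default heuristic sizing.
 */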
8988 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8989 /*
8990  * Returns the number of pages that the arch has reserved but
8991  * that are not known to alloc_large_system_hash().
8992  */
8993 static unsigned long __init arch_reserved_kernel_pages(void)
8994 {
8995 	return 0;
8996 }
8997 #endif
8998 
8999 /*
9000  * Adaptive scale is meant to reduce sizes of hash tables on large memory
9001  * machines. As memory size increases, the scale also increases, but at a
9002  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
9003  * quadruples the scale is increased by one, which means the size of the hash
9004  * table only doubles, instead of quadrupling as well.
9005  * Because 32-bit systems cannot have the large physical memory where this
9006  * scaling makes sense, it is disabled on such platforms.
9007  */
9008 #if __BITS_PER_LONG > 32
9009 #define ADAPT_SCALE_BASE	(64ul << 30)
9010 #define ADAPT_SCALE_SHIFT	2
9011 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
9012 #endif
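
/*
 * Worked example (1TB of memory, 4K pages assumed): numentries starts at
 * ~268M pages while ADAPT_SCALE_NPAGES is 16M pages (64G >> PAGE_SHIFT),
 * so the loop below runs for adapt = 16M, 64M and 256M pages, bumping
 * scale by 3.  Compared with a 64G machine, memory grew 16x but the
 * resulting table only gets 16x >> 3 = 2x the entries.
 */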
9013 
9014 /*
9015  * allocate a large system hash table from bootmem
9016  * - it is assumed that the hash table must contain an exact power-of-2
9017  *   quantity of entries
9018  * - limit is the number of hash buckets, not the total allocation size
9019  */
9020 void *__init alloc_large_system_hash(const char *tablename,
9021 				     unsigned long bucketsize,
9022 				     unsigned long numentries,
9023 				     int scale,
9024 				     int flags,
9025 				     unsigned int *_hash_shift,
9026 				     unsigned int *_hash_mask,
9027 				     unsigned long low_limit,
9028 				     unsigned long high_limit)
9029 {
9030 	unsigned long long max = high_limit;
9031 	unsigned long log2qty, size;
9032 	void *table;
9033 	gfp_t gfp_flags;
9034 	bool virt;
9035 	bool huge;
9036 
9037 	/* allow the kernel cmdline to have a say */
9038 	if (!numentries) {
9039 		/* round applicable memory size up to nearest megabyte */
9040 		numentries = nr_kernel_pages;
9041 		numentries -= arch_reserved_kernel_pages();
9042 
9043 		/* Rounding up is unnecessary when PAGE_SIZE >= 1MB */
9044 		if (PAGE_SIZE < SZ_1M)
9045 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
9046 
9047 #if __BITS_PER_LONG > 32
9048 		if (!high_limit) {
9049 			unsigned long adapt;
9050 
9051 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
9052 			     adapt <<= ADAPT_SCALE_SHIFT)
9053 				scale++;
9054 		}
9055 #endif
9056 
9057 		/* limit to 1 bucket per 2^scale bytes of low memory */
9058 		if (scale > PAGE_SHIFT)
9059 			numentries >>= (scale - PAGE_SHIFT);
9060 		else
9061 			numentries <<= (PAGE_SHIFT - scale);
9062 
9063 		/* Make sure we've got at least a 0-order allocation. */
9064 		if (unlikely(flags & HASH_SMALL)) {
9065 			/* Makes no sense without HASH_EARLY */
9066 			WARN_ON(!(flags & HASH_EARLY));
9067 			if (!(numentries >> *_hash_shift)) {
9068 				numentries = 1UL << *_hash_shift;
9069 				BUG_ON(!numentries);
9070 			}
9071 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9072 			numentries = PAGE_SIZE / bucketsize;
9073 	}
9074 	numentries = roundup_pow_of_two(numentries);
9075 
9076 	/* limit allocation size to 1/16 total memory by default */
9077 	if (max == 0) {
9078 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
9079 		do_div(max, bucketsize);
9080 	}
9081 	max = min(max, 0x80000000ULL);
9082 
9083 	if (numentries < low_limit)
9084 		numentries = low_limit;
9085 	if (numentries > max)
9086 		numentries = max;
9087 
9088 	log2qty = ilog2(numentries);
9089 
9090 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
9091 	do {
9092 		virt = false;
9093 		size = bucketsize << log2qty;
9094 		if (flags & HASH_EARLY) {
9095 			if (flags & HASH_ZERO)
9096 				table = memblock_alloc(size, SMP_CACHE_BYTES);
9097 			else
9098 				table = memblock_alloc_raw(size,
9099 							   SMP_CACHE_BYTES);
9100 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
9101 			table = vmalloc_huge(size, gfp_flags);
9102 			virt = true;
9103 			if (table)
9104 				huge = is_vm_area_hugepages(table);
9105 		} else {
9106 			/*
9107 			 * If bucketsize is not a power of two, we may need to
9108 			 * free some pages at the end of the hash table, which
9109 			 * alloc_pages_exact() does automatically.
9110 			 */
9111 			table = alloc_pages_exact(size, gfp_flags);
9112 			kmemleak_alloc(table, size, 1, gfp_flags);
9113 		}
9114 	} while (!table && size > PAGE_SIZE && --log2qty);
9115 
9116 	if (!table)
9117 		panic("Failed to allocate %s hash table\n", tablename);
9118 
9119 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
9120 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
9121 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
9122 
9123 	if (_hash_shift)
9124 		*_hash_shift = log2qty;
9125 	if (_hash_mask)
9126 		*_hash_mask = (1 << log2qty) - 1;
9127 
9128 	return table;
9129 }
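
/*
 * Example caller, modelled on the inode cache setup in fs/inode.c (exact
 * flags and scale differ across kernel versions):
 *
 *	inode_hashtable =
 *		alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14,
 *					HASH_EARLY | HASH_ZERO,
 *					&i_hash_shift, &i_hash_mask,
 *					0, 0);
 *
 * Here scale = 14 asks for about one bucket per 16KB (2^14 bytes) of low
 * memory when ihash_entries is not given on the command line.
 */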
9130 
9131 #ifdef CONFIG_CONTIG_ALLOC
9132 #if defined(CONFIG_DYNAMIC_DEBUG) || \
9133 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9134 /* Usage: See admin-guide/dynamic-debug-howto.rst */
9135 static void alloc_contig_dump_pages(struct list_head *page_list)
9136 {
9137 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9138 
9139 	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9140 		struct page *page;
9141 
9142 		dump_stack();
9143 		list_for_each_entry(page, page_list, lru)
9144 			dump_page(page, "migration failure");
9145 	}
9146 }
9147 #else
9148 static inline void alloc_contig_dump_pages(struct list_head *page_list)
9149 {
9150 }
9151 #endif
9152 
9153 /* [start, end) must belong to a single zone. */
9154 int __alloc_contig_migrate_range(struct compact_control *cc,
9155 					unsigned long start, unsigned long end)
9156 {
9157 	/* This function is based on compact_zone() from compaction.c. */
9158 	unsigned int nr_reclaimed;
9159 	unsigned long pfn = start;
9160 	unsigned int tries = 0;
9161 	int ret = 0;
9162 	struct migration_target_control mtc = {
9163 		.nid = zone_to_nid(cc->zone),
9164 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9165 	};
9166 
9167 	lru_cache_disable();
9168 
9169 	while (pfn < end || !list_empty(&cc->migratepages)) {
9170 		if (fatal_signal_pending(current)) {
9171 			ret = -EINTR;
9172 			break;
9173 		}
9174 
9175 		if (list_empty(&cc->migratepages)) {
9176 			cc->nr_migratepages = 0;
9177 			ret = isolate_migratepages_range(cc, pfn, end);
9178 			if (ret && ret != -EAGAIN)
9179 				break;
9180 			pfn = cc->migrate_pfn;
9181 			tries = 0;
9182 		} else if (++tries == 5) {
9183 			ret = -EBUSY;
9184 			break;
9185 		}
9186 
9187 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9188 							&cc->migratepages);
9189 		cc->nr_migratepages -= nr_reclaimed;
9190 
9191 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
9192 			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
9193 
9194 		/*
9195 		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
9196 		 * to retry on this error, so do the same here.
9197 		 */
9198 		if (ret == -ENOMEM)
9199 			break;
9200 	}
9201 
9202 	lru_cache_enable();
9203 	if (ret < 0) {
9204 		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
9205 			alloc_contig_dump_pages(&cc->migratepages);
9206 		putback_movable_pages(&cc->migratepages);
9207 		return ret;
9208 	}
9209 	return 0;
9210 }
9211 
9212 /**
9213  * alloc_contig_range() -- tries to allocate given range of pages
9214  * @start:	start PFN to allocate
9215  * @end:	one-past-the-last PFN to allocate
9216  * @migratetype:	migratetype of the underlying pageblocks (either
9217  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
9218  *			in range must have the same migratetype and it must
9219  *			be either of the two.
9220  * @gfp_mask:	GFP mask to use during compaction
9221  *
9222  * The PFN range does not have to be pageblock aligned. The PFN range must
9223  * belong to a single zone.
9224  *
9225  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9226  * pageblocks in the range.  Once isolated, the pageblocks should not
9227  * be modified by others.
9228  *
9229  * Return: zero on success or negative error code.  On success all
9230  * pages which PFN is in [start, end) are allocated for the caller and
9231  * need to be freed with free_contig_range().
9232  */
9233 int alloc_contig_range(unsigned long start, unsigned long end,
9234 		       unsigned migratetype, gfp_t gfp_mask)
9235 {
9236 	unsigned long outer_start, outer_end;
9237 	int order;
9238 	int ret = 0;
9239 
9240 	struct compact_control cc = {
9241 		.nr_migratepages = 0,
9242 		.order = -1,
9243 		.zone = page_zone(pfn_to_page(start)),
9244 		.mode = MIGRATE_SYNC,
9245 		.ignore_skip_hint = true,
9246 		.no_set_skip_hint = true,
9247 		.gfp_mask = current_gfp_context(gfp_mask),
9248 		.alloc_contig = true,
9249 	};
9250 	INIT_LIST_HEAD(&cc.migratepages);
9251 
9252 	/*
9253 	 * What we do here is mark all pageblocks in the range as
9254 	 * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
9255 	 * have different sizes, and due to the way the page allocator
9256 	 * works, start_isolate_page_range() has special handling for this.
9257 	 *
9258 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9259 	 * migrate the pages from an unaligned range (i.e. the pages that
9260 	 * we are interested in). This will put all the pages in the
9261 	 * range back into the page allocator as MIGRATE_ISOLATE.
9262 	 *
9263 	 * When this is done, we take the pages in the range from the
9264 	 * page allocator, removing them from the buddy system.  This way
9265 	 * the page allocator will never consider using them.
9266 	 *
9267 	 * This lets us mark the pageblocks back as
9268 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9269 	 * aligned range but not in the unaligned, original range are
9270 	 * put back into the page allocator so the buddy system can use them.
9271 	 */
9272 
9273 	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
9274 	if (ret)
9275 		goto done;
9276 
9277 	drain_all_pages(cc.zone);
9278 
9279 	/*
9280 	 * In case of -EBUSY, we'd like to know which page causes the problem.
9281 	 * So, just fall through. test_pages_isolated() has a tracepoint
9282 	 * which will report the busy page.
9283 	 *
9284 	 * It is possible that busy pages could become available before
9285 	 * the call to test_pages_isolated, and the range will actually be
9286 	 * allocated.  So, if we fall through, be sure to clear ret so that
9287 	 * -EBUSY is not accidentally used or returned to the caller.
9288 	 */
9289 	ret = __alloc_contig_migrate_range(&cc, start, end);
9290 	if (ret && ret != -EBUSY)
9291 		goto done;
9292 	ret = 0;
9293 
9294 	/*
9295 	 * Pages from [start, end) are within pageblock_nr_pages-aligned
9296 	 * blocks that are marked as MIGRATE_ISOLATE.  What's more, all
9297 	 * pages in [start, end) are free in the page allocator.  What we
9298 	 * are going to do is allocate all pages from [start, end)
9299 	 * (that is, remove them from the page allocator).
9300 	 *
9301 	 * The only problem is that pages at the beginning and at the end
9302 	 * of the range of interest may not be aligned with the pages the
9303 	 * page allocator holds, i.e. they can be part of higher-order
9304 	 * pages.  Because of this, we reserve the bigger range and once
9305 	 * this is done free the pages we are not interested in.
9306 	 *
9307 	 * We don't have to hold zone->lock here because the pages are
9308 	 * isolated and thus won't get removed from the buddy system.
9309 	 */
9310 
9311 	order = 0;
9312 	outer_start = start;
9313 	while (!PageBuddy(pfn_to_page(outer_start))) {
9314 		if (++order >= MAX_ORDER) {
9315 			outer_start = start;
9316 			break;
9317 		}
9318 		outer_start &= ~0UL << order;
9319 	}
9320 
9321 	if (outer_start != start) {
9322 		order = buddy_order(pfn_to_page(outer_start));
9323 
9324 		/*
9325 		 * The outer_start page could be a small-order buddy page that
9326 		 * doesn't include the start page. Adjust outer_start in this
9327 		 * case so that the failed page is reported properly by the
9328 		 * tracepoint in test_pages_isolated().
9329 		 */
9330 		if (outer_start + (1UL << order) <= start)
9331 			outer_start = start;
9332 	}
9333 
9334 	/* Make sure the range is really isolated. */
9335 	if (test_pages_isolated(outer_start, end, 0)) {
9336 		ret = -EBUSY;
9337 		goto done;
9338 	}
9339 
9340 	/* Grab isolated pages from freelists. */
9341 	outer_end = isolate_freepages_range(&cc, outer_start, end);
9342 	if (!outer_end) {
9343 		ret = -EBUSY;
9344 		goto done;
9345 	}
9346 
9347 	/* Free head and tail (if any) */
9348 	if (start != outer_start)
9349 		free_contig_range(outer_start, start - outer_start);
9350 	if (end != outer_end)
9351 		free_contig_range(end, outer_end - end);
9352 
9353 done:
9354 	undo_isolate_page_range(start, end, migratetype);
9355 	return ret;
9356 }
9357 EXPORT_SYMBOL(alloc_contig_range);
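
/*
 * Example caller (CMA is the main in-tree user): cma_alloc() in mm/cma.c
 * carves a region out of a CMA area roughly as
 *
 *	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 *				 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
 *
 * and later returns the pages to the buddy system with
 * free_contig_range(pfn, count).
 */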
9358 
9359 static int __alloc_contig_pages(unsigned long start_pfn,
9360 				unsigned long nr_pages, gfp_t gfp_mask)
9361 {
9362 	unsigned long end_pfn = start_pfn + nr_pages;
9363 
9364 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9365 				  gfp_mask);
9366 }
9367 
9368 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9369 				   unsigned long nr_pages)
9370 {
9371 	unsigned long i, end_pfn = start_pfn + nr_pages;
9372 	struct page *page;
9373 
9374 	for (i = start_pfn; i < end_pfn; i++) {
9375 		page = pfn_to_online_page(i);
9376 		if (!page)
9377 			return false;
9378 
9379 		if (page_zone(page) != z)
9380 			return false;
9381 
9382 		if (PageReserved(page))
9383 			return false;
9384 	}
9385 	return true;
9386 }
9387 
9388 static bool zone_spans_last_pfn(const struct zone *zone,
9389 				unsigned long start_pfn, unsigned long nr_pages)
9390 {
9391 	unsigned long last_pfn = start_pfn + nr_pages - 1;
9392 
9393 	return zone_spans_pfn(zone, last_pfn);
9394 }
9395 
9396 /**
9397  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9398  * @nr_pages:	Number of contiguous pages to allocate
9399  * @gfp_mask:	GFP mask to limit search and used during compaction
9400  * @nid:	Target node
9401  * @nodemask:	Mask for other possible nodes
9402  *
9403  * This routine is a wrapper around alloc_contig_range(). It scans over zones
9404  * on an applicable zonelist to find a contiguous pfn range which can then be
9405  * tried for allocation with alloc_contig_range(). This routine is intended
9406  * for allocation requests which cannot be fulfilled with the buddy allocator.
9407  *
9408  * The allocated memory is always aligned to a page boundary. If nr_pages is a
9409  * power of two, then the allocated range is also guaranteed to be aligned
9410  * to nr_pages (e.g. a 1GB request would be aligned to 1GB).
9411  *
9412  * Allocated pages can be freed with free_contig_range() or by manually calling
9413  * __free_page() on each allocated page.
9414  *
9415  * Return: pointer to contiguous pages on success, or NULL if not successful.
9416  */
9417 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9418 				int nid, nodemask_t *nodemask)
9419 {
9420 	unsigned long ret, pfn, flags;
9421 	struct zonelist *zonelist;
9422 	struct zone *zone;
9423 	struct zoneref *z;
9424 
9425 	zonelist = node_zonelist(nid, gfp_mask);
9426 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
9427 					gfp_zone(gfp_mask), nodemask) {
9428 		spin_lock_irqsave(&zone->lock, flags);
9429 
9430 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9431 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9432 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9433 				/*
9434 				 * We release the zone lock here because
9435 				 * alloc_contig_range() will also lock the zone
9436 				 * at some point. If there's an allocation
9437 				 * spinning on this lock, it may win the race
9438 				 * and cause alloc_contig_range() to fail...
9439 				 */
9440 				spin_unlock_irqrestore(&zone->lock, flags);
9441 				ret = __alloc_contig_pages(pfn, nr_pages,
9442 							gfp_mask);
9443 				if (!ret)
9444 					return pfn_to_page(pfn);
9445 				spin_lock_irqsave(&zone->lock, flags);
9446 			}
9447 			pfn += nr_pages;
9448 		}
9449 		spin_unlock_irqrestore(&zone->lock, flags);
9450 	}
9451 	return NULL;
9452 }
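
/*
 * Example caller, a sketch modelled on hugetlb's gigantic-page allocation
 * in mm/hugetlb.c (details vary by version):
 *
 *	page = alloc_contig_pages(1UL << huge_page_order(h), gfp_mask,
 *				  nid, nodemask);
 *
 * Since the page count of a 1GB gigantic page is a power of two, the
 * returned range is naturally aligned, as documented above.
 */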
9453 #endif /* CONFIG_CONTIG_ALLOC */
9454 
9455 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9456 {
9457 	unsigned long count = 0;
9458 
9459 	for (; nr_pages--; pfn++) {
9460 		struct page *page = pfn_to_page(pfn);
9461 
9462 		count += page_count(page) != 1;
9463 		__free_page(page);
9464 	}
9465 	WARN(count != 0, "%lu pages are still in use!\n", count);
9466 }
9467 EXPORT_SYMBOL(free_contig_range);
9468 
9469 /*
9470  * Effectively disable pcplists for the zone by setting the high limit to 0
9471  * and draining all cpus. A concurrent page free on another CPU that's about
9472  * to put the page on the pcplist will either finish before the drain, in
9473  * which case the page is drained, or observe the new limit and skip the pcplist.
9474  *
9475  * Must be paired with a call to zone_pcp_enable().
9476  */
9477 void zone_pcp_disable(struct zone *zone)
9478 {
9479 	mutex_lock(&pcp_batch_high_lock);
9480 	__zone_set_pageset_high_and_batch(zone, 0, 1);
9481 	__drain_all_pages(zone, true);
9482 }
9483 
9484 void zone_pcp_enable(struct zone *zone)
9485 {
9486 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9487 	mutex_unlock(&pcp_batch_high_lock);
9488 }
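
/*
 * Typical usage pattern, as in memory offlining (mm/memory_hotplug.c):
 * keep a range out of the per-cpu caches while it is being isolated,
 * then restore normal operation:
 *
 *	zone_pcp_disable(zone);
 *	... isolate and offline the range ...
 *	zone_pcp_enable(zone);
 */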
9489 
9490 void zone_pcp_reset(struct zone *zone)
9491 {
9492 	int cpu;
9493 	struct per_cpu_zonestat *pzstats;
9494 
9495 	if (zone->per_cpu_pageset != &boot_pageset) {
9496 		for_each_online_cpu(cpu) {
9497 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9498 			drain_zonestat(zone, pzstats);
9499 		}
9500 		free_percpu(zone->per_cpu_pageset);
9501 		zone->per_cpu_pageset = &boot_pageset;
9502 		if (zone->per_cpu_zonestats != &boot_zonestats) {
9503 			free_percpu(zone->per_cpu_zonestats);
9504 			zone->per_cpu_zonestats = &boot_zonestats;
9505 		}
9506 	}
9507 }
9508 
9509 #ifdef CONFIG_MEMORY_HOTREMOVE
9510 /*
9511  * The range must lie in a single zone, must not contain holes, must span
9512  * full sections, and all of its pages must be isolated before calling this.
9513  */
9514 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9515 {
9516 	unsigned long pfn = start_pfn;
9517 	struct page *page;
9518 	struct zone *zone;
9519 	unsigned int order;
9520 	unsigned long flags;
9521 
9522 	offline_mem_sections(pfn, end_pfn);
9523 	zone = page_zone(pfn_to_page(pfn));
9524 	spin_lock_irqsave(&zone->lock, flags);
9525 	while (pfn < end_pfn) {
9526 		page = pfn_to_page(pfn);
9527 		/*
9528 		 * An HWPoisoned page may not be in the buddy system, and
9529 		 * its page_count() may not be 0.
9530 		 */
9531 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9532 			pfn++;
9533 			continue;
9534 		}
9535 		/*
9536 		 * At this point all remaining PageOffline() pages have a
9537 		 * reference count of 0 and can simply be skipped.
9538 		 */
9539 		if (PageOffline(page)) {
9540 			BUG_ON(page_count(page));
9541 			BUG_ON(PageBuddy(page));
9542 			pfn++;
9543 			continue;
9544 		}
9545 
9546 		BUG_ON(page_count(page));
9547 		BUG_ON(!PageBuddy(page));
9548 		order = buddy_order(page);
9549 		del_page_from_free_list(page, zone, order);
9550 		pfn += (1 << order);
9551 	}
9552 	spin_unlock_irqrestore(&zone->lock, flags);
9553 }
9554 #endif
9555 
9556 /*
9557  * This function returns a stable result only if called under zone lock.
9558  */
9559 bool is_free_buddy_page(struct page *page)
9560 {
9561 	unsigned long pfn = page_to_pfn(page);
9562 	unsigned int order;
9563 
9564 	for (order = 0; order < MAX_ORDER; order++) {
9565 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9566 
9567 		if (PageBuddy(page_head) &&
9568 		    buddy_order_unsafe(page_head) >= order)
9569 			break;
9570 	}
9571 
9572 	return order < MAX_ORDER;
9573 }
9574 EXPORT_SYMBOL(is_free_buddy_page);
9575 
9576 #ifdef CONFIG_MEMORY_FAILURE
9577 /*
9578  * Break down a higher-order page into sub-pages, and keep our target out of
9579  * the buddy allocator.
9580  */
9581 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9582 				   struct page *target, int low, int high,
9583 				   int migratetype)
9584 {
9585 	unsigned long size = 1 << high;
9586 	struct page *current_buddy, *next_page;
9587 
9588 	while (high > low) {
9589 		high--;
9590 		size >>= 1;
9591 
9592 		if (target >= &page[size]) {
9593 			next_page = page + size;
9594 			current_buddy = page;
9595 		} else {
9596 			next_page = page;
9597 			current_buddy = page + size;
9598 		}
9599 
9600 		if (set_page_guard(zone, current_buddy, high, migratetype))
9601 			continue;
9602 
9603 		if (current_buddy != target) {
9604 			add_to_free_list(current_buddy, zone, high, migratetype);
9605 			set_buddy_order(current_buddy, high);
9606 			page = next_page;
9607 		}
9608 	}
9609 }
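
/*
 * Worked example (orders assumed): isolating a poisoned page that sits in
 * an order-3 buddy block with low = 0 and high = 3.  Each pass halves the
 * block; the order-2, order-1 and order-0 halves that do not contain the
 * target go back on the free lists, leaving only the single target page
 * out of the buddy allocator.
 */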
9610 
9611 /*
9612  * Take a page that will be marked as poisoned off the buddy allocator.
9613  */
9614 bool take_page_off_buddy(struct page *page)
9615 {
9616 	struct zone *zone = page_zone(page);
9617 	unsigned long pfn = page_to_pfn(page);
9618 	unsigned long flags;
9619 	unsigned int order;
9620 	bool ret = false;
9621 
9622 	spin_lock_irqsave(&zone->lock, flags);
9623 	for (order = 0; order < MAX_ORDER; order++) {
9624 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9625 		int page_order = buddy_order(page_head);
9626 
9627 		if (PageBuddy(page_head) && page_order >= order) {
9628 			unsigned long pfn_head = page_to_pfn(page_head);
9629 			int migratetype = get_pfnblock_migratetype(page_head,
9630 								   pfn_head);
9631 
9632 			del_page_from_free_list(page_head, zone, page_order);
9633 			break_down_buddy_pages(zone, page_head, page, 0,
9634 						page_order, migratetype);
9635 			SetPageHWPoisonTakenOff(page);
9636 			if (!is_migrate_isolate(migratetype))
9637 				__mod_zone_freepage_state(zone, -1, migratetype);
9638 			ret = true;
9639 			break;
9640 		}
9641 		if (page_count(page_head) > 0)
9642 			break;
9643 	}
9644 	spin_unlock_irqrestore(&zone->lock, flags);
9645 	return ret;
9646 }
9647 
9648 /*
9649  * Cancel takeoff done by take_page_off_buddy().
9650  */
9651 bool put_page_back_buddy(struct page *page)
9652 {
9653 	struct zone *zone = page_zone(page);
9654 	unsigned long pfn = page_to_pfn(page);
9655 	unsigned long flags;
9656 	int migratetype = get_pfnblock_migratetype(page, pfn);
9657 	bool ret = false;
9658 
9659 	spin_lock_irqsave(&zone->lock, flags);
9660 	if (put_page_testzero(page)) {
9661 		ClearPageHWPoisonTakenOff(page);
9662 		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
9663 		if (TestClearPageHWPoison(page)) {
9664 			ret = true;
9665 		}
9666 	}
9667 	spin_unlock_irqrestore(&zone->lock, flags);
9668 
9669 	return ret;
9670 }
9671 #endif
9672 
9673 #ifdef CONFIG_ZONE_DMA
9674 bool has_managed_dma(void)
9675 {
9676 	struct pglist_data *pgdat;
9677 
9678 	for_each_online_pgdat(pgdat) {
9679 		struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9680 
9681 		if (managed_zone(zone))
9682 			return true;
9683 	}
9684 	return false;
9685 }
9686 #endif /* CONFIG_ZONE_DMA */
9687