xref: /openbmc/linux/mm/page_alloc.c (revision a16be368)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list; the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/migrate.h>
61 #include <linux/hugetlb.h>
62 #include <linux/sched/rt.h>
63 #include <linux/sched/mm.h>
64 #include <linux/page_owner.h>
65 #include <linux/kthread.h>
66 #include <linux/memcontrol.h>
67 #include <linux/ftrace.h>
68 #include <linux/lockdep.h>
69 #include <linux/nmi.h>
70 #include <linux/psi.h>
71 #include <linux/padata.h>
72 
73 #include <asm/sections.h>
74 #include <asm/tlbflush.h>
75 #include <asm/div64.h>
76 #include "internal.h"
77 #include "shuffle.h"
78 #include "page_reporting.h"
79 
80 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
81 static DEFINE_MUTEX(pcp_batch_high_lock);
82 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
83 
84 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
85 DEFINE_PER_CPU(int, numa_node);
86 EXPORT_PER_CPU_SYMBOL(numa_node);
87 #endif
88 
89 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
90 
91 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
92 /*
93  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
94  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
95  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
96  * defined in <linux/topology.h>.
97  */
98 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
99 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
100 #endif
101 
102 /* work_structs for global per-cpu drains */
103 struct pcpu_drain {
104 	struct zone *zone;
105 	struct work_struct work;
106 };
107 static DEFINE_MUTEX(pcpu_drain_mutex);
108 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
109 
110 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
111 volatile unsigned long latent_entropy __latent_entropy;
112 EXPORT_SYMBOL(latent_entropy);
113 #endif
114 
115 /*
116  * Array of node states.
117  */
118 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
119 	[N_POSSIBLE] = NODE_MASK_ALL,
120 	[N_ONLINE] = { { [0] = 1UL } },
121 #ifndef CONFIG_NUMA
122 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
123 #ifdef CONFIG_HIGHMEM
124 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
125 #endif
126 	[N_MEMORY] = { { [0] = 1UL } },
127 	[N_CPU] = { { [0] = 1UL } },
128 #endif	/* NUMA */
129 };
130 EXPORT_SYMBOL(node_states);
131 
132 atomic_long_t _totalram_pages __read_mostly;
133 EXPORT_SYMBOL(_totalram_pages);
134 unsigned long totalreserve_pages __read_mostly;
135 unsigned long totalcma_pages __read_mostly;
136 
137 int percpu_pagelist_fraction;
138 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
139 #ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
140 DEFINE_STATIC_KEY_TRUE(init_on_alloc);
141 #else
142 DEFINE_STATIC_KEY_FALSE(init_on_alloc);
143 #endif
144 EXPORT_SYMBOL(init_on_alloc);
145 
146 #ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
147 DEFINE_STATIC_KEY_TRUE(init_on_free);
148 #else
149 DEFINE_STATIC_KEY_FALSE(init_on_free);
150 #endif
151 EXPORT_SYMBOL(init_on_free);
152 
153 static int __init early_init_on_alloc(char *buf)
154 {
155 	int ret;
156 	bool bool_result;
157 
158 	if (!buf)
159 		return -EINVAL;
160 	ret = kstrtobool(buf, &bool_result);
    	if (ret)
    		return ret;
161 	if (bool_result && page_poisoning_enabled())
162 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n");
163 	if (bool_result)
164 		static_branch_enable(&init_on_alloc);
165 	else
166 		static_branch_disable(&init_on_alloc);
167 	return ret;
168 }
169 early_param("init_on_alloc", early_init_on_alloc);
170 
171 static int __init early_init_on_free(char *buf)
172 {
173 	int ret;
174 	bool bool_result;
175 
176 	if (!buf)
177 		return -EINVAL;
178 	ret = kstrtobool(buf, &bool_result);
    	if (ret)
    		return ret;
179 	if (bool_result && page_poisoning_enabled())
180 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n");
181 	if (bool_result)
182 		static_branch_enable(&init_on_free);
183 	else
184 		static_branch_disable(&init_on_free);
185 	return ret;
186 }
187 early_param("init_on_free", early_init_on_free);
188 
189 /*
190  * A cached value of the page's pageblock's migratetype, used when the page is
191  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
192  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
193  * Also the migratetype set in the page does not necessarily match the pcplist
194  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
195  * other index - this ensures that it will be put on the correct CMA freelist.
196  */
197 static inline int get_pcppage_migratetype(struct page *page)
198 {
199 	return page->index;
200 }
201 
202 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
203 {
204 	page->index = migratetype;
205 }
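
/*
 * Illustrative sketch (hypothetical helper, not part of page_alloc.c): the
 * free path caches the pageblock's migratetype in the page before the page
 * goes onto a pcplist, so that draining the pcplist later can skip the
 * pageblock bitmap lookup.
 */
static inline void __maybe_unused example_pcp_migratetype_cache(struct page *page,
								int migratetype)
{
	/* Cache the (possibly soon stale) migratetype in page->index... */
	set_pcppage_migratetype(page, migratetype);

	/* ...and read it back cheaply when freeing from the pcplist. */
	VM_BUG_ON_PAGE(get_pcppage_migratetype(page) != migratetype, page);
}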
206 
207 #ifdef CONFIG_PM_SLEEP
208 /*
209  * The following functions are used by the suspend/hibernate code to temporarily
210  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
211  * while devices are suspended.  To avoid races with the suspend/hibernate code,
212  * they should always be called with system_transition_mutex held
213  * (gfp_allowed_mask also should only be modified with system_transition_mutex
214  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
215  * with that modification).
216  */
217 
218 static gfp_t saved_gfp_mask;
219 
220 void pm_restore_gfp_mask(void)
221 {
222 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
223 	if (saved_gfp_mask) {
224 		gfp_allowed_mask = saved_gfp_mask;
225 		saved_gfp_mask = 0;
226 	}
227 }
228 
229 void pm_restrict_gfp_mask(void)
230 {
231 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
232 	WARN_ON(saved_gfp_mask);
233 	saved_gfp_mask = gfp_allowed_mask;
234 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
235 }
236 
237 bool pm_suspended_storage(void)
238 {
239 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
240 		return false;
241 	return true;
242 }
243 #endif /* CONFIG_PM_SLEEP */
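
/*
 * Illustrative sketch (hypothetical call site, not part of this file): the
 * suspend/hibernate core brackets the device-suspended window with the two
 * helpers above, holding system_transition_mutex throughout:
 *
 *	mutex_lock(&system_transition_mutex);
 *	pm_restrict_gfp_mask();		// allocations lose __GFP_IO | __GFP_FS
 *	... suspend devices, write the image ...
 *	pm_restore_gfp_mask();		// I/O-backed allocations allowed again
 *	mutex_unlock(&system_transition_mutex);
 */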
244 
245 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
246 unsigned int pageblock_order __read_mostly;
247 #endif
248 
249 static void __free_pages_ok(struct page *page, unsigned int order);
250 
251 /*
252  * results with 256, 32 in the lowmem_reserve sysctl:
253  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
254  *	1G machine -> (16M dma, 784M normal, 224M high)
255  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
256  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
257  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
258  *
259  * TBD: should special case ZONE_DMA32 machines here - in those we normally
260  * don't need any ZONE_NORMAL reservation
261  */
262 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
263 #ifdef CONFIG_ZONE_DMA
264 	[ZONE_DMA] = 256,
265 #endif
266 #ifdef CONFIG_ZONE_DMA32
267 	[ZONE_DMA32] = 256,
268 #endif
269 	[ZONE_NORMAL] = 32,
270 #ifdef CONFIG_HIGHMEM
271 	[ZONE_HIGHMEM] = 0,
272 #endif
273 	[ZONE_MOVABLE] = 0,
274 };
275 
276 static char * const zone_names[MAX_NR_ZONES] = {
277 #ifdef CONFIG_ZONE_DMA
278 	 "DMA",
279 #endif
280 #ifdef CONFIG_ZONE_DMA32
281 	 "DMA32",
282 #endif
283 	 "Normal",
284 #ifdef CONFIG_HIGHMEM
285 	 "HighMem",
286 #endif
287 	 "Movable",
288 #ifdef CONFIG_ZONE_DEVICE
289 	 "Device",
290 #endif
291 };
292 
293 const char * const migratetype_names[MIGRATE_TYPES] = {
294 	"Unmovable",
295 	"Movable",
296 	"Reclaimable",
297 	"HighAtomic",
298 #ifdef CONFIG_CMA
299 	"CMA",
300 #endif
301 #ifdef CONFIG_MEMORY_ISOLATION
302 	"Isolate",
303 #endif
304 };
305 
306 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
307 	[NULL_COMPOUND_DTOR] = NULL,
308 	[COMPOUND_PAGE_DTOR] = free_compound_page,
309 #ifdef CONFIG_HUGETLB_PAGE
310 	[HUGETLB_PAGE_DTOR] = free_huge_page,
311 #endif
312 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
313 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
314 #endif
315 };
316 
317 int min_free_kbytes = 1024;
318 int user_min_free_kbytes = -1;
319 #ifdef CONFIG_DISCONTIGMEM
320 /*
321  * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
322  * are not on separate NUMA nodes. Functionally this works but with
323  * watermark_boost_factor, it can reclaim prematurely as the ranges can be
324  * quite small. By default, do not boost watermarks on discontigmem as in
325  * many cases very high-order allocations like THP are likely to be
326  * unsupported and the premature reclaim offsets the advantage of long-term
327  * fragmentation avoidance.
328  */
329 int watermark_boost_factor __read_mostly;
330 #else
331 int watermark_boost_factor __read_mostly = 15000;
332 #endif
333 int watermark_scale_factor = 10;
334 
335 static unsigned long nr_kernel_pages __initdata;
336 static unsigned long nr_all_pages __initdata;
337 static unsigned long dma_reserve __initdata;
338 
339 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
340 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
341 static unsigned long required_kernelcore __initdata;
342 static unsigned long required_kernelcore_percent __initdata;
343 static unsigned long required_movablecore __initdata;
344 static unsigned long required_movablecore_percent __initdata;
345 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
346 static bool mirrored_kernelcore __meminitdata;
347 
348 /* movable_zone is the "real" zone that pages in ZONE_MOVABLE are taken from */
349 int movable_zone;
350 EXPORT_SYMBOL(movable_zone);
351 
352 #if MAX_NUMNODES > 1
353 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
354 unsigned int nr_online_nodes __read_mostly = 1;
355 EXPORT_SYMBOL(nr_node_ids);
356 EXPORT_SYMBOL(nr_online_nodes);
357 #endif
358 
359 int page_group_by_mobility_disabled __read_mostly;
360 
361 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
362 /*
363  * During boot we initialize deferred pages on-demand, as needed, but once
364  * page_alloc_init_late() has finished, the deferred pages are all initialized,
365  * and we can permanently disable that path.
366  */
367 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
368 
369 /*
370  * Call kasan_free_pages() only after deferred memory initialization
371  * has completed. Poisoning pages during deferred memory init would greatly
372  * lengthen the process and cause problems in large memory systems, as
373  * deferred page initialization is done with interrupts disabled.
374  *
375  * Assuming that there will be no reference to those newly initialized
376  * pages before they are ever allocated, this should have no effect on
377  * KASAN memory tracking as the poison will be properly inserted at page
378  * allocation time. The only corner case is when pages are allocated by
379  * on-demand allocation and then freed again before the deferred pages
380  * initialization is done, but this is not likely to happen.
381  */
382 static inline void kasan_free_nondeferred_pages(struct page *page, int order)
383 {
384 	if (!static_branch_unlikely(&deferred_pages))
385 		kasan_free_pages(page, order);
386 }
387 
388 /* Returns true if the struct page for the pfn is uninitialised */
389 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
390 {
391 	int nid = early_pfn_to_nid(pfn);
392 
393 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
394 		return true;
395 
396 	return false;
397 }
398 
399 /*
400  * Returns true when the remaining initialisation should be deferred until
401  * later in the boot cycle when it can be parallelised.
402  */
403 static bool __meminit
404 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
405 {
406 	static unsigned long prev_end_pfn, nr_initialised;
407 
408 	/*
409 	 * The static prev_end_pfn holds the end of the previous zone. No
410 	 * locking is needed: this is called very early in boot, before smp_init().
411 	 */
412 	if (prev_end_pfn != end_pfn) {
413 		prev_end_pfn = end_pfn;
414 		nr_initialised = 0;
415 	}
416 
417 	/* Always populate low zones for address-constrained allocations */
418 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
419 		return false;
420 
421 	/*
422 	 * We start with only one section of pages; more pages are added as
423 	 * needed until the rest of deferred pages are initialized.
424 	 */
425 	nr_initialised++;
426 	if ((nr_initialised > PAGES_PER_SECTION) &&
427 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
428 		NODE_DATA(nid)->first_deferred_pfn = pfn;
429 		return true;
430 	}
431 	return false;
432 }
433 #else
434 #define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)
435 
436 static inline bool early_page_uninitialised(unsigned long pfn)
437 {
438 	return false;
439 }
440 
441 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
442 {
443 	return false;
444 }
445 #endif
446 
447 /* Return a pointer to the bitmap storing bits affecting a block of pages */
448 static inline unsigned long *get_pageblock_bitmap(struct page *page,
449 							unsigned long pfn)
450 {
451 #ifdef CONFIG_SPARSEMEM
452 	return section_to_usemap(__pfn_to_section(pfn));
453 #else
454 	return page_zone(page)->pageblock_flags;
455 #endif /* CONFIG_SPARSEMEM */
456 }
457 
458 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
459 {
460 #ifdef CONFIG_SPARSEMEM
461 	pfn &= (PAGES_PER_SECTION-1);
462 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
463 #else
464 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
465 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
466 #endif /* CONFIG_SPARSEMEM */
467 }
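
/*
 * Worked example (illustrative, assuming pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a pfn at offset 0x2400 into its section maps to
 * bit index (0x2400 >> 9) * 4 == 72, i.e. that pageblock's four flag bits
 * occupy bits 72..75 of the bitmap word array.
 */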
468 
469 /**
470  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
471  * @page: The page within the block of interest
472  * @pfn: The target page frame number
473  * @end_bitidx: The last bit of interest to retrieve
474  * @mask: mask of bits that the caller is interested in
475  *
476  * Return: pageblock_bits flags
477  */
478 static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
479 					unsigned long pfn,
480 					unsigned long end_bitidx,
481 					unsigned long mask)
482 {
483 	unsigned long *bitmap;
484 	unsigned long bitidx, word_bitidx;
485 	unsigned long word;
486 
487 	bitmap = get_pageblock_bitmap(page, pfn);
488 	bitidx = pfn_to_bitidx(page, pfn);
489 	word_bitidx = bitidx / BITS_PER_LONG;
490 	bitidx &= (BITS_PER_LONG-1);
491 
492 	word = bitmap[word_bitidx];
493 	bitidx += end_bitidx;
494 	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
495 }
496 
497 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
498 					unsigned long end_bitidx,
499 					unsigned long mask)
500 {
501 	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
502 }
503 
504 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
505 {
506 	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
507 }
508 
509 /**
510  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
511  * @page: The page within the block of interest
512  * @flags: The flags to set
513  * @pfn: The target page frame number
514  * @end_bitidx: The last bit of interest
515  * @mask: mask of bits that the caller is interested in
516  */
517 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
518 					unsigned long pfn,
519 					unsigned long end_bitidx,
520 					unsigned long mask)
521 {
522 	unsigned long *bitmap;
523 	unsigned long bitidx, word_bitidx;
524 	unsigned long old_word, word;
525 
526 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
527 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
528 
529 	bitmap = get_pageblock_bitmap(page, pfn);
530 	bitidx = pfn_to_bitidx(page, pfn);
531 	word_bitidx = bitidx / BITS_PER_LONG;
532 	bitidx &= (BITS_PER_LONG-1);
533 
534 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
535 
536 	bitidx += end_bitidx;
537 	mask <<= (BITS_PER_LONG - bitidx - 1);
538 	flags <<= (BITS_PER_LONG - bitidx - 1);
539 
540 	word = READ_ONCE(bitmap[word_bitidx]);
541 	for (;;) {
542 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
543 		if (word == old_word)
544 			break;
545 		word = old_word;
546 	}
547 }
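
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * lock-free read-modify-write pattern used by set_pfnblock_flags_mask()
 * above, reduced to its core. The loop retries until no other CPU changed
 * the word between the read and the cmpxchg.
 */
static inline void __maybe_unused example_rmw_word(unsigned long *addr,
						   unsigned long mask,
						   unsigned long flags)
{
	unsigned long old_word, word = READ_ONCE(*addr);

	for (;;) {
		old_word = cmpxchg(addr, word, (word & ~mask) | flags);
		if (word == old_word)
			break;		/* our update was published */
		word = old_word;	/* lost a race; retry on the new value */
	}
}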
548 
549 void set_pageblock_migratetype(struct page *page, int migratetype)
550 {
551 	if (unlikely(page_group_by_mobility_disabled &&
552 		     migratetype < MIGRATE_PCPTYPES))
553 		migratetype = MIGRATE_UNMOVABLE;
554 
555 	set_pageblock_flags_group(page, (unsigned long)migratetype,
556 					PB_migrate, PB_migrate_end);
557 }
558 
559 #ifdef CONFIG_DEBUG_VM
560 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
561 {
562 	int ret = 0;
563 	unsigned seq;
564 	unsigned long pfn = page_to_pfn(page);
565 	unsigned long sp, start_pfn;
566 
567 	do {
568 		seq = zone_span_seqbegin(zone);
569 		start_pfn = zone->zone_start_pfn;
570 		sp = zone->spanned_pages;
571 		if (!zone_spans_pfn(zone, pfn))
572 			ret = 1;
573 	} while (zone_span_seqretry(zone, seq));
574 
575 	if (ret)
576 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
577 			pfn, zone_to_nid(zone), zone->name,
578 			start_pfn, start_pfn + sp);
579 
580 	return ret;
581 }
582 
583 static int page_is_consistent(struct zone *zone, struct page *page)
584 {
585 	if (!pfn_valid_within(page_to_pfn(page)))
586 		return 0;
587 	if (zone != page_zone(page))
588 		return 0;
589 
590 	return 1;
591 }
592 /*
593  * Temporary debugging check for pages not lying within a given zone.
594  */
595 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
596 {
597 	if (page_outside_zone_boundaries(zone, page))
598 		return 1;
599 	if (!page_is_consistent(zone, page))
600 		return 1;
601 
602 	return 0;
603 }
604 #else
605 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
606 {
607 	return 0;
608 }
609 #endif
610 
611 static void bad_page(struct page *page, const char *reason)
612 {
613 	static unsigned long resume;
614 	static unsigned long nr_shown;
615 	static unsigned long nr_unshown;
616 
617 	/*
618 	 * Allow a burst of 60 reports, then keep quiet for that minute;
619 	 * or allow a steady drip of one report per second.
620 	 */
621 	if (nr_shown == 60) {
622 		if (time_before(jiffies, resume)) {
623 			nr_unshown++;
624 			goto out;
625 		}
626 		if (nr_unshown) {
627 			pr_alert(
628 			      "BUG: Bad page state: %lu messages suppressed\n",
629 				nr_unshown);
630 			nr_unshown = 0;
631 		}
632 		nr_shown = 0;
633 	}
634 	if (nr_shown++ == 0)
635 		resume = jiffies + 60 * HZ;
636 
637 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
638 		current->comm, page_to_pfn(page));
639 	__dump_page(page, reason);
640 	dump_page_owner(page);
641 
642 	print_modules();
643 	dump_stack();
644 out:
645 	/* Leave bad fields for debug, except PageBuddy could make trouble */
646 	page_mapcount_reset(page); /* remove PageBuddy */
647 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
648 }
649 
650 /*
651  * Higher-order pages are called "compound pages". They are structured as follows:
652  *
653  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
654  *
655  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
656  * in bit 0 of page->compound_head. The remaining bits point to the head page.
657  *
658  * The first tail page's ->compound_dtor holds the offset into the array of
659  * compound page destructors. See compound_page_dtors.
660  *
661  * The first tail page's ->compound_order holds the order of allocation.
662  * This usage means that zero-order pages may not be compound.
663  */
664 
665 void free_compound_page(struct page *page)
666 {
667 	mem_cgroup_uncharge(page);
668 	__free_pages_ok(page, compound_order(page));
669 }
670 
671 void prep_compound_page(struct page *page, unsigned int order)
672 {
673 	int i;
674 	int nr_pages = 1 << order;
675 
676 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
677 	set_compound_order(page, order);
678 	__SetPageHead(page);
679 	for (i = 1; i < nr_pages; i++) {
680 		struct page *p = page + i;
681 		set_page_count(p, 0);
682 		p->mapping = TAIL_MAPPING;
683 		set_compound_head(p, page);
684 	}
685 	atomic_set(compound_mapcount_ptr(page), -1);
686 	if (hpage_pincount_available(page))
687 		atomic_set(compound_pincount_ptr(page), 0);
688 }
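
/*
 * Worked example (illustrative): after prep_compound_page(page, 2), the
 * four constituent struct pages look like:
 *
 *	page + 0:    PG_head set, compound_order == 2, compound_mapcount == -1
 *	page + 1..3: PageTail(), compound_head encodes 'page' with bit 0 set,
 *		     ->mapping == TAIL_MAPPING, refcount 0
 */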
689 
690 #ifdef CONFIG_DEBUG_PAGEALLOC
691 unsigned int _debug_guardpage_minorder;
692 
693 bool _debug_pagealloc_enabled_early __read_mostly
694 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
695 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
696 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
697 EXPORT_SYMBOL(_debug_pagealloc_enabled);
698 
699 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
700 
701 static int __init early_debug_pagealloc(char *buf)
702 {
703 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
704 }
705 early_param("debug_pagealloc", early_debug_pagealloc);
706 
707 void init_debug_pagealloc(void)
708 {
709 	if (!debug_pagealloc_enabled())
710 		return;
711 
712 	static_branch_enable(&_debug_pagealloc_enabled);
713 
714 	if (!debug_guardpage_minorder())
715 		return;
716 
717 	static_branch_enable(&_debug_guardpage_enabled);
718 }
719 
720 static int __init debug_guardpage_minorder_setup(char *buf)
721 {
722 	unsigned long res;
723 
724 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
725 		pr_err("Bad debug_guardpage_minorder value\n");
726 		return 0;
727 	}
728 	_debug_guardpage_minorder = res;
729 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
730 	return 0;
731 }
732 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
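
/*
 * Example (illustrative): booting with
 *
 *	debug_pagealloc=on debug_guardpage_minorder=1
 *
 * enables the checks above and keeps the buddies of low-order allocations
 * as unmapped guard pages, trading memory for earlier detection of
 * out-of-bounds writes.
 */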
733 
734 static inline bool set_page_guard(struct zone *zone, struct page *page,
735 				unsigned int order, int migratetype)
736 {
737 	if (!debug_guardpage_enabled())
738 		return false;
739 
740 	if (order >= debug_guardpage_minorder())
741 		return false;
742 
743 	__SetPageGuard(page);
744 	INIT_LIST_HEAD(&page->lru);
745 	set_page_private(page, order);
746 	/* Guard pages are not available for any usage */
747 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
748 
749 	return true;
750 }
751 
752 static inline void clear_page_guard(struct zone *zone, struct page *page,
753 				unsigned int order, int migratetype)
754 {
755 	if (!debug_guardpage_enabled())
756 		return;
757 
758 	__ClearPageGuard(page);
759 
760 	set_page_private(page, 0);
761 	if (!is_migrate_isolate(migratetype))
762 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
763 }
764 #else
765 static inline bool set_page_guard(struct zone *zone, struct page *page,
766 			unsigned int order, int migratetype) { return false; }
767 static inline void clear_page_guard(struct zone *zone, struct page *page,
768 				unsigned int order, int migratetype) {}
769 #endif
770 
771 static inline void set_page_order(struct page *page, unsigned int order)
772 {
773 	set_page_private(page, order);
774 	__SetPageBuddy(page);
775 }
776 
777 /*
778  * This function checks whether a page is free && is the buddy of 'page'.
779  * We can coalesce a page and its buddy if
780  * (a) the buddy is not in a hole (check before calling!) &&
781  * (b) the buddy is in the buddy system &&
782  * (c) a page and its buddy have the same order &&
783  * (d) a page and its buddy are in the same zone.
784  *
785  * For recording whether a page is in the buddy system, we set PageBuddy.
786  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
787  *
788  * For recording page's order, we use page_private(page).
789  */
790 static inline bool page_is_buddy(struct page *page, struct page *buddy,
791 							unsigned int order)
792 {
793 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
794 		return false;
795 
796 	if (page_order(buddy) != order)
797 		return false;
798 
799 	/*
800 	 * zone check is done late to avoid uselessly calculating
801 	 * zone/node ids for pages that could never merge.
802 	 */
803 	if (page_zone_id(page) != page_zone_id(buddy))
804 		return false;
805 
806 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
807 
808 	return true;
809 }
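
/*
 * Worked example (illustrative): buddies differ only in bit 'order' of the
 * pfn, i.e. __find_buddy_pfn(pfn, order) == pfn ^ (1 << order). For pfn
 * 0x100 at order 3 the buddy starts at 0x108; if page_is_buddy() accepts
 * it, the merged order-4 block starts at 0x100 & 0x108 == 0x100.
 */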
810 
811 #ifdef CONFIG_COMPACTION
812 static inline struct capture_control *task_capc(struct zone *zone)
813 {
814 	struct capture_control *capc = current->capture_control;
815 
816 	return capc &&
817 		!(current->flags & PF_KTHREAD) &&
818 		!capc->page &&
819 		capc->cc->zone == zone &&
820 		capc->cc->direct_compaction ? capc : NULL;
821 }
822 
823 static inline bool
824 compaction_capture(struct capture_control *capc, struct page *page,
825 		   int order, int migratetype)
826 {
827 	if (!capc || order != capc->cc->order)
828 		return false;
829 
830 	/* Do not accidentally pollute CMA or isolated regions */
831 	if (is_migrate_cma(migratetype) ||
832 	    is_migrate_isolate(migratetype))
833 		return false;
834 
835 	/*
836 	 * Do not let lower-order allocations pollute a movable pageblock.
837 	 * This might let an unmovable request use a reclaimable pageblock
838 	 * and vice-versa but no more than normal fallback logic which can
839 	 * have trouble finding a high-order free page.
840 	 */
841 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
842 		return false;
843 
844 	capc->page = page;
845 	return true;
846 }
847 
848 #else
849 static inline struct capture_control *task_capc(struct zone *zone)
850 {
851 	return NULL;
852 }
853 
854 static inline bool
855 compaction_capture(struct capture_control *capc, struct page *page,
856 		   int order, int migratetype)
857 {
858 	return false;
859 }
860 #endif /* CONFIG_COMPACTION */
861 
862 /* Used for pages not on another list */
863 static inline void add_to_free_list(struct page *page, struct zone *zone,
864 				    unsigned int order, int migratetype)
865 {
866 	struct free_area *area = &zone->free_area[order];
867 
868 	list_add(&page->lru, &area->free_list[migratetype]);
869 	area->nr_free++;
870 }
871 
872 /* Used for pages not on another list */
873 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
874 					 unsigned int order, int migratetype)
875 {
876 	struct free_area *area = &zone->free_area[order];
877 
878 	list_add_tail(&page->lru, &area->free_list[migratetype]);
879 	area->nr_free++;
880 }
881 
882 /* Used for pages which are on another list */
883 static inline void move_to_free_list(struct page *page, struct zone *zone,
884 				     unsigned int order, int migratetype)
885 {
886 	struct free_area *area = &zone->free_area[order];
887 
888 	list_move(&page->lru, &area->free_list[migratetype]);
889 }
890 
891 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
892 					   unsigned int order)
893 {
894 	/* clear reported state and update reported page count */
895 	if (page_reported(page))
896 		__ClearPageReported(page);
897 
898 	list_del(&page->lru);
899 	__ClearPageBuddy(page);
900 	set_page_private(page, 0);
901 	zone->free_area[order].nr_free--;
902 }
903 
904 /*
905  * If this is not the largest possible page, check if the buddy
906  * of the next-highest order is free. If it is, it's possible
907  * that pages are being freed that will coalesce soon. If that is
908  * happening, add the free page to the tail of the list so it's less
909  * likely to be used soon and more likely to be merged into a
910  * higher-order page.
911  */
912 static inline bool
913 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
914 		   struct page *page, unsigned int order)
915 {
916 	struct page *higher_page, *higher_buddy;
917 	unsigned long combined_pfn;
918 
919 	if (order >= MAX_ORDER - 2)
920 		return false;
921 
922 	if (!pfn_valid_within(buddy_pfn))
923 		return false;
924 
925 	combined_pfn = buddy_pfn & pfn;
926 	higher_page = page + (combined_pfn - pfn);
927 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
928 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
929 
930 	return pfn_valid_within(buddy_pfn) &&
931 	       page_is_buddy(higher_page, higher_buddy, order + 1);
932 }
933 
934 /*
935  * Freeing function for a buddy system allocator.
936  *
937  * The concept of a buddy system is to maintain direct-mapped table
938  * (containing bit values) for memory blocks of various "orders".
939  * The bottom level table contains the map for the smallest allocatable
940  * units of memory (here, pages), and each level above it describes
941  * pairs of units from the levels below, hence, "buddies".
942  * At a high level, all that happens here is marking the table entry
943  * at the bottom level available, and propagating the changes upward
944  * as necessary, plus some accounting needed to play nicely with other
945  * parts of the VM system.
946  * At each level, we keep a list of pages, which are heads of contiguous
947  * free page runs of length (1 << order), marked with PageBuddy.
948  * A page's order is recorded in the page_private(page) field.
949  * So when we are allocating or freeing one, we can derive the state of the
950  * other.  That is, if we allocate a small block, and both were
951  * free, the remainder of the region must be split into blocks.
952  * If a block is freed, and its buddy is also free, then this
953  * triggers coalescing into a block of larger size.
954  *
955  * -- nyc
956  */
957 
958 static inline void __free_one_page(struct page *page,
959 		unsigned long pfn,
960 		struct zone *zone, unsigned int order,
961 		int migratetype, bool report)
962 {
963 	struct capture_control *capc = task_capc(zone);
964 	unsigned long uninitialized_var(buddy_pfn);
965 	unsigned long combined_pfn;
966 	unsigned int max_order;
967 	struct page *buddy;
968 	bool to_tail;
969 
970 	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
971 
972 	VM_BUG_ON(!zone_is_initialized(zone));
973 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
974 
975 	VM_BUG_ON(migratetype == -1);
976 	if (likely(!is_migrate_isolate(migratetype)))
977 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
978 
979 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
980 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
981 
982 continue_merging:
983 	while (order < max_order - 1) {
984 		if (compaction_capture(capc, page, order, migratetype)) {
985 			__mod_zone_freepage_state(zone, -(1 << order),
986 								migratetype);
987 			return;
988 		}
989 		buddy_pfn = __find_buddy_pfn(pfn, order);
990 		buddy = page + (buddy_pfn - pfn);
991 
992 		if (!pfn_valid_within(buddy_pfn))
993 			goto done_merging;
994 		if (!page_is_buddy(page, buddy, order))
995 			goto done_merging;
996 		/*
997 		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard
998 		 * page; merge with it and move up one order.
999 		 */
1000 		if (page_is_guard(buddy))
1001 			clear_page_guard(zone, buddy, order, migratetype);
1002 		else
1003 			del_page_from_free_list(buddy, zone, order);
1004 		combined_pfn = buddy_pfn & pfn;
1005 		page = page + (combined_pfn - pfn);
1006 		pfn = combined_pfn;
1007 		order++;
1008 	}
1009 	if (max_order < MAX_ORDER) {
1010 		/* If we are here, it means order is >= pageblock_order.
1011 		 * We want to prevent merging between free pages on an isolated
1012 		 * pageblock and a normal pageblock. Without this, pageblock
1013 		 * isolation could cause incorrect freepage or CMA accounting.
1014 		 *
1015 		 * We don't want to hit this code for the more frequent
1016 		 * low-order merging.
1017 		 */
1018 		if (unlikely(has_isolate_pageblock(zone))) {
1019 			int buddy_mt;
1020 
1021 			buddy_pfn = __find_buddy_pfn(pfn, order);
1022 			buddy = page + (buddy_pfn - pfn);
1023 			buddy_mt = get_pageblock_migratetype(buddy);
1024 
1025 			if (migratetype != buddy_mt
1026 					&& (is_migrate_isolate(migratetype) ||
1027 						is_migrate_isolate(buddy_mt)))
1028 				goto done_merging;
1029 		}
1030 		max_order++;
1031 		goto continue_merging;
1032 	}
1033 
1034 done_merging:
1035 	set_page_order(page, order);
1036 
1037 	if (is_shuffle_order(order))
1038 		to_tail = shuffle_pick_tail();
1039 	else
1040 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1041 
1042 	if (to_tail)
1043 		add_to_free_list_tail(page, zone, order, migratetype);
1044 	else
1045 		add_to_free_list(page, zone, order, migratetype);
1046 
1047 	/* Notify page reporting subsystem of freed page */
1048 	if (report)
1049 		page_reporting_notify_free(order);
1050 }
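
/*
 * Worked example (illustrative): freeing order-0 pfn 5 while pfn 4 is a
 * free order-0 block and pfn 6 is a free order-1 block:
 *
 *	order 0: buddy of 5 is 4 (5 ^ 1), free -> merge, block is now 4, order 1
 *	order 1: buddy of 4 is 6 (4 ^ 2), free -> merge, block is now 4, order 2
 *	order 2: buddy of 4 is 0 (4 ^ 4), busy -> stop; list the block at order 2
 */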
1051 
1052 /*
1053  * A bad page could be due to a number of fields. Instead of multiple branches,
1054  * try to check multiple fields with one check. The caller must do a detailed
1055  * check if necessary.
1056  */
1057 static inline bool page_expected_state(struct page *page,
1058 					unsigned long check_flags)
1059 {
1060 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1061 		return false;
1062 
1063 	if (unlikely((unsigned long)page->mapping |
1064 			page_ref_count(page) |
1065 #ifdef CONFIG_MEMCG
1066 			(unsigned long)page->mem_cgroup |
1067 #endif
1068 			(page->flags & check_flags)))
1069 		return false;
1070 
1071 	return true;
1072 }
1073 
1074 static const char *page_bad_reason(struct page *page, unsigned long flags)
1075 {
1076 	const char *bad_reason = NULL;
1077 
1078 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1079 		bad_reason = "nonzero mapcount";
1080 	if (unlikely(page->mapping != NULL))
1081 		bad_reason = "non-NULL mapping";
1082 	if (unlikely(page_ref_count(page) != 0))
1083 		bad_reason = "nonzero _refcount";
1084 	if (unlikely(page->flags & flags)) {
1085 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1086 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1087 		else
1088 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1089 	}
1090 #ifdef CONFIG_MEMCG
1091 	if (unlikely(page->mem_cgroup))
1092 		bad_reason = "page still charged to cgroup";
1093 #endif
1094 	return bad_reason;
1095 }
1096 
1097 static void check_free_page_bad(struct page *page)
1098 {
1099 	bad_page(page,
1100 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1101 }
1102 
1103 static inline int check_free_page(struct page *page)
1104 {
1105 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1106 		return 0;
1107 
1108 	/* Something has gone sideways, find it */
1109 	check_free_page_bad(page);
1110 	return 1;
1111 }
1112 
1113 static int free_tail_pages_check(struct page *head_page, struct page *page)
1114 {
1115 	int ret = 1;
1116 
1117 	/*
1118 	 * We rely on page->lru.next never having bit 0 set, unless the page
1119 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1120 	 */
1121 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1122 
1123 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1124 		ret = 0;
1125 		goto out;
1126 	}
1127 	switch (page - head_page) {
1128 	case 1:
1129 		/* the first tail page: ->mapping may be compound_mapcount() */
1130 		if (unlikely(compound_mapcount(page))) {
1131 			bad_page(page, "nonzero compound_mapcount");
1132 			goto out;
1133 		}
1134 		break;
1135 	case 2:
1136 		/*
1137 		 * the second tail page: ->mapping is
1138 		 * deferred_list.next -- ignore value.
1139 		 */
1140 		break;
1141 	default:
1142 		if (page->mapping != TAIL_MAPPING) {
1143 			bad_page(page, "corrupted mapping in tail page");
1144 			goto out;
1145 		}
1146 		break;
1147 	}
1148 	if (unlikely(!PageTail(page))) {
1149 		bad_page(page, "PageTail not set");
1150 		goto out;
1151 	}
1152 	if (unlikely(compound_head(page) != head_page)) {
1153 		bad_page(page, "compound_head not consistent");
1154 		goto out;
1155 	}
1156 	ret = 0;
1157 out:
1158 	page->mapping = NULL;
1159 	clear_compound_head(page);
1160 	return ret;
1161 }
1162 
1163 static void kernel_init_free_pages(struct page *page, int numpages)
1164 {
1165 	int i;
1166 
1167 	for (i = 0; i < numpages; i++)
1168 		clear_highpage(page + i);
1169 }
1170 
1171 static __always_inline bool free_pages_prepare(struct page *page,
1172 					unsigned int order, bool check_free)
1173 {
1174 	int bad = 0;
1175 
1176 	VM_BUG_ON_PAGE(PageTail(page), page);
1177 
1178 	trace_mm_page_free(page, order);
1179 
1180 	/*
1181 	 * Check tail pages before head page information is cleared to
1182 	 * avoid checking PageCompound for order-0 pages.
1183 	 */
1184 	if (unlikely(order)) {
1185 		bool compound = PageCompound(page);
1186 		int i;
1187 
1188 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1189 
1190 		if (compound)
1191 			ClearPageDoubleMap(page);
1192 		for (i = 1; i < (1 << order); i++) {
1193 			if (compound)
1194 				bad += free_tail_pages_check(page, page + i);
1195 			if (unlikely(check_free_page(page + i))) {
1196 				bad++;
1197 				continue;
1198 			}
1199 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1200 		}
1201 	}
1202 	if (PageMappingFlags(page))
1203 		page->mapping = NULL;
1204 	if (memcg_kmem_enabled() && PageKmemcg(page))
1205 		__memcg_kmem_uncharge_page(page, order);
1206 	if (check_free)
1207 		bad += check_free_page(page);
1208 	if (bad)
1209 		return false;
1210 
1211 	page_cpupid_reset_last(page);
1212 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1213 	reset_page_owner(page, order);
1214 
1215 	if (!PageHighMem(page)) {
1216 		debug_check_no_locks_freed(page_address(page),
1217 					   PAGE_SIZE << order);
1218 		debug_check_no_obj_freed(page_address(page),
1219 					   PAGE_SIZE << order);
1220 	}
1221 	if (want_init_on_free())
1222 		kernel_init_free_pages(page, 1 << order);
1223 
1224 	kernel_poison_pages(page, 1 << order, 0);
1225 	/*
1226 	 * arch_free_page() can make the page's contents inaccessible.  s390
1227 	 * does this.  So nothing which can access the page's contents should
1228 	 * happen after this.
1229 	 */
1230 	arch_free_page(page, order);
1231 
1232 	if (debug_pagealloc_enabled_static())
1233 		kernel_map_pages(page, 1 << order, 0);
1234 
1235 	kasan_free_nondeferred_pages(page, order);
1236 
1237 	return true;
1238 }
1239 
1240 #ifdef CONFIG_DEBUG_VM
1241 /*
1242  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1243  * to pcp lists. With debug_pagealloc also enabled, they are rechecked when
1244  * moved from pcp lists to free lists.
1245  */
1246 static bool free_pcp_prepare(struct page *page)
1247 {
1248 	return free_pages_prepare(page, 0, true);
1249 }
1250 
1251 static bool bulkfree_pcp_prepare(struct page *page)
1252 {
1253 	if (debug_pagealloc_enabled_static())
1254 		return check_free_page(page);
1255 	else
1256 		return false;
1257 }
1258 #else
1259 /*
1260  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1261  * moving from pcp lists to free list in order to reduce overhead. With
1262  * debug_pagealloc enabled, they are checked also immediately when being freed
1263  * to the pcp lists.
1264  */
1265 static bool free_pcp_prepare(struct page *page)
1266 {
1267 	if (debug_pagealloc_enabled_static())
1268 		return free_pages_prepare(page, 0, true);
1269 	else
1270 		return free_pages_prepare(page, 0, false);
1271 }
1272 
1273 static bool bulkfree_pcp_prepare(struct page *page)
1274 {
1275 	return check_free_page(page);
1276 }
1277 #endif /* CONFIG_DEBUG_VM */
1278 
1279 static inline void prefetch_buddy(struct page *page)
1280 {
1281 	unsigned long pfn = page_to_pfn(page);
1282 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1283 	struct page *buddy = page + (buddy_pfn - pfn);
1284 
1285 	prefetch(buddy);
1286 }
1287 
1288 /*
1289  * Frees a number of pages from the PCP lists.
1290  * Assumes all pages on the list are in the same zone and of the same order.
1291  * count is the number of pages to free.
1292  *
1293  * If the zone was previously in an "all pages pinned" state then look to
1294  * see if this freeing clears that state.
1295  *
1296  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1297  * pinned" detection logic.
1298  */
1299 static void free_pcppages_bulk(struct zone *zone, int count,
1300 					struct per_cpu_pages *pcp)
1301 {
1302 	int migratetype = 0;
1303 	int batch_free = 0;
1304 	int prefetch_nr = 0;
1305 	bool isolated_pageblocks;
1306 	struct page *page, *tmp;
1307 	LIST_HEAD(head);
1308 
1309 	while (count) {
1310 		struct list_head *list;
1311 
1312 		/*
1313 		 * Remove pages from lists in a round-robin fashion. A
1314 		 * batch_free count is maintained that is incremented when an
1315 		 * empty list is encountered.  This is so more pages are freed
1316 		 * off fuller lists instead of spinning excessively around empty
1317 		 * lists.
1318 		 */
1319 		do {
1320 			batch_free++;
1321 			if (++migratetype == MIGRATE_PCPTYPES)
1322 				migratetype = 0;
1323 			list = &pcp->lists[migratetype];
1324 		} while (list_empty(list));
1325 
1326 		/* This is the only non-empty list. Free them all. */
1327 		if (batch_free == MIGRATE_PCPTYPES)
1328 			batch_free = count;
1329 
1330 		do {
1331 			page = list_last_entry(list, struct page, lru);
1332 			/* must delete to avoid corrupting pcp list */
1333 			list_del(&page->lru);
1334 			pcp->count--;
1335 
1336 			if (bulkfree_pcp_prepare(page))
1337 				continue;
1338 
1339 			list_add_tail(&page->lru, &head);
1340 
1341 			/*
1342 			 * We are going to put the page back to the global
1343 			 * pool, prefetch its buddy to speed up later access
1344 			 * under zone->lock. It is believed the overhead of
1345 			 * an additional test and calculating buddy_pfn here
1346 			 * can be offset by reduced memory latency later. To
1347 			 * avoid excessive prefetching due to large count, only
1348 			 * prefetch buddy for the first pcp->batch nr of pages.
1349 			 */
1350 			if (prefetch_nr++ < pcp->batch)
1351 				prefetch_buddy(page);
1352 		} while (--count && --batch_free && !list_empty(list));
1353 	}
1354 
1355 	spin_lock(&zone->lock);
1356 	isolated_pageblocks = has_isolate_pageblock(zone);
1357 
1358 	/*
1359 	 * Use safe version since after __free_one_page(),
1360 	 * page->lru.next will not point to original list.
1361 	 */
1362 	list_for_each_entry_safe(page, tmp, &head, lru) {
1363 		int mt = get_pcppage_migratetype(page);
1364 		/* MIGRATE_ISOLATE page should not go to pcplists */
1365 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1366 		/* Pageblock could have been isolated meanwhile */
1367 		if (unlikely(isolated_pageblocks))
1368 			mt = get_pageblock_migratetype(page);
1369 
1370 		__free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
1371 		trace_mm_page_pcpu_drain(page, 0, mt);
1372 	}
1373 	spin_unlock(&zone->lock);
1374 }
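
/*
 * Worked example (illustrative): if only the MIGRATE_MOVABLE pcplist holds
 * pages, the selection loop above walks the empty lists, bumping batch_free
 * each time; once batch_free == MIGRATE_PCPTYPES, the whole remaining
 * 'count' is drained from that single non-empty list.
 */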
1375 
1376 static void free_one_page(struct zone *zone,
1377 				struct page *page, unsigned long pfn,
1378 				unsigned int order,
1379 				int migratetype)
1380 {
1381 	spin_lock(&zone->lock);
1382 	if (unlikely(has_isolate_pageblock(zone) ||
1383 		is_migrate_isolate(migratetype))) {
1384 		migratetype = get_pfnblock_migratetype(page, pfn);
1385 	}
1386 	__free_one_page(page, pfn, zone, order, migratetype, true);
1387 	spin_unlock(&zone->lock);
1388 }
1389 
1390 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1391 				unsigned long zone, int nid)
1392 {
1393 	mm_zero_struct_page(page);
1394 	set_page_links(page, zone, nid, pfn);
1395 	init_page_count(page);
1396 	page_mapcount_reset(page);
1397 	page_cpupid_reset_last(page);
1398 	page_kasan_tag_reset(page);
1399 
1400 	INIT_LIST_HEAD(&page->lru);
1401 #ifdef WANT_PAGE_VIRTUAL
1402 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1403 	if (!is_highmem_idx(zone))
1404 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1405 #endif
1406 }
1407 
1408 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1409 static void __meminit init_reserved_page(unsigned long pfn)
1410 {
1411 	pg_data_t *pgdat;
1412 	int nid, zid;
1413 
1414 	if (!early_page_uninitialised(pfn))
1415 		return;
1416 
1417 	nid = early_pfn_to_nid(pfn);
1418 	pgdat = NODE_DATA(nid);
1419 
1420 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1421 		struct zone *zone = &pgdat->node_zones[zid];
1422 
1423 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1424 			break;
1425 	}
1426 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1427 }
1428 #else
1429 static inline void init_reserved_page(unsigned long pfn)
1430 {
1431 }
1432 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1433 
1434 /*
1435  * Initialised pages do not have PageReserved set. This function is
1436  * called for each range allocated by the bootmem allocator and
1437  * marks the pages PageReserved. The remaining valid pages are later
1438  * sent to the buddy page allocator.
1439  */
1440 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1441 {
1442 	unsigned long start_pfn = PFN_DOWN(start);
1443 	unsigned long end_pfn = PFN_UP(end);
1444 
1445 	for (; start_pfn < end_pfn; start_pfn++) {
1446 		if (pfn_valid(start_pfn)) {
1447 			struct page *page = pfn_to_page(start_pfn);
1448 
1449 			init_reserved_page(start_pfn);
1450 
1451 			/* Avoid false-positive PageTail() */
1452 			INIT_LIST_HEAD(&page->lru);
1453 
1454 			/*
1455 			 * No need for an atomic set_bit because the struct
1456 			 * page is not visible yet, so nobody should be
1457 			 * accessing it.
1458 			 */
1459 			__SetPageReserved(page);
1460 		}
1461 	}
1462 }
1463 
1464 static void __free_pages_ok(struct page *page, unsigned int order)
1465 {
1466 	unsigned long flags;
1467 	int migratetype;
1468 	unsigned long pfn = page_to_pfn(page);
1469 
1470 	if (!free_pages_prepare(page, order, true))
1471 		return;
1472 
1473 	migratetype = get_pfnblock_migratetype(page, pfn);
1474 	local_irq_save(flags);
1475 	__count_vm_events(PGFREE, 1 << order);
1476 	free_one_page(page_zone(page), page, pfn, order, migratetype);
1477 	local_irq_restore(flags);
1478 }
1479 
1480 void __free_pages_core(struct page *page, unsigned int order)
1481 {
1482 	unsigned int nr_pages = 1 << order;
1483 	struct page *p = page;
1484 	unsigned int loop;
1485 
1486 	prefetchw(p);
1487 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1488 		prefetchw(p + 1);
1489 		__ClearPageReserved(p);
1490 		set_page_count(p, 0);
1491 	}
1492 	__ClearPageReserved(p);
1493 	set_page_count(p, 0);
1494 
1495 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1496 	set_page_refcounted(page);
1497 	__free_pages(page, order);
1498 }
1499 
1500 #ifdef CONFIG_NEED_MULTIPLE_NODES
1501 
1502 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1503 
1504 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
1505 
1506 /*
1507  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1508  */
1509 int __meminit __early_pfn_to_nid(unsigned long pfn,
1510 					struct mminit_pfnnid_cache *state)
1511 {
1512 	unsigned long start_pfn, end_pfn;
1513 	int nid;
1514 
1515 	if (state->last_start <= pfn && pfn < state->last_end)
1516 		return state->last_nid;
1517 
1518 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1519 	if (nid != NUMA_NO_NODE) {
1520 		state->last_start = start_pfn;
1521 		state->last_end = end_pfn;
1522 		state->last_nid = nid;
1523 	}
1524 
1525 	return nid;
1526 }
1527 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
1528 
1529 int __meminit early_pfn_to_nid(unsigned long pfn)
1530 {
1531 	static DEFINE_SPINLOCK(early_pfn_lock);
1532 	int nid;
1533 
1534 	spin_lock(&early_pfn_lock);
1535 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1536 	if (nid < 0)
1537 		nid = first_online_node;
1538 	spin_unlock(&early_pfn_lock);
1539 
1540 	return nid;
1541 }
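
/*
 * Illustrative sketch (hypothetical loop, not part of this file): thanks to
 * early_pfnnid_cache, a scan such as
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		nid = early_pfn_to_nid(pfn);
 *
 * performs one memblock range search per contiguous range rather than one
 * per pfn.
 */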
1542 #endif /* CONFIG_NEED_MULTIPLE_NODES */
1543 
1544 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1545 							unsigned int order)
1546 {
1547 	if (early_page_uninitialised(pfn))
1548 		return;
1549 	__free_pages_core(page, order);
1550 }
1551 
1552 /*
1553  * Check that the whole (or subset of) a pageblock given by the interval of
1554  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1555  * with the migration or free compaction scanner. The scanners then need to
1556  * use only pfn_valid_within() check for arches that allow holes within
1557  * pageblocks.
1558  *
1559  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1560  *
1561  * It's possible on some configurations to have a setup like node0 node1 node0
1562  * i.e. it's possible that all pages within a zones range of pages do not
1563  * belong to a single zone. We assume that a border between node0 and node1
1564  * can occur within a single pageblock, but not a node0 node1 node0
1565  * interleaving within a single pageblock. It is therefore sufficient to check
1566  * the first and last page of a pageblock and avoid checking each individual
1567  * page in a pageblock.
1568  */
1569 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1570 				     unsigned long end_pfn, struct zone *zone)
1571 {
1572 	struct page *start_page;
1573 	struct page *end_page;
1574 
1575 	/* end_pfn is one past the range we are checking */
1576 	end_pfn--;
1577 
1578 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1579 		return NULL;
1580 
1581 	start_page = pfn_to_online_page(start_pfn);
1582 	if (!start_page)
1583 		return NULL;
1584 
1585 	if (page_zone(start_page) != zone)
1586 		return NULL;
1587 
1588 	end_page = pfn_to_page(end_pfn);
1589 
1590 	/* This gives a shorter code than deriving page_zone(end_page) */
1591 	if (page_zone_id(start_page) != page_zone_id(end_page))
1592 		return NULL;
1593 
1594 	return start_page;
1595 }
1596 
1597 void set_zone_contiguous(struct zone *zone)
1598 {
1599 	unsigned long block_start_pfn = zone->zone_start_pfn;
1600 	unsigned long block_end_pfn;
1601 
1602 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1603 	for (; block_start_pfn < zone_end_pfn(zone);
1604 			block_start_pfn = block_end_pfn,
1605 			 block_end_pfn += pageblock_nr_pages) {
1606 
1607 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1608 
1609 		if (!__pageblock_pfn_to_page(block_start_pfn,
1610 					     block_end_pfn, zone))
1611 			return;
1612 		cond_resched();
1613 	}
1614 
1615 	/* We confirm that there is no hole */
1616 	zone->contiguous = true;
1617 }
1618 
1619 void clear_zone_contiguous(struct zone *zone)
1620 {
1621 	zone->contiguous = false;
1622 }
1623 
1624 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1625 static void __init deferred_free_range(unsigned long pfn,
1626 				       unsigned long nr_pages)
1627 {
1628 	struct page *page;
1629 	unsigned long i;
1630 
1631 	if (!nr_pages)
1632 		return;
1633 
1634 	page = pfn_to_page(pfn);
1635 
1636 	/* Free a large naturally-aligned chunk if possible */
1637 	if (nr_pages == pageblock_nr_pages &&
1638 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1639 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1640 		__free_pages_core(page, pageblock_order);
1641 		return;
1642 	}
1643 
1644 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1645 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1646 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1647 		__free_pages_core(page, 0);
1648 	}
1649 }
1650 
1651 /* Completion tracking for deferred_init_memmap() threads */
1652 static atomic_t pgdat_init_n_undone __initdata;
1653 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1654 
1655 static inline void __init pgdat_init_report_one_done(void)
1656 {
1657 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1658 		complete(&pgdat_init_all_done_comp);
1659 }
1660 
1661 /*
1662  * Returns true if page needs to be initialized or freed to buddy allocator.
1663  *
1664  * First we check if pfn is valid on architectures where it is possible to have
1665  * holes within pageblock_nr_pages. On systems where it is not possible, this
1666  * function is optimized out.
1667  *
1668  * Then we check whether the current large page is valid by checking only
1669  * the validity of its head pfn.
1670  */
1671 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1672 {
1673 	if (!pfn_valid_within(pfn))
1674 		return false;
1675 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1676 		return false;
1677 	return true;
1678 }
1679 
1680 /*
1681  * Free pages to the buddy allocator. Try to free aligned runs of
1682  * pageblock_nr_pages pages at a time.
1683  */
1684 static void __init deferred_free_pages(unsigned long pfn,
1685 				       unsigned long end_pfn)
1686 {
1687 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1688 	unsigned long nr_free = 0;
1689 
1690 	for (; pfn < end_pfn; pfn++) {
1691 		if (!deferred_pfn_valid(pfn)) {
1692 			deferred_free_range(pfn - nr_free, nr_free);
1693 			nr_free = 0;
1694 		} else if (!(pfn & nr_pgmask)) {
1695 			deferred_free_range(pfn - nr_free, nr_free);
1696 			nr_free = 1;
1697 		} else {
1698 			nr_free++;
1699 		}
1700 	}
1701 	/* Free the last block of pages to allocator */
1702 	deferred_free_range(pfn - nr_free, nr_free);
1703 }
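
/*
 * Worked example (illustrative, assuming pageblock_nr_pages == 512): for a
 * valid pfn run 1000..1535, the loop above flushes deferred_free_range(1000,
 * 24) when pfn reaches the aligned boundary 1024, then accumulates the next
 * 512 pfns so the final call frees the aligned block 1024..1535 as a single
 * pageblock.
 */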
1704 
1705 /*
1706  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1707  * by performing them only once every pageblock_nr_pages.
1708  * Return number of pages initialized.
1709  */
1710 static unsigned long  __init deferred_init_pages(struct zone *zone,
1711 						 unsigned long pfn,
1712 						 unsigned long end_pfn)
1713 {
1714 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1715 	int nid = zone_to_nid(zone);
1716 	unsigned long nr_pages = 0;
1717 	int zid = zone_idx(zone);
1718 	struct page *page = NULL;
1719 
1720 	for (; pfn < end_pfn; pfn++) {
1721 		if (!deferred_pfn_valid(pfn)) {
1722 			page = NULL;
1723 			continue;
1724 		} else if (!page || !(pfn & nr_pgmask)) {
1725 			page = pfn_to_page(pfn);
1726 		} else {
1727 			page++;
1728 		}
1729 		__init_single_page(page, pfn, zid, nid);
1730 		nr_pages++;
1731 	}
1732 	return nr_pages;
1733 }
1734 
1735 /*
1736  * This function is meant to pre-load the iterator for the zone init.
1737  * Specifically it walks through the ranges until we are caught up to the
1738  * first_init_pfn value and exits there. If we never encounter the value, we
1739  * return false, indicating there are no valid ranges left.
1740  */
1741 static bool __init
1742 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1743 				    unsigned long *spfn, unsigned long *epfn,
1744 				    unsigned long first_init_pfn)
1745 {
1746 	u64 j;
1747 
1748 	/*
1749 	 * Start out by walking through the ranges in this zone that have
1750 	 * already been initialized. We don't need to do anything with them
1751 	 * so we just need to flush them out of the system.
1752 	 */
1753 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1754 		if (*epfn <= first_init_pfn)
1755 			continue;
1756 		if (*spfn < first_init_pfn)
1757 			*spfn = first_init_pfn;
1758 		*i = j;
1759 		return true;
1760 	}
1761 
1762 	return false;
1763 }
1764 
1765 /*
1766  * Initialize and free pages. We do it in two loops: first we initialize
1767  * struct page, then we free the pages to the buddy allocator, because
1768  * while freeing a page we can access pages ahead of it (computing the
1769  * buddy page in __free_one_page()).
1770  *
1771  * To try to keep some of that memory in the cache, the loop is broken
1772  * along max page order boundaries. This way we do not cause any issues
1773  * with the buddy page computation.
1774  */
1775 static unsigned long __init
1776 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1777 		       unsigned long *end_pfn)
1778 {
1779 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1780 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1781 	unsigned long nr_pages = 0;
1782 	u64 j = *i;
1783 
1784 	/* First we loop through and initialize the page values */
1785 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1786 		unsigned long t;
1787 
1788 		if (mo_pfn <= *start_pfn)
1789 			break;
1790 
1791 		t = min(mo_pfn, *end_pfn);
1792 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1793 
1794 		if (mo_pfn < *end_pfn) {
1795 			*start_pfn = mo_pfn;
1796 			break;
1797 		}
1798 	}
1799 
1800 	/* Reset values and now loop through freeing pages as needed */
1801 	swap(j, *i);
1802 
1803 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1804 		unsigned long t;
1805 
1806 		if (mo_pfn <= spfn)
1807 			break;
1808 
1809 		t = min(mo_pfn, epfn);
1810 		deferred_free_pages(spfn, t);
1811 
1812 		if (mo_pfn <= epfn)
1813 			break;
1814 	}
1815 
1816 	return nr_pages;
1817 }
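
/*
 * Editorial sketch (not part of page_alloc.c): mo_pfn above is
 * ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES), i.e. the first MAX_ORDER
 * boundary strictly after start_pfn. With a power-of-two alignment, the
 * kernel's ALIGN() reduces to the masking below (names are illustrative):
 */
static unsigned long model_next_boundary(unsigned long pfn,
					 unsigned long chunk)
{
	/* equivalent to ALIGN(pfn + 1, chunk) for power-of-two chunk */
	return (pfn + chunk) & ~(chunk - 1);
}
/*
 * With chunk = 1024: pfn 0 -> 1024, pfn 1023 -> 1024, pfn 1024 -> 2048.
 * Each deferred_init_maxorder() call therefore stops at the first boundary
 * past its starting pfn, so a free never has to compute a buddy beyond
 * what has already been initialized.
 */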
1818 
1819 static void __init
1820 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1821 			   void *arg)
1822 {
1823 	unsigned long spfn, epfn;
1824 	struct zone *zone = arg;
1825 	u64 i;
1826 
1827 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1828 
1829 	/*
1830 	 * Initialize and free pages in MAX_ORDER sized increments so that we
1831 	 * can avoid introducing any issues with the buddy allocator.
1832 	 */
1833 	while (spfn < end_pfn) {
1834 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
1835 		cond_resched();
1836 	}
1837 }
1838 
1839 /* An arch may override for more concurrency. */
1840 __weak int __init
1841 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1842 {
1843 	return 1;
1844 }
1845 
1846 /* Initialise remaining memory on a node */
1847 static int __init deferred_init_memmap(void *data)
1848 {
1849 	pg_data_t *pgdat = data;
1850 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1851 	unsigned long spfn = 0, epfn = 0;
1852 	unsigned long first_init_pfn, flags;
1853 	unsigned long start = jiffies;
1854 	struct zone *zone;
1855 	int zid, max_threads;
1856 	u64 i;
1857 
1858 	/* Bind memory initialisation thread to a local node if possible */
1859 	if (!cpumask_empty(cpumask))
1860 		set_cpus_allowed_ptr(current, cpumask);
1861 
1862 	pgdat_resize_lock(pgdat, &flags);
1863 	first_init_pfn = pgdat->first_deferred_pfn;
1864 	if (first_init_pfn == ULONG_MAX) {
1865 		pgdat_resize_unlock(pgdat, &flags);
1866 		pgdat_init_report_one_done();
1867 		return 0;
1868 	}
1869 
1870 	/* Sanity check boundaries */
1871 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1872 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1873 	pgdat->first_deferred_pfn = ULONG_MAX;
1874 
1875 	/*
1876 	 * Once we unlock here, the zone cannot be grown anymore. Thus, if an
1877 	 * interrupt thread must allocate this early in boot, the zone must be
1878 	 * pre-grown before deferred page initialization starts.
1879 	 */
1880 	pgdat_resize_unlock(pgdat, &flags);
1881 
1882 	/* Only the highest zone is deferred so find it */
1883 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1884 		zone = pgdat->node_zones + zid;
1885 		if (first_init_pfn < zone_end_pfn(zone))
1886 			break;
1887 	}
1888 
1889 	/* If the zone is empty somebody else may have cleared out the zone */
1890 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1891 						 first_init_pfn))
1892 		goto zone_empty;
1893 
1894 	max_threads = deferred_page_init_max_threads(cpumask);
1895 
1896 	while (spfn < epfn) {
1897 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
1898 		struct padata_mt_job job = {
1899 			.thread_fn   = deferred_init_memmap_chunk,
1900 			.fn_arg      = zone,
1901 			.start       = spfn,
1902 			.size        = epfn_align - spfn,
1903 			.align       = PAGES_PER_SECTION,
1904 			.min_chunk   = PAGES_PER_SECTION,
1905 			.max_threads = max_threads,
1906 		};
1907 
1908 		padata_do_multithreaded(&job);
1909 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1910 						    epfn_align);
1911 	}
1912 zone_empty:
1913 	/* Sanity check that the next zone really is unpopulated */
1914 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1915 
1916 	pr_info("node %d deferred pages initialised in %ums\n",
1917 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
1918 
1919 	pgdat_init_report_one_done();
1920 	return 0;
1921 }
1922 
1923 /*
1924  * If this zone has deferred pages, try to grow it by initializing enough
1925  * deferred pages to satisfy the allocation specified by order, rounded up to
1926  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
1927  * of SECTION_SIZE bytes by initializing struct pages in increments of
1928  * PAGES_PER_SECTION * sizeof(struct page) bytes.
1929  *
1930  * Return true when zone was grown, otherwise return false. We return true even
1931  * when we grow less than requested, to let the caller decide if there are
1932  * enough pages to satisfy the allocation.
1933  *
1934  * Note: we use noinline because this function is needed only during boot, and
1935  * it is called from the __ref function _deferred_grow_zone. This way we make
1936  * sure that it is not inlined into the permanent text section.
1937  */
1938 static noinline bool __init
1939 deferred_grow_zone(struct zone *zone, unsigned int order)
1940 {
1941 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
1942 	pg_data_t *pgdat = zone->zone_pgdat;
1943 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
1944 	unsigned long spfn, epfn, flags;
1945 	unsigned long nr_pages = 0;
1946 	u64 i;
1947 
1948 	/* Only the last zone may have deferred pages */
1949 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
1950 		return false;
1951 
1952 	pgdat_resize_lock(pgdat, &flags);
1953 
1954 	/*
1955 	 * If someone grew this zone while we were waiting for spinlock, return
1956 	 * true, as there might be enough pages already.
1957 	 */
1958 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
1959 		pgdat_resize_unlock(pgdat, &flags);
1960 		return true;
1961 	}
1962 
1963 	/* If the zone is empty somebody else may have cleared out the zone */
1964 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1965 						 first_deferred_pfn)) {
1966 		pgdat->first_deferred_pfn = ULONG_MAX;
1967 		pgdat_resize_unlock(pgdat, &flags);
1968 		/* Retry only once. */
1969 		return first_deferred_pfn != ULONG_MAX;
1970 	}
1971 
1972 	/*
1973 	 * Initialize and free pages in MAX_ORDER sized increments so
1974 	 * that we can avoid introducing any issues with the buddy
1975 	 * allocator.
1976 	 */
1977 	while (spfn < epfn) {
1978 		/* update our first deferred PFN for this section */
1979 		first_deferred_pfn = spfn;
1980 
1981 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
1982 		touch_nmi_watchdog();
1983 
1984 		/* We should only stop along section boundaries */
1985 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
1986 			continue;
1987 
1988 		/* If our quota has been met we can stop here */
1989 		if (nr_pages >= nr_pages_needed)
1990 			break;
1991 	}
1992 
1993 	pgdat->first_deferred_pfn = spfn;
1994 	pgdat_resize_unlock(pgdat, &flags);
1995 
1996 	return nr_pages > 0;
1997 }
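
/*
 * Editorial sketch (not part of page_alloc.c): the loop above uses
 * "(first_deferred_pfn ^ spfn) < PAGES_PER_SECTION" to detect whether both
 * pfns still lie in the same PAGES_PER_SECTION-aligned section: if they
 * agree in every bit above the section offset, their XOR has only offset
 * bits set. A standalone model (section_pages assumed a power of two):
 */
static int model_same_section(unsigned long a, unsigned long b,
			      unsigned long section_pages)
{
	return (a ^ b) < section_pages;
}
/*
 * With section_pages = 32768, model_same_section(5, 100) is true while
 * model_same_section(5, 32768) is false, so the quota check above only
 * runs once spfn has crossed into a new section.
 */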
1998 
1999 /*
2000  * deferred_grow_zone() is __init, but it is called from
2001  * get_page_from_freelist() during early boot until deferred_pages permanently
2002  * disables this call. This is why we have the __ref wrapper: it avoids the
2003  * section-mismatch warning and ensures that the function body gets unloaded.
2004  */
2005 static bool __ref
2006 _deferred_grow_zone(struct zone *zone, unsigned int order)
2007 {
2008 	return deferred_grow_zone(zone, order);
2009 }
2010 
2011 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2012 
2013 void __init page_alloc_init_late(void)
2014 {
2015 	struct zone *zone;
2016 	int nid;
2017 
2018 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2019 
2020 	/* There will be num_node_state(N_MEMORY) threads */
2021 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2022 	for_each_node_state(nid, N_MEMORY) {
2023 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2024 	}
2025 
2026 	/* Block until all are initialised */
2027 	wait_for_completion(&pgdat_init_all_done_comp);
2028 
2029 	/*
2030 	 * The number of managed pages has changed due to the initialisation
2031 	 * so the pcpu batch and high limits need to be updated or the limits
2032 	 * will be artificially small.
2033 	 */
2034 	for_each_populated_zone(zone)
2035 		zone_pcp_update(zone);
2036 
2037 	/*
2038 	 * We initialized the rest of the deferred pages.  Permanently disable
2039 	 * on-demand struct page initialization.
2040 	 */
2041 	static_branch_disable(&deferred_pages);
2042 
2043 	/* Reinit limits that are based on free pages after the kernel is up */
2044 	files_maxfiles_init();
2045 #endif
2046 
2047 	/* Discard memblock private memory */
2048 	memblock_discard();
2049 
2050 	for_each_node_state(nid, N_MEMORY)
2051 		shuffle_free_memory(NODE_DATA(nid));
2052 
2053 	for_each_populated_zone(zone)
2054 		set_zone_contiguous(zone);
2055 }
2056 
2057 #ifdef CONFIG_CMA
2058 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2059 void __init init_cma_reserved_pageblock(struct page *page)
2060 {
2061 	unsigned i = pageblock_nr_pages;
2062 	struct page *p = page;
2063 
2064 	do {
2065 		__ClearPageReserved(p);
2066 		set_page_count(p, 0);
2067 	} while (++p, --i);
2068 
2069 	set_pageblock_migratetype(page, MIGRATE_CMA);
2070 
2071 	if (pageblock_order >= MAX_ORDER) {
2072 		i = pageblock_nr_pages;
2073 		p = page;
2074 		do {
2075 			set_page_refcounted(p);
2076 			__free_pages(p, MAX_ORDER - 1);
2077 			p += MAX_ORDER_NR_PAGES;
2078 		} while (i -= MAX_ORDER_NR_PAGES);
2079 	} else {
2080 		set_page_refcounted(page);
2081 		__free_pages(page, pageblock_order);
2082 	}
2083 
2084 	adjust_managed_page_count(page, pageblock_nr_pages);
2085 }
2086 #endif
2087 
2088 /*
2089  * The order of subdivision here is critical for the IO subsystem.
2090  * Please do not alter this order without good reasons and regression
2091  * testing. Specifically, as large blocks of memory are subdivided,
2092  * the order in which smaller blocks are delivered depends on the order
2093  * they're subdivided in this function. This is the primary factor
2094  * influencing the order in which pages are delivered to the IO
2095  * subsystem according to empirical testing, and this is also justified
2096  * by considering the behavior of a buddy system containing a single
2097  * large block of memory acted on by a series of small allocations.
2098  * This behavior is a critical factor in sglist merging's success.
2099  *
2100  * -- nyc
2101  */
2102 static inline void expand(struct zone *zone, struct page *page,
2103 	int low, int high, int migratetype)
2104 {
2105 	unsigned long size = 1 << high;
2106 
2107 	while (high > low) {
2108 		high--;
2109 		size >>= 1;
2110 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2111 
2112 		/*
2113 		 * Mark as guard pages (or a guard page), which allows
2114 		 * merging back into the allocator when the buddy is freed.
2115 		 * The corresponding page table entries are not touched;
2116 		 * the pages stay not present in the virtual address space.
2117 		 */
2118 		if (set_page_guard(zone, &page[size], high, migratetype))
2119 			continue;
2120 
2121 		add_to_free_list(&page[size], zone, high, migratetype);
2122 		set_page_order(&page[size], high);
2123 	}
2124 }
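
/*
 * Editorial sketch (not part of page_alloc.c): expand() halves the block
 * repeatedly and parks the upper half of each split on the free list of
 * the shrinking order. A toy model that prints which (offset, order)
 * buddies would be freed when splitting order `high` down to order `low`:
 */
#include <stdio.h>

static void model_expand(unsigned int low, unsigned int high)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* the upper half becomes a free block of order `high` */
		printf("free buddy at offset %lu, order %u\n", size, high);
	}
	/* the remaining [0, 1 << low) pages satisfy the allocation */
}
/*
 * model_expand(0, 3) prints offsets 4 (order 2), 2 (order 1) and
 * 1 (order 0): an order-3 block yields one order-0 page plus three
 * progressively smaller buddies.
 */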
2125 
2126 static void check_new_page_bad(struct page *page)
2127 {
2128 	if (unlikely(page->flags & __PG_HWPOISON)) {
2129 		/* Don't complain about hwpoisoned pages */
2130 		page_mapcount_reset(page); /* remove PageBuddy */
2131 		return;
2132 	}
2133 
2134 	bad_page(page,
2135 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2136 }
2137 
2138 /*
2139  * This page is about to be returned from the page allocator
2140  */
2141 static inline int check_new_page(struct page *page)
2142 {
2143 	if (likely(page_expected_state(page,
2144 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2145 		return 0;
2146 
2147 	check_new_page_bad(page);
2148 	return 1;
2149 }
2150 
2151 static inline bool free_pages_prezeroed(void)
2152 {
2153 	return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
2154 		page_poisoning_enabled()) || want_init_on_free();
2155 }
2156 
2157 #ifdef CONFIG_DEBUG_VM
2158 /*
2159  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2160  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2161  * also checked when pcp lists are refilled from the free lists.
2162  */
2163 static inline bool check_pcp_refill(struct page *page)
2164 {
2165 	if (debug_pagealloc_enabled_static())
2166 		return check_new_page(page);
2167 	else
2168 		return false;
2169 }
2170 
2171 static inline bool check_new_pcp(struct page *page)
2172 {
2173 	return check_new_page(page);
2174 }
2175 #else
2176 /*
2177  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2178  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2179  * enabled, they are also checked when being allocated from the pcp lists.
2180  */
2181 static inline bool check_pcp_refill(struct page *page)
2182 {
2183 	return check_new_page(page);
2184 }
2185 static inline bool check_new_pcp(struct page *page)
2186 {
2187 	if (debug_pagealloc_enabled_static())
2188 		return check_new_page(page);
2189 	else
2190 		return false;
2191 }
2192 #endif /* CONFIG_DEBUG_VM */
2193 
2194 static bool check_new_pages(struct page *page, unsigned int order)
2195 {
2196 	int i;
2197 	for (i = 0; i < (1 << order); i++) {
2198 		struct page *p = page + i;
2199 
2200 		if (unlikely(check_new_page(p)))
2201 			return true;
2202 	}
2203 
2204 	return false;
2205 }
2206 
2207 inline void post_alloc_hook(struct page *page, unsigned int order,
2208 				gfp_t gfp_flags)
2209 {
2210 	set_page_private(page, 0);
2211 	set_page_refcounted(page);
2212 
2213 	arch_alloc_page(page, order);
2214 	if (debug_pagealloc_enabled_static())
2215 		kernel_map_pages(page, 1 << order, 1);
2216 	kasan_alloc_pages(page, order);
2217 	kernel_poison_pages(page, 1 << order, 1);
2218 	set_page_owner(page, order, gfp_flags);
2219 }
2220 
2221 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2222 							unsigned int alloc_flags)
2223 {
2224 	post_alloc_hook(page, order, gfp_flags);
2225 
2226 	if (!free_pages_prezeroed() && want_init_on_alloc(gfp_flags))
2227 		kernel_init_free_pages(page, 1 << order);
2228 
2229 	if (order && (gfp_flags & __GFP_COMP))
2230 		prep_compound_page(page, order);
2231 
2232 	/*
2233 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2234 	 * allocate the page. The expectation is that the caller is taking
2235 	 * steps that will free more memory. The caller should avoid the page
2236 	 * being used for !PFMEMALLOC purposes.
2237 	 */
2238 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2239 		set_page_pfmemalloc(page);
2240 	else
2241 		clear_page_pfmemalloc(page);
2242 }
2243 
2244 /*
2245  * Go through the free lists for the given migratetype and remove
2246  * the smallest available page from the freelists
2247  */
2248 static __always_inline
2249 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2250 						int migratetype)
2251 {
2252 	unsigned int current_order;
2253 	struct free_area *area;
2254 	struct page *page;
2255 
2256 	/* Find a page of the appropriate size in the preferred list */
2257 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2258 		area = &(zone->free_area[current_order]);
2259 		page = get_page_from_free_area(area, migratetype);
2260 		if (!page)
2261 			continue;
2262 		del_page_from_free_list(page, zone, current_order);
2263 		expand(zone, page, order, current_order, migratetype);
2264 		set_pcppage_migratetype(page, migratetype);
2265 		return page;
2266 	}
2267 
2268 	return NULL;
2269 }
2270 
2271 
2272 /*
2273  * This array describes the order in which lists are fallen back on when
2274  * the free lists for the desired migratetype are depleted.
2275  */
2276 static int fallbacks[MIGRATE_TYPES][4] = {
2277 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2278 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2279 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2280 #ifdef CONFIG_CMA
2281 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2282 #endif
2283 #ifdef CONFIG_MEMORY_ISOLATION
2284 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2285 #endif
2286 };
2287 
2288 #ifdef CONFIG_CMA
2289 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2290 					unsigned int order)
2291 {
2292 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2293 }
2294 #else
2295 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2296 					unsigned int order) { return NULL; }
2297 #endif
2298 
2299 /*
2300  * Move the free pages in a range to the free lists of the requested type.
2301  * Note that start_page and end_page are not aligned on a pageblock
2302  * boundary. If alignment is required, use move_freepages_block()
2303  */
2304 static int move_freepages(struct zone *zone,
2305 			  struct page *start_page, struct page *end_page,
2306 			  int migratetype, int *num_movable)
2307 {
2308 	struct page *page;
2309 	unsigned int order;
2310 	int pages_moved = 0;
2311 
2312 	for (page = start_page; page <= end_page;) {
2313 		if (!pfn_valid_within(page_to_pfn(page))) {
2314 			page++;
2315 			continue;
2316 		}
2317 
2318 		if (!PageBuddy(page)) {
2319 			/*
2320 			 * We assume that pages that could be isolated for
2321 			 * migration are movable. But we don't actually try
2322 			 * isolating, as that would be expensive.
2323 			 */
2324 			if (num_movable &&
2325 					(PageLRU(page) || __PageMovable(page)))
2326 				(*num_movable)++;
2327 
2328 			page++;
2329 			continue;
2330 		}
2331 
2332 		/* Make sure we are not inadvertently changing nodes */
2333 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2334 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2335 
2336 		order = page_order(page);
2337 		move_to_free_list(page, zone, order, migratetype);
2338 		page += 1 << order;
2339 		pages_moved += 1 << order;
2340 	}
2341 
2342 	return pages_moved;
2343 }
2344 
2345 int move_freepages_block(struct zone *zone, struct page *page,
2346 				int migratetype, int *num_movable)
2347 {
2348 	unsigned long start_pfn, end_pfn;
2349 	struct page *start_page, *end_page;
2350 
2351 	if (num_movable)
2352 		*num_movable = 0;
2353 
2354 	start_pfn = page_to_pfn(page);
2355 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
2356 	start_page = pfn_to_page(start_pfn);
2357 	end_page = start_page + pageblock_nr_pages - 1;
2358 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2359 
2360 	/* Do not cross zone boundaries */
2361 	if (!zone_spans_pfn(zone, start_pfn))
2362 		start_page = page;
2363 	if (!zone_spans_pfn(zone, end_pfn))
2364 		return 0;
2365 
2366 	return move_freepages(zone, start_page, end_page, migratetype,
2367 								num_movable);
2368 }
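
/*
 * Editorial sketch (not part of page_alloc.c): the pageblock bounds above
 * come from rounding the pfn down with the usual power-of-two mask. In
 * isolation (block_pages standing in for pageblock_nr_pages):
 */
static unsigned long model_pageblock_start(unsigned long pfn,
					   unsigned long block_pages)
{
	return pfn & ~(block_pages - 1);
}
/*
 * With block_pages = 512, pfn 1300 rounds down to 1024 and the block spans
 * pfns [1024, 1535], matching the start_page/end_page computed above. The
 * zone-span checks then guard against a block straddling a zone boundary.
 */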
2369 
2370 static void change_pageblock_range(struct page *pageblock_page,
2371 					int start_order, int migratetype)
2372 {
2373 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2374 
2375 	while (nr_pageblocks--) {
2376 		set_pageblock_migratetype(pageblock_page, migratetype);
2377 		pageblock_page += pageblock_nr_pages;
2378 	}
2379 }
2380 
2381 /*
2382  * When we are falling back to another migratetype during allocation, try to
2383  * steal extra free pages from the same pageblocks to satisfy further
2384  * allocations, instead of polluting multiple pageblocks.
2385  *
2386  * If we are stealing a relatively large buddy page, it is likely there will
2387  * be more free pages in the pageblock, so try to steal them all. For
2388  * reclaimable and unmovable allocations, we steal regardless of page size,
2389  * as fragmentation caused by those allocations polluting movable pageblocks
2390  * is worse than movable allocations stealing from unmovable and reclaimable
2391  * pageblocks.
2392  */
2393 static bool can_steal_fallback(unsigned int order, int start_mt)
2394 {
2395 	/*
2396 	 * This order check is intentionally kept even though the next
2397 	 * check uses a more relaxed order. The reason is that we can
2398 	 * steal a whole pageblock when this condition is met, whereas
2399 	 * the check below does not guarantee that; it is only a
2400 	 * heuristic and may be changed at any time.
2401 	 */
2402 	if (order >= pageblock_order)
2403 		return true;
2404 
2405 	if (order >= pageblock_order / 2 ||
2406 		start_mt == MIGRATE_RECLAIMABLE ||
2407 		start_mt == MIGRATE_UNMOVABLE ||
2408 		page_group_by_mobility_disabled)
2409 		return true;
2410 
2411 	return false;
2412 }
2413 
2414 static inline void boost_watermark(struct zone *zone)
2415 {
2416 	unsigned long max_boost;
2417 
2418 	if (!watermark_boost_factor)
2419 		return;
2420 	/*
2421 	 * Don't bother in zones that are unlikely to produce results.
2422 	 * On small machines, including kdump capture kernels running
2423 	 * in a small area, boosting the watermark can cause an out of
2424 	 * memory situation immediately.
2425 	 */
2426 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2427 		return;
2428 
2429 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2430 			watermark_boost_factor, 10000);
2431 
2432 	/*
2433 	 * The high watermark may be uninitialised if fragmentation
2434 	 * occurs very early in boot, so do not boost. We do not fall
2435 	 * through and boost by pageblock_nr_pages, as failing
2436 	 * allocations that early means that reclaim is not going to
2437 	 * help, and it may even be impossible to reclaim the boosted
2438 	 * watermark, resulting in a hang.
2439 	 */
2440 	if (!max_boost)
2441 		return;
2442 
2443 	max_boost = max(pageblock_nr_pages, max_boost);
2444 
2445 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2446 		max_boost);
2447 }
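
/*
 * Editorial sketch (not part of page_alloc.c): mult_frac(x, n, d) above
 * computes x * n / d without letting the intermediate x * n overflow, by
 * splitting x into its quotient and remainder with respect to d. A plain-C
 * model of that arithmetic:
 */
static unsigned long model_mult_frac(unsigned long x, unsigned long n,
				     unsigned long d)
{
	unsigned long quot = x / d;
	unsigned long rem = x % d;

	/* avoids forming the full x * n product; rem < d keeps rem * n small */
	return quot * n + rem * n / d;
}
/*
 * So with watermark_boost_factor = 15000, max_boost becomes
 * model_mult_frac(high_wmark, 15000, 10000), i.e. 150% of the high
 * watermark, before being clamped as above.
 */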
2448 
2449 /*
2450  * This function implements actual steal behaviour. If order is large enough,
2451  * we can steal whole pageblock. If not, we first move freepages in this
2452  * pageblock to our migratetype and determine how many already-allocated pages
2453  * are there in the pageblock with a compatible migratetype. If at least half
2454  * of pages are free or compatible, we can change migratetype of the pageblock
2455  * itself, so pages freed in the future will be put on the correct free list.
2456  */
2457 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2458 		unsigned int alloc_flags, int start_type, bool whole_block)
2459 {
2460 	unsigned int current_order = page_order(page);
2461 	int free_pages, movable_pages, alike_pages;
2462 	int old_block_type;
2463 
2464 	old_block_type = get_pageblock_migratetype(page);
2465 
2466 	/*
2467 	 * This can happen due to races and we want to prevent broken
2468 	 * highatomic accounting.
2469 	 */
2470 	if (is_migrate_highatomic(old_block_type))
2471 		goto single_page;
2472 
2473 	/* Take ownership for orders >= pageblock_order */
2474 	if (current_order >= pageblock_order) {
2475 		change_pageblock_range(page, current_order, start_type);
2476 		goto single_page;
2477 	}
2478 
2479 	/*
2480 	 * Boost watermarks to increase reclaim pressure to reduce the
2481 	 * likelihood of future fallbacks. Wake kswapd now as the node
2482 	 * may be balanced overall and kswapd will not wake naturally.
2483 	 */
2484 	boost_watermark(zone);
2485 	if (alloc_flags & ALLOC_KSWAPD)
2486 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2487 
2488 	/* We are not allowed to try stealing from the whole block */
2489 	if (!whole_block)
2490 		goto single_page;
2491 
2492 	free_pages = move_freepages_block(zone, page, start_type,
2493 						&movable_pages);
2494 	/*
2495 	 * Determine how many pages are compatible with our allocation.
2496 	 * For movable allocation, it's the number of movable pages which
2497 	 * we just obtained. For other types it's a bit more tricky.
2498 	 */
2499 	if (start_type == MIGRATE_MOVABLE) {
2500 		alike_pages = movable_pages;
2501 	} else {
2502 		/*
2503 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2504 		 * to MOVABLE pageblock, consider all non-movable pages as
2505 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2506 		 * vice versa, be conservative since we can't distinguish the
2507 		 * exact migratetype of non-movable pages.
2508 		 */
2509 		if (old_block_type == MIGRATE_MOVABLE)
2510 			alike_pages = pageblock_nr_pages
2511 						- (free_pages + movable_pages);
2512 		else
2513 			alike_pages = 0;
2514 	}
2515 
2516 	/* moving whole block can fail due to zone boundary conditions */
2517 	if (!free_pages)
2518 		goto single_page;
2519 
2520 	/*
2521 	 * If a sufficient number of pages in the block are either free or of
2522 	 * comparable migratability as our allocation, claim the whole block.
2523 	 */
2524 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2525 			page_group_by_mobility_disabled)
2526 		set_pageblock_migratetype(page, start_type);
2527 
2528 	return;
2529 
2530 single_page:
2531 	move_to_free_list(page, zone, current_order, start_type);
2532 }
2533 
2534 /*
2535  * Check whether there is a suitable fallback freepage with requested order.
2536  * If only_stealable is true, this function returns fallback_mt only if
2537  * we can steal the other freepages all together. This helps to reduce
2538  * fragmentation due to mixed migratetype pages in one pageblock.
2539  */
2540 int find_suitable_fallback(struct free_area *area, unsigned int order,
2541 			int migratetype, bool only_stealable, bool *can_steal)
2542 {
2543 	int i;
2544 	int fallback_mt;
2545 
2546 	if (area->nr_free == 0)
2547 		return -1;
2548 
2549 	*can_steal = false;
2550 	for (i = 0;; i++) {
2551 		fallback_mt = fallbacks[migratetype][i];
2552 		if (fallback_mt == MIGRATE_TYPES)
2553 			break;
2554 
2555 		if (free_area_empty(area, fallback_mt))
2556 			continue;
2557 
2558 		if (can_steal_fallback(order, migratetype))
2559 			*can_steal = true;
2560 
2561 		if (!only_stealable)
2562 			return fallback_mt;
2563 
2564 		if (*can_steal)
2565 			return fallback_mt;
2566 	}
2567 
2568 	return -1;
2569 }
2570 
2571 /*
2572  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2573  * there are no empty page blocks that contain a page with a suitable order.
2574  */
2575 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2576 				unsigned int alloc_order)
2577 {
2578 	int mt;
2579 	unsigned long max_managed, flags;
2580 
2581 	/*
2582 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2583 	 * The check is race-prone but harmless.
2584 	 */
2585 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2586 	if (zone->nr_reserved_highatomic >= max_managed)
2587 		return;
2588 
2589 	spin_lock_irqsave(&zone->lock, flags);
2590 
2591 	/* Recheck the nr_reserved_highatomic limit under the lock */
2592 	if (zone->nr_reserved_highatomic >= max_managed)
2593 		goto out_unlock;
2594 
2595 	/* Yoink! */
2596 	mt = get_pageblock_migratetype(page);
2597 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2598 	    && !is_migrate_cma(mt)) {
2599 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2600 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2601 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2602 	}
2603 
2604 out_unlock:
2605 	spin_unlock_irqrestore(&zone->lock, flags);
2606 }
2607 
2608 /*
2609  * Used when an allocation is about to fail under memory pressure. This
2610  * potentially hurts the reliability of high-order allocations when under
2611  * intense memory pressure but failed atomic allocations should be easier
2612  * to recover from than an OOM.
2613  *
2614  * If @force is true, try to unreserve a pageblock even though highatomic
2615  * pageblock is exhausted.
2616  */
2617 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2618 						bool force)
2619 {
2620 	struct zonelist *zonelist = ac->zonelist;
2621 	unsigned long flags;
2622 	struct zoneref *z;
2623 	struct zone *zone;
2624 	struct page *page;
2625 	int order;
2626 	bool ret;
2627 
2628 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2629 								ac->nodemask) {
2630 		/*
2631 		 * Preserve at least one pageblock unless memory pressure
2632 		 * is really high.
2633 		 */
2634 		if (!force && zone->nr_reserved_highatomic <=
2635 					pageblock_nr_pages)
2636 			continue;
2637 
2638 		spin_lock_irqsave(&zone->lock, flags);
2639 		for (order = 0; order < MAX_ORDER; order++) {
2640 			struct free_area *area = &(zone->free_area[order]);
2641 
2642 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2643 			if (!page)
2644 				continue;
2645 
2646 			/*
2647 			 * In the page freeing path, the migratetype change is
2648 			 * racy, so we can encounter several free pages in a
2649 			 * pageblock in this loop although we changed the
2650 			 * pageblock type from highatomic to ac->migratetype.
2651 			 * So we should adjust the count only once.
2652 			 */
2653 			if (is_migrate_highatomic_page(page)) {
2654 				/*
2655 				 * It should never happen but changes to
2656 				 * locking could inadvertently allow a per-cpu
2657 				 * drain to add pages to MIGRATE_HIGHATOMIC
2658 				 * while unreserving so be safe and watch for
2659 				 * underflows.
2660 				 */
2661 				zone->nr_reserved_highatomic -= min(
2662 						pageblock_nr_pages,
2663 						zone->nr_reserved_highatomic);
2664 			}
2665 
2666 			/*
2667 			 * Convert to ac->migratetype and avoid the normal
2668 			 * pageblock stealing heuristics. Minimally, the caller
2669 			 * is doing the work and needs the pages. More
2670 			 * importantly, if the block were always converted to
2671 			 * MIGRATE_UNMOVABLE or another fixed type, the number
2672 			 * of pageblocks that cannot be completely freed
2673 			 * could increase.
2674 			 */
2675 			set_pageblock_migratetype(page, ac->migratetype);
2676 			ret = move_freepages_block(zone, page, ac->migratetype,
2677 									NULL);
2678 			if (ret) {
2679 				spin_unlock_irqrestore(&zone->lock, flags);
2680 				return ret;
2681 			}
2682 		}
2683 		spin_unlock_irqrestore(&zone->lock, flags);
2684 	}
2685 
2686 	return false;
2687 }
2688 
2689 /*
2690  * Try finding a free buddy page on the fallback list and put it on the free
2691  * list of requested migratetype, possibly along with other pages from the same
2692  * block, depending on fragmentation avoidance heuristics. Returns true if
2693  * fallback was found so that __rmqueue_smallest() can grab it.
2694  *
2695  * The use of signed ints for order and current_order is a deliberate
2696  * deviation from the rest of this file, to make the for loop
2697  * condition simpler.
2698  */
2699 static __always_inline bool
2700 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2701 						unsigned int alloc_flags)
2702 {
2703 	struct free_area *area;
2704 	int current_order;
2705 	int min_order = order;
2706 	struct page *page;
2707 	int fallback_mt;
2708 	bool can_steal;
2709 
2710 	/*
2711 	 * Do not steal pages from freelists belonging to other pageblocks
2712 	 * i.e. orders < pageblock_order. If there are no local zones free,
2713 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2714 	 */
2715 	if (alloc_flags & ALLOC_NOFRAGMENT)
2716 		min_order = pageblock_order;
2717 
2718 	/*
2719 	 * Find the largest available free page in the other list. This roughly
2720 	 * approximates finding the pageblock with the most free pages, which
2721 	 * would be too costly to do exactly.
2722 	 */
2723 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2724 				--current_order) {
2725 		area = &(zone->free_area[current_order]);
2726 		fallback_mt = find_suitable_fallback(area, current_order,
2727 				start_migratetype, false, &can_steal);
2728 		if (fallback_mt == -1)
2729 			continue;
2730 
2731 		/*
2732 		 * We cannot steal all free pages from the pageblock and the
2733 		 * requested migratetype is movable. In that case it's better to
2734 		 * steal and split the smallest available page instead of the
2735 		 * largest available page, because even if the next movable
2736 		 * allocation falls back into a different pageblock than this
2737 		 * one, it won't cause permanent fragmentation.
2738 		 */
2739 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2740 					&& current_order > order)
2741 			goto find_smallest;
2742 
2743 		goto do_steal;
2744 	}
2745 
2746 	return false;
2747 
2748 find_smallest:
2749 	for (current_order = order; current_order < MAX_ORDER;
2750 							current_order++) {
2751 		area = &(zone->free_area[current_order]);
2752 		fallback_mt = find_suitable_fallback(area, current_order,
2753 				start_migratetype, false, &can_steal);
2754 		if (fallback_mt != -1)
2755 			break;
2756 	}
2757 
2758 	/*
2759 	 * This should not happen - we already found a suitable fallback
2760 	 * when looking for the largest page.
2761 	 */
2762 	VM_BUG_ON(current_order == MAX_ORDER);
2763 
2764 do_steal:
2765 	page = get_page_from_free_area(area, fallback_mt);
2766 
2767 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2768 								can_steal);
2769 
2770 	trace_mm_page_alloc_extfrag(page, order, current_order,
2771 		start_migratetype, fallback_mt);
2772 
2773 	return true;
2774 
2775 }
2776 
2777 /*
2778  * Do the hard work of removing an element from the buddy allocator.
2779  * Call me with the zone->lock already held.
2780  */
2781 static __always_inline struct page *
2782 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2783 						unsigned int alloc_flags)
2784 {
2785 	struct page *page;
2786 
2787 #ifdef CONFIG_CMA
2788 	/*
2789 	 * Balance movable allocations between regular and CMA areas by
2790 	 * allocating from CMA when over half of the zone's free memory
2791 	 * is in the CMA area.
2792 	 */
2793 	if (migratetype == MIGRATE_MOVABLE &&
2794 	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2795 	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2796 		page = __rmqueue_cma_fallback(zone, order);
2797 		if (page)
2798 			return page;
2799 	}
2800 #endif
2801 retry:
2802 	page = __rmqueue_smallest(zone, order, migratetype);
2803 	if (unlikely(!page)) {
2804 		if (migratetype == MIGRATE_MOVABLE)
2805 			page = __rmqueue_cma_fallback(zone, order);
2806 
2807 		if (!page && __rmqueue_fallback(zone, order, migratetype,
2808 								alloc_flags))
2809 			goto retry;
2810 	}
2811 
2812 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
2813 	return page;
2814 }
2815 
2816 /*
2817  * Obtain a specified number of elements from the buddy allocator, all under
2818  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2819  * Returns the number of new pages which were placed at *list.
2820  */
2821 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2822 			unsigned long count, struct list_head *list,
2823 			int migratetype, unsigned int alloc_flags)
2824 {
2825 	int i, alloced = 0;
2826 
2827 	spin_lock(&zone->lock);
2828 	for (i = 0; i < count; ++i) {
2829 		struct page *page = __rmqueue(zone, order, migratetype,
2830 								alloc_flags);
2831 		if (unlikely(page == NULL))
2832 			break;
2833 
2834 		if (unlikely(check_pcp_refill(page)))
2835 			continue;
2836 
2837 		/*
2838 		 * Split buddy pages returned by expand() are received here in
2839 		 * physical page order. Each page is added to the tail of the
2840 		 * caller's list, so from the caller's perspective the linked
2841 		 * list is, under some conditions, ordered by page number.
2842 		 * This is useful for IO devices that can only move forward
2843 		 * from the head, and thus see the pages in physical order;
2844 		 * such devices can merge IO requests if the physical pages
2845 		 * are ordered properly.
2846 		 */
2847 		list_add_tail(&page->lru, list);
2848 		alloced++;
2849 		if (is_migrate_cma(get_pcppage_migratetype(page)))
2850 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2851 					      -(1 << order));
2852 	}
2853 
2854 	/*
2855 	 * i pages were removed from the buddy list even if some leaked due
2856 	 * to check_pcp_refill failing, so adjust NR_FREE_PAGES based
2857 	 * on i. Do not confuse this with 'alloced', which is the number of
2858 	 * pages actually added to the pcp list.
2859 	 */
2860 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2861 	spin_unlock(&zone->lock);
2862 	return alloced;
2863 }
2864 
2865 #ifdef CONFIG_NUMA
2866 /*
2867  * Called from the vmstat counter updater to drain pagesets of this
2868  * currently executing processor on remote nodes after they have
2869  * expired.
2870  *
2871  * Note that this function must be called with the thread pinned to
2872  * a single processor.
2873  */
2874 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2875 {
2876 	unsigned long flags;
2877 	int to_drain, batch;
2878 
2879 	local_irq_save(flags);
2880 	batch = READ_ONCE(pcp->batch);
2881 	to_drain = min(pcp->count, batch);
2882 	if (to_drain > 0)
2883 		free_pcppages_bulk(zone, to_drain, pcp);
2884 	local_irq_restore(flags);
2885 }
2886 #endif
2887 
2888 /*
2889  * Drain pcplists of the indicated processor and zone.
2890  *
2891  * The processor must either be the current processor and the
2892  * thread pinned to the current processor or a processor that
2893  * is not online.
2894  */
2895 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2896 {
2897 	unsigned long flags;
2898 	struct per_cpu_pageset *pset;
2899 	struct per_cpu_pages *pcp;
2900 
2901 	local_irq_save(flags);
2902 	pset = per_cpu_ptr(zone->pageset, cpu);
2903 
2904 	pcp = &pset->pcp;
2905 	if (pcp->count)
2906 		free_pcppages_bulk(zone, pcp->count, pcp);
2907 	local_irq_restore(flags);
2908 }
2909 
2910 /*
2911  * Drain pcplists of all zones on the indicated processor.
2912  *
2913  * The processor must either be the current processor and the
2914  * thread pinned to the current processor or a processor that
2915  * is not online.
2916  */
2917 static void drain_pages(unsigned int cpu)
2918 {
2919 	struct zone *zone;
2920 
2921 	for_each_populated_zone(zone) {
2922 		drain_pages_zone(cpu, zone);
2923 	}
2924 }
2925 
2926 /*
2927  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2928  *
2929  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2930  * the single zone's pages.
2931  */
2932 void drain_local_pages(struct zone *zone)
2933 {
2934 	int cpu = smp_processor_id();
2935 
2936 	if (zone)
2937 		drain_pages_zone(cpu, zone);
2938 	else
2939 		drain_pages(cpu);
2940 }
2941 
2942 static void drain_local_pages_wq(struct work_struct *work)
2943 {
2944 	struct pcpu_drain *drain;
2945 
2946 	drain = container_of(work, struct pcpu_drain, work);
2947 
2948 	/*
2949 	 * drain_all_pages doesn't use proper cpu hotplug protection so
2950 	 * we can race with cpu offline when the WQ can move this from
2951 	 * a cpu-pinned worker to an unbound one. Operating on a different
2952 	 * cpu is all right, but we also have to make sure not to move to
2953 	 * a different one mid-drain, hence the preempt_disable() below.
2954 	 */
2955 	preempt_disable();
2956 	drain_local_pages(drain->zone);
2957 	preempt_enable();
2958 }
2959 
2960 /*
2961  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2962  *
2963  * When zone parameter is non-NULL, spill just the single zone's pages.
2964  *
2965  * Note that this can be extremely slow as the draining happens in a workqueue.
2966  */
2967 void drain_all_pages(struct zone *zone)
2968 {
2969 	int cpu;
2970 
2971 	/*
2972 	 * Allocate in the BSS so we won't require allocation in
2973 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2974 	 */
2975 	static cpumask_t cpus_with_pcps;
2976 
2977 	/*
2978 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
2979 	 * initialized.
2980 	 */
2981 	if (WARN_ON_ONCE(!mm_percpu_wq))
2982 		return;
2983 
2984 	/*
2985 	 * Do not drain if one is already in progress unless it's specific to
2986 	 * a zone. Such callers are primarily CMA and memory hotplug and need
2987 	 * the drain to be complete when the call returns.
2988 	 */
2989 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2990 		if (!zone)
2991 			return;
2992 		mutex_lock(&pcpu_drain_mutex);
2993 	}
2994 
2995 	/*
2996 	 * We don't care about racing with a CPU hotplug event,
2997 	 * as the offline notification will cause the notified
2998 	 * cpu to drain its pcps, and on_each_cpu_mask
2999 	 * disables preemption as part of its processing.
3000 	 */
3001 	for_each_online_cpu(cpu) {
3002 		struct per_cpu_pageset *pcp;
3003 		struct zone *z;
3004 		bool has_pcps = false;
3005 
3006 		if (zone) {
3007 			pcp = per_cpu_ptr(zone->pageset, cpu);
3008 			if (pcp->pcp.count)
3009 				has_pcps = true;
3010 		} else {
3011 			for_each_populated_zone(z) {
3012 				pcp = per_cpu_ptr(z->pageset, cpu);
3013 				if (pcp->pcp.count) {
3014 					has_pcps = true;
3015 					break;
3016 				}
3017 			}
3018 		}
3019 
3020 		if (has_pcps)
3021 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3022 		else
3023 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3024 	}
3025 
3026 	for_each_cpu(cpu, &cpus_with_pcps) {
3027 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3028 
3029 		drain->zone = zone;
3030 		INIT_WORK(&drain->work, drain_local_pages_wq);
3031 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3032 	}
3033 	for_each_cpu(cpu, &cpus_with_pcps)
3034 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3035 
3036 	mutex_unlock(&pcpu_drain_mutex);
3037 }
3038 
3039 #ifdef CONFIG_HIBERNATION
3040 
3041 /*
3042  * Touch the watchdog for every WD_PAGE_COUNT pages.
3043  */
3044 #define WD_PAGE_COUNT	(128*1024)
3045 
3046 void mark_free_pages(struct zone *zone)
3047 {
3048 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3049 	unsigned long flags;
3050 	unsigned int order, t;
3051 	struct page *page;
3052 
3053 	if (zone_is_empty(zone))
3054 		return;
3055 
3056 	spin_lock_irqsave(&zone->lock, flags);
3057 
3058 	max_zone_pfn = zone_end_pfn(zone);
3059 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3060 		if (pfn_valid(pfn)) {
3061 			page = pfn_to_page(pfn);
3062 
3063 			if (!--page_count) {
3064 				touch_nmi_watchdog();
3065 				page_count = WD_PAGE_COUNT;
3066 			}
3067 
3068 			if (page_zone(page) != zone)
3069 				continue;
3070 
3071 			if (!swsusp_page_is_forbidden(page))
3072 				swsusp_unset_page_free(page);
3073 		}
3074 
3075 	for_each_migratetype_order(order, t) {
3076 		list_for_each_entry(page,
3077 				&zone->free_area[order].free_list[t], lru) {
3078 			unsigned long i;
3079 
3080 			pfn = page_to_pfn(page);
3081 			for (i = 0; i < (1UL << order); i++) {
3082 				if (!--page_count) {
3083 					touch_nmi_watchdog();
3084 					page_count = WD_PAGE_COUNT;
3085 				}
3086 				swsusp_set_page_free(pfn_to_page(pfn + i));
3087 			}
3088 		}
3089 	}
3090 	spin_unlock_irqrestore(&zone->lock, flags);
3091 }
3092 #endif /* CONFIG_HIBERNATION */
3093 
3094 static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
3095 {
3096 	int migratetype;
3097 
3098 	if (!free_pcp_prepare(page))
3099 		return false;
3100 
3101 	migratetype = get_pfnblock_migratetype(page, pfn);
3102 	set_pcppage_migratetype(page, migratetype);
3103 	return true;
3104 }
3105 
3106 static void free_unref_page_commit(struct page *page, unsigned long pfn)
3107 {
3108 	struct zone *zone = page_zone(page);
3109 	struct per_cpu_pages *pcp;
3110 	int migratetype;
3111 
3112 	migratetype = get_pcppage_migratetype(page);
3113 	__count_vm_event(PGFREE);
3114 
3115 	/*
3116 	 * We only track unmovable, reclaimable and movable on pcp lists.
3117 	 * Free ISOLATE pages back to the allocator because they are being
3118 	 * offlined, but treat HIGHATOMIC as movable so we can get those
3119 	 * areas back if necessary; otherwise, we may have to free
3120 	 * excessively into the page allocator.
3121 	 */
3122 	if (migratetype >= MIGRATE_PCPTYPES) {
3123 		if (unlikely(is_migrate_isolate(migratetype))) {
3124 			free_one_page(zone, page, pfn, 0, migratetype);
3125 			return;
3126 		}
3127 		migratetype = MIGRATE_MOVABLE;
3128 	}
3129 
3130 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3131 	list_add(&page->lru, &pcp->lists[migratetype]);
3132 	pcp->count++;
3133 	if (pcp->count >= pcp->high) {
3134 		unsigned long batch = READ_ONCE(pcp->batch);
3135 		free_pcppages_bulk(zone, batch, pcp);
3136 	}
3137 }
3138 
3139 /*
3140  * Free a 0-order page
3141  */
3142 void free_unref_page(struct page *page)
3143 {
3144 	unsigned long flags;
3145 	unsigned long pfn = page_to_pfn(page);
3146 
3147 	if (!free_unref_page_prepare(page, pfn))
3148 		return;
3149 
3150 	local_irq_save(flags);
3151 	free_unref_page_commit(page, pfn);
3152 	local_irq_restore(flags);
3153 }
3154 
3155 /*
3156  * Free a list of 0-order pages
3157  */
3158 void free_unref_page_list(struct list_head *list)
3159 {
3160 	struct page *page, *next;
3161 	unsigned long flags, pfn;
3162 	int batch_count = 0;
3163 
3164 	/* Prepare pages for freeing */
3165 	list_for_each_entry_safe(page, next, list, lru) {
3166 		pfn = page_to_pfn(page);
3167 		if (!free_unref_page_prepare(page, pfn))
3168 			list_del(&page->lru);
3169 		set_page_private(page, pfn);
3170 	}
3171 
3172 	local_irq_save(flags);
3173 	list_for_each_entry_safe(page, next, list, lru) {
3174 		unsigned long pfn = page_private(page);
3175 
3176 		set_page_private(page, 0);
3177 		trace_mm_page_free_batched(page);
3178 		free_unref_page_commit(page, pfn);
3179 
3180 		/*
3181 		 * Guard against excessive IRQ disabled times when we get
3182 		 * a large list of pages to free.
3183 		 */
3184 		if (++batch_count == SWAP_CLUSTER_MAX) {
3185 			local_irq_restore(flags);
3186 			batch_count = 0;
3187 			local_irq_save(flags);
3188 		}
3189 	}
3190 	local_irq_restore(flags);
3191 }
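
/*
 * Editorial sketch (not part of page_alloc.c): the SWAP_CLUSTER_MAX batching
 * above is a general pattern for bounding how long interrupts (or a lock)
 * stay disabled while walking a long list: drop and retake the protection
 * every BATCH items. A standalone model with caller-supplied lock hooks:
 */
#define MODEL_BATCH 32	/* stand-in for SWAP_CLUSTER_MAX */

static void model_process_batched(int *vals, int n,
				  void (*lock)(void), void (*unlock)(void))
{
	int batch_count = 0;
	int i;

	lock();
	for (i = 0; i < n; i++) {
		vals[i] *= 2;		/* stand-in for the real work */

		/* briefly open a window for pending interrupts/waiters */
		if (++batch_count == MODEL_BATCH) {
			unlock();
			batch_count = 0;
			lock();
		}
	}
	unlock();
}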
3192 
3193 /*
3194  * split_page takes a non-compound higher-order page, and splits it into
3195  * n (1<<order) sub-pages: page[0..n-1]
3196  * Each sub-page must be freed individually.
3197  *
3198  * Note: this is probably too low level an operation for use in drivers.
3199  * Please consult with lkml before using this in your driver.
3200  */
3201 void split_page(struct page *page, unsigned int order)
3202 {
3203 	int i;
3204 
3205 	VM_BUG_ON_PAGE(PageCompound(page), page);
3206 	VM_BUG_ON_PAGE(!page_count(page), page);
3207 
3208 	for (i = 1; i < (1 << order); i++)
3209 		set_page_refcounted(page + i);
3210 	split_page_owner(page, order);
3211 }
3212 EXPORT_SYMBOL_GPL(split_page);
3213 
3214 int __isolate_free_page(struct page *page, unsigned int order)
3215 {
3216 	unsigned long watermark;
3217 	struct zone *zone;
3218 	int mt;
3219 
3220 	BUG_ON(!PageBuddy(page));
3221 
3222 	zone = page_zone(page);
3223 	mt = get_pageblock_migratetype(page);
3224 
3225 	if (!is_migrate_isolate(mt)) {
3226 		/*
3227 		 * Obey watermarks as if the page was being allocated. We can
3228 		 * emulate a high-order watermark check with a raised order-0
3229 		 * watermark, because we already know our high-order page
3230 		 * exists.
3231 		 */
3232 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3233 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3234 			return 0;
3235 
3236 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3237 	}
3238 
3239 	/* Remove page from free list */
3241 	del_page_from_free_list(page, zone, order);
3242 
3243 	/*
3244 	 * Set the pageblock's migratetype to MIGRATE_MOVABLE if the isolated
3245 	 * page covers at least half of a pageblock.
3246 	 */
3247 	if (order >= pageblock_order - 1) {
3248 		struct page *endpage = page + (1 << order) - 1;
3249 		for (; page < endpage; page += pageblock_nr_pages) {
3250 			int mt = get_pageblock_migratetype(page);
3251 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3252 			    && !is_migrate_highatomic(mt))
3253 				set_pageblock_migratetype(page,
3254 							  MIGRATE_MOVABLE);
3255 		}
3256 	}
3257 
3259 	return 1UL << order;
3260 }
3261 
3262 /**
3263  * __putback_isolated_page - Return a now-isolated page back where we got it
3264  * @page: Page that was isolated
3265  * @order: Order of the isolated page
3266  * @mt: The page's pageblock's migratetype
3267  *
3268  * This function is meant to return a page pulled from the free lists via
3269  * __isolate_free_page back to the free lists they were pulled from.
3270  */
3271 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3272 {
3273 	struct zone *zone = page_zone(page);
3274 
3275 	/* zone lock should be held when this function is called */
3276 	lockdep_assert_held(&zone->lock);
3277 
3278 	/* Return isolated page to tail of freelist. */
3279 	__free_one_page(page, page_to_pfn(page), zone, order, mt, false);
3280 }
3281 
3282 /*
3283  * Update NUMA hit/miss statistics
3284  *
3285  * Must be called with interrupts disabled.
3286  */
3287 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3288 {
3289 #ifdef CONFIG_NUMA
3290 	enum numa_stat_item local_stat = NUMA_LOCAL;
3291 
3292 	/* skip numa counters update if numa stats is disabled */
3293 	if (!static_branch_likely(&vm_numa_stat_key))
3294 		return;
3295 
3296 	if (zone_to_nid(z) != numa_node_id())
3297 		local_stat = NUMA_OTHER;
3298 
3299 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3300 		__inc_numa_state(z, NUMA_HIT);
3301 	else {
3302 		__inc_numa_state(z, NUMA_MISS);
3303 		__inc_numa_state(preferred_zone, NUMA_FOREIGN);
3304 	}
3305 	__inc_numa_state(z, local_stat);
3306 #endif
3307 }
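
/*
 * Editorial sketch (not part of page_alloc.c): the counter selection above,
 * reduced to its decision table. Node ids here are illustrative ints.
 */
#include <stdio.h>

static void model_numa_stats(int alloc_nid, int pref_nid, int local_nid)
{
	/* served by the preferred node: HIT; otherwise the serving node
	 * records MISS and the preferred node records FOREIGN */
	if (alloc_nid == pref_nid)
		printf("NUMA_HIT on node %d\n", alloc_nid);
	else
		printf("NUMA_MISS on %d, NUMA_FOREIGN on %d\n",
		       alloc_nid, pref_nid);

	/* separately: did the allocation land on the caller's own node? */
	printf("%s on node %d\n",
	       alloc_nid == local_nid ? "NUMA_LOCAL" : "NUMA_OTHER",
	       alloc_nid);
}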
3308 
3309 /* Remove page from the per-cpu list, caller must protect the list */
3310 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3311 			unsigned int alloc_flags,
3312 			struct per_cpu_pages *pcp,
3313 			struct list_head *list)
3314 {
3315 	struct page *page;
3316 
3317 	do {
3318 		if (list_empty(list)) {
3319 			pcp->count += rmqueue_bulk(zone, 0,
3320 					pcp->batch, list,
3321 					migratetype, alloc_flags);
3322 			if (unlikely(list_empty(list)))
3323 				return NULL;
3324 		}
3325 
3326 		page = list_first_entry(list, struct page, lru);
3327 		list_del(&page->lru);
3328 		pcp->count--;
3329 	} while (check_new_pcp(page));
3330 
3331 	return page;
3332 }
3333 
3334 /* Lock and remove page from the per-cpu list */
3335 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3336 			struct zone *zone, gfp_t gfp_flags,
3337 			int migratetype, unsigned int alloc_flags)
3338 {
3339 	struct per_cpu_pages *pcp;
3340 	struct list_head *list;
3341 	struct page *page;
3342 	unsigned long flags;
3343 
3344 	local_irq_save(flags);
3345 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3346 	list = &pcp->lists[migratetype];
3347 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3348 	if (page) {
3349 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3350 		zone_statistics(preferred_zone, zone);
3351 	}
3352 	local_irq_restore(flags);
3353 	return page;
3354 }
3355 
3356 /*
3357  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3358  */
3359 static inline
3360 struct page *rmqueue(struct zone *preferred_zone,
3361 			struct zone *zone, unsigned int order,
3362 			gfp_t gfp_flags, unsigned int alloc_flags,
3363 			int migratetype)
3364 {
3365 	unsigned long flags;
3366 	struct page *page;
3367 
3368 	if (likely(order == 0)) {
3369 		page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3370 					migratetype, alloc_flags);
3371 		goto out;
3372 	}
3373 
3374 	/*
3375 	 * We most definitely don't want callers attempting to
3376 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3377 	 */
3378 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3379 	spin_lock_irqsave(&zone->lock, flags);
3380 
3381 	do {
3382 		page = NULL;
3383 		if (alloc_flags & ALLOC_HARDER) {
3384 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3385 			if (page)
3386 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3387 		}
3388 		if (!page)
3389 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3390 	} while (page && check_new_pages(page, order));
3391 	spin_unlock(&zone->lock);
3392 	if (!page)
3393 		goto failed;
3394 	__mod_zone_freepage_state(zone, -(1 << order),
3395 				  get_pcppage_migratetype(page));
3396 
3397 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3398 	zone_statistics(preferred_zone, zone);
3399 	local_irq_restore(flags);
3400 
3401 out:
3402 	/* Separate test+clear to avoid unnecessary atomics */
3403 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3404 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3405 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3406 	}
3407 
3408 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3409 	return page;
3410 
3411 failed:
3412 	local_irq_restore(flags);
3413 	return NULL;
3414 }
3415 
3416 #ifdef CONFIG_FAIL_PAGE_ALLOC
3417 
3418 static struct {
3419 	struct fault_attr attr;
3420 
3421 	bool ignore_gfp_highmem;
3422 	bool ignore_gfp_reclaim;
3423 	u32 min_order;
3424 } fail_page_alloc = {
3425 	.attr = FAULT_ATTR_INITIALIZER,
3426 	.ignore_gfp_reclaim = true,
3427 	.ignore_gfp_highmem = true,
3428 	.min_order = 1,
3429 };
3430 
3431 static int __init setup_fail_page_alloc(char *str)
3432 {
3433 	return setup_fault_attr(&fail_page_alloc.attr, str);
3434 }
3435 __setup("fail_page_alloc=", setup_fail_page_alloc);
3436 
3437 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3438 {
3439 	if (order < fail_page_alloc.min_order)
3440 		return false;
3441 	if (gfp_mask & __GFP_NOFAIL)
3442 		return false;
3443 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3444 		return false;
3445 	if (fail_page_alloc.ignore_gfp_reclaim &&
3446 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3447 		return false;
3448 
3449 	return should_fail(&fail_page_alloc.attr, 1 << order);
3450 }
3451 
3452 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3453 
3454 static int __init fail_page_alloc_debugfs(void)
3455 {
3456 	umode_t mode = S_IFREG | 0600;
3457 	struct dentry *dir;
3458 
3459 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3460 					&fail_page_alloc.attr);
3461 
3462 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3463 			    &fail_page_alloc.ignore_gfp_reclaim);
3464 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3465 			    &fail_page_alloc.ignore_gfp_highmem);
3466 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3467 
3468 	return 0;
3469 }
3470 
3471 late_initcall(fail_page_alloc_debugfs);
3472 
3473 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3474 
3475 #else /* CONFIG_FAIL_PAGE_ALLOC */
3476 
3477 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3478 {
3479 	return false;
3480 }
3481 
3482 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3483 
3484 static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3485 {
3486 	return __should_fail_alloc_page(gfp_mask, order);
3487 }
3488 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3489 
3490 /*
3491  * Return true if free base pages are above 'mark'. For high-order checks it
3492  * will return true if the order-0 watermark is met and there is at least
3493  * one free page of a suitable size. Checking now avoids taking the zone lock
3494  * to check in the allocation paths if no pages are free.
3495  */
3496 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3497 			 int highest_zoneidx, unsigned int alloc_flags,
3498 			 long free_pages)
3499 {
3500 	long min = mark;
3501 	int o;
3502 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3503 
3504 	/* free_pages may go negative - that's OK */
3505 	free_pages -= (1 << order) - 1;
3506 
3507 	if (alloc_flags & ALLOC_HIGH)
3508 		min -= min / 2;
3509 
3510 	/*
3511 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3512 	 * the high-atomic reserves. This will over-estimate the size of the
3513 	 * atomic reserve but it avoids a search.
3514 	 */
3515 	if (likely(!alloc_harder)) {
3516 		free_pages -= z->nr_reserved_highatomic;
3517 	} else {
3518 		/*
3519 		 * OOM victims can try even harder than normal ALLOC_HARDER
3520 		 * users on the grounds that it's definitely going to be in
3521 		 * the exit path shortly and free memory. Any allocation it
3522 		 * makes during the free path will be small and short-lived.
3523 		 */
3524 		if (alloc_flags & ALLOC_OOM)
3525 			min -= min / 2;
3526 		else
3527 			min -= min / 4;
3528 	}
3529 
3530 
3531 #ifdef CONFIG_CMA
3532 	/* If allocation can't use CMA areas don't use free CMA pages */
3533 	if (!(alloc_flags & ALLOC_CMA))
3534 		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3535 #endif
3536 
3537 	/*
3538 	 * Check watermarks for an order-0 allocation request. If these
3539 	 * are not met, then a high-order request also cannot go ahead
3540 	 * even if a suitable page happened to be free.
3541 	 */
3542 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3543 		return false;
3544 
3545 	/* If this is an order-0 request then the watermark is fine */
3546 	if (!order)
3547 		return true;
3548 
3549 	/* For a high-order request, check that at least one suitable page is free */
3550 	for (o = order; o < MAX_ORDER; o++) {
3551 		struct free_area *area = &z->free_area[o];
3552 		int mt;
3553 
3554 		if (!area->nr_free)
3555 			continue;
3556 
3557 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3558 			if (!free_area_empty(area, mt))
3559 				return true;
3560 		}
3561 
3562 #ifdef CONFIG_CMA
3563 		if ((alloc_flags & ALLOC_CMA) &&
3564 		    !free_area_empty(area, MIGRATE_CMA)) {
3565 			return true;
3566 		}
3567 #endif
3568 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3569 			return true;
3570 	}
3571 	return false;
3572 }
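
/*
 * Worked example for the check above (illustrative numbers only, not taken
 * from any real configuration): with mark = 1024 and ALLOC_HIGH, min drops
 * to 512. An order-3 request first discounts (1 << 3) - 1 = 7 pages from
 * free_pages, so with lowmem_reserve = 256 the order-0 gate requires
 * free_pages - 7 > 512 + 256, i.e. at least 776 free base pages, before
 * the per-order free_area scan runs.
 */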
3573 
3574 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3575 		      int highest_zoneidx, unsigned int alloc_flags)
3576 {
3577 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3578 					zone_page_state(z, NR_FREE_PAGES));
3579 }
3580 
3581 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3582 				unsigned long mark, int highest_zoneidx,
3583 				unsigned int alloc_flags)
3584 {
3585 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3586 	long cma_pages = 0;
3587 
3588 #ifdef CONFIG_CMA
3589 	/* If allocation can't use CMA areas don't use free CMA pages */
3590 	if (!(alloc_flags & ALLOC_CMA))
3591 		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3592 #endif
3593 
3594 	/*
3595 	 * Fast check for order-0 only. If this fails then the reserves
3596 	 * need to be calculated. There is a corner case where the check
3597 	 * passes but only the high-order atomic reserves are free. If
3598 	 * the caller is !atomic then it'll uselessly search the free
3599 	 * list. That corner case is then slower but it is harmless.
3600 	 */
3601 	if (!order && (free_pages - cma_pages) >
3602 				mark + z->lowmem_reserve[highest_zoneidx])
3603 		return true;
3604 
3605 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3606 					free_pages);
3607 }
3608 
3609 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3610 			unsigned long mark, int highest_zoneidx)
3611 {
3612 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3613 
3614 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3615 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3616 
3617 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3618 								free_pages);
3619 }
3620 
3621 #ifdef CONFIG_NUMA
3622 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3623 {
3624 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3625 				node_reclaim_distance;
3626 }
3627 #else	/* CONFIG_NUMA */
3628 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3629 {
3630 	return true;
3631 }
3632 #endif	/* CONFIG_NUMA */
3633 
3634 /*
3635  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3636  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3637  * premature use of a lower zone may cause lowmem pressure problems that
3638  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3639  * probably too small. It only makes sense to spread allocations to avoid
3640  * fragmentation between the Normal and DMA32 zones.
3641  */
3642 static inline unsigned int
3643 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3644 {
3645 	unsigned int alloc_flags;
3646 
3647 	/*
3648 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3649 	 * to save a branch.
3650 	 */
3651 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3652 
3653 #ifdef CONFIG_ZONE_DMA32
3654 	if (!zone)
3655 		return alloc_flags;
3656 
3657 	if (zone_idx(zone) != ZONE_NORMAL)
3658 		return alloc_flags;
3659 
3660 	/*
3661 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3662 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3663 	 * on UMA that if Normal is populated then so is DMA32.
3664 	 */
3665 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3666 	if (nr_online_nodes > 1 && !populated_zone(--zone))
3667 		return alloc_flags;
3668 
3669 	alloc_flags |= ALLOC_NOFRAGMENT;
3670 #endif /* CONFIG_ZONE_DMA32 */
3671 	return alloc_flags;
3672 }
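
/*
 * Worked example (assuming a single-node x86-64 layout where ZONE_DMA32 is
 * populated, as the comment above assumes for UMA): a GFP_KERNEL request
 * whose preferred zone is ZONE_NORMAL comes out with
 * ALLOC_KSWAPD | ALLOC_NOFRAGMENT, so the first pass may spill into DMA32
 * instead of stealing pageblocks of another migratetype, while a
 * GFP_HIGHUSER_MOVABLE request (preferred zone ZONE_MOVABLE) only gets
 * ALLOC_KSWAPD.
 */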
3673 
3674 /*
3675  * get_page_from_freelist goes through the zonelist trying to allocate
3676  * a page.
3677  */
3678 static struct page *
3679 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3680 						const struct alloc_context *ac)
3681 {
3682 	struct zoneref *z;
3683 	struct zone *zone;
3684 	struct pglist_data *last_pgdat_dirty_limit = NULL;
3685 	bool no_fallback;
3686 
3687 retry:
3688 	/*
3689 	 * Scan zonelist, looking for a zone with enough free pages.
3690 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3691 	 */
3692 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3693 	z = ac->preferred_zoneref;
3694 	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist,
3695 					ac->highest_zoneidx, ac->nodemask) {
3696 		struct page *page;
3697 		unsigned long mark;
3698 
3699 		if (cpusets_enabled() &&
3700 			(alloc_flags & ALLOC_CPUSET) &&
3701 			!__cpuset_zone_allowed(zone, gfp_mask))
3702 				continue;
3703 		/*
3704 		 * When allocating a page cache page for writing, we
3705 		 * want to get it from a node that is within its dirty
3706 		 * limit, such that no single node holds more than its
3707 		 * proportional share of globally allowed dirty pages.
3708 		 * The dirty limits take into account the node's
3709 		 * lowmem reserves and high watermark so that kswapd
3710 		 * should be able to balance it without having to
3711 		 * write pages from its LRU list.
3712 		 *
3713 		 * XXX: For now, allow allocations to potentially
3714 		 * exceed the per-node dirty limit in the slowpath
3715 		 * (spread_dirty_pages unset) before going into reclaim,
3716 		 * which is important when on a NUMA setup the allowed
3717 		 * nodes are together not big enough to reach the
3718 		 * global limit.  The proper fix for these situations
3719 		 * will require awareness of nodes in the
3720 		 * dirty-throttling and the flusher threads.
3721 		 */
3722 		if (ac->spread_dirty_pages) {
3723 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
3724 				continue;
3725 
3726 			if (!node_dirty_ok(zone->zone_pgdat)) {
3727 				last_pgdat_dirty_limit = zone->zone_pgdat;
3728 				continue;
3729 			}
3730 		}
3731 
3732 		if (no_fallback && nr_online_nodes > 1 &&
3733 		    zone != ac->preferred_zoneref->zone) {
3734 			int local_nid;
3735 
3736 			/*
3737 			 * If moving to a remote node, retry but allow
3738 			 * fragmenting fallbacks. Locality is more important
3739 			 * than fragmentation avoidance.
3740 			 */
3741 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3742 			if (zone_to_nid(zone) != local_nid) {
3743 				alloc_flags &= ~ALLOC_NOFRAGMENT;
3744 				goto retry;
3745 			}
3746 		}
3747 
3748 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3749 		if (!zone_watermark_fast(zone, order, mark,
3750 				       ac->highest_zoneidx, alloc_flags)) {
3751 			int ret;
3752 
3753 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3754 			/*
3755 			 * Watermark failed for this zone, but see if we can
3756 			 * grow this zone if it contains deferred pages.
3757 			 */
3758 			if (static_branch_unlikely(&deferred_pages)) {
3759 				if (_deferred_grow_zone(zone, order))
3760 					goto try_this_zone;
3761 			}
3762 #endif
3763 			/* Checked here to keep the fast path fast */
3764 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3765 			if (alloc_flags & ALLOC_NO_WATERMARKS)
3766 				goto try_this_zone;
3767 
3768 			if (node_reclaim_mode == 0 ||
3769 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3770 				continue;
3771 
3772 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3773 			switch (ret) {
3774 			case NODE_RECLAIM_NOSCAN:
3775 				/* did not scan */
3776 				continue;
3777 			case NODE_RECLAIM_FULL:
3778 				/* scanned but unreclaimable */
3779 				continue;
3780 			default:
3781 				/* did we reclaim enough */
3782 				if (zone_watermark_ok(zone, order, mark,
3783 					ac->highest_zoneidx, alloc_flags))
3784 					goto try_this_zone;
3785 
3786 				continue;
3787 			}
3788 		}
3789 
3790 try_this_zone:
3791 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3792 				gfp_mask, alloc_flags, ac->migratetype);
3793 		if (page) {
3794 			prep_new_page(page, order, gfp_mask, alloc_flags);
3795 
3796 			/*
3797 			 * If this is a high-order atomic allocation then check
3798 			 * if the pageblock should be reserved for the future
3799 			 */
3800 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3801 				reserve_highatomic_pageblock(page, zone, order);
3802 
3803 			return page;
3804 		} else {
3805 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3806 			/* Try again if zone has deferred pages */
3807 			if (static_branch_unlikely(&deferred_pages)) {
3808 				if (_deferred_grow_zone(zone, order))
3809 					goto try_this_zone;
3810 			}
3811 #endif
3812 		}
3813 	}
3814 
3815 	/*
3816 	 * It's possible on a UMA machine to get through all zones that are
3817 	 * fragmented. If avoiding fragmentation, reset and try again.
3818 	 */
3819 	if (no_fallback) {
3820 		alloc_flags &= ~ALLOC_NOFRAGMENT;
3821 		goto retry;
3822 	}
3823 
3824 	return NULL;
3825 }
3826 
3827 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3828 {
3829 	unsigned int filter = SHOW_MEM_FILTER_NODES;
3830 
3831 	/*
3832 	 * This documents exceptions given to allocations in certain
3833 	 * contexts that are allowed to allocate outside current's set
3834 	 * of allowed nodes.
3835 	 */
3836 	if (!(gfp_mask & __GFP_NOMEMALLOC))
3837 		if (tsk_is_oom_victim(current) ||
3838 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3839 			filter &= ~SHOW_MEM_FILTER_NODES;
3840 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3841 		filter &= ~SHOW_MEM_FILTER_NODES;
3842 
3843 	show_mem(filter, nodemask);
3844 }
3845 
3846 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3847 {
3848 	struct va_format vaf;
3849 	va_list args;
3850 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3851 
3852 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3853 		return;
3854 
3855 	va_start(args, fmt);
3856 	vaf.fmt = fmt;
3857 	vaf.va = &args;
3858 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3859 			current->comm, &vaf, gfp_mask, &gfp_mask,
3860 			nodemask_pr_args(nodemask));
3861 	va_end(args);
3862 
3863 	cpuset_print_current_mems_allowed();
3864 	pr_cont("\n");
3865 	dump_stack();
3866 	warn_alloc_show_mem(gfp_mask, nodemask);
3867 }
3868 
3869 static inline struct page *
3870 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3871 			      unsigned int alloc_flags,
3872 			      const struct alloc_context *ac)
3873 {
3874 	struct page *page;
3875 
3876 	page = get_page_from_freelist(gfp_mask, order,
3877 			alloc_flags|ALLOC_CPUSET, ac);
3878 	/*
3879 	 * fallback to ignore cpuset restriction if our nodes
3880 	 * are depleted
3881 	 */
3882 	if (!page)
3883 		page = get_page_from_freelist(gfp_mask, order,
3884 				alloc_flags, ac);
3885 
3886 	return page;
3887 }
3888 
3889 static inline struct page *
3890 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3891 	const struct alloc_context *ac, unsigned long *did_some_progress)
3892 {
3893 	struct oom_control oc = {
3894 		.zonelist = ac->zonelist,
3895 		.nodemask = ac->nodemask,
3896 		.memcg = NULL,
3897 		.gfp_mask = gfp_mask,
3898 		.order = order,
3899 	};
3900 	struct page *page;
3901 
3902 	*did_some_progress = 0;
3903 
3904 	/*
3905 	 * Acquire the oom lock.  If that fails, somebody else is
3906 	 * making progress for us.
3907 	 */
3908 	if (!mutex_trylock(&oom_lock)) {
3909 		*did_some_progress = 1;
3910 		schedule_timeout_uninterruptible(1);
3911 		return NULL;
3912 	}
3913 
3914 	/*
3915 	 * Go through the zonelist yet one more time, keep very high watermark
3916 	 * here, this is only to catch a parallel oom killing, we must fail if
3917 	 * we're still under heavy pressure. Also make sure that this reclaim
3918 	 * attempt does not depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3919 	 * allocation, which would never fail while oom_lock is already held.
3920 	 */
3921 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3922 				      ~__GFP_DIRECT_RECLAIM, order,
3923 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3924 	if (page)
3925 		goto out;
3926 
3927 	/* Coredumps can quickly deplete all memory reserves */
3928 	if (current->flags & PF_DUMPCORE)
3929 		goto out;
3930 	/* The OOM killer will not help higher order allocs */
3931 	if (order > PAGE_ALLOC_COSTLY_ORDER)
3932 		goto out;
3933 	/*
3934 	 * We have already exhausted all our reclaim opportunities without any
3935 	 * success so it is time to admit defeat. We will skip the OOM killer
3936 	 * because it is very likely that the caller has a more reasonable
3937 	 * fallback than shooting a random task.
3938 	 */
3939 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
3940 		goto out;
3941 	/* The OOM killer does not needlessly kill tasks for lowmem */
3942 	if (ac->highest_zoneidx < ZONE_NORMAL)
3943 		goto out;
3944 	if (pm_suspended_storage())
3945 		goto out;
3946 	/*
3947 	 * XXX: GFP_NOFS allocations should rather fail than rely on
3948 	 * other request to make a forward progress.
3949 	 * We are in an unfortunate situation where out_of_memory cannot
3950 	 * do much for this context but let's try it to at least get
3951 	 * access to memory reserved if the current task is killed (see
3952 	 * out_of_memory). Once filesystems are ready to handle allocation
3953 	 * failures more gracefully we should just bail out here.
3954 	 */
3955 
3956 	/* The OOM killer may not free memory on a specific node */
3957 	if (gfp_mask & __GFP_THISNODE)
3958 		goto out;
3959 
3960 	/* Exhausted what can be done so it's blame time */
3961 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3962 		*did_some_progress = 1;
3963 
3964 		/*
3965 		 * Help non-failing allocations by giving them access to memory
3966 		 * reserves
3967 		 */
3968 		if (gfp_mask & __GFP_NOFAIL)
3969 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3970 					ALLOC_NO_WATERMARKS, ac);
3971 	}
3972 out:
3973 	mutex_unlock(&oom_lock);
3974 	return page;
3975 }
3976 
3977 /*
3978  * Maximum number of compaction retries with progress before the OOM
3979  * killer is considered the only way to move forward.
3980  */
3981 #define MAX_COMPACT_RETRIES 16
3982 
3983 #ifdef CONFIG_COMPACTION
3984 /* Try memory compaction for high-order allocations before reclaim */
3985 static struct page *
3986 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3987 		unsigned int alloc_flags, const struct alloc_context *ac,
3988 		enum compact_priority prio, enum compact_result *compact_result)
3989 {
3990 	struct page *page = NULL;
3991 	unsigned long pflags;
3992 	unsigned int noreclaim_flag;
3993 
3994 	if (!order)
3995 		return NULL;
3996 
3997 	psi_memstall_enter(&pflags);
3998 	noreclaim_flag = memalloc_noreclaim_save();
3999 
4000 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4001 								prio, &page);
4002 
4003 	memalloc_noreclaim_restore(noreclaim_flag);
4004 	psi_memstall_leave(&pflags);
4005 
4006 	/*
4007 	 * In at least one zone, compaction wasn't deferred or skipped, so let's
4008 	 * count a compaction stall
4009 	 */
4010 	count_vm_event(COMPACTSTALL);
4011 
4012 	/* Prep a captured page if available */
4013 	if (page)
4014 		prep_new_page(page, order, gfp_mask, alloc_flags);
4015 
4016 	/* Try to get a page from the freelist if available */
4017 	if (!page)
4018 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4019 
4020 	if (page) {
4021 		struct zone *zone = page_zone(page);
4022 
4023 		zone->compact_blockskip_flush = false;
4024 		compaction_defer_reset(zone, order, true);
4025 		count_vm_event(COMPACTSUCCESS);
4026 		return page;
4027 	}
4028 
4029 	/*
4030 	 * It's bad if a compaction run occurs and fails. The most likely reason
4031 	 * is that pages exist, but not enough to satisfy watermarks.
4032 	 */
4033 	count_vm_event(COMPACTFAIL);
4034 
4035 	cond_resched();
4036 
4037 	return NULL;
4038 }
4039 
4040 static inline bool
4041 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4042 		     enum compact_result compact_result,
4043 		     enum compact_priority *compact_priority,
4044 		     int *compaction_retries)
4045 {
4046 	int max_retries = MAX_COMPACT_RETRIES;
4047 	int min_priority;
4048 	bool ret = false;
4049 	int retries = *compaction_retries;
4050 	enum compact_priority priority = *compact_priority;
4051 
4052 	if (!order)
4053 		return false;
4054 
4055 	if (compaction_made_progress(compact_result))
4056 		(*compaction_retries)++;
4057 
4058 	/*
4059 	 * compaction considers all the zones as desperately out of memory
4060 	 * so it doesn't really make much sense to retry except when the
4061 	 * failure could be caused by insufficient priority
4062 	 */
4063 	if (compaction_failed(compact_result))
4064 		goto check_priority;
4065 
4066 	/*
4067 	 * compaction was skipped because there are not enough order-0 pages
4068 	 * to work with, so we retry only if it looks like reclaim can help.
4069 	 */
4070 	if (compaction_needs_reclaim(compact_result)) {
4071 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4072 		goto out;
4073 	}
4074 
4075 	/*
4076 	 * make sure the compaction wasn't deferred or didn't bail out early
4077 	 * due to lock contention before we declare that we should give up.
4078 	 * But the next retry should use a higher priority if allowed, so
4079 	 * we don't just keep bailing out endlessly.
4080 	 */
4081 	if (compaction_withdrawn(compact_result)) {
4082 		goto check_priority;
4083 	}
4084 
4085 	/*
4086 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4087 	 * costly ones because they are de facto nofail and invoke OOM
4088 	 * killer to move on while costly can fail and users are ready
4089 	 * to cope with that. 1/4 retries is rather arbitrary but we
4090 	 * would need much more detailed feedback from compaction to
4091 	 * make a better decision.
4092 	 */
4093 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4094 		max_retries /= 4;
4095 	if (*compaction_retries <= max_retries) {
4096 		ret = true;
4097 		goto out;
4098 	}
4099 
4100 	/*
4101 	 * Make sure there are attempts at the highest priority if we exhausted
4102 	 * all retries or failed at the lower priorities.
4103 	 */
4104 check_priority:
4105 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4106 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4107 
4108 	if (*compact_priority > min_priority) {
4109 		(*compact_priority)--;
4110 		*compaction_retries = 0;
4111 		ret = true;
4112 	}
4113 out:
4114 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4115 	return ret;
4116 }
4117 #else
4118 static inline struct page *
4119 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4120 		unsigned int alloc_flags, const struct alloc_context *ac,
4121 		enum compact_priority prio, enum compact_result *compact_result)
4122 {
4123 	*compact_result = COMPACT_SKIPPED;
4124 	return NULL;
4125 }
4126 
4127 static inline bool
4128 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4129 		     enum compact_result compact_result,
4130 		     enum compact_priority *compact_priority,
4131 		     int *compaction_retries)
4132 {
4133 	struct zone *zone;
4134 	struct zoneref *z;
4135 
4136 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4137 		return false;
4138 
4139 	/*
4140 	 * There are setups with compaction disabled which would prefer to loop
4141 	 * inside the allocator rather than hit the oom killer prematurely.
4142 	 * Let's give them a good hope and keep retrying while the order-0
4143 	 * watermarks are OK.
4144 	 */
4145 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4146 				ac->highest_zoneidx, ac->nodemask) {
4147 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4148 					ac->highest_zoneidx, alloc_flags))
4149 			return true;
4150 	}
4151 	return false;
4152 }
4153 #endif /* CONFIG_COMPACTION */
4154 
4155 #ifdef CONFIG_LOCKDEP
4156 static struct lockdep_map __fs_reclaim_map =
4157 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4158 
4159 static bool __need_fs_reclaim(gfp_t gfp_mask)
4160 {
4161 	gfp_mask = current_gfp_context(gfp_mask);
4162 
4163 	/* no reclaim without waiting on it */
4164 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4165 		return false;
4166 
4167 	/* this guy won't enter reclaim */
4168 	if (current->flags & PF_MEMALLOC)
4169 		return false;
4170 
4171 	/* We're only interested in __GFP_FS allocations for now */
4172 	if (!(gfp_mask & __GFP_FS))
4173 		return false;
4174 
4175 	if (gfp_mask & __GFP_NOLOCKDEP)
4176 		return false;
4177 
4178 	return true;
4179 }
4180 
4181 void __fs_reclaim_acquire(void)
4182 {
4183 	lock_map_acquire(&__fs_reclaim_map);
4184 }
4185 
4186 void __fs_reclaim_release(void)
4187 {
4188 	lock_map_release(&__fs_reclaim_map);
4189 }
4190 
4191 void fs_reclaim_acquire(gfp_t gfp_mask)
4192 {
4193 	if (__need_fs_reclaim(gfp_mask))
4194 		__fs_reclaim_acquire();
4195 }
4196 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4197 
4198 void fs_reclaim_release(gfp_t gfp_mask)
4199 {
4200 	if (__need_fs_reclaim(gfp_mask))
4201 		__fs_reclaim_release();
4202 }
4203 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4204 #endif
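
/*
 * Sketch of what the annotation above buys (some_fs_lock is a hypothetical
 * lock used for illustration): a GFP_KERNEL allocation performed while
 * holding some_fs_lock records the dependency some_fs_lock -> fs_reclaim
 * via the acquire/release pair in prepare_alloc_pages(). If the real
 * reclaim path, which runs inside fs_reclaim_acquire()/fs_reclaim_release(),
 * can also take some_fs_lock, lockdep sees the cycle and reports the
 * potential deadlock before it ever happens for real.
 */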
4205 
4206 /* Perform direct synchronous page reclaim */
4207 static int
4208 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4209 					const struct alloc_context *ac)
4210 {
4211 	int progress;
4212 	unsigned int noreclaim_flag;
4213 	unsigned long pflags;
4214 
4215 	cond_resched();
4216 
4217 	/* We now go into synchronous reclaim */
4218 	cpuset_memory_pressure_bump();
4219 	psi_memstall_enter(&pflags);
4220 	fs_reclaim_acquire(gfp_mask);
4221 	noreclaim_flag = memalloc_noreclaim_save();
4222 
4223 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4224 								ac->nodemask);
4225 
4226 	memalloc_noreclaim_restore(noreclaim_flag);
4227 	fs_reclaim_release(gfp_mask);
4228 	psi_memstall_leave(&pflags);
4229 
4230 	cond_resched();
4231 
4232 	return progress;
4233 }
4234 
4235 /* The really slow allocator path where we enter direct reclaim */
4236 static inline struct page *
4237 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4238 		unsigned int alloc_flags, const struct alloc_context *ac,
4239 		unsigned long *did_some_progress)
4240 {
4241 	struct page *page = NULL;
4242 	bool drained = false;
4243 
4244 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4245 	if (unlikely(!(*did_some_progress)))
4246 		return NULL;
4247 
4248 retry:
4249 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4250 
4251 	/*
4252 	 * If an allocation failed after direct reclaim, it could be because
4253 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4254 	 * Shrink them and try again
4255 	 */
4256 	if (!page && !drained) {
4257 		unreserve_highatomic_pageblock(ac, false);
4258 		drain_all_pages(NULL);
4259 		drained = true;
4260 		goto retry;
4261 	}
4262 
4263 	return page;
4264 }
4265 
4266 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4267 			     const struct alloc_context *ac)
4268 {
4269 	struct zoneref *z;
4270 	struct zone *zone;
4271 	pg_data_t *last_pgdat = NULL;
4272 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4273 
4274 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4275 					ac->nodemask) {
4276 		if (last_pgdat != zone->zone_pgdat)
4277 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4278 		last_pgdat = zone->zone_pgdat;
4279 	}
4280 }
4281 
4282 static inline unsigned int
4283 gfp_to_alloc_flags(gfp_t gfp_mask)
4284 {
4285 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4286 
4287 	/*
4288 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4289 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4290 	 * to save two branches.
4291 	 */
4292 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4293 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4294 
4295 	/*
4296 	 * The caller may dip into page reserves a bit more if the caller
4297 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4298 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4299 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4300 	 */
4301 	alloc_flags |= (__force int)
4302 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4303 
4304 	if (gfp_mask & __GFP_ATOMIC) {
4305 		/*
4306 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4307 		 * if it can't schedule.
4308 		 */
4309 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4310 			alloc_flags |= ALLOC_HARDER;
4311 		/*
4312 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4313 		 * comment for __cpuset_node_allowed().
4314 		 */
4315 		alloc_flags &= ~ALLOC_CPUSET;
4316 	} else if (unlikely(rt_task(current)) && !in_interrupt())
4317 		alloc_flags |= ALLOC_HARDER;
4318 
4319 #ifdef CONFIG_CMA
4320 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4321 		alloc_flags |= ALLOC_CMA;
4322 #endif
4323 	return alloc_flags;
4324 }
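
/*
 * Worked example of the translation above: GFP_ATOMIC is
 * __GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM, so it comes out as
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER with
 * ALLOC_CPUSET cleared, i.e. an atomic caller may dip below the min
 * watermark and ignore cpuset restrictions. A plain GFP_KERNEL caller
 * (assuming a normal, non-realtime task) keeps
 * ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD.
 */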
4325 
4326 static bool oom_reserves_allowed(struct task_struct *tsk)
4327 {
4328 	if (!tsk_is_oom_victim(tsk))
4329 		return false;
4330 
4331 	/*
4332 	 * !MMU doesn't have oom reaper so give access to memory reserves
4333 	 * only to the thread with TIF_MEMDIE set
4334 	 */
4335 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4336 		return false;
4337 
4338 	return true;
4339 }
4340 
4341 /*
4342  * Distinguish requests which really need access to full memory
4343  * reserves from oom victims which can live with a portion of it
4344  */
4345 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4346 {
4347 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4348 		return 0;
4349 	if (gfp_mask & __GFP_MEMALLOC)
4350 		return ALLOC_NO_WATERMARKS;
4351 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4352 		return ALLOC_NO_WATERMARKS;
4353 	if (!in_interrupt()) {
4354 		if (current->flags & PF_MEMALLOC)
4355 			return ALLOC_NO_WATERMARKS;
4356 		else if (oom_reserves_allowed(current))
4357 			return ALLOC_OOM;
4358 	}
4359 
4360 	return 0;
4361 }
4362 
4363 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4364 {
4365 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4366 }
4367 
4368 /*
4369  * Checks whether it makes sense to retry the reclaim to make forward progress
4370  * for the given allocation request.
4371  *
4372  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4373  * without success, or when we couldn't even meet the watermark if we
4374  * reclaimed all remaining pages on the LRU lists.
4375  *
4376  * Returns true if a retry is viable or false to enter the oom path.
4377  */
4378 static inline bool
4379 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4380 		     struct alloc_context *ac, int alloc_flags,
4381 		     bool did_some_progress, int *no_progress_loops)
4382 {
4383 	struct zone *zone;
4384 	struct zoneref *z;
4385 	bool ret = false;
4386 
4387 	/*
4388 	 * Costly allocations might have made progress but this doesn't mean
4389 	 * their order will become available due to high fragmentation so
4390 	 * always increment the no progress counter for them
4391 	 */
4392 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4393 		*no_progress_loops = 0;
4394 	else
4395 		(*no_progress_loops)++;
4396 
4397 	/*
4398 	 * Make sure we converge to OOM if we cannot make any progress
4399 	 * several times in a row.
4400 	 */
4401 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4402 		/* Before OOM, exhaust highatomic_reserve */
4403 		return unreserve_highatomic_pageblock(ac, true);
4404 	}
4405 
4406 	/*
4407 	 * Keep reclaiming pages while there is a chance this will lead
4408 	 * somewhere.  If none of the target zones can satisfy our allocation
4409 	 * request even if all reclaimable pages are considered then we are
4410 	 * screwed and have to go OOM.
4411 	 */
4412 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4413 				ac->highest_zoneidx, ac->nodemask) {
4414 		unsigned long available;
4415 		unsigned long reclaimable;
4416 		unsigned long min_wmark = min_wmark_pages(zone);
4417 		bool wmark;
4418 
4419 		available = reclaimable = zone_reclaimable_pages(zone);
4420 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4421 
4422 		/*
4423 		 * Would the allocation succeed if we reclaimed all
4424 		 * reclaimable pages?
4425 		 */
4426 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4427 				ac->highest_zoneidx, alloc_flags, available);
4428 		trace_reclaim_retry_zone(z, order, reclaimable,
4429 				available, min_wmark, *no_progress_loops, wmark);
4430 		if (wmark) {
4431 			/*
4432 			 * If we didn't make any progress and have a lot of
4433 			 * dirty + writeback pages then we should wait for
4434 			 * an IO to complete to slow down the reclaim and
4435 	 * prevent a premature OOM
4436 			 */
4437 			if (!did_some_progress) {
4438 				unsigned long write_pending;
4439 
4440 				write_pending = zone_page_state_snapshot(zone,
4441 							NR_ZONE_WRITE_PENDING);
4442 
4443 				if (2 * write_pending > reclaimable) {
4444 					congestion_wait(BLK_RW_ASYNC, HZ/10);
4445 					return true;
4446 				}
4447 			}
4448 
4449 			ret = true;
4450 			goto out;
4451 		}
4452 	}
4453 
4454 out:
4455 	/*
4456 	 * Memory allocation/reclaim might be called from a WQ context and the
4457 	 * current implementation of the WQ concurrency control doesn't
4458 	 * recognize that a particular WQ is congested if the worker thread is
4459 	 * looping without ever sleeping. Therefore we have to do a short sleep
4460 	 * here rather than calling cond_resched().
4461 	 */
4462 	if (current->flags & PF_WQ_WORKER)
4463 		schedule_timeout_uninterruptible(1);
4464 	else
4465 		cond_resched();
4466 	return ret;
4467 }
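
/*
 * Worked example of the throttling branch above (made-up numbers): with
 * reclaimable = 1000 pages of which write_pending = 600 are dirty or under
 * writeback and no reclaim progress, 2 * 600 > 1000 holds, so the task
 * sleeps in congestion_wait(BLK_RW_ASYNC, HZ/10) and retries instead of
 * declaring OOM while most of the LRU is merely waiting for IO.
 */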
4468 
4469 static inline bool
4470 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4471 {
4472 	/*
4473 	 * It's possible that cpuset's mems_allowed and the nodemask from
4474 	 * mempolicy don't intersect. This should be normally dealt with by
4475 	 * policy_nodemask(), but it's possible to race with cpuset update in
4476 	 * such a way the check therein was true, and then it became false
4477 	 * before we got our cpuset_mems_cookie here.
4478 	 * This assumes that for all allocations, ac->nodemask can come only
4479 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4480 	 * when it does not intersect with the cpuset restrictions) or the
4481 	 * caller can deal with a violated nodemask.
4482 	 */
4483 	if (cpusets_enabled() && ac->nodemask &&
4484 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4485 		ac->nodemask = NULL;
4486 		return true;
4487 	}
4488 
4489 	/*
4490 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4491 	 * possible to race with parallel threads in such a way that our
4492 	 * allocation can fail while the mask is being updated. If we are about
4493 	 * to fail, check if the cpuset changed during allocation and if so,
4494 	 * retry.
4495 	 */
4496 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4497 		return true;
4498 
4499 	return false;
4500 }
4501 
4502 static inline struct page *
4503 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4504 						struct alloc_context *ac)
4505 {
4506 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4507 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4508 	struct page *page = NULL;
4509 	unsigned int alloc_flags;
4510 	unsigned long did_some_progress;
4511 	enum compact_priority compact_priority;
4512 	enum compact_result compact_result;
4513 	int compaction_retries;
4514 	int no_progress_loops;
4515 	unsigned int cpuset_mems_cookie;
4516 	int reserve_flags;
4517 
4518 	/*
4519 	 * Sanity check to catch abuse of atomic reserves being used by
4520 	 * callers that are not in atomic context.
4521 	 */
4522 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4523 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4524 		gfp_mask &= ~__GFP_ATOMIC;
4525 
4526 retry_cpuset:
4527 	compaction_retries = 0;
4528 	no_progress_loops = 0;
4529 	compact_priority = DEF_COMPACT_PRIORITY;
4530 	cpuset_mems_cookie = read_mems_allowed_begin();
4531 
4532 	/*
4533 	 * The fast path uses conservative alloc_flags to succeed only until
4534 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4535 	 * alloc_flags precisely. So we do that now.
4536 	 */
4537 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4538 
4539 	/*
4540 	 * We need to recalculate the starting point for the zonelist iterator
4541 	 * because we might have used a different nodemask in the fast path, or
4542 	 * there was a cpuset modification and we are retrying - otherwise we
4543 	 * could end up iterating over non-eligible zones endlessly.
4544 	 */
4545 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4546 					ac->highest_zoneidx, ac->nodemask);
4547 	if (!ac->preferred_zoneref->zone)
4548 		goto nopage;
4549 
4550 	if (alloc_flags & ALLOC_KSWAPD)
4551 		wake_all_kswapds(order, gfp_mask, ac);
4552 
4553 	/*
4554 	 * The adjusted alloc_flags might result in immediate success, so try
4555 	 * that first
4556 	 */
4557 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4558 	if (page)
4559 		goto got_pg;
4560 
4561 	/*
4562 	 * For costly allocations, try direct compaction first, as it's likely
4563 	 * that we have enough base pages and don't need to reclaim. For non-
4564 	 * movable high-order allocations, do that as well, as compaction will
4565 	 * try to prevent permanent fragmentation by migrating from blocks of the
4566 	 * same migratetype.
4567 	 * Don't try this for allocations that are allowed to ignore
4568 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4569 	 */
4570 	if (can_direct_reclaim &&
4571 			(costly_order ||
4572 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4573 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4574 		page = __alloc_pages_direct_compact(gfp_mask, order,
4575 						alloc_flags, ac,
4576 						INIT_COMPACT_PRIORITY,
4577 						&compact_result);
4578 		if (page)
4579 			goto got_pg;
4580 
4581 		/*
4582 		 * Checks for costly allocations with __GFP_NORETRY, which
4583 		 * includes some THP page fault allocations
4584 		 */
4585 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4586 			/*
4587 			 * If allocating entire pageblock(s) and compaction
4588 			 * failed because all zones are below low watermarks
4589 			 * or is prohibited because it recently failed at this
4590 			 * order, fail immediately unless the allocator has
4591 			 * requested compaction and reclaim retry.
4592 			 *
4593 			 * Reclaim is
4594 			 *  - potentially very expensive because zones are far
4595 			 *    below their low watermarks or this is part of very
4596 			 *    bursty high order allocations,
4597 			 *  - not guaranteed to help because isolate_freepages()
4598 			 *    may not iterate over freed pages as part of its
4599 			 *    linear scan, and
4600 			 *  - unlikely to make entire pageblocks free on its
4601 			 *    own.
4602 			 */
4603 			if (compact_result == COMPACT_SKIPPED ||
4604 			    compact_result == COMPACT_DEFERRED)
4605 				goto nopage;
4606 
4607 			/*
4608 			 * Looks like reclaim/compaction is worth trying, but
4609 			 * sync compaction could be very expensive, so keep
4610 			 * using async compaction.
4611 			 */
4612 			compact_priority = INIT_COMPACT_PRIORITY;
4613 		}
4614 	}
4615 
4616 retry:
4617 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4618 	if (alloc_flags & ALLOC_KSWAPD)
4619 		wake_all_kswapds(order, gfp_mask, ac);
4620 
4621 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4622 	if (reserve_flags)
4623 		alloc_flags = reserve_flags;
4624 
4625 	/*
4626 	 * Reset the nodemask and zonelist iterators if memory policies can be
4627 	 * ignored. These allocations are high priority and system rather than
4628 	 * user oriented.
4629 	 */
4630 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4631 		ac->nodemask = NULL;
4632 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4633 					ac->highest_zoneidx, ac->nodemask);
4634 	}
4635 
4636 	/* Attempt with potentially adjusted zonelist and alloc_flags */
4637 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4638 	if (page)
4639 		goto got_pg;
4640 
4641 	/* Caller is not willing to reclaim, we can't balance anything */
4642 	if (!can_direct_reclaim)
4643 		goto nopage;
4644 
4645 	/* Avoid recursion of direct reclaim */
4646 	if (current->flags & PF_MEMALLOC)
4647 		goto nopage;
4648 
4649 	/* Try direct reclaim and then allocating */
4650 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4651 							&did_some_progress);
4652 	if (page)
4653 		goto got_pg;
4654 
4655 	/* Try direct compaction and then allocating */
4656 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4657 					compact_priority, &compact_result);
4658 	if (page)
4659 		goto got_pg;
4660 
4661 	/* Do not loop if specifically requested */
4662 	if (gfp_mask & __GFP_NORETRY)
4663 		goto nopage;
4664 
4665 	/*
4666 	 * Do not retry costly high order allocations unless they are
4667 	 * __GFP_RETRY_MAYFAIL
4668 	 */
4669 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4670 		goto nopage;
4671 
4672 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4673 				 did_some_progress > 0, &no_progress_loops))
4674 		goto retry;
4675 
4676 	/*
4677 	 * It doesn't make any sense to retry compaction if the order-0
4678 	 * reclaim is not able to make any progress because the current
4679 	 * implementation of compaction depends on a sufficient amount
4680 	 * of free memory (see __compaction_suitable)
4681 	 */
4682 	if (did_some_progress > 0 &&
4683 			should_compact_retry(ac, order, alloc_flags,
4684 				compact_result, &compact_priority,
4685 				&compaction_retries))
4686 		goto retry;
4687 
4688 
4689 	/* Deal with possible cpuset update races before we start OOM killing */
4690 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4691 		goto retry_cpuset;
4692 
4693 	/* Reclaim has failed us, start killing things */
4694 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4695 	if (page)
4696 		goto got_pg;
4697 
4698 	/* Avoid allocations with no watermarks from looping endlessly */
4699 	if (tsk_is_oom_victim(current) &&
4700 	    (alloc_flags == ALLOC_OOM ||
4701 	     (gfp_mask & __GFP_NOMEMALLOC)))
4702 		goto nopage;
4703 
4704 	/* Retry as long as the OOM killer is making progress */
4705 	if (did_some_progress) {
4706 		no_progress_loops = 0;
4707 		goto retry;
4708 	}
4709 
4710 nopage:
4711 	/* Deal with possible cpuset update races before we fail */
4712 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4713 		goto retry_cpuset;
4714 
4715 	/*
4716 	 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4717 	 * we always retry
4718 	 */
4719 	if (gfp_mask & __GFP_NOFAIL) {
4720 		/*
4721 		 * All existing users of __GFP_NOFAIL are blockable, so warn
4722 		 * of any new users that actually require GFP_NOWAIT
4723 		 */
4724 		if (WARN_ON_ONCE(!can_direct_reclaim))
4725 			goto fail;
4726 
4727 		/*
4728 		 * PF_MEMALLOC request from this context is rather bizarre
4729 		 * because we cannot reclaim anything and only can loop waiting
4730 		 * for somebody to do a work for us
4731 		 */
4732 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4733 
4734 		/*
4735 		 * Non-failing costly orders are a hard requirement which we
4736 		 * are not well prepared for, so warn about these users
4737 		 * so that we can identify them and convert them to something
4738 		 * else.
4739 		 */
4740 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4741 
4742 		/*
4743 		 * Help non-failing allocations by giving them access to memory
4744 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
4745 		 * could deplete whole memory reserves which would just make
4746 		 * the situation worse
4747 		 */
4748 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4749 		if (page)
4750 			goto got_pg;
4751 
4752 		cond_resched();
4753 		goto retry;
4754 	}
4755 fail:
4756 	warn_alloc(gfp_mask, ac->nodemask,
4757 			"page allocation failure: order:%u", order);
4758 got_pg:
4759 	return page;
4760 }
4761 
4762 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4763 		int preferred_nid, nodemask_t *nodemask,
4764 		struct alloc_context *ac, gfp_t *alloc_mask,
4765 		unsigned int *alloc_flags)
4766 {
4767 	ac->highest_zoneidx = gfp_zone(gfp_mask);
4768 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4769 	ac->nodemask = nodemask;
4770 	ac->migratetype = gfp_migratetype(gfp_mask);
4771 
4772 	if (cpusets_enabled()) {
4773 		*alloc_mask |= __GFP_HARDWALL;
4774 		if (!ac->nodemask)
4775 			ac->nodemask = &cpuset_current_mems_allowed;
4776 		else
4777 			*alloc_flags |= ALLOC_CPUSET;
4778 	}
4779 
4780 	fs_reclaim_acquire(gfp_mask);
4781 	fs_reclaim_release(gfp_mask);
4782 
4783 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4784 
4785 	if (should_fail_alloc_page(gfp_mask, order))
4786 		return false;
4787 
4788 	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4789 		*alloc_flags |= ALLOC_CMA;
4790 
4791 	return true;
4792 }
4793 
4794 /* Determine whether to spread dirty pages and what the first usable zone is */
4795 static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
4796 {
4797 	/* Dirty zone balancing only done in the fast path */
4798 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4799 
4800 	/*
4801 	 * The preferred zone is used for statistics but crucially it is
4802 	 * also used as the starting point for the zonelist iterator. It
4803 	 * may get reset for allocations that ignore memory policies.
4804 	 */
4805 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4806 					ac->highest_zoneidx, ac->nodemask);
4807 }
4808 
4809 /*
4810  * This is the 'heart' of the zoned buddy allocator.
4811  */
4812 struct page *
4813 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4814 							nodemask_t *nodemask)
4815 {
4816 	struct page *page;
4817 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4818 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4819 	struct alloc_context ac = { };
4820 
4821 	/*
4822 	 * There are several places where we assume that the order value is sane
4823 	 * so bail out early if the request is out of bound.
4824 	 */
4825 	if (unlikely(order >= MAX_ORDER)) {
4826 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4827 		return NULL;
4828 	}
4829 
4830 	gfp_mask &= gfp_allowed_mask;
4831 	alloc_mask = gfp_mask;
4832 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4833 		return NULL;
4834 
4835 	finalise_ac(gfp_mask, &ac);
4836 
4837 	/*
4838 	 * Forbid the first pass from falling back to types that fragment
4839 	 * memory until all local zones are considered.
4840 	 */
4841 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
4842 
4843 	/* First allocation attempt */
4844 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4845 	if (likely(page))
4846 		goto out;
4847 
4848 	/*
4849 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4850 	 * and GFP_NOIO, which have to be inherited for all allocation requests
4851 	 * from a particular context which has been marked by
4852 	 * memalloc_no{fs,io}_{save,restore}.
4853 	 */
4854 	alloc_mask = current_gfp_context(gfp_mask);
4855 	ac.spread_dirty_pages = false;
4856 
4857 	/*
4858 	 * Restore the original nodemask if it was potentially replaced with
4859 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4860 	 */
4861 	ac.nodemask = nodemask;
4862 
4863 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4864 
4865 out:
4866 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4867 	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
4868 		__free_pages(page, order);
4869 		page = NULL;
4870 	}
4871 
4872 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4873 
4874 	return page;
4875 }
4876 EXPORT_SYMBOL(__alloc_pages_nodemask);
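
/*
 * Usage sketch (illustrative, not code from this file): callers normally
 * reach this through alloc_pages() and pair it with __free_pages() at the
 * same order:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (p) {
 *		void *va = page_address(p);
 *		...
 *		__free_pages(p, 2);
 *	}
 *
 * GFP_KERNEL may direct-reclaim, so this must not run in atomic context;
 * order 2 asks for 4 contiguous pages.
 */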
4877 
4878 /*
4879  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4880  * address cannot represent highmem pages. Use alloc_pages and then kmap if
4881  * you need to access highmem.
4882  */
4883 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4884 {
4885 	struct page *page;
4886 
4887 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4888 	if (!page)
4889 		return 0;
4890 	return (unsigned long) page_address(page);
4891 }
4892 EXPORT_SYMBOL(__get_free_pages);
4893 
4894 unsigned long get_zeroed_page(gfp_t gfp_mask)
4895 {
4896 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4897 }
4898 EXPORT_SYMBOL(get_zeroed_page);
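
/*
 * Usage sketch for the address-based helpers above (illustrative): the
 * return value is a kernel virtual address, released with free_page() or
 * free_pages() rather than __free_pages():
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (addr) {
 *		...
 *		free_page(addr);
 *	}
 */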
4899 
4900 static inline void free_the_page(struct page *page, unsigned int order)
4901 {
4902 	if (order == 0)		/* Via pcp? */
4903 		free_unref_page(page);
4904 	else
4905 		__free_pages_ok(page, order);
4906 }
4907 
4908 void __free_pages(struct page *page, unsigned int order)
4909 {
4910 	if (put_page_testzero(page))
4911 		free_the_page(page, order);
4912 }
4913 EXPORT_SYMBOL(__free_pages);
4914 
4915 void free_pages(unsigned long addr, unsigned int order)
4916 {
4917 	if (addr != 0) {
4918 		VM_BUG_ON(!virt_addr_valid((void *)addr));
4919 		__free_pages(virt_to_page((void *)addr), order);
4920 	}
4921 }
4922 
4923 EXPORT_SYMBOL(free_pages);
4924 
4925 /*
4926  * Page Fragment:
4927  *  An arbitrary-length arbitrary-offset area of memory which resides
4928  *  within a 0 or higher order page.  Multiple fragments within that page
4929  *  are individually refcounted, in the page's reference counter.
4930  *
4931  * The page_frag functions below provide a simple allocation framework for
4932  * page fragments.  This is used by the network stack and network device
4933  * drivers to provide a backing region of memory for use as either an
4934  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4935  */
4936 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4937 					     gfp_t gfp_mask)
4938 {
4939 	struct page *page = NULL;
4940 	gfp_t gfp = gfp_mask;
4941 
4942 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4943 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4944 		    __GFP_NOMEMALLOC;
4945 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4946 				PAGE_FRAG_CACHE_MAX_ORDER);
4947 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4948 #endif
4949 	if (unlikely(!page))
4950 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4951 
4952 	nc->va = page ? page_address(page) : NULL;
4953 
4954 	return page;
4955 }
4956 
4957 void __page_frag_cache_drain(struct page *page, unsigned int count)
4958 {
4959 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4960 
4961 	if (page_ref_sub_and_test(page, count))
4962 		free_the_page(page, compound_order(page));
4963 }
4964 EXPORT_SYMBOL(__page_frag_cache_drain);
4965 
4966 void *page_frag_alloc(struct page_frag_cache *nc,
4967 		      unsigned int fragsz, gfp_t gfp_mask)
4968 {
4969 	unsigned int size = PAGE_SIZE;
4970 	struct page *page;
4971 	int offset;
4972 
4973 	if (unlikely(!nc->va)) {
4974 refill:
4975 		page = __page_frag_cache_refill(nc, gfp_mask);
4976 		if (!page)
4977 			return NULL;
4978 
4979 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4980 		/* if size can vary use size else just use PAGE_SIZE */
4981 		size = nc->size;
4982 #endif
4983 		/* Even if we own the page, we do not use atomic_set().
4984 		 * This would break get_page_unless_zero() users.
4985 		 */
4986 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4987 
4988 		/* reset page count bias and offset to start of new frag */
4989 		nc->pfmemalloc = page_is_pfmemalloc(page);
4990 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4991 		nc->offset = size;
4992 	}
4993 
4994 	offset = nc->offset - fragsz;
4995 	if (unlikely(offset < 0)) {
4996 		page = virt_to_page(nc->va);
4997 
4998 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4999 			goto refill;
5000 
5001 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5002 		/* if size can vary use size else just use PAGE_SIZE */
5003 		size = nc->size;
5004 #endif
5005 		/* OK, page count is 0, we can safely set it */
5006 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5007 
5008 		/* reset page count bias and offset to start of new frag */
5009 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5010 		offset = size - fragsz;
5011 	}
5012 
5013 	nc->pagecnt_bias--;
5014 	nc->offset = offset;
5015 
5016 	return nc->va + offset;
5017 }
5018 EXPORT_SYMBOL(page_frag_alloc);
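
/*
 * Usage sketch (illustrative, assuming a driver-private cache that starts
 * zeroed so the first call takes the refill path):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 *
 * The cache itself is not locked; real users keep one per CPU or otherwise
 * serialize access to it.
 */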
5019 
5020 /*
5021  * Frees a page fragment allocated out of either a compound or order 0 page.
5022  */
5023 void page_frag_free(void *addr)
5024 {
5025 	struct page *page = virt_to_head_page(addr);
5026 
5027 	if (unlikely(put_page_testzero(page)))
5028 		free_the_page(page, compound_order(page));
5029 }
5030 EXPORT_SYMBOL(page_frag_free);
5031 
5032 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5033 		size_t size)
5034 {
5035 	if (addr) {
5036 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5037 		unsigned long used = addr + PAGE_ALIGN(size);
5038 
5039 		split_page(virt_to_page((void *)addr), order);
5040 		while (used < alloc_end) {
5041 			free_page(used);
5042 			used += PAGE_SIZE;
5043 		}
5044 	}
5045 	return (void *)addr;
5046 }
5047 
5048 /**
5049  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5050  * @size: the number of bytes to allocate
5051  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5052  *
5053  * This function is similar to alloc_pages(), except that it allocates the
5054  * minimum number of pages to satisfy the request.  alloc_pages() can only
5055  * allocate memory in power-of-two pages.
5056  *
5057  * This function is also limited by MAX_ORDER.
5058  *
5059  * Memory allocated by this function must be released by free_pages_exact().
5060  *
5061  * Return: pointer to the allocated area or %NULL in case of error.
5062  */
5063 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5064 {
5065 	unsigned int order = get_order(size);
5066 	unsigned long addr;
5067 
5068 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5069 		gfp_mask &= ~__GFP_COMP;
5070 
5071 	addr = __get_free_pages(gfp_mask, order);
5072 	return make_alloc_exact(addr, order, size);
5073 }
5074 EXPORT_SYMBOL(alloc_pages_exact);
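
/*
 * Worked example (illustrative): alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL)
 * grabs an order-3 block (8 pages), splits it, frees the 3 trailing pages
 * and returns the first 5. The caller must release it with
 * free_pages_exact(p, 5 * PAGE_SIZE), not free_pages().
 */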
5075 
5076 /**
5077  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5078  *			   pages on a node.
5079  * @nid: the preferred node ID where memory should be allocated
5080  * @size: the number of bytes to allocate
5081  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5082  *
5083  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5084  * back.
5085  *
5086  * Return: pointer to the allocated area or %NULL in case of error.
5087  */
5088 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5089 {
5090 	unsigned int order = get_order(size);
5091 	struct page *p;
5092 
5093 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5094 		gfp_mask &= ~__GFP_COMP;
5095 
5096 	p = alloc_pages_node(nid, gfp_mask, order);
5097 	if (!p)
5098 		return NULL;
5099 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5100 }
5101 
5102 /**
5103  * free_pages_exact - release memory allocated via alloc_pages_exact()
5104  * @virt: the value returned by alloc_pages_exact.
5105  * @size: size of allocation, same value as passed to alloc_pages_exact().
5106  *
5107  * Release the memory allocated by a previous call to alloc_pages_exact.
5108  */
5109 void free_pages_exact(void *virt, size_t size)
5110 {
5111 	unsigned long addr = (unsigned long)virt;
5112 	unsigned long end = addr + PAGE_ALIGN(size);
5113 
5114 	while (addr < end) {
5115 		free_page(addr);
5116 		addr += PAGE_SIZE;
5117 	}
5118 }
5119 EXPORT_SYMBOL(free_pages_exact);
5120 
5121 /**
5122  * nr_free_zone_pages - count number of pages beyond high watermark
5123  * @offset: The zone index of the highest zone
5124  *
5125  * nr_free_zone_pages() counts the number of pages which are beyond the
5126  * high watermark within all zones at or below a given zone index.  For each
5127  * zone, the number of pages is calculated as:
5128  *
5129  *     nr_free_zone_pages = managed_pages - high_pages
5130  *
5131  * Return: number of pages beyond high watermark.
5132  */
5133 static unsigned long nr_free_zone_pages(int offset)
5134 {
5135 	struct zoneref *z;
5136 	struct zone *zone;
5137 
5138 	/* Just pick one node, since fallback list is circular */
5139 	unsigned long sum = 0;
5140 
5141 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5142 
5143 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5144 		unsigned long size = zone_managed_pages(zone);
5145 		unsigned long high = high_wmark_pages(zone);
5146 		if (size > high)
5147 			sum += size - high;
5148 	}
5149 
5150 	return sum;
5151 }
5152 
5153 /**
5154  * nr_free_buffer_pages - count number of pages beyond high watermark
5155  *
5156  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5157  * watermark within ZONE_DMA and ZONE_NORMAL.
5158  *
5159  * Return: number of pages beyond high watermark within ZONE_DMA and
5160  * ZONE_NORMAL.
5161  */
5162 unsigned long nr_free_buffer_pages(void)
5163 {
5164 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5165 }
5166 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5167 
5168 /**
5169  * nr_free_pagecache_pages - count number of pages beyond high watermark
5170  *
5171  * nr_free_pagecache_pages() counts the number of pages which are beyond the
5172  * high watermark within all zones.
5173  *
5174  * Return: number of pages beyond high watermark within all zones.
5175  */
5176 unsigned long nr_free_pagecache_pages(void)
5177 {
5178 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5179 }
5180 
5181 static inline void show_node(struct zone *zone)
5182 {
5183 	if (IS_ENABLED(CONFIG_NUMA))
5184 		printk("Node %d ", zone_to_nid(zone));
5185 }
5186 
5187 long si_mem_available(void)
5188 {
5189 	long available;
5190 	unsigned long pagecache;
5191 	unsigned long wmark_low = 0;
5192 	unsigned long pages[NR_LRU_LISTS];
5193 	unsigned long reclaimable;
5194 	struct zone *zone;
5195 	int lru;
5196 
5197 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5198 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5199 
5200 	for_each_zone(zone)
5201 		wmark_low += low_wmark_pages(zone);
5202 
5203 	/*
5204 	 * Estimate the amount of memory available for userspace allocations,
5205 	 * without causing swapping.
5206 	 */
5207 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5208 
5209 	/*
5210 	 * Not all the page cache can be freed, otherwise the system will
5211 	 * start swapping. Assume at least half of the page cache, or the
5212 	 * low watermark worth of cache, needs to stay.
5213 	 */
5214 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5215 	pagecache -= min(pagecache / 2, wmark_low);
5216 	available += pagecache;
5217 
5218 	/*
5219 	 * Part of the reclaimable slab and other kernel memory consists of
5220 	 * items that are in use, and cannot be freed. Cap this estimate at the
5221 	 * low watermark.
5222 	 */
5223 	reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
5224 			global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5225 	available += reclaimable - min(reclaimable / 2, wmark_low);
5226 
5227 	if (available < 0)
5228 		available = 0;
5229 	return available;
5230 }
5231 EXPORT_SYMBOL_GPL(si_mem_available);
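
/*
 * Illustrative sketch (editor's example): si_mem_available() is what backs
 * MemAvailable in /proc/meminfo. A hypothetical in-kernel user could gate
 * an optional, best-effort allocation on the estimate like this.
 */
#if 0
static bool opportunistic_fill_ok(unsigned long want_pages)
{
	/* The estimate is heuristic, so leave generous headroom. */
	return si_mem_available() > 2 * (long)want_pages;
}
#endif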
5232 
5233 void si_meminfo(struct sysinfo *val)
5234 {
5235 	val->totalram = totalram_pages();
5236 	val->sharedram = global_node_page_state(NR_SHMEM);
5237 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5238 	val->bufferram = nr_blockdev_pages();
5239 	val->totalhigh = totalhigh_pages();
5240 	val->freehigh = nr_free_highpages();
5241 	val->mem_unit = PAGE_SIZE;
5242 }
5243 
5244 EXPORT_SYMBOL(si_meminfo);
5245 
5246 #ifdef CONFIG_NUMA
5247 void si_meminfo_node(struct sysinfo *val, int nid)
5248 {
5249 	int zone_type;		/* needs to be signed */
5250 	unsigned long managed_pages = 0;
5251 	unsigned long managed_highpages = 0;
5252 	unsigned long free_highpages = 0;
5253 	pg_data_t *pgdat = NODE_DATA(nid);
5254 
5255 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5256 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5257 	val->totalram = managed_pages;
5258 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5259 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5260 #ifdef CONFIG_HIGHMEM
5261 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5262 		struct zone *zone = &pgdat->node_zones[zone_type];
5263 
5264 		if (is_highmem(zone)) {
5265 			managed_highpages += zone_managed_pages(zone);
5266 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5267 		}
5268 	}
5269 	val->totalhigh = managed_highpages;
5270 	val->freehigh = free_highpages;
5271 #else
5272 	val->totalhigh = managed_highpages;
5273 	val->freehigh = free_highpages;
5274 #endif
5275 	val->mem_unit = PAGE_SIZE;
5276 }
5277 #endif
5278 
5279 /*
5280  * Determine whether the node should be displayed or not, depending on whether
5281  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5282  */
5283 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5284 {
5285 	if (!(flags & SHOW_MEM_FILTER_NODES))
5286 		return false;
5287 
5288 	/*
5289 	 * No nodemask - i.e. implicit memory NUMA policy. Do not bother with
5290 	 * the synchronization (read_mems_allowed_begin()) because we do not
5291 	 * have to be precise here.
5292 	 */
5293 	if (!nodemask)
5294 		nodemask = &cpuset_current_mems_allowed;
5295 
5296 	return !node_isset(nid, *nodemask);
5297 }
5298 
5299 #define K(x) ((x) << (PAGE_SHIFT-10))
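/* E.g. with 4KiB pages (PAGE_SHIFT == 12), K(256) == 1024, i.e. 1024kB. */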
5300 
5301 static void show_migration_types(unsigned char type)
5302 {
5303 	static const char types[MIGRATE_TYPES] = {
5304 		[MIGRATE_UNMOVABLE]	= 'U',
5305 		[MIGRATE_MOVABLE]	= 'M',
5306 		[MIGRATE_RECLAIMABLE]	= 'E',
5307 		[MIGRATE_HIGHATOMIC]	= 'H',
5308 #ifdef CONFIG_CMA
5309 		[MIGRATE_CMA]		= 'C',
5310 #endif
5311 #ifdef CONFIG_MEMORY_ISOLATION
5312 		[MIGRATE_ISOLATE]	= 'I',
5313 #endif
5314 	};
5315 	char tmp[MIGRATE_TYPES + 1];
5316 	char *p = tmp;
5317 	int i;
5318 
5319 	for (i = 0; i < MIGRATE_TYPES; i++) {
5320 		if (type & (1 << i))
5321 			*p++ = types[i];
5322 	}
5323 
5324 	*p = '\0';
5325 	printk(KERN_CONT "(%s) ", tmp);
5326 }
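
/*
 * Worked example (editor's illustration): a @type bitmask with the
 * MIGRATE_UNMOVABLE and MIGRATE_RECLAIMABLE bits set, i.e. 0x5 with the
 * enum values above, prints as "(UE) ".
 */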
5327 
5328 /*
5329  * Show the free area list (used e.g. by the SysRq-m show_mem() handler and
5330  * OOM reports). For each zone we print the per-order free page counts,
5331  * annotated with the migration types present on each free list.
5332  *
5333  * Bits in @filter:
5334  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5335  *   cpuset.
5336  */
5337 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5338 {
5339 	unsigned long free_pcp = 0;
5340 	int cpu;
5341 	struct zone *zone;
5342 	pg_data_t *pgdat;
5343 
5344 	for_each_populated_zone(zone) {
5345 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5346 			continue;
5347 
5348 		for_each_online_cpu(cpu)
5349 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5350 	}
5351 
5352 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5353 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5354 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5355 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5356 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5357 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5358 		global_node_page_state(NR_ACTIVE_ANON),
5359 		global_node_page_state(NR_INACTIVE_ANON),
5360 		global_node_page_state(NR_ISOLATED_ANON),
5361 		global_node_page_state(NR_ACTIVE_FILE),
5362 		global_node_page_state(NR_INACTIVE_FILE),
5363 		global_node_page_state(NR_ISOLATED_FILE),
5364 		global_node_page_state(NR_UNEVICTABLE),
5365 		global_node_page_state(NR_FILE_DIRTY),
5366 		global_node_page_state(NR_WRITEBACK),
5367 		global_node_page_state(NR_SLAB_RECLAIMABLE),
5368 		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
5369 		global_node_page_state(NR_FILE_MAPPED),
5370 		global_node_page_state(NR_SHMEM),
5371 		global_zone_page_state(NR_PAGETABLE),
5372 		global_zone_page_state(NR_BOUNCE),
5373 		global_zone_page_state(NR_FREE_PAGES),
5374 		free_pcp,
5375 		global_zone_page_state(NR_FREE_CMA_PAGES));
5376 
5377 	for_each_online_pgdat(pgdat) {
5378 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5379 			continue;
5380 
5381 		printk("Node %d"
5382 			" active_anon:%lukB"
5383 			" inactive_anon:%lukB"
5384 			" active_file:%lukB"
5385 			" inactive_file:%lukB"
5386 			" unevictable:%lukB"
5387 			" isolated(anon):%lukB"
5388 			" isolated(file):%lukB"
5389 			" mapped:%lukB"
5390 			" dirty:%lukB"
5391 			" writeback:%lukB"
5392 			" shmem:%lukB"
5393 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5394 			" shmem_thp: %lukB"
5395 			" shmem_pmdmapped: %lukB"
5396 			" anon_thp: %lukB"
5397 #endif
5398 			" writeback_tmp:%lukB"
5399 			" all_unreclaimable? %s"
5400 			"\n",
5401 			pgdat->node_id,
5402 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5403 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5404 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5405 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5406 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5407 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5408 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5409 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5410 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5411 			K(node_page_state(pgdat, NR_WRITEBACK)),
5412 			K(node_page_state(pgdat, NR_SHMEM)),
5413 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5414 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5415 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5416 					* HPAGE_PMD_NR),
5417 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5418 #endif
5419 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5420 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5421 				"yes" : "no");
5422 	}
5423 
5424 	for_each_populated_zone(zone) {
5425 		int i;
5426 
5427 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5428 			continue;
5429 
5430 		free_pcp = 0;
5431 		for_each_online_cpu(cpu)
5432 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5433 
5434 		show_node(zone);
5435 		printk(KERN_CONT
5436 			"%s"
5437 			" free:%lukB"
5438 			" min:%lukB"
5439 			" low:%lukB"
5440 			" high:%lukB"
5441 			" reserved_highatomic:%luKB"
5442 			" active_anon:%lukB"
5443 			" inactive_anon:%lukB"
5444 			" active_file:%lukB"
5445 			" inactive_file:%lukB"
5446 			" unevictable:%lukB"
5447 			" writepending:%lukB"
5448 			" present:%lukB"
5449 			" managed:%lukB"
5450 			" mlocked:%lukB"
5451 			" kernel_stack:%lukB"
5452 #ifdef CONFIG_SHADOW_CALL_STACK
5453 			" shadow_call_stack:%lukB"
5454 #endif
5455 			" pagetables:%lukB"
5456 			" bounce:%lukB"
5457 			" free_pcp:%lukB"
5458 			" local_pcp:%ukB"
5459 			" free_cma:%lukB"
5460 			"\n",
5461 			zone->name,
5462 			K(zone_page_state(zone, NR_FREE_PAGES)),
5463 			K(min_wmark_pages(zone)),
5464 			K(low_wmark_pages(zone)),
5465 			K(high_wmark_pages(zone)),
5466 			K(zone->nr_reserved_highatomic),
5467 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5468 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5469 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5470 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5471 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5472 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5473 			K(zone->present_pages),
5474 			K(zone_managed_pages(zone)),
5475 			K(zone_page_state(zone, NR_MLOCK)),
5476 			zone_page_state(zone, NR_KERNEL_STACK_KB),
5477 #ifdef CONFIG_SHADOW_CALL_STACK
5478 			zone_page_state(zone, NR_KERNEL_SCS_KB),
5479 #endif
5480 			K(zone_page_state(zone, NR_PAGETABLE)),
5481 			K(zone_page_state(zone, NR_BOUNCE)),
5482 			K(free_pcp),
5483 			K(this_cpu_read(zone->pageset->pcp.count)),
5484 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5485 		printk("lowmem_reserve[]:");
5486 		for (i = 0; i < MAX_NR_ZONES; i++)
5487 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5488 		printk(KERN_CONT "\n");
5489 	}
5490 
5491 	for_each_populated_zone(zone) {
5492 		unsigned int order;
5493 		unsigned long nr[MAX_ORDER], flags, total = 0;
5494 		unsigned char types[MAX_ORDER];
5495 
5496 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5497 			continue;
5498 		show_node(zone);
5499 		printk(KERN_CONT "%s: ", zone->name);
5500 
5501 		spin_lock_irqsave(&zone->lock, flags);
5502 		for (order = 0; order < MAX_ORDER; order++) {
5503 			struct free_area *area = &zone->free_area[order];
5504 			int type;
5505 
5506 			nr[order] = area->nr_free;
5507 			total += nr[order] << order;
5508 
5509 			types[order] = 0;
5510 			for (type = 0; type < MIGRATE_TYPES; type++) {
5511 				if (!free_area_empty(area, type))
5512 					types[order] |= 1 << type;
5513 			}
5514 		}
5515 		spin_unlock_irqrestore(&zone->lock, flags);
5516 		for (order = 0; order < MAX_ORDER; order++) {
5517 			printk(KERN_CONT "%lu*%lukB ",
5518 			       nr[order], K(1UL) << order);
5519 			if (nr[order])
5520 				show_migration_types(types[order]);
5521 		}
5522 		printk(KERN_CONT "= %lukB\n", K(total));
5523 	}
5524 
5525 	hugetlb_show_meminfo();
5526 
5527 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5528 
5529 	show_swap_cache_info();
5530 }
5531 
5532 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5533 {
5534 	zoneref->zone = zone;
5535 	zoneref->zone_idx = zone_idx(zone);
5536 }
5537 
5538 /*
5539  * Builds allocation fallback zone lists.
5540  *
5541  * Add all populated zones of a node to the zonelist.
5542  */
5543 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5544 {
5545 	struct zone *zone;
5546 	enum zone_type zone_type = MAX_NR_ZONES;
5547 	int nr_zones = 0;
5548 
5549 	do {
5550 		zone_type--;
5551 		zone = pgdat->node_zones + zone_type;
5552 		if (managed_zone(zone)) {
5553 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5554 			check_highest_zone(zone_type);
5555 		}
5556 	} while (zone_type);
5557 
5558 	return nr_zones;
5559 }
5560 
5561 #ifdef CONFIG_NUMA
5562 
5563 static int __parse_numa_zonelist_order(char *s)
5564 {
5565 	/*
5566 	 * We used to support different zonelist modes but they turned
5567 	 * out to be just not useful. Let's keep the warning in place
5568 	 * if somebody still uses the cmd line parameter so that we do
5569 	 * not fail silently.
5570 	 */
5571 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5572 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5573 		return -EINVAL;
5574 	}
5575 	return 0;
5576 }
5577 
5578 char numa_zonelist_order[] = "Node";
5579 
5580 /*
5581  * sysctl handler for numa_zonelist_order
5582  */
5583 int numa_zonelist_order_handler(struct ctl_table *table, int write,
5584 		void *buffer, size_t *length, loff_t *ppos)
5585 {
5586 	if (write)
5587 		return __parse_numa_zonelist_order(buffer);
5588 	return proc_dostring(table, write, buffer, length, ppos);
5589 }
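
/*
 * Usage note (editor's addition): writes to /proc/sys/vm/numa_zonelist_order
 * only accept values starting with 'd'/'D' or 'n'/'N' (the legacy "default"
 * and "node" spellings); anything else is rejected with the warning above.
 */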
5590 
5591 
5592 #define MAX_NODE_LOAD (nr_online_nodes)
5593 static int node_load[MAX_NUMNODES];
5594 
5595 /**
5596  * find_next_best_node - find the next node that should appear in a given node's fallback list
5597  * @node: node whose fallback list we're appending
5598  * @used_node_mask: nodemask_t of already used nodes
5599  *
5600  * We use a number of factors to determine which is the next node that should
5601  * appear on a given node's fallback list.  The node should not have appeared
5602  * already in @node's fallback list, and it should be the next closest node
5603  * according to the distance array (which contains arbitrary distance values
5604  * from each node to each node in the system); we also prefer nodes with no
5605  * CPUs, since presumably they'll have very little allocation pressure on
5606  * them otherwise.
5607  *
5608  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5609  */
5610 static int find_next_best_node(int node, nodemask_t *used_node_mask)
5611 {
5612 	int n, val;
5613 	int min_val = INT_MAX;
5614 	int best_node = NUMA_NO_NODE;
5615 	const struct cpumask *tmp = cpumask_of_node(0);
5616 
5617 	/* Use the local node if we haven't already */
5618 	if (!node_isset(node, *used_node_mask)) {
5619 		node_set(node, *used_node_mask);
5620 		return node;
5621 	}
5622 
5623 	for_each_node_state(n, N_MEMORY) {
5624 
5625 		/* Don't want a node to appear more than once */
5626 		if (node_isset(n, *used_node_mask))
5627 			continue;
5628 
5629 		/* Use the distance array to find the distance */
5630 		val = node_distance(node, n);
5631 
5632 		/* Penalize nodes under us ("prefer the next node") */
5633 		val += (n < node);
5634 
5635 		/* Give preference to headless and unused nodes */
5636 		tmp = cpumask_of_node(n);
5637 		if (!cpumask_empty(tmp))
5638 			val += PENALTY_FOR_NODE_WITH_CPUS;
5639 
5640 		/* Slight preference for less loaded node */
5641 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5642 		val += node_load[n];
5643 
5644 		if (val < min_val) {
5645 			min_val = val;
5646 			best_node = n;
5647 		}
5648 	}
5649 
5650 	if (best_node >= 0)
5651 		node_set(best_node, *used_node_mask);
5652 
5653 	return best_node;
5654 }
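
/*
 * Worked example (editor's illustration, hypothetical distances): searching
 * from node 0 with nodes 1 and 2 both unused, node_distance(0, 1) == 20 with
 * CPUs present and node_distance(0, 2) == 20 without CPUs, node 2 wins: both
 * start at val 20, but node 1 gains PENALTY_FOR_NODE_WITH_CPUS before the
 * load scaling.
 */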
5655 
5656 
5657 /*
5658  * Build zonelists ordered by node and zones within node.
5659  * This results in maximum locality--normal zone overflows into local
5660  * DMA zone, if any--but risks exhausting DMA zone.
5661  */
5662 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5663 		unsigned nr_nodes)
5664 {
5665 	struct zoneref *zonerefs;
5666 	int i;
5667 
5668 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5669 
5670 	for (i = 0; i < nr_nodes; i++) {
5671 		int nr_zones;
5672 
5673 		pg_data_t *node = NODE_DATA(node_order[i]);
5674 
5675 		nr_zones = build_zonerefs_node(node, zonerefs);
5676 		zonerefs += nr_zones;
5677 	}
5678 	zonerefs->zone = NULL;
5679 	zonerefs->zone_idx = 0;
5680 }
5681 
5682 /*
5683  * Build gfp_thisnode zonelists
5684  */
5685 static void build_thisnode_zonelists(pg_data_t *pgdat)
5686 {
5687 	struct zoneref *zonerefs;
5688 	int nr_zones;
5689 
5690 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5691 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5692 	zonerefs += nr_zones;
5693 	zonerefs->zone = NULL;
5694 	zonerefs->zone_idx = 0;
5695 }
5696 
5697 /*
5698  * Build zonelists ordered by zone and nodes within zones.
5699  * This results in conserving DMA zone[s] until all Normal memory is
5700  * exhausted, but results in overflowing to remote node while memory
5701  * may still exist in local DMA zone.
5702  */
5703 
5704 static void build_zonelists(pg_data_t *pgdat)
5705 {
5706 	static int node_order[MAX_NUMNODES];
5707 	int node, load, nr_nodes = 0;
5708 	nodemask_t used_mask = NODE_MASK_NONE;
5709 	int local_node, prev_node;
5710 
5711 	/* NUMA-aware ordering of nodes */
5712 	local_node = pgdat->node_id;
5713 	load = nr_online_nodes;
5714 	prev_node = local_node;
5715 
5716 	memset(node_order, 0, sizeof(node_order));
5717 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5718 		/*
5719 		 * We don't want to pressure a particular node.
5720 		 * So add a penalty to the first node in the same
5721 		 * distance group to make it round-robin.
5722 		 */
5723 		if (node_distance(local_node, node) !=
5724 		    node_distance(local_node, prev_node))
5725 			node_load[node] = load;
5726 
5727 		node_order[nr_nodes++] = node;
5728 		prev_node = node;
5729 		load--;
5730 	}
5731 
5732 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5733 	build_thisnode_zonelists(pgdat);
5734 }
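
/*
 * Worked example (editor's illustration): on a hypothetical two-node machine
 * where node 0 has ZONE_DMA32 and ZONE_NORMAL and node 1 has only
 * ZONE_NORMAL, node 0's ZONELIST_FALLBACK list becomes
 * Normal(0) -> DMA32(0) -> Normal(1), while its ZONELIST_NOFALLBACK list
 * contains only node 0's own zones.
 */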
5735 
5736 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5737 /*
5738  * Return node id of node used for "local" allocations.
5739  * I.e., first node id of first zone in arg node's generic zonelist.
5740  * Used for initializing percpu 'numa_mem', which is used primarily
5741  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5742  */
5743 int local_memory_node(int node)
5744 {
5745 	struct zoneref *z;
5746 
5747 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5748 				   gfp_zone(GFP_KERNEL),
5749 				   NULL);
5750 	return zone_to_nid(z->zone);
5751 }
5752 #endif
5753 
5754 static void setup_min_unmapped_ratio(void);
5755 static void setup_min_slab_ratio(void);
5756 #else	/* CONFIG_NUMA */
5757 
5758 static void build_zonelists(pg_data_t *pgdat)
5759 {
5760 	int node, local_node;
5761 	struct zoneref *zonerefs;
5762 	int nr_zones;
5763 
5764 	local_node = pgdat->node_id;
5765 
5766 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5767 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5768 	zonerefs += nr_zones;
5769 
5770 	/*
5771 	 * Now we build the zonelist so that it contains the zones
5772 	 * of all the other nodes.
5773 	 * We don't want to pressure a particular node, so when
5774 	 * building the zones for node N, we make sure that the
5775 	 * zones coming right after the local ones are those from
5776 	 * node N+1 (modulo the number of nodes).
5777 	 */
5778 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5779 		if (!node_online(node))
5780 			continue;
5781 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5782 		zonerefs += nr_zones;
5783 	}
5784 	for (node = 0; node < local_node; node++) {
5785 		if (!node_online(node))
5786 			continue;
5787 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5788 		zonerefs += nr_zones;
5789 	}
5790 
5791 	zonerefs->zone = NULL;
5792 	zonerefs->zone_idx = 0;
5793 }
5794 
5795 #endif	/* CONFIG_NUMA */
5796 
5797 /*
5798  * Boot pageset table. One per cpu which is going to be used for all
5799  * zones and all nodes. The parameters will be set in such a way
5800  * that an item put on a list will immediately be handed over to
5801  * the buddy list. This is safe since pageset manipulation is done
5802  * with interrupts disabled.
5803  *
5804  * The boot_pagesets must be kept even after bootup is complete for
5805  * unused processors and/or zones. They do play a role for bootstrapping
5806  * hotplugged processors.
5807  *
5808  * zoneinfo_show() and maybe other functions do
5809  * not check if the processor is online before following the pageset pointer.
5810  * Other parts of the kernel may not check if the zone is available.
5811  */
5812 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5813 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5814 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5815 
5816 static void __build_all_zonelists(void *data)
5817 {
5818 	int nid;
5819 	int __maybe_unused cpu;
5820 	pg_data_t *self = data;
5821 	static DEFINE_SPINLOCK(lock);
5822 
5823 	spin_lock(&lock);
5824 
5825 #ifdef CONFIG_NUMA
5826 	memset(node_load, 0, sizeof(node_load));
5827 #endif
5828 
5829 	/*
5830 	 * This node was hot-added and no memory is yet present, so just
5831 	 * building its zonelists is fine - no need to touch other nodes.
5832 	 */
5833 	if (self && !node_online(self->node_id)) {
5834 		build_zonelists(self);
5835 	} else {
5836 		for_each_online_node(nid) {
5837 			pg_data_t *pgdat = NODE_DATA(nid);
5838 
5839 			build_zonelists(pgdat);
5840 		}
5841 
5842 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5843 		/*
5844 		 * We now know the "local memory node" for each node--
5845 		 * i.e., the node of the first zone in the generic zonelist.
5846 		 * Set up numa_mem percpu variable for on-line cpus.  During
5847 		 * boot, only the boot cpu should be on-line;  we'll init the
5848 		 * secondary cpus' numa_mem as they come on-line.  During
5849 		 * node/memory hotplug, we'll fixup all on-line cpus.
5850 		 */
5851 		for_each_online_cpu(cpu)
5852 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5853 #endif
5854 	}
5855 
5856 	spin_unlock(&lock);
5857 }
5858 
5859 static noinline void __init
5860 build_all_zonelists_init(void)
5861 {
5862 	int cpu;
5863 
5864 	__build_all_zonelists(NULL);
5865 
5866 	/*
5867 	 * Initialize the boot_pagesets that are going to be used
5868 	 * for bootstrapping processors. The real pagesets for
5869 	 * each zone will be allocated later when the per cpu
5870 	 * allocator is available.
5871 	 *
5872 	 * boot_pagesets are used also for bootstrapping offline
5873 	 * cpus if the system is already booted because the pagesets
5874 	 * are needed to initialize allocators on a specific cpu too.
5875 	 * E.g. the percpu allocator needs the page allocator, which
5876 	 * needs the percpu allocator in order to allocate its pagesets
5877 	 * (a chicken-and-egg dilemma).
5878 	 */
5879 	for_each_possible_cpu(cpu)
5880 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5881 
5882 	mminit_verify_zonelist();
5883 	cpuset_init_current_mems_allowed();
5884 }
5885 
5886 /*
5887  * Zonelist rebuilds are serialized by the static lock inside
5888  * __build_all_zonelists(), unless system_state == SYSTEM_BOOTING.
5889  * __ref due to call of __init annotated helper build_all_zonelists_init
5890  * [protected by SYSTEM_BOOTING].
5891  */
5892 void __ref build_all_zonelists(pg_data_t *pgdat)
5893 {
5894 	if (system_state == SYSTEM_BOOTING) {
5895 		build_all_zonelists_init();
5896 	} else {
5897 		__build_all_zonelists(pgdat);
5898 		/* cpuset refresh routine should be here */
5899 	}
5900 	vm_total_pages = nr_free_pagecache_pages();
5901 	/*
5902 	 * Disable grouping by mobility if the number of pages in the
5903 	 * system is too low to allow the mechanism to work. It would be
5904 	 * more accurate, but expensive to check per-zone. This check is
5905 	 * made on memory-hotadd so a system can start with mobility
5906 	 * disabled and enable it later
5907 	 */
5908 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5909 		page_group_by_mobility_disabled = 1;
5910 	else
5911 		page_group_by_mobility_disabled = 0;
5912 
5913 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
5914 		nr_online_nodes,
5915 		page_group_by_mobility_disabled ? "off" : "on",
5916 		vm_total_pages);
5917 #ifdef CONFIG_NUMA
5918 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5919 #endif
5920 }
5921 
5922 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
5923 static bool __meminit
5924 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
5925 {
5926 	static struct memblock_region *r;
5927 
5928 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5929 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
5930 			for_each_memblock(memory, r) {
5931 				if (*pfn < memblock_region_memory_end_pfn(r))
5932 					break;
5933 			}
5934 		}
5935 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
5936 		    memblock_is_mirror(r)) {
5937 			*pfn = memblock_region_memory_end_pfn(r);
5938 			return true;
5939 		}
5940 	}
5941 	return false;
5942 }
5943 
5944 /*
5945  * Initially all pages are reserved - free ones are freed
5946  * up by memblock_free_all() once the early boot process is
5947  * done. Non-atomic initialization, single-pass.
5948  */
5949 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5950 		unsigned long start_pfn, enum memmap_context context,
5951 		struct vmem_altmap *altmap)
5952 {
5953 	unsigned long pfn, end_pfn = start_pfn + size;
5954 	struct page *page;
5955 
5956 	if (highest_memmap_pfn < end_pfn - 1)
5957 		highest_memmap_pfn = end_pfn - 1;
5958 
5959 #ifdef CONFIG_ZONE_DEVICE
5960 	/*
5961 	 * Honor reservation requested by the driver for this ZONE_DEVICE
5962 	 * memory. We limit the total number of pages to initialize to just
5963 	 * those that might contain the memory mapping. We will defer the
5964 	 * ZONE_DEVICE page initialization until after we have released
5965 	 * the hotplug lock.
5966 	 */
5967 	if (zone == ZONE_DEVICE) {
5968 		if (!altmap)
5969 			return;
5970 
5971 		if (start_pfn == altmap->base_pfn)
5972 			start_pfn += altmap->reserve;
5973 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5974 	}
5975 #endif
5976 
5977 	for (pfn = start_pfn; pfn < end_pfn; ) {
5978 		/*
5979 		 * There can be holes in boot-time mem_map[]s handed to this
5980 		 * function.  They do not exist on hotplugged memory.
5981 		 */
5982 		if (context == MEMMAP_EARLY) {
5983 			if (overlap_memmap_init(zone, &pfn))
5984 				continue;
5985 			if (defer_init(nid, pfn, end_pfn))
5986 				break;
5987 		}
5988 
5989 		page = pfn_to_page(pfn);
5990 		__init_single_page(page, pfn, zone, nid);
5991 		if (context == MEMMAP_HOTPLUG)
5992 			__SetPageReserved(page);
5993 
5994 		/*
5995 		 * Mark the block movable so that blocks are reserved for
5996 		 * movable at startup. This will force kernel allocations
5997 		 * to reserve their blocks rather than leaking throughout
5998 		 * the address space during boot when many long-lived
5999 		 * kernel allocations are made.
6000 		 *
6001 		 * The pageblock bitmap is created for the zone's valid pfn
6002 		 * range, but the memmap can be created for invalid pages
6003 		 * (for alignment). Check here so that we do not call
6004 		 * set_pageblock_migratetype() against a pfn out of zone.
6005 		 */
6006 		if (!(pfn & (pageblock_nr_pages - 1))) {
6007 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6008 			cond_resched();
6009 		}
6010 		pfn++;
6011 	}
6012 }
6013 
6014 #ifdef CONFIG_ZONE_DEVICE
6015 void __ref memmap_init_zone_device(struct zone *zone,
6016 				   unsigned long start_pfn,
6017 				   unsigned long nr_pages,
6018 				   struct dev_pagemap *pgmap)
6019 {
6020 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6021 	struct pglist_data *pgdat = zone->zone_pgdat;
6022 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6023 	unsigned long zone_idx = zone_idx(zone);
6024 	unsigned long start = jiffies;
6025 	int nid = pgdat->node_id;
6026 
6027 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6028 		return;
6029 
6030 	/*
6031 	 * The call to memmap_init_zone should have already taken care
6032 	 * of the pages reserved for the memmap, so we can just jump to
6033 	 * the end of that region and start processing the device pages.
6034 	 */
6035 	if (altmap) {
6036 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6037 		nr_pages = end_pfn - start_pfn;
6038 	}
6039 
6040 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6041 		struct page *page = pfn_to_page(pfn);
6042 
6043 		__init_single_page(page, pfn, zone_idx, nid);
6044 
6045 		/*
6046 		 * Mark the page reserved, as it will need to wait for the
6047 		 * onlining phase before it is fully associated with a zone.
6048 		 *
6049 		 * We can use the non-atomic __set_bit operation for setting
6050 		 * the flag as we are still initializing the pages.
6051 		 */
6052 		__SetPageReserved(page);
6053 
6054 		/*
6055 		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6056 		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6057 		 * ever freed or placed on a driver-private list.
6058 		 */
6059 		page->pgmap = pgmap;
6060 		page->zone_device_data = NULL;
6061 
6062 		/*
6063 		 * Mark the block movable so that blocks are reserved for
6064 		 * movable at startup. This will force kernel allocations
6065 		 * to reserve their blocks rather than leaking throughout
6066 		 * the address space during boot when many long-lived
6067 		 * kernel allocations are made.
6068 		 *
6069 		 * The pageblock bitmap is created for the zone's valid pfn
6070 		 * range, but the memmap can be created for invalid pages
6071 		 * (for alignment). Check here so that we do not call
6072 		 * set_pageblock_migratetype() against a pfn out of zone.
6073 		 *
6074 		 * Please note that the MEMMAP_HOTPLUG path doesn't clear the
6075 		 * memmap because this is done early in section_activate().
6076 		 */
6077 		if (!(pfn & (pageblock_nr_pages - 1))) {
6078 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6079 			cond_resched();
6080 		}
6081 	}
6082 
6083 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6084 		nr_pages, jiffies_to_msecs(jiffies - start));
6085 }
6086 
6087 #endif
6088 static void __meminit zone_init_free_lists(struct zone *zone)
6089 {
6090 	unsigned int order, t;
6091 	for_each_migratetype_order(order, t) {
6092 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6093 		zone->free_area[order].nr_free = 0;
6094 	}
6095 }
6096 
6097 void __meminit __weak memmap_init(unsigned long size, int nid,
6098 				  unsigned long zone,
6099 				  unsigned long range_start_pfn)
6100 {
6101 	unsigned long start_pfn, end_pfn;
6102 	unsigned long range_end_pfn = range_start_pfn + size;
6103 	int i;
6104 
6105 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6106 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6107 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6108 
6109 		if (end_pfn > start_pfn) {
6110 			size = end_pfn - start_pfn;
6111 			memmap_init_zone(size, nid, zone, start_pfn,
6112 					 MEMMAP_EARLY, NULL);
6113 		}
6114 	}
6115 }
6116 
6117 static int zone_batchsize(struct zone *zone)
6118 {
6119 #ifdef CONFIG_MMU
6120 	int batch;
6121 
6122 	/*
6123 	 * The per-cpu-pages pools are set to around 1/1024th of the
6124 	 * size of the zone.
6125 	 */
6126 	batch = zone_managed_pages(zone) / 1024;
6127 	/* But no more than a meg. */
6128 	if (batch * PAGE_SIZE > 1024 * 1024)
6129 		batch = (1024 * 1024) / PAGE_SIZE;
6130 	batch /= 4;		/* We effectively *= 4 below */
6131 	if (batch < 1)
6132 		batch = 1;
6133 
6134 	/*
6135 	 * Clamp the batch to a 2^n - 1 value. Having a power
6136 	 * of 2 value was found to be more likely to have
6137 	 * suboptimal cache aliasing properties in some cases.
6138 	 *
6139 	 * For example if 2 tasks are alternately allocating
6140 	 * batches of pages, one task can end up with a lot
6141 	 * of pages of one half of the possible page colors
6142 	 * and the other with pages of the other colors.
6143 	 */
6144 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6145 
6146 	return batch;
6147 
6148 #else
6149 	/* The deferral and batching of frees should be suppressed under NOMMU
6150 	 * conditions.
6151 	 *
6152 	 * The problem is that NOMMU needs to be able to allocate large chunks
6153 	 * of contiguous memory as there's no hardware page translation to
6154 	 * assemble apparent contiguous memory from discontiguous pages.
6155 	 *
6156 	 * Queueing large contiguous runs of pages for batching, however,
6157 	 * causes the pages to actually be freed in smaller chunks.  As there
6158 	 * can be a significant delay between the individual batches being
6159 	 * recycled, this leads to the once large chunks of space being
6160 	 * fragmented and becoming unavailable for high-order allocations.
6161 	 */
6162 	return 0;
6163 #endif
6164 }
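
/*
 * Worked example (editor's illustration): for a 1GiB zone with 4KiB pages,
 * zone_managed_pages() == 262144, so batch = 262144 / 1024 = 256 (exactly
 * 1MiB, so the strict > cap does not trigger), then 256 / 4 = 64, and
 * rounddown_pow_of_two(64 + 32) - 1 = 63.
 */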
6165 
6166 /*
6167  * pcp->high and pcp->batch values are related and dependent on one another:
6168  * ->batch must never be higher than ->high.
6169  * The following function updates them in a safe manner without read side
6170  * locking.
6171  *
6172  * Any new users of pcp->batch and pcp->high should ensure they can cope with
6173  * those fields changing asynchronously (according to the above rule).
6174  *
6175  * pcp_batch_high_lock must be held when calling this function outside of
6176  * boot time (or some other assurance that no concurrent updaters
6177  * exist).
6178  */
6179 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6180 		unsigned long batch)
6181 {
6182        /* start with a fail safe value for batch */
6183 	pcp->batch = 1;
6184 	smp_wmb();
6185 
6186        /* Update high, then batch, in order */
6187 	pcp->high = high;
6188 	smp_wmb();
6189 
6190 	pcp->batch = batch;
6191 }
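
/*
 * Illustrative sketch (editor's example, hypothetical helper): a lockless
 * reader must tolerate ->high and ->batch changing asynchronously; a stale
 * value only makes a drain happen slightly early or late, never crash.
 */
#if 0
static bool pcp_over_high(struct per_cpu_pages *pcp)
{
	int high = READ_ONCE(pcp->high);

	return pcp->count >= high;
}
#endif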
6192 
6193 /* a companion to pageset_set_high() */
6194 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
6195 {
6196 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
6197 }
6198 
6199 static void pageset_init(struct per_cpu_pageset *p)
6200 {
6201 	struct per_cpu_pages *pcp;
6202 	int migratetype;
6203 
6204 	memset(p, 0, sizeof(*p));
6205 
6206 	pcp = &p->pcp;
6207 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6208 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
6209 }
6210 
6211 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
6212 {
6213 	pageset_init(p);
6214 	pageset_set_batch(p, batch);
6215 }
6216 
6217 /*
6218  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
6219  * to the value high for the pageset p.
6220  */
6221 static void pageset_set_high(struct per_cpu_pageset *p,
6222 				unsigned long high)
6223 {
6224 	unsigned long batch = max(1UL, high / 4);
6225 	if ((high / 4) > (PAGE_SHIFT * 8))
6226 		batch = PAGE_SHIFT * 8;
6227 
6228 	pageset_update(&p->pcp, high, batch);
6229 }
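
/*
 * Worked example (editor's illustration): with PAGE_SHIFT == 12 and
 * high == 10000, high / 4 == 2500 exceeds PAGE_SHIFT * 8 == 96, so the
 * batch is clamped to 96.
 */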
6230 
6231 static void pageset_set_high_and_batch(struct zone *zone,
6232 				       struct per_cpu_pageset *pcp)
6233 {
6234 	if (percpu_pagelist_fraction)
6235 		pageset_set_high(pcp,
6236 			(zone_managed_pages(zone) /
6237 				percpu_pagelist_fraction));
6238 	else
6239 		pageset_set_batch(pcp, zone_batchsize(zone));
6240 }
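
/*
 * Usage note (editor's addition): percpu_pagelist_fraction is set via the
 * vm.percpu_pagelist_fraction sysctl; e.g. a value of 8 lets each per-cpu
 * list hold up to 1/8th of the zone's managed pages, while 0 (the default)
 * falls back to the zone_batchsize() heuristic above.
 */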
6241 
6242 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
6243 {
6244 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6245 
6246 	pageset_init(pcp);
6247 	pageset_set_high_and_batch(zone, pcp);
6248 }
6249 
6250 void __meminit setup_zone_pageset(struct zone *zone)
6251 {
6252 	int cpu;
6253 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
6254 	for_each_possible_cpu(cpu)
6255 		zone_pageset_init(zone, cpu);
6256 }
6257 
6258 /*
6259  * Allocate per cpu pagesets and initialize them.
6260  * Before this call only boot pagesets were available.
6261  */
6262 void __init setup_per_cpu_pageset(void)
6263 {
6264 	struct pglist_data *pgdat;
6265 	struct zone *zone;
6266 	int __maybe_unused cpu;
6267 
6268 	for_each_populated_zone(zone)
6269 		setup_zone_pageset(zone);
6270 
6271 #ifdef CONFIG_NUMA
6272 	/*
6273 	 * Unpopulated zones continue using the boot pagesets.
6274 	 * The numa stats for these pagesets need to be reset.
6275 	 * Otherwise, they will end up skewing the stats of
6276 	 * the nodes these zones are associated with.
6277 	 */
6278 	for_each_possible_cpu(cpu) {
6279 		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6280 		memset(pcp->vm_numa_stat_diff, 0,
6281 		       sizeof(pcp->vm_numa_stat_diff));
6282 	}
6283 #endif
6284 
6285 	for_each_online_pgdat(pgdat)
6286 		pgdat->per_cpu_nodestats =
6287 			alloc_percpu(struct per_cpu_nodestat);
6288 }
6289 
6290 static __meminit void zone_pcp_init(struct zone *zone)
6291 {
6292 	/*
6293 	 * per cpu subsystem is not up at this point. The following code
6294 	 * relies on the ability of the linker to provide the
6295 	 * offset of a (static) per cpu variable into the per cpu area.
6296 	 */
6297 	zone->pageset = &boot_pageset;
6298 
6299 	if (populated_zone(zone))
6300 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
6301 			zone->name, zone->present_pages,
6302 					 zone_batchsize(zone));
6303 }
6304 
6305 void __meminit init_currently_empty_zone(struct zone *zone,
6306 					unsigned long zone_start_pfn,
6307 					unsigned long size)
6308 {
6309 	struct pglist_data *pgdat = zone->zone_pgdat;
6310 	int zone_idx = zone_idx(zone) + 1;
6311 
6312 	if (zone_idx > pgdat->nr_zones)
6313 		pgdat->nr_zones = zone_idx;
6314 
6315 	zone->zone_start_pfn = zone_start_pfn;
6316 
6317 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
6318 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
6319 			pgdat->node_id,
6320 			(unsigned long)zone_idx(zone),
6321 			zone_start_pfn, (zone_start_pfn + size));
6322 
6323 	zone_init_free_lists(zone);
6324 	zone->initialized = 1;
6325 }
6326 
6327 /**
6328  * sparse_memory_present_with_active_regions - Call memory_present for each active range
6329  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
6330  *
6331  * If an architecture guarantees that all ranges registered contain no holes and may
6332  * be freed, this function may be used instead of calling memory_present() manually.
6333  */
6334 void __init sparse_memory_present_with_active_regions(int nid)
6335 {
6336 	unsigned long start_pfn, end_pfn;
6337 	int i, this_nid;
6338 
6339 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
6340 		memory_present(this_nid, start_pfn, end_pfn);
6341 }
6342 
6343 /**
6344  * get_pfn_range_for_nid - Return the start and end page frames for a node
6345  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6346  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6347  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6348  *
6349  * It returns the start and end page frame of a node based on information
6350  * provided by memblock_set_node(). If called for a node
6351  * with no available memory, a warning is printed and the start and end
6352  * PFNs will be 0.
6353  */
6354 void __init get_pfn_range_for_nid(unsigned int nid,
6355 			unsigned long *start_pfn, unsigned long *end_pfn)
6356 {
6357 	unsigned long this_start_pfn, this_end_pfn;
6358 	int i;
6359 
6360 	*start_pfn = -1UL;
6361 	*end_pfn = 0;
6362 
6363 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6364 		*start_pfn = min(*start_pfn, this_start_pfn);
6365 		*end_pfn = max(*end_pfn, this_end_pfn);
6366 	}
6367 
6368 	if (*start_pfn == -1UL)
6369 		*start_pfn = 0;
6370 }
6371 
6372 /*
6373  * This finds a zone that can be used for ZONE_MOVABLE pages. The
6374  * assumption is made that zones within a node are ordered by monotonically
6375  * increasing memory addresses so that the "highest" populated zone is used.
6376  */
6377 static void __init find_usable_zone_for_movable(void)
6378 {
6379 	int zone_index;
6380 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6381 		if (zone_index == ZONE_MOVABLE)
6382 			continue;
6383 
6384 		if (arch_zone_highest_possible_pfn[zone_index] >
6385 				arch_zone_lowest_possible_pfn[zone_index])
6386 			break;
6387 	}
6388 
6389 	VM_BUG_ON(zone_index == -1);
6390 	movable_zone = zone_index;
6391 }
6392 
6393 /*
6394  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6395  * because it is sized independently of architecture. Unlike the other zones,
6396  * the starting point for ZONE_MOVABLE is not fixed. It may be different
6397  * in each node depending on the size of each node and how evenly kernelcore
6398  * is distributed. This helper function adjusts the zone ranges
6399  * provided by the architecture for a given node by using the end of the
6400  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6401  * zones within a node are ordered by monotonically increasing memory addresses.
6402  */
6403 static void __init adjust_zone_range_for_zone_movable(int nid,
6404 					unsigned long zone_type,
6405 					unsigned long node_start_pfn,
6406 					unsigned long node_end_pfn,
6407 					unsigned long *zone_start_pfn,
6408 					unsigned long *zone_end_pfn)
6409 {
6410 	/* Only adjust if ZONE_MOVABLE is on this node */
6411 	if (zone_movable_pfn[nid]) {
6412 		/* Size ZONE_MOVABLE */
6413 		if (zone_type == ZONE_MOVABLE) {
6414 			*zone_start_pfn = zone_movable_pfn[nid];
6415 			*zone_end_pfn = min(node_end_pfn,
6416 				arch_zone_highest_possible_pfn[movable_zone]);
6417 
6418 		/* Adjust for ZONE_MOVABLE starting within this range */
6419 		} else if (!mirrored_kernelcore &&
6420 			*zone_start_pfn < zone_movable_pfn[nid] &&
6421 			*zone_end_pfn > zone_movable_pfn[nid]) {
6422 			*zone_end_pfn = zone_movable_pfn[nid];
6423 
6424 		/* Check if this whole range is within ZONE_MOVABLE */
6425 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
6426 			*zone_start_pfn = *zone_end_pfn;
6427 	}
6428 }
6429 
6430 /*
6431  * Return the number of pages a zone spans in a node, including holes
6432  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6433  */
6434 static unsigned long __init zone_spanned_pages_in_node(int nid,
6435 					unsigned long zone_type,
6436 					unsigned long node_start_pfn,
6437 					unsigned long node_end_pfn,
6438 					unsigned long *zone_start_pfn,
6439 					unsigned long *zone_end_pfn)
6440 {
6441 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6442 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6443 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6444 	if (!node_start_pfn && !node_end_pfn)
6445 		return 0;
6446 
6447 	/* Get the start and end of the zone */
6448 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6449 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6450 	adjust_zone_range_for_zone_movable(nid, zone_type,
6451 				node_start_pfn, node_end_pfn,
6452 				zone_start_pfn, zone_end_pfn);
6453 
6454 	/* Check that this node has pages within the zone's required range */
6455 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6456 		return 0;
6457 
6458 	/* Move the zone boundaries inside the node if necessary */
6459 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6460 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6461 
6462 	/* Return the spanned pages */
6463 	return *zone_end_pfn - *zone_start_pfn;
6464 }
6465 
6466 /*
6467  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6468  * then all holes in the requested range will be accounted for.
6469  */
6470 unsigned long __init __absent_pages_in_range(int nid,
6471 				unsigned long range_start_pfn,
6472 				unsigned long range_end_pfn)
6473 {
6474 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
6475 	unsigned long start_pfn, end_pfn;
6476 	int i;
6477 
6478 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6479 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6480 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6481 		nr_absent -= end_pfn - start_pfn;
6482 	}
6483 	return nr_absent;
6484 }
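
/*
 * Worked example (editor's illustration, hypothetical layout): for a request
 * over pfns [0, 1000) with memblock.memory regions [0, 400) and [600, 1000),
 * nr_absent starts at 1000 and the two regions subtract 400 each, leaving
 * 200 absent pages for the hole at [400, 600).
 */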
6485 
6486 /**
6487  * absent_pages_in_range - Return number of page frames in holes within a range
6488  * @start_pfn: The start PFN to start searching for holes
6489  * @end_pfn: The end PFN to stop searching for holes
6490  *
6491  * Return: the number of page frames in memory holes within a range.
6492  */
6493 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6494 							unsigned long end_pfn)
6495 {
6496 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6497 }
6498 
6499 /* Return the number of page frames in holes in a zone on a node */
6500 static unsigned long __init zone_absent_pages_in_node(int nid,
6501 					unsigned long zone_type,
6502 					unsigned long node_start_pfn,
6503 					unsigned long node_end_pfn)
6504 {
6505 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6506 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6507 	unsigned long zone_start_pfn, zone_end_pfn;
6508 	unsigned long nr_absent;
6509 
6510 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6511 	if (!node_start_pfn && !node_end_pfn)
6512 		return 0;
6513 
6514 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6515 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6516 
6517 	adjust_zone_range_for_zone_movable(nid, zone_type,
6518 			node_start_pfn, node_end_pfn,
6519 			&zone_start_pfn, &zone_end_pfn);
6520 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6521 
6522 	/*
6523 	 * ZONE_MOVABLE handling.
6524 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6525 	 * and vice versa.
6526 	 */
6527 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6528 		unsigned long start_pfn, end_pfn;
6529 		struct memblock_region *r;
6530 
6531 		for_each_memblock(memory, r) {
6532 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
6533 					  zone_start_pfn, zone_end_pfn);
6534 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
6535 					zone_start_pfn, zone_end_pfn);
6536 
6537 			if (zone_type == ZONE_MOVABLE &&
6538 			    memblock_is_mirror(r))
6539 				nr_absent += end_pfn - start_pfn;
6540 
6541 			if (zone_type == ZONE_NORMAL &&
6542 			    !memblock_is_mirror(r))
6543 				nr_absent += end_pfn - start_pfn;
6544 		}
6545 	}
6546 
6547 	return nr_absent;
6548 }
6549 
6550 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
6551 						unsigned long node_start_pfn,
6552 						unsigned long node_end_pfn)
6553 {
6554 	unsigned long realtotalpages = 0, totalpages = 0;
6555 	enum zone_type i;
6556 
6557 	for (i = 0; i < MAX_NR_ZONES; i++) {
6558 		struct zone *zone = pgdat->node_zones + i;
6559 		unsigned long zone_start_pfn, zone_end_pfn;
6560 		unsigned long spanned, absent;
6561 		unsigned long size, real_size;
6562 
6563 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
6564 						     node_start_pfn,
6565 						     node_end_pfn,
6566 						     &zone_start_pfn,
6567 						     &zone_end_pfn);
6568 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
6569 						   node_start_pfn,
6570 						   node_end_pfn);
6571 
6572 		size = spanned;
6573 		real_size = size - absent;
6574 
6575 		if (size)
6576 			zone->zone_start_pfn = zone_start_pfn;
6577 		else
6578 			zone->zone_start_pfn = 0;
6579 		zone->spanned_pages = size;
6580 		zone->present_pages = real_size;
6581 
6582 		totalpages += size;
6583 		realtotalpages += real_size;
6584 	}
6585 
6586 	pgdat->node_spanned_pages = totalpages;
6587 	pgdat->node_present_pages = realtotalpages;
6588 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6589 							realtotalpages);
6590 }
6591 
6592 #ifndef CONFIG_SPARSEMEM
6593 /*
6594  * Calculate the size of the zone->blockflags rounded to an unsigned long.
6595  * Start by making sure zonesize is a multiple of pageblock_order by rounding
6596  * up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock, round what
6597  * is now in bits up to the nearest long in bits, then return it in
6598  * bytes.
6599  */
6600 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6601 {
6602 	unsigned long usemapsize;
6603 
6604 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6605 	usemapsize = roundup(zonesize, pageblock_nr_pages);
6606 	usemapsize = usemapsize >> pageblock_order;
6607 	usemapsize *= NR_PAGEBLOCK_BITS;
6608 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6609 
6610 	return usemapsize / 8;
6611 }
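
/*
 * Worked example (editor's illustration): a 1GiB zone with 4KiB pages and
 * pageblock_order == 9 has 262144 / 512 == 512 pageblocks; at
 * NR_PAGEBLOCK_BITS == 4 that is 2048 bits, which rounds up to whole longs
 * and returns 256 bytes.
 */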
6612 
6613 static void __ref setup_usemap(struct pglist_data *pgdat,
6614 				struct zone *zone,
6615 				unsigned long zone_start_pfn,
6616 				unsigned long zonesize)
6617 {
6618 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6619 	zone->pageblock_flags = NULL;
6620 	if (usemapsize) {
6621 		zone->pageblock_flags =
6622 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6623 					    pgdat->node_id);
6624 		if (!zone->pageblock_flags)
6625 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6626 			      usemapsize, zone->name, pgdat->node_id);
6627 	}
6628 }
6629 #else
6630 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6631 				unsigned long zone_start_pfn, unsigned long zonesize) {}
6632 #endif /* CONFIG_SPARSEMEM */
6633 
6634 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6635 
6636 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6637 void __init set_pageblock_order(void)
6638 {
6639 	unsigned int order;
6640 
6641 	/* Check that pageblock_nr_pages has not already been set up */
6642 	if (pageblock_order)
6643 		return;
6644 
6645 	if (HPAGE_SHIFT > PAGE_SHIFT)
6646 		order = HUGETLB_PAGE_ORDER;
6647 	else
6648 		order = MAX_ORDER - 1;
6649 
6650 	/*
6651 	 * Assume the largest contiguous order of interest is a huge page.
6652 	 * This value may be variable depending on boot parameters on IA64 and
6653 	 * powerpc.
6654 	 */
6655 	pageblock_order = order;
6656 }
6657 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6658 
6659 /*
6660  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6661  * is unused as pageblock_order is set at compile-time. See
6662  * include/linux/pageblock-flags.h for the values of pageblock_order based on
6663  * the kernel config.
6664  */
6665 void __init set_pageblock_order(void)
6666 {
6667 }
6668 
6669 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6670 
6671 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6672 						unsigned long present_pages)
6673 {
6674 	unsigned long pages = spanned_pages;
6675 
6676 	/*
6677 	 * Provide a more accurate estimation if there are holes within
6678 	 * the zone and SPARSEMEM is in use. If there are holes within the
6679 	 * zone, each populated memory region may cost us one or two extra
6680 	 * memmap pages due to alignment because memmap pages for each
6681 	 * populated region may not be naturally aligned on a page boundary.
6682 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6683 	 */
6684 	if (spanned_pages > present_pages + (present_pages >> 4) &&
6685 	    IS_ENABLED(CONFIG_SPARSEMEM))
6686 		pages = present_pages;
6687 
6688 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6689 }
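
/*
 * Worked example (editor's illustration, assuming a 64-byte struct page):
 * a fully populated 1GiB span (262144 pages with 4KiB pages) needs
 * 262144 * 64 bytes == 16MiB of memmap, i.e. 4096 pages.
 */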
6690 
6691 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6692 static void pgdat_init_split_queue(struct pglist_data *pgdat)
6693 {
6694 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6695 
6696 	spin_lock_init(&ds_queue->split_queue_lock);
6697 	INIT_LIST_HEAD(&ds_queue->split_queue);
6698 	ds_queue->split_queue_len = 0;
6699 }
6700 #else
6701 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6702 #endif
6703 
6704 #ifdef CONFIG_COMPACTION
6705 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6706 {
6707 	init_waitqueue_head(&pgdat->kcompactd_wait);
6708 }
6709 #else
6710 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6711 #endif
6712 
6713 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6714 {
6715 	pgdat_resize_init(pgdat);
6716 
6717 	pgdat_init_split_queue(pgdat);
6718 	pgdat_init_kcompactd(pgdat);
6719 
6720 	init_waitqueue_head(&pgdat->kswapd_wait);
6721 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
6722 
6723 	pgdat_page_ext_init(pgdat);
6724 	spin_lock_init(&pgdat->lru_lock);
6725 	lruvec_init(&pgdat->__lruvec);
6726 }
6727 
6728 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6729 							unsigned long remaining_pages)
6730 {
6731 	atomic_long_set(&zone->managed_pages, remaining_pages);
6732 	zone_set_nid(zone, nid);
6733 	zone->name = zone_names[idx];
6734 	zone->zone_pgdat = NODE_DATA(nid);
6735 	spin_lock_init(&zone->lock);
6736 	zone_seqlock_init(zone);
6737 	zone_pcp_init(zone);
6738 }
6739 
6740 /*
6741  * Set up the zone data structures
6742  * - init pgdat internals
6743  * - init all zones belonging to this node
6744  *
6745  * NOTE: this function is only called during memory hotplug
6746  */
6747 #ifdef CONFIG_MEMORY_HOTPLUG
6748 void __ref free_area_init_core_hotplug(int nid)
6749 {
6750 	enum zone_type z;
6751 	pg_data_t *pgdat = NODE_DATA(nid);
6752 
6753 	pgdat_init_internals(pgdat);
6754 	for (z = 0; z < MAX_NR_ZONES; z++)
6755 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6756 }
6757 #endif
6758 
6759 /*
6760  * Set up the zone data structures:
6761  *   - mark all pages reserved
6762  *   - mark all memory queues empty
6763  *   - clear the memory bitmaps
6764  *
6765  * NOTE: pgdat should get zeroed by caller.
6766  * NOTE: this function is only called during early init.
6767  */
6768 static void __init free_area_init_core(struct pglist_data *pgdat)
6769 {
6770 	enum zone_type j;
6771 	int nid = pgdat->node_id;
6772 
6773 	pgdat_init_internals(pgdat);
6774 	pgdat->per_cpu_nodestats = &boot_nodestats;
6775 
6776 	for (j = 0; j < MAX_NR_ZONES; j++) {
6777 		struct zone *zone = pgdat->node_zones + j;
6778 		unsigned long size, freesize, memmap_pages;
6779 		unsigned long zone_start_pfn = zone->zone_start_pfn;
6780 
6781 		size = zone->spanned_pages;
6782 		freesize = zone->present_pages;
6783 
6784 		/*
6785 		 * Adjust freesize so that it accounts for how much memory
6786 		 * is used by this zone for memmap. This affects the watermark
6787 		 * and per-cpu initialisations
6788 		 */
6789 		memmap_pages = calc_memmap_size(size, freesize);
6790 		if (!is_highmem_idx(j)) {
6791 			if (freesize >= memmap_pages) {
6792 				freesize -= memmap_pages;
6793 				if (memmap_pages)
6794 					printk(KERN_DEBUG
6795 					       "  %s zone: %lu pages used for memmap\n",
6796 					       zone_names[j], memmap_pages);
6797 			} else
6798 				pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6799 					zone_names[j], memmap_pages, freesize);
6800 		}
6801 
6802 		/* Account for reserved pages */
6803 		if (j == 0 && freesize > dma_reserve) {
6804 			freesize -= dma_reserve;
6805 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6806 					zone_names[0], dma_reserve);
6807 		}
6808 
6809 		if (!is_highmem_idx(j))
6810 			nr_kernel_pages += freesize;
6811 		/* Charge for highmem memmap if there are enough kernel pages */
6812 		else if (nr_kernel_pages > memmap_pages * 2)
6813 			nr_kernel_pages -= memmap_pages;
6814 		nr_all_pages += freesize;
6815 
6816 		/*
6817 		 * Set an approximate value for lowmem here, it will be adjusted
6818 		 * when the bootmem allocator frees pages into the buddy system.
6819 		 * And all highmem pages will be managed by the buddy system.
6820 		 */
6821 		zone_init_internals(zone, j, nid, freesize);
6822 
6823 		if (!size)
6824 			continue;
6825 
6826 		set_pageblock_order();
6827 		setup_usemap(pgdat, zone, zone_start_pfn, size);
6828 		init_currently_empty_zone(zone, zone_start_pfn, size);
6829 		memmap_init(size, nid, j, zone_start_pfn);
6830 	}
6831 }
6832 
6833 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6834 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6835 {
6836 	unsigned long __maybe_unused start = 0;
6837 	unsigned long __maybe_unused offset = 0;
6838 
6839 	/* Skip empty nodes */
6840 	if (!pgdat->node_spanned_pages)
6841 		return;
6842 
6843 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6844 	offset = pgdat->node_start_pfn - start;
6845 	/* ia64 gets its own node_mem_map, before this, without bootmem */
6846 	if (!pgdat->node_mem_map) {
6847 		unsigned long size, end;
6848 		struct page *map;
6849 
6850 		/*
6851 		 * The zone's endpoints aren't required to be MAX_ORDER
6852 		 * aligned, but the node_mem_map endpoints must be MAX_ORDER
6853 		 * aligned for the buddy allocator to function correctly.
6854 		 */
6855 		end = pgdat_end_pfn(pgdat);
6856 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
6857 		size =  (end - start) * sizeof(struct page);
6858 		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
6859 					  pgdat->node_id);
6860 		if (!map)
6861 			panic("Failed to allocate %ld bytes for node %d memory map\n",
6862 			      size, pgdat->node_id);
6863 		pgdat->node_mem_map = map + offset;
6864 	}
6865 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6866 				__func__, pgdat->node_id, (unsigned long)pgdat,
6867 				(unsigned long)pgdat->node_mem_map);
6868 #ifndef CONFIG_NEED_MULTIPLE_NODES
6869 	/*
6870 	 * With no DISCONTIG, the global mem_map is just set as node 0's.
6871 	 */
6872 	if (pgdat == NODE_DATA(0)) {
6873 		mem_map = NODE_DATA(0)->node_mem_map;
6874 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6875 			mem_map -= offset;
6876 	}
6877 #endif
6878 }
6879 #else
6880 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6881 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
6882 
6883 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6884 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6885 {
6886 	pgdat->first_deferred_pfn = ULONG_MAX;
6887 }
6888 #else
6889 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6890 #endif
6891 
6892 static void __init free_area_init_node(int nid)
6893 {
6894 	pg_data_t *pgdat = NODE_DATA(nid);
6895 	unsigned long start_pfn = 0;
6896 	unsigned long end_pfn = 0;
6897 
6898 	/* pg_data_t should be reset to zero when it's allocated */
6899 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
6900 
6901 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6902 
6903 	pgdat->node_id = nid;
6904 	pgdat->node_start_pfn = start_pfn;
6905 	pgdat->per_cpu_nodestats = NULL;
6906 
6907 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6908 		(u64)start_pfn << PAGE_SHIFT,
6909 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6910 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
6911 
6912 	alloc_node_mem_map(pgdat);
6913 	pgdat_set_deferred_range(pgdat);
6914 
6915 	free_area_init_core(pgdat);
6916 }
6917 
6918 void __init free_area_init_memoryless_node(int nid)
6919 {
6920 	free_area_init_node(nid);
6921 }
6922 
6923 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
6924 /*
6925  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
6926  * PageReserved(). Return the number of struct pages that were initialized.
6927  */
6928 static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
6929 {
6930 	unsigned long pfn;
6931 	u64 pgcnt = 0;
6932 
6933 	for (pfn = spfn; pfn < epfn; pfn++) {
6934 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6935 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6936 				+ pageblock_nr_pages - 1;
6937 			continue;
6938 		}
6939 		/*
6940 		 * Use a fake node/zone (0) for now. Some of these pages
6941 		 * (in memblock.reserved but not in memblock.memory) will
6942 		 * get re-initialized via reserve_bootmem_region() later.
6943 		 */
6944 		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
6945 		__SetPageReserved(pfn_to_page(pfn));
6946 		pgcnt++;
6947 	}
6948 
6949 	return pgcnt;
6950 }
6951 
6952 /*
6953  * Only struct pages that are backed by physical memory are zeroed and
6954  * initialized by going through __init_single_page(). But there are some
6955  * struct pages which are reserved in the memblock allocator and whose fields
6956  * may be accessed (for example, page_to_pfn() on some configurations accesses
6957  * flags). We must explicitly initialize those struct pages.
6958  *
6959  * This function also addresses a similar issue where struct pages are left
6960  * uninitialized because the physical address range is not covered by
6961  * memblock.memory or memblock.reserved. That could happen when memblock
6962  * layout is manually configured via memmap=, or when the highest physical
6963  * address (max_pfn) does not end on a section boundary.
6964  */
6965 static void __init init_unavailable_mem(void)
6966 {
6967 	phys_addr_t start, end;
6968 	u64 i, pgcnt;
6969 	phys_addr_t next = 0;
6970 
6971 	/*
6972 	 * Loop through unavailable ranges not covered by memblock.memory.
6973 	 */
6974 	pgcnt = 0;
6975 	for_each_mem_range(i, &memblock.memory, NULL,
6976 			NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
6977 		if (next < start)
6978 			pgcnt += init_unavailable_range(PFN_DOWN(next),
6979 							PFN_UP(start));
6980 		next = end;
6981 	}
6982 
6983 	/*
6984 	 * Early sections always have a fully populated memmap for the whole
6985 	 * section - see pfn_valid(). If the last section has holes at the
6986 	 * end and that section is marked "online", the memmap will be
6987 	 * considered initialized. Make sure that memmap has a well defined
6988 	 * state.
6989 	 */
6990 	pgcnt += init_unavailable_range(PFN_DOWN(next),
6991 					round_up(max_pfn, PAGES_PER_SECTION));
6992 
6993 	/*
6994 	 * Struct pages that do not have backing memory. This could be because
6995 	 * firmware is using some of this memory, or for some other reasons.
6996 	 */
6997 	if (pgcnt)
6998 		pr_info("Zeroed struct page in unavailable ranges: %lld pages\n", pgcnt);
6999 }
7000 #else
7001 static inline void __init init_unavailable_mem(void)
7002 {
7003 }
7004 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
7005 
7006 #if MAX_NUMNODES > 1
7007 /*
7008  * Figure out the number of possible node ids.
7009  */
7010 void __init setup_nr_node_ids(void)
7011 {
7012 	unsigned int highest;
7013 
7014 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7015 	nr_node_ids = highest + 1;
7016 }
7017 #endif
7018 
7019 /**
7020  * node_map_pfn_alignment - determine the maximum internode alignment
7021  *
7022  * This function should be called after the node map is populated and sorted.
7023  * It calculates the maximum power of two alignment which can distinguish
7024  * all the nodes.
7025  *
7026  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7027  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7028  * nodes are shifted by 256MiB, the result is 256MiB.  Note that if only the
7029  * last node is shifted, 1GiB is enough and this function will indicate so.
7030  *
7031  * This is used to test whether pfn -> nid mapping of the chosen memory
7032  * model has fine enough granularity to avoid incorrect mapping for the
7033  * populated node map.
7034  *
7035  * Return: the determined alignment in pfn's.  0 if there is no alignment
7036  * requirement (single node).
7037  */
7038 unsigned long __init node_map_pfn_alignment(void)
7039 {
7040 	unsigned long accl_mask = 0, last_end = 0;
7041 	unsigned long start, end, mask;
7042 	int last_nid = NUMA_NO_NODE;
7043 	int i, nid;
7044 
7045 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7046 		if (!start || last_nid < 0 || last_nid == nid) {
7047 			last_nid = nid;
7048 			last_end = end;
7049 			continue;
7050 		}
7051 
7052 		/*
7053 		 * Start with a mask granular enough to pin-point to the
7054 		 * start pfn and tick off bits one-by-one until it becomes
7055 		 * too coarse to separate the current node from the last.
7056 		 */
7057 		mask = ~((1 << __ffs(start)) - 1);
7058 		while (mask && last_end <= (start & (mask << 1)))
7059 			mask <<= 1;
7060 
7061 		/* accumulate all internode masks */
7062 		accl_mask |= mask;
7063 	}
7064 
7065 	/* convert mask to number of pages */
7066 	return ~accl_mask + 1;
7067 }
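/*
 * Worked example (illustrative, assuming 4KiB pages): node0 spans PFNs
 * [0x00000, 0x40000) (the first 1GiB) and node1 spans [0x50000, 0x90000)
 * (1GiB starting 256MiB past node0's end).  On reaching node1, start is
 * 0x50000, so the initial mask is ~((1 << 16) - 1).  The loop widens the
 * mask while node0's end (0x40000) still fits below start & (mask << 1),
 * stopping at ...fffc0000, so ~accl_mask + 1 = 0x40000 PFNs, i.e. the
 * 1GiB "only the last node is shifted" case described above.
 */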
7068 
7069 /**
7070  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7071  *
7072  * Return: the minimum PFN based on information provided via
7073  * memblock_set_node().
7074  */
7075 unsigned long __init find_min_pfn_with_active_regions(void)
7076 {
7077 	return PHYS_PFN(memblock_start_of_DRAM());
7078 }
7079 
7080 /*
7081  * early_calculate_totalpages()
7082  * Sum pages in active regions for movable zone.
7083  * Populate N_MEMORY for calculating usable_nodes.
7084  */
7085 static unsigned long __init early_calculate_totalpages(void)
7086 {
7087 	unsigned long totalpages = 0;
7088 	unsigned long start_pfn, end_pfn;
7089 	int i, nid;
7090 
7091 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7092 		unsigned long pages = end_pfn - start_pfn;
7093 
7094 		totalpages += pages;
7095 		if (pages)
7096 			node_set_state(nid, N_MEMORY);
7097 	}
7098 	return totalpages;
7099 }
7100 
7101 /*
7102  * Find the PFN at which ZONE_MOVABLE begins in each node. Kernel memory
7103  * is spread evenly between nodes as long as the nodes have enough
7104  * memory. When they don't, some nodes will have more kernelcore than
7105  * others.
7106  */
7107 static void __init find_zone_movable_pfns_for_nodes(void)
7108 {
7109 	int i, nid;
7110 	unsigned long usable_startpfn;
7111 	unsigned long kernelcore_node, kernelcore_remaining;
7112 	/* save the state before borrowing the nodemask */
7113 	nodemask_t saved_node_state = node_states[N_MEMORY];
7114 	unsigned long totalpages = early_calculate_totalpages();
7115 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7116 	struct memblock_region *r;
7117 
7118 	/* Need to find movable_zone earlier when movable_node is specified. */
7119 	find_usable_zone_for_movable();
7120 
7121 	/*
7122 	 * If movable_node is specified, ignore kernelcore and movablecore
7123 	 * options.
7124 	 */
7125 	if (movable_node_is_enabled()) {
7126 		for_each_memblock(memory, r) {
7127 			if (!memblock_is_hotpluggable(r))
7128 				continue;
7129 
7130 			nid = memblock_get_region_node(r);
7131 
7132 			usable_startpfn = PFN_DOWN(r->base);
7133 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7134 				min(usable_startpfn, zone_movable_pfn[nid]) :
7135 				usable_startpfn;
7136 		}
7137 
7138 		goto out2;
7139 	}
7140 
7141 	/*
7142 	 * If kernelcore=mirror is specified, ignore movablecore option
7143 	 */
7144 	if (mirrored_kernelcore) {
7145 		bool mem_below_4gb_not_mirrored = false;
7146 
7147 		for_each_memblock(memory, r) {
7148 			if (memblock_is_mirror(r))
7149 				continue;
7150 
7151 			nid = memblock_get_region_node(r);
7152 
7153 			usable_startpfn = memblock_region_memory_base_pfn(r);
7154 
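			/* 0x100000 PFNs == 4GiB with 4KiB pages */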
7155 			if (usable_startpfn < 0x100000) {
7156 				mem_below_4gb_not_mirrored = true;
7157 				continue;
7158 			}
7159 
7160 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7161 				min(usable_startpfn, zone_movable_pfn[nid]) :
7162 				usable_startpfn;
7163 		}
7164 
7165 		if (mem_below_4gb_not_mirrored)
7166 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7167 
7168 		goto out2;
7169 	}
7170 
7171 	/*
7172 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7173 	 * amount of necessary memory.
7174 	 */
7175 	if (required_kernelcore_percent)
7176 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7177 				       10000UL;
7178 	if (required_movablecore_percent)
7179 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7180 					10000UL;
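	/*
	 * Example (illustrative, assuming 4KiB pages): kernelcore=30% with
	 * totalpages = 0x100000 (4GiB) yields (0x100000 * 100 * 30) / 10000
	 * = 314572 pages, i.e. roughly 1.2GiB of kernelcore.
	 */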
7181 
7182 	/*
7183 	 * If movablecore= was specified, calculate what size of
7184 	 * kernelcore that corresponds so that memory usable for
7185 	 * any allocation type is evenly spread. If both kernelcore
7186 	 * and movablecore are specified, then the value of kernelcore
7187 	 * will be used for required_kernelcore if it's greater than
7188 	 * what movablecore would have allowed.
7189 	 */
7190 	if (required_movablecore) {
7191 		unsigned long corepages;
7192 
7193 		/*
7194 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7195 		 * was requested by the user
7196 		 */
7197 		required_movablecore =
7198 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7199 		required_movablecore = min(totalpages, required_movablecore);
7200 		corepages = totalpages - required_movablecore;
7201 
7202 		required_kernelcore = max(required_kernelcore, corepages);
7203 	}
7204 
7205 	/*
7206 	 * If kernelcore was not specified or kernelcore size is larger
7207 	 * than totalpages, there is no ZONE_MOVABLE.
7208 	 */
7209 	if (!required_kernelcore || required_kernelcore >= totalpages)
7210 		goto out;
7211 
7212 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7213 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7214 
7215 restart:
7216 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7217 	kernelcore_node = required_kernelcore / usable_nodes;
7218 	for_each_node_state(nid, N_MEMORY) {
7219 		unsigned long start_pfn, end_pfn;
7220 
7221 		/*
7222 		 * Recalculate kernelcore_node if the division per node
7223 		 * now exceeds what is necessary to satisfy the requested
7224 		 * amount of memory for the kernel
7225 		 */
7226 		if (required_kernelcore < kernelcore_node)
7227 			kernelcore_node = required_kernelcore / usable_nodes;
7228 
7229 		/*
7230 		 * As the map is walked, we track how much memory is usable
7231 		 * by the kernel using kernelcore_remaining. When it is
7232 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7233 		 */
7234 		kernelcore_remaining = kernelcore_node;
7235 
7236 		/* Go through each range of PFNs within this node */
7237 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7238 			unsigned long size_pages;
7239 
7240 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7241 			if (start_pfn >= end_pfn)
7242 				continue;
7243 
7244 			/* Account for what is only usable for kernelcore */
7245 			if (start_pfn < usable_startpfn) {
7246 				unsigned long kernel_pages;
7247 				kernel_pages = min(end_pfn, usable_startpfn)
7248 								- start_pfn;
7249 
7250 				kernelcore_remaining -= min(kernel_pages,
7251 							kernelcore_remaining);
7252 				required_kernelcore -= min(kernel_pages,
7253 							required_kernelcore);
7254 
7255 				/* Continue if range is now fully accounted */
7256 				if (end_pfn <= usable_startpfn) {
7257 
7258 					/*
7259 					 * Push zone_movable_pfn to the end so
7260 					 * that if we have to rebalance
7261 					 * kernelcore across nodes, we will
7262 					 * not double account here
7263 					 */
7264 					zone_movable_pfn[nid] = end_pfn;
7265 					continue;
7266 				}
7267 				start_pfn = usable_startpfn;
7268 			}
7269 
7270 			/*
7271 			 * The usable PFN range for ZONE_MOVABLE is from
7272 			 * start_pfn->end_pfn. Calculate size_pages as the
7273 			 * number of pages used as kernelcore
7274 			 */
7275 			size_pages = end_pfn - start_pfn;
7276 			if (size_pages > kernelcore_remaining)
7277 				size_pages = kernelcore_remaining;
7278 			zone_movable_pfn[nid] = start_pfn + size_pages;
7279 
7280 			/*
7281 			 * Some kernelcore has been met, update counts and
7282 			 * break if the kernelcore for this node has been
7283 			 * satisfied
7284 			 */
7285 			required_kernelcore -= min(required_kernelcore,
7286 								size_pages);
7287 			kernelcore_remaining -= size_pages;
7288 			if (!kernelcore_remaining)
7289 				break;
7290 		}
7291 	}
7292 
7293 	/*
7294 	 * If there is still required_kernelcore, we do another pass with one
7295 	 * less node in the count. This will push zone_movable_pfn[nid] further
7296 	 * along on the nodes that still have memory until kernelcore is
7297 	 * satisfied
7298 	 */
7299 	usable_nodes--;
7300 	if (usable_nodes && required_kernelcore > usable_nodes)
7301 		goto restart;
7302 
7303 out2:
7304 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7305 	for (nid = 0; nid < MAX_NUMNODES; nid++)
7306 		zone_movable_pfn[nid] =
7307 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7308 
7309 out:
7310 	/* restore the node_state */
7311 	node_states[N_MEMORY] = saved_node_state;
7312 }
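/*
 * Sketch of the spreading above (illustrative numbers): with kernelcore=2G
 * and two nodes of 4GiB each, usable_nodes = 2 and kernelcore_node comes to
 * 1GiB worth of pages per node, so zone_movable_pfn[] for each node lands
 * 1GiB past the node's first usable PFN and the remaining 3GiB of each node
 * becomes ZONE_MOVABLE.
 */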
7313 
7314 /* Any regular or high memory on that node? */
7315 static void check_for_memory(pg_data_t *pgdat, int nid)
7316 {
7317 	enum zone_type zone_type;
7318 
7319 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7320 		struct zone *zone = &pgdat->node_zones[zone_type];
7321 		if (populated_zone(zone)) {
7322 			if (IS_ENABLED(CONFIG_HIGHMEM))
7323 				node_set_state(nid, N_HIGH_MEMORY);
7324 			if (zone_type <= ZONE_NORMAL)
7325 				node_set_state(nid, N_NORMAL_MEMORY);
7326 			break;
7327 		}
7328 	}
7329 }
7330 
7331 /*
7332  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
7333  * such cases we allow max_zone_pfn to be sorted in descending order.
7334  */
7335 bool __weak arch_has_descending_max_zone_pfns(void)
7336 {
7337 	return false;
7338 }
7339 
7340 /**
7341  * free_area_init - Initialise all pg_data_t and zone data
7342  * @max_zone_pfn: an array of max PFNs for each zone
7343  *
7344  * This will call free_area_init_node() for each active node in the system.
7345  * Using the page ranges provided by memblock_set_node(), the size of each
7346  * zone in each node and their holes is calculated. If the maximum PFN
7347  * between two adjacent zones match, it is assumed that the zone is empty.
7348  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7349  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7350  * starts where the previous one ended. For example, ZONE_DMA32 starts
7351  * at arch_max_dma_pfn.
7352  */
7353 void __init free_area_init(unsigned long *max_zone_pfn)
7354 {
7355 	unsigned long start_pfn, end_pfn;
7356 	int i, nid, zone;
7357 	bool descending;
7358 
7359 	/* Record where the zone boundaries are */
7360 	memset(arch_zone_lowest_possible_pfn, 0,
7361 				sizeof(arch_zone_lowest_possible_pfn));
7362 	memset(arch_zone_highest_possible_pfn, 0,
7363 				sizeof(arch_zone_highest_possible_pfn));
7364 
7365 	start_pfn = find_min_pfn_with_active_regions();
7366 	descending = arch_has_descending_max_zone_pfns();
7367 
7368 	for (i = 0; i < MAX_NR_ZONES; i++) {
7369 		if (descending)
7370 			zone = MAX_NR_ZONES - i - 1;
7371 		else
7372 			zone = i;
7373 
7374 		if (zone == ZONE_MOVABLE)
7375 			continue;
7376 
7377 		end_pfn = max(max_zone_pfn[zone], start_pfn);
7378 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
7379 		arch_zone_highest_possible_pfn[zone] = end_pfn;
7380 
7381 		start_pfn = end_pfn;
7382 	}
7383 
7384 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
7385 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7386 	find_zone_movable_pfns_for_nodes();
7387 
7388 	/* Print out the zone ranges */
7389 	pr_info("Zone ranges:\n");
7390 	for (i = 0; i < MAX_NR_ZONES; i++) {
7391 		if (i == ZONE_MOVABLE)
7392 			continue;
7393 		pr_info("  %-8s ", zone_names[i]);
7394 		if (arch_zone_lowest_possible_pfn[i] ==
7395 				arch_zone_highest_possible_pfn[i])
7396 			pr_cont("empty\n");
7397 		else
7398 			pr_cont("[mem %#018Lx-%#018Lx]\n",
7399 				(u64)arch_zone_lowest_possible_pfn[i]
7400 					<< PAGE_SHIFT,
7401 				((u64)arch_zone_highest_possible_pfn[i]
7402 					<< PAGE_SHIFT) - 1);
7403 	}
7404 
7405 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
7406 	pr_info("Movable zone start for each node\n");
7407 	for (i = 0; i < MAX_NUMNODES; i++) {
7408 		if (zone_movable_pfn[i])
7409 			pr_info("  Node %d: %#018Lx\n", i,
7410 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7411 	}
7412 
7413 	/*
7414 	 * Print out the early node map, and initialize the
7415 	 * subsection-map relative to active online memory ranges to
7416 	 * enable future "sub-section" extensions of the memory map.
7417 	 */
7418 	pr_info("Early memory node ranges\n");
7419 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7420 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7421 			(u64)start_pfn << PAGE_SHIFT,
7422 			((u64)end_pfn << PAGE_SHIFT) - 1);
7423 		subsection_map_init(start_pfn, end_pfn - start_pfn);
7424 	}
7425 
7426 	/* Initialise every node */
7427 	mminit_verify_pageflags_layout();
7428 	setup_nr_node_ids();
7429 	init_unavailable_mem();
7430 	for_each_online_node(nid) {
7431 		pg_data_t *pgdat = NODE_DATA(nid);
7432 		free_area_init_node(nid);
7433 
7434 		/* Any memory on that node */
7435 		if (pgdat->node_present_pages)
7436 			node_set_state(nid, N_MEMORY);
7437 		check_for_memory(pgdat, nid);
7438 	}
7439 }
7440 
7441 static int __init cmdline_parse_core(char *p, unsigned long *core,
7442 				     unsigned long *percent)
7443 {
7444 	unsigned long long coremem;
7445 	char *endptr;
7446 
7447 	if (!p)
7448 		return -EINVAL;
7449 
7450 	/* Value may be a percentage of total memory, otherwise bytes */
7451 	coremem = simple_strtoull(p, &endptr, 0);
7452 	if (*endptr == '%') {
7453 		/* Paranoid check for percent values greater than 100 */
7454 		WARN_ON(coremem > 100);
7455 
7456 		*percent = coremem;
7457 	} else {
7458 		coremem = memparse(p, &p);
7459 		/* Paranoid check that UL is enough for the coremem value */
7460 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7461 
7462 		*core = coremem >> PAGE_SHIFT;
7463 		*percent = 0UL;
7464 	}
7465 	return 0;
7466 }
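/*
 * Example of the two accepted forms (values illustrative, 4KiB pages):
 *
 *	kernelcore=512M  -> memparse() yields 536870912 bytes, so *core
 *			    becomes 131072 pages and *percent is cleared
 *	kernelcore=50%   -> *percent becomes 50; *core is left untouched
 */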
7467 
7468 /*
7469  * kernelcore=size sets the amount of memory for use for allocations that
7470  * cannot be reclaimed or migrated.
7471  */
7472 static int __init cmdline_parse_kernelcore(char *p)
7473 {
7474 	/* parse kernelcore=mirror */
7475 	if (parse_option_str(p, "mirror")) {
7476 		mirrored_kernelcore = true;
7477 		return 0;
7478 	}
7479 
7480 	return cmdline_parse_core(p, &required_kernelcore,
7481 				  &required_kernelcore_percent);
7482 }
7483 
7484 /*
7485  * movablecore=size sets the amount of memory for use for allocations that
7486  * can be reclaimed or migrated.
7487  */
7488 static int __init cmdline_parse_movablecore(char *p)
7489 {
7490 	return cmdline_parse_core(p, &required_movablecore,
7491 				  &required_movablecore_percent);
7492 }
7493 
7494 early_param("kernelcore", cmdline_parse_kernelcore);
7495 early_param("movablecore", cmdline_parse_movablecore);
7496 
7497 void adjust_managed_page_count(struct page *page, long count)
7498 {
7499 	atomic_long_add(count, &page_zone(page)->managed_pages);
7500 	totalram_pages_add(count);
7501 #ifdef CONFIG_HIGHMEM
7502 	if (PageHighMem(page))
7503 		totalhigh_pages_add(count);
7504 #endif
7505 }
7506 EXPORT_SYMBOL(adjust_managed_page_count);
7507 
7508 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7509 {
7510 	void *pos;
7511 	unsigned long pages = 0;
7512 
7513 	start = (void *)PAGE_ALIGN((unsigned long)start);
7514 	end = (void *)((unsigned long)end & PAGE_MASK);
7515 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7516 		struct page *page = virt_to_page(pos);
7517 		void *direct_map_addr;
7518 
7519 		/*
7520 		 * 'direct_map_addr' might be different from 'pos'
7521 		 * because some architectures' virt_to_page()
7522 		 * implementations work with aliases.  Getting the direct map
7523 		 * address ensures that we get a _writeable_
7524 		 * alias for the memset().
7525 		 */
7526 		direct_map_addr = page_address(page);
7527 		if ((unsigned int)poison <= 0xFF)
7528 			memset(direct_map_addr, poison, PAGE_SIZE);
7529 
7530 		free_reserved_page(page);
7531 	}
7532 
7533 	if (pages && s)
7534 		pr_info("Freeing %s memory: %ldK\n",
7535 			s, pages << (PAGE_SHIFT - 10));
7536 
7537 	return pages;
7538 }
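/*
 * Typical use, modelled on free_initmem_default() in <linux/mm.h>:
 * release the kernel's .init sections back to the page allocator once
 * boot has finished,
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 */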
7539 
7540 #ifdef	CONFIG_HIGHMEM
7541 void free_highmem_page(struct page *page)
7542 {
7543 	__free_reserved_page(page);
7544 	totalram_pages_inc();
7545 	atomic_long_inc(&page_zone(page)->managed_pages);
7546 	totalhigh_pages_inc();
7547 }
7548 #endif
7549 
7550 
7551 void __init mem_init_print_info(const char *str)
7552 {
7553 	unsigned long physpages, codesize, datasize, rosize, bss_size;
7554 	unsigned long init_code_size, init_data_size;
7555 
7556 	physpages = get_num_physpages();
7557 	codesize = _etext - _stext;
7558 	datasize = _edata - _sdata;
7559 	rosize = __end_rodata - __start_rodata;
7560 	bss_size = __bss_stop - __bss_start;
7561 	init_data_size = __init_end - __init_begin;
7562 	init_code_size = _einittext - _sinittext;
7563 
7564 	/*
7565 	 * Detect special cases and adjust section sizes accordingly:
7566 	 * 1) .init.* may be embedded into .data sections
7567 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
7568 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
7569 	 * 3) .rodata.* may be embedded into .text or .data sections.
7570 	 */
7571 #define adj_init_size(start, end, size, pos, adj) \
7572 	do { \
7573 		if (start <= pos && pos < end && size > adj) \
7574 			size -= adj; \
7575 	} while (0)
7576 
7577 	adj_init_size(__init_begin, __init_end, init_data_size,
7578 		     _sinittext, init_code_size);
7579 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7580 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7581 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7582 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7583 
7584 #undef	adj_init_size
7585 
7586 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7587 #ifdef	CONFIG_HIGHMEM
7588 		", %luK highmem"
7589 #endif
7590 		"%s%s)\n",
7591 		nr_free_pages() << (PAGE_SHIFT - 10),
7592 		physpages << (PAGE_SHIFT - 10),
7593 		codesize >> 10, datasize >> 10, rosize >> 10,
7594 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
7595 		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7596 		totalcma_pages << (PAGE_SHIFT - 10),
7597 #ifdef	CONFIG_HIGHMEM
7598 		totalhigh_pages() << (PAGE_SHIFT - 10),
7599 #endif
7600 		str ? ", " : "", str ? str : "");
7601 }
7602 
7603 /**
7604  * set_dma_reserve - set the specified number of pages reserved in the first zone
7605  * @new_dma_reserve: The number of pages to mark reserved
7606  *
7607  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7608  * In the DMA zone, a significant percentage may be consumed by kernel image
7609  * and other unfreeable allocations which can skew the watermarks badly. This
7610  * function may optionally be used to account for unfreeable pages in the
7611  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7612  * smaller per-cpu batchsize.
7613  */
7614 void __init set_dma_reserve(unsigned long new_dma_reserve)
7615 {
7616 	dma_reserve = new_dma_reserve;
7617 }
7618 
7619 static int page_alloc_cpu_dead(unsigned int cpu)
7620 {
7621 
7622 	lru_add_drain_cpu(cpu);
7623 	drain_pages(cpu);
7624 
7625 	/*
7626 	 * Spill the event counters of the dead processor
7627 	 * into the current processor's event counters.
7628 	 * This artificially elevates the count of the current
7629 	 * processor.
7630 	 */
7631 	vm_events_fold_cpu(cpu);
7632 
7633 	/*
7634 	 * Zero the differential counters of the dead processor
7635 	 * so that the vm statistics are consistent.
7636 	 *
7637 	 * This is only okay since the processor is dead and cannot
7638 	 * race with what we are doing.
7639 	 */
7640 	cpu_vm_stats_fold(cpu);
7641 	return 0;
7642 }
7643 
7644 #ifdef CONFIG_NUMA
7645 int hashdist = HASHDIST_DEFAULT;
7646 
7647 static int __init set_hashdist(char *str)
7648 {
7649 	if (!str)
7650 		return 0;
7651 	hashdist = simple_strtoul(str, &str, 0);
7652 	return 1;
7653 }
7654 __setup("hashdist=", set_hashdist);
7655 #endif
7656 
7657 void __init page_alloc_init(void)
7658 {
7659 	int ret;
7660 
7661 #ifdef CONFIG_NUMA
7662 	if (num_node_state(N_MEMORY) == 1)
7663 		hashdist = 0;
7664 #endif
7665 
7666 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7667 					"mm/page_alloc:dead", NULL,
7668 					page_alloc_cpu_dead);
7669 	WARN_ON(ret < 0);
7670 }
7671 
7672 /*
7673  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7674  *	or min_free_kbytes changes.
7675  */
7676 static void calculate_totalreserve_pages(void)
7677 {
7678 	struct pglist_data *pgdat;
7679 	unsigned long reserve_pages = 0;
7680 	enum zone_type i, j;
7681 
7682 	for_each_online_pgdat(pgdat) {
7683 
7684 		pgdat->totalreserve_pages = 0;
7685 
7686 		for (i = 0; i < MAX_NR_ZONES; i++) {
7687 			struct zone *zone = pgdat->node_zones + i;
7688 			long max = 0;
7689 			unsigned long managed_pages = zone_managed_pages(zone);
7690 
7691 			/* Find valid and maximum lowmem_reserve in the zone */
7692 			for (j = i; j < MAX_NR_ZONES; j++) {
7693 				if (zone->lowmem_reserve[j] > max)
7694 					max = zone->lowmem_reserve[j];
7695 			}
7696 
7697 			/* we treat the high watermark as reserved pages. */
7698 			max += high_wmark_pages(zone);
7699 
7700 			if (max > managed_pages)
7701 				max = managed_pages;
7702 
7703 			pgdat->totalreserve_pages += max;
7704 
7705 			reserve_pages += max;
7706 		}
7707 	}
7708 	totalreserve_pages = reserve_pages;
7709 }
7710 
7711 /*
7712  * setup_per_zone_lowmem_reserve - called whenever
7713  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
7714  *	has a correct pages reserved value, so an adequate number of
7715  *	pages are left in the zone after a successful __alloc_pages().
7716  */
7717 static void setup_per_zone_lowmem_reserve(void)
7718 {
7719 	struct pglist_data *pgdat;
7720 	enum zone_type j, idx;
7721 
7722 	for_each_online_pgdat(pgdat) {
7723 		for (j = 0; j < MAX_NR_ZONES; j++) {
7724 			struct zone *zone = pgdat->node_zones + j;
7725 			unsigned long managed_pages = zone_managed_pages(zone);
7726 
7727 			zone->lowmem_reserve[j] = 0;
7728 
7729 			idx = j;
7730 			while (idx) {
7731 				struct zone *lower_zone;
7732 
7733 				idx--;
7734 				lower_zone = pgdat->node_zones + idx;
7735 
7736 				if (!sysctl_lowmem_reserve_ratio[idx] ||
7737 				    !zone_managed_pages(lower_zone)) {
7738 					lower_zone->lowmem_reserve[j] = 0;
7739 					continue;
7740 				} else {
7741 					lower_zone->lowmem_reserve[j] =
7742 						managed_pages / sysctl_lowmem_reserve_ratio[idx];
7743 				}
7744 				managed_pages += zone_managed_pages(lower_zone);
7745 			}
7746 		}
7747 	}
7748 
7749 	/* update totalreserve_pages */
7750 	calculate_totalreserve_pages();
7751 }
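/*
 * Example of the resulting reserve (illustrative): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] of 256 and a 4GiB ZONE_NORMAL
 * above it (0x100000 pages), ZONE_DMA32 gets lowmem_reserve[ZONE_NORMAL]
 * = 0x100000 / 256 = 4096 pages (16MiB) held back from allocations that
 * could also have been satisfied from ZONE_NORMAL.
 */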
7752 
7753 static void __setup_per_zone_wmarks(void)
7754 {
7755 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7756 	unsigned long lowmem_pages = 0;
7757 	struct zone *zone;
7758 	unsigned long flags;
7759 
7760 	/* Calculate total number of !ZONE_HIGHMEM pages */
7761 	for_each_zone(zone) {
7762 		if (!is_highmem(zone))
7763 			lowmem_pages += zone_managed_pages(zone);
7764 	}
7765 
7766 	for_each_zone(zone) {
7767 		u64 tmp;
7768 
7769 		spin_lock_irqsave(&zone->lock, flags);
7770 		tmp = (u64)pages_min * zone_managed_pages(zone);
7771 		do_div(tmp, lowmem_pages);
7772 		if (is_highmem(zone)) {
7773 			/*
7774 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7775 			 * need highmem pages, so cap pages_min to a small
7776 			 * value here.
7777 			 *
7778 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7779 			 * deltas control async page reclaim, and so should
7780 			 * not be capped for highmem.
7781 			 */
7782 			unsigned long min_pages;
7783 
7784 			min_pages = zone_managed_pages(zone) / 1024;
7785 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7786 			zone->_watermark[WMARK_MIN] = min_pages;
7787 		} else {
7788 			/*
7789 			 * If it's a lowmem zone, reserve a number of pages
7790 			 * proportionate to the zone's size.
7791 			 */
7792 			zone->_watermark[WMARK_MIN] = tmp;
7793 		}
7794 
7795 		/*
7796 		 * Set the kswapd watermarks distance according to the
7797 		 * scale factor in proportion to available memory, but
7798 		 * ensure a minimum size on small systems.
7799 		 */
7800 		tmp = max_t(u64, tmp >> 2,
7801 			    mult_frac(zone_managed_pages(zone),
7802 				      watermark_scale_factor, 10000));
7803 
7804 		zone->watermark_boost = 0;
7805 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7806 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7807 
7808 		spin_unlock_irqrestore(&zone->lock, flags);
7809 	}
7810 
7811 	/* update totalreserve_pages */
7812 	calculate_totalreserve_pages();
7813 }
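/*
 * Worked example (illustrative numbers, 4KiB pages): with min_free_kbytes
 * = 4096 the global pages_min is 4096 >> 2 = 1024 pages.  A lowmem zone
 * holding half of all lowmem (0x80000 of 0x100000 pages) gets WMARK_MIN =
 * 512; with the default watermark_scale_factor of 10, tmp becomes
 * max(512 >> 2, 0x80000 * 10 / 10000) = 524, giving WMARK_LOW = 1036 and
 * WMARK_HIGH = 1560.
 */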
7814 
7815 /**
7816  * setup_per_zone_wmarks - called when min_free_kbytes changes
7817  * or when memory is hot-{added|removed}
7818  *
7819  * Ensures that the watermark[min,low,high] values for each zone are set
7820  * correctly with respect to min_free_kbytes.
7821  */
7822 void setup_per_zone_wmarks(void)
7823 {
7824 	static DEFINE_SPINLOCK(lock);
7825 
7826 	spin_lock(&lock);
7827 	__setup_per_zone_wmarks();
7828 	spin_unlock(&lock);
7829 }
7830 
7831 /*
7832  * Initialise min_free_kbytes.
7833  *
7834  * For small machines we want it small (128k min).  For large machines
7835  * we want it large (64MB max).  But it is not linear, because network
7836  * bandwidth does not increase linearly with machine size.  We use
7837  *
7838  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7839  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
7840  *
7841  * which yields
7842  *
7843  * 16MB:	512k
7844  * 32MB:	724k
7845  * 64MB:	1024k
7846  * 128MB:	1448k
7847  * 256MB:	2048k
7848  * 512MB:	2896k
7849  * 1024MB:	4096k
7850  * 2048MB:	5792k
7851  * 4096MB:	8192k
7852  * 8192MB:	11584k
7853  * 16384MB:	16384k
7854  */
7855 int __meminit init_per_zone_wmark_min(void)
7856 {
7857 	unsigned long lowmem_kbytes;
7858 	int new_min_free_kbytes;
7859 
7860 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7861 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7862 
7863 	if (new_min_free_kbytes > user_min_free_kbytes) {
7864 		min_free_kbytes = new_min_free_kbytes;
7865 		if (min_free_kbytes < 128)
7866 			min_free_kbytes = 128;
7867 		if (min_free_kbytes > 262144)
7868 			min_free_kbytes = 262144;
7869 	} else {
7870 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7871 				new_min_free_kbytes, user_min_free_kbytes);
7872 	}
7873 	setup_per_zone_wmarks();
7874 	refresh_zone_stat_thresholds();
7875 	setup_per_zone_lowmem_reserve();
7876 
7877 #ifdef CONFIG_NUMA
7878 	setup_min_unmapped_ratio();
7879 	setup_min_slab_ratio();
7880 #endif
7881 
7882 	return 0;
7883 }
7884 core_initcall(init_per_zone_wmark_min)
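/*
 * Checking the table above: 1GiB of lowmem gives lowmem_kbytes = 1048576,
 * and int_sqrt(1048576 * 16) = int_sqrt(16777216) = 4096, i.e. the 4096k
 * listed for the 1024MB row.
 */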
7885 
7886 /*
7887  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
7888  *	so that we can call two helper functions whenever min_free_kbytes
7889  *	changes.
7890  */
7891 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7892 		void *buffer, size_t *length, loff_t *ppos)
7893 {
7894 	int rc;
7895 
7896 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7897 	if (rc)
7898 		return rc;
7899 
7900 	if (write) {
7901 		user_min_free_kbytes = min_free_kbytes;
7902 		setup_per_zone_wmarks();
7903 	}
7904 	return 0;
7905 }
7906 
7907 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7908 		void *buffer, size_t *length, loff_t *ppos)
7909 {
7910 	int rc;
7911 
7912 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7913 	if (rc)
7914 		return rc;
7915 
7916 	if (write)
7917 		setup_per_zone_wmarks();
7918 
7919 	return 0;
7920 }
7921 
7922 #ifdef CONFIG_NUMA
7923 static void setup_min_unmapped_ratio(void)
7924 {
7925 	pg_data_t *pgdat;
7926 	struct zone *zone;
7927 
7928 	for_each_online_pgdat(pgdat)
7929 		pgdat->min_unmapped_pages = 0;
7930 
7931 	for_each_zone(zone)
7932 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
7933 						         sysctl_min_unmapped_ratio) / 100;
7934 }
7935 
7936 
7937 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7938 		void *buffer, size_t *length, loff_t *ppos)
7939 {
7940 	int rc;
7941 
7942 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7943 	if (rc)
7944 		return rc;
7945 
7946 	setup_min_unmapped_ratio();
7947 
7948 	return 0;
7949 }
7950 
7951 static void setup_min_slab_ratio(void)
7952 {
7953 	pg_data_t *pgdat;
7954 	struct zone *zone;
7955 
7956 	for_each_online_pgdat(pgdat)
7957 		pgdat->min_slab_pages = 0;
7958 
7959 	for_each_zone(zone)
7960 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
7961 						     sysctl_min_slab_ratio) / 100;
7962 }
7963 
7964 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7965 		void *buffer, size_t *length, loff_t *ppos)
7966 {
7967 	int rc;
7968 
7969 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7970 	if (rc)
7971 		return rc;
7972 
7973 	setup_min_slab_ratio();
7974 
7975 	return 0;
7976 }
7977 #endif
7978 
7979 /*
7980  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7981  *	proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
7982  *	whenever sysctl_lowmem_reserve_ratio changes.
7983  *
7984  * The reserve ratio has no relation to the minimum watermarks. The
7985  * lowmem reserve ratio only makes sense as a function of the boot-time
7986  * zone sizes.
7987  */
7988 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7989 		void *buffer, size_t *length, loff_t *ppos)
7990 {
7991 	int i;
7992 
7993 	proc_dointvec_minmax(table, write, buffer, length, ppos);
7994 
7995 	for (i = 0; i < MAX_NR_ZONES; i++) {
7996 		if (sysctl_lowmem_reserve_ratio[i] < 1)
7997 			sysctl_lowmem_reserve_ratio[i] = 0;
7998 	}
7999 
8000 	setup_per_zone_lowmem_reserve();
8001 	return 0;
8002 }
8003 
8004 static void __zone_pcp_update(struct zone *zone)
8005 {
8006 	unsigned int cpu;
8007 
8008 	for_each_possible_cpu(cpu)
8009 		pageset_set_high_and_batch(zone,
8010 				per_cpu_ptr(zone->pageset, cpu));
8011 }
8012 
8013 /*
8014  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8015  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
8016  * pagelist can have before it gets flushed back to buddy allocator.
8017  */
8018 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8019 		void *buffer, size_t *length, loff_t *ppos)
8020 {
8021 	struct zone *zone;
8022 	int old_percpu_pagelist_fraction;
8023 	int ret;
8024 
8025 	mutex_lock(&pcp_batch_high_lock);
8026 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8027 
8028 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8029 	if (!write || ret < 0)
8030 		goto out;
8031 
8032 	/* Sanity checking to avoid pcp imbalance */
8033 	if (percpu_pagelist_fraction &&
8034 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8035 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8036 		ret = -EINVAL;
8037 		goto out;
8038 	}
8039 
8040 	/* No change? */
8041 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8042 		goto out;
8043 
8044 	for_each_populated_zone(zone)
8045 		__zone_pcp_update(zone);
8046 out:
8047 	mutex_unlock(&pcp_batch_high_lock);
8048 	return ret;
8049 }
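/*
 * Usage note (illustrative): writing the smallest accepted fraction,
 *
 *	echo 8 > /proc/sys/vm/percpu_pagelist_fraction
 *
 * caps each CPU's pcp->high for a zone at zone_managed_pages(zone) / 8,
 * with the batch size derived from it in pageset_set_high_and_batch().
 */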
8050 
8051 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8052 /*
8053  * Returns the number of pages that the arch has reserved but
8054  * that are not known to alloc_large_system_hash().
8055  */
8056 static unsigned long __init arch_reserved_kernel_pages(void)
8057 {
8058 	return 0;
8059 }
8060 #endif
8061 
8062 /*
8063  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8064  * machines. As memory size is increased the scale is also increased but at
8065  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8066  * quadruples the scale is increased by one, which means the size of hash table
8067  * only doubles, instead of quadrupling as well.
8068  * Because 32-bit systems cannot have large physical memory, where this scaling
8069  * makes sense, it is disabled on such platforms.
8070  */
8071 #if __BITS_PER_LONG > 32
8072 #define ADAPT_SCALE_BASE	(64ul << 30)
8073 #define ADAPT_SCALE_SHIFT	2
8074 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8075 #endif
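/*
 * Example of the adaptive scale (illustrative, 4KiB pages): on a 256GiB
 * machine numentries starts near 0x4000000 pages; adapt runs from
 * ADAPT_SCALE_NPAGES (0x1000000) to 0x4000000, bumping scale once, so the
 * resulting table is half the size that linear scaling from 64GiB would
 * have produced.
 */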
8076 
8077 /*
8078  * allocate a large system hash table from bootmem
8079  * - it is assumed that the hash table must contain an exact power-of-2
8080  *   quantity of entries
8081  * - limit is the number of hash buckets, not the total allocation size
8082  */
8083 void *__init alloc_large_system_hash(const char *tablename,
8084 				     unsigned long bucketsize,
8085 				     unsigned long numentries,
8086 				     int scale,
8087 				     int flags,
8088 				     unsigned int *_hash_shift,
8089 				     unsigned int *_hash_mask,
8090 				     unsigned long low_limit,
8091 				     unsigned long high_limit)
8092 {
8093 	unsigned long long max = high_limit;
8094 	unsigned long log2qty, size;
8095 	void *table = NULL;
8096 	gfp_t gfp_flags;
8097 	bool virt;
8098 
8099 	/* allow the kernel cmdline to have a say */
8100 	if (!numentries) {
8101 		/* round applicable memory size up to nearest megabyte */
8102 		numentries = nr_kernel_pages;
8103 		numentries -= arch_reserved_kernel_pages();
8104 
8105 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8106 		if (PAGE_SHIFT < 20)
8107 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8108 
8109 #if __BITS_PER_LONG > 32
8110 		if (!high_limit) {
8111 			unsigned long adapt;
8112 
8113 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8114 			     adapt <<= ADAPT_SCALE_SHIFT)
8115 				scale++;
8116 		}
8117 #endif
8118 
8119 		/* limit to 1 bucket per 2^scale bytes of low memory */
8120 		if (scale > PAGE_SHIFT)
8121 			numentries >>= (scale - PAGE_SHIFT);
8122 		else
8123 			numentries <<= (PAGE_SHIFT - scale);
8124 
8125 		/* Make sure we've got at least a 0-order allocation. */
8126 		if (unlikely(flags & HASH_SMALL)) {
8127 			/* Makes no sense without HASH_EARLY */
8128 			WARN_ON(!(flags & HASH_EARLY));
8129 			if (!(numentries >> *_hash_shift)) {
8130 				numentries = 1UL << *_hash_shift;
8131 				BUG_ON(!numentries);
8132 			}
8133 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8134 			numentries = PAGE_SIZE / bucketsize;
8135 	}
8136 	numentries = roundup_pow_of_two(numentries);
8137 
8138 	/* limit allocation size to 1/16 total memory by default */
8139 	if (max == 0) {
8140 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8141 		do_div(max, bucketsize);
8142 	}
8143 	max = min(max, 0x80000000ULL);
8144 
8145 	if (numentries < low_limit)
8146 		numentries = low_limit;
8147 	if (numentries > max)
8148 		numentries = max;
8149 
8150 	log2qty = ilog2(numentries);
8151 
8152 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8153 	do {
8154 		virt = false;
8155 		size = bucketsize << log2qty;
8156 		if (flags & HASH_EARLY) {
8157 			if (flags & HASH_ZERO)
8158 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8159 			else
8160 				table = memblock_alloc_raw(size,
8161 							   SMP_CACHE_BYTES);
8162 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8163 			table = __vmalloc(size, gfp_flags);
8164 			virt = true;
8165 		} else {
8166 			/*
8167 			 * If bucketsize is not a power of two, we may free
8168 			 * some pages at the end of the hash table, which
8169 			 * alloc_pages_exact() does automatically.
8170 			 */
8171 			table = alloc_pages_exact(size, gfp_flags);
8172 			kmemleak_alloc(table, size, 1, gfp_flags);
8173 		}
8174 	} while (!table && size > PAGE_SIZE && --log2qty);
8175 
8176 	if (!table)
8177 		panic("Failed to allocate %s hash table\n", tablename);
8178 
8179 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8180 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8181 		virt ? "vmalloc" : "linear");
8182 
8183 	if (_hash_shift)
8184 		*_hash_shift = log2qty;
8185 	if (_hash_mask)
8186 		*_hash_mask = (1 << log2qty) - 1;
8187 
8188 	return table;
8189 }
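/*
 * Example call, roughly how fs/inode.c sizes the boot-time inode hash
 * (parameters illustrative):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14,
 *					HASH_EARLY | HASH_ZERO,
 *					&i_hash_shift, &i_hash_mask,
 *					0, 0);
 */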
8190 
8191 /*
8192  * This function checks whether pageblock includes unmovable pages or not.
8193  *
8194  * PageLRU check without isolation or lru_lock could race so that
8195  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
8196  * check without lock_page also may miss some movable non-lru pages at
8197  * race condition. So you can't expect this function should be exact.
8198  *
8199  * Returns a page without holding a reference. If the caller wants to
8200  * dereference that page (e.g., dumping), it has to make sure that it
8201  * cannot get removed (e.g., via memory unplug) concurrently.
8202  *
8203  */
8204 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8205 				 int migratetype, int flags)
8206 {
8207 	unsigned long iter = 0;
8208 	unsigned long pfn = page_to_pfn(page);
8209 
8210 	/*
8211 	 * TODO we could make this much more efficient by not checking every
8212 	 * page in the range if we know all of them are in MOVABLE_ZONE and
8213 	 * that the movable zone guarantees that pages are migratable, but
8214 	 * the latter is unfortunately not the case right now. E.g. movablecore
8215 	 * can still lead to having bootmem allocations in zone_movable.
8216 	 */
8217 
8218 	if (is_migrate_cma_page(page)) {
8219 		/*
8220 		 * CMA allocations (alloc_contig_range) really need to mark and
8221 		 * isolate CMA pageblocks even when they are not in fact movable,
8222 		 * so consider them movable here.
8223 		 */
8224 		if (is_migrate_cma(migratetype))
8225 			return NULL;
8226 
8227 		return page;
8228 	}
8229 
8230 	for (; iter < pageblock_nr_pages; iter++) {
8231 		if (!pfn_valid_within(pfn + iter))
8232 			continue;
8233 
8234 		page = pfn_to_page(pfn + iter);
8235 
8236 		if (PageReserved(page))
8237 			return page;
8238 
8239 		/*
8240 		 * If the zone is movable and we have ruled out all reserved
8241 		 * pages then it should be reasonably safe to assume the rest
8242 		 * is movable.
8243 		 */
8244 		if (zone_idx(zone) == ZONE_MOVABLE)
8245 			continue;
8246 
8247 		/*
8248 		 * Hugepages are not in LRU lists, but they're movable.
8249 		 * THPs are on the LRU, but need to be counted as #small pages.
8250 		 * We need not scan over tail pages because we don't
8251 		 * handle each tail page individually in migration.
8252 		 */
8253 		if (PageHuge(page) || PageTransCompound(page)) {
8254 			struct page *head = compound_head(page);
8255 			unsigned int skip_pages;
8256 
8257 			if (PageHuge(page)) {
8258 				if (!hugepage_migration_supported(page_hstate(head)))
8259 					return page;
8260 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8261 				return page;
8262 			}
8263 
8264 			skip_pages = compound_nr(head) - (page - head);
8265 			iter += skip_pages - 1;
8266 			continue;
8267 		}
8268 
8269 		/*
8270 		 * We can't use page_count without pinning the page
8271 		 * because another CPU can free the compound page.
8272 		 * This check already skips compound tails of THP
8273 		 * because their page->_refcount is zero at all times.
8274 		 */
8275 		if (!page_ref_count(page)) {
8276 			if (PageBuddy(page))
8277 				iter += (1 << page_order(page)) - 1;
8278 			continue;
8279 		}
8280 
8281 		/*
8282 		 * The HWPoisoned page may not be in the buddy system, and
8283 		 * page_count() is not 0.
8284 		 */
8285 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8286 			continue;
8287 
8288 		/*
8289 		 * We treat all PageOffline() pages as movable when offlining
8290 		 * to give drivers a chance to decrement their reference count
8291 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8292 		 * can be offlined as there are no direct references anymore.
8293 		 * For actually unmovable PageOffline() where the driver does
8294 		 * not support this, we will fail later when trying to actually
8295 		 * move these pages that still have a reference count > 0.
8296 		 * (false negatives in this function only)
8297 		 */
8298 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8299 			continue;
8300 
8301 		if (__PageMovable(page) || PageLRU(page))
8302 			continue;
8303 
8304 		/*
8305 		 * If there are RECLAIMABLE pages, we need to check
8306 		 * them.  But right now, memory offline itself doesn't call
8307 		 * shrink_node_slabs(), and this still needs to be fixed.
8308 		 */
8309 		/*
8310 		 * If the page is not RAM, page_count() should be 0, so
8311 		 * we don't need further checks. This is a _used_ non-movable page.
8312 		 *
8313 		 * The problematic thing here is PG_reserved pages. PG_reserved
8314 		 * is set to both of a memory hole page and a _used_ kernel
8315 		 * page at boot.
8316 		 */
8317 		return page;
8318 	}
8319 	return NULL;
8320 }
8321 
8322 #ifdef CONFIG_CONTIG_ALLOC
8323 static unsigned long pfn_max_align_down(unsigned long pfn)
8324 {
8325 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8326 			     pageblock_nr_pages) - 1);
8327 }
8328 
8329 static unsigned long pfn_max_align_up(unsigned long pfn)
8330 {
8331 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8332 				pageblock_nr_pages));
8333 }
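/*
 * Example (assuming MAX_ORDER_NR_PAGES = 1024 > pageblock_nr_pages):
 * pfn_max_align_down(0x12345) = 0x12000 and
 * pfn_max_align_up(0x12345) = 0x12400.
 */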
8334 
8335 /* [start, end) must belong to a single zone. */
8336 static int __alloc_contig_migrate_range(struct compact_control *cc,
8337 					unsigned long start, unsigned long end)
8338 {
8339 	/* This function is based on compact_zone() from compaction.c. */
8340 	unsigned int nr_reclaimed;
8341 	unsigned long pfn = start;
8342 	unsigned int tries = 0;
8343 	int ret = 0;
8344 
8345 	migrate_prep();
8346 
8347 	while (pfn < end || !list_empty(&cc->migratepages)) {
8348 		if (fatal_signal_pending(current)) {
8349 			ret = -EINTR;
8350 			break;
8351 		}
8352 
8353 		if (list_empty(&cc->migratepages)) {
8354 			cc->nr_migratepages = 0;
8355 			pfn = isolate_migratepages_range(cc, pfn, end);
8356 			if (!pfn) {
8357 				ret = -EINTR;
8358 				break;
8359 			}
8360 			tries = 0;
8361 		} else if (++tries == 5) {
8362 			ret = ret < 0 ? ret : -EBUSY;
8363 			break;
8364 		}
8365 
8366 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8367 							&cc->migratepages);
8368 		cc->nr_migratepages -= nr_reclaimed;
8369 
8370 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
8371 				    NULL, 0, cc->mode, MR_CONTIG_RANGE);
8372 	}
8373 	if (ret < 0) {
8374 		putback_movable_pages(&cc->migratepages);
8375 		return ret;
8376 	}
8377 	return 0;
8378 }
8379 
8380 /**
8381  * alloc_contig_range() -- tries to allocate given range of pages
8382  * @start:	start PFN to allocate
8383  * @end:	one-past-the-last PFN to allocate
8384  * @migratetype:	migratetype of the underlying pageblocks (either
8385  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8386  *			in range must have the same migratetype and it must
8387  *			be either of the two.
8388  * @gfp_mask:	GFP mask to use during compaction
8389  *
8390  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8391  * aligned.  The PFN range must belong to a single zone.
8392  *
8393  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8394  * pageblocks in the range.  Once isolated, the pageblocks should not
8395  * be modified by others.
8396  *
8397  * Return: zero on success or negative error code.  On success all
8398  * pages whose PFN is in [start, end) are allocated for the caller and
8399  * need to be freed with free_contig_range().
8400  */
8401 int alloc_contig_range(unsigned long start, unsigned long end,
8402 		       unsigned migratetype, gfp_t gfp_mask)
8403 {
8404 	unsigned long outer_start, outer_end;
8405 	unsigned int order;
8406 	int ret = 0;
8407 
8408 	struct compact_control cc = {
8409 		.nr_migratepages = 0,
8410 		.order = -1,
8411 		.zone = page_zone(pfn_to_page(start)),
8412 		.mode = MIGRATE_SYNC,
8413 		.ignore_skip_hint = true,
8414 		.no_set_skip_hint = true,
8415 		.gfp_mask = current_gfp_context(gfp_mask),
8416 		.alloc_contig = true,
8417 	};
8418 	INIT_LIST_HEAD(&cc.migratepages);
8419 
8420 	/*
8421 	 * What we do here is we mark all pageblocks in range as
8422 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
8423 	 * have different sizes, and due to the way the page allocator
8424 	 * works, we align the range to the biggest of the two page sizes
8425 	 * so that the page allocator won't try to merge buddies from
8426 	 * different pageblocks and change MIGRATE_ISOLATE to some
8427 	 * other migration type.
8428 	 *
8429 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8430 	 * migrate the pages from an unaligned range (i.e. pages that
8431 	 * we are interested in).  This will put all the pages in
8432 	 * range back to page allocator as MIGRATE_ISOLATE.
8433 	 *
8434 	 * When this is done, we take the pages in range from page
8435 	 * allocator removing them from the buddy system.  This way
8436 	 * page allocator will never consider using them.
8437 	 *
8438 	 * This lets us mark the pageblocks back as
8439 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8440 	 * aligned range but not in the unaligned, original range are
8441 	 * put back to page allocator so that buddy can use them.
8442 	 */
8443 
8444 	ret = start_isolate_page_range(pfn_max_align_down(start),
8445 				       pfn_max_align_up(end), migratetype, 0);
8446 	if (ret < 0)
8447 		return ret;
8448 
8449 	/*
8450 	 * In case of -EBUSY, we'd like to know which page causes problem.
8451 	 * So, just fall through. test_pages_isolated() has a tracepoint
8452 	 * which will report the busy page.
8453 	 *
8454 	 * It is possible that busy pages could become available before
8455 	 * the call to test_pages_isolated, and the range will actually be
8456 	 * allocated.  So, if we fall through be sure to clear ret so that
8457 	 * -EBUSY is not accidentally used or returned to caller.
8458 	 */
8459 	ret = __alloc_contig_migrate_range(&cc, start, end);
8460 	if (ret && ret != -EBUSY)
8461 		goto done;
8462 	ret = 0;
8463 
8464 	/*
8465 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
8466 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
8467 	 * more, all pages in [start, end) are free in page allocator.
8468 	 * What we are going to do is to allocate all pages from
8469 	 * [start, end) (that is remove them from page allocator).
8470 	 *
8471 	 * The only problem is that pages at the beginning and at the
8472 	 * end of interesting range may be not aligned with pages that
8473 	 * page allocator holds, ie. they can be part of higher order
8474 	 * pages.  Because of this, we reserve the bigger range and
8475 	 * once this is done free the pages we are not interested in.
8476 	 *
8477 	 * We don't have to hold zone->lock here because the pages are
8478 	 * isolated thus they won't get removed from buddy.
8479 	 */
8480 
8481 	lru_add_drain_all();
8482 
8483 	order = 0;
8484 	outer_start = start;
8485 	while (!PageBuddy(pfn_to_page(outer_start))) {
8486 		if (++order >= MAX_ORDER) {
8487 			outer_start = start;
8488 			break;
8489 		}
8490 		outer_start &= ~0UL << order;
8491 	}
8492 
8493 	if (outer_start != start) {
8494 		order = page_order(pfn_to_page(outer_start));
8495 
8496 		/*
8497 		 * outer_start page could be small order buddy page and
8498 		 * it doesn't include start page. Adjust outer_start
8499 		 * in this case to report failed page properly
8500 		 * on tracepoint in test_pages_isolated()
8501 		 */
8502 		if (outer_start + (1UL << order) <= start)
8503 			outer_start = start;
8504 	}
8505 
8506 	/* Make sure the range is really isolated. */
8507 	if (test_pages_isolated(outer_start, end, 0)) {
8508 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
8509 			__func__, outer_start, end);
8510 		ret = -EBUSY;
8511 		goto done;
8512 	}
8513 
8514 	/* Grab isolated pages from freelists. */
8515 	outer_end = isolate_freepages_range(&cc, outer_start, end);
8516 	if (!outer_end) {
8517 		ret = -EBUSY;
8518 		goto done;
8519 	}
8520 
8521 	/* Free head and tail (if any) */
8522 	if (start != outer_start)
8523 		free_contig_range(outer_start, start - outer_start);
8524 	if (end != outer_end)
8525 		free_contig_range(end, outer_end - end);
8526 
8527 done:
8528 	undo_isolate_page_range(pfn_max_align_down(start),
8529 				pfn_max_align_up(end), migratetype);
8530 	return ret;
8531 }
8532 EXPORT_SYMBOL(alloc_contig_range);
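/*
 * Sketch of a typical caller, modelled on the CMA path (names
 * illustrative): isolate and allocate a range, then release it,
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages,
 *				 MIGRATE_CMA, GFP_KERNEL);
 *	if (!ret) {
 *		... use the pages ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 */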
8533 
8534 static int __alloc_contig_pages(unsigned long start_pfn,
8535 				unsigned long nr_pages, gfp_t gfp_mask)
8536 {
8537 	unsigned long end_pfn = start_pfn + nr_pages;
8538 
8539 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8540 				  gfp_mask);
8541 }
8542 
8543 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8544 				   unsigned long nr_pages)
8545 {
8546 	unsigned long i, end_pfn = start_pfn + nr_pages;
8547 	struct page *page;
8548 
8549 	for (i = start_pfn; i < end_pfn; i++) {
8550 		page = pfn_to_online_page(i);
8551 		if (!page)
8552 			return false;
8553 
8554 		if (page_zone(page) != z)
8555 			return false;
8556 
8557 		if (PageReserved(page))
8558 			return false;
8559 
8560 		if (page_count(page) > 0)
8561 			return false;
8562 
8563 		if (PageHuge(page))
8564 			return false;
8565 	}
8566 	return true;
8567 }
8568 
8569 static bool zone_spans_last_pfn(const struct zone *zone,
8570 				unsigned long start_pfn, unsigned long nr_pages)
8571 {
8572 	unsigned long last_pfn = start_pfn + nr_pages - 1;
8573 
8574 	return zone_spans_pfn(zone, last_pfn);
8575 }
8576 
8577 /**
8578  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
8579  * @nr_pages:	Number of contiguous pages to allocate
8580  * @gfp_mask:	GFP mask to limit search and used during compaction
8581  * @nid:	Target node
8582  * @nodemask:	Mask for other possible nodes
8583  *
8584  * This routine is a wrapper around alloc_contig_range(). It scans over zones
8585  * on an applicable zonelist to find a contiguous pfn range which can then be
8586  * tried for allocation with alloc_contig_range(). This routine is intended
8587  * for allocation requests which can not be fulfilled with the buddy allocator.
8588  *
8589  * The allocated memory is always aligned to a page boundary. If nr_pages is a
8590  * power of two then the alignment is guaranteed to be to the given nr_pages
8591  * (e.g. 1GB request would be aligned to 1GB).
8592  *
8593  * Allocated pages can be freed with free_contig_range() or by manually calling
8594  * __free_page() on each allocated page.
8595  *
8596  * Return: pointer to contiguous pages on success, or NULL if not successful.
8597  */
8598 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8599 				int nid, nodemask_t *nodemask)
8600 {
8601 	unsigned long ret, pfn, flags;
8602 	struct zonelist *zonelist;
8603 	struct zone *zone;
8604 	struct zoneref *z;
8605 
8606 	zonelist = node_zonelist(nid, gfp_mask);
8607 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
8608 					gfp_zone(gfp_mask), nodemask) {
8609 		spin_lock_irqsave(&zone->lock, flags);
8610 
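		/*
		 * Walk the zone in nr_pages-sized strides, starting from
		 * the first suitably aligned pfn; for power-of-two
		 * nr_pages this provides the alignment guarantee
		 * documented above.
		 */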
8611 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8612 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8613 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8614 				/*
8615 				 * We release the zone lock here because
8616 				 * alloc_contig_range() will also lock the zone
8617 				 * at some point. If there's an allocation
8618 				 * spinning on this lock, it may win the race
8619 				 * and cause alloc_contig_range() to fail...
8620 				 */
8621 				spin_unlock_irqrestore(&zone->lock, flags);
8622 				ret = __alloc_contig_pages(pfn, nr_pages,
8623 							gfp_mask);
8624 				if (!ret)
8625 					return pfn_to_page(pfn);
8626 				spin_lock_irqsave(&zone->lock, flags);
8627 			}
8628 			pfn += nr_pages;
8629 		}
8630 		spin_unlock_irqrestore(&zone->lock, flags);
8631 	}
8632 	return NULL;
8633 }
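
/*
 * Illustrative sketch (not part of the original file): one plausible way
 * a caller, e.g. a gigantic-hugepage or device allocator, might use
 * alloc_contig_pages().  The function name and the exact flag mix are
 * hypothetical; alloc_contig_pages() and free_contig_range() are real.
 */
static __maybe_unused struct page *example_alloc_gigantic(int nid)
{
	/* 1GiB worth of base pages: a power of two, hence 1GiB-aligned. */
	const unsigned long nr_pages = 1UL << (30 - PAGE_SHIFT);

	/*
	 * __GFP_THISNODE confines the zonelist walk to @nid, and
	 * __GFP_NOWARN suppresses failure splats, since contiguous
	 * requests are expected to fail on fragmented systems.  A
	 * successful allocation would later be released with
	 * free_contig_range(page_to_pfn(page), nr_pages).
	 */
	return alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
}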
8634 #endif /* CONFIG_CONTIG_ALLOC */
8635 
8636 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8637 {
8638 	unsigned int count = 0;
8639 
8640 	for (; nr_pages--; pfn++) {
8641 		struct page *page = pfn_to_page(pfn);
8642 
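		/*
		 * Pages obtained from alloc_contig_range() carry exactly
		 * one reference; count any page that deviates so we can
		 * warn below.
		 */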
8643 		count += page_count(page) != 1;
8644 		__free_page(page);
8645 	}
8646 	WARN(count != 0, "%u pages are still in use!\n", count);
8647 }
8648 EXPORT_SYMBOL(free_contig_range);
8649 
8650 /*
8651  * The zone indicated has a new number of managed_pages; batch sizes and percpu
8652  * page high values need to be recalculated.
8653  */
8654 void __meminit zone_pcp_update(struct zone *zone)
8655 {
8656 	mutex_lock(&pcp_batch_high_lock);
8657 	__zone_pcp_update(zone);
8658 	mutex_unlock(&pcp_batch_high_lock);
8659 }
8660 
8661 void zone_pcp_reset(struct zone *zone)
8662 {
8663 	unsigned long flags;
8664 	int cpu;
8665 	struct per_cpu_pageset *pset;
8666 
8667 	/* avoid races with drain_pages() */
8668 	local_irq_save(flags);
8669 	if (zone->pageset != &boot_pageset) {
8670 		for_each_online_cpu(cpu) {
8671 			pset = per_cpu_ptr(zone->pageset, cpu);
8672 			drain_zonestat(zone, pset);
8673 		}
8674 		free_percpu(zone->pageset);
8675 		zone->pageset = &boot_pageset;
8676 	}
8677 	local_irq_restore(flags);
8678 }
8679 
8680 #ifdef CONFIG_MEMORY_HOTREMOVE
8681 /*
8682  * All pages in the range must be in a single zone and isolated
8683  * before calling this.
8684  */
8685 unsigned long
8686 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8687 {
8688 	struct page *page;
8689 	struct zone *zone;
8690 	unsigned int order;
8691 	unsigned long pfn;
8692 	unsigned long flags;
8693 	unsigned long offlined_pages = 0;
8694 
8695 	/* find the first valid pfn */
8696 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
8697 		if (pfn_valid(pfn))
8698 			break;
8699 	if (pfn == end_pfn)
8700 		return offlined_pages;
8701 
8702 	offline_mem_sections(pfn, end_pfn);
8703 	zone = page_zone(pfn_to_page(pfn));
8704 	spin_lock_irqsave(&zone->lock, flags);
8705 	pfn = start_pfn;
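	/*
	 * Walk the range: HWPoisoned and PageOffline pages are skipped
	 * (and counted) individually; everything else must be a free
	 * buddy page and is pulled off the free lists one order-sized
	 * block at a time.
	 */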
8706 	while (pfn < end_pfn) {
8707 		if (!pfn_valid(pfn)) {
8708 			pfn++;
8709 			continue;
8710 		}
8711 		page = pfn_to_page(pfn);
8712 		/*
8713 		 * A HWPoisoned page may not be in the buddy system, and
8714 		 * its page_count() need not be 0.
8715 		 */
8716 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8717 			pfn++;
8718 			offlined_pages++;
8719 			continue;
8720 		}
8721 		/*
8722 		 * At this point all remaining PageOffline() pages have a
8723 		 * reference count of 0 and can simply be skipped.
8724 		 */
8725 		if (PageOffline(page)) {
8726 			BUG_ON(page_count(page));
8727 			BUG_ON(PageBuddy(page));
8728 			pfn++;
8729 			offlined_pages++;
8730 			continue;
8731 		}
8732 
8733 		BUG_ON(page_count(page));
8734 		BUG_ON(!PageBuddy(page));
8735 		order = page_order(page);
8736 		offlined_pages += 1 << order;
8737 		del_page_from_free_list(page, zone, order);
8738 		pfn += (1 << order);
8739 	}
8740 	spin_unlock_irqrestore(&zone->lock, flags);
8741 
8742 	return offlined_pages;
8743 }
8744 #endif
8745 
8746 bool is_free_buddy_page(struct page *page)
8747 {
8748 	struct zone *zone = page_zone(page);
8749 	unsigned long pfn = page_to_pfn(page);
8750 	unsigned long flags;
8751 	unsigned int order;
8752 
8753 	spin_lock_irqsave(&zone->lock, flags);
8754 	for (order = 0; order < MAX_ORDER; order++) {
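		/*
		 * Clearing the low 'order' bits of the pfn yields the
		 * first page of the order-aligned block containing @page;
		 * if that head is a free buddy of at least this order,
		 * @page lies inside a free block.
		 */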
8755 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8756 
8757 		if (PageBuddy(page_head) && page_order(page_head) >= order)
8758 			break;
8759 	}
8760 	spin_unlock_irqrestore(&zone->lock, flags);
8761 
8762 	return order < MAX_ORDER;
8763 }
8764 
8765 #ifdef CONFIG_MEMORY_FAILURE
8766 /*
8767  * Set PG_hwpoison flag if a given page is confirmed to be a free page.  This
8768  * test is performed under the zone lock to prevent a race against page
8769  * allocation.
8770  */
8771 bool set_hwpoison_free_buddy_page(struct page *page)
8772 {
8773 	struct zone *zone = page_zone(page);
8774 	unsigned long pfn = page_to_pfn(page);
8775 	unsigned long flags;
8776 	unsigned int order;
8777 	bool hwpoisoned = false;
8778 
8779 	spin_lock_irqsave(&zone->lock, flags);
8780 	for (order = 0; order < MAX_ORDER; order++) {
8781 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8782 
8783 		if (PageBuddy(page_head) && page_order(page_head) >= order) {
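			/*
			 * TestSetPageHWPoison() returns the old flag
			 * value, so report success only if we were the
			 * ones to set it.
			 */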
8784 			if (!TestSetPageHWPoison(page))
8785 				hwpoisoned = true;
8786 			break;
8787 		}
8788 	}
8789 	spin_unlock_irqrestore(&zone->lock, flags);
8790 
8791 	return hwpoisoned;
8792 }
8793 #endif
8794