xref: /openbmc/linux/mm/page_alloc.c (revision 8938c48f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list; the system allocates free pages from here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/kthread.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/lockdep.h>
70 #include <linux/nmi.h>
71 #include <linux/psi.h>
72 #include <linux/padata.h>
73 #include <linux/khugepaged.h>
74 #include <linux/buffer_head.h>
75 
76 #include <asm/sections.h>
77 #include <asm/tlbflush.h>
78 #include <asm/div64.h>
79 #include "internal.h"
80 #include "shuffle.h"
81 #include "page_reporting.h"
82 
83 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
84 typedef int __bitwise fpi_t;
85 
86 /* No special request */
87 #define FPI_NONE		((__force fpi_t)0)
88 
89 /*
90  * Skip free page reporting notification for the (possibly merged) page.
91  * This does not hinder free page reporting from grabbing the page,
92  * reporting it and marking it "reported" -  it only skips notifying
93  * the free page reporting infrastructure about a newly freed page. For
94  * example, used when temporarily pulling a page from a freelist and
95  * putting it back unmodified.
96  */
97 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
98 
99 /*
100  * Place the (possibly merged) page at the tail of the freelist. Will ignore
101  * page shuffling (relevant code - e.g., memory onlining - is expected to
102  * shuffle the whole zone).
103  *
104  * Note: No code should rely on this flag for correctness - it's purely
105  *       to allow for optimizations when handing back either fresh pages
106  *       (memory onlining) or untouched pages (page isolation, free page
107  *       reporting).
108  */
109 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
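
/*
 * Illustrative sketch, not part of the original file: fpi_t values combine
 * with bitwise OR like any other flag word. Later in this file
 * __free_pages_core() passes FPI_TO_TAIL on its own; the helper below is
 * purely hypothetical and only shows how both flags would be combined.
 */
#if 0	/* example only, never compiled */
static void example_free_to_tail_quietly(struct page *page, unsigned int order)
{
	/* Put the page at the freelist tail and skip the reporting notify. */
	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_REPORT_NOTIFY);
}
#endif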
110 
111 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
112 static DEFINE_MUTEX(pcp_batch_high_lock);
113 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
114 
115 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
116 DEFINE_PER_CPU(int, numa_node);
117 EXPORT_PER_CPU_SYMBOL(numa_node);
118 #endif
119 
120 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
121 
122 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
123 /*
124  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
125  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
126  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
127  * defined in <linux/topology.h>.
128  */
129 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
130 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
131 #endif
132 
133 /* work_structs for global per-cpu drains */
134 struct pcpu_drain {
135 	struct zone *zone;
136 	struct work_struct work;
137 };
138 static DEFINE_MUTEX(pcpu_drain_mutex);
139 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
140 
141 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
142 volatile unsigned long latent_entropy __latent_entropy;
143 EXPORT_SYMBOL(latent_entropy);
144 #endif
145 
146 /*
147  * Array of node states.
148  */
149 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
150 	[N_POSSIBLE] = NODE_MASK_ALL,
151 	[N_ONLINE] = { { [0] = 1UL } },
152 #ifndef CONFIG_NUMA
153 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
154 #ifdef CONFIG_HIGHMEM
155 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
156 #endif
157 	[N_MEMORY] = { { [0] = 1UL } },
158 	[N_CPU] = { { [0] = 1UL } },
159 #endif	/* NUMA */
160 };
161 EXPORT_SYMBOL(node_states);
162 
163 atomic_long_t _totalram_pages __read_mostly;
164 EXPORT_SYMBOL(_totalram_pages);
165 unsigned long totalreserve_pages __read_mostly;
166 unsigned long totalcma_pages __read_mostly;
167 
168 int percpu_pagelist_fraction;
169 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
170 DEFINE_STATIC_KEY_FALSE(init_on_alloc);
171 EXPORT_SYMBOL(init_on_alloc);
172 
173 DEFINE_STATIC_KEY_FALSE(init_on_free);
174 EXPORT_SYMBOL(init_on_free);
175 
176 static bool _init_on_alloc_enabled_early __read_mostly
177 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
178 static int __init early_init_on_alloc(char *buf)
179 {
180 
181 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
182 }
183 early_param("init_on_alloc", early_init_on_alloc);
184 
185 static bool _init_on_free_enabled_early __read_mostly
186 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
187 static int __init early_init_on_free(char *buf)
188 {
189 	return kstrtobool(buf, &_init_on_free_enabled_early);
190 }
191 early_param("init_on_free", early_init_on_free);
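
/*
 * Illustrative note, not part of the original file: the two early_param()
 * hooks above parse boot command line options with kstrtobool(), so values
 * such as 1/0, y/n or on/off are accepted, e.g.:
 *
 *	init_on_alloc=1 init_on_free=1
 *
 * The parsed values only feed the init_on_alloc/init_on_free static keys
 * once init_mem_debugging_and_hardening() has weighed them against page
 * poisoning.
 */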
192 
193 /*
194  * A cached value of the page's pageblock's migratetype, used when the page is
195  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
196  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
197  * Also the migratetype set in the page does not necessarily match the pcplist
198  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
199  * other index - this ensures that it will be put on the correct CMA freelist.
200  */
201 static inline int get_pcppage_migratetype(struct page *page)
202 {
203 	return page->index;
204 }
205 
206 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
207 {
208 	page->index = migratetype;
209 }
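
/*
 * Illustrative sketch, not part of the original file: the cache above is
 * written once when a page enters a pcplist and read back when the page is
 * flushed to the buddy lists, so the pageblock bitmap is not touched on that
 * hot path. The helper below is hypothetical; the real producer/consumer
 * pair is free_unref_page() (outside this excerpt) and free_pcppages_bulk().
 */
#if 0	/* example only, never compiled */
static void example_cache_migratetype(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	/* Producer side: look the migratetype up once and cache it. */
	set_pcppage_migratetype(page, get_pfnblock_migratetype(page, pfn));

	/*
	 * Consumer side: read the cached value instead of the bitmap. It may
	 * be stale if the pageblock's type changed in the meantime, which the
	 * comment above explicitly tolerates.
	 */
	pr_debug("cached migratetype: %d\n", get_pcppage_migratetype(page));
}
#endif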
210 
211 #ifdef CONFIG_PM_SLEEP
212 /*
213  * The following functions are used by the suspend/hibernate code to temporarily
214  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
215  * while devices are suspended.  To avoid races with the suspend/hibernate code,
216  * they should always be called with system_transition_mutex held
217  * (gfp_allowed_mask also should only be modified with system_transition_mutex
218  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
219  * with that modification).
220  */
221 
222 static gfp_t saved_gfp_mask;
223 
224 void pm_restore_gfp_mask(void)
225 {
226 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
227 	if (saved_gfp_mask) {
228 		gfp_allowed_mask = saved_gfp_mask;
229 		saved_gfp_mask = 0;
230 	}
231 }
232 
233 void pm_restrict_gfp_mask(void)
234 {
235 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
236 	WARN_ON(saved_gfp_mask);
237 	saved_gfp_mask = gfp_allowed_mask;
238 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
239 }
240 
241 bool pm_suspended_storage(void)
242 {
243 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
244 		return false;
245 	return true;
246 }
247 #endif /* CONFIG_PM_SLEEP */
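
/*
 * Illustrative sketch, not part of the original file: the suspend/hibernate
 * core (kernel/power/) brackets the device-suspended window roughly as
 * below, with system_transition_mutex held, so allocations made while
 * storage is unavailable cannot recurse into I/O. pm_suspended_storage()
 * lets other code ask whether that window is currently open.
 */
#if 0	/* example only, never compiled */
static void example_pm_gfp_bracket(void)
{
	mutex_lock(&system_transition_mutex);
	pm_restrict_gfp_mask();		/* drop __GFP_IO and __GFP_FS */

	/* ... suspend devices, write or read the image, resume devices ... */

	pm_restore_gfp_mask();		/* put the saved mask back */
	mutex_unlock(&system_transition_mutex);
}
#endif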
248 
249 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
250 unsigned int pageblock_order __read_mostly;
251 #endif
252 
253 static void __free_pages_ok(struct page *page, unsigned int order,
254 			    fpi_t fpi_flags);
255 
256 /*
257  * results with 256, 32 in the lowmem_reserve sysctl:
258  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
259  *	1G machine -> (16M dma, 784M normal, 224M high)
260  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
261  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
262  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
263  *
264  * TBD: should special case ZONE_DMA32 machines here - in those we normally
265  * don't need any ZONE_NORMAL reservation
266  */
267 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
268 #ifdef CONFIG_ZONE_DMA
269 	[ZONE_DMA] = 256,
270 #endif
271 #ifdef CONFIG_ZONE_DMA32
272 	[ZONE_DMA32] = 256,
273 #endif
274 	[ZONE_NORMAL] = 32,
275 #ifdef CONFIG_HIGHMEM
276 	[ZONE_HIGHMEM] = 0,
277 #endif
278 	[ZONE_MOVABLE] = 0,
279 };
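
/*
 * Worked example, not part of the original file: the reserve kept in a lower
 * zone is (pages of the higher zones that might fall back into it) / ratio.
 * Taking the 1G split sketched above, a ZONE_NORMAL allocation that could
 * fall back into ZONE_DMA leaves roughly 784M / 256 ~= 3M of ZONE_DMA
 * untouchable for it, while a HIGHMEM allocation leaves about
 * (224M + 784M) / 256 ~= 4M reserved in ZONE_DMA and 224M / 32 = 7M in
 * ZONE_NORMAL. setup_per_zone_lowmem_reserve() turns these ratios into the
 * per-zone lowmem_reserve[] values consulted by the watermark checks.
 */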
280 
281 static char * const zone_names[MAX_NR_ZONES] = {
282 #ifdef CONFIG_ZONE_DMA
283 	 "DMA",
284 #endif
285 #ifdef CONFIG_ZONE_DMA32
286 	 "DMA32",
287 #endif
288 	 "Normal",
289 #ifdef CONFIG_HIGHMEM
290 	 "HighMem",
291 #endif
292 	 "Movable",
293 #ifdef CONFIG_ZONE_DEVICE
294 	 "Device",
295 #endif
296 };
297 
298 const char * const migratetype_names[MIGRATE_TYPES] = {
299 	"Unmovable",
300 	"Movable",
301 	"Reclaimable",
302 	"HighAtomic",
303 #ifdef CONFIG_CMA
304 	"CMA",
305 #endif
306 #ifdef CONFIG_MEMORY_ISOLATION
307 	"Isolate",
308 #endif
309 };
310 
311 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
312 	[NULL_COMPOUND_DTOR] = NULL,
313 	[COMPOUND_PAGE_DTOR] = free_compound_page,
314 #ifdef CONFIG_HUGETLB_PAGE
315 	[HUGETLB_PAGE_DTOR] = free_huge_page,
316 #endif
317 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
318 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
319 #endif
320 };
321 
322 int min_free_kbytes = 1024;
323 int user_min_free_kbytes = -1;
324 #ifdef CONFIG_DISCONTIGMEM
325 /*
326  * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
327  * are not on separate NUMA nodes. Functionally this works but with
328  * watermark_boost_factor, it can reclaim prematurely as the ranges can be
329  * quite small. By default, do not boost watermarks on discontigmem as in
330  * many cases very high-order allocations like THP are likely to be
331  * unsupported and the premature reclaim offsets the advantage of long-term
332  * fragmentation avoidance.
333  */
334 int watermark_boost_factor __read_mostly;
335 #else
336 int watermark_boost_factor __read_mostly = 15000;
337 #endif
338 int watermark_scale_factor = 10;
339 
340 static unsigned long nr_kernel_pages __initdata;
341 static unsigned long nr_all_pages __initdata;
342 static unsigned long dma_reserve __initdata;
343 
344 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
345 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
346 static unsigned long required_kernelcore __initdata;
347 static unsigned long required_kernelcore_percent __initdata;
348 static unsigned long required_movablecore __initdata;
349 static unsigned long required_movablecore_percent __initdata;
350 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
351 static bool mirrored_kernelcore __meminitdata;
352 
353 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
354 int movable_zone;
355 EXPORT_SYMBOL(movable_zone);
356 
357 #if MAX_NUMNODES > 1
358 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
359 unsigned int nr_online_nodes __read_mostly = 1;
360 EXPORT_SYMBOL(nr_node_ids);
361 EXPORT_SYMBOL(nr_online_nodes);
362 #endif
363 
364 int page_group_by_mobility_disabled __read_mostly;
365 
366 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
367 /*
368  * During boot we initialize deferred pages on-demand, as needed, but once
369  * page_alloc_init_late() has finished, the deferred pages are all initialized,
370  * and we can permanently disable that path.
371  */
372 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
373 
374 /*
375  * kasan_free_pages() is called only after deferred memory initialization
376  * has completed. Poisoning pages during deferred memory init will greatly
377  * lengthen the process and cause problems in large memory systems as the
378  * deferred pages initialization is done with interrupts disabled.
379  *
380  * Assuming that there will be no reference to those newly initialized
381  * pages before they are ever allocated, this should have no effect on
382  * KASAN memory tracking as the poison will be properly inserted at page
383  * allocation time. The only corner case is when pages are allocated by
384  * on-demand allocation and then freed again before the deferred pages
385  * initialization is done, but this is not likely to happen.
386  */
387 static inline void kasan_free_nondeferred_pages(struct page *page, int order)
388 {
389 	if (!static_branch_unlikely(&deferred_pages))
390 		kasan_free_pages(page, order);
391 }
392 
393 /* Returns true if the struct page for the pfn is uninitialised */
394 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
395 {
396 	int nid = early_pfn_to_nid(pfn);
397 
398 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
399 		return true;
400 
401 	return false;
402 }
403 
404 /*
405  * Returns true when the remaining initialisation should be deferred until
406  * later in the boot cycle when it can be parallelised.
407  */
408 static bool __meminit
409 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
410 {
411 	static unsigned long prev_end_pfn, nr_initialised;
412 
413 	/*
414 	 * prev_end_pfn is a static that holds the end of the previous zone.
415 	 * No locking needed: called very early in boot, before smp_init().
416 	 */
417 	if (prev_end_pfn != end_pfn) {
418 		prev_end_pfn = end_pfn;
419 		nr_initialised = 0;
420 	}
421 
422 	/* Always populate low zones for address-constrained allocations */
423 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
424 		return false;
425 
426 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
427 		return true;
428 	/*
429 	 * We start with only one section of pages; more pages are added as
430 	 * needed until the rest of the deferred pages are initialized.
431 	 */
432 	nr_initialised++;
433 	if ((nr_initialised > PAGES_PER_SECTION) &&
434 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
435 		NODE_DATA(nid)->first_deferred_pfn = pfn;
436 		return true;
437 	}
438 	return false;
439 }
440 #else
441 #define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)
442 
443 static inline bool early_page_uninitialised(unsigned long pfn)
444 {
445 	return false;
446 }
447 
448 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
449 {
450 	return false;
451 }
452 #endif
453 
454 /* Return a pointer to the bitmap storing bits affecting a block of pages */
455 static inline unsigned long *get_pageblock_bitmap(struct page *page,
456 							unsigned long pfn)
457 {
458 #ifdef CONFIG_SPARSEMEM
459 	return section_to_usemap(__pfn_to_section(pfn));
460 #else
461 	return page_zone(page)->pageblock_flags;
462 #endif /* CONFIG_SPARSEMEM */
463 }
464 
465 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
466 {
467 #ifdef CONFIG_SPARSEMEM
468 	pfn &= (PAGES_PER_SECTION-1);
469 #else
470 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
471 #endif /* CONFIG_SPARSEMEM */
472 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
473 }
474 
475 static __always_inline
476 unsigned long __get_pfnblock_flags_mask(struct page *page,
477 					unsigned long pfn,
478 					unsigned long mask)
479 {
480 	unsigned long *bitmap;
481 	unsigned long bitidx, word_bitidx;
482 	unsigned long word;
483 
484 	bitmap = get_pageblock_bitmap(page, pfn);
485 	bitidx = pfn_to_bitidx(page, pfn);
486 	word_bitidx = bitidx / BITS_PER_LONG;
487 	bitidx &= (BITS_PER_LONG-1);
488 
489 	word = bitmap[word_bitidx];
490 	return (word >> bitidx) & mask;
491 }
492 
493 /**
494  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
495  * @page: The page within the block of interest
496  * @pfn: The target page frame number
497  * @mask: mask of bits that the caller is interested in
498  *
499  * Return: pageblock_bits flags
500  */
501 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
502 					unsigned long mask)
503 {
504 	return __get_pfnblock_flags_mask(page, pfn, mask);
505 }
506 
507 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
508 {
509 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
510 }
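
/*
 * Worked example, not part of the original file, assuming a common x86_64
 * SPARSEMEM configuration with PAGES_PER_SECTION = 32768, pageblock_order =
 * 9 and NR_PAGEBLOCK_BITS = 4: for pfn 0x12345, pfn_to_bitidx() masks the
 * pfn down to its section offset (0x2345), shifts by pageblock_order (giving
 * pageblock 17 within the section) and multiplies by 4, so bitidx = 68. The
 * read path above then loads word 68 / BITS_PER_LONG = 1 of the section's
 * usemap and shifts it right by 68 % 64 = 4 before applying the caller's
 * mask.
 */
#if 0	/* example only, never compiled */
static void example_pageblock_bit_layout(void)
{
	unsigned long pfn = 0x12345;
	unsigned long bitidx = ((pfn & (32768 - 1)) >> 9) * 4;	/* 68 */

	pr_info("word %lu, bit %lu\n", bitidx / BITS_PER_LONG,
		bitidx & (BITS_PER_LONG - 1));			/* 1, 4 */
}
#endif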
511 
512 /**
513  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
514  * @page: The page within the block of interest
515  * @flags: The flags to set
516  * @pfn: The target page frame number
517  * @mask: mask of bits that the caller is interested in
518  */
519 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
520 					unsigned long pfn,
521 					unsigned long mask)
522 {
523 	unsigned long *bitmap;
524 	unsigned long bitidx, word_bitidx;
525 	unsigned long old_word, word;
526 
527 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
528 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
529 
530 	bitmap = get_pageblock_bitmap(page, pfn);
531 	bitidx = pfn_to_bitidx(page, pfn);
532 	word_bitidx = bitidx / BITS_PER_LONG;
533 	bitidx &= (BITS_PER_LONG-1);
534 
535 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
536 
537 	mask <<= bitidx;
538 	flags <<= bitidx;
539 
540 	word = READ_ONCE(bitmap[word_bitidx]);
541 	for (;;) {
542 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
543 		if (word == old_word)
544 			break;
545 		word = old_word;
546 	}
547 }
548 
549 void set_pageblock_migratetype(struct page *page, int migratetype)
550 {
551 	if (unlikely(page_group_by_mobility_disabled &&
552 		     migratetype < MIGRATE_PCPTYPES))
553 		migratetype = MIGRATE_UNMOVABLE;
554 
555 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
556 				page_to_pfn(page), MIGRATETYPE_MASK);
557 }
558 
559 #ifdef CONFIG_DEBUG_VM
560 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
561 {
562 	int ret = 0;
563 	unsigned seq;
564 	unsigned long pfn = page_to_pfn(page);
565 	unsigned long sp, start_pfn;
566 
567 	do {
568 		seq = zone_span_seqbegin(zone);
569 		start_pfn = zone->zone_start_pfn;
570 		sp = zone->spanned_pages;
571 		if (!zone_spans_pfn(zone, pfn))
572 			ret = 1;
573 	} while (zone_span_seqretry(zone, seq));
574 
575 	if (ret)
576 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
577 			pfn, zone_to_nid(zone), zone->name,
578 			start_pfn, start_pfn + sp);
579 
580 	return ret;
581 }
582 
583 static int page_is_consistent(struct zone *zone, struct page *page)
584 {
585 	if (!pfn_valid_within(page_to_pfn(page)))
586 		return 0;
587 	if (zone != page_zone(page))
588 		return 0;
589 
590 	return 1;
591 }
592 /*
593  * Temporary debugging check for pages not lying within a given zone.
594  */
595 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
596 {
597 	if (page_outside_zone_boundaries(zone, page))
598 		return 1;
599 	if (!page_is_consistent(zone, page))
600 		return 1;
601 
602 	return 0;
603 }
604 #else
605 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
606 {
607 	return 0;
608 }
609 #endif
610 
611 static void bad_page(struct page *page, const char *reason)
612 {
613 	static unsigned long resume;
614 	static unsigned long nr_shown;
615 	static unsigned long nr_unshown;
616 
617 	/*
618 	 * Allow a burst of 60 reports, then keep quiet for that minute;
619 	 * or allow a steady drip of one report per second.
620 	 */
621 	if (nr_shown == 60) {
622 		if (time_before(jiffies, resume)) {
623 			nr_unshown++;
624 			goto out;
625 		}
626 		if (nr_unshown) {
627 			pr_alert(
628 			      "BUG: Bad page state: %lu messages suppressed\n",
629 				nr_unshown);
630 			nr_unshown = 0;
631 		}
632 		nr_shown = 0;
633 	}
634 	if (nr_shown++ == 0)
635 		resume = jiffies + 60 * HZ;
636 
637 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
638 		current->comm, page_to_pfn(page));
639 	__dump_page(page, reason);
640 	dump_page_owner(page);
641 
642 	print_modules();
643 	dump_stack();
644 out:
645 	/* Leave bad fields for debug, except PageBuddy could make trouble */
646 	page_mapcount_reset(page); /* remove PageBuddy */
647 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
648 }
649 
650 /*
651  * Higher-order pages are called "compound pages".  They are structured thusly:
652  *
653  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
654  *
655  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
656  * in bit 0 of page->compound_head. The rest of the bits point to the head page.
657  *
658  * The first tail page's ->compound_dtor holds the offset in array of compound
659  * page destructors. See compound_page_dtors.
660  *
661  * The first tail page's ->compound_order holds the order of allocation.
662  * This usage means that zero-order pages may not be compound.
663  */
664 
665 void free_compound_page(struct page *page)
666 {
667 	mem_cgroup_uncharge(page);
668 	__free_pages_ok(page, compound_order(page), FPI_NONE);
669 }
670 
671 void prep_compound_page(struct page *page, unsigned int order)
672 {
673 	int i;
674 	int nr_pages = 1 << order;
675 
676 	__SetPageHead(page);
677 	for (i = 1; i < nr_pages; i++) {
678 		struct page *p = page + i;
679 		set_page_count(p, 0);
680 		p->mapping = TAIL_MAPPING;
681 		set_compound_head(p, page);
682 	}
683 
684 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
685 	set_compound_order(page, order);
686 	atomic_set(compound_mapcount_ptr(page), -1);
687 	if (hpage_pincount_available(page))
688 		atomic_set(compound_pincount_ptr(page), 0);
689 }
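
/*
 * Illustrative sketch, not part of the original file: after
 * prep_compound_page() the metadata described in the comment block above can
 * be read back through the generic helpers. The function below is
 * hypothetical and assumes order >= 1 so that a tail page exists.
 */
#if 0	/* example only, never compiled */
static void example_inspect_compound(struct page *head, unsigned int order)
{
	struct page *tail = head + 1;

	WARN_ON(!PageHead(head));			/* PG_head on the head page */
	WARN_ON(compound_order(head) != order);		/* stored in the first tail page */
	WARN_ON(!PageTail(tail));			/* bit 0 of ->compound_head */
	WARN_ON(compound_head(tail) != head);		/* remaining bits point back */
}
#endif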
690 
691 #ifdef CONFIG_DEBUG_PAGEALLOC
692 unsigned int _debug_guardpage_minorder;
693 
694 bool _debug_pagealloc_enabled_early __read_mostly
695 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
696 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
697 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
698 EXPORT_SYMBOL(_debug_pagealloc_enabled);
699 
700 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
701 
702 static int __init early_debug_pagealloc(char *buf)
703 {
704 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
705 }
706 early_param("debug_pagealloc", early_debug_pagealloc);
707 
708 static int __init debug_guardpage_minorder_setup(char *buf)
709 {
710 	unsigned long res;
711 
712 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
713 		pr_err("Bad debug_guardpage_minorder value\n");
714 		return 0;
715 	}
716 	_debug_guardpage_minorder = res;
717 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
718 	return 0;
719 }
720 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
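
/*
 * Illustrative note, not part of the original file: both hooks above parse
 * boot parameters, e.g.
 *
 *	debug_pagealloc=on debug_guardpage_minorder=1
 *
 * On architectures that support it, the first makes freed pages inaccessible
 * so stray accesses fault; the second additionally keeps low-order buddies
 * as guard pages (see set_page_guard() below), trading usable memory for a
 * better chance of catching corruption. The parser above rejects values
 * above MAX_ORDER / 2.
 */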
721 
722 static inline bool set_page_guard(struct zone *zone, struct page *page,
723 				unsigned int order, int migratetype)
724 {
725 	if (!debug_guardpage_enabled())
726 		return false;
727 
728 	if (order >= debug_guardpage_minorder())
729 		return false;
730 
731 	__SetPageGuard(page);
732 	INIT_LIST_HEAD(&page->lru);
733 	set_page_private(page, order);
734 	/* Guard pages are not available for any usage */
735 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
736 
737 	return true;
738 }
739 
740 static inline void clear_page_guard(struct zone *zone, struct page *page,
741 				unsigned int order, int migratetype)
742 {
743 	if (!debug_guardpage_enabled())
744 		return;
745 
746 	__ClearPageGuard(page);
747 
748 	set_page_private(page, 0);
749 	if (!is_migrate_isolate(migratetype))
750 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
751 }
752 #else
753 static inline bool set_page_guard(struct zone *zone, struct page *page,
754 			unsigned int order, int migratetype) { return false; }
755 static inline void clear_page_guard(struct zone *zone, struct page *page,
756 				unsigned int order, int migratetype) {}
757 #endif
758 
759 /*
760  * Enable static keys related to various memory debugging and hardening options.
761  * Some override others, and depend on early params that are evaluated in the
762  * order of appearance. So we need to first gather the full picture of what was
763  * enabled, and then make decisions.
764  */
765 void init_mem_debugging_and_hardening(void)
766 {
767 	if (_init_on_alloc_enabled_early) {
768 		if (page_poisoning_enabled())
769 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
770 				"will take precedence over init_on_alloc\n");
771 		else
772 			static_branch_enable(&init_on_alloc);
773 	}
774 	if (_init_on_free_enabled_early) {
775 		if (page_poisoning_enabled())
776 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
777 				"will take precedence over init_on_free\n");
778 		else
779 			static_branch_enable(&init_on_free);
780 	}
781 
782 #ifdef CONFIG_PAGE_POISONING
783 	/*
784 	 * Page poisoning serves as debug page alloc for some arches. If
785 	 * either of those options is enabled, enable poisoning.
786 	 */
787 	if (page_poisoning_enabled() ||
788 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
789 	      debug_pagealloc_enabled()))
790 		static_branch_enable(&_page_poisoning_enabled);
791 #endif
792 
793 #ifdef CONFIG_DEBUG_PAGEALLOC
794 	if (!debug_pagealloc_enabled())
795 		return;
796 
797 	static_branch_enable(&_debug_pagealloc_enabled);
798 
799 	if (!debug_guardpage_minorder())
800 		return;
801 
802 	static_branch_enable(&_debug_guardpage_enabled);
803 #endif
804 }
805 
806 static inline void set_buddy_order(struct page *page, unsigned int order)
807 {
808 	set_page_private(page, order);
809 	__SetPageBuddy(page);
810 }
811 
812 /*
813  * This function checks whether a page is free && is the buddy.
814  * We can coalesce a page and its buddy if
815  * (a) the buddy is not in a hole (check before calling!) &&
816  * (b) the buddy is in the buddy system &&
817  * (c) a page and its buddy have the same order &&
818  * (d) a page and its buddy are in the same zone.
819  *
820  * For recording whether a page is in the buddy system, we set PageBuddy.
821  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
822  *
823  * For recording page's order, we use page_private(page).
824  */
825 static inline bool page_is_buddy(struct page *page, struct page *buddy,
826 							unsigned int order)
827 {
828 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
829 		return false;
830 
831 	if (buddy_order(buddy) != order)
832 		return false;
833 
834 	/*
835 	 * zone check is done late to avoid uselessly calculating
836 	 * zone/node ids for pages that could never merge.
837 	 */
838 	if (page_zone_id(page) != page_zone_id(buddy))
839 		return false;
840 
841 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
842 
843 	return true;
844 }
845 
846 #ifdef CONFIG_COMPACTION
847 static inline struct capture_control *task_capc(struct zone *zone)
848 {
849 	struct capture_control *capc = current->capture_control;
850 
851 	return unlikely(capc) &&
852 		!(current->flags & PF_KTHREAD) &&
853 		!capc->page &&
854 		capc->cc->zone == zone ? capc : NULL;
855 }
856 
857 static inline bool
858 compaction_capture(struct capture_control *capc, struct page *page,
859 		   int order, int migratetype)
860 {
861 	if (!capc || order != capc->cc->order)
862 		return false;
863 
864 	/* Do not accidentally pollute CMA or isolated regions */
865 	if (is_migrate_cma(migratetype) ||
866 	    is_migrate_isolate(migratetype))
867 		return false;
868 
869 	/*
870 	 * Do not let lower order allocations pollute a movable pageblock.
871 	 * This might let an unmovable request use a reclaimable pageblock
872 	 * and vice-versa but no more than normal fallback logic which can
873 	 * have trouble finding a high-order free page.
874 	 */
875 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
876 		return false;
877 
878 	capc->page = page;
879 	return true;
880 }
881 
882 #else
883 static inline struct capture_control *task_capc(struct zone *zone)
884 {
885 	return NULL;
886 }
887 
888 static inline bool
889 compaction_capture(struct capture_control *capc, struct page *page,
890 		   int order, int migratetype)
891 {
892 	return false;
893 }
894 #endif /* CONFIG_COMPACTION */
895 
896 /* Used for pages not on another list */
897 static inline void add_to_free_list(struct page *page, struct zone *zone,
898 				    unsigned int order, int migratetype)
899 {
900 	struct free_area *area = &zone->free_area[order];
901 
902 	list_add(&page->lru, &area->free_list[migratetype]);
903 	area->nr_free++;
904 }
905 
906 /* Used for pages not on another list */
907 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
908 					 unsigned int order, int migratetype)
909 {
910 	struct free_area *area = &zone->free_area[order];
911 
912 	list_add_tail(&page->lru, &area->free_list[migratetype]);
913 	area->nr_free++;
914 }
915 
916 /*
917  * Used for pages which are on another list. Move the pages to the tail
918  * of the list - so the moved pages won't immediately be considered for
919  * allocation again (e.g., optimization for memory onlining).
920  */
921 static inline void move_to_free_list(struct page *page, struct zone *zone,
922 				     unsigned int order, int migratetype)
923 {
924 	struct free_area *area = &zone->free_area[order];
925 
926 	list_move_tail(&page->lru, &area->free_list[migratetype]);
927 }
928 
929 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
930 					   unsigned int order)
931 {
932 	/* clear reported state and update reported page count */
933 	if (page_reported(page))
934 		__ClearPageReported(page);
935 
936 	list_del(&page->lru);
937 	__ClearPageBuddy(page);
938 	set_page_private(page, 0);
939 	zone->free_area[order].nr_free--;
940 }
941 
942 /*
943  * If this is not the largest possible page, check if the buddy
944  * of the next-highest order is free. If it is, it's possible
945  * that pages are being freed that will coalesce soon. In case
946  * that is happening, add the free page to the tail of the list
947  * so it's less likely to be used soon and more likely to be merged
948  * as a higher order page.
949  */
950 static inline bool
951 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
952 		   struct page *page, unsigned int order)
953 {
954 	struct page *higher_page, *higher_buddy;
955 	unsigned long combined_pfn;
956 
957 	if (order >= MAX_ORDER - 2)
958 		return false;
959 
960 	if (!pfn_valid_within(buddy_pfn))
961 		return false;
962 
963 	combined_pfn = buddy_pfn & pfn;
964 	higher_page = page + (combined_pfn - pfn);
965 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
966 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
967 
968 	return pfn_valid_within(buddy_pfn) &&
969 	       page_is_buddy(higher_page, higher_buddy, order + 1);
970 }
971 
972 /*
973  * Freeing function for a buddy system allocator.
974  *
975  * The concept of a buddy system is to maintain a direct-mapped table
976  * (containing bit values) for memory blocks of various "orders".
977  * The bottom level table contains the map for the smallest allocatable
978  * units of memory (here, pages), and each level above it describes
979  * pairs of units from the levels below, hence, "buddies".
980  * At a high level, all that happens here is marking the table entry
981  * at the bottom level available, and propagating the changes upward
982  * as necessary, plus some accounting needed to play nicely with other
983  * parts of the VM system.
984  * At each level, we keep a list of pages, which are heads of contiguous
985  * runs of (1 << order) free pages and are marked with PageBuddy.
986  * The page's order is recorded in the page_private(page) field.
987  * So when we are allocating or freeing one, we can derive the state of the
988  * other.  That is, if we allocate a small block, and both were
989  * free, the remainder of the region must be split into blocks.
990  * If a block is freed, and its buddy is also free, then this
991  * triggers coalescing into a block of larger size.
992  *
993  * -- nyc
994  */
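
/*
 * Worked example, not part of the original file: the buddy of a block is
 * found by flipping the order bit of its pfn (__find_buddy_pfn() in
 * mm/internal.h is pfn ^ (1 << order)), and the merged block starts at
 * buddy_pfn & pfn. Freeing the order-0 page at pfn 0x1000 while 0x1001 is
 * already free therefore yields an order-1 block at 0x1000, whose own buddy
 * is 0x1002; if that block is free too, the loop in __free_one_page() below
 * keeps merging and produces an order-2 block at 0x1000, and so on up to
 * max_order.
 */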
995 
996 static inline void __free_one_page(struct page *page,
997 		unsigned long pfn,
998 		struct zone *zone, unsigned int order,
999 		int migratetype, fpi_t fpi_flags)
1000 {
1001 	struct capture_control *capc = task_capc(zone);
1002 	unsigned long buddy_pfn;
1003 	unsigned long combined_pfn;
1004 	unsigned int max_order;
1005 	struct page *buddy;
1006 	bool to_tail;
1007 
1008 	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1009 
1010 	VM_BUG_ON(!zone_is_initialized(zone));
1011 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1012 
1013 	VM_BUG_ON(migratetype == -1);
1014 	if (likely(!is_migrate_isolate(migratetype)))
1015 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1016 
1017 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1018 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1019 
1020 continue_merging:
1021 	while (order < max_order) {
1022 		if (compaction_capture(capc, page, order, migratetype)) {
1023 			__mod_zone_freepage_state(zone, -(1 << order),
1024 								migratetype);
1025 			return;
1026 		}
1027 		buddy_pfn = __find_buddy_pfn(pfn, order);
1028 		buddy = page + (buddy_pfn - pfn);
1029 
1030 		if (!pfn_valid_within(buddy_pfn))
1031 			goto done_merging;
1032 		if (!page_is_buddy(page, buddy, order))
1033 			goto done_merging;
1034 		/*
1035 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1036 		 * merge with it and move up one order.
1037 		 */
1038 		if (page_is_guard(buddy))
1039 			clear_page_guard(zone, buddy, order, migratetype);
1040 		else
1041 			del_page_from_free_list(buddy, zone, order);
1042 		combined_pfn = buddy_pfn & pfn;
1043 		page = page + (combined_pfn - pfn);
1044 		pfn = combined_pfn;
1045 		order++;
1046 	}
1047 	if (order < MAX_ORDER - 1) {
1048 		/* If we are here, it means order is >= pageblock_order.
1049 		 * We want to prevent merge between freepages on isolate
1050 		 * pageblock and normal pageblock. Without this, pageblock
1051 		 * isolation could cause incorrect freepage or CMA accounting.
1052 		 *
1053 		 * We don't want to hit this code for the more frequent
1054 		 * low-order merging.
1055 		 */
1056 		if (unlikely(has_isolate_pageblock(zone))) {
1057 			int buddy_mt;
1058 
1059 			buddy_pfn = __find_buddy_pfn(pfn, order);
1060 			buddy = page + (buddy_pfn - pfn);
1061 			buddy_mt = get_pageblock_migratetype(buddy);
1062 
1063 			if (migratetype != buddy_mt
1064 					&& (is_migrate_isolate(migratetype) ||
1065 						is_migrate_isolate(buddy_mt)))
1066 				goto done_merging;
1067 		}
1068 		max_order = order + 1;
1069 		goto continue_merging;
1070 	}
1071 
1072 done_merging:
1073 	set_buddy_order(page, order);
1074 
1075 	if (fpi_flags & FPI_TO_TAIL)
1076 		to_tail = true;
1077 	else if (is_shuffle_order(order))
1078 		to_tail = shuffle_pick_tail();
1079 	else
1080 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1081 
1082 	if (to_tail)
1083 		add_to_free_list_tail(page, zone, order, migratetype);
1084 	else
1085 		add_to_free_list(page, zone, order, migratetype);
1086 
1087 	/* Notify page reporting subsystem of freed page */
1088 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1089 		page_reporting_notify_free(order);
1090 }
1091 
1092 /*
1093  * A bad page could be due to a number of fields. Instead of multiple branches,
1094  * try and check multiple fields with one check. The caller must do a detailed
1095  * check if necessary.
1096  */
1097 static inline bool page_expected_state(struct page *page,
1098 					unsigned long check_flags)
1099 {
1100 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1101 		return false;
1102 
1103 	if (unlikely((unsigned long)page->mapping |
1104 			page_ref_count(page) |
1105 #ifdef CONFIG_MEMCG
1106 			(unsigned long)page_memcg(page) |
1107 #endif
1108 			(page->flags & check_flags)))
1109 		return false;
1110 
1111 	return true;
1112 }
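
/*
 * Illustrative note, not part of the original file: the bitwise OR above
 * works because every field it folds in (mapping, refcount, the memcg
 * pointer under CONFIG_MEMCG, and the checked flag bits) is expected to be
 * zero on a clean page, so the combined word is zero exactly when all of
 * them are, and the common case costs a single branch. page->_mapcount is
 * tested separately first because its idle value is -1 rather than 0. Only
 * when the check fails does page_bad_reason() below walk the fields one by
 * one to name the culprit.
 */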
1113 
1114 static const char *page_bad_reason(struct page *page, unsigned long flags)
1115 {
1116 	const char *bad_reason = NULL;
1117 
1118 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1119 		bad_reason = "nonzero mapcount";
1120 	if (unlikely(page->mapping != NULL))
1121 		bad_reason = "non-NULL mapping";
1122 	if (unlikely(page_ref_count(page) != 0))
1123 		bad_reason = "nonzero _refcount";
1124 	if (unlikely(page->flags & flags)) {
1125 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1126 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1127 		else
1128 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1129 	}
1130 #ifdef CONFIG_MEMCG
1131 	if (unlikely(page_memcg(page)))
1132 		bad_reason = "page still charged to cgroup";
1133 #endif
1134 	return bad_reason;
1135 }
1136 
1137 static void check_free_page_bad(struct page *page)
1138 {
1139 	bad_page(page,
1140 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1141 }
1142 
1143 static inline int check_free_page(struct page *page)
1144 {
1145 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1146 		return 0;
1147 
1148 	/* Something has gone sideways, find it */
1149 	check_free_page_bad(page);
1150 	return 1;
1151 }
1152 
1153 static int free_tail_pages_check(struct page *head_page, struct page *page)
1154 {
1155 	int ret = 1;
1156 
1157 	/*
1158 	 * We rely on page->lru.next never having bit 0 set, unless the page
1159 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1160 	 */
1161 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1162 
1163 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1164 		ret = 0;
1165 		goto out;
1166 	}
1167 	switch (page - head_page) {
1168 	case 1:
1169 		/* the first tail page: ->mapping may be compound_mapcount() */
1170 		if (unlikely(compound_mapcount(page))) {
1171 			bad_page(page, "nonzero compound_mapcount");
1172 			goto out;
1173 		}
1174 		break;
1175 	case 2:
1176 		/*
1177 		 * the second tail page: ->mapping is
1178 		 * deferred_list.next -- ignore value.
1179 		 */
1180 		break;
1181 	default:
1182 		if (page->mapping != TAIL_MAPPING) {
1183 			bad_page(page, "corrupted mapping in tail page");
1184 			goto out;
1185 		}
1186 		break;
1187 	}
1188 	if (unlikely(!PageTail(page))) {
1189 		bad_page(page, "PageTail not set");
1190 		goto out;
1191 	}
1192 	if (unlikely(compound_head(page) != head_page)) {
1193 		bad_page(page, "compound_head not consistent");
1194 		goto out;
1195 	}
1196 	ret = 0;
1197 out:
1198 	page->mapping = NULL;
1199 	clear_compound_head(page);
1200 	return ret;
1201 }
1202 
1203 static void kernel_init_free_pages(struct page *page, int numpages)
1204 {
1205 	int i;
1206 
1207 	/* s390's use of memset() could override KASAN redzones. */
1208 	kasan_disable_current();
1209 	for (i = 0; i < numpages; i++) {
1210 		u8 tag = page_kasan_tag(page + i);
1211 		page_kasan_tag_reset(page + i);
1212 		clear_highpage(page + i);
1213 		page_kasan_tag_set(page + i, tag);
1214 	}
1215 	kasan_enable_current();
1216 }
1217 
1218 static __always_inline bool free_pages_prepare(struct page *page,
1219 					unsigned int order, bool check_free)
1220 {
1221 	int bad = 0;
1222 
1223 	VM_BUG_ON_PAGE(PageTail(page), page);
1224 
1225 	trace_mm_page_free(page, order);
1226 
1227 	if (unlikely(PageHWPoison(page)) && !order) {
1228 		/*
1229 		 * Do not let hwpoison pages hit pcplists/buddy
1230 		 * Untie memcg state and reset page's owner
1231 		 */
1232 		if (memcg_kmem_enabled() && PageMemcgKmem(page))
1233 			__memcg_kmem_uncharge_page(page, order);
1234 		reset_page_owner(page, order);
1235 		return false;
1236 	}
1237 
1238 	/*
1239 	 * Check tail pages before head page information is cleared to
1240 	 * avoid checking PageCompound for order-0 pages.
1241 	 */
1242 	if (unlikely(order)) {
1243 		bool compound = PageCompound(page);
1244 		int i;
1245 
1246 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1247 
1248 		if (compound)
1249 			ClearPageDoubleMap(page);
1250 		for (i = 1; i < (1 << order); i++) {
1251 			if (compound)
1252 				bad += free_tail_pages_check(page, page + i);
1253 			if (unlikely(check_free_page(page + i))) {
1254 				bad++;
1255 				continue;
1256 			}
1257 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1258 		}
1259 	}
1260 	if (PageMappingFlags(page))
1261 		page->mapping = NULL;
1262 	if (memcg_kmem_enabled() && PageMemcgKmem(page))
1263 		__memcg_kmem_uncharge_page(page, order);
1264 	if (check_free)
1265 		bad += check_free_page(page);
1266 	if (bad)
1267 		return false;
1268 
1269 	page_cpupid_reset_last(page);
1270 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1271 	reset_page_owner(page, order);
1272 
1273 	if (!PageHighMem(page)) {
1274 		debug_check_no_locks_freed(page_address(page),
1275 					   PAGE_SIZE << order);
1276 		debug_check_no_obj_freed(page_address(page),
1277 					   PAGE_SIZE << order);
1278 	}
1279 	if (want_init_on_free())
1280 		kernel_init_free_pages(page, 1 << order);
1281 
1282 	kernel_poison_pages(page, 1 << order);
1283 
1284 	/*
1285 	 * arch_free_page() can make the page's contents inaccessible.  s390
1286 	 * does this.  So nothing which can access the page's contents should
1287 	 * happen after this.
1288 	 */
1289 	arch_free_page(page, order);
1290 
1291 	debug_pagealloc_unmap_pages(page, 1 << order);
1292 
1293 	kasan_free_nondeferred_pages(page, order);
1294 
1295 	return true;
1296 }
1297 
1298 #ifdef CONFIG_DEBUG_VM
1299 /*
1300  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1301  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1302  * moved from pcp lists to free lists.
1303  */
1304 static bool free_pcp_prepare(struct page *page)
1305 {
1306 	return free_pages_prepare(page, 0, true);
1307 }
1308 
1309 static bool bulkfree_pcp_prepare(struct page *page)
1310 {
1311 	if (debug_pagealloc_enabled_static())
1312 		return check_free_page(page);
1313 	else
1314 		return false;
1315 }
1316 #else
1317 /*
1318  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1319  * moving from pcp lists to free list in order to reduce overhead. With
1320  * debug_pagealloc enabled, they are checked also immediately when being freed
1321  * to the pcp lists.
1322  */
1323 static bool free_pcp_prepare(struct page *page)
1324 {
1325 	if (debug_pagealloc_enabled_static())
1326 		return free_pages_prepare(page, 0, true);
1327 	else
1328 		return free_pages_prepare(page, 0, false);
1329 }
1330 
1331 static bool bulkfree_pcp_prepare(struct page *page)
1332 {
1333 	return check_free_page(page);
1334 }
1335 #endif /* CONFIG_DEBUG_VM */
1336 
1337 static inline void prefetch_buddy(struct page *page)
1338 {
1339 	unsigned long pfn = page_to_pfn(page);
1340 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1341 	struct page *buddy = page + (buddy_pfn - pfn);
1342 
1343 	prefetch(buddy);
1344 }
1345 
1346 /*
1347  * Frees a number of pages from the PCP lists
1348  * Assumes all pages on list are in same zone, and of same order.
1349  * count is the number of pages to free.
1350  *
1351  * If the zone was previously in an "all pages pinned" state then look to
1352  * see if this freeing clears that state.
1353  *
1354  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1355  * pinned" detection logic.
1356  */
1357 static void free_pcppages_bulk(struct zone *zone, int count,
1358 					struct per_cpu_pages *pcp)
1359 {
1360 	int migratetype = 0;
1361 	int batch_free = 0;
1362 	int prefetch_nr = READ_ONCE(pcp->batch);
1363 	bool isolated_pageblocks;
1364 	struct page *page, *tmp;
1365 	LIST_HEAD(head);
1366 
1367 	/*
1368 	 * Ensure a proper count is passed, otherwise we would get stuck in
1369 	 * the while (list_empty(list)) loop below.
1370 	 */
1371 	count = min(pcp->count, count);
1372 	while (count) {
1373 		struct list_head *list;
1374 
1375 		/*
1376 		 * Remove pages from lists in a round-robin fashion. A
1377 		 * batch_free count is maintained that is incremented when an
1378 		 * empty list is encountered.  This is so more pages are freed
1379 		 * off fuller lists instead of spinning excessively around empty
1380 		 * lists.
1381 		 */
1382 		do {
1383 			batch_free++;
1384 			if (++migratetype == MIGRATE_PCPTYPES)
1385 				migratetype = 0;
1386 			list = &pcp->lists[migratetype];
1387 		} while (list_empty(list));
1388 
1389 		/* This is the only non-empty list. Free them all. */
1390 		if (batch_free == MIGRATE_PCPTYPES)
1391 			batch_free = count;
1392 
1393 		do {
1394 			page = list_last_entry(list, struct page, lru);
1395 			/* must delete to avoid corrupting pcp list */
1396 			list_del(&page->lru);
1397 			pcp->count--;
1398 
1399 			if (bulkfree_pcp_prepare(page))
1400 				continue;
1401 
1402 			list_add_tail(&page->lru, &head);
1403 
1404 			/*
1405 			 * We are going to put the page back to the global
1406 			 * pool, prefetch its buddy to speed up later access
1407 			 * under zone->lock. It is believed the overhead of
1408 			 * an additional test and calculating buddy_pfn here
1409 			 * can be offset by reduced memory latency later. To
1410 			 * avoid excessive prefetching due to large count, only
1411 			 * prefetch buddy for the first pcp->batch nr of pages.
1412 			 */
1413 			if (prefetch_nr) {
1414 				prefetch_buddy(page);
1415 				prefetch_nr--;
1416 			}
1417 		} while (--count && --batch_free && !list_empty(list));
1418 	}
1419 
1420 	spin_lock(&zone->lock);
1421 	isolated_pageblocks = has_isolate_pageblock(zone);
1422 
1423 	/*
1424 	 * Use safe version since after __free_one_page(),
1425 	 * page->lru.next will not point to original list.
1426 	 */
1427 	list_for_each_entry_safe(page, tmp, &head, lru) {
1428 		int mt = get_pcppage_migratetype(page);
1429 		/* MIGRATE_ISOLATE page should not go to pcplists */
1430 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1431 		/* Pageblock could have been isolated meanwhile */
1432 		if (unlikely(isolated_pageblocks))
1433 			mt = get_pageblock_migratetype(page);
1434 
1435 		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
1436 		trace_mm_page_pcpu_drain(page, 0, mt);
1437 	}
1438 	spin_unlock(&zone->lock);
1439 }
1440 
1441 static void free_one_page(struct zone *zone,
1442 				struct page *page, unsigned long pfn,
1443 				unsigned int order,
1444 				int migratetype, fpi_t fpi_flags)
1445 {
1446 	spin_lock(&zone->lock);
1447 	if (unlikely(has_isolate_pageblock(zone) ||
1448 		is_migrate_isolate(migratetype))) {
1449 		migratetype = get_pfnblock_migratetype(page, pfn);
1450 	}
1451 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1452 	spin_unlock(&zone->lock);
1453 }
1454 
1455 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1456 				unsigned long zone, int nid)
1457 {
1458 	mm_zero_struct_page(page);
1459 	set_page_links(page, zone, nid, pfn);
1460 	init_page_count(page);
1461 	page_mapcount_reset(page);
1462 	page_cpupid_reset_last(page);
1463 	page_kasan_tag_reset(page);
1464 
1465 	INIT_LIST_HEAD(&page->lru);
1466 #ifdef WANT_PAGE_VIRTUAL
1467 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1468 	if (!is_highmem_idx(zone))
1469 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1470 #endif
1471 }
1472 
1473 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1474 static void __meminit init_reserved_page(unsigned long pfn)
1475 {
1476 	pg_data_t *pgdat;
1477 	int nid, zid;
1478 
1479 	if (!early_page_uninitialised(pfn))
1480 		return;
1481 
1482 	nid = early_pfn_to_nid(pfn);
1483 	pgdat = NODE_DATA(nid);
1484 
1485 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1486 		struct zone *zone = &pgdat->node_zones[zid];
1487 
1488 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1489 			break;
1490 	}
1491 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1492 }
1493 #else
1494 static inline void init_reserved_page(unsigned long pfn)
1495 {
1496 }
1497 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1498 
1499 /*
1500  * Initialised pages do not have PageReserved set. This function is
1501  * called for each range allocated by the bootmem allocator and
1502  * marks the pages PageReserved. The remaining valid pages are later
1503  * sent to the buddy page allocator.
1504  */
1505 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1506 {
1507 	unsigned long start_pfn = PFN_DOWN(start);
1508 	unsigned long end_pfn = PFN_UP(end);
1509 
1510 	for (; start_pfn < end_pfn; start_pfn++) {
1511 		if (pfn_valid(start_pfn)) {
1512 			struct page *page = pfn_to_page(start_pfn);
1513 
1514 			init_reserved_page(start_pfn);
1515 
1516 			/* Avoid false-positive PageTail() */
1517 			INIT_LIST_HEAD(&page->lru);
1518 
1519 			/*
1520 			 * no need for atomic set_bit because the struct
1521 			 * page is not visible yet so nobody should
1522 			 * access it yet.
1523 			 */
1524 			__SetPageReserved(page);
1525 		}
1526 	}
1527 }
1528 
1529 static void __free_pages_ok(struct page *page, unsigned int order,
1530 			    fpi_t fpi_flags)
1531 {
1532 	unsigned long flags;
1533 	int migratetype;
1534 	unsigned long pfn = page_to_pfn(page);
1535 
1536 	if (!free_pages_prepare(page, order, true))
1537 		return;
1538 
1539 	migratetype = get_pfnblock_migratetype(page, pfn);
1540 	local_irq_save(flags);
1541 	__count_vm_events(PGFREE, 1 << order);
1542 	free_one_page(page_zone(page), page, pfn, order, migratetype,
1543 		      fpi_flags);
1544 	local_irq_restore(flags);
1545 }
1546 
1547 void __free_pages_core(struct page *page, unsigned int order)
1548 {
1549 	unsigned int nr_pages = 1 << order;
1550 	struct page *p = page;
1551 	unsigned int loop;
1552 
1553 	/*
1554 	 * When initializing the memmap, __init_single_page() sets the refcount
1555 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1556 	 * refcount of all involved pages to 0.
1557 	 */
1558 	prefetchw(p);
1559 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1560 		prefetchw(p + 1);
1561 		__ClearPageReserved(p);
1562 		set_page_count(p, 0);
1563 	}
1564 	__ClearPageReserved(p);
1565 	set_page_count(p, 0);
1566 
1567 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1568 
1569 	/*
1570 	 * Bypass PCP and place fresh pages right to the tail, primarily
1571 	 * relevant for memory onlining.
1572 	 */
1573 	__free_pages_ok(page, order, FPI_TO_TAIL);
1574 }
1575 
1576 #ifdef CONFIG_NEED_MULTIPLE_NODES
1577 
1578 /*
1579  * During memory init memblocks map pfns to nids. The search is expensive and
1580  * this caches recent lookups. The implementation of __early_pfn_to_nid
1581  * treats start/end as pfns.
1582  */
1583 struct mminit_pfnnid_cache {
1584 	unsigned long last_start;
1585 	unsigned long last_end;
1586 	int last_nid;
1587 };
1588 
1589 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1590 
1591 /*
1592  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1593  */
1594 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1595 					struct mminit_pfnnid_cache *state)
1596 {
1597 	unsigned long start_pfn, end_pfn;
1598 	int nid;
1599 
1600 	if (state->last_start <= pfn && pfn < state->last_end)
1601 		return state->last_nid;
1602 
1603 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1604 	if (nid != NUMA_NO_NODE) {
1605 		state->last_start = start_pfn;
1606 		state->last_end = end_pfn;
1607 		state->last_nid = nid;
1608 	}
1609 
1610 	return nid;
1611 }
1612 
1613 int __meminit early_pfn_to_nid(unsigned long pfn)
1614 {
1615 	static DEFINE_SPINLOCK(early_pfn_lock);
1616 	int nid;
1617 
1618 	spin_lock(&early_pfn_lock);
1619 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1620 	if (nid < 0)
1621 		nid = first_online_node;
1622 	spin_unlock(&early_pfn_lock);
1623 
1624 	return nid;
1625 }
1626 #endif /* CONFIG_NEED_MULTIPLE_NODES */
1627 
1628 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1629 							unsigned int order)
1630 {
1631 	if (early_page_uninitialised(pfn))
1632 		return;
1633 	__free_pages_core(page, order);
1634 }
1635 
1636 /*
1637  * Check that the whole (or subset of) a pageblock given by the interval of
1638  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1639  * with the migration or free compaction scanner. The scanners then need to
1640  * use only pfn_valid_within() check for arches that allow holes within
1641  * pageblocks.
1642  *
1643  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1644  *
1645  * It's possible on some configurations to have a setup like node0 node1 node0
1646  * i.e. it's possible that all pages within a zones range of pages do not
1647  * belong to a single zone. We assume that a border between node0 and node1
1648  * can occur within a single pageblock, but not a node0 node1 node0
1649  * interleaving within a single pageblock. It is therefore sufficient to check
1650  * the first and last page of a pageblock and avoid checking each individual
1651  * page in a pageblock.
1652  */
1653 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1654 				     unsigned long end_pfn, struct zone *zone)
1655 {
1656 	struct page *start_page;
1657 	struct page *end_page;
1658 
1659 	/* end_pfn is one past the range we are checking */
1660 	end_pfn--;
1661 
1662 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1663 		return NULL;
1664 
1665 	start_page = pfn_to_online_page(start_pfn);
1666 	if (!start_page)
1667 		return NULL;
1668 
1669 	if (page_zone(start_page) != zone)
1670 		return NULL;
1671 
1672 	end_page = pfn_to_page(end_pfn);
1673 
1674 	/* This gives a shorter code than deriving page_zone(end_page) */
1675 	if (page_zone_id(start_page) != page_zone_id(end_page))
1676 		return NULL;
1677 
1678 	return start_page;
1679 }
1680 
1681 void set_zone_contiguous(struct zone *zone)
1682 {
1683 	unsigned long block_start_pfn = zone->zone_start_pfn;
1684 	unsigned long block_end_pfn;
1685 
1686 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1687 	for (; block_start_pfn < zone_end_pfn(zone);
1688 			block_start_pfn = block_end_pfn,
1689 			 block_end_pfn += pageblock_nr_pages) {
1690 
1691 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1692 
1693 		if (!__pageblock_pfn_to_page(block_start_pfn,
1694 					     block_end_pfn, zone))
1695 			return;
1696 		cond_resched();
1697 	}
1698 
1699 	/* We confirm that there is no hole */
1700 	zone->contiguous = true;
1701 }
1702 
1703 void clear_zone_contiguous(struct zone *zone)
1704 {
1705 	zone->contiguous = false;
1706 }
1707 
1708 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1709 static void __init deferred_free_range(unsigned long pfn,
1710 				       unsigned long nr_pages)
1711 {
1712 	struct page *page;
1713 	unsigned long i;
1714 
1715 	if (!nr_pages)
1716 		return;
1717 
1718 	page = pfn_to_page(pfn);
1719 
1720 	/* Free a large naturally-aligned chunk if possible */
1721 	if (nr_pages == pageblock_nr_pages &&
1722 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1723 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1724 		__free_pages_core(page, pageblock_order);
1725 		return;
1726 	}
1727 
1728 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1729 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1730 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1731 		__free_pages_core(page, 0);
1732 	}
1733 }
1734 
1735 /* Completion tracking for deferred_init_memmap() threads */
1736 static atomic_t pgdat_init_n_undone __initdata;
1737 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1738 
1739 static inline void __init pgdat_init_report_one_done(void)
1740 {
1741 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1742 		complete(&pgdat_init_all_done_comp);
1743 }
1744 
1745 /*
1746  * Returns true if page needs to be initialized or freed to buddy allocator.
1747  *
1748  * First we check if pfn is valid on architectures where it is possible to have
1749  * holes within pageblock_nr_pages. On systems where it is not possible, this
1750  * function is optimized out.
1751  *
1752  * Then, we check if the current large page is valid by only checking the validity
1753  * of the head pfn.
1754  */
1755 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1756 {
1757 	if (!pfn_valid_within(pfn))
1758 		return false;
1759 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1760 		return false;
1761 	return true;
1762 }
1763 
1764 /*
1765  * Free pages to buddy allocator. Try to free aligned pages in
1766  * pageblock_nr_pages sizes.
1767  */
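/*
 * Illustrative walk-through (not from the original source), assuming
 * pageblock_nr_pages == 512 on a configuration that allows holes within
 * pageblocks: for pfns 0..1023 with pfn 700 invalid, the loop below frees
 * [0, 512) when it reaches the boundary at pfn 512, frees [512, 700) when
 * it hits the hole, and the trailing call frees [701, 1024).
 */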
1768 static void __init deferred_free_pages(unsigned long pfn,
1769 				       unsigned long end_pfn)
1770 {
1771 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1772 	unsigned long nr_free = 0;
1773 
1774 	for (; pfn < end_pfn; pfn++) {
1775 		if (!deferred_pfn_valid(pfn)) {
1776 			deferred_free_range(pfn - nr_free, nr_free);
1777 			nr_free = 0;
1778 		} else if (!(pfn & nr_pgmask)) {
1779 			deferred_free_range(pfn - nr_free, nr_free);
1780 			nr_free = 1;
1781 		} else {
1782 			nr_free++;
1783 		}
1784 	}
1785 	/* Free the last block of pages to allocator */
1786 	deferred_free_range(pfn - nr_free, nr_free);
1787 }
1788 
1789 /*
1790  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1791  * by performing them only once every pageblock_nr_pages.
1792  * Return number of pages initialized.
1793  */
1794 static unsigned long  __init deferred_init_pages(struct zone *zone,
1795 						 unsigned long pfn,
1796 						 unsigned long end_pfn)
1797 {
1798 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1799 	int nid = zone_to_nid(zone);
1800 	unsigned long nr_pages = 0;
1801 	int zid = zone_idx(zone);
1802 	struct page *page = NULL;
1803 
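	/*
	 * pfn_to_page() is redone only after a hole or at a pageblock
	 * boundary; in between, the memmap for a single pageblock is
	 * contiguous, so advancing the cached pointer with page++ suffices.
	 */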
1804 	for (; pfn < end_pfn; pfn++) {
1805 		if (!deferred_pfn_valid(pfn)) {
1806 			page = NULL;
1807 			continue;
1808 		} else if (!page || !(pfn & nr_pgmask)) {
1809 			page = pfn_to_page(pfn);
1810 		} else {
1811 			page++;
1812 		}
1813 		__init_single_page(page, pfn, zid, nid);
1814 		nr_pages++;
1815 	}
1816 	return nr_pages;
1817 }
1818 
1819 /*
1820  * This function is meant to pre-load the iterator for the zone init.
1821  * Specifically it walks through the ranges until we are caught up to the
1822  * first_init_pfn value and exits there. If we never encounter the value we
1823  * return false indicating there are no valid ranges left.
1824  */
1825 static bool __init
1826 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1827 				    unsigned long *spfn, unsigned long *epfn,
1828 				    unsigned long first_init_pfn)
1829 {
1830 	u64 j;
1831 
1832 	/*
1833 	 * Start out by walking through the ranges in this zone that have
1834 	 * already been initialized. We don't need to do anything with them
1835 	 * so we just need to flush them out of the system.
1836 	 */
1837 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1838 		if (*epfn <= first_init_pfn)
1839 			continue;
1840 		if (*spfn < first_init_pfn)
1841 			*spfn = first_init_pfn;
1842 		*i = j;
1843 		return true;
1844 	}
1845 
1846 	return false;
1847 }
1848 
1849 /*
1850  * Initialize and free pages. We do it in two loops: first we initialize
1851  * struct page, then free to buddy allocator, because while we are
1852  * freeing pages we can access pages that are ahead (computing buddy
1853  * page in __free_one_page()).
1854  *
1855  * In order to try and keep some memory in the cache we have the loop
1856  * broken along max page order boundaries. This way we will not cause
1857  * any issues with the buddy page computation.
1858  */
1859 static unsigned long __init
1860 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1861 		       unsigned long *end_pfn)
1862 {
1863 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1864 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1865 	unsigned long nr_pages = 0;
1866 	u64 j = *i;
1867 
1868 	/* First we loop through and initialize the page values */
1869 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1870 		unsigned long t;
1871 
1872 		if (mo_pfn <= *start_pfn)
1873 			break;
1874 
1875 		t = min(mo_pfn, *end_pfn);
1876 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1877 
1878 		if (mo_pfn < *end_pfn) {
1879 			*start_pfn = mo_pfn;
1880 			break;
1881 		}
1882 	}
1883 
1884 	/* Reset values and now loop through freeing pages as needed */
1885 	swap(j, *i);
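	/*
	 * After the swap, *i holds the iterator position reached by the init
	 * loop above (so the caller continues from there), while the local
	 * copy j re-walks the ranges from the saved starting position so the
	 * free loop below covers the pfns that were just initialized.
	 */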
1886 
1887 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1888 		unsigned long t;
1889 
1890 		if (mo_pfn <= spfn)
1891 			break;
1892 
1893 		t = min(mo_pfn, epfn);
1894 		deferred_free_pages(spfn, t);
1895 
1896 		if (mo_pfn <= epfn)
1897 			break;
1898 	}
1899 
1900 	return nr_pages;
1901 }
1902 
1903 static void __init
1904 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1905 			   void *arg)
1906 {
1907 	unsigned long spfn, epfn;
1908 	struct zone *zone = arg;
1909 	u64 i;
1910 
1911 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1912 
1913 	/*
1914 	 * Initialize and free pages in MAX_ORDER sized increments so that we
1915 	 * can avoid introducing any issues with the buddy allocator.
1916 	 */
1917 	while (spfn < end_pfn) {
1918 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
1919 		cond_resched();
1920 	}
1921 }
1922 
1923 /* An arch may override for more concurrency. */
1924 __weak int __init
1925 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1926 {
1927 	return 1;
1928 }
1929 
1930 /* Initialise remaining memory on a node */
1931 static int __init deferred_init_memmap(void *data)
1932 {
1933 	pg_data_t *pgdat = data;
1934 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1935 	unsigned long spfn = 0, epfn = 0;
1936 	unsigned long first_init_pfn, flags;
1937 	unsigned long start = jiffies;
1938 	struct zone *zone;
1939 	int zid, max_threads;
1940 	u64 i;
1941 
1942 	/* Bind memory initialisation thread to a local node if possible */
1943 	if (!cpumask_empty(cpumask))
1944 		set_cpus_allowed_ptr(current, cpumask);
1945 
1946 	pgdat_resize_lock(pgdat, &flags);
1947 	first_init_pfn = pgdat->first_deferred_pfn;
1948 	if (first_init_pfn == ULONG_MAX) {
1949 		pgdat_resize_unlock(pgdat, &flags);
1950 		pgdat_init_report_one_done();
1951 		return 0;
1952 	}
1953 
1954 	/* Sanity check boundaries */
1955 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1956 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1957 	pgdat->first_deferred_pfn = ULONG_MAX;
1958 
1959 	/*
1960 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
1961 	 * interrupt thread must allocate this early in boot, the zone must be
1962 	 * pre-grown prior to the start of deferred page initialization.
1963 	 */
1964 	pgdat_resize_unlock(pgdat, &flags);
1965 
1966 	/* Only the highest zone is deferred so find it */
1967 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1968 		zone = pgdat->node_zones + zid;
1969 		if (first_init_pfn < zone_end_pfn(zone))
1970 			break;
1971 	}
1972 
1973 	/* If the zone is empty somebody else may have cleared out the zone */
1974 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1975 						 first_init_pfn))
1976 		goto zone_empty;
1977 
1978 	max_threads = deferred_page_init_max_threads(cpumask);
1979 
1980 	while (spfn < epfn) {
1981 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
1982 		struct padata_mt_job job = {
1983 			.thread_fn   = deferred_init_memmap_chunk,
1984 			.fn_arg      = zone,
1985 			.start       = spfn,
1986 			.size        = epfn_align - spfn,
1987 			.align       = PAGES_PER_SECTION,
1988 			.min_chunk   = PAGES_PER_SECTION,
1989 			.max_threads = max_threads,
1990 		};
1991 
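		/*
		 * padata splits [spfn, epfn_align) into section-aligned
		 * chunks and runs deferred_init_memmap_chunk() on up to
		 * max_threads workers.
		 */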
1992 		padata_do_multithreaded(&job);
1993 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1994 						    epfn_align);
1995 	}
1996 zone_empty:
1997 	/* Sanity check that the next zone really is unpopulated */
1998 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1999 
2000 	pr_info("node %d deferred pages initialised in %ums\n",
2001 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2002 
2003 	pgdat_init_report_one_done();
2004 	return 0;
2005 }
2006 
2007 /*
2008  * If this zone has deferred pages, try to grow it by initializing enough
2009  * deferred pages to satisfy the allocation specified by order, rounded up to
2010  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2011  * of SECTION_SIZE bytes by initializing struct pages in increments of
2012  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2013  *
2014  * Return true when zone was grown, otherwise return false. We return true even
2015  * when we grow less than requested, to let the caller decide if there are
2016  * enough pages to satisfy the allocation.
2017  *
2018  * Note: We use noinline because this function is needed only during boot, and
2019  * it is called from a __ref function _deferred_grow_zone. This way we are
2020  * making sure that it is not inlined into permanent text section.
2021  */
2022 static noinline bool __init
2023 deferred_grow_zone(struct zone *zone, unsigned int order)
2024 {
2025 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2026 	pg_data_t *pgdat = zone->zone_pgdat;
2027 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2028 	unsigned long spfn, epfn, flags;
2029 	unsigned long nr_pages = 0;
2030 	u64 i;
2031 
2032 	/* Only the last zone may have deferred pages */
2033 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2034 		return false;
2035 
2036 	pgdat_resize_lock(pgdat, &flags);
2037 
2038 	/*
2039 	 * If someone grew this zone while we were waiting for spinlock, return
2040 	 * true, as there might be enough pages already.
2041 	 */
2042 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2043 		pgdat_resize_unlock(pgdat, &flags);
2044 		return true;
2045 	}
2046 
2047 	/* If the zone is empty somebody else may have cleared out the zone */
2048 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2049 						 first_deferred_pfn)) {
2050 		pgdat->first_deferred_pfn = ULONG_MAX;
2051 		pgdat_resize_unlock(pgdat, &flags);
2052 		/* Retry only once. */
2053 		return first_deferred_pfn != ULONG_MAX;
2054 	}
2055 
2056 	/*
2057 	 * Initialize and free pages in MAX_ORDER sized increments so
2058 	 * that we can avoid introducing any issues with the buddy
2059 	 * allocator.
2060 	 */
2061 	while (spfn < epfn) {
2062 		/* update our first deferred PFN for this section */
2063 		first_deferred_pfn = spfn;
2064 
2065 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2066 		touch_nmi_watchdog();
2067 
2068 		/* We should only stop along section boundaries */
2069 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2070 			continue;
2071 
2072 		/* If our quota has been met we can stop here */
2073 		if (nr_pages >= nr_pages_needed)
2074 			break;
2075 	}
2076 
2077 	pgdat->first_deferred_pfn = spfn;
2078 	pgdat_resize_unlock(pgdat, &flags);
2079 
2080 	return nr_pages > 0;
2081 }
2082 
2083 /*
2084  * deferred_grow_zone() is __init, but it is called from
2085  * get_page_from_freelist() during early boot until deferred_pages permanently
2086  * disables this call. This is why we have the __ref wrapper, to avoid a
2087  * section mismatch warning and to allow the __init function body to be freed.
2088  */
2089 static bool __ref
2090 _deferred_grow_zone(struct zone *zone, unsigned int order)
2091 {
2092 	return deferred_grow_zone(zone, order);
2093 }
2094 
2095 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2096 
2097 void __init page_alloc_init_late(void)
2098 {
2099 	struct zone *zone;
2100 	int nid;
2101 
2102 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2103 
2104 	/* There will be num_node_state(N_MEMORY) threads */
2105 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2106 	for_each_node_state(nid, N_MEMORY) {
2107 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2108 	}
2109 
2110 	/* Block until all are initialised */
2111 	wait_for_completion(&pgdat_init_all_done_comp);
2112 
2113 	/*
2114 	 * The number of managed pages has changed due to the initialisation
2115 	 * so the pcpu batch and high limits needs to be updated or the limits
2116 	 * will be artificially small.
2117 	 */
2118 	for_each_populated_zone(zone)
2119 		zone_pcp_update(zone);
2120 
2121 	/*
2122 	 * We initialized the rest of the deferred pages.  Permanently disable
2123 	 * on-demand struct page initialization.
2124 	 */
2125 	static_branch_disable(&deferred_pages);
2126 
2127 	/* Reinit limits that are based on free pages after the kernel is up */
2128 	files_maxfiles_init();
2129 #endif
2130 
2131 	buffer_init();
2132 
2133 	/* Discard memblock private memory */
2134 	memblock_discard();
2135 
2136 	for_each_node_state(nid, N_MEMORY)
2137 		shuffle_free_memory(NODE_DATA(nid));
2138 
2139 	for_each_populated_zone(zone)
2140 		set_zone_contiguous(zone);
2141 }
2142 
2143 #ifdef CONFIG_CMA
2144 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2145 void __init init_cma_reserved_pageblock(struct page *page)
2146 {
2147 	unsigned i = pageblock_nr_pages;
2148 	struct page *p = page;
2149 
2150 	do {
2151 		__ClearPageReserved(p);
2152 		set_page_count(p, 0);
2153 	} while (++p, --i);
2154 
2155 	set_pageblock_migratetype(page, MIGRATE_CMA);
2156 
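	/*
	 * On configurations where a pageblock is larger than the largest
	 * buddy order, it cannot be freed as a single buddy page and is
	 * handed back in MAX_ORDER - 1 sized pieces instead.
	 */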
2157 	if (pageblock_order >= MAX_ORDER) {
2158 		i = pageblock_nr_pages;
2159 		p = page;
2160 		do {
2161 			set_page_refcounted(p);
2162 			__free_pages(p, MAX_ORDER - 1);
2163 			p += MAX_ORDER_NR_PAGES;
2164 		} while (i -= MAX_ORDER_NR_PAGES);
2165 	} else {
2166 		set_page_refcounted(page);
2167 		__free_pages(page, pageblock_order);
2168 	}
2169 
2170 	adjust_managed_page_count(page, pageblock_nr_pages);
2171 	page_zone(page)->cma_pages += pageblock_nr_pages;
2172 }
2173 #endif
2174 
2175 /*
2176  * The order of subdivision here is critical for the IO subsystem.
2177  * Please do not alter this order without good reasons and regression
2178  * testing. Specifically, as large blocks of memory are subdivided,
2179  * the order in which smaller blocks are delivered depends on the order
2180  * they're subdivided in this function. This is the primary factor
2181  * influencing the order in which pages are delivered to the IO
2182  * subsystem according to empirical testing, and this is also justified
2183  * by considering the behavior of a buddy system containing a single
2184  * large block of memory acted on by a series of small allocations.
2185  * This behavior is a critical factor in sglist merging's success.
2186  *
2187  * -- nyc
2188  */
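/*
 * Illustrative example (not from the original source): expanding an order-5
 * free page to satisfy an order-2 request (low = 2, high = 5) puts page[16]
 * on the order-4 free list, page[8] on the order-3 list and page[4] on the
 * order-2 list (debug guard pages aside), leaving page[0..3] for the caller.
 */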
2189 static inline void expand(struct zone *zone, struct page *page,
2190 	int low, int high, int migratetype)
2191 {
2192 	unsigned long size = 1 << high;
2193 
2194 	while (high > low) {
2195 		high--;
2196 		size >>= 1;
2197 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2198 
2199 		/*
2200 		 * Mark as guard page(s): this allows the pages to be
2201 		 * merged back into the allocator when the buddy is freed.
2202 		 * The corresponding page table entries are not touched,
2203 		 * so the pages stay not present in the virtual address space.
2204 		 */
2205 		if (set_page_guard(zone, &page[size], high, migratetype))
2206 			continue;
2207 
2208 		add_to_free_list(&page[size], zone, high, migratetype);
2209 		set_buddy_order(&page[size], high);
2210 	}
2211 }
2212 
2213 static void check_new_page_bad(struct page *page)
2214 {
2215 	if (unlikely(page->flags & __PG_HWPOISON)) {
2216 		/* Don't complain about hwpoisoned pages */
2217 		page_mapcount_reset(page); /* remove PageBuddy */
2218 		return;
2219 	}
2220 
2221 	bad_page(page,
2222 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2223 }
2224 
2225 /*
2226  * This page is about to be returned from the page allocator
2227  */
2228 static inline int check_new_page(struct page *page)
2229 {
2230 	if (likely(page_expected_state(page,
2231 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2232 		return 0;
2233 
2234 	check_new_page_bad(page);
2235 	return 1;
2236 }
2237 
2238 #ifdef CONFIG_DEBUG_VM
2239 /*
2240  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2241  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2242  * also checked when pcp lists are refilled from the free lists.
2243  */
2244 static inline bool check_pcp_refill(struct page *page)
2245 {
2246 	if (debug_pagealloc_enabled_static())
2247 		return check_new_page(page);
2248 	else
2249 		return false;
2250 }
2251 
2252 static inline bool check_new_pcp(struct page *page)
2253 {
2254 	return check_new_page(page);
2255 }
2256 #else
2257 /*
2258  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2259  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2260  * enabled, they are also checked when being allocated from the pcp lists.
2261  */
2262 static inline bool check_pcp_refill(struct page *page)
2263 {
2264 	return check_new_page(page);
2265 }
2266 static inline bool check_new_pcp(struct page *page)
2267 {
2268 	if (debug_pagealloc_enabled_static())
2269 		return check_new_page(page);
2270 	else
2271 		return false;
2272 }
2273 #endif /* CONFIG_DEBUG_VM */
2274 
2275 static bool check_new_pages(struct page *page, unsigned int order)
2276 {
2277 	int i;
2278 	for (i = 0; i < (1 << order); i++) {
2279 		struct page *p = page + i;
2280 
2281 		if (unlikely(check_new_page(p)))
2282 			return true;
2283 	}
2284 
2285 	return false;
2286 }
2287 
2288 inline void post_alloc_hook(struct page *page, unsigned int order,
2289 				gfp_t gfp_flags)
2290 {
2291 	set_page_private(page, 0);
2292 	set_page_refcounted(page);
2293 
2294 	arch_alloc_page(page, order);
2295 	debug_pagealloc_map_pages(page, 1 << order);
2296 	kasan_alloc_pages(page, order);
2297 	kernel_unpoison_pages(page, 1 << order);
2298 	set_page_owner(page, order, gfp_flags);
2299 
2300 	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
2301 		kernel_init_free_pages(page, 1 << order);
2302 }
2303 
2304 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2305 							unsigned int alloc_flags)
2306 {
2307 	post_alloc_hook(page, order, gfp_flags);
2308 
2309 	if (order && (gfp_flags & __GFP_COMP))
2310 		prep_compound_page(page, order);
2311 
2312 	/*
2313 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2314 	 * allocate the page. The expectation is that the caller is taking
2315 	 * steps that will free more memory. The caller should avoid the page
2316 	 * being used for !PFMEMALLOC purposes.
2317 	 */
2318 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2319 		set_page_pfmemalloc(page);
2320 	else
2321 		clear_page_pfmemalloc(page);
2322 }
2323 
2324 /*
2325  * Go through the free lists for the given migratetype and remove
2326  * the smallest available page from the freelists
2327  */
2328 static __always_inline
2329 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2330 						int migratetype)
2331 {
2332 	unsigned int current_order;
2333 	struct free_area *area;
2334 	struct page *page;
2335 
2336 	/* Find a page of the appropriate size in the preferred list */
2337 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2338 		area = &(zone->free_area[current_order]);
2339 		page = get_page_from_free_area(area, migratetype);
2340 		if (!page)
2341 			continue;
2342 		del_page_from_free_list(page, zone, current_order);
2343 		expand(zone, page, order, current_order, migratetype);
2344 		set_pcppage_migratetype(page, migratetype);
2345 		return page;
2346 	}
2347 
2348 	return NULL;
2349 }
2350 
2351 
2352 /*
2353  * This array describes the order in which free lists are fallen back to
2354  * when the free lists for the desired migratetype are depleted.
2355  */
2356 static int fallbacks[MIGRATE_TYPES][3] = {
2357 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2358 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2359 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2360 #ifdef CONFIG_CMA
2361 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2362 #endif
2363 #ifdef CONFIG_MEMORY_ISOLATION
2364 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2365 #endif
2366 };
2367 
2368 #ifdef CONFIG_CMA
2369 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2370 					unsigned int order)
2371 {
2372 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2373 }
2374 #else
2375 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2376 					unsigned int order) { return NULL; }
2377 #endif
2378 
2379 /*
2380  * Move the free pages in a range to the freelist tail of the requested type.
2381  * Note that start_page and end_page are not aligned on a pageblock
2382  * boundary. If alignment is required, use move_freepages_block()
2383  */
2384 static int move_freepages(struct zone *zone,
2385 			  struct page *start_page, struct page *end_page,
2386 			  int migratetype, int *num_movable)
2387 {
2388 	struct page *page;
2389 	unsigned int order;
2390 	int pages_moved = 0;
2391 
2392 	for (page = start_page; page <= end_page;) {
2393 		if (!pfn_valid_within(page_to_pfn(page))) {
2394 			page++;
2395 			continue;
2396 		}
2397 
2398 		if (!PageBuddy(page)) {
2399 			/*
2400 			 * We assume that pages that could be isolated for
2401 			 * migration are movable. But we don't actually try
2402 			 * isolating, as that would be expensive.
2403 			 */
2404 			if (num_movable &&
2405 					(PageLRU(page) || __PageMovable(page)))
2406 				(*num_movable)++;
2407 
2408 			page++;
2409 			continue;
2410 		}
2411 
2412 		/* Make sure we are not inadvertently changing nodes */
2413 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2414 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2415 
2416 		order = buddy_order(page);
2417 		move_to_free_list(page, zone, order, migratetype);
2418 		page += 1 << order;
2419 		pages_moved += 1 << order;
2420 	}
2421 
2422 	return pages_moved;
2423 }
2424 
2425 int move_freepages_block(struct zone *zone, struct page *page,
2426 				int migratetype, int *num_movable)
2427 {
2428 	unsigned long start_pfn, end_pfn;
2429 	struct page *start_page, *end_page;
2430 
2431 	if (num_movable)
2432 		*num_movable = 0;
2433 
2434 	start_pfn = page_to_pfn(page);
2435 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
2436 	start_page = pfn_to_page(start_pfn);
2437 	end_page = start_page + pageblock_nr_pages - 1;
2438 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2439 
2440 	/* Do not cross zone boundaries */
2441 	if (!zone_spans_pfn(zone, start_pfn))
2442 		start_page = page;
2443 	if (!zone_spans_pfn(zone, end_pfn))
2444 		return 0;
2445 
2446 	return move_freepages(zone, start_page, end_page, migratetype,
2447 								num_movable);
2448 }
2449 
2450 static void change_pageblock_range(struct page *pageblock_page,
2451 					int start_order, int migratetype)
2452 {
2453 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2454 
2455 	while (nr_pageblocks--) {
2456 		set_pageblock_migratetype(pageblock_page, migratetype);
2457 		pageblock_page += pageblock_nr_pages;
2458 	}
2459 }
2460 
2461 /*
2462  * When we are falling back to another migratetype during allocation, try to
2463  * steal extra free pages from the same pageblocks to satisfy further
2464  * allocations, instead of polluting multiple pageblocks.
2465  *
2466  * If we are stealing a relatively large buddy page, it is likely there will
2467  * be more free pages in the pageblock, so try to steal them all. For
2468  * reclaimable and unmovable allocations, we steal regardless of page size,
2469  * as fragmentation caused by those allocations polluting movable pageblocks
2470  * is worse than movable allocations stealing from unmovable and reclaimable
2471  * pageblocks.
2472  */
2473 static bool can_steal_fallback(unsigned int order, int start_mt)
2474 {
2475 	/*
2476 	 * This order check is intentionally kept even though the next check
2477 	 * uses a more relaxed order threshold. The reason is that we can
2478 	 * actually steal the whole pageblock if this condition is met,
2479 	 * whereas the check below does not guarantee it and is just a
2480 	 * heuristic, so it could be changed at any time.
2481 	 */
2482 	if (order >= pageblock_order)
2483 		return true;
2484 
2485 	if (order >= pageblock_order / 2 ||
2486 		start_mt == MIGRATE_RECLAIMABLE ||
2487 		start_mt == MIGRATE_UNMOVABLE ||
2488 		page_group_by_mobility_disabled)
2489 		return true;
2490 
2491 	return false;
2492 }
2493 
2494 static inline bool boost_watermark(struct zone *zone)
2495 {
2496 	unsigned long max_boost;
2497 
2498 	if (!watermark_boost_factor)
2499 		return false;
2500 	/*
2501 	 * Don't bother in zones that are unlikely to produce results.
2502 	 * On small machines, including kdump capture kernels running
2503 	 * in a small area, boosting the watermark can cause an out of
2504 	 * memory situation immediately.
2505 	 */
2506 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2507 		return false;
2508 
2509 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2510 			watermark_boost_factor, 10000);
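	/*
	 * Illustrative example: with a watermark_boost_factor of 15000 (the
	 * usual default), max_boost works out to 150% of the high watermark,
	 * which is how far the boost below is allowed to grow.
	 */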
2511 
2512 	/*
2513 	 * The high watermark may be uninitialised if fragmentation occurs
2514 	 * very early in boot, so do not boost. We do not fall
2515 	 * through and boost by pageblock_nr_pages because failing
2516 	 * allocations that early means that reclaim is not going
2517 	 * to help, and it may even be impossible to reclaim the
2518 	 * boosted watermark, resulting in a hang.
2519 	 */
2520 	if (!max_boost)
2521 		return false;
2522 
2523 	max_boost = max(pageblock_nr_pages, max_boost);
2524 
2525 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2526 		max_boost);
2527 
2528 	return true;
2529 }
2530 
2531 /*
2532  * This function implements actual steal behaviour. If order is large enough,
2533  * we can steal whole pageblock. If not, we first move freepages in this
2534  * pageblock to our migratetype and determine how many already-allocated pages
2535  * are there in the pageblock with a compatible migratetype. If at least half
2536  * of pages are free or compatible, we can change migratetype of the pageblock
2537  * itself, so pages freed in the future will be put on the correct free list.
2538  */
2539 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2540 		unsigned int alloc_flags, int start_type, bool whole_block)
2541 {
2542 	unsigned int current_order = buddy_order(page);
2543 	int free_pages, movable_pages, alike_pages;
2544 	int old_block_type;
2545 
2546 	old_block_type = get_pageblock_migratetype(page);
2547 
2548 	/*
2549 	 * This can happen due to races and we want to prevent broken
2550 	 * highatomic accounting.
2551 	 */
2552 	if (is_migrate_highatomic(old_block_type))
2553 		goto single_page;
2554 
2555 	/* Take ownership for orders >= pageblock_order */
2556 	if (current_order >= pageblock_order) {
2557 		change_pageblock_range(page, current_order, start_type);
2558 		goto single_page;
2559 	}
2560 
2561 	/*
2562 	 * Boost watermarks to increase reclaim pressure to reduce the
2563 	 * likelihood of future fallbacks. Wake kswapd now as the node
2564 	 * may be balanced overall and kswapd will not wake naturally.
2565 	 */
2566 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2567 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2568 
2569 	/* We are not allowed to try stealing from the whole block */
2570 	if (!whole_block)
2571 		goto single_page;
2572 
2573 	free_pages = move_freepages_block(zone, page, start_type,
2574 						&movable_pages);
2575 	/*
2576 	 * Determine how many pages are compatible with our allocation.
2577 	 * For movable allocation, it's the number of movable pages which
2578 	 * we just obtained. For other types it's a bit more tricky.
2579 	 */
2580 	if (start_type == MIGRATE_MOVABLE) {
2581 		alike_pages = movable_pages;
2582 	} else {
2583 		/*
2584 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2585 		 * to MOVABLE pageblock, consider all non-movable pages as
2586 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2587 		 * vice versa, be conservative since we can't distinguish the
2588 		 * exact migratetype of non-movable pages.
2589 		 */
2590 		if (old_block_type == MIGRATE_MOVABLE)
2591 			alike_pages = pageblock_nr_pages
2592 						- (free_pages + movable_pages);
2593 		else
2594 			alike_pages = 0;
2595 	}
2596 
2597 	/* moving whole block can fail due to zone boundary conditions */
2598 	if (!free_pages)
2599 		goto single_page;
2600 
2601 	/*
2602 	 * If a sufficient number of pages in the block are either free or of
2603 	 * comparable migratability as our allocation, claim the whole block.
2604 	 */
2605 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2606 			page_group_by_mobility_disabled)
2607 		set_pageblock_migratetype(page, start_type);
2608 
2609 	return;
2610 
2611 single_page:
2612 	move_to_free_list(page, zone, current_order, start_type);
2613 }
2614 
2615 /*
2616  * Check whether there is a suitable fallback freepage with requested order.
2617  * If only_stealable is true, this function returns fallback_mt only if
2618  * we can steal other freepages all together. This would help to reduce
2619  * fragmentation due to mixed migratetype pages in one pageblock.
2620  */
2621 int find_suitable_fallback(struct free_area *area, unsigned int order,
2622 			int migratetype, bool only_stealable, bool *can_steal)
2623 {
2624 	int i;
2625 	int fallback_mt;
2626 
2627 	if (area->nr_free == 0)
2628 		return -1;
2629 
2630 	*can_steal = false;
2631 	for (i = 0;; i++) {
2632 		fallback_mt = fallbacks[migratetype][i];
2633 		if (fallback_mt == MIGRATE_TYPES)
2634 			break;
2635 
2636 		if (free_area_empty(area, fallback_mt))
2637 			continue;
2638 
2639 		if (can_steal_fallback(order, migratetype))
2640 			*can_steal = true;
2641 
2642 		if (!only_stealable)
2643 			return fallback_mt;
2644 
2645 		if (*can_steal)
2646 			return fallback_mt;
2647 	}
2648 
2649 	return -1;
2650 }
2651 
2652 /*
2653  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2654  * there are no empty page blocks that contain a page with a suitable order
2655  */
2656 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2657 				unsigned int alloc_order)
2658 {
2659 	int mt;
2660 	unsigned long max_managed, flags;
2661 
2662 	/*
2663 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2664 	 * The check is race-prone but harmless.
2665 	 */
2666 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2667 	if (zone->nr_reserved_highatomic >= max_managed)
2668 		return;
2669 
2670 	spin_lock_irqsave(&zone->lock, flags);
2671 
2672 	/* Recheck the nr_reserved_highatomic limit under the lock */
2673 	if (zone->nr_reserved_highatomic >= max_managed)
2674 		goto out_unlock;
2675 
2676 	/* Yoink! */
2677 	mt = get_pageblock_migratetype(page);
2678 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2679 	    && !is_migrate_cma(mt)) {
2680 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2681 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2682 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2683 	}
2684 
2685 out_unlock:
2686 	spin_unlock_irqrestore(&zone->lock, flags);
2687 }
2688 
2689 /*
2690  * Used when an allocation is about to fail under memory pressure. This
2691  * potentially hurts the reliability of high-order allocations when under
2692  * intense memory pressure but failed atomic allocations should be easier
2693  * to recover from than an OOM.
2694  *
2695  * If @force is true, try to unreserve a pageblock even though highatomic
2696  * pageblock is exhausted.
2697  */
2698 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2699 						bool force)
2700 {
2701 	struct zonelist *zonelist = ac->zonelist;
2702 	unsigned long flags;
2703 	struct zoneref *z;
2704 	struct zone *zone;
2705 	struct page *page;
2706 	int order;
2707 	bool ret;
2708 
2709 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2710 								ac->nodemask) {
2711 		/*
2712 		 * Preserve at least one pageblock unless memory pressure
2713 		 * is really high.
2714 		 */
2715 		if (!force && zone->nr_reserved_highatomic <=
2716 					pageblock_nr_pages)
2717 			continue;
2718 
2719 		spin_lock_irqsave(&zone->lock, flags);
2720 		for (order = 0; order < MAX_ORDER; order++) {
2721 			struct free_area *area = &(zone->free_area[order]);
2722 
2723 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2724 			if (!page)
2725 				continue;
2726 
2727 			/*
2728 			 * In the page freeing path, migratetype change is racy so
2729 			 * we can encounter several free pages in a pageblock
2730 			 * in this loop although we changed the pageblock type
2731 			 * from highatomic to ac->migratetype. So we should
2732 			 * adjust the count once.
2733 			 */
2734 			if (is_migrate_highatomic_page(page)) {
2735 				/*
2736 				 * It should never happen but changes to
2737 				 * locking could inadvertently allow a per-cpu
2738 				 * drain to add pages to MIGRATE_HIGHATOMIC
2739 				 * while unreserving so be safe and watch for
2740 				 * underflows.
2741 				 */
2742 				zone->nr_reserved_highatomic -= min(
2743 						pageblock_nr_pages,
2744 						zone->nr_reserved_highatomic);
2745 			}
2746 
2747 			/*
2748 			 * Convert to ac->migratetype and avoid the normal
2749 			 * pageblock stealing heuristics. Minimally, the caller
2750 			 * is doing the work and needs the pages. More
2751 			 * importantly, if the block was always converted to
2752 			 * MIGRATE_UNMOVABLE or another type then the number
2753 			 * of pageblocks that cannot be completely freed
2754 			 * may increase.
2755 			 */
2756 			set_pageblock_migratetype(page, ac->migratetype);
2757 			ret = move_freepages_block(zone, page, ac->migratetype,
2758 									NULL);
2759 			if (ret) {
2760 				spin_unlock_irqrestore(&zone->lock, flags);
2761 				return ret;
2762 			}
2763 		}
2764 		spin_unlock_irqrestore(&zone->lock, flags);
2765 	}
2766 
2767 	return false;
2768 }
2769 
2770 /*
2771  * Try finding a free buddy page on the fallback list and put it on the free
2772  * list of requested migratetype, possibly along with other pages from the same
2773  * block, depending on fragmentation avoidance heuristics. Returns true if
2774  * fallback was found so that __rmqueue_smallest() can grab it.
2775  *
2776  * The use of signed ints for order and current_order is a deliberate
2777  * deviation from the rest of this file, to make the for loop
2778  * condition simpler.
2779  */
2780 static __always_inline bool
2781 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2782 						unsigned int alloc_flags)
2783 {
2784 	struct free_area *area;
2785 	int current_order;
2786 	int min_order = order;
2787 	struct page *page;
2788 	int fallback_mt;
2789 	bool can_steal;
2790 
2791 	/*
2792 	 * Do not steal pages from freelists belonging to other pageblocks
2793 	 * i.e. orders < pageblock_order. If there are no local zones free,
2794 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2795 	 */
2796 	if (alloc_flags & ALLOC_NOFRAGMENT)
2797 		min_order = pageblock_order;
2798 
2799 	/*
2800 	 * Find the largest available free page in the other list. This roughly
2801 	 * approximates finding the pageblock with the most free pages, which
2802 	 * would be too costly to do exactly.
2803 	 */
2804 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2805 				--current_order) {
2806 		area = &(zone->free_area[current_order]);
2807 		fallback_mt = find_suitable_fallback(area, current_order,
2808 				start_migratetype, false, &can_steal);
2809 		if (fallback_mt == -1)
2810 			continue;
2811 
2812 		/*
2813 		 * We cannot steal all free pages from the pageblock and the
2814 		 * requested migratetype is movable. In that case it's better to
2815 		 * steal and split the smallest available page instead of the
2816 		 * largest available page, because even if the next movable
2817 		 * allocation falls back into a different pageblock than this
2818 		 * one, it won't cause permanent fragmentation.
2819 		 */
2820 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2821 					&& current_order > order)
2822 			goto find_smallest;
2823 
2824 		goto do_steal;
2825 	}
2826 
2827 	return false;
2828 
2829 find_smallest:
2830 	for (current_order = order; current_order < MAX_ORDER;
2831 							current_order++) {
2832 		area = &(zone->free_area[current_order]);
2833 		fallback_mt = find_suitable_fallback(area, current_order,
2834 				start_migratetype, false, &can_steal);
2835 		if (fallback_mt != -1)
2836 			break;
2837 	}
2838 
2839 	/*
2840 	 * This should not happen - we already found a suitable fallback
2841 	 * when looking for the largest page.
2842 	 */
2843 	VM_BUG_ON(current_order == MAX_ORDER);
2844 
2845 do_steal:
2846 	page = get_page_from_free_area(area, fallback_mt);
2847 
2848 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2849 								can_steal);
2850 
2851 	trace_mm_page_alloc_extfrag(page, order, current_order,
2852 		start_migratetype, fallback_mt);
2853 
2854 	return true;
2855 
2856 }
2857 
2858 /*
2859  * Do the hard work of removing an element from the buddy allocator.
2860  * Call me with the zone->lock already held.
2861  */
2862 static __always_inline struct page *
2863 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2864 						unsigned int alloc_flags)
2865 {
2866 	struct page *page;
2867 
2868 	if (IS_ENABLED(CONFIG_CMA)) {
2869 		/*
2870 		 * Balance movable allocations between regular and CMA areas by
2871 		 * allocating from CMA when over half of the zone's free memory
2872 		 * is in the CMA area.
2873 		 */
2874 		if (alloc_flags & ALLOC_CMA &&
2875 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2876 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2877 			page = __rmqueue_cma_fallback(zone, order);
2878 			if (page)
2879 				goto out;
2880 		}
2881 	}
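	/*
	 * If __rmqueue_fallback() below steals pages into the requested
	 * migratetype's free lists, jumping back here lets
	 * __rmqueue_smallest() pick them up on the second pass.
	 */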
2882 retry:
2883 	page = __rmqueue_smallest(zone, order, migratetype);
2884 	if (unlikely(!page)) {
2885 		if (alloc_flags & ALLOC_CMA)
2886 			page = __rmqueue_cma_fallback(zone, order);
2887 
2888 		if (!page && __rmqueue_fallback(zone, order, migratetype,
2889 								alloc_flags))
2890 			goto retry;
2891 	}
2892 out:
2893 	if (page)
2894 		trace_mm_page_alloc_zone_locked(page, order, migratetype);
2895 	return page;
2896 }
2897 
2898 /*
2899  * Obtain a specified number of elements from the buddy allocator, all under
2900  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2901  * Returns the number of new pages which were placed at *list.
2902  */
2903 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2904 			unsigned long count, struct list_head *list,
2905 			int migratetype, unsigned int alloc_flags)
2906 {
2907 	int i, alloced = 0;
2908 
2909 	spin_lock(&zone->lock);
2910 	for (i = 0; i < count; ++i) {
2911 		struct page *page = __rmqueue(zone, order, migratetype,
2912 								alloc_flags);
2913 		if (unlikely(page == NULL))
2914 			break;
2915 
2916 		if (unlikely(check_pcp_refill(page)))
2917 			continue;
2918 
2919 		/*
2920 		 * Split buddy pages returned by expand() are received here in
2921 		 * physical page order. The page is added to the tail of the
2922 		 * caller's list. From the caller's perspective, the linked list
2923 		 * is ordered by page number under some conditions. This is
2924 		 * useful for IO devices that walk the list forward from the
2925 		 * head, and thus also in physical page order, and for IO
2926 		 * devices that can merge IO requests if the physical
2927 		 * pages are ordered properly.
2928 		 */
2929 		list_add_tail(&page->lru, list);
2930 		alloced++;
2931 		if (is_migrate_cma(get_pcppage_migratetype(page)))
2932 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2933 					      -(1 << order));
2934 	}
2935 
2936 	/*
2937 	 * i pages were removed from the buddy list even if some leak due
2938 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2939 	 * on i. Do not confuse with 'alloced' which is the number of
2940 	 * pages added to the pcp list.
2941 	 */
2942 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2943 	spin_unlock(&zone->lock);
2944 	return alloced;
2945 }
2946 
2947 #ifdef CONFIG_NUMA
2948 /*
2949  * Called from the vmstat counter updater to drain pagesets of this
2950  * currently executing processor on remote nodes after they have
2951  * expired.
2952  *
2953  * Note that this function must be called with the thread pinned to
2954  * a single processor.
2955  */
2956 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2957 {
2958 	unsigned long flags;
2959 	int to_drain, batch;
2960 
2961 	local_irq_save(flags);
2962 	batch = READ_ONCE(pcp->batch);
2963 	to_drain = min(pcp->count, batch);
2964 	if (to_drain > 0)
2965 		free_pcppages_bulk(zone, to_drain, pcp);
2966 	local_irq_restore(flags);
2967 }
2968 #endif
2969 
2970 /*
2971  * Drain pcplists of the indicated processor and zone.
2972  *
2973  * The processor must either be the current processor and the
2974  * thread pinned to the current processor or a processor that
2975  * is not online.
2976  */
2977 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2978 {
2979 	unsigned long flags;
2980 	struct per_cpu_pageset *pset;
2981 	struct per_cpu_pages *pcp;
2982 
2983 	local_irq_save(flags);
2984 	pset = per_cpu_ptr(zone->pageset, cpu);
2985 
2986 	pcp = &pset->pcp;
2987 	if (pcp->count)
2988 		free_pcppages_bulk(zone, pcp->count, pcp);
2989 	local_irq_restore(flags);
2990 }
2991 
2992 /*
2993  * Drain pcplists of all zones on the indicated processor.
2994  *
2995  * The processor must either be the current processor and the
2996  * thread pinned to the current processor or a processor that
2997  * is not online.
2998  */
2999 static void drain_pages(unsigned int cpu)
3000 {
3001 	struct zone *zone;
3002 
3003 	for_each_populated_zone(zone) {
3004 		drain_pages_zone(cpu, zone);
3005 	}
3006 }
3007 
3008 /*
3009  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3010  *
3011  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3012  * the single zone's pages.
3013  */
3014 void drain_local_pages(struct zone *zone)
3015 {
3016 	int cpu = smp_processor_id();
3017 
3018 	if (zone)
3019 		drain_pages_zone(cpu, zone);
3020 	else
3021 		drain_pages(cpu);
3022 }
3023 
3024 static void drain_local_pages_wq(struct work_struct *work)
3025 {
3026 	struct pcpu_drain *drain;
3027 
3028 	drain = container_of(work, struct pcpu_drain, work);
3029 
3030 	/*
3031 	 * drain_all_pages doesn't use proper cpu hotplug protection so
3032 	 * we can race with cpu offline when the WQ can move this from
3033 	 * a cpu pinned worker to an unbound one. We can operate on a different
3034 	 * cpu which is all right, but we also have to make sure we do not move
3035 	 * to a different one while draining.
3036 	 */
3037 	preempt_disable();
3038 	drain_local_pages(drain->zone);
3039 	preempt_enable();
3040 }
3041 
3042 /*
3043  * The implementation of drain_all_pages(), exposing an extra parameter to
3044  * drain on all cpus.
3045  *
3046  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3047  * not empty. The check for non-emptiness can however race with a free to
3048  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3049  * that need the guarantee that every CPU has drained can disable the
3050  * optimizing racy check.
3051  */
3052 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3053 {
3054 	int cpu;
3055 
3056 	/*
3057 	 * Allocate in the BSS so we won't require allocation in
3058 	 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3059 	 */
3060 	static cpumask_t cpus_with_pcps;
3061 
3062 	/*
3063 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
3064 	 * initialized.
3065 	 */
3066 	if (WARN_ON_ONCE(!mm_percpu_wq))
3067 		return;
3068 
3069 	/*
3070 	 * Do not drain if one is already in progress unless it's specific to
3071 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3072 	 * the drain to be complete when the call returns.
3073 	 */
3074 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3075 		if (!zone)
3076 			return;
3077 		mutex_lock(&pcpu_drain_mutex);
3078 	}
3079 
3080 	/*
3081 	 * We don't care about racing with CPU hotplug events
3082 	 * as the offline notification will cause the notified
3083 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
3084 	 * disables preemption as part of its processing.
3085 	 */
3086 	for_each_online_cpu(cpu) {
3087 		struct per_cpu_pageset *pcp;
3088 		struct zone *z;
3089 		bool has_pcps = false;
3090 
3091 		if (force_all_cpus) {
3092 			/*
3093 			 * The pcp.count check is racy, some callers need a
3094 			 * guarantee that no cpu is missed.
3095 			 */
3096 			has_pcps = true;
3097 		} else if (zone) {
3098 			pcp = per_cpu_ptr(zone->pageset, cpu);
3099 			if (pcp->pcp.count)
3100 				has_pcps = true;
3101 		} else {
3102 			for_each_populated_zone(z) {
3103 				pcp = per_cpu_ptr(z->pageset, cpu);
3104 				if (pcp->pcp.count) {
3105 					has_pcps = true;
3106 					break;
3107 				}
3108 			}
3109 		}
3110 
3111 		if (has_pcps)
3112 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3113 		else
3114 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3115 	}
3116 
3117 	for_each_cpu(cpu, &cpus_with_pcps) {
3118 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3119 
3120 		drain->zone = zone;
3121 		INIT_WORK(&drain->work, drain_local_pages_wq);
3122 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3123 	}
3124 	for_each_cpu(cpu, &cpus_with_pcps)
3125 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3126 
3127 	mutex_unlock(&pcpu_drain_mutex);
3128 }
3129 
3130 /*
3131  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3132  *
3133  * When zone parameter is non-NULL, spill just the single zone's pages.
3134  *
3135  * Note that this can be extremely slow as the draining happens in a workqueue.
3136  */
3137 void drain_all_pages(struct zone *zone)
3138 {
3139 	__drain_all_pages(zone, false);
3140 }
3141 
3142 #ifdef CONFIG_HIBERNATION
3143 
3144 /*
3145  * Touch the watchdog for every WD_PAGE_COUNT pages.
3146  */
3147 #define WD_PAGE_COUNT	(128*1024)
3148 
3149 void mark_free_pages(struct zone *zone)
3150 {
3151 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3152 	unsigned long flags;
3153 	unsigned int order, t;
3154 	struct page *page;
3155 
3156 	if (zone_is_empty(zone))
3157 		return;
3158 
3159 	spin_lock_irqsave(&zone->lock, flags);
3160 
3161 	max_zone_pfn = zone_end_pfn(zone);
3162 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3163 		if (pfn_valid(pfn)) {
3164 			page = pfn_to_page(pfn);
3165 
3166 			if (!--page_count) {
3167 				touch_nmi_watchdog();
3168 				page_count = WD_PAGE_COUNT;
3169 			}
3170 
3171 			if (page_zone(page) != zone)
3172 				continue;
3173 
3174 			if (!swsusp_page_is_forbidden(page))
3175 				swsusp_unset_page_free(page);
3176 		}
3177 
3178 	for_each_migratetype_order(order, t) {
3179 		list_for_each_entry(page,
3180 				&zone->free_area[order].free_list[t], lru) {
3181 			unsigned long i;
3182 
3183 			pfn = page_to_pfn(page);
3184 			for (i = 0; i < (1UL << order); i++) {
3185 				if (!--page_count) {
3186 					touch_nmi_watchdog();
3187 					page_count = WD_PAGE_COUNT;
3188 				}
3189 				swsusp_set_page_free(pfn_to_page(pfn + i));
3190 			}
3191 		}
3192 	}
3193 	spin_unlock_irqrestore(&zone->lock, flags);
3194 }
3195 #endif /* CONFIG_HIBERNATION */
3196 
3197 static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
3198 {
3199 	int migratetype;
3200 
3201 	if (!free_pcp_prepare(page))
3202 		return false;
3203 
3204 	migratetype = get_pfnblock_migratetype(page, pfn);
3205 	set_pcppage_migratetype(page, migratetype);
3206 	return true;
3207 }
3208 
3209 static void free_unref_page_commit(struct page *page, unsigned long pfn)
3210 {
3211 	struct zone *zone = page_zone(page);
3212 	struct per_cpu_pages *pcp;
3213 	int migratetype;
3214 
3215 	migratetype = get_pcppage_migratetype(page);
3216 	__count_vm_event(PGFREE);
3217 
3218 	/*
3219 	 * We only track unmovable, reclaimable and movable on pcp lists.
3220 	 * Free ISOLATE pages back to the allocator because they are being
3221 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
3222 	 * areas back if necessary. Otherwise, we may have to free
3223 	 * excessively into the page allocator
3224 	 */
3225 	if (migratetype >= MIGRATE_PCPTYPES) {
3226 		if (unlikely(is_migrate_isolate(migratetype))) {
3227 			free_one_page(zone, page, pfn, 0, migratetype,
3228 				      FPI_NONE);
3229 			return;
3230 		}
3231 		migratetype = MIGRATE_MOVABLE;
3232 	}
3233 
3234 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3235 	list_add(&page->lru, &pcp->lists[migratetype]);
3236 	pcp->count++;
3237 	if (pcp->count >= READ_ONCE(pcp->high))
3238 		free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
3239 }
3240 
3241 /*
3242  * Free a 0-order page
3243  */
3244 void free_unref_page(struct page *page)
3245 {
3246 	unsigned long flags;
3247 	unsigned long pfn = page_to_pfn(page);
3248 
3249 	if (!free_unref_page_prepare(page, pfn))
3250 		return;
3251 
3252 	local_irq_save(flags);
3253 	free_unref_page_commit(page, pfn);
3254 	local_irq_restore(flags);
3255 }
3256 
3257 /*
3258  * Free a list of 0-order pages
3259  */
3260 void free_unref_page_list(struct list_head *list)
3261 {
3262 	struct page *page, *next;
3263 	unsigned long flags, pfn;
3264 	int batch_count = 0;
3265 
3266 	/* Prepare pages for freeing */
3267 	list_for_each_entry_safe(page, next, list, lru) {
3268 		pfn = page_to_pfn(page);
3269 		if (!free_unref_page_prepare(page, pfn))
3270 			list_del(&page->lru);
3271 		set_page_private(page, pfn);
3272 	}
3273 
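	/*
	 * The pfn stashed in page_private above lets the loop below avoid a
	 * second page_to_pfn() conversion inside the IRQ-disabled section.
	 */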
3274 	local_irq_save(flags);
3275 	list_for_each_entry_safe(page, next, list, lru) {
3276 		unsigned long pfn = page_private(page);
3277 
3278 		set_page_private(page, 0);
3279 		trace_mm_page_free_batched(page);
3280 		free_unref_page_commit(page, pfn);
3281 
3282 		/*
3283 		 * Guard against excessive IRQ disabled times when we get
3284 		 * a large list of pages to free.
3285 		 */
3286 		if (++batch_count == SWAP_CLUSTER_MAX) {
3287 			local_irq_restore(flags);
3288 			batch_count = 0;
3289 			local_irq_save(flags);
3290 		}
3291 	}
3292 	local_irq_restore(flags);
3293 }
3294 
3295 /*
3296  * split_page takes a non-compound higher-order page, and splits it into
3297  * n (1<<order) sub-pages: page[0] .. page[n-1].
3298  * Each sub-page must be freed individually.
3299  *
3300  * Note: this is probably too low level an operation for use in drivers.
3301  * Please consult with lkml before using this in your driver.
3302  */
3303 void split_page(struct page *page, unsigned int order)
3304 {
3305 	int i;
3306 
3307 	VM_BUG_ON_PAGE(PageCompound(page), page);
3308 	VM_BUG_ON_PAGE(!page_count(page), page);
3309 
3310 	for (i = 1; i < (1 << order); i++)
3311 		set_page_refcounted(page + i);
3312 	split_page_owner(page, 1 << order);
3313 }
3314 EXPORT_SYMBOL_GPL(split_page);
3315 
3316 int __isolate_free_page(struct page *page, unsigned int order)
3317 {
3318 	unsigned long watermark;
3319 	struct zone *zone;
3320 	int mt;
3321 
3322 	BUG_ON(!PageBuddy(page));
3323 
3324 	zone = page_zone(page);
3325 	mt = get_pageblock_migratetype(page);
3326 
3327 	if (!is_migrate_isolate(mt)) {
3328 		/*
3329 		 * Obey watermarks as if the page was being allocated. We can
3330 		 * emulate a high-order watermark check with a raised order-0
3331 		 * watermark, because we already know our high-order page
3332 		 * exists.
3333 		 */
3334 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3335 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3336 			return 0;
3337 
3338 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3339 	}
3340 
3341 	/* Remove page from free list */
3342 
3343 	del_page_from_free_list(page, zone, order);
3344 
3345 	/*
3346 	 * Set the pageblock's migratetype to MIGRATE_MOVABLE if the isolated
3347 	 * page covers at least half of a pageblock.
3348 	 */
3349 	if (order >= pageblock_order - 1) {
3350 		struct page *endpage = page + (1 << order) - 1;
3351 		for (; page < endpage; page += pageblock_nr_pages) {
3352 			int mt = get_pageblock_migratetype(page);
3353 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3354 			    && !is_migrate_highatomic(mt))
3355 				set_pageblock_migratetype(page,
3356 							  MIGRATE_MOVABLE);
3357 		}
3358 	}
3359 
3361 	return 1UL << order;
3362 }
3363 
3364 /**
3365  * __putback_isolated_page - Return a now-isolated page back where we got it
3366  * @page: Page that was isolated
3367  * @order: Order of the isolated page
3368  * @mt: The page's pageblock's migratetype
3369  *
3370  * This function is meant to return a page pulled from the free lists via
3371  * __isolate_free_page back to the free lists they were pulled from.
3372  */
3373 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3374 {
3375 	struct zone *zone = page_zone(page);
3376 
3377 	/* zone lock should be held when this function is called */
3378 	lockdep_assert_held(&zone->lock);
3379 
3380 	/* Return isolated page to tail of freelist. */
3381 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3382 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3383 }
3384 
3385 /*
3386  * Update NUMA hit/miss statistics
3387  *
3388  * Must be called with interrupts disabled.
3389  */
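/*
 * NUMA_HIT/NUMA_MISS record whether the supplying zone is on the same node
 * as the preferred zone; NUMA_FOREIGN is charged to the preferred zone when
 * it is not. NUMA_LOCAL/NUMA_OTHER record whether the supplying zone is on
 * the node the caller is currently running on.
 */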
3390 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3391 {
3392 #ifdef CONFIG_NUMA
3393 	enum numa_stat_item local_stat = NUMA_LOCAL;
3394 
3395 	/* skip numa counters update if numa stats is disabled */
3396 	if (!static_branch_likely(&vm_numa_stat_key))
3397 		return;
3398 
3399 	if (zone_to_nid(z) != numa_node_id())
3400 		local_stat = NUMA_OTHER;
3401 
3402 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3403 		__inc_numa_state(z, NUMA_HIT);
3404 	else {
3405 		__inc_numa_state(z, NUMA_MISS);
3406 		__inc_numa_state(preferred_zone, NUMA_FOREIGN);
3407 	}
3408 	__inc_numa_state(z, local_stat);
3409 #endif
3410 }
3411 
3412 /* Remove page from the per-cpu list, caller must protect the list */
3413 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3414 			unsigned int alloc_flags,
3415 			struct per_cpu_pages *pcp,
3416 			struct list_head *list)
3417 {
3418 	struct page *page;
3419 
3420 	do {
3421 		if (list_empty(list)) {
3422 			pcp->count += rmqueue_bulk(zone, 0,
3423 					READ_ONCE(pcp->batch), list,
3424 					migratetype, alloc_flags);
3425 			if (unlikely(list_empty(list)))
3426 				return NULL;
3427 		}
3428 
3429 		page = list_first_entry(list, struct page, lru);
3430 		list_del(&page->lru);
3431 		pcp->count--;
3432 	} while (check_new_pcp(page));
3433 
3434 	return page;
3435 }
3436 
3437 /* Lock and remove page from the per-cpu list */
3438 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3439 			struct zone *zone, gfp_t gfp_flags,
3440 			int migratetype, unsigned int alloc_flags)
3441 {
3442 	struct per_cpu_pages *pcp;
3443 	struct list_head *list;
3444 	struct page *page;
3445 	unsigned long flags;
3446 
3447 	local_irq_save(flags);
3448 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3449 	list = &pcp->lists[migratetype];
3450 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3451 	if (page) {
3452 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3453 		zone_statistics(preferred_zone, zone);
3454 	}
3455 	local_irq_restore(flags);
3456 	return page;
3457 }
3458 
3459 /*
3460  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3461  */
3462 static inline
3463 struct page *rmqueue(struct zone *preferred_zone,
3464 			struct zone *zone, unsigned int order,
3465 			gfp_t gfp_flags, unsigned int alloc_flags,
3466 			int migratetype)
3467 {
3468 	unsigned long flags;
3469 	struct page *page;
3470 
3471 	if (likely(order == 0)) {
3472 		/*
3473 		 * The MIGRATE_MOVABLE pcplist could have pages from the CMA area,
3474 		 * and we need to skip it when CMA allocations aren't allowed.
3475 		 */
3476 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3477 				migratetype != MIGRATE_MOVABLE) {
3478 			page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3479 					migratetype, alloc_flags);
3480 			goto out;
3481 		}
3482 	}
3483 
3484 	/*
3485 	 * We most definitely don't want callers attempting to
3486 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3487 	 */
3488 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3489 	spin_lock_irqsave(&zone->lock, flags);
3490 
3491 	do {
3492 		page = NULL;
3493 		/*
3494 		 * An order-0 request can reach here when the pcplist is skipped
3495 		 * due to a non-CMA allocation context. The HIGHATOMIC area is
3496 		 * reserved for high-order atomic allocations, so an order-0
3497 		 * request should skip it.
3498 		 */
3499 		if (order > 0 && alloc_flags & ALLOC_HARDER) {
3500 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3501 			if (page)
3502 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3503 		}
3504 		if (!page)
3505 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3506 	} while (page && check_new_pages(page, order));
3507 	spin_unlock(&zone->lock);
3508 	if (!page)
3509 		goto failed;
3510 	__mod_zone_freepage_state(zone, -(1 << order),
3511 				  get_pcppage_migratetype(page));
3512 
3513 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3514 	zone_statistics(preferred_zone, zone);
3515 	local_irq_restore(flags);
3516 
3517 out:
3518 	/* Separate test+clear to avoid unnecessary atomics */
3519 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3520 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3521 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3522 	}
3523 
3524 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3525 	return page;
3526 
3527 failed:
3528 	local_irq_restore(flags);
3529 	return NULL;
3530 }
3531 
3532 #ifdef CONFIG_FAIL_PAGE_ALLOC
3533 
3534 static struct {
3535 	struct fault_attr attr;
3536 
3537 	bool ignore_gfp_highmem;
3538 	bool ignore_gfp_reclaim;
3539 	u32 min_order;
3540 } fail_page_alloc = {
3541 	.attr = FAULT_ATTR_INITIALIZER,
3542 	.ignore_gfp_reclaim = true,
3543 	.ignore_gfp_highmem = true,
3544 	.min_order = 1,
3545 };
3546 
3547 static int __init setup_fail_page_alloc(char *str)
3548 {
3549 	return setup_fault_attr(&fail_page_alloc.attr, str);
3550 }
3551 __setup("fail_page_alloc=", setup_fail_page_alloc);
3552 
3553 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3554 {
3555 	if (order < fail_page_alloc.min_order)
3556 		return false;
3557 	if (gfp_mask & __GFP_NOFAIL)
3558 		return false;
3559 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3560 		return false;
3561 	if (fail_page_alloc.ignore_gfp_reclaim &&
3562 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3563 		return false;
3564 
3565 	return should_fail(&fail_page_alloc.attr, 1 << order);
3566 }
3567 
3568 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3569 
3570 static int __init fail_page_alloc_debugfs(void)
3571 {
3572 	umode_t mode = S_IFREG | 0600;
3573 	struct dentry *dir;
3574 
3575 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3576 					&fail_page_alloc.attr);
3577 
3578 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3579 			    &fail_page_alloc.ignore_gfp_reclaim);
3580 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3581 			    &fail_page_alloc.ignore_gfp_highmem);
3582 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3583 
3584 	return 0;
3585 }
3586 
3587 late_initcall(fail_page_alloc_debugfs);
3588 
3589 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3590 
3591 #else /* CONFIG_FAIL_PAGE_ALLOC */
3592 
3593 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3594 {
3595 	return false;
3596 }
3597 
3598 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3599 
3600 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3601 {
3602 	return __should_fail_alloc_page(gfp_mask, order);
3603 }
3604 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3605 
3606 static inline long __zone_watermark_unusable_free(struct zone *z,
3607 				unsigned int order, unsigned int alloc_flags)
3608 {
3609 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3610 	long unusable_free = (1 << order) - 1;
3611 
3612 	/*
3613 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3614 	 * the high-atomic reserves. This will over-estimate the size of the
3615 	 * atomic reserve but it avoids a search.
3616 	 */
3617 	if (likely(!alloc_harder))
3618 		unusable_free += z->nr_reserved_highatomic;
3619 
3620 #ifdef CONFIG_CMA
3621 	/* If allocation can't use CMA areas don't use free CMA pages */
3622 	if (!(alloc_flags & ALLOC_CMA))
3623 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3624 #endif
3625 
3626 	return unusable_free;
3627 }
3628 
3629 /*
3630  * Return true if free base pages are above 'mark'. For high-order checks it
3631  * will return true if the order-0 watermark is reached and there is at least
3632  * one free page of a suitable size. Checking now avoids taking the zone lock
3633  * to check in the allocation paths if no pages are free.
3634  */
3635 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3636 			 int highest_zoneidx, unsigned int alloc_flags,
3637 			 long free_pages)
3638 {
3639 	long min = mark;
3640 	int o;
3641 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3642 
3643 	/* free_pages may go negative - that's OK */
3644 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3645 
3646 	if (alloc_flags & ALLOC_HIGH)
3647 		min -= min / 2;
3648 
3649 	if (unlikely(alloc_harder)) {
3650 		/*
3651 		 * OOM victims can try even harder than normal ALLOC_HARDER
3652 		 * users on the grounds that it's definitely going to be in
3653 		 * the exit path shortly and free memory. Any allocation it
3654 		 * makes during the free path will be small and short-lived.
3655 		 */
3656 		if (alloc_flags & ALLOC_OOM)
3657 			min -= min / 2;
3658 		else
3659 			min -= min / 4;
3660 	}
3661 
3662 	/*
3663 	 * Check watermarks for an order-0 allocation request. If these
3664 	 * are not met, then a high-order request also cannot go ahead
3665 	 * even if a suitable page happened to be free.
3666 	 */
3667 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3668 		return false;
3669 
3670 	/* If this is an order-0 request then the watermark is fine */
3671 	if (!order)
3672 		return true;
3673 
3674 	/* For a high-order request, check at least one suitable page is free */
3675 	for (o = order; o < MAX_ORDER; o++) {
3676 		struct free_area *area = &z->free_area[o];
3677 		int mt;
3678 
3679 		if (!area->nr_free)
3680 			continue;
3681 
3682 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3683 			if (!free_area_empty(area, mt))
3684 				return true;
3685 		}
3686 
3687 #ifdef CONFIG_CMA
3688 		if ((alloc_flags & ALLOC_CMA) &&
3689 		    !free_area_empty(area, MIGRATE_CMA)) {
3690 			return true;
3691 		}
3692 #endif
3693 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3694 			return true;
3695 	}
3696 	return false;
3697 }
3698 
3699 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3700 		      int highest_zoneidx, unsigned int alloc_flags)
3701 {
3702 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3703 					zone_page_state(z, NR_FREE_PAGES));
3704 }
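
/*
 * Editor's sketch (hypothetical, not part of the original file): how a caller
 * might consult zone_watermark_ok() above to test whether an order-0
 * allocation aimed at this zone could currently succeed against the low
 * watermark. The helper name is invented for illustration only.
 */
static inline bool example_zone_has_order0_room(struct zone *zone)
{
	unsigned long mark = low_wmark_pages(zone);	/* WMARK_LOW in pages */

	/* alloc_flags == 0: no access to reserves, CMA or highatomic areas */
	return zone_watermark_ok(zone, 0, mark, zone_idx(zone), 0);
}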
3705 
3706 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3707 				unsigned long mark, int highest_zoneidx,
3708 				unsigned int alloc_flags, gfp_t gfp_mask)
3709 {
3710 	long free_pages;
3711 
3712 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3713 
3714 	/*
3715 	 * Fast check for order-0 only. If this fails then the reserves
3716 	 * need to be calculated.
3717 	 */
3718 	if (!order) {
3719 		long fast_free;
3720 
3721 		fast_free = free_pages;
3722 		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3723 		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3724 			return true;
3725 	}
3726 
3727 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3728 					free_pages))
3729 		return true;
3730 	/*
3731 	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3732 	 * when checking the min watermark. The min watermark is the
3733 	 * point where boosting is ignored so that kswapd is woken up
3734 	 * when below the low watermark.
3735 	 */
3736 	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3737 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3738 		mark = z->_watermark[WMARK_MIN];
3739 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3740 					alloc_flags, free_pages);
3741 	}
3742 
3743 	return false;
3744 }
3745 
3746 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3747 			unsigned long mark, int highest_zoneidx)
3748 {
3749 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3750 
3751 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3752 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3753 
3754 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3755 								free_pages);
3756 }
3757 
3758 #ifdef CONFIG_NUMA
3759 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3760 {
3761 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3762 				node_reclaim_distance;
3763 }
3764 #else	/* CONFIG_NUMA */
3765 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3766 {
3767 	return true;
3768 }
3769 #endif	/* CONFIG_NUMA */
3770 
3771 /*
3772  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3773  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3774  * premature use of a lower zone may cause lowmem pressure problems that
3775  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3776  * probably too small. It only makes sense to spread allocations to avoid
3777  * fragmentation between the Normal and DMA32 zones.
3778  */
3779 static inline unsigned int
3780 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3781 {
3782 	unsigned int alloc_flags;
3783 
3784 	/*
3785 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3786 	 * to save a branch.
3787 	 */
3788 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3789 
3790 #ifdef CONFIG_ZONE_DMA32
3791 	if (!zone)
3792 		return alloc_flags;
3793 
3794 	if (zone_idx(zone) != ZONE_NORMAL)
3795 		return alloc_flags;
3796 
3797 	/*
3798 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3799 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3800 	 * on UMA that if Normal is populated then so is DMA32.
3801 	 */
3802 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3803 	if (nr_online_nodes > 1 && !populated_zone(--zone))
3804 		return alloc_flags;
3805 
3806 	alloc_flags |= ALLOC_NOFRAGMENT;
3807 #endif /* CONFIG_ZONE_DMA32 */
3808 	return alloc_flags;
3809 }
3810 
3811 static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
3812 					unsigned int alloc_flags)
3813 {
3814 #ifdef CONFIG_CMA
3815 	unsigned int pflags = current->flags;
3816 
3817 	if (!(pflags & PF_MEMALLOC_NOCMA) &&
3818 			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3819 		alloc_flags |= ALLOC_CMA;
3820 
3821 #endif
3822 	return alloc_flags;
3823 }
3824 
3825 /*
3826  * get_page_from_freelist goes through the zonelist trying to allocate
3827  * a page.
3828  */
3829 static struct page *
3830 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3831 						const struct alloc_context *ac)
3832 {
3833 	struct zoneref *z;
3834 	struct zone *zone;
3835 	struct pglist_data *last_pgdat_dirty_limit = NULL;
3836 	bool no_fallback;
3837 
3838 retry:
3839 	/*
3840 	 * Scan zonelist, looking for a zone with enough free.
3841 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3842 	 */
3843 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3844 	z = ac->preferred_zoneref;
3845 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3846 					ac->nodemask) {
3847 		struct page *page;
3848 		unsigned long mark;
3849 
3850 		if (cpusets_enabled() &&
3851 			(alloc_flags & ALLOC_CPUSET) &&
3852 			!__cpuset_zone_allowed(zone, gfp_mask))
3853 				continue;
3854 		/*
3855 		 * When allocating a page cache page for writing, we
3856 		 * want to get it from a node that is within its dirty
3857 		 * limit, such that no single node holds more than its
3858 		 * proportional share of globally allowed dirty pages.
3859 		 * The dirty limits take into account the node's
3860 		 * lowmem reserves and high watermark so that kswapd
3861 		 * should be able to balance it without having to
3862 		 * write pages from its LRU list.
3863 		 *
3864 		 * XXX: For now, allow allocations to potentially
3865 		 * exceed the per-node dirty limit in the slowpath
3866 		 * (spread_dirty_pages unset) before going into reclaim,
3867 		 * which is important when on a NUMA setup the allowed
3868 		 * nodes are together not big enough to reach the
3869 		 * global limit.  The proper fix for these situations
3870 		 * will require awareness of nodes in the
3871 		 * dirty-throttling and the flusher threads.
3872 		 */
3873 		if (ac->spread_dirty_pages) {
3874 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
3875 				continue;
3876 
3877 			if (!node_dirty_ok(zone->zone_pgdat)) {
3878 				last_pgdat_dirty_limit = zone->zone_pgdat;
3879 				continue;
3880 			}
3881 		}
3882 
3883 		if (no_fallback && nr_online_nodes > 1 &&
3884 		    zone != ac->preferred_zoneref->zone) {
3885 			int local_nid;
3886 
3887 			/*
3888 			 * If moving to a remote node, retry but allow
3889 			 * fragmenting fallbacks. Locality is more important
3890 			 * than fragmentation avoidance.
3891 			 */
3892 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3893 			if (zone_to_nid(zone) != local_nid) {
3894 				alloc_flags &= ~ALLOC_NOFRAGMENT;
3895 				goto retry;
3896 			}
3897 		}
3898 
3899 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3900 		if (!zone_watermark_fast(zone, order, mark,
3901 				       ac->highest_zoneidx, alloc_flags,
3902 				       gfp_mask)) {
3903 			int ret;
3904 
3905 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3906 			/*
3907 			 * Watermark failed for this zone, but see if we can
3908 			 * grow this zone if it contains deferred pages.
3909 			 */
3910 			if (static_branch_unlikely(&deferred_pages)) {
3911 				if (_deferred_grow_zone(zone, order))
3912 					goto try_this_zone;
3913 			}
3914 #endif
3915 			/* Checked here to keep the fast path fast */
3916 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3917 			if (alloc_flags & ALLOC_NO_WATERMARKS)
3918 				goto try_this_zone;
3919 
3920 			if (node_reclaim_mode == 0 ||
3921 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3922 				continue;
3923 
3924 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3925 			switch (ret) {
3926 			case NODE_RECLAIM_NOSCAN:
3927 				/* did not scan */
3928 				continue;
3929 			case NODE_RECLAIM_FULL:
3930 				/* scanned but unreclaimable */
3931 				continue;
3932 			default:
3933 				/* did we reclaim enough */
3934 				if (zone_watermark_ok(zone, order, mark,
3935 					ac->highest_zoneidx, alloc_flags))
3936 					goto try_this_zone;
3937 
3938 				continue;
3939 			}
3940 		}
3941 
3942 try_this_zone:
3943 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3944 				gfp_mask, alloc_flags, ac->migratetype);
3945 		if (page) {
3946 			prep_new_page(page, order, gfp_mask, alloc_flags);
3947 
3948 			/*
3949 			 * If this is a high-order atomic allocation then check
3950 			 * if the pageblock should be reserved for the future
3951 			 */
3952 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3953 				reserve_highatomic_pageblock(page, zone, order);
3954 
3955 			return page;
3956 		} else {
3957 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3958 			/* Try again if zone has deferred pages */
3959 			if (static_branch_unlikely(&deferred_pages)) {
3960 				if (_deferred_grow_zone(zone, order))
3961 					goto try_this_zone;
3962 			}
3963 #endif
3964 		}
3965 	}
3966 
3967 	/*
3968 	 * It's possible on a UMA machine to get through all zones that are
3969 	 * fragmented. If avoiding fragmentation, reset and try again.
3970 	 */
3971 	if (no_fallback) {
3972 		alloc_flags &= ~ALLOC_NOFRAGMENT;
3973 		goto retry;
3974 	}
3975 
3976 	return NULL;
3977 }
3978 
3979 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3980 {
3981 	unsigned int filter = SHOW_MEM_FILTER_NODES;
3982 
3983 	/*
3984 	 * This documents exceptions given to allocations in certain
3985 	 * contexts that are allowed to allocate outside current's set
3986 	 * of allowed nodes.
3987 	 */
3988 	if (!(gfp_mask & __GFP_NOMEMALLOC))
3989 		if (tsk_is_oom_victim(current) ||
3990 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3991 			filter &= ~SHOW_MEM_FILTER_NODES;
3992 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3993 		filter &= ~SHOW_MEM_FILTER_NODES;
3994 
3995 	show_mem(filter, nodemask);
3996 }
3997 
3998 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3999 {
4000 	struct va_format vaf;
4001 	va_list args;
4002 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4003 
4004 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
4005 		return;
4006 
4007 	va_start(args, fmt);
4008 	vaf.fmt = fmt;
4009 	vaf.va = &args;
4010 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4011 			current->comm, &vaf, gfp_mask, &gfp_mask,
4012 			nodemask_pr_args(nodemask));
4013 	va_end(args);
4014 
4015 	cpuset_print_current_mems_allowed();
4016 	pr_cont("\n");
4017 	dump_stack();
4018 	warn_alloc_show_mem(gfp_mask, nodemask);
4019 }
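
/*
 * Editor's sketch (hypothetical, not part of the original file): how a
 * built-in caller typically reports an allocation failure with warn_alloc()
 * above - a rate-limited, stack-dumping warning that also shows the memory
 * state. The wrapper name is invented for illustration only.
 */
static inline void example_report_alloc_failure(gfp_t gfp_mask,
						unsigned int order)
{
	/* NULL nodemask: no node restriction worth reporting */
	warn_alloc(gfp_mask, NULL, "example: order:%u allocation failed", order);
}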
4020 
4021 static inline struct page *
4022 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4023 			      unsigned int alloc_flags,
4024 			      const struct alloc_context *ac)
4025 {
4026 	struct page *page;
4027 
4028 	page = get_page_from_freelist(gfp_mask, order,
4029 			alloc_flags|ALLOC_CPUSET, ac);
4030 	/*
4031 	 * fallback to ignore cpuset restriction if our nodes
4032 	 * are depleted
4033 	 */
4034 	if (!page)
4035 		page = get_page_from_freelist(gfp_mask, order,
4036 				alloc_flags, ac);
4037 
4038 	return page;
4039 }
4040 
4041 static inline struct page *
4042 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4043 	const struct alloc_context *ac, unsigned long *did_some_progress)
4044 {
4045 	struct oom_control oc = {
4046 		.zonelist = ac->zonelist,
4047 		.nodemask = ac->nodemask,
4048 		.memcg = NULL,
4049 		.gfp_mask = gfp_mask,
4050 		.order = order,
4051 	};
4052 	struct page *page;
4053 
4054 	*did_some_progress = 0;
4055 
4056 	/*
4057 	 * Acquire the oom lock.  If that fails, somebody else is
4058 	 * making progress for us.
4059 	 */
4060 	if (!mutex_trylock(&oom_lock)) {
4061 		*did_some_progress = 1;
4062 		schedule_timeout_uninterruptible(1);
4063 		return NULL;
4064 	}
4065 
4066 	/*
4067 	 * Go through the zonelist yet one more time, keep very high watermark
4068 	 * here, this is only to catch a parallel oom killing, we must fail if
4069 	 * we're still under heavy pressure. But make sure that this reclaim
4070 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4071 	 * allocation which will never fail due to oom_lock already held.
4072 	 */
4073 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4074 				      ~__GFP_DIRECT_RECLAIM, order,
4075 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4076 	if (page)
4077 		goto out;
4078 
4079 	/* Coredumps can quickly deplete all memory reserves */
4080 	if (current->flags & PF_DUMPCORE)
4081 		goto out;
4082 	/* The OOM killer will not help higher order allocs */
4083 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4084 		goto out;
4085 	/*
4086 	 * We have already exhausted all our reclaim opportunities without any
4087 	 * success so it is time to admit defeat. We will skip the OOM killer
4088 	 * because it is very likely that the caller has a more reasonable
4089 	 * fallback than shooting a random task.
4090 	 *
4091 	 * The OOM killer may not free memory on a specific node.
4092 	 */
4093 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4094 		goto out;
4095 	/* The OOM killer does not needlessly kill tasks for lowmem */
4096 	if (ac->highest_zoneidx < ZONE_NORMAL)
4097 		goto out;
4098 	if (pm_suspended_storage())
4099 		goto out;
4100 	/*
4101 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4102  * other requests to make forward progress.
4103  * We are in an unfortunate situation where out_of_memory cannot
4104  * do much for this context, but let's try it to at least get
4105  * access to memory reserves if the current task is killed (see
4106 	 * out_of_memory). Once filesystems are ready to handle allocation
4107 	 * failures more gracefully we should just bail out here.
4108 	 */
4109 
4110 	/* Exhausted what can be done so it's blame time */
4111 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4112 		*did_some_progress = 1;
4113 
4114 		/*
4115 		 * Help non-failing allocations by giving them access to memory
4116 		 * reserves
4117 		 */
4118 		if (gfp_mask & __GFP_NOFAIL)
4119 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4120 					ALLOC_NO_WATERMARKS, ac);
4121 	}
4122 out:
4123 	mutex_unlock(&oom_lock);
4124 	return page;
4125 }
4126 
4127 /*
4128  * Maximum number of compaction retries with progress before the OOM
4129  * killer is considered the only way to move forward.
4130  */
4131 #define MAX_COMPACT_RETRIES 16
4132 
4133 #ifdef CONFIG_COMPACTION
4134 /* Try memory compaction for high-order allocations before reclaim */
4135 static struct page *
4136 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4137 		unsigned int alloc_flags, const struct alloc_context *ac,
4138 		enum compact_priority prio, enum compact_result *compact_result)
4139 {
4140 	struct page *page = NULL;
4141 	unsigned long pflags;
4142 	unsigned int noreclaim_flag;
4143 
4144 	if (!order)
4145 		return NULL;
4146 
4147 	psi_memstall_enter(&pflags);
4148 	noreclaim_flag = memalloc_noreclaim_save();
4149 
4150 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4151 								prio, &page);
4152 
4153 	memalloc_noreclaim_restore(noreclaim_flag);
4154 	psi_memstall_leave(&pflags);
4155 
4156 	/*
4157 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4158 	 * count a compaction stall
4159 	 */
4160 	count_vm_event(COMPACTSTALL);
4161 
4162 	/* Prep a captured page if available */
4163 	if (page)
4164 		prep_new_page(page, order, gfp_mask, alloc_flags);
4165 
4166 	/* Try get a page from the freelist if available */
4167 	if (!page)
4168 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4169 
4170 	if (page) {
4171 		struct zone *zone = page_zone(page);
4172 
4173 		zone->compact_blockskip_flush = false;
4174 		compaction_defer_reset(zone, order, true);
4175 		count_vm_event(COMPACTSUCCESS);
4176 		return page;
4177 	}
4178 
4179 	/*
4180 	 * It's bad if a compaction run occurs and fails. The most likely reason
4181 	 * is that pages exist, but not enough to satisfy watermarks.
4182 	 */
4183 	count_vm_event(COMPACTFAIL);
4184 
4185 	cond_resched();
4186 
4187 	return NULL;
4188 }
4189 
4190 static inline bool
4191 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4192 		     enum compact_result compact_result,
4193 		     enum compact_priority *compact_priority,
4194 		     int *compaction_retries)
4195 {
4196 	int max_retries = MAX_COMPACT_RETRIES;
4197 	int min_priority;
4198 	bool ret = false;
4199 	int retries = *compaction_retries;
4200 	enum compact_priority priority = *compact_priority;
4201 
4202 	if (!order)
4203 		return false;
4204 
4205 	if (compaction_made_progress(compact_result))
4206 		(*compaction_retries)++;
4207 
4208 	/*
4209 	 * compaction considers all the zones as desperately out of memory
4210 	 * so it doesn't really make much sense to retry except when the
4211 	 * failure could be caused by insufficient priority
4212 	 */
4213 	if (compaction_failed(compact_result))
4214 		goto check_priority;
4215 
4216 	/*
4217 	 * compaction was skipped because there are not enough order-0 pages
4218 	 * to work with, so we retry only if it looks like reclaim can help.
4219 	 */
4220 	if (compaction_needs_reclaim(compact_result)) {
4221 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4222 		goto out;
4223 	}
4224 
4225 	/*
4226 	 * make sure the compaction wasn't deferred or didn't bail out early
4227 	 * due to lock contention before we declare that we should give up.
4228 	 * But the next retry should use a higher priority if allowed, so
4229 	 * we don't just keep bailing out endlessly.
4230 	 */
4231 	if (compaction_withdrawn(compact_result)) {
4232 		goto check_priority;
4233 	}
4234 
4235 	/*
4236 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4237 	 * costly ones because they are de facto nofail and invoke the OOM
4238 	 * killer to move on, while costly ones can fail and users are ready
4239 	 * to cope with that. 1/4 retries is rather arbitrary but we
4240 	 * would need much more detailed feedback from compaction to
4241 	 * make a better decision.
4242 	 */
4243 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4244 		max_retries /= 4;
4245 	if (*compaction_retries <= max_retries) {
4246 		ret = true;
4247 		goto out;
4248 	}
4249 
4250 	/*
4251 	 * Make sure there are attempts at the highest priority if we exhausted
4252 	 * all retries or failed at the lower priorities.
4253 	 */
4254 check_priority:
4255 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4256 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4257 
4258 	if (*compact_priority > min_priority) {
4259 		(*compact_priority)--;
4260 		*compaction_retries = 0;
4261 		ret = true;
4262 	}
4263 out:
4264 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4265 	return ret;
4266 }
4267 #else
4268 static inline struct page *
4269 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4270 		unsigned int alloc_flags, const struct alloc_context *ac,
4271 		enum compact_priority prio, enum compact_result *compact_result)
4272 {
4273 	*compact_result = COMPACT_SKIPPED;
4274 	return NULL;
4275 }
4276 
4277 static inline bool
4278 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4279 		     enum compact_result compact_result,
4280 		     enum compact_priority *compact_priority,
4281 		     int *compaction_retries)
4282 {
4283 	struct zone *zone;
4284 	struct zoneref *z;
4285 
4286 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4287 		return false;
4288 
4289 	/*
4290 	 * There are setups with compaction disabled which would prefer to loop
4291 	 * inside the allocator rather than hit the oom killer prematurely.
4292 	 * Let's give them a good hope and keep retrying while the order-0
4293 	 * watermarks are OK.
4294 	 */
4295 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4296 				ac->highest_zoneidx, ac->nodemask) {
4297 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4298 					ac->highest_zoneidx, alloc_flags))
4299 			return true;
4300 	}
4301 	return false;
4302 }
4303 #endif /* CONFIG_COMPACTION */
4304 
4305 #ifdef CONFIG_LOCKDEP
4306 static struct lockdep_map __fs_reclaim_map =
4307 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4308 
4309 static bool __need_reclaim(gfp_t gfp_mask)
4310 {
4311 	/* no reclaim without waiting on it */
4312 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4313 		return false;
4314 
4315 	/* this guy won't enter reclaim */
4316 	if (current->flags & PF_MEMALLOC)
4317 		return false;
4318 
4319 	if (gfp_mask & __GFP_NOLOCKDEP)
4320 		return false;
4321 
4322 	return true;
4323 }
4324 
4325 void __fs_reclaim_acquire(void)
4326 {
4327 	lock_map_acquire(&__fs_reclaim_map);
4328 }
4329 
4330 void __fs_reclaim_release(void)
4331 {
4332 	lock_map_release(&__fs_reclaim_map);
4333 }
4334 
4335 void fs_reclaim_acquire(gfp_t gfp_mask)
4336 {
4337 	gfp_mask = current_gfp_context(gfp_mask);
4338 
4339 	if (__need_reclaim(gfp_mask)) {
4340 		if (gfp_mask & __GFP_FS)
4341 			__fs_reclaim_acquire();
4342 
4343 #ifdef CONFIG_MMU_NOTIFIER
4344 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4345 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4346 #endif
4347 
4348 	}
4349 }
4350 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4351 
4352 void fs_reclaim_release(gfp_t gfp_mask)
4353 {
4354 	gfp_mask = current_gfp_context(gfp_mask);
4355 
4356 	if (__need_reclaim(gfp_mask)) {
4357 		if (gfp_mask & __GFP_FS)
4358 			__fs_reclaim_release();
4359 	}
4360 }
4361 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4362 #endif
4363 
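/*
 * Editor's sketch (hypothetical, not part of the original file): a path that
 * only *might* allocate later can call fs_reclaim_acquire()/fs_reclaim_release()
 * up front so that lockdep learns about the reclaim dependency early, much
 * like prepare_alloc_pages() does further down. The name is invented for
 * illustration only; when lockdep is disabled the calls compile away.
 */
static inline void example_annotate_might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
}
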
4364 /* Perform direct synchronous page reclaim */
4365 static unsigned long
4366 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4367 					const struct alloc_context *ac)
4368 {
4369 	unsigned int noreclaim_flag;
4370 	unsigned long pflags, progress;
4371 
4372 	cond_resched();
4373 
4374 	/* We now go into synchronous reclaim */
4375 	cpuset_memory_pressure_bump();
4376 	psi_memstall_enter(&pflags);
4377 	fs_reclaim_acquire(gfp_mask);
4378 	noreclaim_flag = memalloc_noreclaim_save();
4379 
4380 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4381 								ac->nodemask);
4382 
4383 	memalloc_noreclaim_restore(noreclaim_flag);
4384 	fs_reclaim_release(gfp_mask);
4385 	psi_memstall_leave(&pflags);
4386 
4387 	cond_resched();
4388 
4389 	return progress;
4390 }
4391 
4392 /* The really slow allocator path where we enter direct reclaim */
4393 static inline struct page *
4394 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4395 		unsigned int alloc_flags, const struct alloc_context *ac,
4396 		unsigned long *did_some_progress)
4397 {
4398 	struct page *page = NULL;
4399 	bool drained = false;
4400 
4401 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4402 	if (unlikely(!(*did_some_progress)))
4403 		return NULL;
4404 
4405 retry:
4406 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4407 
4408 	/*
4409 	 * If an allocation failed after direct reclaim, it could be because
4410 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4411 	 * Shrink them and try again
4412 	 */
4413 	if (!page && !drained) {
4414 		unreserve_highatomic_pageblock(ac, false);
4415 		drain_all_pages(NULL);
4416 		drained = true;
4417 		goto retry;
4418 	}
4419 
4420 	return page;
4421 }
4422 
4423 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4424 			     const struct alloc_context *ac)
4425 {
4426 	struct zoneref *z;
4427 	struct zone *zone;
4428 	pg_data_t *last_pgdat = NULL;
4429 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4430 
4431 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4432 					ac->nodemask) {
4433 		if (last_pgdat != zone->zone_pgdat)
4434 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4435 		last_pgdat = zone->zone_pgdat;
4436 	}
4437 }
4438 
4439 static inline unsigned int
4440 gfp_to_alloc_flags(gfp_t gfp_mask)
4441 {
4442 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4443 
4444 	/*
4445 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4446 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4447 	 * to save two branches.
4448 	 */
4449 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4450 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4451 
4452 	/*
4453 	 * The caller may dip into page reserves a bit more if the caller
4454 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4455 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4456 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4457 	 */
4458 	alloc_flags |= (__force int)
4459 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4460 
4461 	if (gfp_mask & __GFP_ATOMIC) {
4462 		/*
4463 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4464 		 * if it can't schedule.
4465 		 */
4466 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4467 			alloc_flags |= ALLOC_HARDER;
4468 		/*
4469 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4470 		 * comment for __cpuset_node_allowed().
4471 		 */
4472 		alloc_flags &= ~ALLOC_CPUSET;
4473 	} else if (unlikely(rt_task(current)) && !in_interrupt())
4474 		alloc_flags |= ALLOC_HARDER;
4475 
4476 	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
4477 
4478 	return alloc_flags;
4479 }
4480 
4481 static bool oom_reserves_allowed(struct task_struct *tsk)
4482 {
4483 	if (!tsk_is_oom_victim(tsk))
4484 		return false;
4485 
4486 	/*
4487 	 * !MMU doesn't have an oom reaper, so give access to memory reserves
4488 	 * only to the thread with TIF_MEMDIE set
4489 	 */
4490 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4491 		return false;
4492 
4493 	return true;
4494 }
4495 
4496 /*
4497  * Distinguish requests which really need access to full memory
4498  * reserves from oom victims which can live with a portion of it
4499  */
4500 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4501 {
4502 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4503 		return 0;
4504 	if (gfp_mask & __GFP_MEMALLOC)
4505 		return ALLOC_NO_WATERMARKS;
4506 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4507 		return ALLOC_NO_WATERMARKS;
4508 	if (!in_interrupt()) {
4509 		if (current->flags & PF_MEMALLOC)
4510 			return ALLOC_NO_WATERMARKS;
4511 		else if (oom_reserves_allowed(current))
4512 			return ALLOC_OOM;
4513 	}
4514 
4515 	return 0;
4516 }
4517 
4518 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4519 {
4520 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4521 }
4522 
4523 /*
4524  * Checks whether it makes sense to retry the reclaim to make a forward progress
4525  * for the given allocation request.
4526  *
4527  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4528  * without success, or when we couldn't even meet the watermark if we
4529  * reclaimed all remaining pages on the LRU lists.
4530  *
4531  * Returns true if a retry is viable or false to enter the oom path.
4532  */
4533 static inline bool
4534 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4535 		     struct alloc_context *ac, int alloc_flags,
4536 		     bool did_some_progress, int *no_progress_loops)
4537 {
4538 	struct zone *zone;
4539 	struct zoneref *z;
4540 	bool ret = false;
4541 
4542 	/*
4543 	 * Costly allocations might have made progress but this doesn't mean
4544 	 * their order will become available due to high fragmentation so
4545 	 * always increment the no progress counter for them
4546 	 */
4547 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4548 		*no_progress_loops = 0;
4549 	else
4550 		(*no_progress_loops)++;
4551 
4552 	/*
4553 	 * Make sure we converge to OOM if we cannot make any progress
4554 	 * several times in a row.
4555 	 */
4556 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4557 		/* Before OOM, exhaust highatomic_reserve */
4558 		return unreserve_highatomic_pageblock(ac, true);
4559 	}
4560 
4561 	/*
4562 	 * Keep reclaiming pages while there is a chance this will lead
4563 	 * somewhere.  If none of the target zones can satisfy our allocation
4564 	 * request even if all reclaimable pages are considered then we are
4565 	 * screwed and have to go OOM.
4566 	 */
4567 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4568 				ac->highest_zoneidx, ac->nodemask) {
4569 		unsigned long available;
4570 		unsigned long reclaimable;
4571 		unsigned long min_wmark = min_wmark_pages(zone);
4572 		bool wmark;
4573 
4574 		available = reclaimable = zone_reclaimable_pages(zone);
4575 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4576 
4577 		/*
4578 		 * Would the allocation succeed if we reclaimed all
4579 		 * reclaimable pages?
4580 		 */
4581 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4582 				ac->highest_zoneidx, alloc_flags, available);
4583 		trace_reclaim_retry_zone(z, order, reclaimable,
4584 				available, min_wmark, *no_progress_loops, wmark);
4585 		if (wmark) {
4586 			/*
4587 			 * If we didn't make any progress and have a lot of
4588 			 * dirty + writeback pages then we should wait for
4589 			 * an IO to complete to slow down the reclaim and
4590 			 * prevent a premature OOM
4591 			 */
4592 			if (!did_some_progress) {
4593 				unsigned long write_pending;
4594 
4595 				write_pending = zone_page_state_snapshot(zone,
4596 							NR_ZONE_WRITE_PENDING);
4597 
4598 				if (2 * write_pending > reclaimable) {
4599 					congestion_wait(BLK_RW_ASYNC, HZ/10);
4600 					return true;
4601 				}
4602 			}
4603 
4604 			ret = true;
4605 			goto out;
4606 		}
4607 	}
4608 
4609 out:
4610 	/*
4611 	 * Memory allocation/reclaim might be called from a WQ context and the
4612 	 * current implementation of the WQ concurrency control doesn't
4613 	 * recognize that a particular WQ is congested if the worker thread is
4614 	 * looping without ever sleeping. Therefore we have to do a short sleep
4615 	 * here rather than calling cond_resched().
4616 	 */
4617 	if (current->flags & PF_WQ_WORKER)
4618 		schedule_timeout_uninterruptible(1);
4619 	else
4620 		cond_resched();
4621 	return ret;
4622 }
4623 
4624 static inline bool
4625 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4626 {
4627 	/*
4628 	 * It's possible that cpuset's mems_allowed and the nodemask from
4629 	 * mempolicy don't intersect. This should be normally dealt with by
4630 	 * policy_nodemask(), but it's possible to race with cpuset update in
4631 	 * such a way the check therein was true, and then it became false
4632 	 * before we got our cpuset_mems_cookie here.
4633 	 * This assumes that for all allocations, ac->nodemask can come only
4634 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4635 	 * when it does not intersect with the cpuset restrictions) or the
4636 	 * caller can deal with a violated nodemask.
4637 	 */
4638 	if (cpusets_enabled() && ac->nodemask &&
4639 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4640 		ac->nodemask = NULL;
4641 		return true;
4642 	}
4643 
4644 	/*
4645 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4646 	 * possible to race with parallel threads in such a way that our
4647 	 * allocation can fail while the mask is being updated. If we are about
4648 	 * to fail, check if the cpuset changed during allocation and if so,
4649 	 * retry.
4650 	 */
4651 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4652 		return true;
4653 
4654 	return false;
4655 }
4656 
4657 static inline struct page *
4658 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4659 						struct alloc_context *ac)
4660 {
4661 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4662 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4663 	struct page *page = NULL;
4664 	unsigned int alloc_flags;
4665 	unsigned long did_some_progress;
4666 	enum compact_priority compact_priority;
4667 	enum compact_result compact_result;
4668 	int compaction_retries;
4669 	int no_progress_loops;
4670 	unsigned int cpuset_mems_cookie;
4671 	int reserve_flags;
4672 
4673 	/*
4674 	 * Sanity check to catch abuse of atomic reserves by callers that
4675 	 * are not in an atomic context.
4676 	 */
4677 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4678 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4679 		gfp_mask &= ~__GFP_ATOMIC;
4680 
4681 retry_cpuset:
4682 	compaction_retries = 0;
4683 	no_progress_loops = 0;
4684 	compact_priority = DEF_COMPACT_PRIORITY;
4685 	cpuset_mems_cookie = read_mems_allowed_begin();
4686 
4687 	/*
4688 	 * The fast path uses conservative alloc_flags to succeed only until
4689 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4690 	 * alloc_flags precisely. So we do that now.
4691 	 */
4692 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4693 
4694 	/*
4695 	 * We need to recalculate the starting point for the zonelist iterator
4696 	 * because we might have used different nodemask in the fast path, or
4697 	 * there was a cpuset modification and we are retrying - otherwise we
4698 	 * could end up iterating over non-eligible zones endlessly.
4699 	 */
4700 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4701 					ac->highest_zoneidx, ac->nodemask);
4702 	if (!ac->preferred_zoneref->zone)
4703 		goto nopage;
4704 
4705 	if (alloc_flags & ALLOC_KSWAPD)
4706 		wake_all_kswapds(order, gfp_mask, ac);
4707 
4708 	/*
4709 	 * The adjusted alloc_flags might result in immediate success, so try
4710 	 * that first
4711 	 */
4712 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4713 	if (page)
4714 		goto got_pg;
4715 
4716 	/*
4717 	 * For costly allocations, try direct compaction first, as it's likely
4718 	 * that we have enough base pages and don't need to reclaim. For non-
4719 	 * movable high-order allocations, do that as well, as compaction will
4720 	 * try to prevent permanent fragmentation by migrating from blocks of the
4721 	 * same migratetype.
4722 	 * Don't try this for allocations that are allowed to ignore
4723 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4724 	 */
4725 	if (can_direct_reclaim &&
4726 			(costly_order ||
4727 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4728 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4729 		page = __alloc_pages_direct_compact(gfp_mask, order,
4730 						alloc_flags, ac,
4731 						INIT_COMPACT_PRIORITY,
4732 						&compact_result);
4733 		if (page)
4734 			goto got_pg;
4735 
4736 		/*
4737 		 * Checks for costly allocations with __GFP_NORETRY, which
4738 		 * includes some THP page fault allocations
4739 		 */
4740 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4741 			/*
4742 			 * If allocating entire pageblock(s) and compaction
4743 			 * failed because all zones are below low watermarks
4744 			 * or is prohibited because it recently failed at this
4745 			 * order, fail immediately unless the allocator has
4746 			 * requested compaction and reclaim retry.
4747 			 *
4748 			 * Reclaim is
4749 			 *  - potentially very expensive because zones are far
4750 			 *    below their low watermarks or this is part of very
4751 			 *    bursty high order allocations,
4752 			 *  - not guaranteed to help because isolate_freepages()
4753 			 *    may not iterate over freed pages as part of its
4754 			 *    linear scan, and
4755 			 *  - unlikely to make entire pageblocks free on its
4756 			 *    own.
4757 			 */
4758 			if (compact_result == COMPACT_SKIPPED ||
4759 			    compact_result == COMPACT_DEFERRED)
4760 				goto nopage;
4761 
4762 			/*
4763 			 * Looks like reclaim/compaction is worth trying, but
4764 			 * sync compaction could be very expensive, so keep
4765 			 * using async compaction.
4766 			 */
4767 			compact_priority = INIT_COMPACT_PRIORITY;
4768 		}
4769 	}
4770 
4771 retry:
4772 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4773 	if (alloc_flags & ALLOC_KSWAPD)
4774 		wake_all_kswapds(order, gfp_mask, ac);
4775 
4776 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4777 	if (reserve_flags)
4778 		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
4779 
4780 	/*
4781 	 * Reset the nodemask and zonelist iterators if memory policies can be
4782 	 * ignored. These allocations are high priority and system rather than
4783 	 * user oriented.
4784 	 */
4785 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4786 		ac->nodemask = NULL;
4787 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4788 					ac->highest_zoneidx, ac->nodemask);
4789 	}
4790 
4791 	/* Attempt with potentially adjusted zonelist and alloc_flags */
4792 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4793 	if (page)
4794 		goto got_pg;
4795 
4796 	/* Caller is not willing to reclaim, we can't balance anything */
4797 	if (!can_direct_reclaim)
4798 		goto nopage;
4799 
4800 	/* Avoid recursion of direct reclaim */
4801 	if (current->flags & PF_MEMALLOC)
4802 		goto nopage;
4803 
4804 	/* Try direct reclaim and then allocating */
4805 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4806 							&did_some_progress);
4807 	if (page)
4808 		goto got_pg;
4809 
4810 	/* Try direct compaction and then allocating */
4811 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4812 					compact_priority, &compact_result);
4813 	if (page)
4814 		goto got_pg;
4815 
4816 	/* Do not loop if specifically requested */
4817 	if (gfp_mask & __GFP_NORETRY)
4818 		goto nopage;
4819 
4820 	/*
4821 	 * Do not retry costly high order allocations unless they are
4822 	 * __GFP_RETRY_MAYFAIL
4823 	 */
4824 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4825 		goto nopage;
4826 
4827 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4828 				 did_some_progress > 0, &no_progress_loops))
4829 		goto retry;
4830 
4831 	/*
4832 	 * It doesn't make any sense to retry for the compaction if the order-0
4833 	 * reclaim is not able to make any progress because the current
4834 	 * implementation of the compaction depends on the sufficient amount
4835 	 * of free memory (see __compaction_suitable)
4836 	 */
4837 	if (did_some_progress > 0 &&
4838 			should_compact_retry(ac, order, alloc_flags,
4839 				compact_result, &compact_priority,
4840 				&compaction_retries))
4841 		goto retry;
4842 
4844 	/* Deal with possible cpuset update races before we start OOM killing */
4845 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4846 		goto retry_cpuset;
4847 
4848 	/* Reclaim has failed us, start killing things */
4849 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4850 	if (page)
4851 		goto got_pg;
4852 
4853 	/* Avoid allocations with no watermarks from looping endlessly */
4854 	if (tsk_is_oom_victim(current) &&
4855 	    (alloc_flags & ALLOC_OOM ||
4856 	     (gfp_mask & __GFP_NOMEMALLOC)))
4857 		goto nopage;
4858 
4859 	/* Retry as long as the OOM killer is making progress */
4860 	if (did_some_progress) {
4861 		no_progress_loops = 0;
4862 		goto retry;
4863 	}
4864 
4865 nopage:
4866 	/* Deal with possible cpuset update races before we fail */
4867 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4868 		goto retry_cpuset;
4869 
4870 	/*
4871 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4872 	 * we always retry
4873 	 */
4874 	if (gfp_mask & __GFP_NOFAIL) {
4875 		/*
4876 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
4877 		 * All existing users of __GFP_NOFAIL are blockable, so warn
4878 		 */
4879 		if (WARN_ON_ONCE(!can_direct_reclaim))
4880 			goto fail;
4881 
4882 		/*
4883 		 * PF_MEMALLOC request from this context is rather bizarre
4884 		 * A PF_MEMALLOC request from this context is rather bizarre
4885 		 * because we cannot reclaim anything and can only loop waiting
4886 		 * for somebody to do the work for us
4887 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4888 
4889 		/*
4890 		 * Non-failing costly orders are a hard requirement which we
4891 		 * are not well prepared for, so let's warn about these users
4892 		 * so that we can identify them and convert them to something
4893 		 * else.
4894 		 */
4895 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4896 
4897 		/*
4898 		 * Help non-failing allocations by giving them access to memory
4899 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
4900 		 * could deplete whole memory reserves which would just make
4901 		 * the situation worse
4902 		 */
4903 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4904 		if (page)
4905 			goto got_pg;
4906 
4907 		cond_resched();
4908 		goto retry;
4909 	}
4910 fail:
4911 	warn_alloc(gfp_mask, ac->nodemask,
4912 			"page allocation failure: order:%u", order);
4913 got_pg:
4914 	return page;
4915 }
4916 
4917 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4918 		int preferred_nid, nodemask_t *nodemask,
4919 		struct alloc_context *ac, gfp_t *alloc_mask,
4920 		unsigned int *alloc_flags)
4921 {
4922 	ac->highest_zoneidx = gfp_zone(gfp_mask);
4923 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4924 	ac->nodemask = nodemask;
4925 	ac->migratetype = gfp_migratetype(gfp_mask);
4926 
4927 	if (cpusets_enabled()) {
4928 		*alloc_mask |= __GFP_HARDWALL;
4929 		/*
4930 		 * When we are in interrupt context, the cpuset of the current
4931 		 * task is irrelevant, so any node is ok.
4932 		 */
4933 		if (!in_interrupt() && !ac->nodemask)
4934 			ac->nodemask = &cpuset_current_mems_allowed;
4935 		else
4936 			*alloc_flags |= ALLOC_CPUSET;
4937 	}
4938 
4939 	fs_reclaim_acquire(gfp_mask);
4940 	fs_reclaim_release(gfp_mask);
4941 
4942 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4943 
4944 	if (should_fail_alloc_page(gfp_mask, order))
4945 		return false;
4946 
4947 	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
4948 
4949 	/* Dirty zone balancing only done in the fast path */
4950 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4951 
4952 	/*
4953 	 * The preferred zone is used for statistics but crucially it is
4954 	 * also used as the starting point for the zonelist iterator. It
4955 	 * may get reset for allocations that ignore memory policies.
4956 	 */
4957 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4958 					ac->highest_zoneidx, ac->nodemask);
4959 
4960 	return true;
4961 }
4962 
4963 /*
4964  * This is the 'heart' of the zoned buddy allocator.
4965  */
4966 struct page *
4967 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4968 							nodemask_t *nodemask)
4969 {
4970 	struct page *page;
4971 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4972 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4973 	struct alloc_context ac = { };
4974 
4975 	/*
4976 	 * There are several places where we assume that the order value is sane
4977 	 * so bail out early if the request is out of bound.
4978 	 */
4979 	if (unlikely(order >= MAX_ORDER)) {
4980 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4981 		return NULL;
4982 	}
4983 
4984 	gfp_mask &= gfp_allowed_mask;
4985 	alloc_mask = gfp_mask;
4986 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4987 		return NULL;
4988 
4989 	/*
4990 	 * Forbid the first pass from falling back to types that fragment
4991 	 * memory until all local zones are considered.
4992 	 */
4993 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
4994 
4995 	/* First allocation attempt */
4996 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4997 	if (likely(page))
4998 		goto out;
4999 
5000 	/*
5001 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5002 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
5003 	 * from a particular context which has been marked by
5004 	 * memalloc_no{fs,io}_{save,restore}.
5005 	 */
5006 	alloc_mask = current_gfp_context(gfp_mask);
5007 	ac.spread_dirty_pages = false;
5008 
5009 	/*
5010 	 * Restore the original nodemask if it was potentially replaced with
5011 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5012 	 */
5013 	ac.nodemask = nodemask;
5014 
5015 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
5016 
5017 out:
5018 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
5019 	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
5020 		__free_pages(page, order);
5021 		page = NULL;
5022 	}
5023 
5024 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
5025 
5026 	return page;
5027 }
5028 EXPORT_SYMBOL(__alloc_pages_nodemask);
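
/*
 * Editor's sketch (hypothetical, not part of the original file): a minimal
 * caller of __alloc_pages_nodemask() that prefers the local node and imposes
 * no nodemask, which is roughly what the alloc_pages()/alloc_pages_node()
 * wrappers boil down to. The name is invented for illustration only.
 */
static inline struct page *example_alloc_local(gfp_t gfp_mask,
					       unsigned int order)
{
	/* NULL nodemask: any node may be used, preferred_nid is only a hint */
	return __alloc_pages_nodemask(gfp_mask, order, numa_node_id(), NULL);
}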
5029 
5030 /*
5031  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5032  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5033  * you need to access high mem.
5034  */
5035 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5036 {
5037 	struct page *page;
5038 
5039 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5040 	if (!page)
5041 		return 0;
5042 	return (unsigned long) page_address(page);
5043 }
5044 EXPORT_SYMBOL(__get_free_pages);
5045 
5046 unsigned long get_zeroed_page(gfp_t gfp_mask)
5047 {
5048 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5049 }
5050 EXPORT_SYMBOL(get_zeroed_page);
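
/*
 * Editor's sketch (hypothetical, not part of the original file): the usual
 * pairing of the helpers above with free_pages() - a zeroed page is obtained
 * as a kernel virtual address and released by address rather than by
 * struct page. The function is invented for illustration only.
 */
static inline int example_use_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;

	/* ... use the page via (void *)addr ... */

	free_pages(addr, 0);
	return 0;
}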
5051 
5052 static inline void free_the_page(struct page *page, unsigned int order)
5053 {
5054 	if (order == 0)		/* Via pcp? */
5055 		free_unref_page(page);
5056 	else
5057 		__free_pages_ok(page, order, FPI_NONE);
5058 }
5059 
5060 /**
5061  * __free_pages - Free pages allocated with alloc_pages().
5062  * @page: The page pointer returned from alloc_pages().
5063  * @order: The order of the allocation.
5064  *
5065  * This function can free multi-page allocations that are not compound
5066  * pages.  It does not check that the @order passed in matches that of
5067  * the allocation, so it is easy to leak memory.  Freeing more memory
5068  * than was allocated will probably emit a warning.
5069  *
5070  * If the last reference to this page is speculative, it will be released
5071  * by put_page() which only frees the first page of a non-compound
5072  * allocation.  To prevent the remaining pages from being leaked, we free
5073  * the subsequent pages here.  If you want to use the page's reference
5074  * count to decide when to free the allocation, you should allocate a
5075  * compound page, and use put_page() instead of __free_pages().
5076  *
5077  * Context: May be called in interrupt context or while holding a normal
5078  * spinlock, but not in NMI context or while holding a raw spinlock.
5079  */
5080 void __free_pages(struct page *page, unsigned int order)
5081 {
5082 	if (put_page_testzero(page))
5083 		free_the_page(page, order);
5084 	else if (!PageHead(page))
5085 		while (order-- > 0)
5086 			free_the_page(page + (1 << order), order);
5087 }
5088 EXPORT_SYMBOL(__free_pages);
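
/*
 * Editor's sketch (hypothetical, not part of the original file): the two
 * ownership models described in the comment above. When the caller tracks the
 * order itself, a non-compound block is released with __free_pages(); when
 * the page's reference count should decide the lifetime, the allocation must
 * be compound and put_page() is used instead. Names are invented for
 * illustration only.
 */
static inline void example_free_by_order(struct page *page, unsigned int order)
{
	/* Frees all 1 << order pages even though they are not compound */
	__free_pages(page, order);
}

static inline void example_free_by_refcount(struct page *page)
{
	/* Only safe if the page was allocated with __GFP_COMP */
	put_page(page);
}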
5089 
5090 void free_pages(unsigned long addr, unsigned int order)
5091 {
5092 	if (addr != 0) {
5093 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5094 		__free_pages(virt_to_page((void *)addr), order);
5095 	}
5096 }
5097 
5098 EXPORT_SYMBOL(free_pages);
5099 
5100 /*
5101  * Page Fragment:
5102  *  An arbitrary-length arbitrary-offset area of memory which resides
5103  *  within a 0 or higher order page.  Multiple fragments within that page
5104  *  are individually refcounted, in the page's reference counter.
5105  *
5106  * The page_frag functions below provide a simple allocation framework for
5107  * page fragments.  This is used by the network stack and network device
5108  * drivers to provide a backing region of memory for use as either an
5109  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5110  */
5111 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5112 					     gfp_t gfp_mask)
5113 {
5114 	struct page *page = NULL;
5115 	gfp_t gfp = gfp_mask;
5116 
5117 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5118 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5119 		    __GFP_NOMEMALLOC;
5120 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5121 				PAGE_FRAG_CACHE_MAX_ORDER);
5122 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5123 #endif
5124 	if (unlikely(!page))
5125 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5126 
5127 	nc->va = page ? page_address(page) : NULL;
5128 
5129 	return page;
5130 }
5131 
5132 void __page_frag_cache_drain(struct page *page, unsigned int count)
5133 {
5134 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5135 
5136 	if (page_ref_sub_and_test(page, count))
5137 		free_the_page(page, compound_order(page));
5138 }
5139 EXPORT_SYMBOL(__page_frag_cache_drain);
5140 
5141 void *page_frag_alloc_align(struct page_frag_cache *nc,
5142 		      unsigned int fragsz, gfp_t gfp_mask,
5143 		      unsigned int align_mask)
5144 {
5145 	unsigned int size = PAGE_SIZE;
5146 	struct page *page;
5147 	int offset;
5148 
5149 	if (unlikely(!nc->va)) {
5150 refill:
5151 		page = __page_frag_cache_refill(nc, gfp_mask);
5152 		if (!page)
5153 			return NULL;
5154 
5155 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5156 		/* if size can vary use size else just use PAGE_SIZE */
5157 		size = nc->size;
5158 #endif
5159 		/* Even if we own the page, we do not use atomic_set().
5160 		 * This would break get_page_unless_zero() users.
5161 		 */
5162 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5163 
5164 		/* reset page count bias and offset to start of new frag */
5165 		nc->pfmemalloc = page_is_pfmemalloc(page);
5166 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5167 		nc->offset = size;
5168 	}
5169 
5170 	offset = nc->offset - fragsz;
5171 	if (unlikely(offset < 0)) {
5172 		page = virt_to_page(nc->va);
5173 
5174 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5175 			goto refill;
5176 
5177 		if (unlikely(nc->pfmemalloc)) {
5178 			free_the_page(page, compound_order(page));
5179 			goto refill;
5180 		}
5181 
5182 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5183 		/* if the cache size can vary, use nc->size, else it is just PAGE_SIZE */
5184 		size = nc->size;
5185 #endif
5186 		/* OK, page count is 0, we can safely set it */
5187 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5188 
5189 		/* reset page count bias and offset to start of new frag */
5190 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5191 		offset = size - fragsz;
5192 	}
5193 
5194 	nc->pagecnt_bias--;
5195 	offset &= align_mask;
5196 	nc->offset = offset;
5197 
5198 	return nc->va + offset;
5199 }
5200 EXPORT_SYMBOL(page_frag_alloc_align);
5201 
5202 /*
5203  * Frees a page fragment allocated out of either a compound or order 0 page.
5204  */
5205 void page_frag_free(void *addr)
5206 {
5207 	struct page *page = virt_to_head_page(addr);
5208 
5209 	if (unlikely(put_page_testzero(page)))
5210 		free_the_page(page, compound_order(page));
5211 }
5212 EXPORT_SYMBOL(page_frag_free);
5213 
5214 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5215 		size_t size)
5216 {
5217 	if (addr) {
5218 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5219 		unsigned long used = addr + PAGE_ALIGN(size);
5220 
5221 		split_page(virt_to_page((void *)addr), order);
5222 		while (used < alloc_end) {
5223 			free_page(used);
5224 			used += PAGE_SIZE;
5225 		}
5226 	}
5227 	return (void *)addr;
5228 }
5229 
5230 /**
5231  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5232  * @size: the number of bytes to allocate
5233  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5234  *
5235  * This function is similar to alloc_pages(), except that it allocates the
5236  * minimum number of pages to satisfy the request.  alloc_pages() can only
5237  * allocate memory in power-of-two pages.
5238  *
5239  * This function is also limited by MAX_ORDER.
5240  *
5241  * Memory allocated by this function must be released by free_pages_exact().
5242  *
5243  * Return: pointer to the allocated area or %NULL in case of error.
5244  */
5245 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5246 {
5247 	unsigned int order = get_order(size);
5248 	unsigned long addr;
5249 
5250 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5251 		gfp_mask &= ~__GFP_COMP;
5252 
5253 	addr = __get_free_pages(gfp_mask, order);
5254 	return make_alloc_exact(addr, order, size);
5255 }
5256 EXPORT_SYMBOL(alloc_pages_exact);
5257 
5258 /**
5259  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5260  *			   pages on a node.
5261  * @nid: the preferred node ID where memory should be allocated
5262  * @size: the number of bytes to allocate
5263  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5264  *
5265  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5266  * back.
5267  *
5268  * Return: pointer to the allocated area or %NULL in case of error.
5269  */
5270 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5271 {
5272 	unsigned int order = get_order(size);
5273 	struct page *p;
5274 
5275 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5276 		gfp_mask &= ~__GFP_COMP;
5277 
5278 	p = alloc_pages_node(nid, gfp_mask, order);
5279 	if (!p)
5280 		return NULL;
5281 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5282 }
5283 
5284 /**
5285  * free_pages_exact - release memory allocated via alloc_pages_exact()
5286  * @virt: the value returned by alloc_pages_exact().
5287  * @size: size of allocation, same value as passed to alloc_pages_exact().
5288  *
5289  * Release the memory allocated by a previous call to alloc_pages_exact().
5290  */
5291 void free_pages_exact(void *virt, size_t size)
5292 {
5293 	unsigned long addr = (unsigned long)virt;
5294 	unsigned long end = addr + PAGE_ALIGN(size);
5295 
5296 	while (addr < end) {
5297 		free_page(addr);
5298 		addr += PAGE_SIZE;
5299 	}
5300 }
5301 EXPORT_SYMBOL(free_pages_exact);
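/*
 * Illustrative sketch (not part of this file): with 4 KiB pages, a 20 KiB
 * request via alloc_pages_exact() consumes five pages instead of the eight
 * an order-3 alloc_pages() call would pin; the same byte count must be
 * passed back to free_pages_exact().
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 20 * 1024);
 *	}
 */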
5302 
5303 /**
5304  * nr_free_zone_pages - count number of pages beyond high watermark
5305  * @offset: The zone index of the highest zone
5306  *
5307  * nr_free_zone_pages() counts the number of pages which are beyond the
5308  * high watermark within all zones at or below a given zone index.  For each
5309  * zone, the number of pages is calculated as:
5310  *
5311  *     nr_free_zone_pages = managed_pages - high_pages
5312  *
5313  * Return: number of pages beyond high watermark.
5314  */
5315 static unsigned long nr_free_zone_pages(int offset)
5316 {
5317 	struct zoneref *z;
5318 	struct zone *zone;
5319 
5320 	/* Just pick one node, since fallback list is circular */
5321 	unsigned long sum = 0;
5322 
5323 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5324 
5325 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5326 		unsigned long size = zone_managed_pages(zone);
5327 		unsigned long high = high_wmark_pages(zone);
5328 		if (size > high)
5329 			sum += size - high;
5330 	}
5331 
5332 	return sum;
5333 }
5334 
5335 /**
5336  * nr_free_buffer_pages - count number of pages beyond high watermark
5337  *
5338  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5339  * watermark within ZONE_DMA and ZONE_NORMAL.
5340  *
5341  * Return: number of pages beyond high watermark within ZONE_DMA and
5342  * ZONE_NORMAL.
5343  */
5344 unsigned long nr_free_buffer_pages(void)
5345 {
5346 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5347 }
5348 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5349 
5350 static inline void show_node(struct zone *zone)
5351 {
5352 	if (IS_ENABLED(CONFIG_NUMA))
5353 		printk("Node %d ", zone_to_nid(zone));
5354 }
5355 
5356 long si_mem_available(void)
5357 {
5358 	long available;
5359 	unsigned long pagecache;
5360 	unsigned long wmark_low = 0;
5361 	unsigned long pages[NR_LRU_LISTS];
5362 	unsigned long reclaimable;
5363 	struct zone *zone;
5364 	int lru;
5365 
5366 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5367 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5368 
5369 	for_each_zone(zone)
5370 		wmark_low += low_wmark_pages(zone);
5371 
5372 	/*
5373 	 * Estimate the amount of memory available for userspace allocations,
5374 	 * without causing swapping.
5375 	 */
5376 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5377 
5378 	/*
5379 	 * Not all the page cache can be freed, otherwise the system will
5380 	 * start swapping. Assume at least half of the page cache, or the
5381 	 * low watermark worth of cache, needs to stay.
5382 	 */
5383 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5384 	pagecache -= min(pagecache / 2, wmark_low);
5385 	available += pagecache;
5386 
5387 	/*
5388 	 * Part of the reclaimable slab and other kernel memory consists of
5389 	 * items that are in use, and cannot be freed. Cap this estimate at the
5390 	 * low watermark.
5391 	 */
5392 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5393 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5394 	available += reclaimable - min(reclaimable / 2, wmark_low);
5395 
5396 	if (available < 0)
5397 		available = 0;
5398 	return available;
5399 }
5400 EXPORT_SYMBOL_GPL(si_mem_available);
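/*
 * The estimate above boils down to (clamped to >= 0):
 *
 *	available = NR_FREE_PAGES - totalreserve_pages
 *		  + pagecache   - min(pagecache / 2,   wmark_low)
 *		  + reclaimable - min(reclaimable / 2, wmark_low)
 *
 * where pagecache is the active plus inactive file LRU, reclaimable is
 * reclaimable slab plus NR_KERNEL_MISC_RECLAIMABLE, and wmark_low is the
 * sum of all zones' low watermarks.
 */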
5401 
5402 void si_meminfo(struct sysinfo *val)
5403 {
5404 	val->totalram = totalram_pages();
5405 	val->sharedram = global_node_page_state(NR_SHMEM);
5406 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5407 	val->bufferram = nr_blockdev_pages();
5408 	val->totalhigh = totalhigh_pages();
5409 	val->freehigh = nr_free_highpages();
5410 	val->mem_unit = PAGE_SIZE;
5411 }
5412 
5413 EXPORT_SYMBOL(si_meminfo);
5414 
5415 #ifdef CONFIG_NUMA
5416 void si_meminfo_node(struct sysinfo *val, int nid)
5417 {
5418 	int zone_type;		/* needs to be signed */
5419 	unsigned long managed_pages = 0;
5420 	unsigned long managed_highpages = 0;
5421 	unsigned long free_highpages = 0;
5422 	pg_data_t *pgdat = NODE_DATA(nid);
5423 
5424 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5425 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5426 	val->totalram = managed_pages;
5427 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5428 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5429 #ifdef CONFIG_HIGHMEM
5430 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5431 		struct zone *zone = &pgdat->node_zones[zone_type];
5432 
5433 		if (is_highmem(zone)) {
5434 			managed_highpages += zone_managed_pages(zone);
5435 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5436 		}
5437 	}
5438 	val->totalhigh = managed_highpages;
5439 	val->freehigh = free_highpages;
5440 #else
5441 	val->totalhigh = managed_highpages;
5442 	val->freehigh = free_highpages;
5443 #endif
5444 	val->mem_unit = PAGE_SIZE;
5445 }
5446 #endif
5447 
5448 /*
5449  * Determine whether the node should be displayed or not, depending on whether
5450  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5451  */
5452 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5453 {
5454 	if (!(flags & SHOW_MEM_FILTER_NODES))
5455 		return false;
5456 
5457 	/*
5458 	 * No nodemask, i.e. the implicit memory NUMA policy. Do not bother with
5459 	 * the synchronization (read_mems_allowed_begin()) because we do not
5460 	 * need to be precise here.
5461 	 */
5462 	if (!nodemask)
5463 		nodemask = &cpuset_current_mems_allowed;
5464 
5465 	return !node_isset(nid, *nodemask);
5466 }
5467 
5468 #define K(x) ((x) << (PAGE_SHIFT-10))
5469 
5470 static void show_migration_types(unsigned char type)
5471 {
5472 	static const char types[MIGRATE_TYPES] = {
5473 		[MIGRATE_UNMOVABLE]	= 'U',
5474 		[MIGRATE_MOVABLE]	= 'M',
5475 		[MIGRATE_RECLAIMABLE]	= 'E',
5476 		[MIGRATE_HIGHATOMIC]	= 'H',
5477 #ifdef CONFIG_CMA
5478 		[MIGRATE_CMA]		= 'C',
5479 #endif
5480 #ifdef CONFIG_MEMORY_ISOLATION
5481 		[MIGRATE_ISOLATE]	= 'I',
5482 #endif
5483 	};
5484 	char tmp[MIGRATE_TYPES + 1];
5485 	char *p = tmp;
5486 	int i;
5487 
5488 	for (i = 0; i < MIGRATE_TYPES; i++) {
5489 		if (type & (1 << i))
5490 			*p++ = types[i];
5491 	}
5492 
5493 	*p = '\0';
5494 	printk(KERN_CONT "(%s) ", tmp);
5495 }
5496 
5497 /*
5498  * Show the free area list and a per-zone breakdown of free pages by order
5499  * and migratetype, together with the system-wide memory counters (used e.g.
5500  * by the SysRq memory dump and the OOM killer report).
5501  *
5502  * Bits in @filter:
5503  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5504  *   cpuset.
5505  */
5506 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5507 {
5508 	unsigned long free_pcp = 0;
5509 	int cpu;
5510 	struct zone *zone;
5511 	pg_data_t *pgdat;
5512 
5513 	for_each_populated_zone(zone) {
5514 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5515 			continue;
5516 
5517 		for_each_online_cpu(cpu)
5518 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5519 	}
5520 
5521 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5522 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5523 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5524 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5525 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5526 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5527 		global_node_page_state(NR_ACTIVE_ANON),
5528 		global_node_page_state(NR_INACTIVE_ANON),
5529 		global_node_page_state(NR_ISOLATED_ANON),
5530 		global_node_page_state(NR_ACTIVE_FILE),
5531 		global_node_page_state(NR_INACTIVE_FILE),
5532 		global_node_page_state(NR_ISOLATED_FILE),
5533 		global_node_page_state(NR_UNEVICTABLE),
5534 		global_node_page_state(NR_FILE_DIRTY),
5535 		global_node_page_state(NR_WRITEBACK),
5536 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5537 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5538 		global_node_page_state(NR_FILE_MAPPED),
5539 		global_node_page_state(NR_SHMEM),
5540 		global_node_page_state(NR_PAGETABLE),
5541 		global_zone_page_state(NR_BOUNCE),
5542 		global_zone_page_state(NR_FREE_PAGES),
5543 		free_pcp,
5544 		global_zone_page_state(NR_FREE_CMA_PAGES));
5545 
5546 	for_each_online_pgdat(pgdat) {
5547 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5548 			continue;
5549 
5550 		printk("Node %d"
5551 			" active_anon:%lukB"
5552 			" inactive_anon:%lukB"
5553 			" active_file:%lukB"
5554 			" inactive_file:%lukB"
5555 			" unevictable:%lukB"
5556 			" isolated(anon):%lukB"
5557 			" isolated(file):%lukB"
5558 			" mapped:%lukB"
5559 			" dirty:%lukB"
5560 			" writeback:%lukB"
5561 			" shmem:%lukB"
5562 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5563 			" shmem_thp: %lukB"
5564 			" shmem_pmdmapped: %lukB"
5565 			" anon_thp: %lukB"
5566 #endif
5567 			" writeback_tmp:%lukB"
5568 			" kernel_stack:%lukB"
5569 #ifdef CONFIG_SHADOW_CALL_STACK
5570 			" shadow_call_stack:%lukB"
5571 #endif
5572 			" pagetables:%lukB"
5573 			" all_unreclaimable? %s"
5574 			"\n",
5575 			pgdat->node_id,
5576 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5577 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5578 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5579 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5580 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5581 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5582 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5583 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5584 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5585 			K(node_page_state(pgdat, NR_WRITEBACK)),
5586 			K(node_page_state(pgdat, NR_SHMEM)),
5587 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5588 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
5589 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5590 			K(node_page_state(pgdat, NR_ANON_THPS)),
5591 #endif
5592 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5593 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
5594 #ifdef CONFIG_SHADOW_CALL_STACK
5595 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
5596 #endif
5597 			K(node_page_state(pgdat, NR_PAGETABLE)),
5598 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5599 				"yes" : "no");
5600 	}
5601 
5602 	for_each_populated_zone(zone) {
5603 		int i;
5604 
5605 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5606 			continue;
5607 
5608 		free_pcp = 0;
5609 		for_each_online_cpu(cpu)
5610 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5611 
5612 		show_node(zone);
5613 		printk(KERN_CONT
5614 			"%s"
5615 			" free:%lukB"
5616 			" min:%lukB"
5617 			" low:%lukB"
5618 			" high:%lukB"
5619 			" reserved_highatomic:%luKB"
5620 			" active_anon:%lukB"
5621 			" inactive_anon:%lukB"
5622 			" active_file:%lukB"
5623 			" inactive_file:%lukB"
5624 			" unevictable:%lukB"
5625 			" writepending:%lukB"
5626 			" present:%lukB"
5627 			" managed:%lukB"
5628 			" mlocked:%lukB"
5629 			" bounce:%lukB"
5630 			" free_pcp:%lukB"
5631 			" local_pcp:%ukB"
5632 			" free_cma:%lukB"
5633 			"\n",
5634 			zone->name,
5635 			K(zone_page_state(zone, NR_FREE_PAGES)),
5636 			K(min_wmark_pages(zone)),
5637 			K(low_wmark_pages(zone)),
5638 			K(high_wmark_pages(zone)),
5639 			K(zone->nr_reserved_highatomic),
5640 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5641 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5642 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5643 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5644 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5645 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5646 			K(zone->present_pages),
5647 			K(zone_managed_pages(zone)),
5648 			K(zone_page_state(zone, NR_MLOCK)),
5649 			K(zone_page_state(zone, NR_BOUNCE)),
5650 			K(free_pcp),
5651 			K(this_cpu_read(zone->pageset->pcp.count)),
5652 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5653 		printk("lowmem_reserve[]:");
5654 		for (i = 0; i < MAX_NR_ZONES; i++)
5655 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5656 		printk(KERN_CONT "\n");
5657 	}
5658 
5659 	for_each_populated_zone(zone) {
5660 		unsigned int order;
5661 		unsigned long nr[MAX_ORDER], flags, total = 0;
5662 		unsigned char types[MAX_ORDER];
5663 
5664 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5665 			continue;
5666 		show_node(zone);
5667 		printk(KERN_CONT "%s: ", zone->name);
5668 
5669 		spin_lock_irqsave(&zone->lock, flags);
5670 		for (order = 0; order < MAX_ORDER; order++) {
5671 			struct free_area *area = &zone->free_area[order];
5672 			int type;
5673 
5674 			nr[order] = area->nr_free;
5675 			total += nr[order] << order;
5676 
5677 			types[order] = 0;
5678 			for (type = 0; type < MIGRATE_TYPES; type++) {
5679 				if (!free_area_empty(area, type))
5680 					types[order] |= 1 << type;
5681 			}
5682 		}
5683 		spin_unlock_irqrestore(&zone->lock, flags);
5684 		for (order = 0; order < MAX_ORDER; order++) {
5685 			printk(KERN_CONT "%lu*%lukB ",
5686 			       nr[order], K(1UL) << order);
5687 			if (nr[order])
5688 				show_migration_types(types[order]);
5689 		}
5690 		printk(KERN_CONT "= %lukB\n", K(total));
5691 	}
5692 
5693 	hugetlb_show_meminfo();
5694 
5695 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5696 
5697 	show_swap_cache_info();
5698 }
5699 
5700 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5701 {
5702 	zoneref->zone = zone;
5703 	zoneref->zone_idx = zone_idx(zone);
5704 }
5705 
5706 /*
5707  * Builds allocation fallback zone lists.
5708  *
5709  * Add all populated zones of a node to the zonelist.
5710  */
5711 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5712 {
5713 	struct zone *zone;
5714 	enum zone_type zone_type = MAX_NR_ZONES;
5715 	int nr_zones = 0;
5716 
5717 	do {
5718 		zone_type--;
5719 		zone = pgdat->node_zones + zone_type;
5720 		if (managed_zone(zone)) {
5721 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5722 			check_highest_zone(zone_type);
5723 		}
5724 	} while (zone_type);
5725 
5726 	return nr_zones;
5727 }
5728 
5729 #ifdef CONFIG_NUMA
5730 
5731 static int __parse_numa_zonelist_order(char *s)
5732 {
5733 	/*
5734 	 * We used to support different zonelist ordering modes but they turned
5735 	 * out to be just not useful. Let's keep the warning in place
5736 	 * if somebody still uses the cmd line parameter so that we do
5737 	 * not fail it silently.
5738 	 */
5739 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5740 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5741 		return -EINVAL;
5742 	}
5743 	return 0;
5744 }
5745 
5746 char numa_zonelist_order[] = "Node";
5747 
5748 /*
5749  * sysctl handler for numa_zonelist_order
5750  */
5751 int numa_zonelist_order_handler(struct ctl_table *table, int write,
5752 		void *buffer, size_t *length, loff_t *ppos)
5753 {
5754 	if (write)
5755 		return __parse_numa_zonelist_order(buffer);
5756 	return proc_dostring(table, write, buffer, length, ppos);
5757 }
5758 
5759 
5760 #define MAX_NODE_LOAD (nr_online_nodes)
5761 static int node_load[MAX_NUMNODES];
5762 
5763 /**
5764  * find_next_best_node - find the next node that should appear in a given node's fallback list
5765  * @node: node whose fallback list we're appending
5766  * @used_node_mask: nodemask_t of already used nodes
5767  *
5768  * We use a number of factors to determine which is the next node that should
5769  * appear on a given node's fallback list.  The node should not have appeared
5770  * already in @node's fallback list, and it should be the next closest node
5771  * according to the distance array (which contains arbitrary distance values
5772  * from each node to each node in the system). We also prefer nodes with
5773  * no CPUs, since presumably they'll have very little allocation pressure
5774  * on them otherwise.
5775  *
5776  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5777  */
5778 static int find_next_best_node(int node, nodemask_t *used_node_mask)
5779 {
5780 	int n, val;
5781 	int min_val = INT_MAX;
5782 	int best_node = NUMA_NO_NODE;
5783 
5784 	/* Use the local node if we haven't already */
5785 	if (!node_isset(node, *used_node_mask)) {
5786 		node_set(node, *used_node_mask);
5787 		return node;
5788 	}
5789 
5790 	for_each_node_state(n, N_MEMORY) {
5791 
5792 		/* Don't want a node to appear more than once */
5793 		if (node_isset(n, *used_node_mask))
5794 			continue;
5795 
5796 		/* Use the distance array to find the distance */
5797 		val = node_distance(node, n);
5798 
5799 		/* Penalize nodes under us ("prefer the next node") */
5800 		val += (n < node);
5801 
5802 		/* Give preference to headless and unused nodes */
5803 		if (!cpumask_empty(cpumask_of_node(n)))
5804 			val += PENALTY_FOR_NODE_WITH_CPUS;
5805 
5806 		/* Slight preference for less loaded node */
5807 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5808 		val += node_load[n];
5809 
5810 		if (val < min_val) {
5811 			min_val = val;
5812 			best_node = n;
5813 		}
5814 	}
5815 
5816 	if (best_node >= 0)
5817 		node_set(best_node, *used_node_mask);
5818 
5819 	return best_node;
5820 }
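/*
 * Effective score computed above, shown only as an illustration of the
 * weighting (lower is better):
 *
 *	val = (node_distance(node, n)
 *	       + (n < node)
 *	       + (cpumask_empty(cpumask_of_node(n)) ? 0 : PENALTY_FOR_NODE_WITH_CPUS))
 *	      * MAX_NODE_LOAD * MAX_NUMNODES
 *	      + node_load[n];
 *
 * Distance dominates, CPU-less nodes win ties, and node_load[] only breaks
 * ties between otherwise identical candidates.
 */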
5821 
5822 
5823 /*
5824  * Build zonelists ordered by node and zones within node.
5825  * This results in maximum locality--normal zone overflows into local
5826  * DMA zone, if any--but risks exhausting DMA zone.
5827  */
5828 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5829 		unsigned nr_nodes)
5830 {
5831 	struct zoneref *zonerefs;
5832 	int i;
5833 
5834 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5835 
5836 	for (i = 0; i < nr_nodes; i++) {
5837 		int nr_zones;
5838 
5839 		pg_data_t *node = NODE_DATA(node_order[i]);
5840 
5841 		nr_zones = build_zonerefs_node(node, zonerefs);
5842 		zonerefs += nr_zones;
5843 	}
5844 	zonerefs->zone = NULL;
5845 	zonerefs->zone_idx = 0;
5846 }
5847 
5848 /*
5849  * Build gfp_thisnode zonelists
5850  */
5851 static void build_thisnode_zonelists(pg_data_t *pgdat)
5852 {
5853 	struct zoneref *zonerefs;
5854 	int nr_zones;
5855 
5856 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5857 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5858 	zonerefs += nr_zones;
5859 	zonerefs->zone = NULL;
5860 	zonerefs->zone_idx = 0;
5861 }
5862 
5863 /*
5864  * Build zonelists ordered by zone and nodes within zones.
5865  * This results in conserving DMA zone[s] until all Normal memory is
5866  * exhausted, but results in overflowing to remote node while memory
5867  * may still exist in local DMA zone.
5868  */
5869 
5870 static void build_zonelists(pg_data_t *pgdat)
5871 {
5872 	static int node_order[MAX_NUMNODES];
5873 	int node, load, nr_nodes = 0;
5874 	nodemask_t used_mask = NODE_MASK_NONE;
5875 	int local_node, prev_node;
5876 
5877 	/* NUMA-aware ordering of nodes */
5878 	local_node = pgdat->node_id;
5879 	load = nr_online_nodes;
5880 	prev_node = local_node;
5881 
5882 	memset(node_order, 0, sizeof(node_order));
5883 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5884 		/*
5885 		 * We don't want to pressure a particular node.
5886 		 * So add a penalty to the first node in the same
5887 		 * distance group to make it round-robin.
5888 		 */
5889 		if (node_distance(local_node, node) !=
5890 		    node_distance(local_node, prev_node))
5891 			node_load[node] = load;
5892 
5893 		node_order[nr_nodes++] = node;
5894 		prev_node = node;
5895 		load--;
5896 	}
5897 
5898 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5899 	build_thisnode_zonelists(pgdat);
5900 }
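/*
 * Resulting order, as a sketch: on a two-node system, node 0's
 * ZONELIST_FALLBACK contains node 0's managed zones from highest to
 * lowest, followed by node 1's, while its ZONELIST_NOFALLBACK contains
 * node 0's zones only.
 */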
5901 
5902 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5903 /*
5904  * Return node id of node used for "local" allocations.
5905  * I.e., the first node id of the first zone in the given node's generic zonelist.
5906  * Used for initializing percpu 'numa_mem', which is used primarily
5907  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5908  */
5909 int local_memory_node(int node)
5910 {
5911 	struct zoneref *z;
5912 
5913 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5914 				   gfp_zone(GFP_KERNEL),
5915 				   NULL);
5916 	return zone_to_nid(z->zone);
5917 }
5918 #endif
5919 
5920 static void setup_min_unmapped_ratio(void);
5921 static void setup_min_slab_ratio(void);
5922 #else	/* CONFIG_NUMA */
5923 
5924 static void build_zonelists(pg_data_t *pgdat)
5925 {
5926 	int node, local_node;
5927 	struct zoneref *zonerefs;
5928 	int nr_zones;
5929 
5930 	local_node = pgdat->node_id;
5931 
5932 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5933 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5934 	zonerefs += nr_zones;
5935 
5936 	/*
5937 	 * Now we build the zonelist so that it contains the zones
5938 	 * of all the other nodes.
5939 	 * We don't want to pressure a particular node, so when
5940 	 * building the zones for node N, we make sure that the
5941 	 * zones coming right after the local ones are those from
5942 	 * node N+1, wrapping around past the last node back to node 0.
5943 	 */
5944 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5945 		if (!node_online(node))
5946 			continue;
5947 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5948 		zonerefs += nr_zones;
5949 	}
5950 	for (node = 0; node < local_node; node++) {
5951 		if (!node_online(node))
5952 			continue;
5953 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5954 		zonerefs += nr_zones;
5955 	}
5956 
5957 	zonerefs->zone = NULL;
5958 	zonerefs->zone_idx = 0;
5959 }
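/*
 * Ordering sketch for the !NUMA case above: for local node N the fallback
 * zonelist visits N first, then N+1, N+2, ... up to the last online node,
 * and finally wraps around through 0 .. N-1, adding each node's managed
 * zones from highest to lowest.
 */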
5960 
5961 #endif	/* CONFIG_NUMA */
5962 
5963 /*
5964  * Boot pageset table. One per cpu which is going to be used for all
5965  * zones and all nodes. The parameters will be set in such a way
5966  * that an item put on a list will immediately be handed over to
5967  * the buddy list. This is safe since pageset manipulation is done
5968  * with interrupts disabled.
5969  *
5970  * The boot_pagesets must be kept even after bootup is complete for
5971  * unused processors and/or zones. They do play a role for bootstrapping
5972  * hotplugged processors.
5973  *
5974  * zoneinfo_show() and maybe other functions do
5975  * not check if the processor is online before following the pageset pointer.
5976  * Other parts of the kernel may not check if the zone is available.
5977  */
5978 static void pageset_init(struct per_cpu_pageset *p);
5979 /* These effectively disable the pcplists in the boot pageset completely */
5980 #define BOOT_PAGESET_HIGH	0
5981 #define BOOT_PAGESET_BATCH	1
5982 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5983 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5984 
5985 static void __build_all_zonelists(void *data)
5986 {
5987 	int nid;
5988 	int __maybe_unused cpu;
5989 	pg_data_t *self = data;
5990 	static DEFINE_SPINLOCK(lock);
5991 
5992 	spin_lock(&lock);
5993 
5994 #ifdef CONFIG_NUMA
5995 	memset(node_load, 0, sizeof(node_load));
5996 #endif
5997 
5998 	/*
5999 	 * This node was hot-added and no memory is yet present. So just
6000 	 * building zonelists is fine - no need to touch other nodes.
6001 	 */
6002 	if (self && !node_online(self->node_id)) {
6003 		build_zonelists(self);
6004 	} else {
6005 		for_each_online_node(nid) {
6006 			pg_data_t *pgdat = NODE_DATA(nid);
6007 
6008 			build_zonelists(pgdat);
6009 		}
6010 
6011 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6012 		/*
6013 		 * We now know the "local memory node" for each node--
6014 		 * i.e., the node of the first zone in the generic zonelist.
6015 		 * Set up numa_mem percpu variable for on-line cpus.  During
6016 		 * boot, only the boot cpu should be on-line;  we'll init the
6017 		 * secondary cpus' numa_mem as they come on-line.  During
6018 		 * node/memory hotplug, we'll fixup all on-line cpus.
6019 		 */
6020 		for_each_online_cpu(cpu)
6021 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6022 #endif
6023 	}
6024 
6025 	spin_unlock(&lock);
6026 }
6027 
6028 static noinline void __init
6029 build_all_zonelists_init(void)
6030 {
6031 	int cpu;
6032 
6033 	__build_all_zonelists(NULL);
6034 
6035 	/*
6036 	 * Initialize the boot_pagesets that are going to be used
6037 	 * for bootstrapping processors. The real pagesets for
6038 	 * each zone will be allocated later when the per cpu
6039 	 * allocator is available.
6040 	 *
6041 	 * boot_pagesets are used also for bootstrapping offline
6042 	 * cpus if the system is already booted because the pagesets
6043 	 * are needed to initialize allocators on a specific cpu too.
6044 	 * F.e. the percpu allocator needs the page allocator which
6045 	 * needs the percpu allocator in order to allocate its pagesets
6046 	 * (a chicken-egg dilemma).
6047 	 */
6048 	for_each_possible_cpu(cpu)
6049 		pageset_init(&per_cpu(boot_pageset, cpu));
6050 
6051 	mminit_verify_zonelist();
6052 	cpuset_init_current_mems_allowed();
6053 }
6054 
6055 /*
6056  * Rebuild the zonelists; called during boot and again on memory hotplug.
6057  *
6058  * __ref due to call of __init annotated helper build_all_zonelists_init
6059  * [protected by SYSTEM_BOOTING].
6060  */
6061 void __ref build_all_zonelists(pg_data_t *pgdat)
6062 {
6063 	unsigned long vm_total_pages;
6064 
6065 	if (system_state == SYSTEM_BOOTING) {
6066 		build_all_zonelists_init();
6067 	} else {
6068 		__build_all_zonelists(pgdat);
6069 		/* cpuset refresh routine should be here */
6070 	}
6071 	/* Get the number of free pages beyond high watermark in all zones. */
6072 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6073 	/*
6074 	 * Disable grouping by mobility if the number of pages in the
6075 	 * system is too low to allow the mechanism to work. It would be
6076 	 * more accurate, but expensive to check per-zone. This check is
6077 	 * made on memory-hotadd so a system can start with mobility
6078 	 * disabled and enable it later
6079 	 */
6080 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6081 		page_group_by_mobility_disabled = 1;
6082 	else
6083 		page_group_by_mobility_disabled = 0;
6084 
6085 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6086 		nr_online_nodes,
6087 		page_group_by_mobility_disabled ? "off" : "on",
6088 		vm_total_pages);
6089 #ifdef CONFIG_NUMA
6090 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6091 #endif
6092 }
6093 
6094 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6095 static bool __meminit
6096 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6097 {
6098 	static struct memblock_region *r;
6099 
6100 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6101 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6102 			for_each_mem_region(r) {
6103 				if (*pfn < memblock_region_memory_end_pfn(r))
6104 					break;
6105 			}
6106 		}
6107 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6108 		    memblock_is_mirror(r)) {
6109 			*pfn = memblock_region_memory_end_pfn(r);
6110 			return true;
6111 		}
6112 	}
6113 	return false;
6114 }
6115 
6116 /*
6117  * Initially all pages are reserved - free ones are freed
6118  * up by memblock_free_all() once the early boot process is
6119  * done. Non-atomic initialization, single-pass.
6120  *
6121  * All aligned pageblocks are initialized to the specified migratetype
6122  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6123  * zone stats (e.g., nr_isolate_pageblock) are touched.
6124  */
6125 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6126 		unsigned long start_pfn, unsigned long zone_end_pfn,
6127 		enum meminit_context context,
6128 		struct vmem_altmap *altmap, int migratetype)
6129 {
6130 	unsigned long pfn, end_pfn = start_pfn + size;
6131 	struct page *page;
6132 
6133 	if (highest_memmap_pfn < end_pfn - 1)
6134 		highest_memmap_pfn = end_pfn - 1;
6135 
6136 #ifdef CONFIG_ZONE_DEVICE
6137 	/*
6138 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6139 	 * memory. We limit the total number of pages to initialize to just
6140 	 * those that might contain the memory mapping. We will defer the
6141 	 * ZONE_DEVICE page initialization until after we have released
6142 	 * the hotplug lock.
6143 	 */
6144 	if (zone == ZONE_DEVICE) {
6145 		if (!altmap)
6146 			return;
6147 
6148 		if (start_pfn == altmap->base_pfn)
6149 			start_pfn += altmap->reserve;
6150 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6151 	}
6152 #endif
6153 
6154 	for (pfn = start_pfn; pfn < end_pfn; ) {
6155 		/*
6156 		 * There can be holes in boot-time mem_map[]s handed to this
6157 		 * function.  They do not exist on hotplugged memory.
6158 		 */
6159 		if (context == MEMINIT_EARLY) {
6160 			if (overlap_memmap_init(zone, &pfn))
6161 				continue;
6162 			if (defer_init(nid, pfn, zone_end_pfn))
6163 				break;
6164 		}
6165 
6166 		page = pfn_to_page(pfn);
6167 		__init_single_page(page, pfn, zone, nid);
6168 		if (context == MEMINIT_HOTPLUG)
6169 			__SetPageReserved(page);
6170 
6171 		/*
6172 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6173 		 * such that unmovable allocations won't be scattered all
6174 		 * over the place during system boot.
6175 		 */
6176 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6177 			set_pageblock_migratetype(page, migratetype);
6178 			cond_resched();
6179 		}
6180 		pfn++;
6181 	}
6182 }
6183 
6184 #ifdef CONFIG_ZONE_DEVICE
6185 void __ref memmap_init_zone_device(struct zone *zone,
6186 				   unsigned long start_pfn,
6187 				   unsigned long nr_pages,
6188 				   struct dev_pagemap *pgmap)
6189 {
6190 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6191 	struct pglist_data *pgdat = zone->zone_pgdat;
6192 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6193 	unsigned long zone_idx = zone_idx(zone);
6194 	unsigned long start = jiffies;
6195 	int nid = pgdat->node_id;
6196 
6197 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6198 		return;
6199 
6200 	/*
6201 	 * The call to memmap_init_zone should have already taken care
6202 	 * of the pages reserved for the memmap, so we can just jump to
6203 	 * the end of that region and start processing the device pages.
6204 	 */
6205 	if (altmap) {
6206 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6207 		nr_pages = end_pfn - start_pfn;
6208 	}
6209 
6210 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6211 		struct page *page = pfn_to_page(pfn);
6212 
6213 		__init_single_page(page, pfn, zone_idx, nid);
6214 
6215 		/*
6216 		 * Mark page reserved as it will need to wait for onlining
6217 		 * phase for it to be fully associated with a zone.
6218 		 *
6219 		 * We can use the non-atomic __set_bit operation for setting
6220 		 * the flag as we are still initializing the pages.
6221 		 */
6222 		__SetPageReserved(page);
6223 
6224 		/*
6225 		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6226 		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6227 		 * ever freed or placed on a driver-private list.
6228 		 */
6229 		page->pgmap = pgmap;
6230 		page->zone_device_data = NULL;
6231 
6232 		/*
6233 		 * Mark the block movable so that blocks are reserved for
6234 		 * movable at startup. This will force kernel allocations
6235 		 * to reserve their blocks rather than leaking throughout
6236 		 * the address space during boot when many long-lived
6237 		 * kernel allocations are made.
6238 		 *
6239 		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6240 		 * because this is done early in section_activate()
6241 		 */
6242 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6243 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6244 			cond_resched();
6245 		}
6246 	}
6247 
6248 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6249 		nr_pages, jiffies_to_msecs(jiffies - start));
6250 }
6251 
6252 #endif
6253 static void __meminit zone_init_free_lists(struct zone *zone)
6254 {
6255 	unsigned int order, t;
6256 	for_each_migratetype_order(order, t) {
6257 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6258 		zone->free_area[order].nr_free = 0;
6259 	}
6260 }
6261 
6262 void __meminit __weak memmap_init_zone(struct zone *zone)
6263 {
6264 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6265 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6266 	int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6267 	unsigned long start_pfn, end_pfn;
6268 
6269 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6270 		start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6271 		end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6272 
6273 		if (end_pfn > start_pfn)
6274 			memmap_init_range(end_pfn - start_pfn, nid,
6275 					zone_id, start_pfn, zone_end_pfn,
6276 					MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6277 	}
6278 }
6279 
6280 static int zone_batchsize(struct zone *zone)
6281 {
6282 #ifdef CONFIG_MMU
6283 	int batch;
6284 
6285 	/*
6286 	 * The per-cpu-pages pools are set to around 1/1024th of the
6287 	 * size of the zone.
6288 	 */
6289 	batch = zone_managed_pages(zone) / 1024;
6290 	/* But no more than a meg. */
6291 	if (batch * PAGE_SIZE > 1024 * 1024)
6292 		batch = (1024 * 1024) / PAGE_SIZE;
6293 	batch /= 4;		/* We effectively *= 4 below */
6294 	if (batch < 1)
6295 		batch = 1;
6296 
6297 	/*
6298 	 * Clamp the batch to a 2^n - 1 value. Having a power
6299 	 * of 2 value was found to be more likely to have
6300 	 * suboptimal cache aliasing properties in some cases.
6301 	 *
6302 	 * For example if 2 tasks are alternately allocating
6303 	 * batches of pages, one task can end up with a lot
6304 	 * of pages of one half of the possible page colors
6305 	 * and the other with pages of the other colors.
6306 	 */
6307 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6308 
6309 	return batch;
6310 
6311 #else
6312 	/* The deferral and batching of frees should be suppressed under NOMMU
6313 	 * conditions.
6314 	 *
6315 	 * The problem is that NOMMU needs to be able to allocate large chunks
6316 	 * of contiguous memory as there's no hardware page translation to
6317 	 * assemble apparent contiguous memory from discontiguous pages.
6318 	 *
6319 	 * Queueing large contiguous runs of pages for batching, however,
6320 	 * causes the pages to actually be freed in smaller chunks.  As there
6321 	 * can be a significant delay between the individual batches being
6322 	 * recycled, this leads to the once large chunks of space being
6323 	 * fragmented and becoming unavailable for high-order allocations.
6324 	 */
6325 	return 0;
6326 #endif
6327 }
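/*
 * Worked example for the MMU case above, assuming 4 KiB pages and a zone
 * with 1048576 managed pages (4 GiB): 1048576 / 1024 = 1024 pages, which
 * exceeds 1 MiB and is capped to 256, then divided by 4 to 64, and finally
 * rounddown_pow_of_two(64 + 32) - 1 = 63.
 */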
6328 
6329 /*
6330  * pcp->high and pcp->batch values are related and generally batch is lower
6331  * than high. They are also related to pcp->count such that count is lower
6332  * than high, and as soon as it reaches high, the pcplist is flushed.
6333  *
6334  * However, guaranteeing these relations at all times would require e.g. write
6335  * barriers here but also careful usage of read barriers at the read side, and
6336  * thus be prone to error and bad for performance. Thus the update only prevents
6337  * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6338  * can cope with those fields changing asynchronously, and fully trust only the
6339  * pcp->count field on the local CPU with interrupts disabled.
6340  *
6341  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6342  * outside of boot time (or some other assurance that no concurrent updaters
6343  * exist).
6344  */
6345 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6346 		unsigned long batch)
6347 {
6348 	WRITE_ONCE(pcp->batch, batch);
6349 	WRITE_ONCE(pcp->high, high);
6350 }
6351 
6352 static void pageset_init(struct per_cpu_pageset *p)
6353 {
6354 	struct per_cpu_pages *pcp;
6355 	int migratetype;
6356 
6357 	memset(p, 0, sizeof(*p));
6358 
6359 	pcp = &p->pcp;
6360 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6361 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
6362 
6363 	/*
6364 	 * Set batch and high values safe for a boot pageset. A true percpu
6365 	 * pageset's initialization will update them subsequently. Here we don't
6366 	 * need to be as careful as pageset_update() as nobody can access the
6367 	 * pageset yet.
6368 	 */
6369 	pcp->high = BOOT_PAGESET_HIGH;
6370 	pcp->batch = BOOT_PAGESET_BATCH;
6371 }
6372 
6373 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
6374 		unsigned long batch)
6375 {
6376 	struct per_cpu_pageset *p;
6377 	int cpu;
6378 
6379 	for_each_possible_cpu(cpu) {
6380 		p = per_cpu_ptr(zone->pageset, cpu);
6381 		pageset_update(&p->pcp, high, batch);
6382 	}
6383 }
6384 
6385 /*
6386  * Calculate and set new high and batch values for all per-cpu pagesets of a
6387  * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
6388  */
6389 static void zone_set_pageset_high_and_batch(struct zone *zone)
6390 {
6391 	unsigned long new_high, new_batch;
6392 
6393 	if (percpu_pagelist_fraction) {
6394 		new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
6395 		new_batch = max(1UL, new_high / 4);
6396 		if ((new_high / 4) > (PAGE_SHIFT * 8))
6397 			new_batch = PAGE_SHIFT * 8;
6398 	} else {
6399 		new_batch = zone_batchsize(zone);
6400 		new_high = 6 * new_batch;
6401 		new_batch = max(1UL, 1 * new_batch);
6402 	}
6403 
6404 	if (zone->pageset_high == new_high &&
6405 	    zone->pageset_batch == new_batch)
6406 		return;
6407 
6408 	zone->pageset_high = new_high;
6409 	zone->pageset_batch = new_batch;
6410 
6411 	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
6412 }
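/*
 * Worked example for the sysctl path above, assuming PAGE_SHIFT == 12,
 * 1048576 managed pages and percpu_pagelist_fraction == 8: new_high is
 * 131072 and, because 131072 / 4 exceeds PAGE_SHIFT * 8 = 96, new_batch is
 * capped at 96.  With the fraction unset, new_batch = zone_batchsize() and
 * new_high = 6 * new_batch.
 */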
6413 
6414 void __meminit setup_zone_pageset(struct zone *zone)
6415 {
6416 	struct per_cpu_pageset *p;
6417 	int cpu;
6418 
6419 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
6420 	for_each_possible_cpu(cpu) {
6421 		p = per_cpu_ptr(zone->pageset, cpu);
6422 		pageset_init(p);
6423 	}
6424 
6425 	zone_set_pageset_high_and_batch(zone);
6426 }
6427 
6428 /*
6429  * Allocate per cpu pagesets and initialize them.
6430  * Before this call only boot pagesets were available.
6431  */
6432 void __init setup_per_cpu_pageset(void)
6433 {
6434 	struct pglist_data *pgdat;
6435 	struct zone *zone;
6436 	int __maybe_unused cpu;
6437 
6438 	for_each_populated_zone(zone)
6439 		setup_zone_pageset(zone);
6440 
6441 #ifdef CONFIG_NUMA
6442 	/*
6443 	 * Unpopulated zones continue using the boot pagesets.
6444 	 * The numa stats for these pagesets need to be reset.
6445 	 * Otherwise, they will end up skewing the stats of
6446 	 * the nodes these zones are associated with.
6447 	 */
6448 	for_each_possible_cpu(cpu) {
6449 		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6450 		memset(pcp->vm_numa_stat_diff, 0,
6451 		       sizeof(pcp->vm_numa_stat_diff));
6452 	}
6453 #endif
6454 
6455 	for_each_online_pgdat(pgdat)
6456 		pgdat->per_cpu_nodestats =
6457 			alloc_percpu(struct per_cpu_nodestat);
6458 }
6459 
6460 static __meminit void zone_pcp_init(struct zone *zone)
6461 {
6462 	/*
6463 	 * per cpu subsystem is not up at this point. The following code
6464 	 * relies on the ability of the linker to provide the
6465 	 * offset of a (static) per cpu variable into the per cpu area.
6466 	 */
6467 	zone->pageset = &boot_pageset;
6468 	zone->pageset_high = BOOT_PAGESET_HIGH;
6469 	zone->pageset_batch = BOOT_PAGESET_BATCH;
6470 
6471 	if (populated_zone(zone))
6472 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
6473 			zone->name, zone->present_pages,
6474 					 zone_batchsize(zone));
6475 }
6476 
6477 void __meminit init_currently_empty_zone(struct zone *zone,
6478 					unsigned long zone_start_pfn,
6479 					unsigned long size)
6480 {
6481 	struct pglist_data *pgdat = zone->zone_pgdat;
6482 	int zone_idx = zone_idx(zone) + 1;
6483 
6484 	if (zone_idx > pgdat->nr_zones)
6485 		pgdat->nr_zones = zone_idx;
6486 
6487 	zone->zone_start_pfn = zone_start_pfn;
6488 
6489 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
6490 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
6491 			pgdat->node_id,
6492 			(unsigned long)zone_idx(zone),
6493 			zone_start_pfn, (zone_start_pfn + size));
6494 
6495 	zone_init_free_lists(zone);
6496 	zone->initialized = 1;
6497 }
6498 
6499 /**
6500  * get_pfn_range_for_nid - Return the start and end page frames for a node
6501  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6502  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6503  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6504  *
6505  * It returns the start and end page frame of a node based on information
6506  * provided by memblock_set_node(). If called for a node
6507  * with no available memory, the start and end
6508  * PFNs will both be 0.
6509  */
6510 void __init get_pfn_range_for_nid(unsigned int nid,
6511 			unsigned long *start_pfn, unsigned long *end_pfn)
6512 {
6513 	unsigned long this_start_pfn, this_end_pfn;
6514 	int i;
6515 
6516 	*start_pfn = -1UL;
6517 	*end_pfn = 0;
6518 
6519 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6520 		*start_pfn = min(*start_pfn, this_start_pfn);
6521 		*end_pfn = max(*end_pfn, this_end_pfn);
6522 	}
6523 
6524 	if (*start_pfn == -1UL)
6525 		*start_pfn = 0;
6526 }
6527 
6528 /*
6529  * This finds a zone that can be used for ZONE_MOVABLE pages. The
6530  * assumption is made that zones within a node are ordered in monotonic
6531  * assumption is made that zones within a node are ordered by monotonically
6532  * increasing memory addresses so that the "highest" populated zone is used.
6533 static void __init find_usable_zone_for_movable(void)
6534 {
6535 	int zone_index;
6536 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6537 		if (zone_index == ZONE_MOVABLE)
6538 			continue;
6539 
6540 		if (arch_zone_highest_possible_pfn[zone_index] >
6541 				arch_zone_lowest_possible_pfn[zone_index])
6542 			break;
6543 	}
6544 
6545 	VM_BUG_ON(zone_index == -1);
6546 	movable_zone = zone_index;
6547 }
6548 
6549 /*
6550  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6551  * because it is sized independently of the architecture. Unlike the other zones,
6552  * the starting point for ZONE_MOVABLE is not fixed. It may be different
6553  * in each node depending on the size of each node and how evenly kernelcore
6554  * is distributed. This helper function adjusts the zone ranges
6555  * provided by the architecture for a given node by using the end of the
6556  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6557  * zones within a node are ordered by monotonically increasing memory addresses.
6558  */
6559 static void __init adjust_zone_range_for_zone_movable(int nid,
6560 					unsigned long zone_type,
6561 					unsigned long node_start_pfn,
6562 					unsigned long node_end_pfn,
6563 					unsigned long *zone_start_pfn,
6564 					unsigned long *zone_end_pfn)
6565 {
6566 	/* Only adjust if ZONE_MOVABLE is on this node */
6567 	if (zone_movable_pfn[nid]) {
6568 		/* Size ZONE_MOVABLE */
6569 		if (zone_type == ZONE_MOVABLE) {
6570 			*zone_start_pfn = zone_movable_pfn[nid];
6571 			*zone_end_pfn = min(node_end_pfn,
6572 				arch_zone_highest_possible_pfn[movable_zone]);
6573 
6574 		/* Adjust for ZONE_MOVABLE starting within this range */
6575 		} else if (!mirrored_kernelcore &&
6576 			*zone_start_pfn < zone_movable_pfn[nid] &&
6577 			*zone_end_pfn > zone_movable_pfn[nid]) {
6578 			*zone_end_pfn = zone_movable_pfn[nid];
6579 
6580 		/* Check if this whole range is within ZONE_MOVABLE */
6581 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
6582 			*zone_start_pfn = *zone_end_pfn;
6583 	}
6584 }
6585 
6586 /*
6587  * Return the number of pages a zone spans in a node, including holes
6588  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6589  */
6590 static unsigned long __init zone_spanned_pages_in_node(int nid,
6591 					unsigned long zone_type,
6592 					unsigned long node_start_pfn,
6593 					unsigned long node_end_pfn,
6594 					unsigned long *zone_start_pfn,
6595 					unsigned long *zone_end_pfn)
6596 {
6597 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6598 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6599 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6600 	if (!node_start_pfn && !node_end_pfn)
6601 		return 0;
6602 
6603 	/* Get the start and end of the zone */
6604 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6605 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6606 	adjust_zone_range_for_zone_movable(nid, zone_type,
6607 				node_start_pfn, node_end_pfn,
6608 				zone_start_pfn, zone_end_pfn);
6609 
6610 	/* Check that this node has pages within the zone's required range */
6611 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6612 		return 0;
6613 
6614 	/* Move the zone boundaries inside the node if necessary */
6615 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6616 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6617 
6618 	/* Return the spanned pages */
6619 	return *zone_end_pfn - *zone_start_pfn;
6620 }
6621 
6622 /*
6623  * Return the number of page frames in holes within a range on a node. If nid
6624  * is MAX_NUMNODES, then all holes in the requested range will be accounted for.
6625  */
6626 unsigned long __init __absent_pages_in_range(int nid,
6627 				unsigned long range_start_pfn,
6628 				unsigned long range_end_pfn)
6629 {
6630 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
6631 	unsigned long start_pfn, end_pfn;
6632 	int i;
6633 
6634 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6635 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6636 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6637 		nr_absent -= end_pfn - start_pfn;
6638 	}
6639 	return nr_absent;
6640 }
6641 
6642 /**
6643  * absent_pages_in_range - Return number of page frames in holes within a range
6644  * @start_pfn: The start PFN to start searching for holes
6645  * @end_pfn: The end PFN to stop searching for holes
6646  *
6647  * Return: the number of page frames in memory holes within a range.
6648  */
6649 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6650 							unsigned long end_pfn)
6651 {
6652 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6653 }
6654 
6655 /* Return the number of page frames in holes in a zone on a node */
6656 static unsigned long __init zone_absent_pages_in_node(int nid,
6657 					unsigned long zone_type,
6658 					unsigned long node_start_pfn,
6659 					unsigned long node_end_pfn)
6660 {
6661 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6662 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6663 	unsigned long zone_start_pfn, zone_end_pfn;
6664 	unsigned long nr_absent;
6665 
6666 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6667 	if (!node_start_pfn && !node_end_pfn)
6668 		return 0;
6669 
6670 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6671 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6672 
6673 	adjust_zone_range_for_zone_movable(nid, zone_type,
6674 			node_start_pfn, node_end_pfn,
6675 			&zone_start_pfn, &zone_end_pfn);
6676 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6677 
6678 	/*
6679 	 * ZONE_MOVABLE handling.
6680 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6681 	 * and vice versa.
6682 	 */
6683 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6684 		unsigned long start_pfn, end_pfn;
6685 		struct memblock_region *r;
6686 
6687 		for_each_mem_region(r) {
6688 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
6689 					  zone_start_pfn, zone_end_pfn);
6690 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
6691 					zone_start_pfn, zone_end_pfn);
6692 
6693 			if (zone_type == ZONE_MOVABLE &&
6694 			    memblock_is_mirror(r))
6695 				nr_absent += end_pfn - start_pfn;
6696 
6697 			if (zone_type == ZONE_NORMAL &&
6698 			    !memblock_is_mirror(r))
6699 				nr_absent += end_pfn - start_pfn;
6700 		}
6701 	}
6702 
6703 	return nr_absent;
6704 }
6705 
6706 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
6707 						unsigned long node_start_pfn,
6708 						unsigned long node_end_pfn)
6709 {
6710 	unsigned long realtotalpages = 0, totalpages = 0;
6711 	enum zone_type i;
6712 
6713 	for (i = 0; i < MAX_NR_ZONES; i++) {
6714 		struct zone *zone = pgdat->node_zones + i;
6715 		unsigned long zone_start_pfn, zone_end_pfn;
6716 		unsigned long spanned, absent;
6717 		unsigned long size, real_size;
6718 
6719 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
6720 						     node_start_pfn,
6721 						     node_end_pfn,
6722 						     &zone_start_pfn,
6723 						     &zone_end_pfn);
6724 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
6725 						   node_start_pfn,
6726 						   node_end_pfn);
6727 
6728 		size = spanned;
6729 		real_size = size - absent;
6730 
6731 		if (size)
6732 			zone->zone_start_pfn = zone_start_pfn;
6733 		else
6734 			zone->zone_start_pfn = 0;
6735 		zone->spanned_pages = size;
6736 		zone->present_pages = real_size;
6737 
6738 		totalpages += size;
6739 		realtotalpages += real_size;
6740 	}
6741 
6742 	pgdat->node_spanned_pages = totalpages;
6743 	pgdat->node_present_pages = realtotalpages;
6744 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6745 							realtotalpages);
6746 }
6747 
6748 #ifndef CONFIG_SPARSEMEM
6749 /*
6750  * Calculate the size of the zone->pageblock_flags bitmap, rounded up to an
6751  * unsigned long. Start by making sure zonesize is a multiple of
6752  * pageblock_order by rounding up. Then use NR_PAGEBLOCK_BITS worth of bits
6753  * per pageblock, round what is now in bits up to the nearest long in bits,
6754  * and return the result in bytes.
6755  */
6756 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6757 {
6758 	unsigned long usemapsize;
6759 
6760 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6761 	usemapsize = roundup(zonesize, pageblock_nr_pages);
6762 	usemapsize = usemapsize >> pageblock_order;
6763 	usemapsize *= NR_PAGEBLOCK_BITS;
6764 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6765 
6766 	return usemapsize / 8;
6767 }
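/*
 * Worked example, assuming pageblock_order == 9 (512-page pageblocks) and
 * NR_PAGEBLOCK_BITS == 4: a zone of 1048576 pages starting on a pageblock
 * boundary covers 2048 pageblocks, needing 8192 bits, which rounds up to
 * 1024 bytes of pageblock flags.
 */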
6768 
6769 static void __ref setup_usemap(struct zone *zone)
6770 {
6771 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
6772 					       zone->spanned_pages);
6773 	zone->pageblock_flags = NULL;
6774 	if (usemapsize) {
6775 		zone->pageblock_flags =
6776 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6777 					    zone_to_nid(zone));
6778 		if (!zone->pageblock_flags)
6779 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6780 			      usemapsize, zone->name, zone_to_nid(zone));
6781 	}
6782 }
6783 #else
6784 static inline void setup_usemap(struct zone *zone) {}
6785 #endif /* CONFIG_SPARSEMEM */
6786 
6787 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6788 
6789 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6790 void __init set_pageblock_order(void)
6791 {
6792 	unsigned int order;
6793 
6794 	/* Check that pageblock_nr_pages has not already been setup */
6795 	if (pageblock_order)
6796 		return;
6797 
6798 	if (HPAGE_SHIFT > PAGE_SHIFT)
6799 		order = HUGETLB_PAGE_ORDER;
6800 	else
6801 		order = MAX_ORDER - 1;
6802 
6803 	/*
6804 	 * Assume the largest contiguous order of interest is a huge page.
6805 	 * This value may be variable depending on boot parameters on IA64 and
6806 	 * powerpc.
6807 	 */
6808 	pageblock_order = order;
6809 }
6810 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6811 
6812 /*
6813  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6814  * is unused as pageblock_order is set at compile-time. See
6815  * include/linux/pageblock-flags.h for the values of pageblock_order based on
6816  * the kernel config
6817  */
6818 void __init set_pageblock_order(void)
6819 {
6820 }
6821 
6822 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6823 
6824 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6825 						unsigned long present_pages)
6826 {
6827 	unsigned long pages = spanned_pages;
6828 
6829 	/*
6830 	 * Provide a more accurate estimation if there are holes within
6831 	 * the zone and SPARSEMEM is in use. If there are holes within the
6832 	 * zone, each populated memory region may cost us one or two extra
6833 		 * memmap pages due to alignment because the memmap pages for each
6834 		 * populated region may not be naturally aligned on a page boundary.
6835 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6836 	 */
6837 	if (spanned_pages > present_pages + (present_pages >> 4) &&
6838 	    IS_ENABLED(CONFIG_SPARSEMEM))
6839 		pages = present_pages;
6840 
6841 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6842 }
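/*
 * Rough worked example for calc_memmap_size() (illustrative only, assuming
 * 4K pages and a 64-byte struct page): a zone with spanned_pages =
 * present_pages = 1048576 (4GB) needs
 *
 *   PAGE_ALIGN(1048576 * 64) >> PAGE_SHIFT = 16384 pages (64MB) of memmap.
 *
 * If the zone were sparse enough that spanned_pages exceeded present_pages
 * by more than present_pages >> 4 (~6%) and SPARSEMEM is enabled, the
 * estimate would be based on present_pages instead.
 */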
6843 
6844 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6845 static void pgdat_init_split_queue(struct pglist_data *pgdat)
6846 {
6847 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6848 
6849 	spin_lock_init(&ds_queue->split_queue_lock);
6850 	INIT_LIST_HEAD(&ds_queue->split_queue);
6851 	ds_queue->split_queue_len = 0;
6852 }
6853 #else
6854 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6855 #endif
6856 
6857 #ifdef CONFIG_COMPACTION
6858 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6859 {
6860 	init_waitqueue_head(&pgdat->kcompactd_wait);
6861 }
6862 #else
6863 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6864 #endif
6865 
6866 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6867 {
6868 	pgdat_resize_init(pgdat);
6869 
6870 	pgdat_init_split_queue(pgdat);
6871 	pgdat_init_kcompactd(pgdat);
6872 
6873 	init_waitqueue_head(&pgdat->kswapd_wait);
6874 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
6875 
6876 	pgdat_page_ext_init(pgdat);
6877 	lruvec_init(&pgdat->__lruvec);
6878 }
6879 
6880 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6881 							unsigned long remaining_pages)
6882 {
6883 	atomic_long_set(&zone->managed_pages, remaining_pages);
6884 	zone_set_nid(zone, nid);
6885 	zone->name = zone_names[idx];
6886 	zone->zone_pgdat = NODE_DATA(nid);
6887 	spin_lock_init(&zone->lock);
6888 	zone_seqlock_init(zone);
6889 	zone_pcp_init(zone);
6890 }
6891 
6892 /*
6893  * Set up the zone data structures
6894  * - init pgdat internals
6895  * - init all zones belonging to this node
6896  *
6897  * NOTE: this function is only called during memory hotplug
6898  */
6899 #ifdef CONFIG_MEMORY_HOTPLUG
6900 void __ref free_area_init_core_hotplug(int nid)
6901 {
6902 	enum zone_type z;
6903 	pg_data_t *pgdat = NODE_DATA(nid);
6904 
6905 	pgdat_init_internals(pgdat);
6906 	for (z = 0; z < MAX_NR_ZONES; z++)
6907 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6908 }
6909 #endif
6910 
6911 /*
6912  * Set up the zone data structures:
6913  *   - mark all pages reserved
6914  *   - mark all memory queues empty
6915  *   - clear the memory bitmaps
6916  *
6917  * NOTE: pgdat should get zeroed by caller.
6918  * NOTE: this function is only called during early init.
6919  */
6920 static void __init free_area_init_core(struct pglist_data *pgdat)
6921 {
6922 	enum zone_type j;
6923 	int nid = pgdat->node_id;
6924 
6925 	pgdat_init_internals(pgdat);
6926 	pgdat->per_cpu_nodestats = &boot_nodestats;
6927 
6928 	for (j = 0; j < MAX_NR_ZONES; j++) {
6929 		struct zone *zone = pgdat->node_zones + j;
6930 		unsigned long size, freesize, memmap_pages;
6931 
6932 		size = zone->spanned_pages;
6933 		freesize = zone->present_pages;
6934 
6935 		/*
6936 		 * Adjust freesize so that it accounts for how much memory
6937 		 * is used by this zone for memmap. This affects the watermark
6938 		 * and per-cpu initialisations
6939 		 */
6940 		memmap_pages = calc_memmap_size(size, freesize);
6941 		if (!is_highmem_idx(j)) {
6942 			if (freesize >= memmap_pages) {
6943 				freesize -= memmap_pages;
6944 				if (memmap_pages)
6945 					printk(KERN_DEBUG
6946 					       "  %s zone: %lu pages used for memmap\n",
6947 					       zone_names[j], memmap_pages);
6948 			} else
6949 				pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6950 					zone_names[j], memmap_pages, freesize);
6951 		}
6952 
6953 		/* Account for reserved pages */
6954 		if (j == 0 && freesize > dma_reserve) {
6955 			freesize -= dma_reserve;
6956 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6957 					zone_names[0], dma_reserve);
6958 		}
6959 
6960 		if (!is_highmem_idx(j))
6961 			nr_kernel_pages += freesize;
6962 		/* Charge for highmem memmap if there are enough kernel pages */
6963 		else if (nr_kernel_pages > memmap_pages * 2)
6964 			nr_kernel_pages -= memmap_pages;
6965 		nr_all_pages += freesize;
6966 
6967 		/*
6968 		 * Set an approximate value for lowmem here, it will be adjusted
6969 		 * when the bootmem allocator frees pages into the buddy system.
6970 		 * And all highmem pages will be managed by the buddy system.
6971 		 */
6972 		zone_init_internals(zone, j, nid, freesize);
6973 
6974 		if (!size)
6975 			continue;
6976 
6977 		set_pageblock_order();
6978 		setup_usemap(zone);
6979 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
6980 		memmap_init_zone(zone);
6981 	}
6982 }
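/*
 * Example of the freesize accounting above (illustrative, assumed numbers):
 * a non-highmem zone with spanned = present = 1048576 pages (4GB, 4K pages)
 * and a 64-byte struct page loses 16384 pages to the memmap, so roughly
 * 1032192 pages are passed to zone_init_internals() as the initial
 * managed_pages estimate; for zone 0, dma_reserve is additionally subtracted.
 */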
6983 
6984 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6985 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6986 {
6987 	unsigned long __maybe_unused start = 0;
6988 	unsigned long __maybe_unused offset = 0;
6989 
6990 	/* Skip empty nodes */
6991 	if (!pgdat->node_spanned_pages)
6992 		return;
6993 
6994 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6995 	offset = pgdat->node_start_pfn - start;
6996 	/* ia64 gets its own node_mem_map, before this, without bootmem */
6997 	if (!pgdat->node_mem_map) {
6998 		unsigned long size, end;
6999 		struct page *map;
7000 
7001 		/*
7002 		 * The zone's endpoints aren't required to be MAX_ORDER
7003 		 * aligned but the node_mem_map endpoints must be in order
7004 		 * for the buddy allocator to function correctly.
7005 		 */
7006 		end = pgdat_end_pfn(pgdat);
7007 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7008 		size = (end - start) * sizeof(struct page);
7009 		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
7010 					  pgdat->node_id);
7011 		if (!map)
7012 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7013 			      size, pgdat->node_id);
7014 		pgdat->node_mem_map = map + offset;
7015 	}
7016 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7017 				__func__, pgdat->node_id, (unsigned long)pgdat,
7018 				(unsigned long)pgdat->node_mem_map);
7019 #ifndef CONFIG_NEED_MULTIPLE_NODES
7020 	/*
7021 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7022 	 */
7023 	if (pgdat == NODE_DATA(0)) {
7024 		mem_map = NODE_DATA(0)->node_mem_map;
7025 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7026 			mem_map -= offset;
7027 	}
7028 #endif
7029 }
7030 #else
7031 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
7032 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
7033 
7034 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7035 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7036 {
7037 	pgdat->first_deferred_pfn = ULONG_MAX;
7038 }
7039 #else
7040 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7041 #endif
7042 
7043 static void __init free_area_init_node(int nid)
7044 {
7045 	pg_data_t *pgdat = NODE_DATA(nid);
7046 	unsigned long start_pfn = 0;
7047 	unsigned long end_pfn = 0;
7048 
7049 	/* pg_data_t should be reset to zero when it's allocated */
7050 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7051 
7052 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7053 
7054 	pgdat->node_id = nid;
7055 	pgdat->node_start_pfn = start_pfn;
7056 	pgdat->per_cpu_nodestats = NULL;
7057 
7058 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7059 		(u64)start_pfn << PAGE_SHIFT,
7060 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7061 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7062 
7063 	alloc_node_mem_map(pgdat);
7064 	pgdat_set_deferred_range(pgdat);
7065 
7066 	free_area_init_core(pgdat);
7067 }
7068 
7069 void __init free_area_init_memoryless_node(int nid)
7070 {
7071 	free_area_init_node(nid);
7072 }
7073 
7074 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
7075 /*
7076  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
7077  * PageReserved(). Return the number of struct pages that were initialized.
7078  */
7079 static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
7080 {
7081 	unsigned long pfn;
7082 	u64 pgcnt = 0;
7083 
7084 	for (pfn = spfn; pfn < epfn; pfn++) {
7085 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
7086 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
7087 				+ pageblock_nr_pages - 1;
7088 			continue;
7089 		}
7090 		/*
7091 		 * Use a fake node/zone (0) for now. Some of these pages
7092 		 * (in memblock.reserved but not in memblock.memory) will
7093 		 * get re-initialized via reserve_bootmem_region() later.
7094 		 */
7095 		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
7096 		__SetPageReserved(pfn_to_page(pfn));
7097 		pgcnt++;
7098 	}
7099 
7100 	return pgcnt;
7101 }
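/*
 * Note on the skip above (illustrative, assuming pageblock_nr_pages = 512):
 * if pfn 1000 lies in a pageblock whose first pfn (512) is not valid, pfn is
 * set to 1023 and the loop increment moves it to 1024, the start of the next
 * pageblock, so invalid pageblocks are skipped wholesale.
 */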
7102 
7103 /*
7104  * Only struct pages that are backed by physical memory are zeroed and
7105  * initialized by going through __init_single_page(). But, there are some
7106  * struct pages which are reserved in memblock allocator and their fields
7107  * may be accessed (for example page_to_pfn() on some configuration accesses
7108  * flags). We must explicitly initialize those struct pages.
7109  *
7110  * This function also addresses a similar issue where struct pages are left
7111  * uninitialized because the physical address range is not covered by
7112  * memblock.memory or memblock.reserved. That could happen when memblock
7113  * layout is manually configured via memmap=, or when the highest physical
7114  * address (max_pfn) does not end on a section boundary.
7115  */
7116 static void __init init_unavailable_mem(void)
7117 {
7118 	phys_addr_t start, end;
7119 	u64 i, pgcnt;
7120 	phys_addr_t next = 0;
7121 
7122 	/*
7123 	 * Loop through unavailable ranges not covered by memblock.memory.
7124 	 */
7125 	pgcnt = 0;
7126 	for_each_mem_range(i, &start, &end) {
7127 		if (next < start)
7128 			pgcnt += init_unavailable_range(PFN_DOWN(next),
7129 							PFN_UP(start));
7130 		next = end;
7131 	}
7132 
7133 	/*
7134 	 * Early sections always have a fully populated memmap for the whole
7135 	 * section - see pfn_valid(). If the last section has holes at the
7136 	 * end and that section is marked "online", the memmap will be
7137 	 * considered initialized. Make sure that memmap has a well defined
7138 	 * state.
7139 	 */
7140 	pgcnt += init_unavailable_range(PFN_DOWN(next),
7141 					round_up(max_pfn, PAGES_PER_SECTION));
7142 
7143 	/*
7144 	 * Struct pages that do not have backing memory. This could be because
7145 	 * firmware is using some of this memory, or for some other reasons.
7146 	 */
7147 	if (pgcnt)
7148 		pr_info("Zeroed struct page in unavailable ranges: %lld pages\n", pgcnt);
7149 }
7150 #else
7151 static inline void __init init_unavailable_mem(void)
7152 {
7153 }
7154 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
7155 
7156 #if MAX_NUMNODES > 1
7157 /*
7158  * Figure out the number of possible node ids.
7159  */
7160 void __init setup_nr_node_ids(void)
7161 {
7162 	unsigned int highest;
7163 
7164 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7165 	nr_node_ids = highest + 1;
7166 }
7167 #endif
7168 
7169 /**
7170  * node_map_pfn_alignment - determine the maximum internode alignment
7171  *
7172  * This function should be called after node map is populated and sorted.
7173  * It calculates the maximum power of two alignment which can distinguish
7174  * all the nodes.
7175  *
7176  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7177  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7178  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7179  * shifted, 1GiB is enough and this function will indicate so.
7180  *
7181  * This is used to test whether pfn -> nid mapping of the chosen memory
7182  * model has fine enough granularity to avoid incorrect mapping for the
7183  * populated node map.
7184  *
7185  * Return: the determined alignment in pfn's.  0 if there is no alignment
7186  * requirement (single node).
7187  */
7188 unsigned long __init node_map_pfn_alignment(void)
7189 {
7190 	unsigned long accl_mask = 0, last_end = 0;
7191 	unsigned long start, end, mask;
7192 	int last_nid = NUMA_NO_NODE;
7193 	int i, nid;
7194 
7195 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7196 		if (!start || last_nid < 0 || last_nid == nid) {
7197 			last_nid = nid;
7198 			last_end = end;
7199 			continue;
7200 		}
7201 
7202 		/*
7203 		 * Start with a mask granular enough to pin-point to the
7204 		 * start pfn and tick off bits one-by-one until it becomes
7205 		 * too coarse to separate the current node from the last.
7206 		 */
7207 		mask = ~((1 << __ffs(start)) - 1);
7208 		while (mask && last_end <= (start & (mask << 1)))
7209 			mask <<= 1;
7210 
7211 		/* accumulate all internode masks */
7212 		accl_mask |= mask;
7213 	}
7214 
7215 	/* convert mask to number of pages */
7216 	return ~accl_mask + 1;
7217 }
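/*
 * Worked example (illustrative, 4K pages): node 0 covers pfns [0, 0x10000)
 * (256MiB) and node 1 starts at pfn 0x10000.  For node 1's first range,
 * __ffs(0x10000) = 16, so the initial mask clears the low 16 bits; widening
 * the mask by one more bit would drop bit 16 and fail the last_end test, so
 * the mask is never widened.  ~accl_mask + 1 = 0x10000 pfns, i.e. 256MiB
 * alignment, matching the 256MiB case described above.
 */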
7218 
7219 /**
7220  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7221  *
7222  * Return: the minimum PFN based on information provided via
7223  * memblock_set_node().
7224  */
7225 unsigned long __init find_min_pfn_with_active_regions(void)
7226 {
7227 	return PHYS_PFN(memblock_start_of_DRAM());
7228 }
7229 
7230 /*
7231  * early_calculate_totalpages()
7232  * Sum pages in active regions for movable zone.
7233  * Populate N_MEMORY for calculating usable_nodes.
7234  */
7235 static unsigned long __init early_calculate_totalpages(void)
7236 {
7237 	unsigned long totalpages = 0;
7238 	unsigned long start_pfn, end_pfn;
7239 	int i, nid;
7240 
7241 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7242 		unsigned long pages = end_pfn - start_pfn;
7243 
7244 		totalpages += pages;
7245 		if (pages)
7246 			node_set_state(nid, N_MEMORY);
7247 	}
7248 	return totalpages;
7249 }
7250 
7251 /*
7252  * Find the PFN the Movable zone begins in each node. Kernel memory
7253  * is spread evenly between nodes as long as the nodes have enough
7254  * memory. When they don't, some nodes will have more kernelcore than
7255  * others
7256  */
7257 static void __init find_zone_movable_pfns_for_nodes(void)
7258 {
7259 	int i, nid;
7260 	unsigned long usable_startpfn;
7261 	unsigned long kernelcore_node, kernelcore_remaining;
7262 	/* save the state before borrowing the nodemask */
7263 	nodemask_t saved_node_state = node_states[N_MEMORY];
7264 	unsigned long totalpages = early_calculate_totalpages();
7265 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7266 	struct memblock_region *r;
7267 
7268 	/* Need to find movable_zone earlier when movable_node is specified. */
7269 	find_usable_zone_for_movable();
7270 
7271 	/*
7272 	 * If movable_node is specified, ignore kernelcore and movablecore
7273 	 * options.
7274 	 */
7275 	if (movable_node_is_enabled()) {
7276 		for_each_mem_region(r) {
7277 			if (!memblock_is_hotpluggable(r))
7278 				continue;
7279 
7280 			nid = memblock_get_region_node(r);
7281 
7282 			usable_startpfn = PFN_DOWN(r->base);
7283 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7284 				min(usable_startpfn, zone_movable_pfn[nid]) :
7285 				usable_startpfn;
7286 		}
7287 
7288 		goto out2;
7289 	}
7290 
7291 	/*
7292 	 * If kernelcore=mirror is specified, ignore movablecore option
7293 	 */
7294 	if (mirrored_kernelcore) {
7295 		bool mem_below_4gb_not_mirrored = false;
7296 
7297 		for_each_mem_region(r) {
7298 			if (memblock_is_mirror(r))
7299 				continue;
7300 
7301 			nid = memblock_get_region_node(r);
7302 
7303 			usable_startpfn = memblock_region_memory_base_pfn(r);
7304 
7305 			if (usable_startpfn < 0x100000) {
7306 				mem_below_4gb_not_mirrored = true;
7307 				continue;
7308 			}
7309 
7310 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7311 				min(usable_startpfn, zone_movable_pfn[nid]) :
7312 				usable_startpfn;
7313 		}
7314 
7315 		if (mem_below_4gb_not_mirrored)
7316 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7317 
7318 		goto out2;
7319 	}
7320 
7321 	/*
7322 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7323 	 * amount of necessary memory.
7324 	 */
7325 	if (required_kernelcore_percent)
7326 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7327 				       10000UL;
7328 	if (required_movablecore_percent)
7329 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7330 					10000UL;
7331 
7332 	/*
7333 	 * If movablecore= was specified, calculate the size of kernelcore
7334 	 * that corresponds to it so that memory usable for any allocation
7335 	 * type is evenly spread. If both kernelcore and movablecore are
7336 	 * specified, then the value of kernelcore will be used for
7337 	 * required_kernelcore if it's greater than what movablecore would
7338 	 * have allowed.
7339 	 */
7340 	if (required_movablecore) {
7341 		unsigned long corepages;
7342 
7343 		/*
7344 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7345 		 * was requested by the user
7346 		 */
7347 		required_movablecore =
7348 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7349 		required_movablecore = min(totalpages, required_movablecore);
7350 		corepages = totalpages - required_movablecore;
7351 
7352 		required_kernelcore = max(required_kernelcore, corepages);
7353 	}
7354 
7355 	/*
7356 	 * If kernelcore was not specified or kernelcore size is larger
7357 	 * than totalpages, there is no ZONE_MOVABLE.
7358 	 */
7359 	if (!required_kernelcore || required_kernelcore >= totalpages)
7360 		goto out;
7361 
7362 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7363 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7364 
7365 restart:
7366 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7367 	kernelcore_node = required_kernelcore / usable_nodes;
7368 	for_each_node_state(nid, N_MEMORY) {
7369 		unsigned long start_pfn, end_pfn;
7370 
7371 		/*
7372 		 * Recalculate kernelcore_node if the division per node
7373 		 * now exceeds what is necessary to satisfy the requested
7374 		 * amount of memory for the kernel
7375 		 */
7376 		if (required_kernelcore < kernelcore_node)
7377 			kernelcore_node = required_kernelcore / usable_nodes;
7378 
7379 		/*
7380 		 * As the map is walked, we track how much memory is usable
7381 		 * by the kernel using kernelcore_remaining. When it is
7382 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7383 		 */
7384 		kernelcore_remaining = kernelcore_node;
7385 
7386 		/* Go through each range of PFNs within this node */
7387 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7388 			unsigned long size_pages;
7389 
7390 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7391 			if (start_pfn >= end_pfn)
7392 				continue;
7393 
7394 			/* Account for what is only usable for kernelcore */
7395 			if (start_pfn < usable_startpfn) {
7396 				unsigned long kernel_pages;
7397 				kernel_pages = min(end_pfn, usable_startpfn)
7398 								- start_pfn;
7399 
7400 				kernelcore_remaining -= min(kernel_pages,
7401 							kernelcore_remaining);
7402 				required_kernelcore -= min(kernel_pages,
7403 							required_kernelcore);
7404 
7405 				/* Continue if range is now fully accounted */
7406 				if (end_pfn <= usable_startpfn) {
7407 
7408 					/*
7409 					 * Push zone_movable_pfn to the end so
7410 					 * that if we have to rebalance
7411 					 * kernelcore across nodes, we will
7412 					 * not double account here
7413 					 */
7414 					zone_movable_pfn[nid] = end_pfn;
7415 					continue;
7416 				}
7417 				start_pfn = usable_startpfn;
7418 			}
7419 
7420 			/*
7421 			 * The usable PFN range for ZONE_MOVABLE is from
7422 			 * start_pfn->end_pfn. Calculate size_pages as the
7423 			 * number of pages used as kernelcore
7424 			 */
7425 			size_pages = end_pfn - start_pfn;
7426 			if (size_pages > kernelcore_remaining)
7427 				size_pages = kernelcore_remaining;
7428 			zone_movable_pfn[nid] = start_pfn + size_pages;
7429 
7430 			/*
7431 			 * Some kernelcore has been met, update counts and
7432 			 * break if the kernelcore for this node has been
7433 			 * satisfied
7434 			 */
7435 			required_kernelcore -= min(required_kernelcore,
7436 								size_pages);
7437 			kernelcore_remaining -= size_pages;
7438 			if (!kernelcore_remaining)
7439 				break;
7440 		}
7441 	}
7442 
7443 	/*
7444 	 * If there is still required_kernelcore, we do another pass with one
7445 	 * less node in the count. This will push zone_movable_pfn[nid] further
7446 	 * along on the nodes that still have memory until kernelcore is
7447 	 * satisfied
7448 	 */
7449 	usable_nodes--;
7450 	if (usable_nodes && required_kernelcore > usable_nodes)
7451 		goto restart;
7452 
7453 out2:
7454 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7455 	for (nid = 0; nid < MAX_NUMNODES; nid++)
7456 		zone_movable_pfn[nid] =
7457 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7458 
7459 out:
7460 	/* restore the node_state */
7461 	node_states[N_MEMORY] = saved_node_state;
7462 }
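/*
 * Illustrative example of the spreading above (assumed layout): with two
 * nodes of 4GB each (totalpages = 2097152, 4K pages) and kernelcore=2G
 * (required_kernelcore = 524288), kernelcore_node = 262144 pages per node.
 * Assuming all of each node's memory lies at or above the lowest possible
 * ZONE_MOVABLE pfn, zone_movable_pfn[] for each node ends up roughly 1GB
 * past that node's start (then rounded up to MAX_ORDER_NR_PAGES), leaving
 * about 3GB per node for ZONE_MOVABLE.
 */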
7463 
7464 /* Any regular or high memory on that node? */
7465 static void check_for_memory(pg_data_t *pgdat, int nid)
7466 {
7467 	enum zone_type zone_type;
7468 
7469 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7470 		struct zone *zone = &pgdat->node_zones[zone_type];
7471 		if (populated_zone(zone)) {
7472 			if (IS_ENABLED(CONFIG_HIGHMEM))
7473 				node_set_state(nid, N_HIGH_MEMORY);
7474 			if (zone_type <= ZONE_NORMAL)
7475 				node_set_state(nid, N_NORMAL_MEMORY);
7476 			break;
7477 		}
7478 	}
7479 }
7480 
7481 /*
7482  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
7483  * such cases we allow max_zone_pfn sorted in descending order.
7484  */
7485 bool __weak arch_has_descending_max_zone_pfns(void)
7486 {
7487 	return false;
7488 }
7489 
7490 /**
7491  * free_area_init - Initialise all pg_data_t and zone data
7492  * @max_zone_pfn: an array of max PFNs for each zone
7493  *
7494  * This will call free_area_init_node() for each active node in the system.
7495  * Using the page ranges provided by memblock_set_node(), the size of each
7496  * zone in each node and their holes is calculated. If the maximum PFN
7497  * between two adjacent zones match, it is assumed that the zone is empty.
7498  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7499  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7500  * starts where the previous one ended. For example, ZONE_DMA32 starts
7501  * at arch_max_dma_pfn.
7502  */
7503 void __init free_area_init(unsigned long *max_zone_pfn)
7504 {
7505 	unsigned long start_pfn, end_pfn;
7506 	int i, nid, zone;
7507 	bool descending;
7508 
7509 	/* Record where the zone boundaries are */
7510 	memset(arch_zone_lowest_possible_pfn, 0,
7511 				sizeof(arch_zone_lowest_possible_pfn));
7512 	memset(arch_zone_highest_possible_pfn, 0,
7513 				sizeof(arch_zone_highest_possible_pfn));
7514 
7515 	start_pfn = find_min_pfn_with_active_regions();
7516 	descending = arch_has_descending_max_zone_pfns();
7517 
7518 	for (i = 0; i < MAX_NR_ZONES; i++) {
7519 		if (descending)
7520 			zone = MAX_NR_ZONES - i - 1;
7521 		else
7522 			zone = i;
7523 
7524 		if (zone == ZONE_MOVABLE)
7525 			continue;
7526 
7527 		end_pfn = max(max_zone_pfn[zone], start_pfn);
7528 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
7529 		arch_zone_highest_possible_pfn[zone] = end_pfn;
7530 
7531 		start_pfn = end_pfn;
7532 	}
7533 
7534 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
7535 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7536 	find_zone_movable_pfns_for_nodes();
7537 
7538 	/* Print out the zone ranges */
7539 	pr_info("Zone ranges:\n");
7540 	for (i = 0; i < MAX_NR_ZONES; i++) {
7541 		if (i == ZONE_MOVABLE)
7542 			continue;
7543 		pr_info("  %-8s ", zone_names[i]);
7544 		if (arch_zone_lowest_possible_pfn[i] ==
7545 				arch_zone_highest_possible_pfn[i])
7546 			pr_cont("empty\n");
7547 		else
7548 			pr_cont("[mem %#018Lx-%#018Lx]\n",
7549 				(u64)arch_zone_lowest_possible_pfn[i]
7550 					<< PAGE_SHIFT,
7551 				((u64)arch_zone_highest_possible_pfn[i]
7552 					<< PAGE_SHIFT) - 1);
7553 	}
7554 
7555 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
7556 	pr_info("Movable zone start for each node\n");
7557 	for (i = 0; i < MAX_NUMNODES; i++) {
7558 		if (zone_movable_pfn[i])
7559 			pr_info("  Node %d: %#018Lx\n", i,
7560 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7561 	}
7562 
7563 	/*
7564 	 * Print out the early node map, and initialize the
7565 	 * subsection-map relative to active online memory ranges to
7566 	 * enable future "sub-section" extensions of the memory map.
7567 	 */
7568 	pr_info("Early memory node ranges\n");
7569 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7570 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7571 			(u64)start_pfn << PAGE_SHIFT,
7572 			((u64)end_pfn << PAGE_SHIFT) - 1);
7573 		subsection_map_init(start_pfn, end_pfn - start_pfn);
7574 	}
7575 
7576 	/* Initialise every node */
7577 	mminit_verify_pageflags_layout();
7578 	setup_nr_node_ids();
7579 	init_unavailable_mem();
7580 	for_each_online_node(nid) {
7581 		pg_data_t *pgdat = NODE_DATA(nid);
7582 		free_area_init_node(nid);
7583 
7584 		/* Any memory on that node */
7585 		if (pgdat->node_present_pages)
7586 			node_set_state(nid, N_MEMORY);
7587 		check_for_memory(pgdat, nid);
7588 	}
7589 }
7590 
7591 static int __init cmdline_parse_core(char *p, unsigned long *core,
7592 				     unsigned long *percent)
7593 {
7594 	unsigned long long coremem;
7595 	char *endptr;
7596 
7597 	if (!p)
7598 		return -EINVAL;
7599 
7600 	/* Value may be a percentage of total memory, otherwise bytes */
7601 	coremem = simple_strtoull(p, &endptr, 0);
7602 	if (*endptr == '%') {
7603 		/* Paranoid check for percent values greater than 100 */
7604 		WARN_ON(coremem > 100);
7605 
7606 		*percent = coremem;
7607 	} else {
7608 		coremem = memparse(p, &p);
7609 		/* Paranoid check that UL is enough for the coremem value */
7610 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7611 
7612 		*core = coremem >> PAGE_SHIFT;
7613 		*percent = 0UL;
7614 	}
7615 	return 0;
7616 }
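/*
 * Examples (illustrative): "kernelcore=512M" is parsed by memparse() and
 * stored as 512M >> PAGE_SHIFT = 131072 pages (with 4K pages) in *core,
 * while "kernelcore=30%" stores 30 in *percent and leaves *core untouched.
 */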
7617 
7618 /*
7619  * kernelcore=size sets the amount of memory for use for allocations that
7620  * cannot be reclaimed or migrated.
7621  */
7622 static int __init cmdline_parse_kernelcore(char *p)
7623 {
7624 	/* parse kernelcore=mirror */
7625 	if (parse_option_str(p, "mirror")) {
7626 		mirrored_kernelcore = true;
7627 		return 0;
7628 	}
7629 
7630 	return cmdline_parse_core(p, &required_kernelcore,
7631 				  &required_kernelcore_percent);
7632 }
7633 
7634 /*
7635  * movablecore=size sets the amount of memory for use for allocations that
7636  * can be reclaimed or migrated.
7637  */
7638 static int __init cmdline_parse_movablecore(char *p)
7639 {
7640 	return cmdline_parse_core(p, &required_movablecore,
7641 				  &required_movablecore_percent);
7642 }
7643 
7644 early_param("kernelcore", cmdline_parse_kernelcore);
7645 early_param("movablecore", cmdline_parse_movablecore);
7646 
7647 void adjust_managed_page_count(struct page *page, long count)
7648 {
7649 	atomic_long_add(count, &page_zone(page)->managed_pages);
7650 	totalram_pages_add(count);
7651 #ifdef CONFIG_HIGHMEM
7652 	if (PageHighMem(page))
7653 		totalhigh_pages_add(count);
7654 #endif
7655 }
7656 EXPORT_SYMBOL(adjust_managed_page_count);
7657 
7658 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7659 {
7660 	void *pos;
7661 	unsigned long pages = 0;
7662 
7663 	start = (void *)PAGE_ALIGN((unsigned long)start);
7664 	end = (void *)((unsigned long)end & PAGE_MASK);
7665 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7666 		struct page *page = virt_to_page(pos);
7667 		void *direct_map_addr;
7668 
7669 		/*
7670 		 * 'direct_map_addr' might be different from 'pos'
7671 		 * because some architectures' virt_to_page()
7672 		 * work with aliases.  Getting the direct map
7673 		 * address ensures that we get a _writeable_
7674 		 * alias for the memset().
7675 		 */
7676 		direct_map_addr = page_address(page);
7677 		/*
7678 		 * Perform a kasan-unchecked memset() since this memory
7679 		 * has not been initialized.
7680 		 */
7681 		direct_map_addr = kasan_reset_tag(direct_map_addr);
7682 		if ((unsigned int)poison <= 0xFF)
7683 			memset(direct_map_addr, poison, PAGE_SIZE);
7684 
7685 		free_reserved_page(page);
7686 	}
7687 
7688 	if (pages && s)
7689 		pr_info("Freeing %s memory: %ldK\n",
7690 			s, pages << (PAGE_SHIFT - 10));
7691 
7692 	return pages;
7693 }
7694 
7695 void __init mem_init_print_info(const char *str)
7696 {
7697 	unsigned long physpages, codesize, datasize, rosize, bss_size;
7698 	unsigned long init_code_size, init_data_size;
7699 
7700 	physpages = get_num_physpages();
7701 	codesize = _etext - _stext;
7702 	datasize = _edata - _sdata;
7703 	rosize = __end_rodata - __start_rodata;
7704 	bss_size = __bss_stop - __bss_start;
7705 	init_data_size = __init_end - __init_begin;
7706 	init_code_size = _einittext - _sinittext;
7707 
7708 	/*
7709 	 * Detect special cases and adjust section sizes accordingly:
7710 	 * 1) .init.* may be embedded into .data sections
7711 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
7712 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
7713 	 * 3) .rodata.* may be embedded into .text or .data sections.
7714 	 */
7715 #define adj_init_size(start, end, size, pos, adj) \
7716 	do { \
7717 		if (start <= pos && pos < end && size > adj) \
7718 			size -= adj; \
7719 	} while (0)
7720 
7721 	adj_init_size(__init_begin, __init_end, init_data_size,
7722 		     _sinittext, init_code_size);
7723 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7724 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7725 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7726 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7727 
7728 #undef	adj_init_size
7729 
7730 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7731 #ifdef	CONFIG_HIGHMEM
7732 		", %luK highmem"
7733 #endif
7734 		"%s%s)\n",
7735 		nr_free_pages() << (PAGE_SHIFT - 10),
7736 		physpages << (PAGE_SHIFT - 10),
7737 		codesize >> 10, datasize >> 10, rosize >> 10,
7738 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
7739 		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7740 		totalcma_pages << (PAGE_SHIFT - 10),
7741 #ifdef	CONFIG_HIGHMEM
7742 		totalhigh_pages() << (PAGE_SHIFT - 10),
7743 #endif
7744 		str ? ", " : "", str ? str : "");
7745 }
7746 
7747 /**
7748  * set_dma_reserve - set the specified number of pages reserved in the first zone
7749  * @new_dma_reserve: The number of pages to mark reserved
7750  *
7751  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7752  * In the DMA zone, a significant percentage may be consumed by kernel image
7753  * and other unfreeable allocations which can skew the watermarks badly. This
7754  * function may optionally be used to account for unfreeable pages in the
7755  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7756  * smaller per-cpu batchsize.
7757  */
7758 void __init set_dma_reserve(unsigned long new_dma_reserve)
7759 {
7760 	dma_reserve = new_dma_reserve;
7761 }
7762 
7763 static int page_alloc_cpu_dead(unsigned int cpu)
7764 {
7765 
7766 	lru_add_drain_cpu(cpu);
7767 	drain_pages(cpu);
7768 
7769 	/*
7770 	 * Spill the event counters of the dead processor
7771 	 * into the current processors event counters.
7772 	 * This artificially elevates the count of the current
7773 	 * processor.
7774 	 */
7775 	vm_events_fold_cpu(cpu);
7776 
7777 	/*
7778 	 * Zero the differential counters of the dead processor
7779 	 * so that the vm statistics are consistent.
7780 	 *
7781 	 * This is only okay since the processor is dead and cannot
7782 	 * race with what we are doing.
7783 	 */
7784 	cpu_vm_stats_fold(cpu);
7785 	return 0;
7786 }
7787 
7788 #ifdef CONFIG_NUMA
7789 int hashdist = HASHDIST_DEFAULT;
7790 
7791 static int __init set_hashdist(char *str)
7792 {
7793 	if (!str)
7794 		return 0;
7795 	hashdist = simple_strtoul(str, &str, 0);
7796 	return 1;
7797 }
7798 __setup("hashdist=", set_hashdist);
7799 #endif
7800 
7801 void __init page_alloc_init(void)
7802 {
7803 	int ret;
7804 
7805 #ifdef CONFIG_NUMA
7806 	if (num_node_state(N_MEMORY) == 1)
7807 		hashdist = 0;
7808 #endif
7809 
7810 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7811 					"mm/page_alloc:dead", NULL,
7812 					page_alloc_cpu_dead);
7813 	WARN_ON(ret < 0);
7814 }
7815 
7816 /*
7817  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7818  *	or min_free_kbytes changes.
7819  */
7820 static void calculate_totalreserve_pages(void)
7821 {
7822 	struct pglist_data *pgdat;
7823 	unsigned long reserve_pages = 0;
7824 	enum zone_type i, j;
7825 
7826 	for_each_online_pgdat(pgdat) {
7827 
7828 		pgdat->totalreserve_pages = 0;
7829 
7830 		for (i = 0; i < MAX_NR_ZONES; i++) {
7831 			struct zone *zone = pgdat->node_zones + i;
7832 			long max = 0;
7833 			unsigned long managed_pages = zone_managed_pages(zone);
7834 
7835 			/* Find valid and maximum lowmem_reserve in the zone */
7836 			for (j = i; j < MAX_NR_ZONES; j++) {
7837 				if (zone->lowmem_reserve[j] > max)
7838 					max = zone->lowmem_reserve[j];
7839 			}
7840 
7841 			/* we treat the high watermark as reserved pages. */
7842 			max += high_wmark_pages(zone);
7843 
7844 			if (max > managed_pages)
7845 				max = managed_pages;
7846 
7847 			pgdat->totalreserve_pages += max;
7848 
7849 			reserve_pages += max;
7850 		}
7851 	}
7852 	totalreserve_pages = reserve_pages;
7853 }
7854 
7855 /*
7856  * setup_per_zone_lowmem_reserve - called whenever
7857  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
7858  *	has a correct pages reserved value, so an adequate number of
7859  *	pages are left in the zone after a successful __alloc_pages().
7860  */
7861 static void setup_per_zone_lowmem_reserve(void)
7862 {
7863 	struct pglist_data *pgdat;
7864 	enum zone_type i, j;
7865 
7866 	for_each_online_pgdat(pgdat) {
7867 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
7868 			struct zone *zone = &pgdat->node_zones[i];
7869 			int ratio = sysctl_lowmem_reserve_ratio[i];
7870 			bool clear = !ratio || !zone_managed_pages(zone);
7871 			unsigned long managed_pages = 0;
7872 
7873 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
7874 				if (clear) {
7875 					zone->lowmem_reserve[j] = 0;
7876 				} else {
7877 					struct zone *upper_zone = &pgdat->node_zones[j];
7878 
7879 					managed_pages += zone_managed_pages(upper_zone);
7880 					zone->lowmem_reserve[j] = managed_pages / ratio;
7881 				}
7882 			}
7883 		}
7884 	}
7885 
7886 	/* update totalreserve_pages */
7887 	calculate_totalreserve_pages();
7888 }
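/*
 * Illustrative example (assumed defaults): with a sysctl_lowmem_reserve_ratio
 * of 256 for ZONE_DMA and a ZONE_NORMAL of 1048576 managed pages directly
 * above it, ZONE_DMA's lowmem_reserve[ZONE_NORMAL] becomes
 * 1048576 / 256 = 4096 pages, i.e. allocations that could have been satisfied
 * from ZONE_NORMAL must leave roughly 4096 extra free pages in ZONE_DMA
 * before they are allowed to fall back to it.
 */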
7889 
7890 static void __setup_per_zone_wmarks(void)
7891 {
7892 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7893 	unsigned long lowmem_pages = 0;
7894 	struct zone *zone;
7895 	unsigned long flags;
7896 
7897 	/* Calculate total number of !ZONE_HIGHMEM pages */
7898 	for_each_zone(zone) {
7899 		if (!is_highmem(zone))
7900 			lowmem_pages += zone_managed_pages(zone);
7901 	}
7902 
7903 	for_each_zone(zone) {
7904 		u64 tmp;
7905 
7906 		spin_lock_irqsave(&zone->lock, flags);
7907 		tmp = (u64)pages_min * zone_managed_pages(zone);
7908 		do_div(tmp, lowmem_pages);
7909 		if (is_highmem(zone)) {
7910 			/*
7911 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7912 			 * need highmem pages, so cap pages_min to a small
7913 			 * value here.
7914 			 *
7915 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7916 			 * deltas control async page reclaim, and so should
7917 			 * not be capped for highmem.
7918 			 */
7919 			unsigned long min_pages;
7920 
7921 			min_pages = zone_managed_pages(zone) / 1024;
7922 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7923 			zone->_watermark[WMARK_MIN] = min_pages;
7924 		} else {
7925 			/*
7926 			 * If it's a lowmem zone, reserve a number of pages
7927 			 * proportionate to the zone's size.
7928 			 */
7929 			zone->_watermark[WMARK_MIN] = tmp;
7930 		}
7931 
7932 		/*
7933 		 * Set the kswapd watermarks distance according to the
7934 		 * scale factor in proportion to available memory, but
7935 		 * ensure a minimum size on small systems.
7936 		 */
7937 		tmp = max_t(u64, tmp >> 2,
7938 			    mult_frac(zone_managed_pages(zone),
7939 				      watermark_scale_factor, 10000));
7940 
7941 		zone->watermark_boost = 0;
7942 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7943 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7944 
7945 		spin_unlock_irqrestore(&zone->lock, flags);
7946 	}
7947 
7948 	/* update totalreserve_pages */
7949 	calculate_totalreserve_pages();
7950 }
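/*
 * Worked example for the watermark calculation above (illustrative numbers,
 * assuming the default watermark_scale_factor of 10): with
 * min_free_kbytes = 4096 and 4K pages, pages_min = 1024.  A lowmem zone
 * managing 1048576 of 4194304 total lowmem pages gets
 *
 *   WMARK_MIN  = 1024 * 1048576 / 4194304         = 256 pages
 *   tmp        = max(256 >> 2, 1048576 * 10/10000) = 1048 pages
 *   WMARK_LOW  = 256 + 1048                        = 1304 pages
 *   WMARK_HIGH = 256 + 2096                        = 2352 pages
 */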
7951 
7952 /**
7953  * setup_per_zone_wmarks - called when min_free_kbytes changes
7954  * or when memory is hot-{added|removed}
7955  *
7956  * Ensures that the watermark[min,low,high] values for each zone are set
7957  * correctly with respect to min_free_kbytes.
7958  */
7959 void setup_per_zone_wmarks(void)
7960 {
7961 	static DEFINE_SPINLOCK(lock);
7962 
7963 	spin_lock(&lock);
7964 	__setup_per_zone_wmarks();
7965 	spin_unlock(&lock);
7966 }
7967 
7968 /*
7969  * Initialise min_free_kbytes.
7970  *
7971  * For small machines we want it small (128k min).  For large machines
7972  * we want it large (256MB max).  But it is not linear, because network
7973  * bandwidth does not increase linearly with machine size.  We use
7974  *
7975  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7976  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
7977  *
7978  * which yields
7979  *
7980  * 16MB:	512k
7981  * 32MB:	724k
7982  * 64MB:	1024k
7983  * 128MB:	1448k
7984  * 256MB:	2048k
7985  * 512MB:	2896k
7986  * 1024MB:	4096k
7987  * 2048MB:	5792k
7988  * 4096MB:	8192k
7989  * 8192MB:	11584k
7990  * 16384MB:	16384k
7991  */
7992 int __meminit init_per_zone_wmark_min(void)
7993 {
7994 	unsigned long lowmem_kbytes;
7995 	int new_min_free_kbytes;
7996 
7997 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7998 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7999 
8000 	if (new_min_free_kbytes > user_min_free_kbytes) {
8001 		min_free_kbytes = new_min_free_kbytes;
8002 		if (min_free_kbytes < 128)
8003 			min_free_kbytes = 128;
8004 		if (min_free_kbytes > 262144)
8005 			min_free_kbytes = 262144;
8006 	} else {
8007 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8008 				new_min_free_kbytes, user_min_free_kbytes);
8009 	}
8010 	setup_per_zone_wmarks();
8011 	refresh_zone_stat_thresholds();
8012 	setup_per_zone_lowmem_reserve();
8013 
8014 #ifdef CONFIG_NUMA
8015 	setup_min_unmapped_ratio();
8016 	setup_min_slab_ratio();
8017 #endif
8018 
8019 	khugepaged_min_free_kbytes_update();
8020 
8021 	return 0;
8022 }
8023 postcore_initcall(init_per_zone_wmark_min)
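/*
 * Example of the formula above (illustrative): 8GB of lowmem gives
 * lowmem_kbytes = 8388608, so new_min_free_kbytes = int_sqrt(8388608 * 16)
 * = 11585, close to the 11584k listed in the table above.  When the new
 * value is applied it is clamped to [128, 262144], so very large machines
 * cap out at 256MB of min_free_kbytes.
 */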
8024 
8025 /*
8026  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8027  *	that we can call two helper functions whenever min_free_kbytes
8028  *	changes.
8029  */
8030 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8031 		void *buffer, size_t *length, loff_t *ppos)
8032 {
8033 	int rc;
8034 
8035 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8036 	if (rc)
8037 		return rc;
8038 
8039 	if (write) {
8040 		user_min_free_kbytes = min_free_kbytes;
8041 		setup_per_zone_wmarks();
8042 	}
8043 	return 0;
8044 }
8045 
8046 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8047 		void *buffer, size_t *length, loff_t *ppos)
8048 {
8049 	int rc;
8050 
8051 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8052 	if (rc)
8053 		return rc;
8054 
8055 	if (write)
8056 		setup_per_zone_wmarks();
8057 
8058 	return 0;
8059 }
8060 
8061 #ifdef CONFIG_NUMA
8062 static void setup_min_unmapped_ratio(void)
8063 {
8064 	pg_data_t *pgdat;
8065 	struct zone *zone;
8066 
8067 	for_each_online_pgdat(pgdat)
8068 		pgdat->min_unmapped_pages = 0;
8069 
8070 	for_each_zone(zone)
8071 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8072 						         sysctl_min_unmapped_ratio) / 100;
8073 }
8074 
8075 
8076 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8077 		void *buffer, size_t *length, loff_t *ppos)
8078 {
8079 	int rc;
8080 
8081 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8082 	if (rc)
8083 		return rc;
8084 
8085 	setup_min_unmapped_ratio();
8086 
8087 	return 0;
8088 }
8089 
8090 static void setup_min_slab_ratio(void)
8091 {
8092 	pg_data_t *pgdat;
8093 	struct zone *zone;
8094 
8095 	for_each_online_pgdat(pgdat)
8096 		pgdat->min_slab_pages = 0;
8097 
8098 	for_each_zone(zone)
8099 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8100 						     sysctl_min_slab_ratio) / 100;
8101 }
8102 
8103 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8104 		void *buffer, size_t *length, loff_t *ppos)
8105 {
8106 	int rc;
8107 
8108 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8109 	if (rc)
8110 		return rc;
8111 
8112 	setup_min_slab_ratio();
8113 
8114 	return 0;
8115 }
8116 #endif
8117 
8118 /*
8119  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8120  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8121  *	whenever sysctl_lowmem_reserve_ratio changes.
8122  *
8123  * The reserve ratio obviously has absolutely no relation with the
8124  * minimum watermarks. The lowmem reserve ratio can only make sense
8125  * as a function of the boot-time zone sizes.
8126  */
8127 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8128 		void *buffer, size_t *length, loff_t *ppos)
8129 {
8130 	int i;
8131 
8132 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8133 
8134 	for (i = 0; i < MAX_NR_ZONES; i++) {
8135 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8136 			sysctl_lowmem_reserve_ratio[i] = 0;
8137 	}
8138 
8139 	setup_per_zone_lowmem_reserve();
8140 	return 0;
8141 }
8142 
8143 /*
8144  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8145  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
8146  * pagelist can have before it gets flushed back to the buddy allocator.
8147  */
8148 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8149 		void *buffer, size_t *length, loff_t *ppos)
8150 {
8151 	struct zone *zone;
8152 	int old_percpu_pagelist_fraction;
8153 	int ret;
8154 
8155 	mutex_lock(&pcp_batch_high_lock);
8156 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8157 
8158 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8159 	if (!write || ret < 0)
8160 		goto out;
8161 
8162 	/* Sanity checking to avoid pcp imbalance */
8163 	if (percpu_pagelist_fraction &&
8164 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8165 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8166 		ret = -EINVAL;
8167 		goto out;
8168 	}
8169 
8170 	/* No change? */
8171 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8172 		goto out;
8173 
8174 	for_each_populated_zone(zone)
8175 		zone_set_pageset_high_and_batch(zone);
8176 out:
8177 	mutex_unlock(&pcp_batch_high_lock);
8178 	return ret;
8179 }
8180 
8181 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8182 /*
8183  * Returns the number of pages that the arch has reserved but
8184  * which are not known to alloc_large_system_hash().
8185  */
8186 static unsigned long __init arch_reserved_kernel_pages(void)
8187 {
8188 	return 0;
8189 }
8190 #endif
8191 
8192 /*
8193  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8194  * machines. As memory size is increased the scale is also increased but at
8195  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8196  * quadruples the scale is increased by one, which means the size of hash table
8197  * only doubles, instead of quadrupling as well.
8198  * Because 32-bit systems cannot have large physical memory, where this scaling
8199  * makes sense, it is disabled on such platforms.
8200  */
8201 #if __BITS_PER_LONG > 32
8202 #define ADAPT_SCALE_BASE	(64ul << 30)
8203 #define ADAPT_SCALE_SHIFT	2
8204 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8205 #endif
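/*
 * Worked example of the adaptive scale (illustrative, 4K pages): for a
 * machine with 1TB of memory, numentries starts around 268435456 pages.
 * The loop in alloc_large_system_hash() below bumps scale twice (once past
 * 64GB worth of pages, once past 256GB worth), so the resulting table ends
 * up roughly 4x smaller than purely linear scaling would produce.
 */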
8206 
8207 /*
8208  * allocate a large system hash table from bootmem
8209  * - it is assumed that the hash table must contain an exact power-of-2
8210  *   quantity of entries
8211  * - limit is the number of hash buckets, not the total allocation size
8212  */
8213 void *__init alloc_large_system_hash(const char *tablename,
8214 				     unsigned long bucketsize,
8215 				     unsigned long numentries,
8216 				     int scale,
8217 				     int flags,
8218 				     unsigned int *_hash_shift,
8219 				     unsigned int *_hash_mask,
8220 				     unsigned long low_limit,
8221 				     unsigned long high_limit)
8222 {
8223 	unsigned long long max = high_limit;
8224 	unsigned long log2qty, size;
8225 	void *table = NULL;
8226 	gfp_t gfp_flags;
8227 	bool virt;
8228 
8229 	/* allow the kernel cmdline to have a say */
8230 	if (!numentries) {
8231 		/* round applicable memory size up to nearest megabyte */
8232 		numentries = nr_kernel_pages;
8233 		numentries -= arch_reserved_kernel_pages();
8234 
8235 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8236 		if (PAGE_SHIFT < 20)
8237 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8238 
8239 #if __BITS_PER_LONG > 32
8240 		if (!high_limit) {
8241 			unsigned long adapt;
8242 
8243 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8244 			     adapt <<= ADAPT_SCALE_SHIFT)
8245 				scale++;
8246 		}
8247 #endif
8248 
8249 		/* limit to 1 bucket per 2^scale bytes of low memory */
8250 		if (scale > PAGE_SHIFT)
8251 			numentries >>= (scale - PAGE_SHIFT);
8252 		else
8253 			numentries <<= (PAGE_SHIFT - scale);
8254 
8255 		/* Make sure we've got at least a 0-order allocation.. */
8256 		if (unlikely(flags & HASH_SMALL)) {
8257 			/* Makes no sense without HASH_EARLY */
8258 			WARN_ON(!(flags & HASH_EARLY));
8259 			if (!(numentries >> *_hash_shift)) {
8260 				numentries = 1UL << *_hash_shift;
8261 				BUG_ON(!numentries);
8262 			}
8263 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8264 			numentries = PAGE_SIZE / bucketsize;
8265 	}
8266 	numentries = roundup_pow_of_two(numentries);
8267 
8268 	/* limit allocation size to 1/16 total memory by default */
8269 	if (max == 0) {
8270 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8271 		do_div(max, bucketsize);
8272 	}
8273 	max = min(max, 0x80000000ULL);
8274 
8275 	if (numentries < low_limit)
8276 		numentries = low_limit;
8277 	if (numentries > max)
8278 		numentries = max;
8279 
8280 	log2qty = ilog2(numentries);
8281 
8282 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8283 	do {
8284 		virt = false;
8285 		size = bucketsize << log2qty;
8286 		if (flags & HASH_EARLY) {
8287 			if (flags & HASH_ZERO)
8288 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8289 			else
8290 				table = memblock_alloc_raw(size,
8291 							   SMP_CACHE_BYTES);
8292 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8293 			table = __vmalloc(size, gfp_flags);
8294 			virt = true;
8295 		} else {
8296 			/*
8297 			 * If bucketsize is not a power-of-two, we may free
8298 			 * some pages at the end of the hash table, which
8299 			 * alloc_pages_exact() does automatically.
8300 			 */
8301 			table = alloc_pages_exact(size, gfp_flags);
8302 			kmemleak_alloc(table, size, 1, gfp_flags);
8303 		}
8304 	} while (!table && size > PAGE_SIZE && --log2qty);
8305 
8306 	if (!table)
8307 		panic("Failed to allocate %s hash table\n", tablename);
8308 
8309 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8310 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8311 		virt ? "vmalloc" : "linear");
8312 
8313 	if (_hash_shift)
8314 		*_hash_shift = log2qty;
8315 	if (_hash_mask)
8316 		*_hash_mask = (1 << log2qty) - 1;
8317 
8318 	return table;
8319 }
8320 
8321 /*
8322  * This function checks whether pageblock includes unmovable pages or not.
8323  *
8324  * A PageLRU check without isolation or lru_lock could race so that a
8325  * MIGRATE_MOVABLE block might include unmovable pages. And a __PageMovable
8326  * check without lock_page may also miss some movable non-lru pages in a
8327  * race condition. So you can't expect this function to be exact.
8328  *
8329  * Returns a page without holding a reference. If the caller wants to
8330  * dereference that page (e.g., dumping), it has to make sure that it
8331  * cannot get removed (e.g., via memory unplug) concurrently.
8332  *
8333  */
8334 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8335 				 int migratetype, int flags)
8336 {
8337 	unsigned long iter = 0;
8338 	unsigned long pfn = page_to_pfn(page);
8339 	unsigned long offset = pfn % pageblock_nr_pages;
8340 
8341 	if (is_migrate_cma_page(page)) {
8342 		/*
8343 		 * CMA allocations (alloc_contig_range) really need to mark the
8344 		 * CMA pageblocks as isolated even when they are not in fact
8345 		 * movable, so consider them movable here.
8346 		 */
8347 		if (is_migrate_cma(migratetype))
8348 			return NULL;
8349 
8350 		return page;
8351 	}
8352 
8353 	for (; iter < pageblock_nr_pages - offset; iter++) {
8354 		if (!pfn_valid_within(pfn + iter))
8355 			continue;
8356 
8357 		page = pfn_to_page(pfn + iter);
8358 
8359 		/*
8360 		 * Both, bootmem allocations and memory holes are marked
8361 		 * PG_reserved and are unmovable. We can even have unmovable
8362 		 * allocations inside ZONE_MOVABLE, for example when
8363 		 * specifying "movablecore".
8364 		 */
8365 		if (PageReserved(page))
8366 			return page;
8367 
8368 		/*
8369 		 * If the zone is movable and we have ruled out all reserved
8370 		 * pages then it should be reasonably safe to assume the rest
8371 		 * is movable.
8372 		 */
8373 		if (zone_idx(zone) == ZONE_MOVABLE)
8374 			continue;
8375 
8376 		/*
8377 		 * Hugepages are not in LRU lists, but they're movable.
8378 		 * THPs are on the LRU, but need to be counted as #small pages.
8379 		 * We need not scan over tail pages because we don't
8380 		 * handle each tail page individually in migration.
8381 		 */
8382 		if (PageHuge(page) || PageTransCompound(page)) {
8383 			struct page *head = compound_head(page);
8384 			unsigned int skip_pages;
8385 
8386 			if (PageHuge(page)) {
8387 				if (!hugepage_migration_supported(page_hstate(head)))
8388 					return page;
8389 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8390 				return page;
8391 			}
8392 
8393 			skip_pages = compound_nr(head) - (page - head);
8394 			iter += skip_pages - 1;
8395 			continue;
8396 		}
8397 
8398 		/*
8399 		 * We can't use page_count without pinning the page
8400 		 * because another CPU can free the compound page.
8401 		 * This check already skips compound tails of THP
8402 		 * because their page->_refcount is zero at all times.
8403 		 */
8404 		if (!page_ref_count(page)) {
8405 			if (PageBuddy(page))
8406 				iter += (1 << buddy_order(page)) - 1;
8407 			continue;
8408 		}
8409 
8410 		/*
8411 		 * The HWPoisoned page may be not in buddy system, and
8412 		 * page_count() is not 0.
8413 		 */
8414 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8415 			continue;
8416 
8417 		/*
8418 		 * We treat all PageOffline() pages as movable when offlining
8419 		 * to give drivers a chance to decrement their reference count
8420 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8421 		 * can be offlined as there are no direct references anymore.
8422 		 * For actually unmovable PageOffline() where the driver does
8423 		 * not support this, we will fail later when trying to actually
8424 		 * move these pages that still have a reference count > 0.
8425 		 * (false negatives in this function only)
8426 		 */
8427 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8428 			continue;
8429 
8430 		if (__PageMovable(page) || PageLRU(page))
8431 			continue;
8432 
8433 		/*
8434 		 * If there are RECLAIMABLE pages, we need to check them.
8435 		 * But for now, memory offline itself doesn't call
8436 		 * shrink_node_slabs(), and this still needs to be fixed.
8437 		 */
8438 		return page;
8439 	}
8440 	return NULL;
8441 }
8442 
8443 #ifdef CONFIG_CONTIG_ALLOC
8444 static unsigned long pfn_max_align_down(unsigned long pfn)
8445 {
8446 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8447 			     pageblock_nr_pages) - 1);
8448 }
8449 
8450 static unsigned long pfn_max_align_up(unsigned long pfn)
8451 {
8452 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8453 				pageblock_nr_pages));
8454 }
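/*
 * Example (illustrative): with MAX_ORDER_NR_PAGES = 1024 and
 * pageblock_nr_pages = 512, the alignment used is 1024 pfns (the larger of
 * the two), so pfn_max_align_down(5000) = 4096 and
 * pfn_max_align_up(5000) = 5120.
 */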
8455 
8456 /* [start, end) must belong to a single zone. */
8457 static int __alloc_contig_migrate_range(struct compact_control *cc,
8458 					unsigned long start, unsigned long end)
8459 {
8460 	/* This function is based on compact_zone() from compaction.c. */
8461 	unsigned int nr_reclaimed;
8462 	unsigned long pfn = start;
8463 	unsigned int tries = 0;
8464 	int ret = 0;
8465 	struct migration_target_control mtc = {
8466 		.nid = zone_to_nid(cc->zone),
8467 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8468 	};
8469 
8470 	migrate_prep();
8471 
8472 	while (pfn < end || !list_empty(&cc->migratepages)) {
8473 		if (fatal_signal_pending(current)) {
8474 			ret = -EINTR;
8475 			break;
8476 		}
8477 
8478 		if (list_empty(&cc->migratepages)) {
8479 			cc->nr_migratepages = 0;
8480 			pfn = isolate_migratepages_range(cc, pfn, end);
8481 			if (!pfn) {
8482 				ret = -EINTR;
8483 				break;
8484 			}
8485 			tries = 0;
8486 		} else if (++tries == 5) {
8487 			ret = ret < 0 ? ret : -EBUSY;
8488 			break;
8489 		}
8490 
8491 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8492 							&cc->migratepages);
8493 		cc->nr_migratepages -= nr_reclaimed;
8494 
8495 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8496 				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
8497 	}
8498 	if (ret < 0) {
8499 		putback_movable_pages(&cc->migratepages);
8500 		return ret;
8501 	}
8502 	return 0;
8503 }
8504 
8505 /**
8506  * alloc_contig_range() -- tries to allocate given range of pages
8507  * @start:	start PFN to allocate
8508  * @end:	one-past-the-last PFN to allocate
8509  * @migratetype:	migratetype of the underlying pageblocks (either
8510  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8511  *			in range must have the same migratetype and it must
8512  *			be either of the two.
8513  * @gfp_mask:	GFP mask to use during compaction
8514  *
8515  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8516  * aligned.  The PFN range must belong to a single zone.
8517  *
8518  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8519  * pageblocks in the range.  Once isolated, the pageblocks should not
8520  * be modified by others.
8521  *
8522  * Return: zero on success or negative error code.  On success all
8523  * pages which PFN is in [start, end) are allocated for the caller and
8524  * need to be freed with free_contig_range().
8525  */
8526 int alloc_contig_range(unsigned long start, unsigned long end,
8527 		       unsigned migratetype, gfp_t gfp_mask)
8528 {
8529 	unsigned long outer_start, outer_end;
8530 	unsigned int order;
8531 	int ret = 0;
8532 
8533 	struct compact_control cc = {
8534 		.nr_migratepages = 0,
8535 		.order = -1,
8536 		.zone = page_zone(pfn_to_page(start)),
8537 		.mode = MIGRATE_SYNC,
8538 		.ignore_skip_hint = true,
8539 		.no_set_skip_hint = true,
8540 		.gfp_mask = current_gfp_context(gfp_mask),
8541 		.alloc_contig = true,
8542 	};
8543 	INIT_LIST_HEAD(&cc.migratepages);
8544 
8545 	/*
8546 	 * What we do here is mark all pageblocks in range as
8547 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
8548 	 * have different sizes, and due to the way the page allocator
8549 	 * works, we align the range to the biggest of the two sizes so
8550 	 * that the page allocator won't try to merge buddies from
8551 	 * different pageblocks and change MIGRATE_ISOLATE to some
8552 	 * other migration type.
8553 	 *
8554 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8555 	 * migrate the pages from an unaligned range (ie. pages that
8556 	 * we are interested in).  This will put all the pages in
8557 	 * range back to page allocator as MIGRATE_ISOLATE.
8558 	 *
8559 	 * When this is done, we take the pages in range from page
8560 	 * allocator removing them from the buddy system.  This way
8561 	 * page allocator will never consider using them.
8562 	 *
8563 	 * This lets us mark the pageblocks back as
8564 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8565 	 * aligned range but not in the unaligned, original range are
8566 	 * put back to page allocator so that buddy can use them.
8567 	 */
8568 
8569 	ret = start_isolate_page_range(pfn_max_align_down(start),
8570 				       pfn_max_align_up(end), migratetype, 0);
8571 	if (ret)
8572 		return ret;
8573 
8574 	drain_all_pages(cc.zone);
8575 
8576 	/*
8577 	 * In case of -EBUSY, we'd like to know which page causes the
8578 	 * problem.  So, just fall through: test_pages_isolated() has a
8579 	 * tracepoint which will report the busy page.
8580 	 *
8581 	 * It is possible that busy pages could become available before
8582 	 * the call to test_pages_isolated(), and the range will actually
8583 	 * be allocated.  So, if we fall through, be sure to clear ret so
8584 	 * that -EBUSY is not accidentally used or returned to the caller.
8585 	 */
8586 	ret = __alloc_contig_migrate_range(&cc, start, end);
8587 	if (ret && ret != -EBUSY)
8588 		goto done;
8589 	ret = 0;
8590 
8591 	/*
8592 	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES aligned
8593 	 * blocks that are marked as MIGRATE_ISOLATE.  What's more, all
8594 	 * pages in [start, end) are free in the page allocator.  What we
8595 	 * are going to do is allocate all pages from [start, end) (that
8596 	 * is, remove them from the page allocator).
8597 	 *
8598 	 * The only problem is that pages at the beginning and at the end
8599 	 * of the range of interest may not be aligned with the pages the
8600 	 * page allocator holds, i.e. they can be part of higher order
8601 	 * pages.  Because of this, we reserve the bigger range and, once
8602 	 * this is done, free the pages we are not interested in.
8603 	 *
8604 	 * We don't have to hold zone->lock here because the pages are
8605 	 * isolated and thus won't get removed from the buddy system.
8606 	 */
8607 
8608 	lru_add_drain_all();
8609 
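	/*
	 * start may sit inside a higher-order free page.  Walk outer_start
	 * down to successively larger power-of-two boundaries until a
	 * buddy head is found (or give up at MAX_ORDER), so that the
	 * whole containing free page can be grabbed below.
	 */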
8610 	order = 0;
8611 	outer_start = start;
8612 	while (!PageBuddy(pfn_to_page(outer_start))) {
8613 		if (++order >= MAX_ORDER) {
8614 			outer_start = start;
8615 			break;
8616 		}
8617 		outer_start &= ~0UL << order;
8618 	}
8619 
8620 	if (outer_start != start) {
8621 		order = buddy_order(pfn_to_page(outer_start));
8622 
8623 		/*
8624 		 * The outer_start page could be a small order buddy page
8625 		 * that doesn't include the start page.  Adjust outer_start
8626 		 * in this case so the failed page is reported properly by
8627 		 * the tracepoint in test_pages_isolated().
8628 		 */
8629 		if (outer_start + (1UL << order) <= start)
8630 			outer_start = start;
8631 	}
8632 
8633 	/* Make sure the range is really isolated. */
8634 	if (test_pages_isolated(outer_start, end, 0)) {
8635 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
8636 			__func__, outer_start, end);
8637 		ret = -EBUSY;
8638 		goto done;
8639 	}
8640 
8641 	/* Grab isolated pages from freelists. */
8642 	outer_end = isolate_freepages_range(&cc, outer_start, end);
8643 	if (!outer_end) {
8644 		ret = -EBUSY;
8645 		goto done;
8646 	}
8647 
8648 	/* Free head and tail (if any) */
8649 	if (start != outer_start)
8650 		free_contig_range(outer_start, start - outer_start);
8651 	if (end != outer_end)
8652 		free_contig_range(end, outer_end - end);
8653 
8654 done:
8655 	undo_isolate_page_range(pfn_max_align_down(start),
8656 				pfn_max_align_up(end), migratetype);
8657 	return ret;
8658 }
8659 EXPORT_SYMBOL(alloc_contig_range);
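
/*
 * A minimal usage sketch (hypothetical, not part of this file): a caller
 * that needs 16 physically contiguous movable pages starting at a known
 * suitable pfn could do:
 *
 *	if (!alloc_contig_range(pfn, pfn + 16, MIGRATE_MOVABLE, GFP_KERNEL))
 *		free_contig_range(pfn, 16);
 *
 * where "pfn" is assumed to point at MIGRATE_MOVABLE pageblocks within a
 * single zone.
 */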
8660 
8661 static int __alloc_contig_pages(unsigned long start_pfn,
8662 				unsigned long nr_pages, gfp_t gfp_mask)
8663 {
8664 	unsigned long end_pfn = start_pfn + nr_pages;
8665 
8666 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8667 				  gfp_mask);
8668 }
8669 
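/*
 * A PFN range is a plausible candidate for alloc_contig_range() only if
 * every page in it is online, belongs to the given zone, is not reserved,
 * has no references and is not a hugetlb page.
 */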
8670 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8671 				   unsigned long nr_pages)
8672 {
8673 	unsigned long i, end_pfn = start_pfn + nr_pages;
8674 	struct page *page;
8675 
8676 	for (i = start_pfn; i < end_pfn; i++) {
8677 		page = pfn_to_online_page(i);
8678 		if (!page)
8679 			return false;
8680 
8681 		if (page_zone(page) != z)
8682 			return false;
8683 
8684 		if (PageReserved(page))
8685 			return false;
8686 
8687 		if (page_count(page) > 0)
8688 			return false;
8689 
8690 		if (PageHuge(page))
8691 			return false;
8692 	}
8693 	return true;
8694 }
8695 
8696 static bool zone_spans_last_pfn(const struct zone *zone,
8697 				unsigned long start_pfn, unsigned long nr_pages)
8698 {
8699 	unsigned long last_pfn = start_pfn + nr_pages - 1;
8700 
8701 	return zone_spans_pfn(zone, last_pfn);
8702 }
8703 
8704 /**
8705  * alloc_contig_pages() -- tries to find and allocate a contiguous range of pages
8706  * @nr_pages:	Number of contiguous pages to allocate
8707  * @gfp_mask:	GFP mask to limit search and used during compaction
8708  * @nid:	Target node
8709  * @nodemask:	Mask for other possible nodes
8710  *
8711  * This routine is a wrapper around alloc_contig_range(). It scans over zones
8712  * on an applicable zonelist to find a contiguous pfn range which can then be
8713  * tried for allocation with alloc_contig_range(). This routine is intended
8714  * for allocation requests which cannot be fulfilled with the buddy allocator.
8715  *
8716  * The allocated memory is always aligned to a page boundary. If nr_pages is a
8717  * power of two, then the allocation is guaranteed to be aligned to nr_pages
8718  * (e.g. a 1GB request would be aligned to 1GB).
8719  *
8720  * Allocated pages can be freed with free_contig_range() or by manually calling
8721  * __free_page() on each allocated page.
8722  *
8723  * Return: pointer to contiguous pages on success, or NULL if not successful.
8724  */
8725 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8726 				int nid, nodemask_t *nodemask)
8727 {
8728 	unsigned long ret, pfn, flags;
8729 	struct zonelist *zonelist;
8730 	struct zone *zone;
8731 	struct zoneref *z;
8732 
8733 	zonelist = node_zonelist(nid, gfp_mask);
8734 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
8735 					gfp_zone(gfp_mask), nodemask) {
8736 		spin_lock_irqsave(&zone->lock, flags);
8737 
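		/*
		 * Scan the zone in nr_pages-sized, nr_pages-aligned steps
		 * and hand each plausible candidate range to
		 * alloc_contig_range().
		 */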
8738 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8739 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8740 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8741 				/*
8742 				 * We release the zone lock here because
8743 				 * alloc_contig_range() will also lock the zone
8744 				 * at some point. If there's an allocation
8745 				 * spinning on this lock, it may win the race
8746 				 * and cause alloc_contig_range() to fail...
8747 				 */
8748 				spin_unlock_irqrestore(&zone->lock, flags);
8749 				ret = __alloc_contig_pages(pfn, nr_pages,
8750 							gfp_mask);
8751 				if (!ret)
8752 					return pfn_to_page(pfn);
8753 				spin_lock_irqsave(&zone->lock, flags);
8754 			}
8755 			pfn += nr_pages;
8756 		}
8757 		spin_unlock_irqrestore(&zone->lock, flags);
8758 	}
8759 	return NULL;
8760 }
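
/*
 * A minimal usage sketch (hypothetical, not part of this file): allocate
 * and release four contiguous pages on the local node:
 *
 *	struct page *page = alloc_contig_pages(4, GFP_KERNEL,
 *					       numa_node_id(), NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 4);
 */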
8761 #endif /* CONFIG_CONTIG_ALLOC */
8762 
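/*
 * Free a range handed out by alloc_contig_range() or alloc_contig_pages().
 * Every page is expected to hold exactly one reference here; any page
 * that does not is counted and reported by the WARN below.
 */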
8763 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8764 {
8765 	unsigned int count = 0;
8766 
8767 	for (; nr_pages--; pfn++) {
8768 		struct page *page = pfn_to_page(pfn);
8769 
8770 		count += page_count(page) != 1;
8771 		__free_page(page);
8772 	}
8773 	WARN(count != 0, "%d pages are still in use!\n", count);
8774 }
8775 EXPORT_SYMBOL(free_contig_range);
8776 
8777 /*
8778  * The zone indicated has a new number of managed_pages; batch sizes and percpu
8779  * page high values need to be recalulated.
8780  */
8781 void __meminit zone_pcp_update(struct zone *zone)
8782 {
8783 	mutex_lock(&pcp_batch_high_lock);
8784 	zone_set_pageset_high_and_batch(zone);
8785 	mutex_unlock(&pcp_batch_high_lock);
8786 }
8787 
8788 /*
8789  * Effectively disable pcplists for the zone by setting the high limit to 0
8790  * and draining all cpus. A concurrent page freeing on another CPU that's about
8791  * to put the page on the pcplist will either finish before the drain and
8792  * the page will be drained, or observe the new high limit and skip the pcplist.
8793  *
8794  * Must be paired with a call to zone_pcp_enable().
8795  */
8796 void zone_pcp_disable(struct zone *zone)
8797 {
8798 	mutex_lock(&pcp_batch_high_lock);
8799 	__zone_set_pageset_high_and_batch(zone, 0, 1);
8800 	__drain_all_pages(zone, true);
8801 }
8802 
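/* Undo zone_pcp_disable(): restore the saved pcp high and batch values. */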
8803 void zone_pcp_enable(struct zone *zone)
8804 {
8805 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
8806 	mutex_unlock(&pcp_batch_high_lock);
8807 }
8808 
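/*
 * Drop the zone's per-cpu pagesets, folding their vmstat deltas back into
 * the zone counters, and point the zone back at the static boot_pageset.
 */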
8809 void zone_pcp_reset(struct zone *zone)
8810 {
8811 	unsigned long flags;
8812 	int cpu;
8813 	struct per_cpu_pageset *pset;
8814 
8815 	/* avoid races with drain_pages()  */
8816 	local_irq_save(flags);
8817 	if (zone->pageset != &boot_pageset) {
8818 		for_each_online_cpu(cpu) {
8819 			pset = per_cpu_ptr(zone->pageset, cpu);
8820 			drain_zonestat(zone, pset);
8821 		}
8822 		free_percpu(zone->pageset);
8823 		zone->pageset = &boot_pageset;
8824 	}
8825 	local_irq_restore(flags);
8826 }
8827 
8828 #ifdef CONFIG_MEMORY_HOTREMOVE
8829 /*
8830  * The range must lie within a single zone, must not contain holes, must span
8831  * full sections, and all pages in it must be isolated before calling this function.
8832  */
8833 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8834 {
8835 	unsigned long pfn = start_pfn;
8836 	struct page *page;
8837 	struct zone *zone;
8838 	unsigned int order;
8839 	unsigned long flags;
8840 
8841 	offline_mem_sections(pfn, end_pfn);
8842 	zone = page_zone(pfn_to_page(pfn));
8843 	spin_lock_irqsave(&zone->lock, flags);
8844 	while (pfn < end_pfn) {
8845 		page = pfn_to_page(pfn);
8846 		/*
8847 		 * The HWPoisoned page may not be in the buddy system, and
8848 		 * its page_count() is not 0.
8849 		 */
8850 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8851 			pfn++;
8852 			continue;
8853 		}
8854 		/*
8855 		 * At this point all remaining PageOffline() pages have a
8856 		 * reference count of 0 and can simply be skipped.
8857 		 */
8858 		if (PageOffline(page)) {
8859 			BUG_ON(page_count(page));
8860 			BUG_ON(PageBuddy(page));
8861 			pfn++;
8862 			continue;
8863 		}
8864 
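		/*
		 * Anything left must be a free buddy page; pull it out of
		 * the free lists so it can no longer be allocated.
		 */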
8865 		BUG_ON(page_count(page));
8866 		BUG_ON(!PageBuddy(page));
8867 		order = buddy_order(page);
8868 		del_page_from_free_list(page, zone, order);
8869 		pfn += (1 << order);
8870 	}
8871 	spin_unlock_irqrestore(&zone->lock, flags);
8872 }
8873 #endif
8874 
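/*
 * Return true if @page currently lies within a free buddy page of any
 * order, either as the head of that free page or somewhere inside it.
 */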
8875 bool is_free_buddy_page(struct page *page)
8876 {
8877 	struct zone *zone = page_zone(page);
8878 	unsigned long pfn = page_to_pfn(page);
8879 	unsigned long flags;
8880 	unsigned int order;
8881 
8882 	spin_lock_irqsave(&zone->lock, flags);
8883 	for (order = 0; order < MAX_ORDER; order++) {
8884 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8885 
8886 		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
8887 			break;
8888 	}
8889 	spin_unlock_irqrestore(&zone->lock, flags);
8890 
8891 	return order < MAX_ORDER;
8892 }
8893 
8894 #ifdef CONFIG_MEMORY_FAILURE
8895 /*
8896  * Break down a higher-order page into sub-pages, and keep our target out
8897  * of the buddy allocator.
8898  */
8899 static void break_down_buddy_pages(struct zone *zone, struct page *page,
8900 				   struct page *target, int low, int high,
8901 				   int migratetype)
8902 {
8903 	unsigned long size = 1 << high;
8904 	struct page *current_buddy, *next_page;
8905 
8906 	while (high > low) {
8907 		high--;
8908 		size >>= 1;
8909 
8910 		if (target >= &page[size]) {
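		/*
		 * Split the current block in half: keep walking into the
		 * half that contains @target and return the other half to
		 * the free list (unless it is captured as a guard page).
		 */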
8911 			next_page = page + size;
8912 			current_buddy = page;
8913 		} else {
8914 			next_page = page;
8915 			current_buddy = page + size;
8916 		}
8917 
8918 		if (set_page_guard(zone, current_buddy, high, migratetype))
8919 			continue;
8920 
8921 		if (current_buddy != target) {
8922 			add_to_free_list(current_buddy, zone, high, migratetype);
8923 			set_buddy_order(current_buddy, high);
8924 			page = next_page;
8925 		}
8926 	}
8927 }
8928 
8929 /*
8930  * Take a page that will be marked as poisoned off the buddy allocator.
8931  */
8932 bool take_page_off_buddy(struct page *page)
8933 {
8934 	struct zone *zone = page_zone(page);
8935 	unsigned long pfn = page_to_pfn(page);
8936 	unsigned long flags;
8937 	unsigned int order;
8938 	bool ret = false;
8939 
8940 	spin_lock_irqsave(&zone->lock, flags);
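	/*
	 * Find the free buddy page (of any order) that contains @page,
	 * remove it from the free list and split it up so that @page
	 * itself ends up outside the buddy allocator.
	 */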
8941 	for (order = 0; order < MAX_ORDER; order++) {
8942 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8943 		int page_order = buddy_order(page_head);
8944 
8945 		if (PageBuddy(page_head) && page_order >= order) {
8946 			unsigned long pfn_head = page_to_pfn(page_head);
8947 			int migratetype = get_pfnblock_migratetype(page_head,
8948 								   pfn_head);
8949 
8950 			del_page_from_free_list(page_head, zone, page_order);
8951 			break_down_buddy_pages(zone, page_head, page, 0,
8952 						page_order, migratetype);
8953 			ret = true;
8954 			break;
8955 		}
8956 		if (page_count(page_head) > 0)
8957 			break;
8958 	}
8959 	spin_unlock_irqrestore(&zone->lock, flags);
8960 	return ret;
8961 }
8962 #endif
8963