xref: /openbmc/linux/mm/page_alloc.c (revision 7aacf86b)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/notifier.h>
37 #include <linux/topology.h>
38 #include <linux/sysctl.h>
39 #include <linux/cpu.h>
40 #include <linux/cpuset.h>
41 #include <linux/memory_hotplug.h>
42 #include <linux/nodemask.h>
43 #include <linux/vmalloc.h>
44 #include <linux/vmstat.h>
45 #include <linux/mempolicy.h>
46 #include <linux/memremap.h>
47 #include <linux/stop_machine.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/page_ext.h>
54 #include <linux/debugobjects.h>
55 #include <linux/kmemleak.h>
56 #include <linux/compaction.h>
57 #include <trace/events/kmem.h>
58 #include <trace/events/oom.h>
59 #include <linux/prefetch.h>
60 #include <linux/mm_inline.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/kthread.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/nmi.h>
70 
71 #include <asm/sections.h>
72 #include <asm/tlbflush.h>
73 #include <asm/div64.h>
74 #include "internal.h"
75 
76 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
77 static DEFINE_MUTEX(pcp_batch_high_lock);
78 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
79 
80 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
81 DEFINE_PER_CPU(int, numa_node);
82 EXPORT_PER_CPU_SYMBOL(numa_node);
83 #endif
84 
85 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
86 /*
87  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
88  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
89  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
90  * defined in <linux/topology.h>.
91  */
92 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
93 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
94 int _node_numa_mem_[MAX_NUMNODES];
95 #endif
96 
97 /* work_structs for global per-cpu drains */
98 DEFINE_MUTEX(pcpu_drain_mutex);
99 DEFINE_PER_CPU(struct work_struct, pcpu_drain);
100 
101 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
102 volatile unsigned long latent_entropy __latent_entropy;
103 EXPORT_SYMBOL(latent_entropy);
104 #endif
105 
106 /*
107  * Array of node states.
108  */
109 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
110 	[N_POSSIBLE] = NODE_MASK_ALL,
111 	[N_ONLINE] = { { [0] = 1UL } },
112 #ifndef CONFIG_NUMA
113 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
114 #ifdef CONFIG_HIGHMEM
115 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
116 #endif
117 	[N_MEMORY] = { { [0] = 1UL } },
118 	[N_CPU] = { { [0] = 1UL } },
119 #endif	/* NUMA */
120 };
121 EXPORT_SYMBOL(node_states);
122 
123 /* Protect totalram_pages and zone->managed_pages */
124 static DEFINE_SPINLOCK(managed_page_count_lock);
125 
126 unsigned long totalram_pages __read_mostly;
127 unsigned long totalreserve_pages __read_mostly;
128 unsigned long totalcma_pages __read_mostly;
129 
130 int percpu_pagelist_fraction;
131 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
132 
133 /*
134  * A cached value of the page's pageblock's migratetype, used when the page is
135  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
136  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
137  * Also the migratetype set in the page does not necessarily match the pcplist
138  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
139  * other index - this ensures that it will be put on the correct CMA freelist.
140  */
141 static inline int get_pcppage_migratetype(struct page *page)
142 {
143 	return page->index;
144 }
145 
146 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
147 {
148 	page->index = migratetype;
149 }
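
/*
 * For example: __rmqueue_smallest() below records the freelist a page was
 * taken from with set_pcppage_migratetype(); when the page is later freed
 * through a per-cpu list, free_pcppages_bulk() reads that cached value back
 * with get_pcppage_migratetype() instead of re-deriving it from the
 * pageblock bitmap (it only falls back to get_pageblock_migratetype() when
 * the pageblock may have been isolated in the meantime).
 */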
150 
151 #ifdef CONFIG_PM_SLEEP
152 /*
153  * The following functions are used by the suspend/hibernate code to temporarily
154  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
155  * while devices are suspended.  To avoid races with the suspend/hibernate code,
156  * they should always be called with pm_mutex held (gfp_allowed_mask also should
157  * only be modified with pm_mutex held, unless the suspend/hibernate code is
158  * guaranteed not to run in parallel with that modification).
159  */
160 
161 static gfp_t saved_gfp_mask;
162 
163 void pm_restore_gfp_mask(void)
164 {
165 	WARN_ON(!mutex_is_locked(&pm_mutex));
166 	if (saved_gfp_mask) {
167 		gfp_allowed_mask = saved_gfp_mask;
168 		saved_gfp_mask = 0;
169 	}
170 }
171 
172 void pm_restrict_gfp_mask(void)
173 {
174 	WARN_ON(!mutex_is_locked(&pm_mutex));
175 	WARN_ON(saved_gfp_mask);
176 	saved_gfp_mask = gfp_allowed_mask;
177 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
178 }
179 
180 bool pm_suspended_storage(void)
181 {
182 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
183 		return false;
184 	return true;
185 }
186 #endif /* CONFIG_PM_SLEEP */
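
/*
 * Usage sketch (illustrative only; the real callers live in the
 * suspend/hibernate core, as noted above):
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();
 *	... suspend devices / write the hibernation image ...
 *	pm_restore_gfp_mask();
 *	mutex_unlock(&pm_mutex);
 *
 * While the mask is restricted, pm_suspended_storage() above returns true,
 * signalling that block I/O cannot be relied upon.
 */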
187 
188 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
189 unsigned int pageblock_order __read_mostly;
190 #endif
191 
192 static void __free_pages_ok(struct page *page, unsigned int order);
193 
194 /*
195  * results with 256, 32 in the lowmem_reserve sysctl:
196  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
197  *	1G machine -> (16M dma, 784M normal, 224M high)
198  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
199  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
200  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
201  *
202  * TBD: should special case ZONE_DMA32 machines here - in those we normally
203  * don't need any ZONE_NORMAL reservation
204  */
205 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
206 #ifdef CONFIG_ZONE_DMA
207 	 256,
208 #endif
209 #ifdef CONFIG_ZONE_DMA32
210 	 256,
211 #endif
212 #ifdef CONFIG_HIGHMEM
213 	 32,
214 #endif
215 	 32,
216 };
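
/*
 * Rough worked example of the figures quoted above: with a ratio of 256 for
 * ZONE_DMA, NORMAL allocations on the 1G machine (784M of ZONE_NORMAL) leave
 * about 784M/256 ~= 3M of ZONE_DMA reserved for lower-zone-only users, and
 * with a ratio of 32 for ZONE_NORMAL, HIGHMEM allocations (224M) leave about
 * 224M/32 = 7M of ZONE_NORMAL reserved.
 */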
217 
218 EXPORT_SYMBOL(totalram_pages);
219 
220 static char * const zone_names[MAX_NR_ZONES] = {
221 #ifdef CONFIG_ZONE_DMA
222 	 "DMA",
223 #endif
224 #ifdef CONFIG_ZONE_DMA32
225 	 "DMA32",
226 #endif
227 	 "Normal",
228 #ifdef CONFIG_HIGHMEM
229 	 "HighMem",
230 #endif
231 	 "Movable",
232 #ifdef CONFIG_ZONE_DEVICE
233 	 "Device",
234 #endif
235 };
236 
237 char * const migratetype_names[MIGRATE_TYPES] = {
238 	"Unmovable",
239 	"Movable",
240 	"Reclaimable",
241 	"HighAtomic",
242 #ifdef CONFIG_CMA
243 	"CMA",
244 #endif
245 #ifdef CONFIG_MEMORY_ISOLATION
246 	"Isolate",
247 #endif
248 };
249 
250 compound_page_dtor * const compound_page_dtors[] = {
251 	NULL,
252 	free_compound_page,
253 #ifdef CONFIG_HUGETLB_PAGE
254 	free_huge_page,
255 #endif
256 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
257 	free_transhuge_page,
258 #endif
259 };
260 
261 int min_free_kbytes = 1024;
262 int user_min_free_kbytes = -1;
263 int watermark_scale_factor = 10;
264 
265 static unsigned long __meminitdata nr_kernel_pages;
266 static unsigned long __meminitdata nr_all_pages;
267 static unsigned long __meminitdata dma_reserve;
268 
269 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
270 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
271 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
272 static unsigned long __initdata required_kernelcore;
273 static unsigned long __initdata required_movablecore;
274 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
275 static bool mirrored_kernelcore;
276 
277 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
278 int movable_zone;
279 EXPORT_SYMBOL(movable_zone);
280 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
281 
282 #if MAX_NUMNODES > 1
283 int nr_node_ids __read_mostly = MAX_NUMNODES;
284 int nr_online_nodes __read_mostly = 1;
285 EXPORT_SYMBOL(nr_node_ids);
286 EXPORT_SYMBOL(nr_online_nodes);
287 #endif
288 
289 int page_group_by_mobility_disabled __read_mostly;
290 
291 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
292 static inline void reset_deferred_meminit(pg_data_t *pgdat)
293 {
294 	unsigned long max_initialise;
295 	unsigned long reserved_lowmem;
296 
297 	/*
298 	 * Initialise at least 2G of a node, but also take into account that
299 	 * two large system hashes can take up to 1GB for 0.25TB/node.
300 	 */
301 	max_initialise = max(2UL << (30 - PAGE_SHIFT),
302 		(pgdat->node_spanned_pages >> 8));
303 
304 	/*
305 	 * Compensate for all the memblock reservations (e.g. crash kernel)
306 	 * from the initial estimation to make sure we will initialize enough
307 	 * memory to boot.
308 	 */
309 	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
310 			pgdat->node_start_pfn + max_initialise);
311 	max_initialise += reserved_lowmem;
312 
313 	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
314 	pgdat->first_deferred_pfn = ULONG_MAX;
315 }
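
/*
 * Worked example for the limits above, assuming 4K pages (PAGE_SHIFT == 12):
 * 2UL << (30 - PAGE_SHIFT) is the number of pages in 2G (524288), and
 * node_spanned_pages >> 8 is 1/256th of the node, i.e. roughly 1GB worth of
 * pages per 0.25TB of node memory, matching the hash-table estimate in the
 * comment above.
 */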
316 
317 /* Returns true if the struct page for the pfn is uninitialised */
318 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
319 {
320 	int nid = early_pfn_to_nid(pfn);
321 
322 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
323 		return true;
324 
325 	return false;
326 }
327 
328 /*
329  * Returns false when the remaining initialisation should be deferred until
330  * later in the boot cycle when it can be parallelised.
331  */
332 static inline bool update_defer_init(pg_data_t *pgdat,
333 				unsigned long pfn, unsigned long zone_end,
334 				unsigned long *nr_initialised)
335 {
336 	/* Always populate low zones for address-constrained allocations */
337 	if (zone_end < pgdat_end_pfn(pgdat))
338 		return true;
339 	(*nr_initialised)++;
340 	if ((*nr_initialised > pgdat->static_init_size) &&
341 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
342 		pgdat->first_deferred_pfn = pfn;
343 		return false;
344 	}
345 
346 	return true;
347 }
348 #else
349 static inline void reset_deferred_meminit(pg_data_t *pgdat)
350 {
351 }
352 
353 static inline bool early_page_uninitialised(unsigned long pfn)
354 {
355 	return false;
356 }
357 
358 static inline bool update_defer_init(pg_data_t *pgdat,
359 				unsigned long pfn, unsigned long zone_end,
360 				unsigned long *nr_initialised)
361 {
362 	return true;
363 }
364 #endif
365 
366 /* Return a pointer to the bitmap storing bits affecting a block of pages */
367 static inline unsigned long *get_pageblock_bitmap(struct page *page,
368 							unsigned long pfn)
369 {
370 #ifdef CONFIG_SPARSEMEM
371 	return __pfn_to_section(pfn)->pageblock_flags;
372 #else
373 	return page_zone(page)->pageblock_flags;
374 #endif /* CONFIG_SPARSEMEM */
375 }
376 
377 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
378 {
379 #ifdef CONFIG_SPARSEMEM
380 	pfn &= (PAGES_PER_SECTION-1);
381 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
382 #else
383 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
384 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
385 #endif /* CONFIG_SPARSEMEM */
386 }
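
/*
 * Illustrative example of the index arithmetic above, assuming the common
 * pageblock_order of 9 (and NR_PAGEBLOCK_BITS == 4, as asserted below):
 * pageblock 0 of a section owns bits 0..3 of the bitmap, pageblock 1 owns
 * bits 4..7, and so on - each pageblock gets NR_PAGEBLOCK_BITS consecutive
 * bits starting at (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS.
 */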
387 
388 /**
389  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
390  * @page: The page within the block of interest
391  * @pfn: The target page frame number
392  * @end_bitidx: The last bit of interest to retrieve
393  * @mask: mask of bits that the caller is interested in
394  *
395  * Return: pageblock_bits flags
396  */
397 static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
398 					unsigned long pfn,
399 					unsigned long end_bitidx,
400 					unsigned long mask)
401 {
402 	unsigned long *bitmap;
403 	unsigned long bitidx, word_bitidx;
404 	unsigned long word;
405 
406 	bitmap = get_pageblock_bitmap(page, pfn);
407 	bitidx = pfn_to_bitidx(page, pfn);
408 	word_bitidx = bitidx / BITS_PER_LONG;
409 	bitidx &= (BITS_PER_LONG-1);
410 
411 	word = bitmap[word_bitidx];
412 	bitidx += end_bitidx;
413 	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
414 }
415 
416 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
417 					unsigned long end_bitidx,
418 					unsigned long mask)
419 {
420 	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
421 }
422 
423 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
424 {
425 	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
426 }
427 
428 /**
429  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
430  * @page: The page within the block of interest
431  * @flags: The flags to set
432  * @pfn: The target page frame number
433  * @end_bitidx: The last bit of interest
434  * @mask: mask of bits that the caller is interested in
435  */
436 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
437 					unsigned long pfn,
438 					unsigned long end_bitidx,
439 					unsigned long mask)
440 {
441 	unsigned long *bitmap;
442 	unsigned long bitidx, word_bitidx;
443 	unsigned long old_word, word;
444 
445 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
446 
447 	bitmap = get_pageblock_bitmap(page, pfn);
448 	bitidx = pfn_to_bitidx(page, pfn);
449 	word_bitidx = bitidx / BITS_PER_LONG;
450 	bitidx &= (BITS_PER_LONG-1);
451 
452 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
453 
454 	bitidx += end_bitidx;
455 	mask <<= (BITS_PER_LONG - bitidx - 1);
456 	flags <<= (BITS_PER_LONG - bitidx - 1);
457 
458 	word = READ_ONCE(bitmap[word_bitidx]);
459 	for (;;) {
460 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
461 		if (word == old_word)
462 			break;
463 		word = old_word;
464 	}
465 }
466 
467 void set_pageblock_migratetype(struct page *page, int migratetype)
468 {
469 	if (unlikely(page_group_by_mobility_disabled &&
470 		     migratetype < MIGRATE_PCPTYPES))
471 		migratetype = MIGRATE_UNMOVABLE;
472 
473 	set_pageblock_flags_group(page, (unsigned long)migratetype,
474 					PB_migrate, PB_migrate_end);
475 }
476 
477 #ifdef CONFIG_DEBUG_VM
478 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
479 {
480 	int ret = 0;
481 	unsigned seq;
482 	unsigned long pfn = page_to_pfn(page);
483 	unsigned long sp, start_pfn;
484 
485 	do {
486 		seq = zone_span_seqbegin(zone);
487 		start_pfn = zone->zone_start_pfn;
488 		sp = zone->spanned_pages;
489 		if (!zone_spans_pfn(zone, pfn))
490 			ret = 1;
491 	} while (zone_span_seqretry(zone, seq));
492 
493 	if (ret)
494 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
495 			pfn, zone_to_nid(zone), zone->name,
496 			start_pfn, start_pfn + sp);
497 
498 	return ret;
499 }
500 
501 static int page_is_consistent(struct zone *zone, struct page *page)
502 {
503 	if (!pfn_valid_within(page_to_pfn(page)))
504 		return 0;
505 	if (zone != page_zone(page))
506 		return 0;
507 
508 	return 1;
509 }
510 /*
511  * Temporary debugging check for pages not lying within a given zone.
512  */
513 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
514 {
515 	if (page_outside_zone_boundaries(zone, page))
516 		return 1;
517 	if (!page_is_consistent(zone, page))
518 		return 1;
519 
520 	return 0;
521 }
522 #else
523 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
524 {
525 	return 0;
526 }
527 #endif
528 
529 static void bad_page(struct page *page, const char *reason,
530 		unsigned long bad_flags)
531 {
532 	static unsigned long resume;
533 	static unsigned long nr_shown;
534 	static unsigned long nr_unshown;
535 
536 	/*
537 	 * Allow a burst of 60 reports, then keep quiet for that minute;
538 	 * or allow a steady drip of one report per second.
539 	 */
540 	if (nr_shown == 60) {
541 		if (time_before(jiffies, resume)) {
542 			nr_unshown++;
543 			goto out;
544 		}
545 		if (nr_unshown) {
546 			pr_alert(
547 			      "BUG: Bad page state: %lu messages suppressed\n",
548 				nr_unshown);
549 			nr_unshown = 0;
550 		}
551 		nr_shown = 0;
552 	}
553 	if (nr_shown++ == 0)
554 		resume = jiffies + 60 * HZ;
555 
556 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
557 		current->comm, page_to_pfn(page));
558 	__dump_page(page, reason);
559 	bad_flags &= page->flags;
560 	if (bad_flags)
561 		pr_alert("bad because of flags: %#lx(%pGp)\n",
562 						bad_flags, &bad_flags);
563 	dump_page_owner(page);
564 
565 	print_modules();
566 	dump_stack();
567 out:
568 	/* Leave bad fields for debug, except PageBuddy could make trouble */
569 	page_mapcount_reset(page); /* remove PageBuddy */
570 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
571 }
572 
573 /*
574  * Higher-order pages are called "compound pages".  They are structured thusly:
575  *
576  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
577  *
578  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
579  * in bit 0 of page->compound_head. The rest of the bits form a pointer to the head page.
580  *
581  * The first tail page's ->compound_dtor holds the offset into the array of compound
582  * page destructors. See compound_page_dtors.
583  *
584  * The first tail page's ->compound_order holds the order of allocation.
585  * This usage means that zero-order pages may not be compound.
586  */
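
/*
 * Illustrative example (not tied to a specific caller): an order-2 compound
 * allocation, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *
 * returns four pages where page[0] has PG_head set, page[1..3] are tail
 * pages whose compound_head() points back at page[0], and
 * compound_order(page) == 2.
 */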
587 
588 void free_compound_page(struct page *page)
589 {
590 	__free_pages_ok(page, compound_order(page));
591 }
592 
593 void prep_compound_page(struct page *page, unsigned int order)
594 {
595 	int i;
596 	int nr_pages = 1 << order;
597 
598 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
599 	set_compound_order(page, order);
600 	__SetPageHead(page);
601 	for (i = 1; i < nr_pages; i++) {
602 		struct page *p = page + i;
603 		set_page_count(p, 0);
604 		p->mapping = TAIL_MAPPING;
605 		set_compound_head(p, page);
606 	}
607 	atomic_set(compound_mapcount_ptr(page), -1);
608 }
609 
610 #ifdef CONFIG_DEBUG_PAGEALLOC
611 unsigned int _debug_guardpage_minorder;
612 bool _debug_pagealloc_enabled __read_mostly
613 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
614 EXPORT_SYMBOL(_debug_pagealloc_enabled);
615 bool _debug_guardpage_enabled __read_mostly;
616 
617 static int __init early_debug_pagealloc(char *buf)
618 {
619 	if (!buf)
620 		return -EINVAL;
621 	return kstrtobool(buf, &_debug_pagealloc_enabled);
622 }
623 early_param("debug_pagealloc", early_debug_pagealloc);
624 
625 static bool need_debug_guardpage(void)
626 {
627 	/* If we don't use debug_pagealloc, we don't need guard page */
628 	if (!debug_pagealloc_enabled())
629 		return false;
630 
631 	if (!debug_guardpage_minorder())
632 		return false;
633 
634 	return true;
635 }
636 
637 static void init_debug_guardpage(void)
638 {
639 	if (!debug_pagealloc_enabled())
640 		return;
641 
642 	if (!debug_guardpage_minorder())
643 		return;
644 
645 	_debug_guardpage_enabled = true;
646 }
647 
648 struct page_ext_operations debug_guardpage_ops = {
649 	.need = need_debug_guardpage,
650 	.init = init_debug_guardpage,
651 };
652 
653 static int __init debug_guardpage_minorder_setup(char *buf)
654 {
655 	unsigned long res;
656 
657 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
658 		pr_err("Bad debug_guardpage_minorder value\n");
659 		return 0;
660 	}
661 	_debug_guardpage_minorder = res;
662 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
663 	return 0;
664 }
665 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
666 
667 static inline bool set_page_guard(struct zone *zone, struct page *page,
668 				unsigned int order, int migratetype)
669 {
670 	struct page_ext *page_ext;
671 
672 	if (!debug_guardpage_enabled())
673 		return false;
674 
675 	if (order >= debug_guardpage_minorder())
676 		return false;
677 
678 	page_ext = lookup_page_ext(page);
679 	if (unlikely(!page_ext))
680 		return false;
681 
682 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
683 
684 	INIT_LIST_HEAD(&page->lru);
685 	set_page_private(page, order);
686 	/* Guard pages are not available for any usage */
687 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
688 
689 	return true;
690 }
691 
692 static inline void clear_page_guard(struct zone *zone, struct page *page,
693 				unsigned int order, int migratetype)
694 {
695 	struct page_ext *page_ext;
696 
697 	if (!debug_guardpage_enabled())
698 		return;
699 
700 	page_ext = lookup_page_ext(page);
701 	if (unlikely(!page_ext))
702 		return;
703 
704 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
705 
706 	set_page_private(page, 0);
707 	if (!is_migrate_isolate(migratetype))
708 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
709 }
710 #else
711 struct page_ext_operations debug_guardpage_ops;
712 static inline bool set_page_guard(struct zone *zone, struct page *page,
713 			unsigned int order, int migratetype) { return false; }
714 static inline void clear_page_guard(struct zone *zone, struct page *page,
715 				unsigned int order, int migratetype) {}
716 #endif
717 
718 static inline void set_page_order(struct page *page, unsigned int order)
719 {
720 	set_page_private(page, order);
721 	__SetPageBuddy(page);
722 }
723 
724 static inline void rmv_page_order(struct page *page)
725 {
726 	__ClearPageBuddy(page);
727 	set_page_private(page, 0);
728 }
729 
730 /*
731  * This function checks whether a page is free && is the buddy of the given page.
732  * We can coalesce a page and its buddy if
733  * (a) the buddy is not in a hole (check before calling!) &&
734  * (b) the buddy is in the buddy system &&
735  * (c) a page and its buddy have the same order &&
736  * (d) a page and its buddy are in the same zone.
737  *
738  * For recording whether a page is in the buddy system, we set ->_mapcount
739  * to PAGE_BUDDY_MAPCOUNT_VALUE.
740  * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE is
741  * serialized by zone->lock.
742  *
743  * For recording page's order, we use page_private(page).
744  */
745 static inline int page_is_buddy(struct page *page, struct page *buddy,
746 							unsigned int order)
747 {
748 	if (page_is_guard(buddy) && page_order(buddy) == order) {
749 		if (page_zone_id(page) != page_zone_id(buddy))
750 			return 0;
751 
752 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
753 
754 		return 1;
755 	}
756 
757 	if (PageBuddy(buddy) && page_order(buddy) == order) {
758 		/*
759 		 * zone check is done late to avoid uselessly
760 		 * calculating zone/node ids for pages that could
761 		 * never merge.
762 		 */
763 		if (page_zone_id(page) != page_zone_id(buddy))
764 			return 0;
765 
766 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
767 
768 		return 1;
769 	}
770 	return 0;
771 }
772 
773 /*
774  * Freeing function for a buddy system allocator.
775  *
776  * The concept of a buddy system is to maintain a direct-mapped table
777  * (containing bit values) for memory blocks of various "orders".
778  * The bottom level table contains the map for the smallest allocatable
779  * units of memory (here, pages), and each level above it describes
780  * pairs of units from the levels below, hence, "buddies".
781  * At a high level, all that happens here is marking the table entry
782  * at the bottom level available, and propagating the changes upward
783  * as necessary, plus some accounting needed to play nicely with other
784  * parts of the VM system.
785  * At each level, we keep a list of pages, which are heads of contiguous
786  * runs of (1 << order) free pages, marked with _mapcount
787  * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
788  * page_private(page) field.
789  * So when we are allocating or freeing one, we can derive the state of the
790  * other.  That is, if we allocate a small block, and both were
791  * free, the remainder of the region must be split into blocks.
792  * If a block is freed, and its buddy is also free, then this
793  * triggers coalescing into a block of larger size.
794  *
795  * -- nyc
796  */
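
/*
 * Worked example of the merging below (illustrative pfns): freeing the
 * order-0 page at pfn 8 computes its buddy as __find_buddy_pfn(8, 0), i.e.
 * 8 ^ (1 << 0) = 9.  If pfn 9 is a free order-0 buddy, the pair merges into
 * an order-1 block at pfn 8, whose buddy is then 8 ^ (1 << 1) = 10, and so
 * on up the orders until a buddy is found busy or max_order is reached.
 */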
797 
798 static inline void __free_one_page(struct page *page,
799 		unsigned long pfn,
800 		struct zone *zone, unsigned int order,
801 		int migratetype)
802 {
803 	unsigned long combined_pfn;
804 	unsigned long uninitialized_var(buddy_pfn);
805 	struct page *buddy;
806 	unsigned int max_order;
807 
808 	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
809 
810 	VM_BUG_ON(!zone_is_initialized(zone));
811 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
812 
813 	VM_BUG_ON(migratetype == -1);
814 	if (likely(!is_migrate_isolate(migratetype)))
815 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
816 
817 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
818 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
819 
820 continue_merging:
821 	while (order < max_order - 1) {
822 		buddy_pfn = __find_buddy_pfn(pfn, order);
823 		buddy = page + (buddy_pfn - pfn);
824 
825 		if (!pfn_valid_within(buddy_pfn))
826 			goto done_merging;
827 		if (!page_is_buddy(page, buddy, order))
828 			goto done_merging;
829 		/*
830 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
831 		 * merge with it and move up one order.
832 		 */
833 		if (page_is_guard(buddy)) {
834 			clear_page_guard(zone, buddy, order, migratetype);
835 		} else {
836 			list_del(&buddy->lru);
837 			zone->free_area[order].nr_free--;
838 			rmv_page_order(buddy);
839 		}
840 		combined_pfn = buddy_pfn & pfn;
841 		page = page + (combined_pfn - pfn);
842 		pfn = combined_pfn;
843 		order++;
844 	}
845 	if (max_order < MAX_ORDER) {
846 		/* If we are here, it means order is >= pageblock_order.
847 		 * We want to prevent merge between freepages on isolate
848 		 * pageblock and normal pageblock. Without this, pageblock
849 		 * isolation could cause incorrect freepage or CMA accounting.
850 		 *
851 		 * We don't want to hit this code for the more frequent
852 		 * low-order merging.
853 		 */
854 		if (unlikely(has_isolate_pageblock(zone))) {
855 			int buddy_mt;
856 
857 			buddy_pfn = __find_buddy_pfn(pfn, order);
858 			buddy = page + (buddy_pfn - pfn);
859 			buddy_mt = get_pageblock_migratetype(buddy);
860 
861 			if (migratetype != buddy_mt
862 					&& (is_migrate_isolate(migratetype) ||
863 						is_migrate_isolate(buddy_mt)))
864 				goto done_merging;
865 		}
866 		max_order++;
867 		goto continue_merging;
868 	}
869 
870 done_merging:
871 	set_page_order(page, order);
872 
873 	/*
874 	 * If this is not the largest possible page, check if the buddy
875 	 * of the next-highest order is free. If it is, it's possible
876 	 * that pages are being freed that will coalesce soon. If that is
877 	 * happening, add the free page to the tail of the list
878 	 * so it's less likely to be used soon and more likely to be merged
879 	 * as a higher order page
880 	 */
881 	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
882 		struct page *higher_page, *higher_buddy;
883 		combined_pfn = buddy_pfn & pfn;
884 		higher_page = page + (combined_pfn - pfn);
885 		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
886 		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
887 		if (pfn_valid_within(buddy_pfn) &&
888 		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
889 			list_add_tail(&page->lru,
890 				&zone->free_area[order].free_list[migratetype]);
891 			goto out;
892 		}
893 	}
894 
895 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
896 out:
897 	zone->free_area[order].nr_free++;
898 }
899 
900 /*
901  * A bad page could be due to a number of fields. Instead of multiple branches,
902  * try and check multiple fields with one check. The caller must do a detailed
903  * check if necessary.
904  */
905 static inline bool page_expected_state(struct page *page,
906 					unsigned long check_flags)
907 {
908 	if (unlikely(atomic_read(&page->_mapcount) != -1))
909 		return false;
910 
911 	if (unlikely((unsigned long)page->mapping |
912 			page_ref_count(page) |
913 #ifdef CONFIG_MEMCG
914 			(unsigned long)page->mem_cgroup |
915 #endif
916 			(page->flags & check_flags)))
917 		return false;
918 
919 	return true;
920 }
921 
922 static void free_pages_check_bad(struct page *page)
923 {
924 	const char *bad_reason;
925 	unsigned long bad_flags;
926 
927 	bad_reason = NULL;
928 	bad_flags = 0;
929 
930 	if (unlikely(atomic_read(&page->_mapcount) != -1))
931 		bad_reason = "nonzero mapcount";
932 	if (unlikely(page->mapping != NULL))
933 		bad_reason = "non-NULL mapping";
934 	if (unlikely(page_ref_count(page) != 0))
935 		bad_reason = "nonzero _refcount";
936 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
937 		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
938 		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
939 	}
940 #ifdef CONFIG_MEMCG
941 	if (unlikely(page->mem_cgroup))
942 		bad_reason = "page still charged to cgroup";
943 #endif
944 	bad_page(page, bad_reason, bad_flags);
945 }
946 
947 static inline int free_pages_check(struct page *page)
948 {
949 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
950 		return 0;
951 
952 	/* Something has gone sideways, find it */
953 	free_pages_check_bad(page);
954 	return 1;
955 }
956 
957 static int free_tail_pages_check(struct page *head_page, struct page *page)
958 {
959 	int ret = 1;
960 
961 	/*
962 	 * We rely on page->lru.next never having bit 0 set, unless the page
963 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
964 	 */
965 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
966 
967 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
968 		ret = 0;
969 		goto out;
970 	}
971 	switch (page - head_page) {
972 	case 1:
973 		/* the first tail page: ->mapping is compound_mapcount() */
974 		if (unlikely(compound_mapcount(page))) {
975 			bad_page(page, "nonzero compound_mapcount", 0);
976 			goto out;
977 		}
978 		break;
979 	case 2:
980 		/*
981 		 * the second tail page: ->mapping is
982 		 * page_deferred_list().next -- ignore value.
983 		 */
984 		break;
985 	default:
986 		if (page->mapping != TAIL_MAPPING) {
987 			bad_page(page, "corrupted mapping in tail page", 0);
988 			goto out;
989 		}
990 		break;
991 	}
992 	if (unlikely(!PageTail(page))) {
993 		bad_page(page, "PageTail not set", 0);
994 		goto out;
995 	}
996 	if (unlikely(compound_head(page) != head_page)) {
997 		bad_page(page, "compound_head not consistent", 0);
998 		goto out;
999 	}
1000 	ret = 0;
1001 out:
1002 	page->mapping = NULL;
1003 	clear_compound_head(page);
1004 	return ret;
1005 }
1006 
1007 static __always_inline bool free_pages_prepare(struct page *page,
1008 					unsigned int order, bool check_free)
1009 {
1010 	int bad = 0;
1011 
1012 	VM_BUG_ON_PAGE(PageTail(page), page);
1013 
1014 	trace_mm_page_free(page, order);
1015 	kmemcheck_free_shadow(page, order);
1016 
1017 	/*
1018 	 * Check tail pages before head page information is cleared to
1019 	 * avoid checking PageCompound for order-0 pages.
1020 	 */
1021 	if (unlikely(order)) {
1022 		bool compound = PageCompound(page);
1023 		int i;
1024 
1025 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1026 
1027 		if (compound)
1028 			ClearPageDoubleMap(page);
1029 		for (i = 1; i < (1 << order); i++) {
1030 			if (compound)
1031 				bad += free_tail_pages_check(page, page + i);
1032 			if (unlikely(free_pages_check(page + i))) {
1033 				bad++;
1034 				continue;
1035 			}
1036 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1037 		}
1038 	}
1039 	if (PageMappingFlags(page))
1040 		page->mapping = NULL;
1041 	if (memcg_kmem_enabled() && PageKmemcg(page))
1042 		memcg_kmem_uncharge(page, order);
1043 	if (check_free)
1044 		bad += free_pages_check(page);
1045 	if (bad)
1046 		return false;
1047 
1048 	page_cpupid_reset_last(page);
1049 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1050 	reset_page_owner(page, order);
1051 
1052 	if (!PageHighMem(page)) {
1053 		debug_check_no_locks_freed(page_address(page),
1054 					   PAGE_SIZE << order);
1055 		debug_check_no_obj_freed(page_address(page),
1056 					   PAGE_SIZE << order);
1057 	}
1058 	arch_free_page(page, order);
1059 	kernel_poison_pages(page, 1 << order, 0);
1060 	kernel_map_pages(page, 1 << order, 0);
1061 	kasan_free_pages(page, order);
1062 
1063 	return true;
1064 }
1065 
1066 #ifdef CONFIG_DEBUG_VM
1067 static inline bool free_pcp_prepare(struct page *page)
1068 {
1069 	return free_pages_prepare(page, 0, true);
1070 }
1071 
1072 static inline bool bulkfree_pcp_prepare(struct page *page)
1073 {
1074 	return false;
1075 }
1076 #else
1077 static bool free_pcp_prepare(struct page *page)
1078 {
1079 	return free_pages_prepare(page, 0, false);
1080 }
1081 
1082 static bool bulkfree_pcp_prepare(struct page *page)
1083 {
1084 	return free_pages_check(page);
1085 }
1086 #endif /* CONFIG_DEBUG_VM */
1087 
1088 /*
1089  * Frees a number of pages from the PCP lists
1090  * Assumes all pages on list are in same zone, and of same order.
1091  * count is the number of pages to free.
1092  *
1093  * If the zone was previously in an "all pages pinned" state then look to
1094  * see if this freeing clears that state.
1095  *
1096  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1097  * pinned" detection logic.
1098  */
1099 static void free_pcppages_bulk(struct zone *zone, int count,
1100 					struct per_cpu_pages *pcp)
1101 {
1102 	int migratetype = 0;
1103 	int batch_free = 0;
1104 	bool isolated_pageblocks;
1105 
1106 	spin_lock(&zone->lock);
1107 	isolated_pageblocks = has_isolate_pageblock(zone);
1108 
1109 	while (count) {
1110 		struct page *page;
1111 		struct list_head *list;
1112 
1113 		/*
1114 		 * Remove pages from lists in a round-robin fashion. A
1115 		 * batch_free count is maintained that is incremented when an
1116 		 * empty list is encountered.  This is so more pages are freed
1117 		 * off fuller lists instead of spinning excessively around empty
1118 		 * lists
1119 		 */
1120 		do {
1121 			batch_free++;
1122 			if (++migratetype == MIGRATE_PCPTYPES)
1123 				migratetype = 0;
1124 			list = &pcp->lists[migratetype];
1125 		} while (list_empty(list));
1126 
1127 		/* This is the only non-empty list. Free them all. */
1128 		if (batch_free == MIGRATE_PCPTYPES)
1129 			batch_free = count;
1130 
1131 		do {
1132 			int mt;	/* migratetype of the to-be-freed page */
1133 
1134 			page = list_last_entry(list, struct page, lru);
1135 			/* must delete as __free_one_page list manipulates */
1136 			list_del(&page->lru);
1137 
1138 			mt = get_pcppage_migratetype(page);
1139 			/* MIGRATE_ISOLATE page should not go to pcplists */
1140 			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1141 			/* Pageblock could have been isolated meanwhile */
1142 			if (unlikely(isolated_pageblocks))
1143 				mt = get_pageblock_migratetype(page);
1144 
1145 			if (bulkfree_pcp_prepare(page))
1146 				continue;
1147 
1148 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
1149 			trace_mm_page_pcpu_drain(page, 0, mt);
1150 		} while (--count && --batch_free && !list_empty(list));
1151 	}
1152 	spin_unlock(&zone->lock);
1153 }
1154 
1155 static void free_one_page(struct zone *zone,
1156 				struct page *page, unsigned long pfn,
1157 				unsigned int order,
1158 				int migratetype)
1159 {
1160 	spin_lock(&zone->lock);
1161 	if (unlikely(has_isolate_pageblock(zone) ||
1162 		is_migrate_isolate(migratetype))) {
1163 		migratetype = get_pfnblock_migratetype(page, pfn);
1164 	}
1165 	__free_one_page(page, pfn, zone, order, migratetype);
1166 	spin_unlock(&zone->lock);
1167 }
1168 
1169 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1170 				unsigned long zone, int nid)
1171 {
1172 	set_page_links(page, zone, nid, pfn);
1173 	init_page_count(page);
1174 	page_mapcount_reset(page);
1175 	page_cpupid_reset_last(page);
1176 
1177 	INIT_LIST_HEAD(&page->lru);
1178 #ifdef WANT_PAGE_VIRTUAL
1179 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1180 	if (!is_highmem_idx(zone))
1181 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1182 #endif
1183 }
1184 
1185 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1186 					int nid)
1187 {
1188 	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1189 }
1190 
1191 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1192 static void init_reserved_page(unsigned long pfn)
1193 {
1194 	pg_data_t *pgdat;
1195 	int nid, zid;
1196 
1197 	if (!early_page_uninitialised(pfn))
1198 		return;
1199 
1200 	nid = early_pfn_to_nid(pfn);
1201 	pgdat = NODE_DATA(nid);
1202 
1203 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1204 		struct zone *zone = &pgdat->node_zones[zid];
1205 
1206 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1207 			break;
1208 	}
1209 	__init_single_pfn(pfn, zid, nid);
1210 }
1211 #else
1212 static inline void init_reserved_page(unsigned long pfn)
1213 {
1214 }
1215 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1216 
1217 /*
1218  * Initialised pages do not have PageReserved set. This function is
1219  * called for each range allocated by the bootmem allocator and
1220  * marks the pages PageReserved. The remaining valid pages are later
1221  * sent to the buddy page allocator.
1222  */
1223 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1224 {
1225 	unsigned long start_pfn = PFN_DOWN(start);
1226 	unsigned long end_pfn = PFN_UP(end);
1227 
1228 	for (; start_pfn < end_pfn; start_pfn++) {
1229 		if (pfn_valid(start_pfn)) {
1230 			struct page *page = pfn_to_page(start_pfn);
1231 
1232 			init_reserved_page(start_pfn);
1233 
1234 			/* Avoid false-positive PageTail() */
1235 			INIT_LIST_HEAD(&page->lru);
1236 
1237 			SetPageReserved(page);
1238 		}
1239 	}
1240 }
1241 
1242 static void __free_pages_ok(struct page *page, unsigned int order)
1243 {
1244 	unsigned long flags;
1245 	int migratetype;
1246 	unsigned long pfn = page_to_pfn(page);
1247 
1248 	if (!free_pages_prepare(page, order, true))
1249 		return;
1250 
1251 	migratetype = get_pfnblock_migratetype(page, pfn);
1252 	local_irq_save(flags);
1253 	__count_vm_events(PGFREE, 1 << order);
1254 	free_one_page(page_zone(page), page, pfn, order, migratetype);
1255 	local_irq_restore(flags);
1256 }
1257 
1258 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1259 {
1260 	unsigned int nr_pages = 1 << order;
1261 	struct page *p = page;
1262 	unsigned int loop;
1263 
1264 	prefetchw(p);
1265 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1266 		prefetchw(p + 1);
1267 		__ClearPageReserved(p);
1268 		set_page_count(p, 0);
1269 	}
1270 	__ClearPageReserved(p);
1271 	set_page_count(p, 0);
1272 
1273 	page_zone(page)->managed_pages += nr_pages;
1274 	set_page_refcounted(page);
1275 	__free_pages(page, order);
1276 }
1277 
1278 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1279 	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1280 
1281 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1282 
1283 int __meminit early_pfn_to_nid(unsigned long pfn)
1284 {
1285 	static DEFINE_SPINLOCK(early_pfn_lock);
1286 	int nid;
1287 
1288 	spin_lock(&early_pfn_lock);
1289 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1290 	if (nid < 0)
1291 		nid = first_online_node;
1292 	spin_unlock(&early_pfn_lock);
1293 
1294 	return nid;
1295 }
1296 #endif
1297 
1298 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
1299 static inline bool __meminit __maybe_unused
1300 meminit_pfn_in_nid(unsigned long pfn, int node,
1301 		   struct mminit_pfnnid_cache *state)
1302 {
1303 	int nid;
1304 
1305 	nid = __early_pfn_to_nid(pfn, state);
1306 	if (nid >= 0 && nid != node)
1307 		return false;
1308 	return true;
1309 }
1310 
1311 /* Only safe to use early in boot when initialisation is single-threaded */
1312 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1313 {
1314 	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1315 }
1316 
1317 #else
1318 
1319 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1320 {
1321 	return true;
1322 }
1323 static inline bool __meminit  __maybe_unused
1324 meminit_pfn_in_nid(unsigned long pfn, int node,
1325 		   struct mminit_pfnnid_cache *state)
1326 {
1327 	return true;
1328 }
1329 #endif
1330 
1331 
1332 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1333 							unsigned int order)
1334 {
1335 	if (early_page_uninitialised(pfn))
1336 		return;
1337 	return __free_pages_boot_core(page, order);
1338 }
1339 
1340 /*
1341  * Check that the whole (or subset of) a pageblock given by the interval of
1342  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1343  * with compaction's migration or free scanner. The scanners then need to
1344  * use only pfn_valid_within() check for arches that allow holes within
1345  * pageblocks.
1346  *
1347  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1348  *
1349  * It's possible on some configurations to have a setup like node0 node1 node0
1350  * i.e. it's possible that all pages within a zone's range of pages do not
1351  * belong to a single zone. We assume that a border between node0 and node1
1352  * can occur within a single pageblock, but not a node0 node1 node0
1353  * interleaving within a single pageblock. It is therefore sufficient to check
1354  * the first and last page of a pageblock and avoid checking each individual
1355  * page in a pageblock.
1356  */
1357 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1358 				     unsigned long end_pfn, struct zone *zone)
1359 {
1360 	struct page *start_page;
1361 	struct page *end_page;
1362 
1363 	/* end_pfn is one past the range we are checking */
1364 	end_pfn--;
1365 
1366 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1367 		return NULL;
1368 
1369 	start_page = pfn_to_online_page(start_pfn);
1370 	if (!start_page)
1371 		return NULL;
1372 
1373 	if (page_zone(start_page) != zone)
1374 		return NULL;
1375 
1376 	end_page = pfn_to_page(end_pfn);
1377 
1378 	/* This gives shorter code than deriving page_zone(end_page) */
1379 	if (page_zone_id(start_page) != page_zone_id(end_page))
1380 		return NULL;
1381 
1382 	return start_page;
1383 }
1384 
1385 void set_zone_contiguous(struct zone *zone)
1386 {
1387 	unsigned long block_start_pfn = zone->zone_start_pfn;
1388 	unsigned long block_end_pfn;
1389 
1390 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1391 	for (; block_start_pfn < zone_end_pfn(zone);
1392 			block_start_pfn = block_end_pfn,
1393 			 block_end_pfn += pageblock_nr_pages) {
1394 
1395 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1396 
1397 		if (!__pageblock_pfn_to_page(block_start_pfn,
1398 					     block_end_pfn, zone))
1399 			return;
1400 	}
1401 
1402 	/* We confirm that there is no hole */
1403 	zone->contiguous = true;
1404 }
1405 
1406 void clear_zone_contiguous(struct zone *zone)
1407 {
1408 	zone->contiguous = false;
1409 }
1410 
1411 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1412 static void __init deferred_free_range(struct page *page,
1413 					unsigned long pfn, int nr_pages)
1414 {
1415 	int i;
1416 
1417 	if (!page)
1418 		return;
1419 
1420 	/* Free a large naturally-aligned chunk if possible */
1421 	if (nr_pages == pageblock_nr_pages &&
1422 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1423 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1424 		__free_pages_boot_core(page, pageblock_order);
1425 		return;
1426 	}
1427 
1428 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1429 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1430 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1431 		__free_pages_boot_core(page, 0);
1432 	}
1433 }
1434 
1435 /* Completion tracking for deferred_init_memmap() threads */
1436 static atomic_t pgdat_init_n_undone __initdata;
1437 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1438 
1439 static inline void __init pgdat_init_report_one_done(void)
1440 {
1441 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1442 		complete(&pgdat_init_all_done_comp);
1443 }
1444 
1445 /* Initialise remaining memory on a node */
1446 static int __init deferred_init_memmap(void *data)
1447 {
1448 	pg_data_t *pgdat = data;
1449 	int nid = pgdat->node_id;
1450 	struct mminit_pfnnid_cache nid_init_state = { };
1451 	unsigned long start = jiffies;
1452 	unsigned long nr_pages = 0;
1453 	unsigned long walk_start, walk_end;
1454 	int i, zid;
1455 	struct zone *zone;
1456 	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1457 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1458 
1459 	if (first_init_pfn == ULONG_MAX) {
1460 		pgdat_init_report_one_done();
1461 		return 0;
1462 	}
1463 
1464 	/* Bind memory initialisation thread to a local node if possible */
1465 	if (!cpumask_empty(cpumask))
1466 		set_cpus_allowed_ptr(current, cpumask);
1467 
1468 	/* Sanity check boundaries */
1469 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1470 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1471 	pgdat->first_deferred_pfn = ULONG_MAX;
1472 
1473 	/* Only the highest zone is deferred so find it */
1474 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1475 		zone = pgdat->node_zones + zid;
1476 		if (first_init_pfn < zone_end_pfn(zone))
1477 			break;
1478 	}
1479 
1480 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1481 		unsigned long pfn, end_pfn;
1482 		struct page *page = NULL;
1483 		struct page *free_base_page = NULL;
1484 		unsigned long free_base_pfn = 0;
1485 		int nr_to_free = 0;
1486 
1487 		end_pfn = min(walk_end, zone_end_pfn(zone));
1488 		pfn = first_init_pfn;
1489 		if (pfn < walk_start)
1490 			pfn = walk_start;
1491 		if (pfn < zone->zone_start_pfn)
1492 			pfn = zone->zone_start_pfn;
1493 
1494 		for (; pfn < end_pfn; pfn++) {
1495 			if (!pfn_valid_within(pfn))
1496 				goto free_range;
1497 
1498 			/*
1499 			 * Ensure pfn_valid is checked every
1500 			 * pageblock_nr_pages for memory holes
1501 			 */
1502 			if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1503 				if (!pfn_valid(pfn)) {
1504 					page = NULL;
1505 					goto free_range;
1506 				}
1507 			}
1508 
1509 			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1510 				page = NULL;
1511 				goto free_range;
1512 			}
1513 
1514 			/* Minimise pfn page lookups and scheduler checks */
1515 			if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1516 				page++;
1517 			} else {
1518 				nr_pages += nr_to_free;
1519 				deferred_free_range(free_base_page,
1520 						free_base_pfn, nr_to_free);
1521 				free_base_page = NULL;
1522 				free_base_pfn = nr_to_free = 0;
1523 
1524 				page = pfn_to_page(pfn);
1525 				cond_resched();
1526 			}
1527 
1528 			if (page->flags) {
1529 				VM_BUG_ON(page_zone(page) != zone);
1530 				goto free_range;
1531 			}
1532 
1533 			__init_single_page(page, pfn, zid, nid);
1534 			if (!free_base_page) {
1535 				free_base_page = page;
1536 				free_base_pfn = pfn;
1537 				nr_to_free = 0;
1538 			}
1539 			nr_to_free++;
1540 
1541 			/* Where possible, batch up pages for a single free */
1542 			continue;
1543 free_range:
1544 			/* Free the current block of pages to allocator */
1545 			nr_pages += nr_to_free;
1546 			deferred_free_range(free_base_page, free_base_pfn,
1547 								nr_to_free);
1548 			free_base_page = NULL;
1549 			free_base_pfn = nr_to_free = 0;
1550 		}
1551 		/* Free the last block of pages to allocator */
1552 		nr_pages += nr_to_free;
1553 		deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1554 
1555 		first_init_pfn = max(end_pfn, first_init_pfn);
1556 	}
1557 
1558 	/* Sanity check that the next zone really is unpopulated */
1559 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1560 
1561 	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1562 					jiffies_to_msecs(jiffies - start));
1563 
1564 	pgdat_init_report_one_done();
1565 	return 0;
1566 }
1567 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1568 
1569 void __init page_alloc_init_late(void)
1570 {
1571 	struct zone *zone;
1572 
1573 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1574 	int nid;
1575 
1576 	/* There will be num_node_state(N_MEMORY) threads */
1577 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1578 	for_each_node_state(nid, N_MEMORY) {
1579 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1580 	}
1581 
1582 	/* Block until all are initialised */
1583 	wait_for_completion(&pgdat_init_all_done_comp);
1584 
1585 	/* Reinit limits that are based on free pages after the kernel is up */
1586 	files_maxfiles_init();
1587 #endif
1588 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1589 	/* Discard memblock private memory */
1590 	memblock_discard();
1591 #endif
1592 
1593 	for_each_populated_zone(zone)
1594 		set_zone_contiguous(zone);
1595 }
1596 
1597 #ifdef CONFIG_CMA
1598 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1599 void __init init_cma_reserved_pageblock(struct page *page)
1600 {
1601 	unsigned i = pageblock_nr_pages;
1602 	struct page *p = page;
1603 
1604 	do {
1605 		__ClearPageReserved(p);
1606 		set_page_count(p, 0);
1607 	} while (++p, --i);
1608 
1609 	set_pageblock_migratetype(page, MIGRATE_CMA);
1610 
1611 	if (pageblock_order >= MAX_ORDER) {
1612 		i = pageblock_nr_pages;
1613 		p = page;
1614 		do {
1615 			set_page_refcounted(p);
1616 			__free_pages(p, MAX_ORDER - 1);
1617 			p += MAX_ORDER_NR_PAGES;
1618 		} while (i -= MAX_ORDER_NR_PAGES);
1619 	} else {
1620 		set_page_refcounted(page);
1621 		__free_pages(page, pageblock_order);
1622 	}
1623 
1624 	adjust_managed_page_count(page, pageblock_nr_pages);
1625 }
1626 #endif
1627 
1628 /*
1629  * The order of subdivision here is critical for the IO subsystem.
1630  * Please do not alter this order without good reasons and regression
1631  * testing. Specifically, as large blocks of memory are subdivided,
1632  * the order in which smaller blocks are delivered depends on the order
1633  * they're subdivided in this function. This is the primary factor
1634  * influencing the order in which pages are delivered to the IO
1635  * subsystem according to empirical testing, and this is also justified
1636  * by considering the behavior of a buddy system containing a single
1637  * large block of memory acted on by a series of small allocations.
1638  * This behavior is a critical factor in sglist merging's success.
1639  *
1640  * -- nyc
1641  */
1642 static inline void expand(struct zone *zone, struct page *page,
1643 	int low, int high, struct free_area *area,
1644 	int migratetype)
1645 {
1646 	unsigned long size = 1 << high;
1647 
1648 	while (high > low) {
1649 		area--;
1650 		high--;
1651 		size >>= 1;
1652 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1653 
1654 		/*
1655 		 * Mark as guard pages (or page), so they can be merged back
1656 		 * into the allocator when the buddy is freed.
1657 		 * The corresponding page table entries are not touched;
1658 		 * the pages stay not present in the virtual address space.
1659 		 */
1660 		if (set_page_guard(zone, &page[size], high, migratetype))
1661 			continue;
1662 
1663 		list_add(&page[size].lru, &area->free_list[migratetype]);
1664 		area->nr_free++;
1665 		set_page_order(&page[size], high);
1666 	}
1667 }
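
/*
 * Example of a split (illustrative): serving an order-0 request from an
 * order-3 free block, __rmqueue_smallest() below calls
 * expand(zone, page, 0, 3, area, migratetype); the loop puts the upper half
 * back on the free list as an order-2 block, then splits the remainder again
 * into an order-1 and finally an order-0 block, leaving only page[0] for the
 * caller.
 */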
1668 
1669 static void check_new_page_bad(struct page *page)
1670 {
1671 	const char *bad_reason = NULL;
1672 	unsigned long bad_flags = 0;
1673 
1674 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1675 		bad_reason = "nonzero mapcount";
1676 	if (unlikely(page->mapping != NULL))
1677 		bad_reason = "non-NULL mapping";
1678 	if (unlikely(page_ref_count(page) != 0))
1679 		bad_reason = "nonzero _count";
1680 	if (unlikely(page->flags & __PG_HWPOISON)) {
1681 		bad_reason = "HWPoisoned (hardware-corrupted)";
1682 		bad_flags = __PG_HWPOISON;
1683 		/* Don't complain about hwpoisoned pages */
1684 		page_mapcount_reset(page); /* remove PageBuddy */
1685 		return;
1686 	}
1687 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1688 		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1689 		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1690 	}
1691 #ifdef CONFIG_MEMCG
1692 	if (unlikely(page->mem_cgroup))
1693 		bad_reason = "page still charged to cgroup";
1694 #endif
1695 	bad_page(page, bad_reason, bad_flags);
1696 }
1697 
1698 /*
1699  * This page is about to be returned from the page allocator
1700  */
1701 static inline int check_new_page(struct page *page)
1702 {
1703 	if (likely(page_expected_state(page,
1704 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1705 		return 0;
1706 
1707 	check_new_page_bad(page);
1708 	return 1;
1709 }
1710 
1711 static inline bool free_pages_prezeroed(void)
1712 {
1713 	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1714 		page_poisoning_enabled();
1715 }
1716 
1717 #ifdef CONFIG_DEBUG_VM
1718 static bool check_pcp_refill(struct page *page)
1719 {
1720 	return false;
1721 }
1722 
1723 static bool check_new_pcp(struct page *page)
1724 {
1725 	return check_new_page(page);
1726 }
1727 #else
1728 static bool check_pcp_refill(struct page *page)
1729 {
1730 	return check_new_page(page);
1731 }
1732 static bool check_new_pcp(struct page *page)
1733 {
1734 	return false;
1735 }
1736 #endif /* CONFIG_DEBUG_VM */
1737 
1738 static bool check_new_pages(struct page *page, unsigned int order)
1739 {
1740 	int i;
1741 	for (i = 0; i < (1 << order); i++) {
1742 		struct page *p = page + i;
1743 
1744 		if (unlikely(check_new_page(p)))
1745 			return true;
1746 	}
1747 
1748 	return false;
1749 }
1750 
1751 inline void post_alloc_hook(struct page *page, unsigned int order,
1752 				gfp_t gfp_flags)
1753 {
1754 	set_page_private(page, 0);
1755 	set_page_refcounted(page);
1756 
1757 	arch_alloc_page(page, order);
1758 	kernel_map_pages(page, 1 << order, 1);
1759 	kernel_poison_pages(page, 1 << order, 1);
1760 	kasan_alloc_pages(page, order);
1761 	set_page_owner(page, order, gfp_flags);
1762 }
1763 
1764 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1765 							unsigned int alloc_flags)
1766 {
1767 	int i;
1768 
1769 	post_alloc_hook(page, order, gfp_flags);
1770 
1771 	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1772 		for (i = 0; i < (1 << order); i++)
1773 			clear_highpage(page + i);
1774 
1775 	if (order && (gfp_flags & __GFP_COMP))
1776 		prep_compound_page(page, order);
1777 
1778 	/*
1779 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1780 	 * allocate the page. The expectation is that the caller is taking
1781 	 * steps that will free more memory. The caller should avoid the page
1782 	 * being used for !PFMEMALLOC purposes.
1783 	 */
1784 	if (alloc_flags & ALLOC_NO_WATERMARKS)
1785 		set_page_pfmemalloc(page);
1786 	else
1787 		clear_page_pfmemalloc(page);
1788 }
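
/*
 * Usage note (illustrative): a caller requesting zeroed memory, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *
 * has its pages cleared in prep_new_page() via clear_highpage(), unless
 * zero-poisoning (see free_pages_prezeroed()) already guarantees the
 * contents are zero, in which case the clearing is skipped.
 */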
1789 
1790 /*
1791  * Go through the free lists for the given migratetype and remove
1792  * the smallest available page from the freelists
1793  */
1794 static inline
1795 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1796 						int migratetype)
1797 {
1798 	unsigned int current_order;
1799 	struct free_area *area;
1800 	struct page *page;
1801 
1802 	/* Find a page of the appropriate size in the preferred list */
1803 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1804 		area = &(zone->free_area[current_order]);
1805 		page = list_first_entry_or_null(&area->free_list[migratetype],
1806 							struct page, lru);
1807 		if (!page)
1808 			continue;
1809 		list_del(&page->lru);
1810 		rmv_page_order(page);
1811 		area->nr_free--;
1812 		expand(zone, page, order, current_order, area, migratetype);
1813 		set_pcppage_migratetype(page, migratetype);
1814 		return page;
1815 	}
1816 
1817 	return NULL;
1818 }
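/*
 * Illustrative example: for an order-2 request where only the order-5 free
 * list of the wanted migratetype is populated, the loop walks orders 2..4
 * without finding a page, removes a 32-page buddy at order 5, and expand()
 * returns the order-4, order-3 and order-2 upper halves to the free lists,
 * leaving the caller with the remaining order-2 (4-page) chunk.
 */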
1819 
1820 
1821 /*
1822  * This array describes the order in which free lists are fallen back to
1823  * when the free lists for the desired migratetype are depleted
1824  */
1825 static int fallbacks[MIGRATE_TYPES][4] = {
1826 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
1827 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
1828 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
1829 #ifdef CONFIG_CMA
1830 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
1831 #endif
1832 #ifdef CONFIG_MEMORY_ISOLATION
1833 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
1834 #endif
1835 };
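/*
 * For example, a MIGRATE_UNMOVABLE request that finds its own free lists
 * empty walks fallbacks[MIGRATE_UNMOVABLE][] via find_suitable_fallback()
 * and tries MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE, stopping at
 * the MIGRATE_TYPES sentinel. MIGRATE_CMA and MIGRATE_ISOLATE are never a
 * starting type, hence their rows contain only the sentinel.
 */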
1836 
1837 #ifdef CONFIG_CMA
1838 static struct page *__rmqueue_cma_fallback(struct zone *zone,
1839 					unsigned int order)
1840 {
1841 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1842 }
1843 #else
1844 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1845 					unsigned int order) { return NULL; }
1846 #endif
1847 
1848 /*
1849  * Move the free pages in a range to the free lists of the requested type.
1850  * Note that start_page and end_page are not aligned on a pageblock
1851  * boundary. If alignment is required, use move_freepages_block()
1852  */
1853 static int move_freepages(struct zone *zone,
1854 			  struct page *start_page, struct page *end_page,
1855 			  int migratetype, int *num_movable)
1856 {
1857 	struct page *page;
1858 	unsigned int order;
1859 	int pages_moved = 0;
1860 
1861 #ifndef CONFIG_HOLES_IN_ZONE
1862 	/*
1863 	 * page_zone is not safe to call in this context when
1864 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1865 	 * anyway as we check zone boundaries in move_freepages_block().
1866 	 * Remove at a later date when no bug reports exist related to
1867 	 * grouping pages by mobility
1868 	 */
1869 	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1870 #endif
1871 
1872 	if (num_movable)
1873 		*num_movable = 0;
1874 
1875 	for (page = start_page; page <= end_page;) {
1876 		if (!pfn_valid_within(page_to_pfn(page))) {
1877 			page++;
1878 			continue;
1879 		}
1880 
1881 		/* Make sure we are not inadvertently changing nodes */
1882 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1883 
1884 		if (!PageBuddy(page)) {
1885 			/*
1886 			 * We assume that pages that could be isolated for
1887 			 * migration are movable. But we don't actually try
1888 			 * isolating, as that would be expensive.
1889 			 */
1890 			if (num_movable &&
1891 					(PageLRU(page) || __PageMovable(page)))
1892 				(*num_movable)++;
1893 
1894 			page++;
1895 			continue;
1896 		}
1897 
1898 		order = page_order(page);
1899 		list_move(&page->lru,
1900 			  &zone->free_area[order].free_list[migratetype]);
1901 		page += 1 << order;
1902 		pages_moved += 1 << order;
1903 	}
1904 
1905 	return pages_moved;
1906 }
1907 
1908 int move_freepages_block(struct zone *zone, struct page *page,
1909 				int migratetype, int *num_movable)
1910 {
1911 	unsigned long start_pfn, end_pfn;
1912 	struct page *start_page, *end_page;
1913 
1914 	start_pfn = page_to_pfn(page);
1915 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1916 	start_page = pfn_to_page(start_pfn);
1917 	end_page = start_page + pageblock_nr_pages - 1;
1918 	end_pfn = start_pfn + pageblock_nr_pages - 1;
1919 
1920 	/* Do not cross zone boundaries */
1921 	if (!zone_spans_pfn(zone, start_pfn))
1922 		start_page = page;
1923 	if (!zone_spans_pfn(zone, end_pfn))
1924 		return 0;
1925 
1926 	return move_freepages(zone, start_page, end_page, migratetype,
1927 								num_movable);
1928 }
1929 
1930 static void change_pageblock_range(struct page *pageblock_page,
1931 					int start_order, int migratetype)
1932 {
1933 	int nr_pageblocks = 1 << (start_order - pageblock_order);
1934 
1935 	while (nr_pageblocks--) {
1936 		set_pageblock_migratetype(pageblock_page, migratetype);
1937 		pageblock_page += pageblock_nr_pages;
1938 	}
1939 }
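/*
 * For example, assuming pageblock_order == 9 (512-page pageblocks), taking
 * ownership of an order-10 page spans 1 << (10 - 9) = 2 pageblocks, so
 * set_pageblock_migratetype() is applied twice, 512 pages apart.
 */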
1940 
1941 /*
1942  * When we are falling back to another migratetype during allocation, try to
1943  * steal extra free pages from the same pageblocks to satisfy further
1944  * allocations, instead of polluting multiple pageblocks.
1945  *
1946  * If we are stealing a relatively large buddy page, it is likely there will
1947  * be more free pages in the pageblock, so try to steal them all. For
1948  * reclaimable and unmovable allocations, we steal regardless of page size,
1949  * as fragmentation caused by those allocations polluting movable pageblocks
1950  * is worse than movable allocations stealing from unmovable and reclaimable
1951  * pageblocks.
1952  */
1953 static bool can_steal_fallback(unsigned int order, int start_mt)
1954 {
1955 	/*
1956 	 * Leaving this order check in place is intentional, although the
1957 	 * check below uses a more relaxed order. The reason is that we can
1958 	 * actually steal a whole pageblock if this condition is met, but the
1959 	 * check below doesn't guarantee that; it is just a heuristic and
1960 	 * could be changed at any time.
1961 	 */
1962 	if (order >= pageblock_order)
1963 		return true;
1964 
1965 	if (order >= pageblock_order / 2 ||
1966 		start_mt == MIGRATE_RECLAIMABLE ||
1967 		start_mt == MIGRATE_UNMOVABLE ||
1968 		page_group_by_mobility_disabled)
1969 		return true;
1970 
1971 	return false;
1972 }
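/*
 * For example, assuming pageblock_order == 9, a MIGRATE_MOVABLE request may
 * steal only at order >= 9 / 2 = 4, while MIGRATE_RECLAIMABLE and
 * MIGRATE_UNMOVABLE requests may steal at any order, since letting them
 * spill into movable pageblocks is the more damaging form of fragmentation.
 */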
1973 
1974 /*
1975  * This function implements actual steal behaviour. If order is large enough,
1976  * we can steal whole pageblock. If not, we first move freepages in this
1977  * pageblock to our migratetype and determine how many already-allocated pages
1978  * are there in the pageblock with a compatible migratetype. If at least half
1979  * of pages are free or compatible, we can change migratetype of the pageblock
1980  * itself, so pages freed in the future will be put on the correct free list.
1981  */
1982 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1983 					int start_type, bool whole_block)
1984 {
1985 	unsigned int current_order = page_order(page);
1986 	struct free_area *area;
1987 	int free_pages, movable_pages, alike_pages;
1988 	int old_block_type;
1989 
1990 	old_block_type = get_pageblock_migratetype(page);
1991 
1992 	/*
1993 	 * This can happen due to races and we want to prevent broken
1994 	 * highatomic accounting.
1995 	 */
1996 	if (is_migrate_highatomic(old_block_type))
1997 		goto single_page;
1998 
1999 	/* Take ownership for orders >= pageblock_order */
2000 	if (current_order >= pageblock_order) {
2001 		change_pageblock_range(page, current_order, start_type);
2002 		goto single_page;
2003 	}
2004 
2005 	/* We are not allowed to try stealing from the whole block */
2006 	if (!whole_block)
2007 		goto single_page;
2008 
2009 	free_pages = move_freepages_block(zone, page, start_type,
2010 						&movable_pages);
2011 	/*
2012 	 * Determine how many pages are compatible with our allocation.
2013 	 * For movable allocation, it's the number of movable pages which
2014 	 * we just obtained. For other types it's a bit more tricky.
2015 	 */
2016 	if (start_type == MIGRATE_MOVABLE) {
2017 		alike_pages = movable_pages;
2018 	} else {
2019 		/*
2020 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2021 		 * to MOVABLE pageblock, consider all non-movable pages as
2022 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2023 		 * vice versa, be conservative since we can't distinguish the
2024 		 * exact migratetype of non-movable pages.
2025 		 */
2026 		if (old_block_type == MIGRATE_MOVABLE)
2027 			alike_pages = pageblock_nr_pages
2028 						- (free_pages + movable_pages);
2029 		else
2030 			alike_pages = 0;
2031 	}
2032 
2033 	/* moving whole block can fail due to zone boundary conditions */
2034 	if (!free_pages)
2035 		goto single_page;
2036 
2037 	/*
2038 	 * If a sufficient number of pages in the block are either free or of
2039 	 * comparable migratability as our allocation, claim the whole block.
2040 	 */
2041 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2042 			page_group_by_mobility_disabled)
2043 		set_pageblock_migratetype(page, start_type);
2044 
2045 	return;
2046 
2047 single_page:
2048 	area = &zone->free_area[current_order];
2049 	list_move(&page->lru, &area->free_list[start_type]);
2050 }
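/*
 * Worked example, assuming pageblock_nr_pages == 512: stealing for an
 * UNMOVABLE request from a MOVABLE pageblock where move_freepages_block()
 * reports free_pages == 200 and movable_pages == 250 gives
 * alike_pages = 512 - (200 + 250) = 62. Since 200 + 62 = 262 >= 256 (half a
 * pageblock), the whole block is re-marked MIGRATE_UNMOVABLE.
 */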
2051 
2052 /*
2053  * Check whether there is a suitable fallback freepage with requested order.
2054  * If only_stealable is true, this function returns fallback_mt only if
2055  * we can steal other freepages all together. This would help to reduce
2056  * fragmentation due to mixed migratetype pages in one pageblock.
2057  */
2058 int find_suitable_fallback(struct free_area *area, unsigned int order,
2059 			int migratetype, bool only_stealable, bool *can_steal)
2060 {
2061 	int i;
2062 	int fallback_mt;
2063 
2064 	if (area->nr_free == 0)
2065 		return -1;
2066 
2067 	*can_steal = false;
2068 	for (i = 0;; i++) {
2069 		fallback_mt = fallbacks[migratetype][i];
2070 		if (fallback_mt == MIGRATE_TYPES)
2071 			break;
2072 
2073 		if (list_empty(&area->free_list[fallback_mt]))
2074 			continue;
2075 
2076 		if (can_steal_fallback(order, migratetype))
2077 			*can_steal = true;
2078 
2079 		if (!only_stealable)
2080 			return fallback_mt;
2081 
2082 		if (*can_steal)
2083 			return fallback_mt;
2084 	}
2085 
2086 	return -1;
2087 }
2088 
2089 /*
2090  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2091  * there are no empty page blocks that contain a page with a suitable order
2092  */
2093 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2094 				unsigned int alloc_order)
2095 {
2096 	int mt;
2097 	unsigned long max_managed, flags;
2098 
2099 	/*
2100 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2101 	 * Check is race-prone but harmless.
2102 	 */
2103 	max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2104 	if (zone->nr_reserved_highatomic >= max_managed)
2105 		return;
2106 
2107 	spin_lock_irqsave(&zone->lock, flags);
2108 
2109 	/* Recheck the nr_reserved_highatomic limit under the lock */
2110 	if (zone->nr_reserved_highatomic >= max_managed)
2111 		goto out_unlock;
2112 
2113 	/* Yoink! */
2114 	mt = get_pageblock_migratetype(page);
2115 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2116 	    && !is_migrate_cma(mt)) {
2117 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2118 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2119 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2120 	}
2121 
2122 out_unlock:
2123 	spin_unlock_irqrestore(&zone->lock, flags);
2124 }
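/*
 * For example, a zone with 1048576 managed pages (4GiB of 4KiB pages) and
 * 512-page pageblocks gets max_managed = 10485 + 512 = 10997 pages, so at
 * most about 21 pageblocks are ever reserved for high-order atomic
 * allocations in that zone.
 */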
2125 
2126 /*
2127  * Used when an allocation is about to fail under memory pressure. This
2128  * potentially hurts the reliability of high-order allocations when under
2129  * intense memory pressure but failed atomic allocations should be easier
2130  * to recover from than an OOM.
2131  *
2132  * If @force is true, try to unreserve a pageblock even though highatomic
2133  * pageblock is exhausted.
2134  */
2135 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2136 						bool force)
2137 {
2138 	struct zonelist *zonelist = ac->zonelist;
2139 	unsigned long flags;
2140 	struct zoneref *z;
2141 	struct zone *zone;
2142 	struct page *page;
2143 	int order;
2144 	bool ret;
2145 
2146 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2147 								ac->nodemask) {
2148 		/*
2149 		 * Preserve at least one pageblock unless memory pressure
2150 		 * is really high.
2151 		 */
2152 		if (!force && zone->nr_reserved_highatomic <=
2153 					pageblock_nr_pages)
2154 			continue;
2155 
2156 		spin_lock_irqsave(&zone->lock, flags);
2157 		for (order = 0; order < MAX_ORDER; order++) {
2158 			struct free_area *area = &(zone->free_area[order]);
2159 
2160 			page = list_first_entry_or_null(
2161 					&area->free_list[MIGRATE_HIGHATOMIC],
2162 					struct page, lru);
2163 			if (!page)
2164 				continue;
2165 
2166 			/*
2167 			 * In the page freeing path, the migratetype change is racy,
2168 			 * so we can encounter several free pages in a pageblock
2169 			 * in this loop although we changed the pageblock type
2170 			 * from highatomic to ac->migratetype. So we should
2171 			 * adjust the count only once.
2172 			 */
2173 			if (is_migrate_highatomic_page(page)) {
2174 				/*
2175 				 * It should never happen but changes to
2176 				 * locking could inadvertently allow a per-cpu
2177 				 * drain to add pages to MIGRATE_HIGHATOMIC
2178 				 * while unreserving so be safe and watch for
2179 				 * underflows.
2180 				 */
2181 				zone->nr_reserved_highatomic -= min(
2182 						pageblock_nr_pages,
2183 						zone->nr_reserved_highatomic);
2184 			}
2185 
2186 			/*
2187 			 * Convert to ac->migratetype and avoid the normal
2188 			 * pageblock stealing heuristics. Minimally, the caller
2189 			 * is doing the work and needs the pages. More
2190 			 * importantly, if the block was always converted to
2191 			 * MIGRATE_UNMOVABLE or another type then the number
2192 			 * of pageblocks that cannot be completely freed
2193 			 * may increase.
2194 			 */
2195 			set_pageblock_migratetype(page, ac->migratetype);
2196 			ret = move_freepages_block(zone, page, ac->migratetype,
2197 									NULL);
2198 			if (ret) {
2199 				spin_unlock_irqrestore(&zone->lock, flags);
2200 				return ret;
2201 			}
2202 		}
2203 		spin_unlock_irqrestore(&zone->lock, flags);
2204 	}
2205 
2206 	return false;
2207 }
2208 
2209 /*
2210  * Try finding a free buddy page on the fallback list and put it on the free
2211  * list of requested migratetype, possibly along with other pages from the same
2212  * block, depending on fragmentation avoidance heuristics. Returns true if
2213  * fallback was found so that __rmqueue_smallest() can grab it.
2214  *
2215  * The use of signed ints for order and current_order is a deliberate
2216  * deviation from the rest of this file, to make the for loop
2217  * condition simpler.
2218  */
2219 static inline bool
2220 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
2221 {
2222 	struct free_area *area;
2223 	int current_order;
2224 	struct page *page;
2225 	int fallback_mt;
2226 	bool can_steal;
2227 
2228 	/*
2229 	 * Find the largest available free page in the other list. This roughly
2230 	 * approximates finding the pageblock with the most free pages, which
2231 	 * would be too costly to do exactly.
2232 	 */
2233 	for (current_order = MAX_ORDER - 1; current_order >= order;
2234 				--current_order) {
2235 		area = &(zone->free_area[current_order]);
2236 		fallback_mt = find_suitable_fallback(area, current_order,
2237 				start_migratetype, false, &can_steal);
2238 		if (fallback_mt == -1)
2239 			continue;
2240 
2241 		/*
2242 		 * We cannot steal all free pages from the pageblock and the
2243 		 * requested migratetype is movable. In that case it's better to
2244 		 * steal and split the smallest available page instead of the
2245 		 * largest available page, because even if the next movable
2246 		 * allocation falls back into a different pageblock than this
2247 		 * one, it won't cause permanent fragmentation.
2248 		 */
2249 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2250 					&& current_order > order)
2251 			goto find_smallest;
2252 
2253 		goto do_steal;
2254 	}
2255 
2256 	return false;
2257 
2258 find_smallest:
2259 	for (current_order = order; current_order < MAX_ORDER;
2260 							current_order++) {
2261 		area = &(zone->free_area[current_order]);
2262 		fallback_mt = find_suitable_fallback(area, current_order,
2263 				start_migratetype, false, &can_steal);
2264 		if (fallback_mt != -1)
2265 			break;
2266 	}
2267 
2268 	/*
2269 	 * This should not happen - we already found a suitable fallback
2270 	 * when looking for the largest page.
2271 	 */
2272 	VM_BUG_ON(current_order == MAX_ORDER);
2273 
2274 do_steal:
2275 	page = list_first_entry(&area->free_list[fallback_mt],
2276 							struct page, lru);
2277 
2278 	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
2279 
2280 	trace_mm_page_alloc_extfrag(page, order, current_order,
2281 		start_migratetype, fallback_mt);
2282 
2283 	return true;
2284 
2285 }
2286 
2287 /*
2288  * Do the hard work of removing an element from the buddy allocator.
2289  * Call me with the zone->lock already held.
2290  */
2291 static struct page *__rmqueue(struct zone *zone, unsigned int order,
2292 				int migratetype)
2293 {
2294 	struct page *page;
2295 
2296 retry:
2297 	page = __rmqueue_smallest(zone, order, migratetype);
2298 	if (unlikely(!page)) {
2299 		if (migratetype == MIGRATE_MOVABLE)
2300 			page = __rmqueue_cma_fallback(zone, order);
2301 
2302 		if (!page && __rmqueue_fallback(zone, order, migratetype))
2303 			goto retry;
2304 	}
2305 
2306 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
2307 	return page;
2308 }
2309 
2310 /*
2311  * Obtain a specified number of elements from the buddy allocator, all under
2312  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2313  * Returns the number of new pages which were placed at *list.
2314  */
2315 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2316 			unsigned long count, struct list_head *list,
2317 			int migratetype, bool cold)
2318 {
2319 	int i, alloced = 0;
2320 
2321 	spin_lock(&zone->lock);
2322 	for (i = 0; i < count; ++i) {
2323 		struct page *page = __rmqueue(zone, order, migratetype);
2324 		if (unlikely(page == NULL))
2325 			break;
2326 
2327 		if (unlikely(check_pcp_refill(page)))
2328 			continue;
2329 
2330 		/*
2331 		 * Split buddy pages returned by expand() are received here
2332 		 * in physical page order. The page is added to the caller's
2333 		 * list and the list head then moves forward. From the caller's
2334 		 * perspective, the linked list is ordered by page number in
2335 		 * some conditions. This is useful for IO devices that can
2336 		 * merge IO requests if the physical pages are ordered
2337 		 * properly.
2338 		 */
2339 		if (likely(!cold))
2340 			list_add(&page->lru, list);
2341 		else
2342 			list_add_tail(&page->lru, list);
2343 		list = &page->lru;
2344 		alloced++;
2345 		if (is_migrate_cma(get_pcppage_migratetype(page)))
2346 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2347 					      -(1 << order));
2348 	}
2349 
2350 	/*
2351 	 * i pages were removed from the buddy list even if some leak due
2352 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2353 	 * on i. Do not confuse with 'alloced' which is the number of
2354 	 * pages added to the pcp list.
2355 	 */
2356 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2357 	spin_unlock(&zone->lock);
2358 	return alloced;
2359 }
2360 
2361 #ifdef CONFIG_NUMA
2362 /*
2363  * Called from the vmstat counter updater to drain pagesets of this
2364  * currently executing processor on remote nodes after they have
2365  * expired.
2366  *
2367  * Note that this function must be called with the thread pinned to
2368  * a single processor.
2369  */
2370 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2371 {
2372 	unsigned long flags;
2373 	int to_drain, batch;
2374 
2375 	local_irq_save(flags);
2376 	batch = READ_ONCE(pcp->batch);
2377 	to_drain = min(pcp->count, batch);
2378 	if (to_drain > 0) {
2379 		free_pcppages_bulk(zone, to_drain, pcp);
2380 		pcp->count -= to_drain;
2381 	}
2382 	local_irq_restore(flags);
2383 }
2384 #endif
2385 
2386 /*
2387  * Drain pcplists of the indicated processor and zone.
2388  *
2389  * The processor must either be the current processor and the
2390  * thread pinned to the current processor or a processor that
2391  * is not online.
2392  */
2393 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2394 {
2395 	unsigned long flags;
2396 	struct per_cpu_pageset *pset;
2397 	struct per_cpu_pages *pcp;
2398 
2399 	local_irq_save(flags);
2400 	pset = per_cpu_ptr(zone->pageset, cpu);
2401 
2402 	pcp = &pset->pcp;
2403 	if (pcp->count) {
2404 		free_pcppages_bulk(zone, pcp->count, pcp);
2405 		pcp->count = 0;
2406 	}
2407 	local_irq_restore(flags);
2408 }
2409 
2410 /*
2411  * Drain pcplists of all zones on the indicated processor.
2412  *
2413  * The processor must either be the current processor and the
2414  * thread pinned to the current processor or a processor that
2415  * is not online.
2416  */
2417 static void drain_pages(unsigned int cpu)
2418 {
2419 	struct zone *zone;
2420 
2421 	for_each_populated_zone(zone) {
2422 		drain_pages_zone(cpu, zone);
2423 	}
2424 }
2425 
2426 /*
2427  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2428  *
2429  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2430  * the single zone's pages.
2431  */
2432 void drain_local_pages(struct zone *zone)
2433 {
2434 	int cpu = smp_processor_id();
2435 
2436 	if (zone)
2437 		drain_pages_zone(cpu, zone);
2438 	else
2439 		drain_pages(cpu);
2440 }
2441 
2442 static void drain_local_pages_wq(struct work_struct *work)
2443 {
2444 	/*
2445 	 * drain_all_pages doesn't use proper cpu hotplug protection so
2446 	 * we can race with cpu offline when the WQ can move this from
2447 	 * a cpu pinned worker to an unbound one. We can operate on a different
2448 	 * cpu, which is all right, but we also have to make sure not to move to
2449 	 * a different one.
2450 	 */
2451 	preempt_disable();
2452 	drain_local_pages(NULL);
2453 	preempt_enable();
2454 }
2455 
2456 /*
2457  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2458  *
2459  * When zone parameter is non-NULL, spill just the single zone's pages.
2460  *
2461  * Note that this can be extremely slow as the draining happens in a workqueue.
2462  */
2463 void drain_all_pages(struct zone *zone)
2464 {
2465 	int cpu;
2466 
2467 	/*
2468 	 * Allocate in the BSS so we won't require allocation in
2469 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2470 	 */
2471 	static cpumask_t cpus_with_pcps;
2472 
2473 	/*
2474 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
2475 	 * initialized.
2476 	 */
2477 	if (WARN_ON_ONCE(!mm_percpu_wq))
2478 		return;
2479 
2480 	/* Workqueues cannot recurse */
2481 	if (current->flags & PF_WQ_WORKER)
2482 		return;
2483 
2484 	/*
2485 	 * Do not drain if one is already in progress unless it's specific to
2486 	 * a zone. Such callers are primarily CMA and memory hotplug and need
2487 	 * the drain to be complete when the call returns.
2488 	 */
2489 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2490 		if (!zone)
2491 			return;
2492 		mutex_lock(&pcpu_drain_mutex);
2493 	}
2494 
2495 	/*
2496 	 * We don't care about racing with CPU hotplug events,
2497 	 * as the offline notification will cause the notified
2498 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2499 	 * disables preemption as part of its processing.
2500 	 */
2501 	for_each_online_cpu(cpu) {
2502 		struct per_cpu_pageset *pcp;
2503 		struct zone *z;
2504 		bool has_pcps = false;
2505 
2506 		if (zone) {
2507 			pcp = per_cpu_ptr(zone->pageset, cpu);
2508 			if (pcp->pcp.count)
2509 				has_pcps = true;
2510 		} else {
2511 			for_each_populated_zone(z) {
2512 				pcp = per_cpu_ptr(z->pageset, cpu);
2513 				if (pcp->pcp.count) {
2514 					has_pcps = true;
2515 					break;
2516 				}
2517 			}
2518 		}
2519 
2520 		if (has_pcps)
2521 			cpumask_set_cpu(cpu, &cpus_with_pcps);
2522 		else
2523 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
2524 	}
2525 
2526 	for_each_cpu(cpu, &cpus_with_pcps) {
2527 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2528 		INIT_WORK(work, drain_local_pages_wq);
2529 		queue_work_on(cpu, mm_percpu_wq, work);
2530 	}
2531 	for_each_cpu(cpu, &cpus_with_pcps)
2532 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2533 
2534 	mutex_unlock(&pcpu_drain_mutex);
2535 }
2536 
2537 #ifdef CONFIG_HIBERNATION
2538 
2539 /*
2540  * Touch the watchdog for every WD_PAGE_COUNT pages.
2541  */
2542 #define WD_PAGE_COUNT	(128*1024)
2543 
2544 void mark_free_pages(struct zone *zone)
2545 {
2546 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2547 	unsigned long flags;
2548 	unsigned int order, t;
2549 	struct page *page;
2550 
2551 	if (zone_is_empty(zone))
2552 		return;
2553 
2554 	spin_lock_irqsave(&zone->lock, flags);
2555 
2556 	max_zone_pfn = zone_end_pfn(zone);
2557 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2558 		if (pfn_valid(pfn)) {
2559 			page = pfn_to_page(pfn);
2560 
2561 			if (!--page_count) {
2562 				touch_nmi_watchdog();
2563 				page_count = WD_PAGE_COUNT;
2564 			}
2565 
2566 			if (page_zone(page) != zone)
2567 				continue;
2568 
2569 			if (!swsusp_page_is_forbidden(page))
2570 				swsusp_unset_page_free(page);
2571 		}
2572 
2573 	for_each_migratetype_order(order, t) {
2574 		list_for_each_entry(page,
2575 				&zone->free_area[order].free_list[t], lru) {
2576 			unsigned long i;
2577 
2578 			pfn = page_to_pfn(page);
2579 			for (i = 0; i < (1UL << order); i++) {
2580 				if (!--page_count) {
2581 					touch_nmi_watchdog();
2582 					page_count = WD_PAGE_COUNT;
2583 				}
2584 				swsusp_set_page_free(pfn_to_page(pfn + i));
2585 			}
2586 		}
2587 	}
2588 	spin_unlock_irqrestore(&zone->lock, flags);
2589 }
2590 #endif /* CONFIG_HIBERNATION */
2591 
2592 /*
2593  * Free a 0-order page
2594  * cold == true ? free a cold page : free a hot page
2595  */
2596 void free_hot_cold_page(struct page *page, bool cold)
2597 {
2598 	struct zone *zone = page_zone(page);
2599 	struct per_cpu_pages *pcp;
2600 	unsigned long flags;
2601 	unsigned long pfn = page_to_pfn(page);
2602 	int migratetype;
2603 
2604 	if (!free_pcp_prepare(page))
2605 		return;
2606 
2607 	migratetype = get_pfnblock_migratetype(page, pfn);
2608 	set_pcppage_migratetype(page, migratetype);
2609 	local_irq_save(flags);
2610 	__count_vm_event(PGFREE);
2611 
2612 	/*
2613 	 * We only track unmovable, reclaimable and movable on pcp lists.
2614 	 * Free ISOLATE pages back to the allocator because they are being
2615 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
2616 	 * areas back if necessary. Otherwise, we may have to free
2617 	 * excessively into the page allocator
2618 	 */
2619 	if (migratetype >= MIGRATE_PCPTYPES) {
2620 		if (unlikely(is_migrate_isolate(migratetype))) {
2621 			free_one_page(zone, page, pfn, 0, migratetype);
2622 			goto out;
2623 		}
2624 		migratetype = MIGRATE_MOVABLE;
2625 	}
2626 
2627 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
2628 	if (!cold)
2629 		list_add(&page->lru, &pcp->lists[migratetype]);
2630 	else
2631 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
2632 	pcp->count++;
2633 	if (pcp->count >= pcp->high) {
2634 		unsigned long batch = READ_ONCE(pcp->batch);
2635 		free_pcppages_bulk(zone, batch, pcp);
2636 		pcp->count -= batch;
2637 	}
2638 
2639 out:
2640 	local_irq_restore(flags);
2641 }
2642 
2643 /*
2644  * Free a list of 0-order pages
2645  */
2646 void free_hot_cold_page_list(struct list_head *list, bool cold)
2647 {
2648 	struct page *page, *next;
2649 
2650 	list_for_each_entry_safe(page, next, list, lru) {
2651 		trace_mm_page_free_batched(page, cold);
2652 		free_hot_cold_page(page, cold);
2653 	}
2654 }
2655 
2656 /*
2657  * split_page takes a non-compound higher-order page, and splits it into
2658  * n (1<<order) sub-pages: page[0..n-1]
2659  * Each sub-page must be freed individually.
2660  *
2661  * Note: this is probably too low level an operation for use in drivers.
2662  * Please consult with lkml before using this in your driver.
2663  */
2664 void split_page(struct page *page, unsigned int order)
2665 {
2666 	int i;
2667 
2668 	VM_BUG_ON_PAGE(PageCompound(page), page);
2669 	VM_BUG_ON_PAGE(!page_count(page), page);
2670 
2671 #ifdef CONFIG_KMEMCHECK
2672 	/*
2673 	 * Split shadow pages too, because free(page[0]) would
2674 	 * otherwise free the whole shadow.
2675 	 */
2676 	if (kmemcheck_page_is_tracked(page))
2677 		split_page(virt_to_page(page[0].shadow), order);
2678 #endif
2679 
2680 	for (i = 1; i < (1 << order); i++)
2681 		set_page_refcounted(page + i);
2682 	split_page_owner(page, order);
2683 }
2684 EXPORT_SYMBOL_GPL(split_page);
2685 
2686 int __isolate_free_page(struct page *page, unsigned int order)
2687 {
2688 	unsigned long watermark;
2689 	struct zone *zone;
2690 	int mt;
2691 
2692 	BUG_ON(!PageBuddy(page));
2693 
2694 	zone = page_zone(page);
2695 	mt = get_pageblock_migratetype(page);
2696 
2697 	if (!is_migrate_isolate(mt)) {
2698 		/*
2699 		 * Obey watermarks as if the page was being allocated. We can
2700 		 * emulate a high-order watermark check with a raised order-0
2701 		 * watermark, because we already know our high-order page
2702 		 * exists.
2703 		 */
2704 		watermark = min_wmark_pages(zone) + (1UL << order);
2705 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2706 			return 0;
2707 
2708 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
2709 	}
2710 
2711 	/* Remove page from free list */
2712 	list_del(&page->lru);
2713 	zone->free_area[order].nr_free--;
2714 	rmv_page_order(page);
2715 
2716 	/*
2717 	 * Set the pageblock if the isolated page is at least half of a
2718 	 * pageblock
2719 	 */
2720 	if (order >= pageblock_order - 1) {
2721 		struct page *endpage = page + (1 << order) - 1;
2722 		for (; page < endpage; page += pageblock_nr_pages) {
2723 			int mt = get_pageblock_migratetype(page);
2724 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2725 			    && !is_migrate_highatomic(mt))
2726 				set_pageblock_migratetype(page,
2727 							  MIGRATE_MOVABLE);
2728 		}
2729 	}
2730 
2731 
2732 	return 1UL << order;
2733 }
2734 
2735 /*
2736  * Update NUMA hit/miss statistics
2737  *
2738  * Must be called with interrupts disabled.
2739  */
2740 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2741 {
2742 #ifdef CONFIG_NUMA
2743 	enum zone_stat_item local_stat = NUMA_LOCAL;
2744 
2745 	if (z->node != numa_node_id())
2746 		local_stat = NUMA_OTHER;
2747 
2748 	if (z->node == preferred_zone->node)
2749 		__inc_zone_state(z, NUMA_HIT);
2750 	else {
2751 		__inc_zone_state(z, NUMA_MISS);
2752 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
2753 	}
2754 	__inc_zone_state(z, local_stat);
2755 #endif
2756 }
2757 
2758 /* Remove page from the per-cpu list, caller must protect the list */
2759 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2760 			bool cold, struct per_cpu_pages *pcp,
2761 			struct list_head *list)
2762 {
2763 	struct page *page;
2764 
2765 	do {
2766 		if (list_empty(list)) {
2767 			pcp->count += rmqueue_bulk(zone, 0,
2768 					pcp->batch, list,
2769 					migratetype, cold);
2770 			if (unlikely(list_empty(list)))
2771 				return NULL;
2772 		}
2773 
2774 		if (cold)
2775 			page = list_last_entry(list, struct page, lru);
2776 		else
2777 			page = list_first_entry(list, struct page, lru);
2778 
2779 		list_del(&page->lru);
2780 		pcp->count--;
2781 	} while (check_new_pcp(page));
2782 
2783 	return page;
2784 }
2785 
2786 /* Lock and remove page from the per-cpu list */
2787 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2788 			struct zone *zone, unsigned int order,
2789 			gfp_t gfp_flags, int migratetype)
2790 {
2791 	struct per_cpu_pages *pcp;
2792 	struct list_head *list;
2793 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
2794 	struct page *page;
2795 	unsigned long flags;
2796 
2797 	local_irq_save(flags);
2798 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
2799 	list = &pcp->lists[migratetype];
2800 	page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
2801 	if (page) {
2802 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2803 		zone_statistics(preferred_zone, zone);
2804 	}
2805 	local_irq_restore(flags);
2806 	return page;
2807 }
2808 
2809 /*
2810  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2811  */
2812 static inline
2813 struct page *rmqueue(struct zone *preferred_zone,
2814 			struct zone *zone, unsigned int order,
2815 			gfp_t gfp_flags, unsigned int alloc_flags,
2816 			int migratetype)
2817 {
2818 	unsigned long flags;
2819 	struct page *page;
2820 
2821 	if (likely(order == 0)) {
2822 		page = rmqueue_pcplist(preferred_zone, zone, order,
2823 				gfp_flags, migratetype);
2824 		goto out;
2825 	}
2826 
2827 	/*
2828 	 * We most definitely don't want callers attempting to
2829 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
2830 	 */
2831 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2832 	spin_lock_irqsave(&zone->lock, flags);
2833 
2834 	do {
2835 		page = NULL;
2836 		if (alloc_flags & ALLOC_HARDER) {
2837 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2838 			if (page)
2839 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
2840 		}
2841 		if (!page)
2842 			page = __rmqueue(zone, order, migratetype);
2843 	} while (page && check_new_pages(page, order));
2844 	spin_unlock(&zone->lock);
2845 	if (!page)
2846 		goto failed;
2847 	__mod_zone_freepage_state(zone, -(1 << order),
2848 				  get_pcppage_migratetype(page));
2849 
2850 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2851 	zone_statistics(preferred_zone, zone);
2852 	local_irq_restore(flags);
2853 
2854 out:
2855 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2856 	return page;
2857 
2858 failed:
2859 	local_irq_restore(flags);
2860 	return NULL;
2861 }
2862 
2863 #ifdef CONFIG_FAIL_PAGE_ALLOC
2864 
2865 static struct {
2866 	struct fault_attr attr;
2867 
2868 	bool ignore_gfp_highmem;
2869 	bool ignore_gfp_reclaim;
2870 	u32 min_order;
2871 } fail_page_alloc = {
2872 	.attr = FAULT_ATTR_INITIALIZER,
2873 	.ignore_gfp_reclaim = true,
2874 	.ignore_gfp_highmem = true,
2875 	.min_order = 1,
2876 };
2877 
2878 static int __init setup_fail_page_alloc(char *str)
2879 {
2880 	return setup_fault_attr(&fail_page_alloc.attr, str);
2881 }
2882 __setup("fail_page_alloc=", setup_fail_page_alloc);
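/*
 * Example usage (format as described in Documentation/fault-injection):
 * booting with "fail_page_alloc=1,10,0,-1" requests roughly a 10% failure
 * probability on eligible allocations; min_order and the ignore-gfp-* bits
 * declared above can then be tuned under /sys/kernel/debug/fail_page_alloc/
 * when CONFIG_FAULT_INJECTION_DEBUG_FS is enabled.
 */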
2883 
2884 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2885 {
2886 	if (order < fail_page_alloc.min_order)
2887 		return false;
2888 	if (gfp_mask & __GFP_NOFAIL)
2889 		return false;
2890 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2891 		return false;
2892 	if (fail_page_alloc.ignore_gfp_reclaim &&
2893 			(gfp_mask & __GFP_DIRECT_RECLAIM))
2894 		return false;
2895 
2896 	return should_fail(&fail_page_alloc.attr, 1 << order);
2897 }
2898 
2899 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2900 
2901 static int __init fail_page_alloc_debugfs(void)
2902 {
2903 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2904 	struct dentry *dir;
2905 
2906 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2907 					&fail_page_alloc.attr);
2908 	if (IS_ERR(dir))
2909 		return PTR_ERR(dir);
2910 
2911 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2912 				&fail_page_alloc.ignore_gfp_reclaim))
2913 		goto fail;
2914 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2915 				&fail_page_alloc.ignore_gfp_highmem))
2916 		goto fail;
2917 	if (!debugfs_create_u32("min-order", mode, dir,
2918 				&fail_page_alloc.min_order))
2919 		goto fail;
2920 
2921 	return 0;
2922 fail:
2923 	debugfs_remove_recursive(dir);
2924 
2925 	return -ENOMEM;
2926 }
2927 
2928 late_initcall(fail_page_alloc_debugfs);
2929 
2930 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2931 
2932 #else /* CONFIG_FAIL_PAGE_ALLOC */
2933 
2934 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2935 {
2936 	return false;
2937 }
2938 
2939 #endif /* CONFIG_FAIL_PAGE_ALLOC */
2940 
2941 /*
2942  * Return true if free base pages are above 'mark'. For high-order checks it
2943  * will return true if the order-0 watermark is reached and there is at least
2944  * one free page of a suitable size. Checking now avoids taking the zone lock
2945  * to check in the allocation paths if no pages are free.
2946  */
2947 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2948 			 int classzone_idx, unsigned int alloc_flags,
2949 			 long free_pages)
2950 {
2951 	long min = mark;
2952 	int o;
2953 	const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2954 
2955 	/* free_pages may go negative - that's OK */
2956 	free_pages -= (1 << order) - 1;
2957 
2958 	if (alloc_flags & ALLOC_HIGH)
2959 		min -= min / 2;
2960 
2961 	/*
2962 	 * If the caller does not have rights to ALLOC_HARDER then subtract
2963 	 * the high-atomic reserves. This will over-estimate the size of the
2964 	 * atomic reserve but it avoids a search.
2965 	 */
2966 	if (likely(!alloc_harder))
2967 		free_pages -= z->nr_reserved_highatomic;
2968 	else
2969 		min -= min / 4;
2970 
2971 #ifdef CONFIG_CMA
2972 	/* If allocation can't use CMA areas don't use free CMA pages */
2973 	if (!(alloc_flags & ALLOC_CMA))
2974 		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2975 #endif
2976 
2977 	/*
2978 	 * Check watermarks for an order-0 allocation request. If these
2979 	 * are not met, then a high-order request also cannot go ahead
2980 	 * even if a suitable page happened to be free.
2981 	 */
2982 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
2983 		return false;
2984 
2985 	/* If this is an order-0 request then the watermark is fine */
2986 	if (!order)
2987 		return true;
2988 
2989 	/* For a high-order request, check at least one suitable page is free */
2990 	for (o = order; o < MAX_ORDER; o++) {
2991 		struct free_area *area = &z->free_area[o];
2992 		int mt;
2993 
2994 		if (!area->nr_free)
2995 			continue;
2996 
2997 		if (alloc_harder)
2998 			return true;
2999 
3000 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3001 			if (!list_empty(&area->free_list[mt]))
3002 				return true;
3003 		}
3004 
3005 #ifdef CONFIG_CMA
3006 		if ((alloc_flags & ALLOC_CMA) &&
3007 		    !list_empty(&area->free_list[MIGRATE_CMA])) {
3008 			return true;
3009 		}
3010 #endif
3011 	}
3012 	return false;
3013 }
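/*
 * Worked example: an order-3 GFP_ATOMIC request (ALLOC_HIGH | ALLOC_HARDER)
 * against a min watermark of 1000 pages ends up with
 * min = 1000 - 500 - 125 = 375 and has (1 << 3) - 1 = 7 pages charged to
 * free_pages up front. The order-0 check then requires
 * free_pages > 375 + lowmem_reserve[classzone_idx], and on top of that at
 * least one free page of order >= 3 must exist on a usable free list.
 */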
3014 
3015 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3016 		      int classzone_idx, unsigned int alloc_flags)
3017 {
3018 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3019 					zone_page_state(z, NR_FREE_PAGES));
3020 }
3021 
3022 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3023 		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3024 {
3025 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3026 	long cma_pages = 0;
3027 
3028 #ifdef CONFIG_CMA
3029 	/* If allocation can't use CMA areas don't use free CMA pages */
3030 	if (!(alloc_flags & ALLOC_CMA))
3031 		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3032 #endif
3033 
3034 	/*
3035 	 * Fast check for order-0 only. If this fails then the reserves
3036 	 * need to be calculated. There is a corner case where the check
3037 	 * passes but only the high-order atomic reserves are free. If
3038 	 * the caller is !atomic then it'll uselessly search the free
3039 	 * list. That corner case is then slower but it is harmless.
3040 	 */
3041 	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3042 		return true;
3043 
3044 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3045 					free_pages);
3046 }
3047 
3048 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3049 			unsigned long mark, int classzone_idx)
3050 {
3051 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3052 
3053 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3054 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3055 
3056 	return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3057 								free_pages);
3058 }
3059 
3060 #ifdef CONFIG_NUMA
3061 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3062 {
3063 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3064 				RECLAIM_DISTANCE;
3065 }
3066 #else	/* CONFIG_NUMA */
3067 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3068 {
3069 	return true;
3070 }
3071 #endif	/* CONFIG_NUMA */
3072 
3073 /*
3074  * get_page_from_freelist goes through the zonelist trying to allocate
3075  * a page.
3076  */
3077 static struct page *
3078 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3079 						const struct alloc_context *ac)
3080 {
3081 	struct zoneref *z = ac->preferred_zoneref;
3082 	struct zone *zone;
3083 	struct pglist_data *last_pgdat_dirty_limit = NULL;
3084 
3085 	/*
3086 	 * Scan zonelist, looking for a zone with enough free.
3087 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3088 	 */
3089 	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3090 								ac->nodemask) {
3091 		struct page *page;
3092 		unsigned long mark;
3093 
3094 		if (cpusets_enabled() &&
3095 			(alloc_flags & ALLOC_CPUSET) &&
3096 			!__cpuset_zone_allowed(zone, gfp_mask))
3097 				continue;
3098 		/*
3099 		 * When allocating a page cache page for writing, we
3100 		 * want to get it from a node that is within its dirty
3101 		 * limit, such that no single node holds more than its
3102 		 * proportional share of globally allowed dirty pages.
3103 		 * The dirty limits take into account the node's
3104 		 * lowmem reserves and high watermark so that kswapd
3105 		 * should be able to balance it without having to
3106 		 * write pages from its LRU list.
3107 		 *
3108 		 * XXX: For now, allow allocations to potentially
3109 		 * exceed the per-node dirty limit in the slowpath
3110 		 * (spread_dirty_pages unset) before going into reclaim,
3111 		 * which is important when on a NUMA setup the allowed
3112 		 * nodes are together not big enough to reach the
3113 		 * global limit.  The proper fix for these situations
3114 		 * will require awareness of nodes in the
3115 		 * dirty-throttling and the flusher threads.
3116 		 */
3117 		if (ac->spread_dirty_pages) {
3118 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
3119 				continue;
3120 
3121 			if (!node_dirty_ok(zone->zone_pgdat)) {
3122 				last_pgdat_dirty_limit = zone->zone_pgdat;
3123 				continue;
3124 			}
3125 		}
3126 
3127 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
3128 		if (!zone_watermark_fast(zone, order, mark,
3129 				       ac_classzone_idx(ac), alloc_flags)) {
3130 			int ret;
3131 
3132 			/* Checked here to keep the fast path fast */
3133 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3134 			if (alloc_flags & ALLOC_NO_WATERMARKS)
3135 				goto try_this_zone;
3136 
3137 			if (node_reclaim_mode == 0 ||
3138 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3139 				continue;
3140 
3141 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3142 			switch (ret) {
3143 			case NODE_RECLAIM_NOSCAN:
3144 				/* did not scan */
3145 				continue;
3146 			case NODE_RECLAIM_FULL:
3147 				/* scanned but unreclaimable */
3148 				continue;
3149 			default:
3150 				/* did we reclaim enough */
3151 				if (zone_watermark_ok(zone, order, mark,
3152 						ac_classzone_idx(ac), alloc_flags))
3153 					goto try_this_zone;
3154 
3155 				continue;
3156 			}
3157 		}
3158 
3159 try_this_zone:
3160 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3161 				gfp_mask, alloc_flags, ac->migratetype);
3162 		if (page) {
3163 			prep_new_page(page, order, gfp_mask, alloc_flags);
3164 
3165 			/*
3166 			 * If this is a high-order atomic allocation then check
3167 			 * if the pageblock should be reserved for the future
3168 			 */
3169 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3170 				reserve_highatomic_pageblock(page, zone, order);
3171 
3172 			return page;
3173 		}
3174 	}
3175 
3176 	return NULL;
3177 }
3178 
3179 /*
3180  * Large machines with many possible nodes should not always dump per-node
3181  * meminfo in irq context.
3182  */
3183 static inline bool should_suppress_show_mem(void)
3184 {
3185 	bool ret = false;
3186 
3187 #if NODES_SHIFT > 8
3188 	ret = in_interrupt();
3189 #endif
3190 	return ret;
3191 }
3192 
3193 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3194 {
3195 	unsigned int filter = SHOW_MEM_FILTER_NODES;
3196 	static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3197 
3198 	if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
3199 		return;
3200 
3201 	/*
3202 	 * This documents exceptions given to allocations in certain
3203 	 * contexts that are allowed to allocate outside current's set
3204 	 * of allowed nodes.
3205 	 */
3206 	if (!(gfp_mask & __GFP_NOMEMALLOC))
3207 		if (test_thread_flag(TIF_MEMDIE) ||
3208 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3209 			filter &= ~SHOW_MEM_FILTER_NODES;
3210 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3211 		filter &= ~SHOW_MEM_FILTER_NODES;
3212 
3213 	show_mem(filter, nodemask);
3214 }
3215 
3216 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3217 {
3218 	struct va_format vaf;
3219 	va_list args;
3220 	static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3221 				      DEFAULT_RATELIMIT_BURST);
3222 
3223 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3224 		return;
3225 
3226 	pr_warn("%s: ", current->comm);
3227 
3228 	va_start(args, fmt);
3229 	vaf.fmt = fmt;
3230 	vaf.va = &args;
3231 	pr_cont("%pV", &vaf);
3232 	va_end(args);
3233 
3234 	pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3235 	if (nodemask)
3236 		pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3237 	else
3238 		pr_cont("(null)\n");
3239 
3240 	cpuset_print_current_mems_allowed();
3241 
3242 	dump_stack();
3243 	warn_alloc_show_mem(gfp_mask, nodemask);
3244 }
3245 
3246 static inline struct page *
3247 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3248 			      unsigned int alloc_flags,
3249 			      const struct alloc_context *ac)
3250 {
3251 	struct page *page;
3252 
3253 	page = get_page_from_freelist(gfp_mask, order,
3254 			alloc_flags|ALLOC_CPUSET, ac);
3255 	/*
3256 	 * fallback to ignore cpuset restriction if our nodes
3257 	 * are depleted
3258 	 */
3259 	if (!page)
3260 		page = get_page_from_freelist(gfp_mask, order,
3261 				alloc_flags, ac);
3262 
3263 	return page;
3264 }
3265 
3266 static inline struct page *
3267 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3268 	const struct alloc_context *ac, unsigned long *did_some_progress)
3269 {
3270 	struct oom_control oc = {
3271 		.zonelist = ac->zonelist,
3272 		.nodemask = ac->nodemask,
3273 		.memcg = NULL,
3274 		.gfp_mask = gfp_mask,
3275 		.order = order,
3276 	};
3277 	struct page *page;
3278 
3279 	*did_some_progress = 0;
3280 
3281 	/*
3282 	 * Acquire the oom lock.  If that fails, somebody else is
3283 	 * making progress for us.
3284 	 */
3285 	if (!mutex_trylock(&oom_lock)) {
3286 		*did_some_progress = 1;
3287 		schedule_timeout_uninterruptible(1);
3288 		return NULL;
3289 	}
3290 
3291 	/*
3292 	 * Go through the zonelist yet one more time, keep very high watermark
3293 	 * here, this is only to catch a parallel oom killing, we must fail if
3294 	 * we're still under heavy pressure.
3295 	 */
3296 	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3297 					ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3298 	if (page)
3299 		goto out;
3300 
3301 	/* Coredumps can quickly deplete all memory reserves */
3302 	if (current->flags & PF_DUMPCORE)
3303 		goto out;
3304 	/* The OOM killer will not help higher order allocs */
3305 	if (order > PAGE_ALLOC_COSTLY_ORDER)
3306 		goto out;
3307 	/*
3308 	 * We have already exhausted all our reclaim opportunities without any
3309 	 * success so it is time to admit defeat. We will skip the OOM killer
3310 	 * because it is very likely that the caller has a more reasonable
3311 	 * fallback than shooting a random task.
3312 	 */
3313 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
3314 		goto out;
3315 	/* The OOM killer does not needlessly kill tasks for lowmem */
3316 	if (ac->high_zoneidx < ZONE_NORMAL)
3317 		goto out;
3318 	if (pm_suspended_storage())
3319 		goto out;
3320 	/*
3321 	 * XXX: GFP_NOFS allocations should rather fail than rely on
3322 	 * other requests to make forward progress.
3323 	 * We are in an unfortunate situation where out_of_memory cannot
3324 	 * do much for this context but let's try it to at least get
3325 	 * access to memory reserved if the current task is killed (see
3326 	 * out_of_memory). Once filesystems are ready to handle allocation
3327 	 * failures more gracefully we should just bail out here.
3328 	 */
3329 
3330 	/* The OOM killer may not free memory on a specific node */
3331 	if (gfp_mask & __GFP_THISNODE)
3332 		goto out;
3333 
3334 	/* Exhausted what can be done so it's blamo time */
3335 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3336 		*did_some_progress = 1;
3337 
3338 		/*
3339 		 * Help non-failing allocations by giving them access to memory
3340 		 * reserves
3341 		 */
3342 		if (gfp_mask & __GFP_NOFAIL)
3343 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3344 					ALLOC_NO_WATERMARKS, ac);
3345 	}
3346 out:
3347 	mutex_unlock(&oom_lock);
3348 	return page;
3349 }
3350 
3351 /*
3352  * Maximum number of compaction retries with progress before the OOM
3353  * killer is considered the only way to move forward.
3354  */
3355 #define MAX_COMPACT_RETRIES 16
3356 
3357 #ifdef CONFIG_COMPACTION
3358 /* Try memory compaction for high-order allocations before reclaim */
3359 static struct page *
3360 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3361 		unsigned int alloc_flags, const struct alloc_context *ac,
3362 		enum compact_priority prio, enum compact_result *compact_result)
3363 {
3364 	struct page *page;
3365 	unsigned int noreclaim_flag;
3366 
3367 	if (!order)
3368 		return NULL;
3369 
3370 	noreclaim_flag = memalloc_noreclaim_save();
3371 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3372 									prio);
3373 	memalloc_noreclaim_restore(noreclaim_flag);
3374 
3375 	if (*compact_result <= COMPACT_INACTIVE)
3376 		return NULL;
3377 
3378 	/*
3379 	 * At least in one zone compaction wasn't deferred or skipped, so let's
3380 	 * count a compaction stall
3381 	 */
3382 	count_vm_event(COMPACTSTALL);
3383 
3384 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3385 
3386 	if (page) {
3387 		struct zone *zone = page_zone(page);
3388 
3389 		zone->compact_blockskip_flush = false;
3390 		compaction_defer_reset(zone, order, true);
3391 		count_vm_event(COMPACTSUCCESS);
3392 		return page;
3393 	}
3394 
3395 	/*
3396 	 * It's bad if a compaction run occurs and fails. The most likely reason
3397 	 * is that pages exist, but not enough to satisfy watermarks.
3398 	 */
3399 	count_vm_event(COMPACTFAIL);
3400 
3401 	cond_resched();
3402 
3403 	return NULL;
3404 }
3405 
3406 static inline bool
3407 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3408 		     enum compact_result compact_result,
3409 		     enum compact_priority *compact_priority,
3410 		     int *compaction_retries)
3411 {
3412 	int max_retries = MAX_COMPACT_RETRIES;
3413 	int min_priority;
3414 	bool ret = false;
3415 	int retries = *compaction_retries;
3416 	enum compact_priority priority = *compact_priority;
3417 
3418 	if (!order)
3419 		return false;
3420 
3421 	if (compaction_made_progress(compact_result))
3422 		(*compaction_retries)++;
3423 
3424 	/*
3425 	 * compaction considers all the zones as desperately out of memory
3426 	 * so it doesn't really make much sense to retry except when the
3427 	 * failure could be caused by insufficient priority
3428 	 */
3429 	if (compaction_failed(compact_result))
3430 		goto check_priority;
3431 
3432 	/*
3433 	 * make sure the compaction wasn't deferred or didn't bail out early
3434 	 * due to locks contention before we declare that we should give up.
3435 	 * But do not retry if the given zonelist is not suitable for
3436 	 * compaction.
3437 	 */
3438 	if (compaction_withdrawn(compact_result)) {
3439 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3440 		goto out;
3441 	}
3442 
3443 	/*
3444 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3445 	 * costly ones because the former are de facto nofail and invoke the
3446 	 * OOM killer to move on, while costly requests can fail and users are
3447 	 * ready to cope with that. 1/4 of the retries is rather arbitrary but we
3448 	 * would need much more detailed feedback from compaction to
3449 	 * make a better decision.
3450 	 */
3451 	if (order > PAGE_ALLOC_COSTLY_ORDER)
3452 		max_retries /= 4;
3453 	if (*compaction_retries <= max_retries) {
3454 		ret = true;
3455 		goto out;
3456 	}
3457 
3458 	/*
3459 	 * Make sure there are attempts at the highest priority if we exhausted
3460 	 * all retries or failed at the lower priorities.
3461 	 */
3462 check_priority:
3463 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3464 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3465 
3466 	if (*compact_priority > min_priority) {
3467 		(*compact_priority)--;
3468 		*compaction_retries = 0;
3469 		ret = true;
3470 	}
3471 out:
3472 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3473 	return ret;
3474 }
3475 #else
3476 static inline struct page *
3477 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3478 		unsigned int alloc_flags, const struct alloc_context *ac,
3479 		enum compact_priority prio, enum compact_result *compact_result)
3480 {
3481 	*compact_result = COMPACT_SKIPPED;
3482 	return NULL;
3483 }
3484 
3485 static inline bool
3486 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3487 		     enum compact_result compact_result,
3488 		     enum compact_priority *compact_priority,
3489 		     int *compaction_retries)
3490 {
3491 	struct zone *zone;
3492 	struct zoneref *z;
3493 
3494 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3495 		return false;
3496 
3497 	/*
3498 	 * There are setups with compaction disabled which would prefer to loop
3499 	 * inside the allocator rather than hit the oom killer prematurely.
3500 	 * Let's give them a good hope and keep retrying while the order-0
3501 	 * watermarks are OK.
3502 	 */
3503 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3504 					ac->nodemask) {
3505 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3506 					ac_classzone_idx(ac), alloc_flags))
3507 			return true;
3508 	}
3509 	return false;
3510 }
3511 #endif /* CONFIG_COMPACTION */
3512 
3513 /* Perform direct synchronous page reclaim */
3514 static int
3515 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3516 					const struct alloc_context *ac)
3517 {
3518 	struct reclaim_state reclaim_state;
3519 	int progress;
3520 	unsigned int noreclaim_flag;
3521 
3522 	cond_resched();
3523 
3524 	/* We now go into synchronous reclaim */
3525 	cpuset_memory_pressure_bump();
3526 	noreclaim_flag = memalloc_noreclaim_save();
3527 	lockdep_set_current_reclaim_state(gfp_mask);
3528 	reclaim_state.reclaimed_slab = 0;
3529 	current->reclaim_state = &reclaim_state;
3530 
3531 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3532 								ac->nodemask);
3533 
3534 	current->reclaim_state = NULL;
3535 	lockdep_clear_current_reclaim_state();
3536 	memalloc_noreclaim_restore(noreclaim_flag);
3537 
3538 	cond_resched();
3539 
3540 	return progress;
3541 }
3542 
3543 /* The really slow allocator path where we enter direct reclaim */
3544 static inline struct page *
3545 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3546 		unsigned int alloc_flags, const struct alloc_context *ac,
3547 		unsigned long *did_some_progress)
3548 {
3549 	struct page *page = NULL;
3550 	bool drained = false;
3551 
3552 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3553 	if (unlikely(!(*did_some_progress)))
3554 		return NULL;
3555 
3556 retry:
3557 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3558 
3559 	/*
3560 	 * If an allocation failed after direct reclaim, it could be because
3561 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
3562 	 * Shrink them and try again.
3563 	 */
3564 	if (!page && !drained) {
3565 		unreserve_highatomic_pageblock(ac, false);
3566 		drain_all_pages(NULL);
3567 		drained = true;
3568 		goto retry;
3569 	}
3570 
3571 	return page;
3572 }
3573 
3574 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3575 {
3576 	struct zoneref *z;
3577 	struct zone *zone;
3578 	pg_data_t *last_pgdat = NULL;
3579 
3580 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3581 					ac->high_zoneidx, ac->nodemask) {
3582 		if (last_pgdat != zone->zone_pgdat)
3583 			wakeup_kswapd(zone, order, ac->high_zoneidx);
3584 		last_pgdat = zone->zone_pgdat;
3585 	}
3586 }
3587 
3588 static inline unsigned int
3589 gfp_to_alloc_flags(gfp_t gfp_mask)
3590 {
3591 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3592 
3593 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
3594 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3595 
3596 	/*
3597 	 * The caller may dip into page reserves a bit more if the caller
3598 	 * cannot run direct reclaim, or if the caller has realtime scheduling
3599 	 * cannot run direct reclaim, or if the caller has a realtime scheduling
3600 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
3601 	 */
3602 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3603 
3604 	if (gfp_mask & __GFP_ATOMIC) {
3605 		/*
3606 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3607 		 * if it can't schedule.
3608 		 */
3609 		if (!(gfp_mask & __GFP_NOMEMALLOC))
3610 			alloc_flags |= ALLOC_HARDER;
3611 		/*
3612 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
3613 		 * comment for __cpuset_node_allowed().
3614 		 */
3615 		alloc_flags &= ~ALLOC_CPUSET;
3616 	} else if (unlikely(rt_task(current)) && !in_interrupt())
3617 		alloc_flags |= ALLOC_HARDER;
3618 
3619 #ifdef CONFIG_CMA
3620 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3621 		alloc_flags |= ALLOC_CMA;
3622 #endif
3623 	return alloc_flags;
3624 }
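/*
 * Illustrative example (editorial note, not part of the kernel source):
 * assuming GFP_ATOMIC is defined as
 * (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM), gfp_to_alloc_flags()
 * above starts from ALLOC_WMARK_MIN | ALLOC_CPUSET, ORs in ALLOC_HIGH from
 * __GFP_HIGH, then __GFP_ATOMIC (without __GFP_NOMEMALLOC) adds ALLOC_HARDER
 * and clears ALLOC_CPUSET, so the result is
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */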
3625 
3626 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3627 {
3628 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3629 		return false;
3630 
3631 	if (gfp_mask & __GFP_MEMALLOC)
3632 		return true;
3633 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3634 		return true;
3635 	if (!in_interrupt() &&
3636 			((current->flags & PF_MEMALLOC) ||
3637 			 unlikely(test_thread_flag(TIF_MEMDIE))))
3638 		return true;
3639 
3640 	return false;
3641 }
3642 
3643 /*
3644  * Checks whether it makes sense to retry the reclaim to make forward progress
3645  * for the given allocation request.
3646  *
3647  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3648  * without success, or when we couldn't even meet the watermark if we
3649  * reclaimed all remaining pages on the LRU lists.
3650  *
3651  * Returns true if a retry is viable or false to enter the oom path.
3652  */
3653 static inline bool
3654 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3655 		     struct alloc_context *ac, int alloc_flags,
3656 		     bool did_some_progress, int *no_progress_loops)
3657 {
3658 	struct zone *zone;
3659 	struct zoneref *z;
3660 
3661 	/*
3662 	 * Costly allocations might have made a progress but this doesn't mean
3663 	 * Costly allocations might have made progress, but this doesn't mean
3664 	 * always increment the no progress counter for them
3665 	 */
3666 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3667 		*no_progress_loops = 0;
3668 	else
3669 		(*no_progress_loops)++;
3670 
3671 	/*
3672 	 * Make sure we converge to OOM if we cannot make any progress
3673 	 * several times in the row.
3674 	 * several times in a row.
3675 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3676 		/* Before OOM, exhaust highatomic_reserve */
3677 		return unreserve_highatomic_pageblock(ac, true);
3678 	}
3679 
3680 	/*
3681 	 * Keep reclaiming pages while there is a chance this will lead
3682 	 * somewhere.  If none of the target zones can satisfy our allocation
3683 	 * request even if all reclaimable pages are considered then we are
3684 	 * screwed and have to go OOM.
3685 	 */
3686 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3687 					ac->nodemask) {
3688 		unsigned long available;
3689 		unsigned long reclaimable;
3690 		unsigned long min_wmark = min_wmark_pages(zone);
3691 		bool wmark;
3692 
3693 		available = reclaimable = zone_reclaimable_pages(zone);
3694 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3695 
3696 		/*
3697 		 * Would the allocation succeed if we reclaimed all
3698 		 * reclaimable pages?
3699 		 */
3700 		wmark = __zone_watermark_ok(zone, order, min_wmark,
3701 				ac_classzone_idx(ac), alloc_flags, available);
3702 		trace_reclaim_retry_zone(z, order, reclaimable,
3703 				available, min_wmark, *no_progress_loops, wmark);
3704 		if (wmark) {
3705 			/*
3706 			 * If we didn't make any progress and have a lot of
3707 			 * dirty + writeback pages then we should wait for
3708 			 * an IO to complete to slow down the reclaim and
3709 			 * prevent from pre mature OOM
3710 			 * prevent premature OOM
3711 			if (!did_some_progress) {
3712 				unsigned long write_pending;
3713 
3714 				write_pending = zone_page_state_snapshot(zone,
3715 							NR_ZONE_WRITE_PENDING);
3716 
3717 				if (2 * write_pending > reclaimable) {
3718 					congestion_wait(BLK_RW_ASYNC, HZ/10);
3719 					return true;
3720 				}
3721 			}
3722 
3723 			/*
3724 			 * Memory allocation/reclaim might be called from a WQ
3725 			 * context and the current implementation of the WQ
3726 			 * concurrency control doesn't recognize that
3727 			 * a particular WQ is congested if the worker thread is
3728 			 * looping without ever sleeping. Therefore we have to
3729 			 * do a short sleep here rather than calling
3730 			 * cond_resched().
3731 			 */
3732 			if (current->flags & PF_WQ_WORKER)
3733 				schedule_timeout_uninterruptible(1);
3734 			else
3735 				cond_resched();
3736 
3737 			return true;
3738 		}
3739 	}
3740 
3741 	return false;
3742 }
3743 
3744 static inline bool
3745 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3746 {
3747 	/*
3748 	 * It's possible that cpuset's mems_allowed and the nodemask from
3749 	 * mempolicy don't intersect. This should normally be dealt with by
3750 	 * policy_nodemask(), but it's possible to race with a cpuset update in
3751 	 * such a way that the check therein was true, and then it became false
3752 	 * before we got our cpuset_mems_cookie here.
3753 	 * This assumes that for all allocations, ac->nodemask can come only
3754 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
3755 	 * when it does not intersect with the cpuset restrictions) or the
3756 	 * caller can deal with a violated nodemask.
3757 	 */
3758 	if (cpusets_enabled() && ac->nodemask &&
3759 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3760 		ac->nodemask = NULL;
3761 		return true;
3762 	}
3763 
3764 	/*
3765 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
3766 	 * possible to race with parallel threads in such a way that our
3767 	 * allocation can fail while the mask is being updated. If we are about
3768 	 * to fail, check if the cpuset changed during allocation and if so,
3769 	 * retry.
3770 	 */
3771 	if (read_mems_allowed_retry(cpuset_mems_cookie))
3772 		return true;
3773 
3774 	return false;
3775 }
3776 
3777 static inline struct page *
3778 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3779 						struct alloc_context *ac)
3780 {
3781 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3782 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3783 	struct page *page = NULL;
3784 	unsigned int alloc_flags;
3785 	unsigned long did_some_progress;
3786 	enum compact_priority compact_priority;
3787 	enum compact_result compact_result;
3788 	int compaction_retries;
3789 	int no_progress_loops;
3790 	unsigned long alloc_start = jiffies;
3791 	unsigned int stall_timeout = 10 * HZ;
3792 	unsigned int cpuset_mems_cookie;
3793 
3794 	/*
3795 	 * In the slowpath, we sanity check order to avoid ever trying to
3796 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3797 	 * be using allocators in order of preference for an area that is
3798 	 * too large.
3799 	 */
3800 	if (order >= MAX_ORDER) {
3801 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
3802 		return NULL;
3803 	}
3804 
3805 	/*
3806 	 * We also sanity check to catch abuse of atomic reserves being used by
3807 	 * callers that are not in atomic context.
3808 	 */
3809 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3810 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3811 		gfp_mask &= ~__GFP_ATOMIC;
3812 
3813 retry_cpuset:
3814 	compaction_retries = 0;
3815 	no_progress_loops = 0;
3816 	compact_priority = DEF_COMPACT_PRIORITY;
3817 	cpuset_mems_cookie = read_mems_allowed_begin();
3818 
3819 	/*
3820 	 * The fast path uses conservative alloc_flags to succeed only until
3821 	 * kswapd needs to be woken up, and to avoid the cost of setting up
3822 	 * alloc_flags precisely. So we do that now.
3823 	 */
3824 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
3825 
3826 	/*
3827 	 * We need to recalculate the starting point for the zonelist iterator
3828 	 * because we might have used different nodemask in the fast path, or
3829 	 * because we might have used a different nodemask in the fast path, or
3830 	 * could end up iterating over non-eligible zones endlessly.
3831 	 */
3832 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3833 					ac->high_zoneidx, ac->nodemask);
3834 	if (!ac->preferred_zoneref->zone)
3835 		goto nopage;
3836 
3837 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3838 		wake_all_kswapds(order, ac);
3839 
3840 	/*
3841 	 * The adjusted alloc_flags might result in immediate success, so try
3842 	 * that first
3843 	 */
3844 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3845 	if (page)
3846 		goto got_pg;
3847 
3848 	/*
3849 	 * For costly allocations, try direct compaction first, as it's likely
3850 	 * that we have enough base pages and don't need to reclaim. For non-
3851 	 * movable high-order allocations, do that as well, as compaction will
3852 	 * try prevent permanent fragmentation by migrating from blocks of the
3853 	 * try to prevent permanent fragmentation by migrating from blocks of the
3854 	 * Don't try this for allocations that are allowed to ignore
3855 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3856 	 */
3857 	if (can_direct_reclaim &&
3858 			(costly_order ||
3859 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3860 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
3861 		page = __alloc_pages_direct_compact(gfp_mask, order,
3862 						alloc_flags, ac,
3863 						INIT_COMPACT_PRIORITY,
3864 						&compact_result);
3865 		if (page)
3866 			goto got_pg;
3867 
3868 		/*
3869 		 * Checks for costly allocations with __GFP_NORETRY, which
3870 		 * includes THP page fault allocations
3871 		 */
3872 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3873 			/*
3874 			 * If compaction is deferred for high-order allocations,
3875 			 * it is because sync compaction recently failed. If
3876 			 * this is the case and the caller requested a THP
3877 			 * allocation, we do not want to heavily disrupt the
3878 			 * system, so we fail the allocation instead of entering
3879 			 * direct reclaim.
3880 			 */
3881 			if (compact_result == COMPACT_DEFERRED)
3882 				goto nopage;
3883 
3884 			/*
3885 			 * Looks like reclaim/compaction is worth trying, but
3886 			 * sync compaction could be very expensive, so keep
3887 			 * using async compaction.
3888 			 */
3889 			compact_priority = INIT_COMPACT_PRIORITY;
3890 		}
3891 	}
3892 
3893 retry:
3894 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
3895 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3896 		wake_all_kswapds(order, ac);
3897 
3898 	if (gfp_pfmemalloc_allowed(gfp_mask))
3899 		alloc_flags = ALLOC_NO_WATERMARKS;
3900 
3901 	/*
3902 	 * Reset the zonelist iterators if memory policies can be ignored.
3903 	 * These allocations are high priority and system rather than user
3904 	 * orientated.
3905 	 */
3906 	if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
3907 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3908 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3909 					ac->high_zoneidx, ac->nodemask);
3910 	}
3911 
3912 	/* Attempt with potentially adjusted zonelist and alloc_flags */
3913 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3914 	if (page)
3915 		goto got_pg;
3916 
3917 	/* Caller is not willing to reclaim, we can't balance anything */
3918 	if (!can_direct_reclaim)
3919 		goto nopage;
3920 
3921 	/* Make sure we know about allocations which stall for too long */
3922 	if (time_after(jiffies, alloc_start + stall_timeout)) {
3923 		warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
3924 			"page allocation stalls for %ums, order:%u",
3925 			jiffies_to_msecs(jiffies-alloc_start), order);
3926 		stall_timeout += 10 * HZ;
3927 	}
3928 
3929 	/* Avoid recursion of direct reclaim */
3930 	if (current->flags & PF_MEMALLOC)
3931 		goto nopage;
3932 
3933 	/* Try direct reclaim and then allocating */
3934 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3935 							&did_some_progress);
3936 	if (page)
3937 		goto got_pg;
3938 
3939 	/* Try direct compaction and then allocating */
3940 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3941 					compact_priority, &compact_result);
3942 	if (page)
3943 		goto got_pg;
3944 
3945 	/* Do not loop if specifically requested */
3946 	if (gfp_mask & __GFP_NORETRY)
3947 		goto nopage;
3948 
3949 	/*
3950 	 * Do not retry costly high order allocations unless they are
3951 	 * __GFP_RETRY_MAYFAIL
3952 	 */
3953 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
3954 		goto nopage;
3955 
3956 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3957 				 did_some_progress > 0, &no_progress_loops))
3958 		goto retry;
3959 
3960 	/*
3961 	 * It doesn't make any sense to retry compaction if the order-0
3962 	 * reclaim is not able to make any progress because the current
3963 	 * implementation of compaction depends on a sufficient amount
3964 	 * of free memory (see __compaction_suitable)
3965 	 */
3966 	if (did_some_progress > 0 &&
3967 			should_compact_retry(ac, order, alloc_flags,
3968 				compact_result, &compact_priority,
3969 				&compaction_retries))
3970 		goto retry;
3971 
3972 
3973 	/* Deal with possible cpuset update races before we start OOM killing */
3974 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
3975 		goto retry_cpuset;
3976 
3977 	/* Reclaim has failed us, start killing things */
3978 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3979 	if (page)
3980 		goto got_pg;
3981 
3982 	/* Avoid allocations with no watermarks from looping endlessly */
3983 	if (test_thread_flag(TIF_MEMDIE) &&
3984 	    (alloc_flags == ALLOC_NO_WATERMARKS ||
3985 	     (gfp_mask & __GFP_NOMEMALLOC)))
3986 		goto nopage;
3987 
3988 	/* Retry as long as the OOM killer is making progress */
3989 	if (did_some_progress) {
3990 		no_progress_loops = 0;
3991 		goto retry;
3992 	}
3993 
3994 nopage:
3995 	/* Deal with possible cpuset update races before we fail */
3996 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
3997 		goto retry_cpuset;
3998 
3999 	/*
4000 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4001 	 * we always retry
4002 	 */
4003 	if (gfp_mask & __GFP_NOFAIL) {
4004 		/*
4005 		 * All existing users of __GFP_NOFAIL are blockable, so warn
4006 		 * about any new users that actually require GFP_NOWAIT
4007 		 */
4008 		if (WARN_ON_ONCE(!can_direct_reclaim))
4009 			goto fail;
4010 
4011 		/*
4012 		 * PF_MEMALLOC request from this context is rather bizarre
4013 		 * because we cannot reclaim anything and can only loop waiting
4014 		 * for somebody to do the work for us
4015 		 */
4016 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4017 
4018 		/*
4019 		 * Non-failing costly orders are a hard requirement which we
4020 		 * are not well prepared for, so let's warn about these users
4021 		 * so that we can identify them and convert them to something
4022 		 * else.
4023 		 */
4024 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4025 
4026 		/*
4027 		 * Help non-failing allocations by giving them access to memory
4028 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
4029 		 * could deplete the whole memory reserves, which would just make
4030 		 * the situation worse
4031 		 */
4032 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4033 		if (page)
4034 			goto got_pg;
4035 
4036 		cond_resched();
4037 		goto retry;
4038 	}
4039 fail:
4040 	warn_alloc(gfp_mask, ac->nodemask,
4041 			"page allocation failure: order:%u", order);
4042 got_pg:
4043 	return page;
4044 }
4045 
4046 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4047 		int preferred_nid, nodemask_t *nodemask,
4048 		struct alloc_context *ac, gfp_t *alloc_mask,
4049 		unsigned int *alloc_flags)
4050 {
4051 	ac->high_zoneidx = gfp_zone(gfp_mask);
4052 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4053 	ac->nodemask = nodemask;
4054 	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4055 
4056 	if (cpusets_enabled()) {
4057 		*alloc_mask |= __GFP_HARDWALL;
4058 		if (!ac->nodemask)
4059 			ac->nodemask = &cpuset_current_mems_allowed;
4060 		else
4061 			*alloc_flags |= ALLOC_CPUSET;
4062 	}
4063 
4064 	lockdep_trace_alloc(gfp_mask);
4065 
4066 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4067 
4068 	if (should_fail_alloc_page(gfp_mask, order))
4069 		return false;
4070 
4071 	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4072 		*alloc_flags |= ALLOC_CMA;
4073 
4074 	return true;
4075 }
4076 
4077 /* Determine whether to spread dirty pages and what the first usable zone is */
4078 static inline void finalise_ac(gfp_t gfp_mask,
4079 		unsigned int order, struct alloc_context *ac)
4080 {
4081 	/* Dirty zone balancing only done in the fast path */
4082 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4083 
4084 	/*
4085 	 * The preferred zone is used for statistics but crucially it is
4086 	 * also used as the starting point for the zonelist iterator. It
4087 	 * may get reset for allocations that ignore memory policies.
4088 	 */
4089 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4090 					ac->high_zoneidx, ac->nodemask);
4091 }
4092 
4093 /*
4094  * This is the 'heart' of the zoned buddy allocator.
4095  */
4096 struct page *
4097 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4098 							nodemask_t *nodemask)
4099 {
4100 	struct page *page;
4101 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4102 	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
4103 	struct alloc_context ac = { };
4104 
4105 	gfp_mask &= gfp_allowed_mask;
4106 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4107 		return NULL;
4108 
4109 	finalise_ac(gfp_mask, order, &ac);
4110 
4111 	/* First allocation attempt */
4112 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4113 	if (likely(page))
4114 		goto out;
4115 
4116 	/*
4117 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4118 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
4119 	 * from a particular context which has been marked by
4120 	 * memalloc_no{fs,io}_{save,restore}.
4121 	 */
4122 	alloc_mask = current_gfp_context(gfp_mask);
4123 	ac.spread_dirty_pages = false;
4124 
4125 	/*
4126 	 * Restore the original nodemask if it was potentially replaced with
4127 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4128 	 */
4129 	if (unlikely(ac.nodemask != nodemask))
4130 		ac.nodemask = nodemask;
4131 
4132 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4133 
4134 out:
4135 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4136 	    unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4137 		__free_pages(page, order);
4138 		page = NULL;
4139 	}
4140 
4141 	if (kmemcheck_enabled && page)
4142 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
4143 
4144 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4145 
4146 	return page;
4147 }
4148 EXPORT_SYMBOL(__alloc_pages_nodemask);
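/*
 * Illustrative usage sketch (editorial note, not part of the kernel source):
 * most callers do not invoke __alloc_pages_nodemask() directly but reach it
 * through the alloc_pages()/alloc_pages_node() wrappers, e.g.:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *	if (page) {
 *		void *addr = page_address(page);
 *		...
 *		__free_pages(page, 0);
 *	}
 */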
4149 
4150 /*
4151  * Common helper functions.
4152  */
4153 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4154 {
4155 	struct page *page;
4156 
4157 	/*
4158 	 * __get_free_pages() returns a virtual address, which cannot represent
4159 	 * a highmem page
4160 	 */
4161 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4162 
4163 	page = alloc_pages(gfp_mask, order);
4164 	if (!page)
4165 		return 0;
4166 	return (unsigned long) page_address(page);
4167 }
4168 EXPORT_SYMBOL(__get_free_pages);
4169 
4170 unsigned long get_zeroed_page(gfp_t gfp_mask)
4171 {
4172 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4173 }
4174 EXPORT_SYMBOL(get_zeroed_page);
4175 
4176 void __free_pages(struct page *page, unsigned int order)
4177 {
4178 	if (put_page_testzero(page)) {
4179 		if (order == 0)
4180 			free_hot_cold_page(page, false);
4181 		else
4182 			__free_pages_ok(page, order);
4183 	}
4184 }
4185 
4186 EXPORT_SYMBOL(__free_pages);
4187 
4188 void free_pages(unsigned long addr, unsigned int order)
4189 {
4190 	if (addr != 0) {
4191 		VM_BUG_ON(!virt_addr_valid((void *)addr));
4192 		__free_pages(virt_to_page((void *)addr), order);
4193 	}
4194 }
4195 
4196 EXPORT_SYMBOL(free_pages);
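/*
 * Illustrative usage sketch (editorial note, not part of the kernel source):
 * allocating an order-1 (two page) physically contiguous, directly mapped
 * buffer and releasing it again:
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr) {
 *		...
 *		free_pages(addr, 1);
 *	}
 */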
4197 
4198 /*
4199  * Page Fragment:
4200  *  An arbitrary-length arbitrary-offset area of memory which resides
4201  *  within a 0 or higher order page.  Multiple fragments within that page
4202  *  are individually refcounted, in the page's reference counter.
4203  *
4204  * The page_frag functions below provide a simple allocation framework for
4205  * page fragments.  This is used by the network stack and network device
4206  * drivers to provide a backing region of memory for use as either an
4207  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4208  */
4209 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4210 					     gfp_t gfp_mask)
4211 {
4212 	struct page *page = NULL;
4213 	gfp_t gfp = gfp_mask;
4214 
4215 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4216 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4217 		    __GFP_NOMEMALLOC;
4218 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4219 				PAGE_FRAG_CACHE_MAX_ORDER);
4220 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4221 #endif
4222 	if (unlikely(!page))
4223 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4224 
4225 	nc->va = page ? page_address(page) : NULL;
4226 
4227 	return page;
4228 }
4229 
4230 void __page_frag_cache_drain(struct page *page, unsigned int count)
4231 {
4232 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4233 
4234 	if (page_ref_sub_and_test(page, count)) {
4235 		unsigned int order = compound_order(page);
4236 
4237 		if (order == 0)
4238 			free_hot_cold_page(page, false);
4239 		else
4240 			__free_pages_ok(page, order);
4241 	}
4242 }
4243 EXPORT_SYMBOL(__page_frag_cache_drain);
4244 
4245 void *page_frag_alloc(struct page_frag_cache *nc,
4246 		      unsigned int fragsz, gfp_t gfp_mask)
4247 {
4248 	unsigned int size = PAGE_SIZE;
4249 	struct page *page;
4250 	int offset;
4251 
4252 	if (unlikely(!nc->va)) {
4253 refill:
4254 		page = __page_frag_cache_refill(nc, gfp_mask);
4255 		if (!page)
4256 			return NULL;
4257 
4258 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4259 		/* if size can vary use size else just use PAGE_SIZE */
4260 		size = nc->size;
4261 #endif
4262 		/* Even if we own the page, we do not use atomic_set().
4263 		 * This would break get_page_unless_zero() users.
4264 		 */
4265 		page_ref_add(page, size - 1);
4266 
4267 		/* reset page count bias and offset to start of new frag */
4268 		nc->pfmemalloc = page_is_pfmemalloc(page);
4269 		nc->pagecnt_bias = size;
4270 		nc->offset = size;
4271 	}
4272 
4273 	offset = nc->offset - fragsz;
4274 	if (unlikely(offset < 0)) {
4275 		page = virt_to_page(nc->va);
4276 
4277 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4278 			goto refill;
4279 
4280 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4281 		/* if size can vary use size else just use PAGE_SIZE */
4282 		size = nc->size;
4283 #endif
4284 		/* OK, page count is 0, we can safely set it */
4285 		set_page_count(page, size);
4286 
4287 		/* reset page count bias and offset to start of new frag */
4288 		nc->pagecnt_bias = size;
4289 		offset = size - fragsz;
4290 	}
4291 
4292 	nc->pagecnt_bias--;
4293 	nc->offset = offset;
4294 
4295 	return nc->va + offset;
4296 }
4297 EXPORT_SYMBOL(page_frag_alloc);
4298 
4299 /*
4300  * Frees a page fragment allocated out of either a compound or order 0 page.
4301  */
4302 void page_frag_free(void *addr)
4303 {
4304 	struct page *page = virt_to_head_page(addr);
4305 
4306 	if (unlikely(put_page_testzero(page)))
4307 		__free_pages_ok(page, compound_order(page));
4308 }
4309 EXPORT_SYMBOL(page_frag_free);
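/*
 * Illustrative usage sketch (editorial note, not part of the kernel source):
 * carving small buffers out of a caller-owned fragment cache. The cache name
 * and the 256-byte size are hypothetical; only the page_frag_alloc() and
 * page_frag_free() interfaces above are real:
 *
 *	static struct page_frag_cache example_frag_cache;
 *
 *	void *buf = page_frag_alloc(&example_frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 */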
4310 
4311 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4312 		size_t size)
4313 {
4314 	if (addr) {
4315 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
4316 		unsigned long used = addr + PAGE_ALIGN(size);
4317 
4318 		split_page(virt_to_page((void *)addr), order);
4319 		while (used < alloc_end) {
4320 			free_page(used);
4321 			used += PAGE_SIZE;
4322 		}
4323 	}
4324 	return (void *)addr;
4325 }
4326 
4327 /**
4328  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4329  * @size: the number of bytes to allocate
4330  * @gfp_mask: GFP flags for the allocation
4331  *
4332  * This function is similar to alloc_pages(), except that it allocates the
4333  * minimum number of pages to satisfy the request.  alloc_pages() can only
4334  * allocate memory in power-of-two pages.
4335  *
4336  * This function is also limited by MAX_ORDER.
4337  *
4338  * Memory allocated by this function must be released by free_pages_exact().
4339  */
4340 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4341 {
4342 	unsigned int order = get_order(size);
4343 	unsigned long addr;
4344 
4345 	addr = __get_free_pages(gfp_mask, order);
4346 	return make_alloc_exact(addr, order, size);
4347 }
4348 EXPORT_SYMBOL(alloc_pages_exact);
4349 
4350 /**
4351  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4352  *			   pages on a node.
4353  * @nid: the preferred node ID where memory should be allocated
4354  * @size: the number of bytes to allocate
4355  * @gfp_mask: GFP flags for the allocation
4356  *
4357  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4358  * back.
4359  */
4360 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4361 {
4362 	unsigned int order = get_order(size);
4363 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
4364 	if (!p)
4365 		return NULL;
4366 	return make_alloc_exact((unsigned long)page_address(p), order, size);
4367 }
4368 
4369 /**
4370  * free_pages_exact - release memory allocated via alloc_pages_exact()
4371  * @virt: the value returned by alloc_pages_exact.
4372  * @size: size of allocation, same value as passed to alloc_pages_exact().
4373  *
4374  * Release the memory allocated by a previous call to alloc_pages_exact.
4375  */
4376 void free_pages_exact(void *virt, size_t size)
4377 {
4378 	unsigned long addr = (unsigned long)virt;
4379 	unsigned long end = addr + PAGE_ALIGN(size);
4380 
4381 	while (addr < end) {
4382 		free_page(addr);
4383 		addr += PAGE_SIZE;
4384 	}
4385 }
4386 EXPORT_SYMBOL(free_pages_exact);
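/*
 * Illustrative usage sketch (editorial note, not part of the kernel source):
 * allocating a buffer of exactly five pages without rounding the size up to
 * the next power of two, then releasing it with the same size:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 5 * PAGE_SIZE);
 *	}
 */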
4387 
4388 /**
4389  * nr_free_zone_pages - count number of pages beyond high watermark
4390  * @offset: The zone index of the highest zone
4391  *
4392  * nr_free_zone_pages() counts the number of pages which are beyond the
4393  * high watermark within all zones at or below a given zone index.  For each
4394  * zone, the number of pages is calculated as:
4395  *
4396  *     nr_free_zone_pages = managed_pages - high_pages
4397  */
4398 static unsigned long nr_free_zone_pages(int offset)
4399 {
4400 	struct zoneref *z;
4401 	struct zone *zone;
4402 
4403 	/* Just pick one node, since fallback list is circular */
4404 	unsigned long sum = 0;
4405 
4406 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4407 
4408 	for_each_zone_zonelist(zone, z, zonelist, offset) {
4409 		unsigned long size = zone->managed_pages;
4410 		unsigned long high = high_wmark_pages(zone);
4411 		if (size > high)
4412 			sum += size - high;
4413 	}
4414 
4415 	return sum;
4416 }
4417 
4418 /**
4419  * nr_free_buffer_pages - count number of pages beyond high watermark
4420  *
4421  * nr_free_buffer_pages() counts the number of pages which are beyond the high
4422  * watermark within ZONE_DMA and ZONE_NORMAL.
4423  */
4424 unsigned long nr_free_buffer_pages(void)
4425 {
4426 	return nr_free_zone_pages(gfp_zone(GFP_USER));
4427 }
4428 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4429 
4430 /**
4431  * nr_free_pagecache_pages - count number of pages beyond high watermark
4432  *
4433  * nr_free_pagecache_pages() counts the number of pages which are beyond the
4434  * high watermark within all zones.
4435  */
4436 unsigned long nr_free_pagecache_pages(void)
4437 {
4438 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4439 }
4440 
4441 static inline void show_node(struct zone *zone)
4442 {
4443 	if (IS_ENABLED(CONFIG_NUMA))
4444 		printk("Node %d ", zone_to_nid(zone));
4445 }
4446 
4447 long si_mem_available(void)
4448 {
4449 	long available;
4450 	unsigned long pagecache;
4451 	unsigned long wmark_low = 0;
4452 	unsigned long pages[NR_LRU_LISTS];
4453 	struct zone *zone;
4454 	int lru;
4455 
4456 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4457 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4458 
4459 	for_each_zone(zone)
4460 		wmark_low += zone->watermark[WMARK_LOW];
4461 
4462 	/*
4463 	 * Estimate the amount of memory available for userspace allocations,
4464 	 * without causing swapping.
4465 	 */
4466 	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4467 
4468 	/*
4469 	 * Not all the page cache can be freed, otherwise the system will
4470 	 * start swapping. Assume at least half of the page cache, or the
4471 	 * low watermark worth of cache, needs to stay.
4472 	 */
4473 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4474 	pagecache -= min(pagecache / 2, wmark_low);
4475 	available += pagecache;
4476 
4477 	/*
4478 	 * Part of the reclaimable slab consists of items that are in use,
4479 	 * and cannot be freed. Cap this estimate at the low watermark.
4480 	 */
4481 	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4482 		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4483 			 wmark_low);
4484 
4485 	if (available < 0)
4486 		available = 0;
4487 	return available;
4488 }
4489 EXPORT_SYMBOL_GPL(si_mem_available);
4490 
4491 void si_meminfo(struct sysinfo *val)
4492 {
4493 	val->totalram = totalram_pages;
4494 	val->sharedram = global_node_page_state(NR_SHMEM);
4495 	val->freeram = global_page_state(NR_FREE_PAGES);
4496 	val->bufferram = nr_blockdev_pages();
4497 	val->totalhigh = totalhigh_pages;
4498 	val->freehigh = nr_free_highpages();
4499 	val->mem_unit = PAGE_SIZE;
4500 }
4501 
4502 EXPORT_SYMBOL(si_meminfo);
4503 
4504 #ifdef CONFIG_NUMA
4505 void si_meminfo_node(struct sysinfo *val, int nid)
4506 {
4507 	int zone_type;		/* needs to be signed */
4508 	unsigned long managed_pages = 0;
4509 	unsigned long managed_highpages = 0;
4510 	unsigned long free_highpages = 0;
4511 	pg_data_t *pgdat = NODE_DATA(nid);
4512 
4513 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4514 		managed_pages += pgdat->node_zones[zone_type].managed_pages;
4515 	val->totalram = managed_pages;
4516 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
4517 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4518 #ifdef CONFIG_HIGHMEM
4519 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4520 		struct zone *zone = &pgdat->node_zones[zone_type];
4521 
4522 		if (is_highmem(zone)) {
4523 			managed_highpages += zone->managed_pages;
4524 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4525 		}
4526 	}
4527 	val->totalhigh = managed_highpages;
4528 	val->freehigh = free_highpages;
4529 #else
4530 	val->totalhigh = managed_highpages;
4531 	val->freehigh = free_highpages;
4532 #endif
4533 	val->mem_unit = PAGE_SIZE;
4534 }
4535 #endif
4536 
4537 /*
4538  * Determine whether the node should be displayed or not, depending on whether
4539  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
4540  */
4541 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4542 {
4543 	if (!(flags & SHOW_MEM_FILTER_NODES))
4544 		return false;
4545 
4546 	/*
4547 	 * no node mask - aka implicit memory numa policy. Do not bother with
4548 	 * the synchronization - read_mems_allowed_begin - because we do not
4549 	 * have to be precise here.
4550 	 */
4551 	if (!nodemask)
4552 		nodemask = &cpuset_current_mems_allowed;
4553 
4554 	return !node_isset(nid, *nodemask);
4555 }
4556 
4557 #define K(x) ((x) << (PAGE_SHIFT-10))
4558 
4559 static void show_migration_types(unsigned char type)
4560 {
4561 	static const char types[MIGRATE_TYPES] = {
4562 		[MIGRATE_UNMOVABLE]	= 'U',
4563 		[MIGRATE_MOVABLE]	= 'M',
4564 		[MIGRATE_RECLAIMABLE]	= 'E',
4565 		[MIGRATE_HIGHATOMIC]	= 'H',
4566 #ifdef CONFIG_CMA
4567 		[MIGRATE_CMA]		= 'C',
4568 #endif
4569 #ifdef CONFIG_MEMORY_ISOLATION
4570 		[MIGRATE_ISOLATE]	= 'I',
4571 #endif
4572 	};
4573 	char tmp[MIGRATE_TYPES + 1];
4574 	char *p = tmp;
4575 	int i;
4576 
4577 	for (i = 0; i < MIGRATE_TYPES; i++) {
4578 		if (type & (1 << i))
4579 			*p++ = types[i];
4580 	}
4581 
4582 	*p = '\0';
4583 	printk(KERN_CONT "(%s) ", tmp);
4584 }
4585 
4586 /*
4587  * Show free area list (used inside shift_scroll-lock stuff)
4588  * We also calculate the percentage fragmentation. We do this by counting the
4589  * memory on each free list with the exception of the first item on the list.
4590  *
4591  * Bits in @filter:
4592  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4593  *   cpuset.
4594  */
4595 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4596 {
4597 	unsigned long free_pcp = 0;
4598 	int cpu;
4599 	struct zone *zone;
4600 	pg_data_t *pgdat;
4601 
4602 	for_each_populated_zone(zone) {
4603 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4604 			continue;
4605 
4606 		for_each_online_cpu(cpu)
4607 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4608 	}
4609 
4610 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4611 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4612 		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4613 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4614 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4615 		" free:%lu free_pcp:%lu free_cma:%lu\n",
4616 		global_node_page_state(NR_ACTIVE_ANON),
4617 		global_node_page_state(NR_INACTIVE_ANON),
4618 		global_node_page_state(NR_ISOLATED_ANON),
4619 		global_node_page_state(NR_ACTIVE_FILE),
4620 		global_node_page_state(NR_INACTIVE_FILE),
4621 		global_node_page_state(NR_ISOLATED_FILE),
4622 		global_node_page_state(NR_UNEVICTABLE),
4623 		global_node_page_state(NR_FILE_DIRTY),
4624 		global_node_page_state(NR_WRITEBACK),
4625 		global_node_page_state(NR_UNSTABLE_NFS),
4626 		global_node_page_state(NR_SLAB_RECLAIMABLE),
4627 		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
4628 		global_node_page_state(NR_FILE_MAPPED),
4629 		global_node_page_state(NR_SHMEM),
4630 		global_page_state(NR_PAGETABLE),
4631 		global_page_state(NR_BOUNCE),
4632 		global_page_state(NR_FREE_PAGES),
4633 		free_pcp,
4634 		global_page_state(NR_FREE_CMA_PAGES));
4635 
4636 	for_each_online_pgdat(pgdat) {
4637 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4638 			continue;
4639 
4640 		printk("Node %d"
4641 			" active_anon:%lukB"
4642 			" inactive_anon:%lukB"
4643 			" active_file:%lukB"
4644 			" inactive_file:%lukB"
4645 			" unevictable:%lukB"
4646 			" isolated(anon):%lukB"
4647 			" isolated(file):%lukB"
4648 			" mapped:%lukB"
4649 			" dirty:%lukB"
4650 			" writeback:%lukB"
4651 			" shmem:%lukB"
4652 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4653 			" shmem_thp: %lukB"
4654 			" shmem_pmdmapped: %lukB"
4655 			" anon_thp: %lukB"
4656 #endif
4657 			" writeback_tmp:%lukB"
4658 			" unstable:%lukB"
4659 			" all_unreclaimable? %s"
4660 			"\n",
4661 			pgdat->node_id,
4662 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4663 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4664 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4665 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4666 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
4667 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4668 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4669 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
4670 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
4671 			K(node_page_state(pgdat, NR_WRITEBACK)),
4672 			K(node_page_state(pgdat, NR_SHMEM)),
4673 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4674 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4675 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4676 					* HPAGE_PMD_NR),
4677 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4678 #endif
4679 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4680 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4681 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4682 				"yes" : "no");
4683 	}
4684 
4685 	for_each_populated_zone(zone) {
4686 		int i;
4687 
4688 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4689 			continue;
4690 
4691 		free_pcp = 0;
4692 		for_each_online_cpu(cpu)
4693 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4694 
4695 		show_node(zone);
4696 		printk(KERN_CONT
4697 			"%s"
4698 			" free:%lukB"
4699 			" min:%lukB"
4700 			" low:%lukB"
4701 			" high:%lukB"
4702 			" active_anon:%lukB"
4703 			" inactive_anon:%lukB"
4704 			" active_file:%lukB"
4705 			" inactive_file:%lukB"
4706 			" unevictable:%lukB"
4707 			" writepending:%lukB"
4708 			" present:%lukB"
4709 			" managed:%lukB"
4710 			" mlocked:%lukB"
4711 			" kernel_stack:%lukB"
4712 			" pagetables:%lukB"
4713 			" bounce:%lukB"
4714 			" free_pcp:%lukB"
4715 			" local_pcp:%ukB"
4716 			" free_cma:%lukB"
4717 			"\n",
4718 			zone->name,
4719 			K(zone_page_state(zone, NR_FREE_PAGES)),
4720 			K(min_wmark_pages(zone)),
4721 			K(low_wmark_pages(zone)),
4722 			K(high_wmark_pages(zone)),
4723 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4724 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4725 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4726 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4727 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4728 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4729 			K(zone->present_pages),
4730 			K(zone->managed_pages),
4731 			K(zone_page_state(zone, NR_MLOCK)),
4732 			zone_page_state(zone, NR_KERNEL_STACK_KB),
4733 			K(zone_page_state(zone, NR_PAGETABLE)),
4734 			K(zone_page_state(zone, NR_BOUNCE)),
4735 			K(free_pcp),
4736 			K(this_cpu_read(zone->pageset->pcp.count)),
4737 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4738 		printk("lowmem_reserve[]:");
4739 		for (i = 0; i < MAX_NR_ZONES; i++)
4740 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4741 		printk(KERN_CONT "\n");
4742 	}
4743 
4744 	for_each_populated_zone(zone) {
4745 		unsigned int order;
4746 		unsigned long nr[MAX_ORDER], flags, total = 0;
4747 		unsigned char types[MAX_ORDER];
4748 
4749 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4750 			continue;
4751 		show_node(zone);
4752 		printk(KERN_CONT "%s: ", zone->name);
4753 
4754 		spin_lock_irqsave(&zone->lock, flags);
4755 		for (order = 0; order < MAX_ORDER; order++) {
4756 			struct free_area *area = &zone->free_area[order];
4757 			int type;
4758 
4759 			nr[order] = area->nr_free;
4760 			total += nr[order] << order;
4761 
4762 			types[order] = 0;
4763 			for (type = 0; type < MIGRATE_TYPES; type++) {
4764 				if (!list_empty(&area->free_list[type]))
4765 					types[order] |= 1 << type;
4766 			}
4767 		}
4768 		spin_unlock_irqrestore(&zone->lock, flags);
4769 		for (order = 0; order < MAX_ORDER; order++) {
4770 			printk(KERN_CONT "%lu*%lukB ",
4771 			       nr[order], K(1UL) << order);
4772 			if (nr[order])
4773 				show_migration_types(types[order]);
4774 		}
4775 		printk(KERN_CONT "= %lukB\n", K(total));
4776 	}
4777 
4778 	hugetlb_show_meminfo();
4779 
4780 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
4781 
4782 	show_swap_cache_info();
4783 }
4784 
4785 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4786 {
4787 	zoneref->zone = zone;
4788 	zoneref->zone_idx = zone_idx(zone);
4789 }
4790 
4791 /*
4792  * Builds allocation fallback zone lists.
4793  *
4794  * Add all populated zones of a node to the zonelist.
4795  */
4796 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4797 				int nr_zones)
4798 {
4799 	struct zone *zone;
4800 	enum zone_type zone_type = MAX_NR_ZONES;
4801 
4802 	do {
4803 		zone_type--;
4804 		zone = pgdat->node_zones + zone_type;
4805 		if (managed_zone(zone)) {
4806 			zoneref_set_zone(zone,
4807 				&zonelist->_zonerefs[nr_zones++]);
4808 			check_highest_zone(zone_type);
4809 		}
4810 	} while (zone_type);
4811 
4812 	return nr_zones;
4813 }
4814 
4815 
4816 /*
4817  *  zonelist_order:
4818  *  0 = automatic detection of better ordering.
4819  *  1 = order by ([node] distance, -zonetype)
4820  *  2 = order by (-zonetype, [node] distance)
4821  *
4822  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4823  *  the same zonelist. So only NUMA can configure this param.
4824  */
4825 #define ZONELIST_ORDER_DEFAULT  0
4826 #define ZONELIST_ORDER_NODE     1
4827 #define ZONELIST_ORDER_ZONE     2
4828 
4829 /* zonelist order in the kernel.
4830  * set_zonelist_order() will set this to NODE or ZONE.
4831  */
4832 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4833 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4834 
4835 
4836 #ifdef CONFIG_NUMA
4837 /* The value user specified ....changed by config */
4838 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4839 /* string for sysctl */
4840 #define NUMA_ZONELIST_ORDER_LEN	16
4841 char numa_zonelist_order[16] = "default";
4842 
4843 /*
4844  * interface for configuring zonelist ordering.
4845  * command line option "numa_zonelist_order"
4846  *	= "[dD]efault"	- default, automatic configuration.
4847  *	= "[nN]ode"	- order by node locality, then by zone within node
4848  *	= "[zZ]one"	- order by zone, then by locality within zone
4849  */
4850 
4851 static int __parse_numa_zonelist_order(char *s)
4852 {
4853 	if (*s == 'd' || *s == 'D') {
4854 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4855 	} else if (*s == 'n' || *s == 'N') {
4856 		user_zonelist_order = ZONELIST_ORDER_NODE;
4857 	} else if (*s == 'z' || *s == 'Z') {
4858 		user_zonelist_order = ZONELIST_ORDER_ZONE;
4859 	} else {
4860 		pr_warn("Ignoring invalid numa_zonelist_order value:  %s\n", s);
4861 		return -EINVAL;
4862 	}
4863 	return 0;
4864 }
4865 
4866 static __init int setup_numa_zonelist_order(char *s)
4867 {
4868 	int ret;
4869 
4870 	if (!s)
4871 		return 0;
4872 
4873 	ret = __parse_numa_zonelist_order(s);
4874 	if (ret == 0)
4875 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4876 
4877 	return ret;
4878 }
4879 early_param("numa_zonelist_order", setup_numa_zonelist_order);
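/*
 * Illustrative note (editorial, not part of the kernel source): the ordering
 * can be selected at boot time with e.g. "numa_zonelist_order=zone" on the
 * kernel command line, or at runtime through the vm.numa_zonelist_order
 * sysctl handled by numa_zonelist_order_handler() below.
 */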
4880 
4881 /*
4882  * sysctl handler for numa_zonelist_order
4883  */
4884 int numa_zonelist_order_handler(struct ctl_table *table, int write,
4885 		void __user *buffer, size_t *length,
4886 		loff_t *ppos)
4887 {
4888 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
4889 	int ret;
4890 	static DEFINE_MUTEX(zl_order_mutex);
4891 
4892 	mutex_lock(&zl_order_mutex);
4893 	if (write) {
4894 		if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4895 			ret = -EINVAL;
4896 			goto out;
4897 		}
4898 		strcpy(saved_string, (char *)table->data);
4899 	}
4900 	ret = proc_dostring(table, write, buffer, length, ppos);
4901 	if (ret)
4902 		goto out;
4903 	if (write) {
4904 		int oldval = user_zonelist_order;
4905 
4906 		ret = __parse_numa_zonelist_order((char *)table->data);
4907 		if (ret) {
4908 			/*
4909 			 * bogus value.  restore saved string
4910 			 */
4911 			strncpy((char *)table->data, saved_string,
4912 				NUMA_ZONELIST_ORDER_LEN);
4913 			user_zonelist_order = oldval;
4914 		} else if (oldval != user_zonelist_order) {
4915 			mem_hotplug_begin();
4916 			mutex_lock(&zonelists_mutex);
4917 			build_all_zonelists(NULL, NULL);
4918 			mutex_unlock(&zonelists_mutex);
4919 			mem_hotplug_done();
4920 		}
4921 	}
4922 out:
4923 	mutex_unlock(&zl_order_mutex);
4924 	return ret;
4925 }
4926 
4927 
4928 #define MAX_NODE_LOAD (nr_online_nodes)
4929 static int node_load[MAX_NUMNODES];
4930 
4931 /**
4932  * find_next_best_node - find the next node that should appear in a given node's fallback list
4933  * @node: node whose fallback list we're appending
4934  * @used_node_mask: nodemask_t of already used nodes
4935  *
4936  * We use a number of factors to determine which is the next node that should
4937  * appear on a given node's fallback list.  The node should not have appeared
4938  * already in @node's fallback list, and it should be the next closest node
4939  * according to the distance array (which contains arbitrary distance values
4940  * from each node to each node in the system), and should also prefer nodes
4941  * with no CPUs, since presumably they'll have very little allocation pressure
4942  * on them otherwise.
4943  * It returns -1 if no node is found.
4944  */
4945 static int find_next_best_node(int node, nodemask_t *used_node_mask)
4946 {
4947 	int n, val;
4948 	int min_val = INT_MAX;
4949 	int best_node = NUMA_NO_NODE;
4950 	const struct cpumask *tmp = cpumask_of_node(0);
4951 
4952 	/* Use the local node if we haven't already */
4953 	if (!node_isset(node, *used_node_mask)) {
4954 		node_set(node, *used_node_mask);
4955 		return node;
4956 	}
4957 
4958 	for_each_node_state(n, N_MEMORY) {
4959 
4960 		/* Don't want a node to appear more than once */
4961 		if (node_isset(n, *used_node_mask))
4962 			continue;
4963 
4964 		/* Use the distance array to find the distance */
4965 		val = node_distance(node, n);
4966 
4967 		/* Penalize nodes under us ("prefer the next node") */
4968 		val += (n < node);
4969 
4970 		/* Give preference to headless and unused nodes */
4971 		tmp = cpumask_of_node(n);
4972 		if (!cpumask_empty(tmp))
4973 			val += PENALTY_FOR_NODE_WITH_CPUS;
4974 
4975 		/* Slight preference for less loaded node */
4976 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4977 		val += node_load[n];
4978 
4979 		if (val < min_val) {
4980 			min_val = val;
4981 			best_node = n;
4982 		}
4983 	}
4984 
4985 	if (best_node >= 0)
4986 		node_set(best_node, *used_node_mask);
4987 
4988 	return best_node;
4989 }
4990 
4991 
4992 /*
4993  * Build zonelists ordered by node and zones within node.
4994  * This results in maximum locality--normal zone overflows into local
4995  * DMA zone, if any--but risks exhausting DMA zone.
4996  */
4997 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4998 {
4999 	int j;
5000 	struct zonelist *zonelist;
5001 
5002 	zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5003 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
5004 		;
5005 	j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5006 	zonelist->_zonerefs[j].zone = NULL;
5007 	zonelist->_zonerefs[j].zone_idx = 0;
5008 }
5009 
5010 /*
5011  * Build gfp_thisnode zonelists
5012  */
5013 static void build_thisnode_zonelists(pg_data_t *pgdat)
5014 {
5015 	int j;
5016 	struct zonelist *zonelist;
5017 
5018 	zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
5019 	j = build_zonelists_node(pgdat, zonelist, 0);
5020 	zonelist->_zonerefs[j].zone = NULL;
5021 	zonelist->_zonerefs[j].zone_idx = 0;
5022 }
5023 
5024 /*
5025  * Build zonelists ordered by zone and nodes within zones.
5026  * This results in conserving DMA zone[s] until all Normal memory is
5027  * exhausted, but results in overflowing to remote node while memory
5028  * may still exist in local DMA zone.
5029  */
5030 static int node_order[MAX_NUMNODES];
5031 
5032 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
5033 {
5034 	int pos, j, node;
5035 	int zone_type;		/* needs to be signed */
5036 	struct zone *z;
5037 	struct zonelist *zonelist;
5038 
5039 	zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5040 	pos = 0;
5041 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
5042 		for (j = 0; j < nr_nodes; j++) {
5043 			node = node_order[j];
5044 			z = &NODE_DATA(node)->node_zones[zone_type];
5045 			if (managed_zone(z)) {
5046 				zoneref_set_zone(z,
5047 					&zonelist->_zonerefs[pos++]);
5048 				check_highest_zone(zone_type);
5049 			}
5050 		}
5051 	}
5052 	zonelist->_zonerefs[pos].zone = NULL;
5053 	zonelist->_zonerefs[pos].zone_idx = 0;
5054 }
5055 
5056 #if defined(CONFIG_64BIT)
5057 /*
5058  * Devices that require DMA32/DMA are relatively rare and do not justify a
5059  * penalty to every machine in case the specialised case applies. Default
5060  * to Node-ordering on 64-bit NUMA machines
5061  */
5062 static int default_zonelist_order(void)
5063 {
5064 	return ZONELIST_ORDER_NODE;
5065 }
5066 #else
5067 /*
5068  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
5069  * by the kernel. If processes running on node 0 deplete the low memory zone
5070  * then reclaim will occur more frequently, increasing stalls and potentially
5071  * making it easier to OOM if a large percentage of the zone is under writeback or
5072  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
5073  * Hence, default to zone ordering on 32-bit.
5074  */
5075 static int default_zonelist_order(void)
5076 {
5077 	return ZONELIST_ORDER_ZONE;
5078 }
5079 #endif /* CONFIG_64BIT */
5080 
5081 static void set_zonelist_order(void)
5082 {
5083 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
5084 		current_zonelist_order = default_zonelist_order();
5085 	else
5086 		current_zonelist_order = user_zonelist_order;
5087 }
5088 
5089 static void build_zonelists(pg_data_t *pgdat)
5090 {
5091 	int i, node, load;
5092 	nodemask_t used_mask;
5093 	int local_node, prev_node;
5094 	struct zonelist *zonelist;
5095 	unsigned int order = current_zonelist_order;
5096 
5097 	/* initialize zonelists */
5098 	for (i = 0; i < MAX_ZONELISTS; i++) {
5099 		zonelist = pgdat->node_zonelists + i;
5100 		zonelist->_zonerefs[0].zone = NULL;
5101 		zonelist->_zonerefs[0].zone_idx = 0;
5102 	}
5103 
5104 	/* NUMA-aware ordering of nodes */
5105 	local_node = pgdat->node_id;
5106 	load = nr_online_nodes;
5107 	prev_node = local_node;
5108 	nodes_clear(used_mask);
5109 
5110 	memset(node_order, 0, sizeof(node_order));
5111 	i = 0;
5112 
5113 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5114 		/*
5115 		 * We don't want to pressure a particular node.
5116 		 * So add a penalty to the first node in the same
5117 		 * distance group to make it round-robin.
5118 		 */
5119 		if (node_distance(local_node, node) !=
5120 		    node_distance(local_node, prev_node))
5121 			node_load[node] = load;
5122 
5123 		prev_node = node;
5124 		load--;
5125 		if (order == ZONELIST_ORDER_NODE)
5126 			build_zonelists_in_node_order(pgdat, node);
5127 		else
5128 			node_order[i++] = node;	/* remember order */
5129 	}
5130 
5131 	if (order == ZONELIST_ORDER_ZONE) {
5132 		/* calculate node order -- i.e., DMA last! */
5133 		build_zonelists_in_zone_order(pgdat, i);
5134 	}
5135 
5136 	build_thisnode_zonelists(pgdat);
5137 }
5138 
5139 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5140 /*
5141  * Return node id of node used for "local" allocations.
5142  * I.e., first node id of first zone in arg node's generic zonelist.
5143  * Used for initializing percpu 'numa_mem', which is used primarily
5144  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5145  */
5146 int local_memory_node(int node)
5147 {
5148 	struct zoneref *z;
5149 
5150 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5151 				   gfp_zone(GFP_KERNEL),
5152 				   NULL);
5153 	return z->zone->node;
5154 }
5155 #endif
5156 
5157 static void setup_min_unmapped_ratio(void);
5158 static void setup_min_slab_ratio(void);
5159 #else	/* CONFIG_NUMA */
5160 
5161 static void set_zonelist_order(void)
5162 {
5163 	current_zonelist_order = ZONELIST_ORDER_ZONE;
5164 }
5165 
5166 static void build_zonelists(pg_data_t *pgdat)
5167 {
5168 	int node, local_node;
5169 	enum zone_type j;
5170 	struct zonelist *zonelist;
5171 
5172 	local_node = pgdat->node_id;
5173 
5174 	zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5175 	j = build_zonelists_node(pgdat, zonelist, 0);
5176 
5177 	/*
5178 	 * Now we build the zonelist so that it contains the zones
5179 	 * of all the other nodes.
5180 	 * We don't want to pressure a particular node, so when
5181 	 * building the zones for node N, we make sure that the
5182 	 * zones coming right after the local ones are those from
5183 	 * node N+1 (modulo N)
5184 	 */
5185 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5186 		if (!node_online(node))
5187 			continue;
5188 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5189 	}
5190 	for (node = 0; node < local_node; node++) {
5191 		if (!node_online(node))
5192 			continue;
5193 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5194 	}
5195 
5196 	zonelist->_zonerefs[j].zone = NULL;
5197 	zonelist->_zonerefs[j].zone_idx = 0;
5198 }
5199 
5200 #endif	/* CONFIG_NUMA */
5201 
5202 /*
5203  * Boot pageset table. One per cpu which is going to be used for all
5204  * zones and all nodes. The parameters will be set in such a way
5205  * that an item put on a list will immediately be handed over to
5206  * the buddy list. This is safe since pageset manipulation is done
5207  * with interrupts disabled.
5208  *
5209  * The boot_pagesets must be kept even after bootup is complete for
5210  * unused processors and/or zones. They do play a role for bootstrapping
5211  * hotplugged processors.
5212  *
5213  * zoneinfo_show() and maybe other functions do
5214  * not check if the processor is online before following the pageset pointer.
5215  * Other parts of the kernel may not check if the zone is available.
5216  */
5217 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5218 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5219 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5220 static void setup_zone_pageset(struct zone *zone);
5221 
5222 /*
5223  * Global mutex to protect against size modification of zonelists
5224  * as well as to serialize pageset setup for the new populated zone.
5225  */
5226 DEFINE_MUTEX(zonelists_mutex);
5227 
5228 /* The return value is int just for stop_machine() */
5229 static int __build_all_zonelists(void *data)
5230 {
5231 	int nid;
5232 	int cpu;
5233 	pg_data_t *self = data;
5234 
5235 #ifdef CONFIG_NUMA
5236 	memset(node_load, 0, sizeof(node_load));
5237 #endif
5238 
5239 	if (self && !node_online(self->node_id)) {
5240 		build_zonelists(self);
5241 	}
5242 
5243 	for_each_online_node(nid) {
5244 		pg_data_t *pgdat = NODE_DATA(nid);
5245 
5246 		build_zonelists(pgdat);
5247 	}
5248 
5249 	/*
5250 	 * Initialize the boot_pagesets that are going to be used
5251 	 * for bootstrapping processors. The real pagesets for
5252 	 * each zone will be allocated later when the per cpu
5253 	 * allocator is available.
5254 	 *
5255 	 * boot_pagesets are used also for bootstrapping offline
5256 	 * cpus if the system is already booted because the pagesets
5257 	 * are needed to initialize allocators on a specific cpu too.
5258 	 * F.e. the percpu allocator needs the page allocator which
5259 	 * needs the percpu allocator in order to allocate its pagesets
5260 	 * (a chicken-egg dilemma).
5261 	 */
5262 	for_each_possible_cpu(cpu) {
5263 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5264 
5265 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5266 		/*
5267 		 * We now know the "local memory node" for each node--
5268 		 * i.e., the node of the first zone in the generic zonelist.
5269 		 * Set up numa_mem percpu variable for on-line cpus.  During
5270 		 * boot, only the boot cpu should be on-line;  we'll init the
5271 		 * secondary cpus' numa_mem as they come on-line.  During
5272 		 * node/memory hotplug, we'll fixup all on-line cpus.
5273 		 */
5274 		if (cpu_online(cpu))
5275 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5276 #endif
5277 	}
5278 
5279 	return 0;
5280 }
5281 
5282 static noinline void __init
5283 build_all_zonelists_init(void)
5284 {
5285 	__build_all_zonelists(NULL);
5286 	mminit_verify_zonelist();
5287 	cpuset_init_current_mems_allowed();
5288 }
5289 
5290 /*
5291  * Called with zonelists_mutex held always
5292  * unless system_state == SYSTEM_BOOTING.
5293  *
5294  * __ref due to (1) call of __meminit annotated setup_zone_pageset
5295  * [we're only called with non-NULL zone through __meminit paths] and
5296  * (2) call of __init annotated helper build_all_zonelists_init
5297  * [protected by SYSTEM_BOOTING].
5298  */
5299 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
5300 {
5301 	set_zonelist_order();
5302 
5303 	if (system_state == SYSTEM_BOOTING) {
5304 		build_all_zonelists_init();
5305 	} else {
5306 #ifdef CONFIG_MEMORY_HOTPLUG
5307 		if (zone)
5308 			setup_zone_pageset(zone);
5309 #endif
5310 		/* we have to stop all cpus to guarantee there is no user
5311 		   of zonelist */
5312 		stop_machine_cpuslocked(__build_all_zonelists, pgdat, NULL);
5313 		/* cpuset refresh routine should be here */
5314 	}
5315 	vm_total_pages = nr_free_pagecache_pages();
5316 	/*
5317 	 * Disable grouping by mobility if the number of pages in the
5318 	 * system is too low to allow the mechanism to work. It would be
5319 	 * more accurate, but expensive to check per-zone. This check is
5320 	 * made on memory-hotadd so a system can start with mobility
5321 	 * disabled and enable it later
5322 	 */
5323 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5324 		page_group_by_mobility_disabled = 1;
5325 	else
5326 		page_group_by_mobility_disabled = 0;
5327 
5328 	pr_info("Built %i zonelists in %s order, mobility grouping %s.  Total pages: %ld\n",
5329 		nr_online_nodes,
5330 		zonelist_order_name[current_zonelist_order],
5331 		page_group_by_mobility_disabled ? "off" : "on",
5332 		vm_total_pages);
5333 #ifdef CONFIG_NUMA
5334 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5335 #endif
5336 }
5337 
5338 /*
5339  * Initially all pages are reserved - free ones are freed
5340  * up by free_all_bootmem() once the early boot process is
5341  * done. Non-atomic initialization, single-pass.
5342  */
5343 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5344 		unsigned long start_pfn, enum memmap_context context)
5345 {
5346 	struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
5347 	unsigned long end_pfn = start_pfn + size;
5348 	pg_data_t *pgdat = NODE_DATA(nid);
5349 	unsigned long pfn;
5350 	unsigned long nr_initialised = 0;
5351 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5352 	struct memblock_region *r = NULL, *tmp;
5353 #endif
5354 
5355 	if (highest_memmap_pfn < end_pfn - 1)
5356 		highest_memmap_pfn = end_pfn - 1;
5357 
5358 	/*
5359 	 * Honor reservation requested by the driver for this ZONE_DEVICE
5360 	 * memory
5361 	 */
5362 	if (altmap && start_pfn == altmap->base_pfn)
5363 		start_pfn += altmap->reserve;
5364 
5365 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5366 		/*
5367 		 * There can be holes in boot-time mem_map[]s handed to this
5368 		 * function.  They do not exist on hotplugged memory.
5369 		 */
5370 		if (context != MEMMAP_EARLY)
5371 			goto not_early;
5372 
5373 		if (!early_pfn_valid(pfn)) {
5374 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5375 			/*
5376 			 * Skip to the pfn preceding the next valid one (or
5377 			 * end_pfn), such that we hit a valid pfn (or end_pfn)
5378 			 * on our next iteration of the loop.
5379 			 */
5380 			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5381 #endif
5382 			continue;
5383 		}
5384 		if (!early_pfn_in_nid(pfn, nid))
5385 			continue;
5386 		if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5387 			break;
5388 
5389 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5390 		/*
5391 		 * Check the memblock attribute provided by firmware, which can
5392 		 * affect the kernel memory layout.  If zone==ZONE_MOVABLE but the
5393 		 * memory is mirrored, this is an overlapping memmap init, so skip it.
5394 		 */
5395 		if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5396 			if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5397 				for_each_memblock(memory, tmp)
5398 					if (pfn < memblock_region_memory_end_pfn(tmp))
5399 						break;
5400 				r = tmp;
5401 			}
5402 			if (pfn >= memblock_region_memory_base_pfn(r) &&
5403 			    memblock_is_mirror(r)) {
5404 				/* already initialized as NORMAL */
5405 				pfn = memblock_region_memory_end_pfn(r);
5406 				continue;
5407 			}
5408 		}
5409 #endif
5410 
5411 not_early:
5412 		/*
5413 		 * Mark the block movable so that blocks are reserved for
5414 		 * movable at startup. This will force kernel allocations
5415 		 * to reserve their blocks rather than leaking throughout
5416 		 * the address space during boot when many long-lived
5417 		 * kernel allocations are made.
5418 		 *
5419 		 * The bitmap is created for the zone's valid pfn range, but the
5420 		 * memmap can be created for invalid pages (for alignment).
5421 		 * Check here so that set_pageblock_migratetype() is not called
5422 		 * against a pfn outside the zone.
5423 		 */
5424 		if (!(pfn & (pageblock_nr_pages - 1))) {
5425 			struct page *page = pfn_to_page(pfn);
5426 
5427 			__init_single_page(page, pfn, zone, nid);
5428 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5429 		} else {
5430 			__init_single_pfn(pfn, zone, nid);
5431 		}
5432 	}
5433 }
5434 
5435 static void __meminit zone_init_free_lists(struct zone *zone)
5436 {
5437 	unsigned int order, t;
5438 	for_each_migratetype_order(order, t) {
5439 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5440 		zone->free_area[order].nr_free = 0;
5441 	}
5442 }
5443 
5444 #ifndef __HAVE_ARCH_MEMMAP_INIT
5445 #define memmap_init(size, nid, zone, start_pfn) \
5446 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
5447 #endif
5448 
5449 static int zone_batchsize(struct zone *zone)
5450 {
5451 #ifdef CONFIG_MMU
5452 	int batch;
5453 
5454 	/*
5455 	 * The per-cpu-pages pools are set to around 1000th of the
5456 	 * size of the zone.  But no more than 1/2 of a meg.
5457 	 *
5458 	 * OK, so we don't know how big the cache is.  So guess.
5459 	 */
5460 	batch = zone->managed_pages / 1024;
5461 	if (batch * PAGE_SIZE > 512 * 1024)
5462 		batch = (512 * 1024) / PAGE_SIZE;
5463 	batch /= 4;		/* We effectively *= 4 below */
5464 	if (batch < 1)
5465 		batch = 1;
5466 
5467 	/*
5468 	 * Clamp the batch to a 2^n - 1 value. Having a power
5469 	 * of 2 value was found to be more likely to have
5470 	 * suboptimal cache aliasing properties in some cases.
5471 	 *
5472 	 * For example if 2 tasks are alternately allocating
5473 	 * batches of pages, one task can end up with a lot
5474 	 * of pages of one half of the possible page colors
5475 	 * and the other with pages of the other colors.
5476 	 */
5477 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
5478 
5479 	return batch;
5480 
5481 #else
5482 	/* The deferral and batching of frees should be suppressed under NOMMU
5483 	 * conditions.
5484 	 *
5485 	 * The problem is that NOMMU needs to be able to allocate large chunks
5486 	 * of contiguous memory as there's no hardware page translation to
5487 	 * assemble apparent contiguous memory from discontiguous pages.
5488 	 *
5489 	 * Queueing large contiguous runs of pages for batching, however,
5490 	 * causes the pages to actually be freed in smaller chunks.  As there
5491 	 * can be a significant delay between the individual batches being
5492 	 * recycled, this leads to the once large chunks of space being
5493 	 * fragmented and becoming unavailable for high-order allocations.
5494 	 */
5495 	return 0;
5496 #endif
5497 }
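
/*
 * Worked example (illustrative only, assumes 4KiB pages): for a zone with
 * managed_pages = 262144 (1GiB), the arithmetic above gives
 *
 *	batch = 262144 / 1024 = 256	256 * 4096 > 512KiB, so clamp:
 *	batch = (512 * 1024) / 4096 = 128
 *	batch /= 4			-> 32
 *	rounddown_pow_of_two(32 + 32/2) - 1 = 31
 *
 * so the per-cpu batch ends up as 31 pages, and pageset_set_batch() below
 * then sets pcp->high to 6 * 31 = 186 pages.
 */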
5498 
5499 /*
5500  * pcp->high and pcp->batch values are related and dependent on one another:
5501  * ->batch must never be higher than ->high.
5502  * The following function updates them in a safe manner without read side
5503  * locking.
5504  *
5505  * Any new users of pcp->batch and pcp->high should ensure they can cope with
5506  * those fields changing asynchronously (acording the the above rule).
5507  *
5508  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5509  * outside of boot time (or some other assurance that no concurrent updaters
5510  * exist).
5511  */
5512 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5513 		unsigned long batch)
5514 {
5515 	/* start with a fail-safe value for batch */
5516 	pcp->batch = 1;
5517 	smp_wmb();
5518 
5519 	/* Update high, then batch, in order */
5520 	pcp->high = high;
5521 	smp_wmb();
5522 
5523 	pcp->batch = batch;
5524 }
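
/*
 * Minimal sketch of why the fail-safe ordering above matters.  The reader
 * below is hypothetical (it is not an existing kernel function, and the
 * READ_ONCE() pairing is an assumption for illustration): a lock-free
 * consumer that snapshots ->high and ->batch may observe values from
 * different updates, and the intermediate ->batch = 1 limits the harm if
 * a stale ->batch is paired with a freshly written, smaller ->high.
 */
#if 0	/* example only */
static void example_drain_check(struct zone *zone, struct per_cpu_pages *pcp)
{
	int high = READ_ONCE(pcp->high);
	int batch = READ_ONCE(pcp->batch);

	if (pcp->count >= high)
		free_pcppages_bulk(zone, batch, pcp);
}
#endif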
5525 
5526 /* a companion to pageset_set_high() */
5527 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5528 {
5529 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5530 }
5531 
5532 static void pageset_init(struct per_cpu_pageset *p)
5533 {
5534 	struct per_cpu_pages *pcp;
5535 	int migratetype;
5536 
5537 	memset(p, 0, sizeof(*p));
5538 
5539 	pcp = &p->pcp;
5540 	pcp->count = 0;
5541 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5542 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
5543 }
5544 
5545 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5546 {
5547 	pageset_init(p);
5548 	pageset_set_batch(p, batch);
5549 }
5550 
5551 /*
5552  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
5553  * to the value high for the pageset p.
5554  */
5555 static void pageset_set_high(struct per_cpu_pageset *p,
5556 				unsigned long high)
5557 {
5558 	unsigned long batch = max(1UL, high / 4);
5559 	if ((high / 4) > (PAGE_SHIFT * 8))
5560 		batch = PAGE_SHIFT * 8;
5561 
5562 	pageset_update(&p->pcp, high, batch);
5563 }
5564 
5565 static void pageset_set_high_and_batch(struct zone *zone,
5566 				       struct per_cpu_pageset *pcp)
5567 {
5568 	if (percpu_pagelist_fraction)
5569 		pageset_set_high(pcp,
5570 			(zone->managed_pages /
5571 				percpu_pagelist_fraction));
5572 	else
5573 		pageset_set_batch(pcp, zone_batchsize(zone));
5574 }
5575 
5576 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5577 {
5578 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5579 
5580 	pageset_init(pcp);
5581 	pageset_set_high_and_batch(zone, pcp);
5582 }
5583 
5584 static void __meminit setup_zone_pageset(struct zone *zone)
5585 {
5586 	int cpu;
5587 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
5588 	for_each_possible_cpu(cpu)
5589 		zone_pageset_init(zone, cpu);
5590 }
5591 
5592 /*
5593  * Allocate per cpu pagesets and initialize them.
5594  * Before this call only boot pagesets were available.
5595  */
5596 void __init setup_per_cpu_pageset(void)
5597 {
5598 	struct pglist_data *pgdat;
5599 	struct zone *zone;
5600 
5601 	for_each_populated_zone(zone)
5602 		setup_zone_pageset(zone);
5603 
5604 	for_each_online_pgdat(pgdat)
5605 		pgdat->per_cpu_nodestats =
5606 			alloc_percpu(struct per_cpu_nodestat);
5607 }
5608 
5609 static __meminit void zone_pcp_init(struct zone *zone)
5610 {
5611 	/*
5612 	 * per cpu subsystem is not up at this point. The following code
5613 	 * relies on the ability of the linker to provide the
5614 	 * offset of a (static) per cpu variable into the per cpu area.
5615 	 */
5616 	zone->pageset = &boot_pageset;
5617 
5618 	if (populated_zone(zone))
5619 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
5620 			zone->name, zone->present_pages,
5621 					 zone_batchsize(zone));
5622 }
5623 
5624 void __meminit init_currently_empty_zone(struct zone *zone,
5625 					unsigned long zone_start_pfn,
5626 					unsigned long size)
5627 {
5628 	struct pglist_data *pgdat = zone->zone_pgdat;
5629 
5630 	pgdat->nr_zones = zone_idx(zone) + 1;
5631 
5632 	zone->zone_start_pfn = zone_start_pfn;
5633 
5634 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
5635 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
5636 			pgdat->node_id,
5637 			(unsigned long)zone_idx(zone),
5638 			zone_start_pfn, (zone_start_pfn + size));
5639 
5640 	zone_init_free_lists(zone);
5641 	zone->initialized = 1;
5642 }
5643 
5644 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5645 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
5646 
5647 /*
5648  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
5649  */
5650 int __meminit __early_pfn_to_nid(unsigned long pfn,
5651 					struct mminit_pfnnid_cache *state)
5652 {
5653 	unsigned long start_pfn, end_pfn;
5654 	int nid;
5655 
5656 	if (state->last_start <= pfn && pfn < state->last_end)
5657 		return state->last_nid;
5658 
5659 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5660 	if (nid != -1) {
5661 		state->last_start = start_pfn;
5662 		state->last_end = end_pfn;
5663 		state->last_nid = nid;
5664 	}
5665 
5666 	return nid;
5667 }
5668 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5669 
5670 /**
5671  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
5672  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
5673  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
5674  *
5675  * If an architecture guarantees that all ranges registered contain no holes
5676  * and may be freed, this function may be used instead of calling
5677  * memblock_free_early_nid() manually.
5678  */
5679 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5680 {
5681 	unsigned long start_pfn, end_pfn;
5682 	int i, this_nid;
5683 
5684 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5685 		start_pfn = min(start_pfn, max_low_pfn);
5686 		end_pfn = min(end_pfn, max_low_pfn);
5687 
5688 		if (start_pfn < end_pfn)
5689 			memblock_free_early_nid(PFN_PHYS(start_pfn),
5690 					(end_pfn - start_pfn) << PAGE_SHIFT,
5691 					this_nid);
5692 	}
5693 }
5694 
5695 /**
5696  * sparse_memory_present_with_active_regions - Call memory_present for each active range
5697  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
5698  *
5699  * If an architecture guarantees that all ranges registered contain no holes and may
5700  * be freed, this function may be used instead of calling memory_present() manually.
5701  */
5702 void __init sparse_memory_present_with_active_regions(int nid)
5703 {
5704 	unsigned long start_pfn, end_pfn;
5705 	int i, this_nid;
5706 
5707 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5708 		memory_present(this_nid, start_pfn, end_pfn);
5709 }
5710 
5711 /**
5712  * get_pfn_range_for_nid - Return the start and end page frames for a node
5713  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5714  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5715  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
5716  *
5717  * It returns the start and end page frame of a node based on information
5718  * provided by memblock_set_node(). If called for a node
5719  * with no available memory, a warning is printed and the start and end
5720  * PFNs will be 0.
5721  */
5722 void __meminit get_pfn_range_for_nid(unsigned int nid,
5723 			unsigned long *start_pfn, unsigned long *end_pfn)
5724 {
5725 	unsigned long this_start_pfn, this_end_pfn;
5726 	int i;
5727 
5728 	*start_pfn = -1UL;
5729 	*end_pfn = 0;
5730 
5731 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5732 		*start_pfn = min(*start_pfn, this_start_pfn);
5733 		*end_pfn = max(*end_pfn, this_end_pfn);
5734 	}
5735 
5736 	if (*start_pfn == -1UL)
5737 		*start_pfn = 0;
5738 }
5739 
5740 /*
5741  * This finds a zone that can be used for ZONE_MOVABLE pages. The
5742  * assumption is made that zones within a node are ordered in monotonic
5743  * increasing memory addresses so that the "highest" populated zone is used
5744  */
5745 static void __init find_usable_zone_for_movable(void)
5746 {
5747 	int zone_index;
5748 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5749 		if (zone_index == ZONE_MOVABLE)
5750 			continue;
5751 
5752 		if (arch_zone_highest_possible_pfn[zone_index] >
5753 				arch_zone_lowest_possible_pfn[zone_index])
5754 			break;
5755 	}
5756 
5757 	VM_BUG_ON(zone_index == -1);
5758 	movable_zone = zone_index;
5759 }
5760 
5761 /*
5762  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
5763  * because it is sized independently of the architecture. Unlike the other zones,
5764  * the starting point for ZONE_MOVABLE is not fixed. It may be different
5765  * in each node depending on the size of each node and how evenly kernelcore
5766  * is distributed. This helper function adjusts the zone ranges
5767  * provided by the architecture for a given node by using the end of the
5768  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5769  * zones within a node are in order of monotonically increasing memory addresses.
5770  */
5771 static void __meminit adjust_zone_range_for_zone_movable(int nid,
5772 					unsigned long zone_type,
5773 					unsigned long node_start_pfn,
5774 					unsigned long node_end_pfn,
5775 					unsigned long *zone_start_pfn,
5776 					unsigned long *zone_end_pfn)
5777 {
5778 	/* Only adjust if ZONE_MOVABLE is on this node */
5779 	if (zone_movable_pfn[nid]) {
5780 		/* Size ZONE_MOVABLE */
5781 		if (zone_type == ZONE_MOVABLE) {
5782 			*zone_start_pfn = zone_movable_pfn[nid];
5783 			*zone_end_pfn = min(node_end_pfn,
5784 				arch_zone_highest_possible_pfn[movable_zone]);
5785 
5786 		/* Adjust for ZONE_MOVABLE starting within this range */
5787 		} else if (!mirrored_kernelcore &&
5788 			*zone_start_pfn < zone_movable_pfn[nid] &&
5789 			*zone_end_pfn > zone_movable_pfn[nid]) {
5790 			*zone_end_pfn = zone_movable_pfn[nid];
5791 
5792 		/* Check if this whole range is within ZONE_MOVABLE */
5793 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
5794 			*zone_start_pfn = *zone_end_pfn;
5795 	}
5796 }
5797 
5798 /*
5799  * Return the number of pages a zone spans in a node, including holes
5800  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5801  */
5802 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5803 					unsigned long zone_type,
5804 					unsigned long node_start_pfn,
5805 					unsigned long node_end_pfn,
5806 					unsigned long *zone_start_pfn,
5807 					unsigned long *zone_end_pfn,
5808 					unsigned long *ignored)
5809 {
5810 	/* When hotadding a new node from cpu_up(), the node should be empty */
5811 	if (!node_start_pfn && !node_end_pfn)
5812 		return 0;
5813 
5814 	/* Get the start and end of the zone */
5815 	*zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5816 	*zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5817 	adjust_zone_range_for_zone_movable(nid, zone_type,
5818 				node_start_pfn, node_end_pfn,
5819 				zone_start_pfn, zone_end_pfn);
5820 
5821 	/* Check that this node has pages within the zone's required range */
5822 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5823 		return 0;
5824 
5825 	/* Move the zone boundaries inside the node if necessary */
5826 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5827 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5828 
5829 	/* Return the spanned pages */
5830 	return *zone_end_pfn - *zone_start_pfn;
5831 }
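
/*
 * Worked example (illustrative pfns, assuming ZONE_MOVABLE is not on this
 * node): for a node spanning [0x100, 0x40000) and an architectural
 * ZONE_DMA range of [0, 0x1000), the clamping above yields
 * *zone_start_pfn = 0x100 and *zone_end_pfn = 0x1000, i.e. 0xf00 spanned
 * pages; present_pages is then that value minus whatever
 * zone_absent_pages_in_node() reports for the same range.
 */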
5832 
5833 /*
5834  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5835  * then all holes in the requested range will be accounted for.
5836  */
5837 unsigned long __meminit __absent_pages_in_range(int nid,
5838 				unsigned long range_start_pfn,
5839 				unsigned long range_end_pfn)
5840 {
5841 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
5842 	unsigned long start_pfn, end_pfn;
5843 	int i;
5844 
5845 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5846 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5847 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5848 		nr_absent -= end_pfn - start_pfn;
5849 	}
5850 	return nr_absent;
5851 }
5852 
5853 /**
5854  * absent_pages_in_range - Return number of page frames in holes within a range
5855  * @start_pfn: The start PFN to start searching for holes
5856  * @end_pfn: The end PFN to stop searching for holes
5857  *
5858  * It returns the number of page frames in memory holes within a range.
5859  */
5860 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5861 							unsigned long end_pfn)
5862 {
5863 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5864 }
5865 
5866 /* Return the number of page frames in holes in a zone on a node */
5867 static unsigned long __meminit zone_absent_pages_in_node(int nid,
5868 					unsigned long zone_type,
5869 					unsigned long node_start_pfn,
5870 					unsigned long node_end_pfn,
5871 					unsigned long *ignored)
5872 {
5873 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5874 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5875 	unsigned long zone_start_pfn, zone_end_pfn;
5876 	unsigned long nr_absent;
5877 
5878 	/* When hotadding a new node from cpu_up(), the node should be empty */
5879 	if (!node_start_pfn && !node_end_pfn)
5880 		return 0;
5881 
5882 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5883 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5884 
5885 	adjust_zone_range_for_zone_movable(nid, zone_type,
5886 			node_start_pfn, node_end_pfn,
5887 			&zone_start_pfn, &zone_end_pfn);
5888 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5889 
5890 	/*
5891 	 * ZONE_MOVABLE handling.
5892 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5893 	 * and vice versa.
5894 	 */
5895 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5896 		unsigned long start_pfn, end_pfn;
5897 		struct memblock_region *r;
5898 
5899 		for_each_memblock(memory, r) {
5900 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
5901 					  zone_start_pfn, zone_end_pfn);
5902 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
5903 					zone_start_pfn, zone_end_pfn);
5904 
5905 			if (zone_type == ZONE_MOVABLE &&
5906 			    memblock_is_mirror(r))
5907 				nr_absent += end_pfn - start_pfn;
5908 
5909 			if (zone_type == ZONE_NORMAL &&
5910 			    !memblock_is_mirror(r))
5911 				nr_absent += end_pfn - start_pfn;
5912 		}
5913 	}
5914 
5915 	return nr_absent;
5916 }
5917 
5918 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5919 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5920 					unsigned long zone_type,
5921 					unsigned long node_start_pfn,
5922 					unsigned long node_end_pfn,
5923 					unsigned long *zone_start_pfn,
5924 					unsigned long *zone_end_pfn,
5925 					unsigned long *zones_size)
5926 {
5927 	unsigned int zone;
5928 
5929 	*zone_start_pfn = node_start_pfn;
5930 	for (zone = 0; zone < zone_type; zone++)
5931 		*zone_start_pfn += zones_size[zone];
5932 
5933 	*zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5934 
5935 	return zones_size[zone_type];
5936 }
5937 
5938 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5939 						unsigned long zone_type,
5940 						unsigned long node_start_pfn,
5941 						unsigned long node_end_pfn,
5942 						unsigned long *zholes_size)
5943 {
5944 	if (!zholes_size)
5945 		return 0;
5946 
5947 	return zholes_size[zone_type];
5948 }
5949 
5950 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5951 
5952 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5953 						unsigned long node_start_pfn,
5954 						unsigned long node_end_pfn,
5955 						unsigned long *zones_size,
5956 						unsigned long *zholes_size)
5957 {
5958 	unsigned long realtotalpages = 0, totalpages = 0;
5959 	enum zone_type i;
5960 
5961 	for (i = 0; i < MAX_NR_ZONES; i++) {
5962 		struct zone *zone = pgdat->node_zones + i;
5963 		unsigned long zone_start_pfn, zone_end_pfn;
5964 		unsigned long size, real_size;
5965 
5966 		size = zone_spanned_pages_in_node(pgdat->node_id, i,
5967 						  node_start_pfn,
5968 						  node_end_pfn,
5969 						  &zone_start_pfn,
5970 						  &zone_end_pfn,
5971 						  zones_size);
5972 		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5973 						  node_start_pfn, node_end_pfn,
5974 						  zholes_size);
5975 		if (size)
5976 			zone->zone_start_pfn = zone_start_pfn;
5977 		else
5978 			zone->zone_start_pfn = 0;
5979 		zone->spanned_pages = size;
5980 		zone->present_pages = real_size;
5981 
5982 		totalpages += size;
5983 		realtotalpages += real_size;
5984 	}
5985 
5986 	pgdat->node_spanned_pages = totalpages;
5987 	pgdat->node_present_pages = realtotalpages;
5988 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5989 							realtotalpages);
5990 }
5991 
5992 #ifndef CONFIG_SPARSEMEM
5993 /*
5994  * Calculate the size of the zone->blockflags rounded to an unsigned long
5995  * Start by making sure zonesize is a multiple of pageblock_order by rounding
5996  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
5997  * round what is now in bits to nearest long in bits, then return it in
5998  * bytes.
5999  */
6000 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6001 {
6002 	unsigned long usemapsize;
6003 
6004 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6005 	usemapsize = roundup(zonesize, pageblock_nr_pages);
6006 	usemapsize = usemapsize >> pageblock_order;
6007 	usemapsize *= NR_PAGEBLOCK_BITS;
6008 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6009 
6010 	return usemapsize / 8;
6011 }
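
/*
 * Worked example (illustrative, assumes pageblock_order = 9 and
 * NR_PAGEBLOCK_BITS = 4): a zone of 262144 pages starting on a pageblock
 * boundary rounds up to 262144 pages, i.e. 512 pageblocks, needing
 * 512 * 4 = 2048 bits; that is already a whole number of longs, so
 * usemap_size() returns 2048 / 8 = 256 bytes.
 */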
6012 
6013 static void __init setup_usemap(struct pglist_data *pgdat,
6014 				struct zone *zone,
6015 				unsigned long zone_start_pfn,
6016 				unsigned long zonesize)
6017 {
6018 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6019 	zone->pageblock_flags = NULL;
6020 	if (usemapsize)
6021 		zone->pageblock_flags =
6022 			memblock_virt_alloc_node_nopanic(usemapsize,
6023 							 pgdat->node_id);
6024 }
6025 #else
6026 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6027 				unsigned long zone_start_pfn, unsigned long zonesize) {}
6028 #endif /* CONFIG_SPARSEMEM */
6029 
6030 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6031 
6032 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6033 void __paginginit set_pageblock_order(void)
6034 {
6035 	unsigned int order;
6036 
6037 	/* Check that pageblock_nr_pages has not already been setup */
6038 	if (pageblock_order)
6039 		return;
6040 
6041 	if (HPAGE_SHIFT > PAGE_SHIFT)
6042 		order = HUGETLB_PAGE_ORDER;
6043 	else
6044 		order = MAX_ORDER - 1;
6045 
6046 	/*
6047 	 * Assume the largest contiguous order of interest is a huge page.
6048 	 * This value may be variable depending on boot parameters on IA64 and
6049 	 * powerpc.
6050 	 */
6051 	pageblock_order = order;
6052 }
6053 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6054 
6055 /*
6056  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6057  * is unused as pageblock_order is set at compile-time. See
6058  * include/linux/pageblock-flags.h for the values of pageblock_order based on
6059  * the kernel config.
6060  */
6061 void __paginginit set_pageblock_order(void)
6062 {
6063 }
6064 
6065 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6066 
6067 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
6068 						   unsigned long present_pages)
6069 {
6070 	unsigned long pages = spanned_pages;
6071 
6072 	/*
6073 	 * Provide a more accurate estimation if there are holes within
6074 	 * the zone and SPARSEMEM is in use. If there are holes within the
6075 	 * zone, each populated memory region may cost us one or two extra
6076 	 * memmap pages due to alignment because memmap pages for each
6077  * populated region may not be naturally aligned on a page boundary.
6078 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6079 	 */
6080 	if (spanned_pages > present_pages + (present_pages >> 4) &&
6081 	    IS_ENABLED(CONFIG_SPARSEMEM))
6082 		pages = present_pages;
6083 
6084 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6085 }
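
/*
 * Worked example (illustrative, assumes sizeof(struct page) == 64 and
 * 4KiB pages): a fully populated zone of 262144 pages needs
 * 262144 * 64 bytes of memmap, i.e. 16MiB, which the PAGE_ALIGN() and
 * shift above report as 4096 pages.  Only when SPARSEMEM is enabled and
 * spanned_pages exceeds present_pages by more than about 6% does the
 * estimate fall back to present_pages.
 */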
6086 
6087 /*
6088  * Set up the zone data structures:
6089  *   - mark all pages reserved
6090  *   - mark all memory queues empty
6091  *   - clear the memory bitmaps
6092  *
6093  * NOTE: pgdat should get zeroed by caller.
6094  */
6095 static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6096 {
6097 	enum zone_type j;
6098 	int nid = pgdat->node_id;
6099 
6100 	pgdat_resize_init(pgdat);
6101 #ifdef CONFIG_NUMA_BALANCING
6102 	spin_lock_init(&pgdat->numabalancing_migrate_lock);
6103 	pgdat->numabalancing_migrate_nr_pages = 0;
6104 	pgdat->numabalancing_migrate_next_window = jiffies;
6105 #endif
6106 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6107 	spin_lock_init(&pgdat->split_queue_lock);
6108 	INIT_LIST_HEAD(&pgdat->split_queue);
6109 	pgdat->split_queue_len = 0;
6110 #endif
6111 	init_waitqueue_head(&pgdat->kswapd_wait);
6112 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
6113 #ifdef CONFIG_COMPACTION
6114 	init_waitqueue_head(&pgdat->kcompactd_wait);
6115 #endif
6116 	pgdat_page_ext_init(pgdat);
6117 	spin_lock_init(&pgdat->lru_lock);
6118 	lruvec_init(node_lruvec(pgdat));
6119 
6120 	pgdat->per_cpu_nodestats = &boot_nodestats;
6121 
6122 	for (j = 0; j < MAX_NR_ZONES; j++) {
6123 		struct zone *zone = pgdat->node_zones + j;
6124 		unsigned long size, realsize, freesize, memmap_pages;
6125 		unsigned long zone_start_pfn = zone->zone_start_pfn;
6126 
6127 		size = zone->spanned_pages;
6128 		realsize = freesize = zone->present_pages;
6129 
6130 		/*
6131 		 * Adjust freesize so that it accounts for how much memory
6132 		 * is used by this zone for memmap. This affects the watermark
6133 		 * and per-cpu initialisations
6134 		 */
6135 		memmap_pages = calc_memmap_size(size, realsize);
6136 		if (!is_highmem_idx(j)) {
6137 			if (freesize >= memmap_pages) {
6138 				freesize -= memmap_pages;
6139 				if (memmap_pages)
6140 					printk(KERN_DEBUG
6141 					       "  %s zone: %lu pages used for memmap\n",
6142 					       zone_names[j], memmap_pages);
6143 			} else
6144 				pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6145 					zone_names[j], memmap_pages, freesize);
6146 		}
6147 
6148 		/* Account for reserved pages */
6149 		if (j == 0 && freesize > dma_reserve) {
6150 			freesize -= dma_reserve;
6151 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6152 					zone_names[0], dma_reserve);
6153 		}
6154 
6155 		if (!is_highmem_idx(j))
6156 			nr_kernel_pages += freesize;
6157 		/* Charge for highmem memmap if there are enough kernel pages */
6158 		else if (nr_kernel_pages > memmap_pages * 2)
6159 			nr_kernel_pages -= memmap_pages;
6160 		nr_all_pages += freesize;
6161 
6162 		/*
6163 		 * Set an approximate value for lowmem here, it will be adjusted
6164 		 * when the bootmem allocator frees pages into the buddy system.
6165 		 * And all highmem pages will be managed by the buddy system.
6166 		 */
6167 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
6168 #ifdef CONFIG_NUMA
6169 		zone->node = nid;
6170 #endif
6171 		zone->name = zone_names[j];
6172 		zone->zone_pgdat = pgdat;
6173 		spin_lock_init(&zone->lock);
6174 		zone_seqlock_init(zone);
6175 		zone_pcp_init(zone);
6176 
6177 		if (!size)
6178 			continue;
6179 
6180 		set_pageblock_order();
6181 		setup_usemap(pgdat, zone, zone_start_pfn, size);
6182 		init_currently_empty_zone(zone, zone_start_pfn, size);
6183 		memmap_init(size, nid, j, zone_start_pfn);
6184 	}
6185 }
6186 
6187 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6188 {
6189 	unsigned long __maybe_unused start = 0;
6190 	unsigned long __maybe_unused offset = 0;
6191 
6192 	/* Skip empty nodes */
6193 	if (!pgdat->node_spanned_pages)
6194 		return;
6195 
6196 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6197 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6198 	offset = pgdat->node_start_pfn - start;
6199 	/* ia64 gets its own node_mem_map, before this, without bootmem */
6200 	if (!pgdat->node_mem_map) {
6201 		unsigned long size, end;
6202 		struct page *map;
6203 
6204 		/*
6205 		 * The zone's endpoints aren't required to be MAX_ORDER
6206 		 * aligned, but the node_mem_map endpoints must be, in order
6207 		 * for the buddy allocator to function correctly.
6208 		 */
6209 		end = pgdat_end_pfn(pgdat);
6210 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
6211 		size =  (end - start) * sizeof(struct page);
6212 		map = alloc_remap(pgdat->node_id, size);
6213 		if (!map)
6214 			map = memblock_virt_alloc_node_nopanic(size,
6215 							       pgdat->node_id);
6216 		pgdat->node_mem_map = map + offset;
6217 	}
6218 #ifndef CONFIG_NEED_MULTIPLE_NODES
6219 	/*
6220 	 * With no DISCONTIG, the global mem_map is just set as node 0's
6221 	 */
6222 	if (pgdat == NODE_DATA(0)) {
6223 		mem_map = NODE_DATA(0)->node_mem_map;
6224 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6225 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6226 			mem_map -= offset;
6227 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6228 	}
6229 #endif
6230 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
6231 }
6232 
6233 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6234 		unsigned long node_start_pfn, unsigned long *zholes_size)
6235 {
6236 	pg_data_t *pgdat = NODE_DATA(nid);
6237 	unsigned long start_pfn = 0;
6238 	unsigned long end_pfn = 0;
6239 
6240 	/* pg_data_t should be reset to zero when it's allocated */
6241 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6242 
6243 	pgdat->node_id = nid;
6244 	pgdat->node_start_pfn = node_start_pfn;
6245 	pgdat->per_cpu_nodestats = NULL;
6246 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6247 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6248 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6249 		(u64)start_pfn << PAGE_SHIFT,
6250 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6251 #else
6252 	start_pfn = node_start_pfn;
6253 #endif
6254 	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6255 				  zones_size, zholes_size);
6256 
6257 	alloc_node_mem_map(pgdat);
6258 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6259 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6260 		nid, (unsigned long)pgdat,
6261 		(unsigned long)pgdat->node_mem_map);
6262 #endif
6263 
6264 	reset_deferred_meminit(pgdat);
6265 	free_area_init_core(pgdat);
6266 }
6267 
6268 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6269 
6270 #if MAX_NUMNODES > 1
6271 /*
6272  * Figure out the number of possible node ids.
6273  */
6274 void __init setup_nr_node_ids(void)
6275 {
6276 	unsigned int highest;
6277 
6278 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6279 	nr_node_ids = highest + 1;
6280 }
6281 #endif
6282 
6283 /**
6284  * node_map_pfn_alignment - determine the maximum internode alignment
6285  *
6286  * This function should be called after node map is populated and sorted.
6287  * It calculates the maximum power of two alignment which can distinguish
6288  * all the nodes.
6289  *
6290  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6291  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
6292  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
6293  * shifted, 1GiB is enough and this function will indicate so.
6294  *
6295  * This is used to test whether pfn -> nid mapping of the chosen memory
6296  * model has fine enough granularity to avoid incorrect mapping for the
6297  * populated node map.
6298  *
6299  * Returns the determined alignment in pfn's.  0 if there is no alignment
6300  * requirement (single node).
6301  */
6302 unsigned long __init node_map_pfn_alignment(void)
6303 {
6304 	unsigned long accl_mask = 0, last_end = 0;
6305 	unsigned long start, end, mask;
6306 	int last_nid = -1;
6307 	int i, nid;
6308 
6309 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
6310 		if (!start || last_nid < 0 || last_nid == nid) {
6311 			last_nid = nid;
6312 			last_end = end;
6313 			continue;
6314 		}
6315 
6316 		/*
6317 		 * Start with a mask granular enough to pin-point to the
6318 		 * start pfn and tick off bits one-by-one until it becomes
6319 		 * too coarse to separate the current node from the last.
6320 		 */
6321 		mask = ~((1 << __ffs(start)) - 1);
6322 		while (mask && last_end <= (start & (mask << 1)))
6323 			mask <<= 1;
6324 
6325 		/* accumulate all internode masks */
6326 		accl_mask |= mask;
6327 	}
6328 
6329 	/* convert mask to number of pages */
6330 	return ~accl_mask + 1;
6331 }
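
/*
 * Worked example (illustrative pfns, assumes 4KiB pages): two contiguous
 * nodes covering [0x40000, 0x50000) and [0x50000, 0x60000).  For the
 * second range __ffs(0x50000) is 16, so the loop starts from
 * mask = ~0xffff and stops immediately because coarsening it would merge
 * the two nodes; ~accl_mask + 1 is then 0x10000 pfns, i.e. a 256MiB
 * internode alignment.
 */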
6332 
6333 /* Find the lowest pfn for a node */
6334 static unsigned long __init find_min_pfn_for_node(int nid)
6335 {
6336 	unsigned long min_pfn = ULONG_MAX;
6337 	unsigned long start_pfn;
6338 	int i;
6339 
6340 	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6341 		min_pfn = min(min_pfn, start_pfn);
6342 
6343 	if (min_pfn == ULONG_MAX) {
6344 		pr_warn("Could not find start_pfn for node %d\n", nid);
6345 		return 0;
6346 	}
6347 
6348 	return min_pfn;
6349 }
6350 
6351 /**
6352  * find_min_pfn_with_active_regions - Find the minimum PFN registered
6353  *
6354  * It returns the minimum PFN based on information provided via
6355  * memblock_set_node().
6356  */
6357 unsigned long __init find_min_pfn_with_active_regions(void)
6358 {
6359 	return find_min_pfn_for_node(MAX_NUMNODES);
6360 }
6361 
6362 /*
6363  * early_calculate_totalpages()
6364  * Sum pages in active regions for movable zone.
6365  * Populate N_MEMORY for calculating usable_nodes.
6366  */
6367 static unsigned long __init early_calculate_totalpages(void)
6368 {
6369 	unsigned long totalpages = 0;
6370 	unsigned long start_pfn, end_pfn;
6371 	int i, nid;
6372 
6373 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6374 		unsigned long pages = end_pfn - start_pfn;
6375 
6376 		totalpages += pages;
6377 		if (pages)
6378 			node_set_state(nid, N_MEMORY);
6379 	}
6380 	return totalpages;
6381 }
6382 
6383 /*
6384  * Find the PFN the Movable zone begins in each node. Kernel memory
6385  * is spread evenly between nodes as long as the nodes have enough
6386  * memory. When they don't, some nodes will have more kernelcore than
6387  * others
6388  */
6389 static void __init find_zone_movable_pfns_for_nodes(void)
6390 {
6391 	int i, nid;
6392 	unsigned long usable_startpfn;
6393 	unsigned long kernelcore_node, kernelcore_remaining;
6394 	/* save the state before borrowing the nodemask */
6395 	nodemask_t saved_node_state = node_states[N_MEMORY];
6396 	unsigned long totalpages = early_calculate_totalpages();
6397 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6398 	struct memblock_region *r;
6399 
6400 	/* Need to find movable_zone earlier when movable_node is specified. */
6401 	find_usable_zone_for_movable();
6402 
6403 	/*
6404 	 * If movable_node is specified, ignore kernelcore and movablecore
6405 	 * options.
6406 	 */
6407 	if (movable_node_is_enabled()) {
6408 		for_each_memblock(memory, r) {
6409 			if (!memblock_is_hotpluggable(r))
6410 				continue;
6411 
6412 			nid = r->nid;
6413 
6414 			usable_startpfn = PFN_DOWN(r->base);
6415 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6416 				min(usable_startpfn, zone_movable_pfn[nid]) :
6417 				usable_startpfn;
6418 		}
6419 
6420 		goto out2;
6421 	}
6422 
6423 	/*
6424 	 * If kernelcore=mirror is specified, ignore movablecore option
6425 	 */
6426 	if (mirrored_kernelcore) {
6427 		bool mem_below_4gb_not_mirrored = false;
6428 
6429 		for_each_memblock(memory, r) {
6430 			if (memblock_is_mirror(r))
6431 				continue;
6432 
6433 			nid = r->nid;
6434 
6435 			usable_startpfn = memblock_region_memory_base_pfn(r);
6436 
6437 			if (usable_startpfn < 0x100000) {
6438 				mem_below_4gb_not_mirrored = true;
6439 				continue;
6440 			}
6441 
6442 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6443 				min(usable_startpfn, zone_movable_pfn[nid]) :
6444 				usable_startpfn;
6445 		}
6446 
6447 		if (mem_below_4gb_not_mirrored)
6448 			pr_warn("This configuration results in unmirrored kernel memory.\n");
6449 
6450 		goto out2;
6451 	}
6452 
6453 	/*
6454 	 * If movablecore=nn[KMG] was specified, calculate what size of
6455 	 * kernelcore that corresponds so that memory usable for
6456 	 * any allocation type is evenly spread. If both kernelcore
6457 	 * and movablecore are specified, then the value of kernelcore
6458 	 * will be used for required_kernelcore if it's greater than
6459 	 * what movablecore would have allowed.
6460 	 */
6461 	if (required_movablecore) {
6462 		unsigned long corepages;
6463 
6464 		/*
6465 		 * Round up so that ZONE_MOVABLE is at least as large as what
6466 		 * was requested by the user
6467 		 */
6468 		required_movablecore =
6469 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6470 		required_movablecore = min(totalpages, required_movablecore);
6471 		corepages = totalpages - required_movablecore;
6472 
6473 		required_kernelcore = max(required_kernelcore, corepages);
6474 	}
6475 
6476 	/*
6477 	 * If kernelcore was not specified or kernelcore size is larger
6478 	 * than totalpages, there is no ZONE_MOVABLE.
6479 	 */
6480 	if (!required_kernelcore || required_kernelcore >= totalpages)
6481 		goto out;
6482 
6483 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
6484 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6485 
6486 restart:
6487 	/* Spread kernelcore memory as evenly as possible throughout nodes */
6488 	kernelcore_node = required_kernelcore / usable_nodes;
6489 	for_each_node_state(nid, N_MEMORY) {
6490 		unsigned long start_pfn, end_pfn;
6491 
6492 		/*
6493 		 * Recalculate kernelcore_node if the division per node
6494 		 * now exceeds what is necessary to satisfy the requested
6495 		 * amount of memory for the kernel
6496 		 */
6497 		if (required_kernelcore < kernelcore_node)
6498 			kernelcore_node = required_kernelcore / usable_nodes;
6499 
6500 		/*
6501 		 * As the map is walked, we track how much memory is usable
6502 		 * by the kernel using kernelcore_remaining. When it is
6503 		 * 0, the rest of the node is usable by ZONE_MOVABLE
6504 		 */
6505 		kernelcore_remaining = kernelcore_node;
6506 
6507 		/* Go through each range of PFNs within this node */
6508 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6509 			unsigned long size_pages;
6510 
6511 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6512 			if (start_pfn >= end_pfn)
6513 				continue;
6514 
6515 			/* Account for what is only usable for kernelcore */
6516 			if (start_pfn < usable_startpfn) {
6517 				unsigned long kernel_pages;
6518 				kernel_pages = min(end_pfn, usable_startpfn)
6519 								- start_pfn;
6520 
6521 				kernelcore_remaining -= min(kernel_pages,
6522 							kernelcore_remaining);
6523 				required_kernelcore -= min(kernel_pages,
6524 							required_kernelcore);
6525 
6526 				/* Continue if range is now fully accounted */
6527 				if (end_pfn <= usable_startpfn) {
6528 
6529 					/*
6530 					 * Push zone_movable_pfn to the end so
6531 					 * that if we have to rebalance
6532 					 * kernelcore across nodes, we will
6533 					 * not double account here
6534 					 */
6535 					zone_movable_pfn[nid] = end_pfn;
6536 					continue;
6537 				}
6538 				start_pfn = usable_startpfn;
6539 			}
6540 
6541 			/*
6542 			 * The usable PFN range for ZONE_MOVABLE is from
6543 			 * start_pfn->end_pfn. Calculate size_pages as the
6544 			 * number of pages used as kernelcore
6545 			 */
6546 			size_pages = end_pfn - start_pfn;
6547 			if (size_pages > kernelcore_remaining)
6548 				size_pages = kernelcore_remaining;
6549 			zone_movable_pfn[nid] = start_pfn + size_pages;
6550 
6551 			/*
6552 			 * Some kernelcore has been met, update counts and
6553 			 * break if the kernelcore for this node has been
6554 			 * satisfied
6555 			 */
6556 			required_kernelcore -= min(required_kernelcore,
6557 								size_pages);
6558 			kernelcore_remaining -= size_pages;
6559 			if (!kernelcore_remaining)
6560 				break;
6561 		}
6562 	}
6563 
6564 	/*
6565 	 * If there is still required_kernelcore, we do another pass with one
6566 	 * less node in the count. This will push zone_movable_pfn[nid] further
6567 	 * along on the nodes that still have memory until kernelcore is
6568 	 * satisfied
6569 	 */
6570 	usable_nodes--;
6571 	if (usable_nodes && required_kernelcore > usable_nodes)
6572 		goto restart;
6573 
6574 out2:
6575 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6576 	for (nid = 0; nid < MAX_NUMNODES; nid++)
6577 		zone_movable_pfn[nid] =
6578 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
6579 
6580 out:
6581 	/* restore the node_state */
6582 	node_states[N_MEMORY] = saved_node_state;
6583 }
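
/*
 * Worked example (illustrative only; assumes 4KiB pages and that each
 * node is a single usable pfn range starting at or above usable_startpfn,
 * i.e. holes and low zones are ignored): with kernelcore=2G on a two node
 * machine with 4GiB per node, required_kernelcore is 524288 pages and
 * kernelcore_node becomes 262144 pages per node.  The walk above then
 * places zone_movable_pfn[nid] 262144 pfns into each node's memory,
 * rounded up to MAX_ORDER_NR_PAGES, so roughly the first 1GiB of each
 * node stays in the kernel zones and the rest becomes ZONE_MOVABLE.
 */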
6584 
6585 /* Any regular or high memory on that node? */
6586 static void check_for_memory(pg_data_t *pgdat, int nid)
6587 {
6588 	enum zone_type zone_type;
6589 
6590 	if (N_MEMORY == N_NORMAL_MEMORY)
6591 		return;
6592 
6593 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
6594 		struct zone *zone = &pgdat->node_zones[zone_type];
6595 		if (populated_zone(zone)) {
6596 			node_set_state(nid, N_HIGH_MEMORY);
6597 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6598 			    zone_type <= ZONE_NORMAL)
6599 				node_set_state(nid, N_NORMAL_MEMORY);
6600 			break;
6601 		}
6602 	}
6603 }
6604 
6605 /**
6606  * free_area_init_nodes - Initialise all pg_data_t and zone data
6607  * @max_zone_pfn: an array of max PFNs for each zone
6608  *
6609  * This will call free_area_init_node() for each active node in the system.
6610  * Using the page ranges provided by memblock_set_node(), the size of each
6611  * zone in each node and of its holes is calculated. If the maximum PFNs
6612  * of two adjacent zones match, it is assumed that the zone is empty.
6613  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6614  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6615  * starts where the previous one ended. For example, ZONE_DMA32 starts
6616  * at arch_max_dma_pfn.
6617  */
6618 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6619 {
6620 	unsigned long start_pfn, end_pfn;
6621 	int i, nid;
6622 
6623 	/* Record where the zone boundaries are */
6624 	memset(arch_zone_lowest_possible_pfn, 0,
6625 				sizeof(arch_zone_lowest_possible_pfn));
6626 	memset(arch_zone_highest_possible_pfn, 0,
6627 				sizeof(arch_zone_highest_possible_pfn));
6628 
6629 	start_pfn = find_min_pfn_with_active_regions();
6630 
6631 	for (i = 0; i < MAX_NR_ZONES; i++) {
6632 		if (i == ZONE_MOVABLE)
6633 			continue;
6634 
6635 		end_pfn = max(max_zone_pfn[i], start_pfn);
6636 		arch_zone_lowest_possible_pfn[i] = start_pfn;
6637 		arch_zone_highest_possible_pfn[i] = end_pfn;
6638 
6639 		start_pfn = end_pfn;
6640 	}
6641 
6642 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
6643 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6644 	find_zone_movable_pfns_for_nodes();
6645 
6646 	/* Print out the zone ranges */
6647 	pr_info("Zone ranges:\n");
6648 	for (i = 0; i < MAX_NR_ZONES; i++) {
6649 		if (i == ZONE_MOVABLE)
6650 			continue;
6651 		pr_info("  %-8s ", zone_names[i]);
6652 		if (arch_zone_lowest_possible_pfn[i] ==
6653 				arch_zone_highest_possible_pfn[i])
6654 			pr_cont("empty\n");
6655 		else
6656 			pr_cont("[mem %#018Lx-%#018Lx]\n",
6657 				(u64)arch_zone_lowest_possible_pfn[i]
6658 					<< PAGE_SHIFT,
6659 				((u64)arch_zone_highest_possible_pfn[i]
6660 					<< PAGE_SHIFT) - 1);
6661 	}
6662 
6663 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
6664 	pr_info("Movable zone start for each node\n");
6665 	for (i = 0; i < MAX_NUMNODES; i++) {
6666 		if (zone_movable_pfn[i])
6667 			pr_info("  Node %d: %#018Lx\n", i,
6668 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
6669 	}
6670 
6671 	/* Print out the early node map */
6672 	pr_info("Early memory node ranges\n");
6673 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
6674 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6675 			(u64)start_pfn << PAGE_SHIFT,
6676 			((u64)end_pfn << PAGE_SHIFT) - 1);
6677 
6678 	/* Initialise every node */
6679 	mminit_verify_pageflags_layout();
6680 	setup_nr_node_ids();
6681 	for_each_online_node(nid) {
6682 		pg_data_t *pgdat = NODE_DATA(nid);
6683 		free_area_init_node(nid, NULL,
6684 				find_min_pfn_for_node(nid), NULL);
6685 
6686 		/* Any memory on that node */
6687 		if (pgdat->node_present_pages)
6688 			node_set_state(nid, N_MEMORY);
6689 		check_for_memory(pgdat, nid);
6690 	}
6691 }
6692 
6693 static int __init cmdline_parse_core(char *p, unsigned long *core)
6694 {
6695 	unsigned long long coremem;
6696 	if (!p)
6697 		return -EINVAL;
6698 
6699 	coremem = memparse(p, &p);
6700 	*core = coremem >> PAGE_SHIFT;
6701 
6702 	/* Paranoid check that UL is enough for the coremem value */
6703 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6704 
6705 	return 0;
6706 }
6707 
6708 /*
6709  * kernelcore=size sets the amount of memory for use for allocations that
6710  * cannot be reclaimed or migrated.
6711  */
6712 static int __init cmdline_parse_kernelcore(char *p)
6713 {
6714 	/* parse kernelcore=mirror */
6715 	if (parse_option_str(p, "mirror")) {
6716 		mirrored_kernelcore = true;
6717 		return 0;
6718 	}
6719 
6720 	return cmdline_parse_core(p, &required_kernelcore);
6721 }
6722 
6723 /*
6724  * movablecore=size sets the amount of memory for use for allocations that
6725  * can be reclaimed or migrated.
6726  */
6727 static int __init cmdline_parse_movablecore(char *p)
6728 {
6729 	return cmdline_parse_core(p, &required_movablecore);
6730 }
6731 
6732 early_param("kernelcore", cmdline_parse_kernelcore);
6733 early_param("movablecore", cmdline_parse_movablecore);
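
/*
 * Example boot command line usage of the options above (illustrative
 * values only):
 *
 *	kernelcore=512M		keep roughly 512MiB as unmovable kernel
 *				memory, the rest may become ZONE_MOVABLE
 *	movablecore=1G		size ZONE_MOVABLE to roughly 1GiB and
 *				derive kernelcore from the remainder
 *	kernelcore=mirror	place kernel memory only in mirrored
 *				regions, everything else in ZONE_MOVABLE
 */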
6734 
6735 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6736 
6737 void adjust_managed_page_count(struct page *page, long count)
6738 {
6739 	spin_lock(&managed_page_count_lock);
6740 	page_zone(page)->managed_pages += count;
6741 	totalram_pages += count;
6742 #ifdef CONFIG_HIGHMEM
6743 	if (PageHighMem(page))
6744 		totalhigh_pages += count;
6745 #endif
6746 	spin_unlock(&managed_page_count_lock);
6747 }
6748 EXPORT_SYMBOL(adjust_managed_page_count);
6749 
6750 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
6751 {
6752 	void *pos;
6753 	unsigned long pages = 0;
6754 
6755 	start = (void *)PAGE_ALIGN((unsigned long)start);
6756 	end = (void *)((unsigned long)end & PAGE_MASK);
6757 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6758 		if ((unsigned int)poison <= 0xFF)
6759 			memset(pos, poison, PAGE_SIZE);
6760 		free_reserved_page(virt_to_page(pos));
6761 	}
6762 
6763 	if (pages && s)
6764 		pr_info("Freeing %s memory: %ldK\n",
6765 			s, pages << (PAGE_SHIFT - 10));
6766 
6767 	return pages;
6768 }
6769 EXPORT_SYMBOL(free_reserved_area);
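
/*
 * Usage sketch (illustrative only): a typical caller hands a reserved
 * virtual range back to the buddy allocator once it is no longer needed,
 * optionally poisoning it first.  This mirrors how .init memory is
 * commonly released; the exact symbols below are assumptions for the
 * example, not a requirement of the API.
 */
#if 0	/* example only */
static void example_free_initmem(void)
{
	free_reserved_area(&__init_begin, &__init_end,
			   POISON_FREE_INITMEM, "unused kernel");
}
#endif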
6770 
6771 #ifdef	CONFIG_HIGHMEM
6772 void free_highmem_page(struct page *page)
6773 {
6774 	__free_reserved_page(page);
6775 	totalram_pages++;
6776 	page_zone(page)->managed_pages++;
6777 	totalhigh_pages++;
6778 }
6779 #endif
6780 
6781 
6782 void __init mem_init_print_info(const char *str)
6783 {
6784 	unsigned long physpages, codesize, datasize, rosize, bss_size;
6785 	unsigned long init_code_size, init_data_size;
6786 
6787 	physpages = get_num_physpages();
6788 	codesize = _etext - _stext;
6789 	datasize = _edata - _sdata;
6790 	rosize = __end_rodata - __start_rodata;
6791 	bss_size = __bss_stop - __bss_start;
6792 	init_data_size = __init_end - __init_begin;
6793 	init_code_size = _einittext - _sinittext;
6794 
6795 	/*
6796 	 * Detect special cases and adjust section sizes accordingly:
6797 	 * 1) .init.* may be embedded into .data sections
6798 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
6799 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
6800 	 * 3) .rodata.* may be embedded into .text or .data sections.
6801 	 */
6802 #define adj_init_size(start, end, size, pos, adj) \
6803 	do { \
6804 		if (start <= pos && pos < end && size > adj) \
6805 			size -= adj; \
6806 	} while (0)
6807 
6808 	adj_init_size(__init_begin, __init_end, init_data_size,
6809 		     _sinittext, init_code_size);
6810 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6811 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6812 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6813 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6814 
6815 #undef	adj_init_size
6816 
6817 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
6818 #ifdef	CONFIG_HIGHMEM
6819 		", %luK highmem"
6820 #endif
6821 		"%s%s)\n",
6822 		nr_free_pages() << (PAGE_SHIFT - 10),
6823 		physpages << (PAGE_SHIFT - 10),
6824 		codesize >> 10, datasize >> 10, rosize >> 10,
6825 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
6826 		(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6827 		totalcma_pages << (PAGE_SHIFT - 10),
6828 #ifdef	CONFIG_HIGHMEM
6829 		totalhigh_pages << (PAGE_SHIFT - 10),
6830 #endif
6831 		str ? ", " : "", str ? str : "");
6832 }
6833 
6834 /**
6835  * set_dma_reserve - set the specified number of pages reserved in the first zone
6836  * @new_dma_reserve: The number of pages to mark reserved
6837  *
6838  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
6839  * In the DMA zone, a significant percentage may be consumed by kernel image
6840  * and other unfreeable allocations which can skew the watermarks badly. This
6841  * function may optionally be used to account for unfreeable pages in the
6842  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6843  * smaller per-cpu batchsize.
6844  */
6845 void __init set_dma_reserve(unsigned long new_dma_reserve)
6846 {
6847 	dma_reserve = new_dma_reserve;
6848 }
6849 
6850 void __init free_area_init(unsigned long *zones_size)
6851 {
6852 	free_area_init_node(0, zones_size,
6853 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6854 }
6855 
6856 static int page_alloc_cpu_dead(unsigned int cpu)
6857 {
6858 
6859 	lru_add_drain_cpu(cpu);
6860 	drain_pages(cpu);
6861 
6862 	/*
6863 	 * Spill the event counters of the dead processor
6864 	 * into the current processors event counters.
6865 	 * This artificially elevates the count of the current
6866 	 * processor.
6867 	 */
6868 	vm_events_fold_cpu(cpu);
6869 
6870 	/*
6871 	 * Zero the differential counters of the dead processor
6872 	 * so that the vm statistics are consistent.
6873 	 *
6874 	 * This is only okay since the processor is dead and cannot
6875 	 * race with what we are doing.
6876 	 */
6877 	cpu_vm_stats_fold(cpu);
6878 	return 0;
6879 }
6880 
6881 void __init page_alloc_init(void)
6882 {
6883 	int ret;
6884 
6885 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6886 					"mm/page_alloc:dead", NULL,
6887 					page_alloc_cpu_dead);
6888 	WARN_ON(ret < 0);
6889 }
6890 
6891 /*
6892  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6893  *	or min_free_kbytes changes.
6894  */
6895 static void calculate_totalreserve_pages(void)
6896 {
6897 	struct pglist_data *pgdat;
6898 	unsigned long reserve_pages = 0;
6899 	enum zone_type i, j;
6900 
6901 	for_each_online_pgdat(pgdat) {
6902 
6903 		pgdat->totalreserve_pages = 0;
6904 
6905 		for (i = 0; i < MAX_NR_ZONES; i++) {
6906 			struct zone *zone = pgdat->node_zones + i;
6907 			long max = 0;
6908 
6909 			/* Find valid and maximum lowmem_reserve in the zone */
6910 			for (j = i; j < MAX_NR_ZONES; j++) {
6911 				if (zone->lowmem_reserve[j] > max)
6912 					max = zone->lowmem_reserve[j];
6913 			}
6914 
6915 			/* we treat the high watermark as reserved pages. */
6916 			max += high_wmark_pages(zone);
6917 
6918 			if (max > zone->managed_pages)
6919 				max = zone->managed_pages;
6920 
6921 			pgdat->totalreserve_pages += max;
6922 
6923 			reserve_pages += max;
6924 		}
6925 	}
6926 	totalreserve_pages = reserve_pages;
6927 }
6928 
6929 /*
6930  * setup_per_zone_lowmem_reserve - called whenever
6931  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
6932  *	has a correct pages reserved value, so an adequate number of
6933  *	pages are left in the zone after a successful __alloc_pages().
6934  */
6935 static void setup_per_zone_lowmem_reserve(void)
6936 {
6937 	struct pglist_data *pgdat;
6938 	enum zone_type j, idx;
6939 
6940 	for_each_online_pgdat(pgdat) {
6941 		for (j = 0; j < MAX_NR_ZONES; j++) {
6942 			struct zone *zone = pgdat->node_zones + j;
6943 			unsigned long managed_pages = zone->managed_pages;
6944 
6945 			zone->lowmem_reserve[j] = 0;
6946 
6947 			idx = j;
6948 			while (idx) {
6949 				struct zone *lower_zone;
6950 
6951 				idx--;
6952 
6953 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
6954 					sysctl_lowmem_reserve_ratio[idx] = 1;
6955 
6956 				lower_zone = pgdat->node_zones + idx;
6957 				lower_zone->lowmem_reserve[j] = managed_pages /
6958 					sysctl_lowmem_reserve_ratio[idx];
6959 				managed_pages += lower_zone->managed_pages;
6960 			}
6961 		}
6962 	}
6963 
6964 	/* update totalreserve_pages */
6965 	calculate_totalreserve_pages();
6966 }
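
/*
 * Editor's worked example (illustrative numbers, not from the source): with
 * the default sysctl_lowmem_reserve_ratio of 256 for ZONE_DMA and
 * ZONE_DMA32, a node with
 *
 *	ZONE_DMA    managed =     4000 pages
 *	ZONE_DMA32  managed =   500000 pages
 *	ZONE_NORMAL managed =  1000000 pages
 *
 * ends up with, for j == ZONE_NORMAL:
 *
 *	ZONE_DMA32->lowmem_reserve[ZONE_NORMAL] = 1000000 / 256          ~= 3906
 *	ZONE_DMA->lowmem_reserve[ZONE_NORMAL]   = (1000000 + 500000)/256 ~= 5859
 *
 * i.e. the further a zone sits below the allocation's preferred zone, the
 * more pages it keeps in reserve against that allocation class.
 */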
6967 
6968 static void __setup_per_zone_wmarks(void)
6969 {
6970 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6971 	unsigned long lowmem_pages = 0;
6972 	struct zone *zone;
6973 	unsigned long flags;
6974 
6975 	/* Calculate total number of !ZONE_HIGHMEM pages */
6976 	for_each_zone(zone) {
6977 		if (!is_highmem(zone))
6978 			lowmem_pages += zone->managed_pages;
6979 	}
6980 
6981 	for_each_zone(zone) {
6982 		u64 tmp;
6983 
6984 		spin_lock_irqsave(&zone->lock, flags);
6985 		tmp = (u64)pages_min * zone->managed_pages;
6986 		do_div(tmp, lowmem_pages);
6987 		if (is_highmem(zone)) {
6988 			/*
6989 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6990 			 * need highmem pages, so cap pages_min to a small
6991 			 * value here.
6992 			 *
6993 			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
6994 			 * deltas control async page reclaim, and so should
6995 			 * not be capped for highmem.
6996 			 */
6997 			unsigned long min_pages;
6998 
6999 			min_pages = zone->managed_pages / 1024;
7000 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7001 			zone->watermark[WMARK_MIN] = min_pages;
7002 		} else {
7003 			/*
7004 			 * If it's a lowmem zone, reserve a number of pages
7005 			 * proportionate to the zone's size.
7006 			 */
7007 			zone->watermark[WMARK_MIN] = tmp;
7008 		}
7009 
7010 		/*
7011 		 * Set the kswapd watermarks distance according to the
7012 		 * scale factor in proportion to available memory, but
7013 		 * ensure a minimum size on small systems.
7014 		 */
7015 		tmp = max_t(u64, tmp >> 2,
7016 			    mult_frac(zone->managed_pages,
7017 				      watermark_scale_factor, 10000));
7018 
7019 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7020 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7021 
7022 		spin_unlock_irqrestore(&zone->lock, flags);
7023 	}
7024 
7025 	/* update totalreserve_pages */
7026 	calculate_totalreserve_pages();
7027 }
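
/*
 * Editor's worked example (illustrative, assuming 4K pages and the default
 * watermark_scale_factor of 10): with min_free_kbytes = 4096, the global
 * pages_min is 4096 >> 2 = 1024 pages.  A lowmem zone holding half of all
 * lowmem (say 1,000,000 of 2,000,000 pages) then gets
 *
 *	WMARK_MIN  = 1024 * 1000000 / 2000000            =  512
 *	kswapd gap = max(512 >> 2, 1000000 * 10 / 10000) = 1000
 *	WMARK_LOW  = 512 + 1000                          = 1512
 *	WMARK_HIGH = 512 + 2 * 1000                      = 2512
 */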
7028 
7029 /**
7030  * setup_per_zone_wmarks - called when min_free_kbytes changes
7031  * or when memory is hot-{added|removed}
7032  *
7033  * Ensures that the watermark[min,low,high] values for each zone are set
7034  * correctly with respect to min_free_kbytes.
7035  */
7036 void setup_per_zone_wmarks(void)
7037 {
7038 	mutex_lock(&zonelists_mutex);
7039 	__setup_per_zone_wmarks();
7040 	mutex_unlock(&zonelists_mutex);
7041 }
7042 
7043 /*
7044  * Initialise min_free_kbytes.
7045  *
7046  * For small machines we want it small (128k min).  For large machines
7047  * we want it large (64MB max).  But it is not linear, because network
7048  * bandwidth does not increase linearly with machine size.  We use
7049  *
7050  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7051  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
7052  *
7053  * which yields
7054  *
7055  * 16MB:	512k
7056  * 32MB:	724k
7057  * 64MB:	1024k
7058  * 128MB:	1448k
7059  * 256MB:	2048k
7060  * 512MB:	2896k
7061  * 1024MB:	4096k
7062  * 2048MB:	5792k
7063  * 4096MB:	8192k
7064  * 8192MB:	11584k
7065  * 16384MB:	16384k
7066  */
7067 int __meminit init_per_zone_wmark_min(void)
7068 {
7069 	unsigned long lowmem_kbytes;
7070 	int new_min_free_kbytes;
7071 
7072 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7073 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7074 
7075 	if (new_min_free_kbytes > user_min_free_kbytes) {
7076 		min_free_kbytes = new_min_free_kbytes;
7077 		if (min_free_kbytes < 128)
7078 			min_free_kbytes = 128;
7079 		if (min_free_kbytes > 65536)
7080 			min_free_kbytes = 65536;
7081 	} else {
7082 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7083 				new_min_free_kbytes, user_min_free_kbytes);
7084 	}
7085 	setup_per_zone_wmarks();
7086 	refresh_zone_stat_thresholds();
7087 	setup_per_zone_lowmem_reserve();
7088 
7089 #ifdef CONFIG_NUMA
7090 	setup_min_unmapped_ratio();
7091 	setup_min_slab_ratio();
7092 #endif
7093 
7094 	return 0;
7095 }
7096 core_initcall(init_per_zone_wmark_min)
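
/*
 * Editor's note (illustrative): for 4GB of lowmem, lowmem_kbytes is 4194304,
 * so int_sqrt(4194304 * 16) = int_sqrt(67108864) = 8192, matching the
 * "4096MB: 8192k" row of the table above before the 128k/65536k clamping is
 * applied.
 */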
7097 
7098 /*
7099  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
7100  *	so that we can update user_min_free_kbytes and recompute the per-zone
7101  *	watermarks whenever min_free_kbytes changes.
7102  */
7103 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7104 	void __user *buffer, size_t *length, loff_t *ppos)
7105 {
7106 	int rc;
7107 
7108 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7109 	if (rc)
7110 		return rc;
7111 
7112 	if (write) {
7113 		user_min_free_kbytes = min_free_kbytes;
7114 		setup_per_zone_wmarks();
7115 	}
7116 	return 0;
7117 }
7118 
7119 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7120 	void __user *buffer, size_t *length, loff_t *ppos)
7121 {
7122 	int rc;
7123 
7124 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7125 	if (rc)
7126 		return rc;
7127 
7128 	if (write)
7129 		setup_per_zone_wmarks();
7130 
7131 	return 0;
7132 }
7133 
7134 #ifdef CONFIG_NUMA
7135 static void setup_min_unmapped_ratio(void)
7136 {
7137 	pg_data_t *pgdat;
7138 	struct zone *zone;
7139 
7140 	for_each_online_pgdat(pgdat)
7141 		pgdat->min_unmapped_pages = 0;
7142 
7143 	for_each_zone(zone)
7144 		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
7145 				sysctl_min_unmapped_ratio) / 100;
7146 }
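
/*
 * Editor's note (illustrative, assuming the default sysctl_min_unmapped_ratio
 * of 1): a node whose zones manage 2,000,000 pages in total ends up with
 * pgdat->min_unmapped_pages = 2,000,000 * 1 / 100 = 20,000, i.e. node
 * reclaim is only considered while more than 1% of the node's pages are
 * unmapped pagecache.
 */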
7147 
7148 
7149 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7150 	void __user *buffer, size_t *length, loff_t *ppos)
7151 {
7152 	int rc;
7153 
7154 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7155 	if (rc)
7156 		return rc;
7157 
7158 	setup_min_unmapped_ratio();
7159 
7160 	return 0;
7161 }
7162 
7163 static void setup_min_slab_ratio(void)
7164 {
7165 	pg_data_t *pgdat;
7166 	struct zone *zone;
7167 
7168 	for_each_online_pgdat(pgdat)
7169 		pgdat->min_slab_pages = 0;
7170 
7171 	for_each_zone(zone)
7172 		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
7173 				sysctl_min_slab_ratio) / 100;
7174 }
7175 
7176 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7177 	void __user *buffer, size_t *length, loff_t *ppos)
7178 {
7179 	int rc;
7180 
7181 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7182 	if (rc)
7183 		return rc;
7184 
7185 	setup_min_slab_ratio();
7186 
7187 	return 0;
7188 }
7189 #endif
7190 
7191 /*
7192  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7193  *	proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
7194  *	whenever sysctl_lowmem_reserve_ratio changes.
7195  *
7196  * The reserve ratio has no relation to the minimum watermarks. The
7197  * lowmem reserve ratio only makes sense as a function of the boot-time
7198  * zone sizes.
7199  */
7200 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7201 	void __user *buffer, size_t *length, loff_t *ppos)
7202 {
7203 	proc_dointvec_minmax(table, write, buffer, length, ppos);
7204 	setup_per_zone_lowmem_reserve();
7205 	return 0;
7206 }
7207 
7208 /*
7209  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
7210  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
7211  * cpu.  It is the fraction of the total pages in each zone that a hot per-cpu
7212  * pagelist can hold before it gets flushed back to the buddy allocator.
7213 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
7214 	void __user *buffer, size_t *length, loff_t *ppos)
7215 {
7216 	struct zone *zone;
7217 	int old_percpu_pagelist_fraction;
7218 	int ret;
7219 
7220 	mutex_lock(&pcp_batch_high_lock);
7221 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7222 
7223 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7224 	if (!write || ret < 0)
7225 		goto out;
7226 
7227 	/* Sanity checking to avoid pcp imbalance */
7228 	if (percpu_pagelist_fraction &&
7229 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7230 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7231 		ret = -EINVAL;
7232 		goto out;
7233 	}
7234 
7235 	/* No change? */
7236 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7237 		goto out;
7238 
7239 	for_each_populated_zone(zone) {
7240 		unsigned int cpu;
7241 
7242 		for_each_possible_cpu(cpu)
7243 			pageset_set_high_and_batch(zone,
7244 					per_cpu_ptr(zone->pageset, cpu));
7245 	}
7246 out:
7247 	mutex_unlock(&pcp_batch_high_lock);
7248 	return ret;
7249 }
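
/*
 * Editor's worked example (illustrative numbers): with
 * percpu_pagelist_fraction set to 8 on a zone managing 262144 pages (1GB of
 * 4K pages), pageset_set_high_and_batch() gives every CPU a pcp->high of
 * 262144 / 8 = 32768 pages, so no single CPU's hot list can cache more than
 * 1/8 of the zone before pages are returned to the buddy allocator.
 * MIN_PERCPU_PAGELIST_FRACTION (8) is the sanity floor checked above;
 * writing 0 falls back to the default zone_batchsize() based sizing.
 */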
7250 
7251 #ifdef CONFIG_NUMA
7252 int hashdist = HASHDIST_DEFAULT;
7253 
7254 static int __init set_hashdist(char *str)
7255 {
7256 	if (!str)
7257 		return 0;
7258 	hashdist = simple_strtoul(str, &str, 0);
7259 	return 1;
7260 }
7261 __setup("hashdist=", set_hashdist);
7262 #endif
7263 
7264 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7265 /*
7266  * Returns the number of pages that arch has reserved but
7267  * is not known to alloc_large_system_hash().
7268  */
7269 static unsigned long __init arch_reserved_kernel_pages(void)
7270 {
7271 	return 0;
7272 }
7273 #endif
7274 
7275 /*
7276  * Adaptive scale is meant to reduce sizes of hash tables on large memory
7277  * machines. As memory size is increased the scale is also increased but at
7278  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
7279  * quadruples the scale is increased by one, which means the size of hash table
7280  * only doubles, instead of quadrupling as well.
7281  * Because 32-bit systems cannot have large physical memory, where this scaling
7282  * Because 32-bit systems cannot have the large physical memory where this scaling
7283  */
7284 #if __BITS_PER_LONG > 32
7285 #define ADAPT_SCALE_BASE	(64ul << 30)
7286 #define ADAPT_SCALE_SHIFT	2
7287 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
7288 #endif
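
/*
 * Editor's worked example (illustrative, 4K pages): a 256GB machine has
 * about 67,108,864 pages.  Starting from ADAPT_SCALE_NPAGES (the page count
 * of 64GB), the loop in alloc_large_system_hash() below increments the scale
 * once, so each bucket covers 2^(scale+1) instead of 2^scale bytes and the
 * table ends up half the size linear scaling would give.  Each further
 * quadrupling of memory (to 1TB, 4TB, ...) adds one more increment, i.e. the
 * table only doubles per quadrupling of memory.
 */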
7289 
7290 /*
7291  * allocate a large system hash table from bootmem
7292  * - it is assumed that the hash table must contain an exact power-of-2
7293  *   quantity of entries
7294  * - limit is the number of hash buckets, not the total allocation size
7295  */
7296 void *__init alloc_large_system_hash(const char *tablename,
7297 				     unsigned long bucketsize,
7298 				     unsigned long numentries,
7299 				     int scale,
7300 				     int flags,
7301 				     unsigned int *_hash_shift,
7302 				     unsigned int *_hash_mask,
7303 				     unsigned long low_limit,
7304 				     unsigned long high_limit)
7305 {
7306 	unsigned long long max = high_limit;
7307 	unsigned long log2qty, size;
7308 	void *table = NULL;
7309 	gfp_t gfp_flags;
7310 
7311 	/* allow the kernel cmdline to have a say */
7312 	if (!numentries) {
7313 		/* round applicable memory size up to nearest megabyte */
7314 		numentries = nr_kernel_pages;
7315 		numentries -= arch_reserved_kernel_pages();
7316 
7317 		/* It isn't necessary when PAGE_SIZE >= 1MB */
7318 		if (PAGE_SHIFT < 20)
7319 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7320 
7321 #if __BITS_PER_LONG > 32
7322 		if (!high_limit) {
7323 			unsigned long adapt;
7324 
7325 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7326 			     adapt <<= ADAPT_SCALE_SHIFT)
7327 				scale++;
7328 		}
7329 #endif
7330 
7331 		/* limit to 1 bucket per 2^scale bytes of low memory */
7332 		if (scale > PAGE_SHIFT)
7333 			numentries >>= (scale - PAGE_SHIFT);
7334 		else
7335 			numentries <<= (PAGE_SHIFT - scale);
7336 
7337 		/* Make sure we've got at least a 0-order allocation. */
7338 		if (unlikely(flags & HASH_SMALL)) {
7339 			/* Makes no sense without HASH_EARLY */
7340 			WARN_ON(!(flags & HASH_EARLY));
7341 			if (!(numentries >> *_hash_shift)) {
7342 				numentries = 1UL << *_hash_shift;
7343 				BUG_ON(!numentries);
7344 			}
7345 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7346 			numentries = PAGE_SIZE / bucketsize;
7347 	}
7348 	numentries = roundup_pow_of_two(numentries);
7349 
7350 	/* limit allocation size to 1/16 total memory by default */
7351 	if (max == 0) {
7352 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7353 		do_div(max, bucketsize);
7354 	}
7355 	max = min(max, 0x80000000ULL);
7356 
7357 	if (numentries < low_limit)
7358 		numentries = low_limit;
7359 	if (numentries > max)
7360 		numentries = max;
7361 
7362 	log2qty = ilog2(numentries);
7363 
7364 	/*
7365 	 * memblock allocator returns zeroed memory already, so HASH_ZERO is
7366 	 * currently not used when HASH_EARLY is specified.
7367 	 */
7368 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
7369 	do {
7370 		size = bucketsize << log2qty;
7371 		if (flags & HASH_EARLY)
7372 			table = memblock_virt_alloc_nopanic(size, 0);
7373 		else if (hashdist)
7374 			table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
7375 		else {
7376 			/*
7377 			 * If bucketsize is not a power-of-two, we may free
7378 			 * some pages at the end of hash table which
7379 			 * alloc_pages_exact() automatically does
7380 			 */
7381 			if (get_order(size) < MAX_ORDER) {
7382 				table = alloc_pages_exact(size, gfp_flags);
7383 				kmemleak_alloc(table, size, 1, gfp_flags);
7384 			}
7385 		}
7386 	} while (!table && size > PAGE_SIZE && --log2qty);
7387 
7388 	if (!table)
7389 		panic("Failed to allocate %s hash table\n", tablename);
7390 
7391 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7392 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7393 
7394 	if (_hash_shift)
7395 		*_hash_shift = log2qty;
7396 	if (_hash_mask)
7397 		*_hash_mask = (1 << log2qty) - 1;
7398 
7399 	return table;
7400 }
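
/*
 * Editor's usage sketch (hypothetical call site; the names and parameter
 * values are illustrative only): a boot-time hash such as an inode- or
 * dentry-style cache would typically be sized like this:
 *
 *	my_hashtable = alloc_large_system_hash("my-cache",
 *				sizeof(struct hlist_head),
 *				my_entries,		(0 lets the size be derived from memory)
 *				14,			(1 bucket per 2^14 bytes of lowmem)
 *				HASH_EARLY | HASH_ZERO,
 *				&my_hash_shift,
 *				&my_hash_mask,
 *				0, 0);			(no explicit low/high limits)
 *
 * On return, my_hash_shift holds log2 of the bucket count and my_hash_mask
 * can be used to reduce a hash value to a bucket index.
 */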
7401 
7402 /*
7403  * This function checks whether the pageblock includes unmovable pages or not.
7404  * If @count is not zero, it is okay for the range to contain up to @count
7405  * unmovable pages.
7406  *
7407  * The PageLRU check without isolation or lru_lock could race, so a
7408  * MIGRATE_MOVABLE block might include unmovable pages.  Likewise, the
7409  * __PageMovable check without lock_page may miss some movable non-LRU pages
7410  * in a race condition, so this function cannot be expected to be exact.
7410  */
7411 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7412 			 bool skip_hwpoisoned_pages)
7413 {
7414 	unsigned long pfn, iter, found;
7415 	int mt;
7416 
7417 	/*
7418 	 * To avoid noisy data, lru_add_drain_all() should be called beforehand.
7419 	 * A ZONE_MOVABLE zone never contains unmovable pages.
7420 	 */
7421 	if (zone_idx(zone) == ZONE_MOVABLE)
7422 		return false;
7423 	mt = get_pageblock_migratetype(page);
7424 	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
7425 		return false;
7426 
7427 	pfn = page_to_pfn(page);
7428 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7429 		unsigned long check = pfn + iter;
7430 
7431 		if (!pfn_valid_within(check))
7432 			continue;
7433 
7434 		page = pfn_to_page(check);
7435 
7436 		/*
7437 		 * Hugepages are not in LRU lists, but they're movable.
7438 		 * We need not scan over tail pages because we don't
7439 		 * handle each tail page individually in migration.
7440 		 */
7441 		if (PageHuge(page)) {
7442 			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7443 			continue;
7444 		}
7445 
7446 		/*
7447 		 * We can't use page_count without pinning the page
7448 		 * because another CPU can free the compound page.
7449 		 * This check already skips compound tails of THP
7450 		 * because their page->_refcount is zero at all times.
7451 		 */
7452 		if (!page_ref_count(page)) {
7453 			if (PageBuddy(page))
7454 				iter += (1 << page_order(page)) - 1;
7455 			continue;
7456 		}
7457 
7458 		/*
7459 		 * An HWPoisoned page may not be in the buddy system, and
7460 		 * its page_count() is not 0.
7461 		 */
7462 		if (skip_hwpoisoned_pages && PageHWPoison(page))
7463 			continue;
7464 
7465 		if (__PageMovable(page))
7466 			continue;
7467 
7468 		if (!PageLRU(page))
7469 			found++;
7470 		/*
7471 		 * If there are RECLAIMABLE pages, we need to check
7472 		 * them.  But for now, memory offline itself doesn't call
7473 		 * shrink_node_slabs(), and this still needs to be fixed.
7474 		 */
7475 		/*
7476 		 * If the page is not RAM, page_count() should be 0.
7477 		 * We don't need more checks.  This is a _used_, non-movable page.
7478 		 *
7479 		 * The problematic thing here is PG_reserved pages. PG_reserved
7480 		 * is set to both of a memory hole page and a _used_ kernel
7481 		 * page at boot.
7482 		 */
7483 		if (found > count)
7484 			return true;
7485 	}
7486 	return false;
7487 }
7488 
7489 bool is_pageblock_removable_nolock(struct page *page)
7490 {
7491 	struct zone *zone;
7492 	unsigned long pfn;
7493 
7494 	/*
7495 	 * We have to be careful here because we are iterating over memory
7496 	 * sections which are not zone aware so we might end up outside of
7497 	 * the zone but still within the section.
7498 	 * We have to take care about the node as well. If the node is offline
7499 	 * its NODE_DATA will be NULL - see page_zone.
7500 	 */
7501 	if (!node_online(page_to_nid(page)))
7502 		return false;
7503 
7504 	zone = page_zone(page);
7505 	pfn = page_to_pfn(page);
7506 	if (!zone_spans_pfn(zone, pfn))
7507 		return false;
7508 
7509 	return !has_unmovable_pages(zone, page, 0, true);
7510 }
7511 
7512 #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
7513 
7514 static unsigned long pfn_max_align_down(unsigned long pfn)
7515 {
7516 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7517 			     pageblock_nr_pages) - 1);
7518 }
7519 
7520 static unsigned long pfn_max_align_up(unsigned long pfn)
7521 {
7522 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7523 				pageblock_nr_pages));
7524 }
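
/*
 * Editor's note (illustrative, assuming 4K pages with MAX_ORDER_NR_PAGES =
 * 1024 and pageblock_nr_pages = 512, as on a typical x86_64 configuration):
 * the alignment granule is max(1024, 512) = 1024 pfns, so
 *
 *	pfn_max_align_down(5000) = 4096
 *	pfn_max_align_up(5000)   = 5120
 *
 * and alloc_contig_range() isolates the wider [4096, 5120) window even when
 * the caller only asked for part of it.
 */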
7525 
7526 /* [start, end) must belong to a single zone. */
7527 static int __alloc_contig_migrate_range(struct compact_control *cc,
7528 					unsigned long start, unsigned long end)
7529 {
7530 	/* This function is based on compact_zone() from compaction.c. */
7531 	unsigned long nr_reclaimed;
7532 	unsigned long pfn = start;
7533 	unsigned int tries = 0;
7534 	int ret = 0;
7535 
7536 	migrate_prep();
7537 
7538 	while (pfn < end || !list_empty(&cc->migratepages)) {
7539 		if (fatal_signal_pending(current)) {
7540 			ret = -EINTR;
7541 			break;
7542 		}
7543 
7544 		if (list_empty(&cc->migratepages)) {
7545 			cc->nr_migratepages = 0;
7546 			pfn = isolate_migratepages_range(cc, pfn, end);
7547 			if (!pfn) {
7548 				ret = -EINTR;
7549 				break;
7550 			}
7551 			tries = 0;
7552 		} else if (++tries == 5) {
7553 			ret = ret < 0 ? ret : -EBUSY;
7554 			break;
7555 		}
7556 
7557 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7558 							&cc->migratepages);
7559 		cc->nr_migratepages -= nr_reclaimed;
7560 
7561 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
7562 				    NULL, 0, cc->mode, MR_CMA);
7563 	}
7564 	if (ret < 0) {
7565 		putback_movable_pages(&cc->migratepages);
7566 		return ret;
7567 	}
7568 	return 0;
7569 }
7570 
7571 /**
7572  * alloc_contig_range() -- tries to allocate given range of pages
7573  * @start:	start PFN to allocate
7574  * @end:	one-past-the-last PFN to allocate
7575  * @migratetype:	migratetype of the underlying pageblocks (either
7576  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
7577  *			in range must have the same migratetype and it must
7578  *			be either of the two.
7579  * @gfp_mask:	GFP mask to use during compaction
7580  *
7581  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7582  * aligned, however it's the caller's responsibility to guarantee that
7583  * we are the only thread that changes migrate type of pageblocks the
7584  * pages fall in.
7585  *
7586  * The PFN range must belong to a single zone.
7587  *
7588  * Returns zero on success or a negative error code.  On success, all
7589  * pages whose PFN is in [start, end) are allocated for the caller and
7590  * need to be freed with free_contig_range().
7591  */
7592 int alloc_contig_range(unsigned long start, unsigned long end,
7593 		       unsigned migratetype, gfp_t gfp_mask)
7594 {
7595 	unsigned long outer_start, outer_end;
7596 	unsigned int order;
7597 	int ret = 0;
7598 
7599 	struct compact_control cc = {
7600 		.nr_migratepages = 0,
7601 		.order = -1,
7602 		.zone = page_zone(pfn_to_page(start)),
7603 		.mode = MIGRATE_SYNC,
7604 		.ignore_skip_hint = true,
7605 		.gfp_mask = current_gfp_context(gfp_mask),
7606 	};
7607 	INIT_LIST_HEAD(&cc.migratepages);
7608 
7609 	/*
7610 	 * What we do here is we mark all pageblocks in range as
7611 	 * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
7612 	 * have different sizes, and due to the way the page allocator
7613 	 * works, we align the range to the bigger of the two sizes so
7614 	 * that the page allocator won't try to merge buddies from
7615 	 * different pageblocks and change MIGRATE_ISOLATE to some
7616 	 * other migration type.
7617 	 *
7618 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7619 	 * migrate the pages from an unaligned range (ie. pages that
7620 	 * we are interested in).  This will put all the pages in
7621 	 * range back to page allocator as MIGRATE_ISOLATE.
7622 	 *
7623 	 * When this is done, we take the pages in range from page
7624 	 * allocator removing them from the buddy system.  This way
7625 	 * page allocator will never consider using them.
7626 	 *
7627 	 * This lets us mark the pageblocks back as
7628 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7629 	 * aligned range but not in the unaligned, original range are
7630 	 * put back to page allocator so that buddy can use them.
7631 	 */
7632 
7633 	ret = start_isolate_page_range(pfn_max_align_down(start),
7634 				       pfn_max_align_up(end), migratetype,
7635 				       false);
7636 	if (ret)
7637 		return ret;
7638 
7639 	/*
7640 	 * In case of -EBUSY, we'd like to know which page causes problem.
7641 	 * So, just fall through. We will check it in test_pages_isolated().
7642 	 */
7643 	ret = __alloc_contig_migrate_range(&cc, start, end);
7644 	if (ret && ret != -EBUSY)
7645 		goto done;
7646 
7647 	/*
7648 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
7649 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
7650 	 * more, all pages in [start, end) are free in page allocator.
7651 	 * What we are going to do is to allocate all pages from
7652 	 * [start, end) (that is remove them from page allocator).
7653 	 *
7654 	 * The only problem is that pages at the beginning and at the
7655 	 * end of the interesting range may not be aligned with pages that
7656 	 * the page allocator holds, i.e. they can be part of higher-order
7657 	 * pages.  Because of this, we reserve the bigger range and,
7658 	 * once this is done, free the pages we are not interested in.
7659 	 *
7660 	 * We don't have to hold zone->lock here because the pages are
7661 	 * isolated and thus won't get removed from the buddy allocator.
7662 	 */
7663 
7664 	lru_add_drain_all();
7665 	drain_all_pages(cc.zone);
7666 
7667 	order = 0;
7668 	outer_start = start;
7669 	while (!PageBuddy(pfn_to_page(outer_start))) {
7670 		if (++order >= MAX_ORDER) {
7671 			outer_start = start;
7672 			break;
7673 		}
7674 		outer_start &= ~0UL << order;
7675 	}
7676 
7677 	if (outer_start != start) {
7678 		order = page_order(pfn_to_page(outer_start));
7679 
7680 		/*
7681 		 * outer_start page could be small order buddy page and
7682 		 * it doesn't include start page. Adjust outer_start
7683 		 * in this case to report failed page properly
7684 		 * on tracepoint in test_pages_isolated()
7685 		 */
7686 		if (outer_start + (1UL << order) <= start)
7687 			outer_start = start;
7688 	}
7689 
7690 	/* Make sure the range is really isolated. */
7691 	if (test_pages_isolated(outer_start, end, false)) {
7692 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
7693 			__func__, outer_start, end);
7694 		ret = -EBUSY;
7695 		goto done;
7696 	}
7697 
7698 	/* Grab isolated pages from freelists. */
7699 	outer_end = isolate_freepages_range(&cc, outer_start, end);
7700 	if (!outer_end) {
7701 		ret = -EBUSY;
7702 		goto done;
7703 	}
7704 
7705 	/* Free head and tail (if any) */
7706 	if (start != outer_start)
7707 		free_contig_range(outer_start, start - outer_start);
7708 	if (end != outer_end)
7709 		free_contig_range(end, outer_end - end);
7710 
7711 done:
7712 	undo_isolate_page_range(pfn_max_align_down(start),
7713 				pfn_max_align_up(end), migratetype);
7714 	return ret;
7715 }
7716 
7717 void free_contig_range(unsigned long pfn, unsigned nr_pages)
7718 {
7719 	unsigned int count = 0;
7720 
7721 	for (; nr_pages--; pfn++) {
7722 		struct page *page = pfn_to_page(pfn);
7723 
7724 		count += page_count(page) != 1;
7725 		__free_page(page);
7726 	}
7727 	WARN(count != 0, "%d pages are still in use!\n", count);
7728 }
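
/*
 * Editor's usage sketch (hypothetical, illustrative PFNs): a CMA-style user
 * that needs 2MB of physically contiguous memory, with the pageblocks
 * already marked MIGRATE_CMA, could do:
 *
 *	ret = alloc_contig_range(pfn, pfn + 512, MIGRATE_CMA, GFP_KERNEL);
 *	if (!ret) {
 *		... use pfn_to_page(pfn) .. pfn_to_page(pfn + 511) ...
 *		free_contig_range(pfn, 512);
 *	}
 *
 * The call may fail with -EBUSY if any page in the aligned window is pinned
 * or otherwise unmovable; callers are expected to retry or fall back to a
 * smaller request.
 */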
7729 #endif
7730 
7731 #ifdef CONFIG_MEMORY_HOTPLUG
7732 /*
7733  * The zone indicated has a new number of managed_pages; batch sizes and percpu
7734  * page high values need to be recalculated.
7735  */
7736 void __meminit zone_pcp_update(struct zone *zone)
7737 {
7738 	unsigned cpu;
7739 	mutex_lock(&pcp_batch_high_lock);
7740 	for_each_possible_cpu(cpu)
7741 		pageset_set_high_and_batch(zone,
7742 				per_cpu_ptr(zone->pageset, cpu));
7743 	mutex_unlock(&pcp_batch_high_lock);
7744 }
7745 #endif
7746 
7747 void zone_pcp_reset(struct zone *zone)
7748 {
7749 	unsigned long flags;
7750 	int cpu;
7751 	struct per_cpu_pageset *pset;
7752 
7753 	/* avoid races with drain_pages()  */
7754 	local_irq_save(flags);
7755 	if (zone->pageset != &boot_pageset) {
7756 		for_each_online_cpu(cpu) {
7757 			pset = per_cpu_ptr(zone->pageset, cpu);
7758 			drain_zonestat(zone, pset);
7759 		}
7760 		free_percpu(zone->pageset);
7761 		zone->pageset = &boot_pageset;
7762 	}
7763 	local_irq_restore(flags);
7764 }
7765 
7766 #ifdef CONFIG_MEMORY_HOTREMOVE
7767 /*
7768  * All pages in the range must be in a single zone and isolated
7769  * before calling this.
7770  */
7771 void
7772 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7773 {
7774 	struct page *page;
7775 	struct zone *zone;
7776 	unsigned int order, i;
7777 	unsigned long pfn;
7778 	unsigned long flags;
7779 	/* find the first valid pfn */
7780 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
7781 		if (pfn_valid(pfn))
7782 			break;
7783 	if (pfn == end_pfn)
7784 		return;
7785 	offline_mem_sections(pfn, end_pfn);
7786 	zone = page_zone(pfn_to_page(pfn));
7787 	spin_lock_irqsave(&zone->lock, flags);
7788 	pfn = start_pfn;
7789 	while (pfn < end_pfn) {
7790 		if (!pfn_valid(pfn)) {
7791 			pfn++;
7792 			continue;
7793 		}
7794 		page = pfn_to_page(pfn);
7795 		/*
7796 		 * An HWPoisoned page may not be in the buddy system, and
7797 		 * its page_count() is not 0.
7798 		 */
7799 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7800 			pfn++;
7801 			SetPageReserved(page);
7802 			continue;
7803 		}
7804 
7805 		BUG_ON(page_count(page));
7806 		BUG_ON(!PageBuddy(page));
7807 		order = page_order(page);
7808 #ifdef CONFIG_DEBUG_VM
7809 		pr_info("remove from free list %lx %d %lx\n",
7810 			pfn, 1 << order, end_pfn);
7811 #endif
7812 		list_del(&page->lru);
7813 		rmv_page_order(page);
7814 		zone->free_area[order].nr_free--;
7815 		for (i = 0; i < (1 << order); i++)
7816 			SetPageReserved((page+i));
7817 		pfn += (1 << order);
7818 	}
7819 	spin_unlock_irqrestore(&zone->lock, flags);
7820 }
7821 #endif
7822 
7823 bool is_free_buddy_page(struct page *page)
7824 {
7825 	struct zone *zone = page_zone(page);
7826 	unsigned long pfn = page_to_pfn(page);
7827 	unsigned long flags;
7828 	unsigned int order;
7829 
7830 	spin_lock_irqsave(&zone->lock, flags);
7831 	for (order = 0; order < MAX_ORDER; order++) {
7832 		struct page *page_head = page - (pfn & ((1 << order) - 1));
7833 
7834 		if (PageBuddy(page_head) && page_order(page_head) >= order)
7835 			break;
7836 	}
7837 	spin_unlock_irqrestore(&zone->lock, flags);
7838 
7839 	return order < MAX_ORDER;
7840 }
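
/*
 * Editor's note (illustrative): the loop above walks the candidate buddy
 * heads of @page.  For pfn 0x12345 and order 3, the candidate head is at
 * pfn 0x12345 & ~0x7 = 0x12340; if that page is PageBuddy with
 * page_order() >= 3, then @page lies inside a free order-3 (or larger)
 * buddy block and the function returns true.
 */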
7841