xref: /openbmc/linux/mm/page_alloc.c (revision 6189f1b0)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/notifier.h>
37 #include <linux/topology.h>
38 #include <linux/sysctl.h>
39 #include <linux/cpu.h>
40 #include <linux/cpuset.h>
41 #include <linux/memory_hotplug.h>
42 #include <linux/nodemask.h>
43 #include <linux/vmalloc.h>
44 #include <linux/vmstat.h>
45 #include <linux/mempolicy.h>
46 #include <linux/stop_machine.h>
47 #include <linux/sort.h>
48 #include <linux/pfn.h>
49 #include <linux/backing-dev.h>
50 #include <linux/fault-inject.h>
51 #include <linux/page-isolation.h>
52 #include <linux/page_ext.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <linux/prefetch.h>
58 #include <linux/mm_inline.h>
59 #include <linux/migrate.h>
61 #include <linux/hugetlb.h>
62 #include <linux/sched/rt.h>
63 #include <linux/page_owner.h>
64 #include <linux/kthread.h>
65 
66 #include <asm/sections.h>
67 #include <asm/tlbflush.h>
68 #include <asm/div64.h>
69 #include "internal.h"
70 
71 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
72 static DEFINE_MUTEX(pcp_batch_high_lock);
73 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
74 
75 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
76 DEFINE_PER_CPU(int, numa_node);
77 EXPORT_PER_CPU_SYMBOL(numa_node);
78 #endif
79 
80 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
81 /*
82  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
83  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
84  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
85  * defined in <linux/topology.h>.
86  */
87 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
88 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
89 int _node_numa_mem_[MAX_NUMNODES];
90 #endif
91 
92 /*
93  * Array of node states.
94  */
95 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
96 	[N_POSSIBLE] = NODE_MASK_ALL,
97 	[N_ONLINE] = { { [0] = 1UL } },
98 #ifndef CONFIG_NUMA
99 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
100 #ifdef CONFIG_HIGHMEM
101 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
102 #endif
103 #ifdef CONFIG_MOVABLE_NODE
104 	[N_MEMORY] = { { [0] = 1UL } },
105 #endif
106 	[N_CPU] = { { [0] = 1UL } },
107 #endif	/* NUMA */
108 };
109 EXPORT_SYMBOL(node_states);
110 
111 /* Protect totalram_pages and zone->managed_pages */
112 static DEFINE_SPINLOCK(managed_page_count_lock);
113 
114 unsigned long totalram_pages __read_mostly;
115 unsigned long totalreserve_pages __read_mostly;
116 unsigned long totalcma_pages __read_mostly;
117 /*
118  * When calculating the number of globally allowed dirty pages, there
119  * is a certain number of per-zone reserves that should not be
120  * considered dirtyable memory.  This is the sum of those reserves
121  * over all existing zones that contribute dirtyable memory.
122  */
123 unsigned long dirty_balance_reserve __read_mostly;
124 
125 int percpu_pagelist_fraction;
126 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
127 
128 #ifdef CONFIG_PM_SLEEP
129 /*
130  * The following functions are used by the suspend/hibernate code to temporarily
131  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
132  * while devices are suspended.  To avoid races with the suspend/hibernate code,
133  * they should always be called with pm_mutex held (gfp_allowed_mask also should
134  * only be modified with pm_mutex held, unless the suspend/hibernate code is
135  * guaranteed not to run in parallel with that modification).
136  */
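/*
 * Typical call pattern (an illustrative sketch; lock_system_sleep() is
 * assumed to be the pm_mutex-taking helper used by the suspend core):
 *
 *	lock_system_sleep();
 *	pm_restrict_gfp_mask();
 *	...suspend devices; allocations cannot use I/O...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 */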
137 
138 static gfp_t saved_gfp_mask;
139 
140 void pm_restore_gfp_mask(void)
141 {
142 	WARN_ON(!mutex_is_locked(&pm_mutex));
143 	if (saved_gfp_mask) {
144 		gfp_allowed_mask = saved_gfp_mask;
145 		saved_gfp_mask = 0;
146 	}
147 }
148 
149 void pm_restrict_gfp_mask(void)
150 {
151 	WARN_ON(!mutex_is_locked(&pm_mutex));
152 	WARN_ON(saved_gfp_mask);
153 	saved_gfp_mask = gfp_allowed_mask;
154 	gfp_allowed_mask &= ~GFP_IOFS;
155 }
156 
157 bool pm_suspended_storage(void)
158 {
159 	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
160 		return false;
161 	return true;
162 }
163 #endif /* CONFIG_PM_SLEEP */
164 
165 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
166 int pageblock_order __read_mostly;
167 #endif
168 
169 static void __free_pages_ok(struct page *page, unsigned int order);
170 
171 /*
172  * results with 256, 32 in the lowmem_reserve sysctl:
173  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
174  *	1G machine -> (16M dma, 784M normal, 224M high)
175  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
176  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
177  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
178  *
179  * TBD: should special case ZONE_DMA32 machines here - in those we normally
180  * don't need any ZONE_NORMAL reservation
181  */
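/*
 * Worked example for the second 1G layout above: a NORMAL allocation
 * leaves 784M/256 ~= 3M of ZONE_DMA reserved; a HIGHMEM allocation
 * leaves 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 4M of
 * ZONE_DMA reserved.
 */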
182 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
183 #ifdef CONFIG_ZONE_DMA
184 	 256,
185 #endif
186 #ifdef CONFIG_ZONE_DMA32
187 	 256,
188 #endif
189 #ifdef CONFIG_HIGHMEM
190 	 32,
191 #endif
192 	 32,
193 };
194 
195 EXPORT_SYMBOL(totalram_pages);
196 
197 static char * const zone_names[MAX_NR_ZONES] = {
198 #ifdef CONFIG_ZONE_DMA
199 	 "DMA",
200 #endif
201 #ifdef CONFIG_ZONE_DMA32
202 	 "DMA32",
203 #endif
204 	 "Normal",
205 #ifdef CONFIG_HIGHMEM
206 	 "HighMem",
207 #endif
208 	 "Movable",
209 };
210 
211 int min_free_kbytes = 1024;
212 int user_min_free_kbytes = -1;
213 
214 static unsigned long __meminitdata nr_kernel_pages;
215 static unsigned long __meminitdata nr_all_pages;
216 static unsigned long __meminitdata dma_reserve;
217 
218 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
219 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
220 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
221 static unsigned long __initdata required_kernelcore;
222 static unsigned long __initdata required_movablecore;
223 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
224 
225 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
226 int movable_zone;
227 EXPORT_SYMBOL(movable_zone);
228 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
229 
230 #if MAX_NUMNODES > 1
231 int nr_node_ids __read_mostly = MAX_NUMNODES;
232 int nr_online_nodes __read_mostly = 1;
233 EXPORT_SYMBOL(nr_node_ids);
234 EXPORT_SYMBOL(nr_online_nodes);
235 #endif
236 
237 int page_group_by_mobility_disabled __read_mostly;
238 
239 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
240 static inline void reset_deferred_meminit(pg_data_t *pgdat)
241 {
242 	pgdat->first_deferred_pfn = ULONG_MAX;
243 }
244 
245 /* Returns true if the struct page for the pfn is uninitialised */
246 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
247 {
248 	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
249 		return true;
250 
251 	return false;
252 }
253 
254 static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
255 {
256 	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
257 		return true;
258 
259 	return false;
260 }
261 
262 /*
263  * Returns false when the remaining initialisation should be deferred until
264  * later in the boot cycle when it can be parallelised.
265  */
266 static inline bool update_defer_init(pg_data_t *pgdat,
267 				unsigned long pfn, unsigned long zone_end,
268 				unsigned long *nr_initialised)
269 {
270 	/* Always populate low zones for address-constrained allocations */
271 	if (zone_end < pgdat_end_pfn(pgdat))
272 		return true;
273 
274 	/* Initialise at least 2G of the highest zone */
275 	(*nr_initialised)++;
276 	if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
277 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
278 		pgdat->first_deferred_pfn = pfn;
279 		return false;
280 	}
281 
282 	return true;
283 }
284 #else
285 static inline void reset_deferred_meminit(pg_data_t *pgdat)
286 {
287 }
288 
289 static inline bool early_page_uninitialised(unsigned long pfn)
290 {
291 	return false;
292 }
293 
294 static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
295 {
296 	return false;
297 }
298 
299 static inline bool update_defer_init(pg_data_t *pgdat,
300 				unsigned long pfn, unsigned long zone_end,
301 				unsigned long *nr_initialised)
302 {
303 	return true;
304 }
305 #endif
306 
307 
308 void set_pageblock_migratetype(struct page *page, int migratetype)
309 {
310 	if (unlikely(page_group_by_mobility_disabled &&
311 		     migratetype < MIGRATE_PCPTYPES))
312 		migratetype = MIGRATE_UNMOVABLE;
313 
314 	set_pageblock_flags_group(page, (unsigned long)migratetype,
315 					PB_migrate, PB_migrate_end);
316 }
317 
318 #ifdef CONFIG_DEBUG_VM
319 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
320 {
321 	int ret = 0;
322 	unsigned seq;
323 	unsigned long pfn = page_to_pfn(page);
324 	unsigned long sp, start_pfn;
325 
326 	do {
327 		seq = zone_span_seqbegin(zone);
328 		start_pfn = zone->zone_start_pfn;
329 		sp = zone->spanned_pages;
330 		if (!zone_spans_pfn(zone, pfn))
331 			ret = 1;
332 	} while (zone_span_seqretry(zone, seq));
333 
334 	if (ret)
335 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
336 			pfn, zone_to_nid(zone), zone->name,
337 			start_pfn, start_pfn + sp);
338 
339 	return ret;
340 }
341 
342 static int page_is_consistent(struct zone *zone, struct page *page)
343 {
344 	if (!pfn_valid_within(page_to_pfn(page)))
345 		return 0;
346 	if (zone != page_zone(page))
347 		return 0;
348 
349 	return 1;
350 }
351 /*
352  * Temporary debugging check for pages not lying within a given zone.
353  */
354 static int bad_range(struct zone *zone, struct page *page)
355 {
356 	if (page_outside_zone_boundaries(zone, page))
357 		return 1;
358 	if (!page_is_consistent(zone, page))
359 		return 1;
360 
361 	return 0;
362 }
363 #else
364 static inline int bad_range(struct zone *zone, struct page *page)
365 {
366 	return 0;
367 }
368 #endif
369 
370 static void bad_page(struct page *page, const char *reason,
371 		unsigned long bad_flags)
372 {
373 	static unsigned long resume;
374 	static unsigned long nr_shown;
375 	static unsigned long nr_unshown;
376 
377 	/* Don't complain about poisoned pages */
378 	if (PageHWPoison(page)) {
379 		page_mapcount_reset(page); /* remove PageBuddy */
380 		return;
381 	}
382 
383 	/*
384 	 * Allow a burst of 60 reports, then keep quiet for that minute;
385 	 * or allow a steady drip of one report per second.
386 	 */
387 	if (nr_shown == 60) {
388 		if (time_before(jiffies, resume)) {
389 			nr_unshown++;
390 			goto out;
391 		}
392 		if (nr_unshown) {
393 			printk(KERN_ALERT
394 			      "BUG: Bad page state: %lu messages suppressed\n",
395 				nr_unshown);
396 			nr_unshown = 0;
397 		}
398 		nr_shown = 0;
399 	}
400 	if (nr_shown++ == 0)
401 		resume = jiffies + 60 * HZ;
402 
403 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
404 		current->comm, page_to_pfn(page));
405 	dump_page_badflags(page, reason, bad_flags);
406 
407 	print_modules();
408 	dump_stack();
409 out:
410 	/* Leave bad fields for debug, except PageBuddy could make trouble */
411 	page_mapcount_reset(page); /* remove PageBuddy */
412 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
413 }
414 
415 /*
416  * Higher-order pages are called "compound pages".  They are structured thusly:
417  *
418  * The first PAGE_SIZE page is called the "head page".
419  *
420  * The remaining PAGE_SIZE pages are called "tail pages".
421  *
422  * All pages have PG_compound set.  All tail pages have their ->first_page
423  * pointing at the head page.
424  *
425  * The first tail page's ->lru.next holds the address of the compound page's
426  * put_page() function.  Its ->lru.prev holds the order of allocation.
427  * This usage means that zero-order pages may not be compound.
428  */
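/*
 * Illustrative sketch (layout assumption per the comment above, not a
 * public API): the destructor and order could be read back from the
 * first tail page as
 *
 *	dtor  = (compound_page_dtor *)page[1].lru.next;
 *	order = (unsigned long)page[1].lru.prev;
 *
 * which is what helpers like compound_order() are assumed to rely on.
 */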
429 
430 static void free_compound_page(struct page *page)
431 {
432 	__free_pages_ok(page, compound_order(page));
433 }
434 
435 void prep_compound_page(struct page *page, unsigned long order)
436 {
437 	int i;
438 	int nr_pages = 1 << order;
439 
440 	set_compound_page_dtor(page, free_compound_page);
441 	set_compound_order(page, order);
442 	__SetPageHead(page);
443 	for (i = 1; i < nr_pages; i++) {
444 		struct page *p = page + i;
445 		set_page_count(p, 0);
446 		p->first_page = page;
447 		/* Make sure p->first_page is always valid for PageTail() */
448 		smp_wmb();
449 		__SetPageTail(p);
450 	}
451 }
452 
453 #ifdef CONFIG_DEBUG_PAGEALLOC
454 unsigned int _debug_guardpage_minorder;
455 bool _debug_pagealloc_enabled __read_mostly;
456 bool _debug_guardpage_enabled __read_mostly;
457 
458 static int __init early_debug_pagealloc(char *buf)
459 {
460 	if (!buf)
461 		return -EINVAL;
462 
463 	if (strcmp(buf, "on") == 0)
464 		_debug_pagealloc_enabled = true;
465 
466 	return 0;
467 }
468 early_param("debug_pagealloc", early_debug_pagealloc);
469 
470 static bool need_debug_guardpage(void)
471 {
472 	/* If we don't use debug_pagealloc, we don't need guard page */
473 	if (!debug_pagealloc_enabled())
474 		return false;
475 
476 	return true;
477 }
478 
479 static void init_debug_guardpage(void)
480 {
481 	if (!debug_pagealloc_enabled())
482 		return;
483 
484 	_debug_guardpage_enabled = true;
485 }
486 
487 struct page_ext_operations debug_guardpage_ops = {
488 	.need = need_debug_guardpage,
489 	.init = init_debug_guardpage,
490 };
491 
492 static int __init debug_guardpage_minorder_setup(char *buf)
493 {
494 	unsigned long res;
495 
496 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
497 		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
498 		return 0;
499 	}
500 	_debug_guardpage_minorder = res;
501 	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
502 	return 0;
503 }
504 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
505 
506 static inline void set_page_guard(struct zone *zone, struct page *page,
507 				unsigned int order, int migratetype)
508 {
509 	struct page_ext *page_ext;
510 
511 	if (!debug_guardpage_enabled())
512 		return;
513 
514 	page_ext = lookup_page_ext(page);
515 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
516 
517 	INIT_LIST_HEAD(&page->lru);
518 	set_page_private(page, order);
519 	/* Guard pages are not available for any usage */
520 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
521 }
522 
523 static inline void clear_page_guard(struct zone *zone, struct page *page,
524 				unsigned int order, int migratetype)
525 {
526 	struct page_ext *page_ext;
527 
528 	if (!debug_guardpage_enabled())
529 		return;
530 
531 	page_ext = lookup_page_ext(page);
532 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
533 
534 	set_page_private(page, 0);
535 	if (!is_migrate_isolate(migratetype))
536 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
537 }
538 #else
539 struct page_ext_operations debug_guardpage_ops = { NULL, };
540 static inline void set_page_guard(struct zone *zone, struct page *page,
541 				unsigned int order, int migratetype) {}
542 static inline void clear_page_guard(struct zone *zone, struct page *page,
543 				unsigned int order, int migratetype) {}
544 #endif
545 
546 static inline void set_page_order(struct page *page, unsigned int order)
547 {
548 	set_page_private(page, order);
549 	__SetPageBuddy(page);
550 }
551 
552 static inline void rmv_page_order(struct page *page)
553 {
554 	__ClearPageBuddy(page);
555 	set_page_private(page, 0);
556 }
557 
558 /*
559  * This function checks whether a page is free && is the buddy,
560  * i.e. we can coalesce a page and its buddy if
561  * (a) the buddy is not in a hole &&
562  * (b) the buddy is in the buddy system &&
563  * (c) a page and its buddy have the same order &&
564  * (d) a page and its buddy are in the same zone.
565  *
566  * For recording whether a page is in the buddy system, we set ->_mapcount
567  * to PAGE_BUDDY_MAPCOUNT_VALUE.
568  * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE
569  * is serialized by zone->lock.
570  *
571  * For recording a page's order, we use page_private(page).
572  */
573 static inline int page_is_buddy(struct page *page, struct page *buddy,
574 							unsigned int order)
575 {
576 	if (!pfn_valid_within(page_to_pfn(buddy)))
577 		return 0;
578 
579 	if (page_is_guard(buddy) && page_order(buddy) == order) {
580 		if (page_zone_id(page) != page_zone_id(buddy))
581 			return 0;
582 
583 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
584 
585 		return 1;
586 	}
587 
588 	if (PageBuddy(buddy) && page_order(buddy) == order) {
589 		/*
590 		 * zone check is done late to avoid uselessly
591 		 * calculating zone/node ids for pages that could
592 		 * never merge.
593 		 */
594 		if (page_zone_id(page) != page_zone_id(buddy))
595 			return 0;
596 
597 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
598 
599 		return 1;
600 	}
601 	return 0;
602 }
603 
604 /*
605  * Freeing function for a buddy system allocator.
606  *
607  * The concept of a buddy system is to maintain a direct-mapped table
608  * (containing bit values) for memory blocks of various "orders".
609  * The bottom level table contains the map for the smallest allocatable
610  * units of memory (here, pages), and each level above it describes
611  * pairs of units from the levels below, hence, "buddies".
612  * At a high level, all that happens here is marking the table entry
613  * at the bottom level available, and propagating the changes upward
614  * as necessary, plus some accounting needed to play nicely with other
615  * parts of the VM system.
616  * At each level, we keep a list of pages, which are heads of contiguous
617  * runs of free pages of length (1 << order), marked with _mapcount
618  * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
619  * page_private(page) field.
620  * So when we are allocating or freeing one, we can derive the state of the
621  * other.  That is, if we allocate a small block, and both were
622  * free, the remainder of the region must be split into blocks.
623  * If a block is freed, and its buddy is also free, then this
624  * triggers coalescing into a block of larger size.
625  *
626  * -- nyc
627  */
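/*
 * Illustrative sketch of the index arithmetic used below (assuming
 * __find_buddy_index() is the usual XOR trick): within a
 * max_order-aligned group,
 *
 *	buddy_idx    = page_idx ^ (1 << order);
 *	combined_idx = buddy_idx & page_idx;
 *
 * so the buddy differs only in bit 'order', and the merged block
 * starts at the lower of the two indices.
 */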
628 
629 static inline void __free_one_page(struct page *page,
630 		unsigned long pfn,
631 		struct zone *zone, unsigned int order,
632 		int migratetype)
633 {
634 	unsigned long page_idx;
635 	unsigned long combined_idx;
636 	unsigned long uninitialized_var(buddy_idx);
637 	struct page *buddy;
638 	int max_order = MAX_ORDER;
639 
640 	VM_BUG_ON(!zone_is_initialized(zone));
641 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
642 
643 	VM_BUG_ON(migratetype == -1);
644 	if (is_migrate_isolate(migratetype)) {
645 		/*
646 		 * We restrict the max order of merging to prevent merging
647 		 * between freepages on an isolated pageblock and a normal
648 		 * pageblock. Without this, pageblock isolation
649 		 * could cause incorrect freepage accounting.
650 		 */
651 		max_order = min(MAX_ORDER, pageblock_order + 1);
652 	} else {
653 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
654 	}
655 
656 	page_idx = pfn & ((1 << max_order) - 1);
657 
658 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
659 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
660 
661 	while (order < max_order - 1) {
662 		buddy_idx = __find_buddy_index(page_idx, order);
663 		buddy = page + (buddy_idx - page_idx);
664 		if (!page_is_buddy(page, buddy, order))
665 			break;
666 		/*
667 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
668 		 * merge with it and move up one order.
669 		 */
670 		if (page_is_guard(buddy)) {
671 			clear_page_guard(zone, buddy, order, migratetype);
672 		} else {
673 			list_del(&buddy->lru);
674 			zone->free_area[order].nr_free--;
675 			rmv_page_order(buddy);
676 		}
677 		combined_idx = buddy_idx & page_idx;
678 		page = page + (combined_idx - page_idx);
679 		page_idx = combined_idx;
680 		order++;
681 	}
682 	set_page_order(page, order);
683 
684 	/*
685 	 * If this is not the largest possible page, check if the buddy
686 	 * of the next-highest order is free. If it is, it's possible
687 	 * that pages are being freed that will coalesce soon. In case
688 	 * that is happening, add the free page to the tail of the list
689 	 * so it's less likely to be used soon and more likely to be merged
690 	 * as a higher-order page.
691 	 */
692 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
693 		struct page *higher_page, *higher_buddy;
694 		combined_idx = buddy_idx & page_idx;
695 		higher_page = page + (combined_idx - page_idx);
696 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
697 		higher_buddy = higher_page + (buddy_idx - combined_idx);
698 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
699 			list_add_tail(&page->lru,
700 				&zone->free_area[order].free_list[migratetype]);
701 			goto out;
702 		}
703 	}
704 
705 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
706 out:
707 	zone->free_area[order].nr_free++;
708 }
709 
710 static inline int free_pages_check(struct page *page)
711 {
712 	const char *bad_reason = NULL;
713 	unsigned long bad_flags = 0;
714 
715 	if (unlikely(page_mapcount(page)))
716 		bad_reason = "nonzero mapcount";
717 	if (unlikely(page->mapping != NULL))
718 		bad_reason = "non-NULL mapping";
719 	if (unlikely(atomic_read(&page->_count) != 0))
720 		bad_reason = "nonzero _count";
721 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
722 		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
723 		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
724 	}
725 #ifdef CONFIG_MEMCG
726 	if (unlikely(page->mem_cgroup))
727 		bad_reason = "page still charged to cgroup";
728 #endif
729 	if (unlikely(bad_reason)) {
730 		bad_page(page, bad_reason, bad_flags);
731 		return 1;
732 	}
733 	page_cpupid_reset_last(page);
734 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
735 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
736 	return 0;
737 }
738 
739 /*
740  * Frees a number of pages from the PCP lists
741  * Assumes all pages on list are in same zone, and of same order.
742  * count is the number of pages to free.
743  *
744  * If the zone was previously in an "all pages pinned" state then look to
745  * see if this freeing clears that state.
746  *
747  * And clear the zone's pages_scanned counter, to hold off the "all pages are
748  * pinned" detection logic.
749  */
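/*
 * Round-robin sketch for the loop below (assuming the usual three pcp
 * migratetypes UNMOVABLE, RECLAIMABLE, MOVABLE): the lists are visited
 * cyclically, and batch_free grows by one for each empty list skipped,
 * so fuller lists give up proportionally more pages per pass.
 */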
750 static void free_pcppages_bulk(struct zone *zone, int count,
751 					struct per_cpu_pages *pcp)
752 {
753 	int migratetype = 0;
754 	int batch_free = 0;
755 	int to_free = count;
756 	unsigned long nr_scanned;
757 
758 	spin_lock(&zone->lock);
759 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
760 	if (nr_scanned)
761 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
762 
763 	while (to_free) {
764 		struct page *page;
765 		struct list_head *list;
766 
767 		/*
768 		 * Remove pages from lists in a round-robin fashion. A
769 		 * batch_free count is maintained that is incremented when an
770 		 * empty list is encountered.  This is so more pages are freed
771 		 * off fuller lists instead of spinning excessively around empty
772 		 * lists
773 		 */
774 		do {
775 			batch_free++;
776 			if (++migratetype == MIGRATE_PCPTYPES)
777 				migratetype = 0;
778 			list = &pcp->lists[migratetype];
779 		} while (list_empty(list));
780 
781 		/* This is the only non-empty list. Free them all. */
782 		if (batch_free == MIGRATE_PCPTYPES)
783 			batch_free = to_free;
784 
785 		do {
786 			int mt;	/* migratetype of the to-be-freed page */
787 
788 			page = list_entry(list->prev, struct page, lru);
789 			/* must delete as __free_one_page list manipulates */
790 			list_del(&page->lru);
791 			mt = get_freepage_migratetype(page);
792 			if (unlikely(has_isolate_pageblock(zone)))
793 				mt = get_pageblock_migratetype(page);
794 
795 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
796 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
797 			trace_mm_page_pcpu_drain(page, 0, mt);
798 		} while (--to_free && --batch_free && !list_empty(list));
799 	}
800 	spin_unlock(&zone->lock);
801 }
802 
803 static void free_one_page(struct zone *zone,
804 				struct page *page, unsigned long pfn,
805 				unsigned int order,
806 				int migratetype)
807 {
808 	unsigned long nr_scanned;
809 	spin_lock(&zone->lock);
810 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
811 	if (nr_scanned)
812 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
813 
814 	if (unlikely(has_isolate_pageblock(zone) ||
815 		is_migrate_isolate(migratetype))) {
816 		migratetype = get_pfnblock_migratetype(page, pfn);
817 	}
818 	__free_one_page(page, pfn, zone, order, migratetype);
819 	spin_unlock(&zone->lock);
820 }
821 
822 static int free_tail_pages_check(struct page *head_page, struct page *page)
823 {
824 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
825 		return 0;
826 	if (unlikely(!PageTail(page))) {
827 		bad_page(page, "PageTail not set", 0);
828 		return 1;
829 	}
830 	if (unlikely(page->first_page != head_page)) {
831 		bad_page(page, "first_page not consistent", 0);
832 		return 1;
833 	}
834 	return 0;
835 }
836 
837 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
838 				unsigned long zone, int nid)
839 {
840 	set_page_links(page, zone, nid, pfn);
841 	init_page_count(page);
842 	page_mapcount_reset(page);
843 	page_cpupid_reset_last(page);
844 
845 	INIT_LIST_HEAD(&page->lru);
846 #ifdef WANT_PAGE_VIRTUAL
847 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
848 	if (!is_highmem_idx(zone))
849 		set_page_address(page, __va(pfn << PAGE_SHIFT));
850 #endif
851 }
852 
853 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
854 					int nid)
855 {
856 	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
857 }
858 
859 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
860 static void init_reserved_page(unsigned long pfn)
861 {
862 	pg_data_t *pgdat;
863 	int nid, zid;
864 
865 	if (!early_page_uninitialised(pfn))
866 		return;
867 
868 	nid = early_pfn_to_nid(pfn);
869 	pgdat = NODE_DATA(nid);
870 
871 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
872 		struct zone *zone = &pgdat->node_zones[zid];
873 
874 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
875 			break;
876 	}
877 	__init_single_pfn(pfn, zid, nid);
878 }
879 #else
880 static inline void init_reserved_page(unsigned long pfn)
881 {
882 }
883 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
884 
885 /*
886  * Initialised pages do not have PageReserved set. This function is
887  * called for each range allocated by the bootmem allocator and
888  * marks the pages PageReserved. The remaining valid pages are later
889  * sent to the buddy page allocator.
890  */
891 void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
892 {
893 	unsigned long start_pfn = PFN_DOWN(start);
894 	unsigned long end_pfn = PFN_UP(end);
895 
896 	for (; start_pfn < end_pfn; start_pfn++) {
897 		if (pfn_valid(start_pfn)) {
898 			struct page *page = pfn_to_page(start_pfn);
899 
900 			init_reserved_page(start_pfn);
901 			SetPageReserved(page);
902 		}
903 	}
904 }
905 
906 static bool free_pages_prepare(struct page *page, unsigned int order)
907 {
908 	bool compound = PageCompound(page);
909 	int i, bad = 0;
910 
911 	VM_BUG_ON_PAGE(PageTail(page), page);
912 	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
913 
914 	trace_mm_page_free(page, order);
915 	kmemcheck_free_shadow(page, order);
916 	kasan_free_pages(page, order);
917 
918 	if (PageAnon(page))
919 		page->mapping = NULL;
920 	bad += free_pages_check(page);
921 	for (i = 1; i < (1 << order); i++) {
922 		if (compound)
923 			bad += free_tail_pages_check(page, page + i);
924 		bad += free_pages_check(page + i);
925 	}
926 	if (bad)
927 		return false;
928 
929 	reset_page_owner(page, order);
930 
931 	if (!PageHighMem(page)) {
932 		debug_check_no_locks_freed(page_address(page),
933 					   PAGE_SIZE << order);
934 		debug_check_no_obj_freed(page_address(page),
935 					   PAGE_SIZE << order);
936 	}
937 	arch_free_page(page, order);
938 	kernel_map_pages(page, 1 << order, 0);
939 
940 	return true;
941 }
942 
943 static void __free_pages_ok(struct page *page, unsigned int order)
944 {
945 	unsigned long flags;
946 	int migratetype;
947 	unsigned long pfn = page_to_pfn(page);
948 
949 	if (!free_pages_prepare(page, order))
950 		return;
951 
952 	migratetype = get_pfnblock_migratetype(page, pfn);
953 	local_irq_save(flags);
954 	__count_vm_events(PGFREE, 1 << order);
955 	set_freepage_migratetype(page, migratetype);
956 	free_one_page(page_zone(page), page, pfn, order, migratetype);
957 	local_irq_restore(flags);
958 }
959 
960 static void __init __free_pages_boot_core(struct page *page,
961 					unsigned long pfn, unsigned int order)
962 {
963 	unsigned int nr_pages = 1 << order;
964 	struct page *p = page;
965 	unsigned int loop;
966 
967 	prefetchw(p);
968 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
969 		prefetchw(p + 1);
970 		__ClearPageReserved(p);
971 		set_page_count(p, 0);
972 	}
973 	__ClearPageReserved(p);
974 	set_page_count(p, 0);
975 
976 	page_zone(page)->managed_pages += nr_pages;
977 	set_page_refcounted(page);
978 	__free_pages(page, order);
979 }
980 
981 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
982 	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
983 
984 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
985 
986 int __meminit early_pfn_to_nid(unsigned long pfn)
987 {
988 	static DEFINE_SPINLOCK(early_pfn_lock);
989 	int nid;
990 
991 	spin_lock(&early_pfn_lock);
992 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
993 	if (nid < 0)
994 		nid = 0;
995 	spin_unlock(&early_pfn_lock);
996 
997 	return nid;
998 }
999 #endif
1000 
1001 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
1002 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1003 					struct mminit_pfnnid_cache *state)
1004 {
1005 	int nid;
1006 
1007 	nid = __early_pfn_to_nid(pfn, state);
1008 	if (nid >= 0 && nid != node)
1009 		return false;
1010 	return true;
1011 }
1012 
1013 /* Only safe to use early in boot when initialisation is single-threaded */
1014 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1015 {
1016 	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1017 }
1018 
1019 #else
1020 
1021 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1022 {
1023 	return true;
1024 }
1025 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1026 					struct mminit_pfnnid_cache *state)
1027 {
1028 	return true;
1029 }
1030 #endif
1031 
1032 
1033 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1034 							unsigned int order)
1035 {
1036 	if (early_page_uninitialised(pfn))
1037 		return;
1038 	return __free_pages_boot_core(page, pfn, order);
1039 }
1040 
1041 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1042 static void __init deferred_free_range(struct page *page,
1043 					unsigned long pfn, int nr_pages)
1044 {
1045 	int i;
1046 
1047 	if (!page)
1048 		return;
1049 
1050 	/* Free a large naturally-aligned chunk if possible */
1051 	if (nr_pages == MAX_ORDER_NR_PAGES &&
1052 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
1053 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1054 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
1055 		return;
1056 	}
1057 
1058 	for (i = 0; i < nr_pages; i++, page++, pfn++)
1059 		__free_pages_boot_core(page, pfn, 0);
1060 }
1061 
1062 /* Completion tracking for deferred_init_memmap() threads */
1063 static atomic_t pgdat_init_n_undone __initdata;
1064 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1065 
1066 static inline void __init pgdat_init_report_one_done(void)
1067 {
1068 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1069 		complete(&pgdat_init_all_done_comp);
1070 }
1071 
1072 /* Initialise remaining memory on a node */
1073 static int __init deferred_init_memmap(void *data)
1074 {
1075 	pg_data_t *pgdat = data;
1076 	int nid = pgdat->node_id;
1077 	struct mminit_pfnnid_cache nid_init_state = { };
1078 	unsigned long start = jiffies;
1079 	unsigned long nr_pages = 0;
1080 	unsigned long walk_start, walk_end;
1081 	int i, zid;
1082 	struct zone *zone;
1083 	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1084 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1085 
1086 	if (first_init_pfn == ULONG_MAX) {
1087 		pgdat_init_report_one_done();
1088 		return 0;
1089 	}
1090 
1091 	/* Bind memory initialisation thread to a local node if possible */
1092 	if (!cpumask_empty(cpumask))
1093 		set_cpus_allowed_ptr(current, cpumask);
1094 
1095 	/* Sanity check boundaries */
1096 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1097 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1098 	pgdat->first_deferred_pfn = ULONG_MAX;
1099 
1100 	/* Only the highest zone is deferred so find it */
1101 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1102 		zone = pgdat->node_zones + zid;
1103 		if (first_init_pfn < zone_end_pfn(zone))
1104 			break;
1105 	}
1106 
1107 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1108 		unsigned long pfn, end_pfn;
1109 		struct page *page = NULL;
1110 		struct page *free_base_page = NULL;
1111 		unsigned long free_base_pfn = 0;
1112 		int nr_to_free = 0;
1113 
1114 		end_pfn = min(walk_end, zone_end_pfn(zone));
1115 		pfn = first_init_pfn;
1116 		if (pfn < walk_start)
1117 			pfn = walk_start;
1118 		if (pfn < zone->zone_start_pfn)
1119 			pfn = zone->zone_start_pfn;
1120 
1121 		for (; pfn < end_pfn; pfn++) {
1122 			if (!pfn_valid_within(pfn))
1123 				goto free_range;
1124 
1125 			/*
1126 			 * Ensure pfn_valid is checked every
1127 			 * MAX_ORDER_NR_PAGES for memory holes
1128 			 */
1129 			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1130 				if (!pfn_valid(pfn)) {
1131 					page = NULL;
1132 					goto free_range;
1133 				}
1134 			}
1135 
1136 			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1137 				page = NULL;
1138 				goto free_range;
1139 			}
1140 
1141 			/* Minimise pfn page lookups and scheduler checks */
1142 			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1143 				page++;
1144 			} else {
1145 				nr_pages += nr_to_free;
1146 				deferred_free_range(free_base_page,
1147 						free_base_pfn, nr_to_free);
1148 				free_base_page = NULL;
1149 				free_base_pfn = nr_to_free = 0;
1150 
1151 				page = pfn_to_page(pfn);
1152 				cond_resched();
1153 			}
1154 
1155 			if (page->flags) {
1156 				VM_BUG_ON(page_zone(page) != zone);
1157 				goto free_range;
1158 			}
1159 
1160 			__init_single_page(page, pfn, zid, nid);
1161 			if (!free_base_page) {
1162 				free_base_page = page;
1163 				free_base_pfn = pfn;
1164 				nr_to_free = 0;
1165 			}
1166 			nr_to_free++;
1167 
1168 			/* Where possible, batch up pages for a single free */
1169 			continue;
1170 free_range:
1171 			/* Free the current block of pages to allocator */
1172 			nr_pages += nr_to_free;
1173 			deferred_free_range(free_base_page, free_base_pfn,
1174 								nr_to_free);
1175 			free_base_page = NULL;
1176 			free_base_pfn = nr_to_free = 0;
1177 		}
1178 
1179 		first_init_pfn = max(end_pfn, first_init_pfn);
1180 	}
1181 
1182 	/* Sanity check that the next zone really is unpopulated */
1183 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1184 
1185 	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1186 					jiffies_to_msecs(jiffies - start));
1187 
1188 	pgdat_init_report_one_done();
1189 	return 0;
1190 }
1191 
1192 void __init page_alloc_init_late(void)
1193 {
1194 	int nid;
1195 
1196 	/* There will be num_node_state(N_MEMORY) threads */
1197 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1198 	for_each_node_state(nid, N_MEMORY) {
1199 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1200 	}
1201 
1202 	/* Block until all are initialised */
1203 	wait_for_completion(&pgdat_init_all_done_comp);
1204 
1205 	/* Reinit limits that are based on free pages after the kernel is up */
1206 	files_maxfiles_init();
1207 }
1208 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1209 
1210 #ifdef CONFIG_CMA
1211 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1212 void __init init_cma_reserved_pageblock(struct page *page)
1213 {
1214 	unsigned i = pageblock_nr_pages;
1215 	struct page *p = page;
1216 
1217 	do {
1218 		__ClearPageReserved(p);
1219 		set_page_count(p, 0);
1220 	} while (++p, --i);
1221 
1222 	set_pageblock_migratetype(page, MIGRATE_CMA);
1223 
1224 	if (pageblock_order >= MAX_ORDER) {
1225 		i = pageblock_nr_pages;
1226 		p = page;
1227 		do {
1228 			set_page_refcounted(p);
1229 			__free_pages(p, MAX_ORDER - 1);
1230 			p += MAX_ORDER_NR_PAGES;
1231 		} while (i -= MAX_ORDER_NR_PAGES);
1232 	} else {
1233 		set_page_refcounted(page);
1234 		__free_pages(page, pageblock_order);
1235 	}
1236 
1237 	adjust_managed_page_count(page, pageblock_nr_pages);
1238 }
1239 #endif
1240 
1241 /*
1242  * The order of subdivision here is critical for the IO subsystem.
1243  * Please do not alter this order without good reasons and regression
1244  * testing. Specifically, as large blocks of memory are subdivided,
1245  * the order in which smaller blocks are delivered depends on the order
1246  * they're subdivided in this function. This is the primary factor
1247  * influencing the order in which pages are delivered to the IO
1248  * subsystem according to empirical testing, and this is also justified
1249  * by considering the behavior of a buddy system containing a single
1250  * large block of memory acted on by a series of small allocations.
1251  * This behavior is a critical factor in sglist merging's success.
1252  *
1253  * -- nyc
1254  */
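/*
 * Subdivision sketch: expand(zone, page, low = 0, high = 3, ...) on an
 * order-3 block peels off and frees the upper half at each step,
 *
 *	page[4..7] freed at order 2,
 *	page[2..3] freed at order 1,
 *	page[1]    freed at order 0,
 *
 * leaving page[0] as the order-0 page handed to the caller.
 */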
1255 static inline void expand(struct zone *zone, struct page *page,
1256 	int low, int high, struct free_area *area,
1257 	int migratetype)
1258 {
1259 	unsigned long size = 1 << high;
1260 
1261 	while (high > low) {
1262 		area--;
1263 		high--;
1264 		size >>= 1;
1265 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1266 
1267 		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
1268 			debug_guardpage_enabled() &&
1269 			high < debug_guardpage_minorder()) {
1270 			/*
1271 			 * Mark as guard pages (or a single page) so they can be
1272 			 * merged back into the allocator when the buddy is freed.
1273 			 * The corresponding page table entries are not touched;
1274 			 * the pages stay not present in the virtual address space.
1275 			 */
1276 			set_page_guard(zone, &page[size], high, migratetype);
1277 			continue;
1278 		}
1279 		list_add(&page[size].lru, &area->free_list[migratetype]);
1280 		area->nr_free++;
1281 		set_page_order(&page[size], high);
1282 	}
1283 }
1284 
1285 /*
1286  * This page is about to be returned from the page allocator
1287  */
1288 static inline int check_new_page(struct page *page)
1289 {
1290 	const char *bad_reason = NULL;
1291 	unsigned long bad_flags = 0;
1292 
1293 	if (unlikely(page_mapcount(page)))
1294 		bad_reason = "nonzero mapcount";
1295 	if (unlikely(page->mapping != NULL))
1296 		bad_reason = "non-NULL mapping";
1297 	if (unlikely(atomic_read(&page->_count) != 0))
1298 		bad_reason = "nonzero _count";
1299 	if (unlikely(page->flags & __PG_HWPOISON)) {
1300 		bad_reason = "HWPoisoned (hardware-corrupted)";
1301 		bad_flags = __PG_HWPOISON;
1302 	}
1303 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1304 		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1305 		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1306 	}
1307 #ifdef CONFIG_MEMCG
1308 	if (unlikely(page->mem_cgroup))
1309 		bad_reason = "page still charged to cgroup";
1310 #endif
1311 	if (unlikely(bad_reason)) {
1312 		bad_page(page, bad_reason, bad_flags);
1313 		return 1;
1314 	}
1315 	return 0;
1316 }
1317 
1318 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1319 								int alloc_flags)
1320 {
1321 	int i;
1322 
1323 	for (i = 0; i < (1 << order); i++) {
1324 		struct page *p = page + i;
1325 		if (unlikely(check_new_page(p)))
1326 			return 1;
1327 	}
1328 
1329 	set_page_private(page, 0);
1330 	set_page_refcounted(page);
1331 
1332 	arch_alloc_page(page, order);
1333 	kernel_map_pages(page, 1 << order, 1);
1334 	kasan_alloc_pages(page, order);
1335 
1336 	if (gfp_flags & __GFP_ZERO)
1337 		for (i = 0; i < (1 << order); i++)
1338 			clear_highpage(page + i);
1339 
1340 	if (order && (gfp_flags & __GFP_COMP))
1341 		prep_compound_page(page, order);
1342 
1343 	set_page_owner(page, order, gfp_flags);
1344 
1345 	/*
1346 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1347 	 * allocate the page. The expectation is that the caller is taking
1348 	 * steps that will free more memory. The caller should avoid the page
1349 	 * being used for !PFMEMALLOC purposes.
1350 	 */
1351 	if (alloc_flags & ALLOC_NO_WATERMARKS)
1352 		set_page_pfmemalloc(page);
1353 	else
1354 		clear_page_pfmemalloc(page);
1355 
1356 	return 0;
1357 }
1358 
1359 /*
1360  * Go through the free lists for the given migratetype and remove
1361  * the smallest available page from the freelists
1362  */
1363 static inline
1364 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1365 						int migratetype)
1366 {
1367 	unsigned int current_order;
1368 	struct free_area *area;
1369 	struct page *page;
1370 
1371 	/* Find a page of the appropriate size in the preferred list */
1372 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1373 		area = &(zone->free_area[current_order]);
1374 		if (list_empty(&area->free_list[migratetype]))
1375 			continue;
1376 
1377 		page = list_entry(area->free_list[migratetype].next,
1378 							struct page, lru);
1379 		list_del(&page->lru);
1380 		rmv_page_order(page);
1381 		area->nr_free--;
1382 		expand(zone, page, order, current_order, area, migratetype);
1383 		set_freepage_migratetype(page, migratetype);
1384 		return page;
1385 	}
1386 
1387 	return NULL;
1388 }
1389 
1390 
1391 /*
1392  * This array describes the order in which free lists are fallen back to
1393  * when the free lists for the desired migratetype are depleted.
1394  */
1395 static int fallbacks[MIGRATE_TYPES][4] = {
1396 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1397 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1398 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
1399 #ifdef CONFIG_CMA
1400 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
1401 #endif
1402 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
1403 #ifdef CONFIG_MEMORY_ISOLATION
1404 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
1405 #endif
1406 };
1407 
1408 #ifdef CONFIG_CMA
1409 static struct page *__rmqueue_cma_fallback(struct zone *zone,
1410 					unsigned int order)
1411 {
1412 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1413 }
1414 #else
1415 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1416 					unsigned int order) { return NULL; }
1417 #endif
1418 
1419 /*
1420  * Move the free pages in a range to the free lists of the requested type.
1421  * Note that start_page and end_page are not aligned on a pageblock
1422  * boundary. If alignment is required, use move_freepages_block()
1423  */
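/*
 * Example for move_freepages_block() below (assuming pageblock_order
 * is 9, i.e. 512 pages per block): a page at pfn 1000 rounds down to
 * start_pfn 512 and the candidate range is pfns [512, 1023]. If the
 * block's tail falls outside the zone the move is skipped entirely;
 * if its head does, moving starts at the passed-in page instead.
 */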
1424 int move_freepages(struct zone *zone,
1425 			  struct page *start_page, struct page *end_page,
1426 			  int migratetype)
1427 {
1428 	struct page *page;
1429 	unsigned long order;
1430 	int pages_moved = 0;
1431 
1432 #ifndef CONFIG_HOLES_IN_ZONE
1433 	/*
1434 	 * page_zone is not safe to call in this context when
1435 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1436 	 * anyway as we check zone boundaries in move_freepages_block().
1437 	 * Remove at a later date when no bug reports exist related to
1438 	 * grouping pages by mobility
1439 	 */
1440 	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1441 #endif
1442 
1443 	for (page = start_page; page <= end_page;) {
1444 		/* Make sure we are not inadvertently changing nodes */
1445 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1446 
1447 		if (!pfn_valid_within(page_to_pfn(page))) {
1448 			page++;
1449 			continue;
1450 		}
1451 
1452 		if (!PageBuddy(page)) {
1453 			page++;
1454 			continue;
1455 		}
1456 
1457 		order = page_order(page);
1458 		list_move(&page->lru,
1459 			  &zone->free_area[order].free_list[migratetype]);
1460 		set_freepage_migratetype(page, migratetype);
1461 		page += 1 << order;
1462 		pages_moved += 1 << order;
1463 	}
1464 
1465 	return pages_moved;
1466 }
1467 
1468 int move_freepages_block(struct zone *zone, struct page *page,
1469 				int migratetype)
1470 {
1471 	unsigned long start_pfn, end_pfn;
1472 	struct page *start_page, *end_page;
1473 
1474 	start_pfn = page_to_pfn(page);
1475 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1476 	start_page = pfn_to_page(start_pfn);
1477 	end_page = start_page + pageblock_nr_pages - 1;
1478 	end_pfn = start_pfn + pageblock_nr_pages - 1;
1479 
1480 	/* Do not cross zone boundaries */
1481 	if (!zone_spans_pfn(zone, start_pfn))
1482 		start_page = page;
1483 	if (!zone_spans_pfn(zone, end_pfn))
1484 		return 0;
1485 
1486 	return move_freepages(zone, start_page, end_page, migratetype);
1487 }
1488 
1489 static void change_pageblock_range(struct page *pageblock_page,
1490 					int start_order, int migratetype)
1491 {
1492 	int nr_pageblocks = 1 << (start_order - pageblock_order);
1493 
1494 	while (nr_pageblocks--) {
1495 		set_pageblock_migratetype(pageblock_page, migratetype);
1496 		pageblock_page += pageblock_nr_pages;
1497 	}
1498 }
1499 
1500 /*
1501  * When we are falling back to another migratetype during allocation, try to
1502  * steal extra free pages from the same pageblocks to satisfy further
1503  * allocations, instead of polluting multiple pageblocks.
1504  *
1505  * If we are stealing a relatively large buddy page, it is likely there will
1506  * be more free pages in the pageblock, so try to steal them all. For
1507  * reclaimable and unmovable allocations, we steal regardless of page size,
1508  * as fragmentation caused by those allocations polluting movable pageblocks
1509  * is worse than movable allocations stealing from unmovable and reclaimable
1510  * pageblocks.
1511  */
1512 static bool can_steal_fallback(unsigned int order, int start_mt)
1513 {
1514 	/*
1515 	 * This order check is intentionally kept even though the next
1516 	 * check uses a more relaxed one. The reason is that we can
1517 	 * actually steal the whole pageblock if this condition is met,
1518 	 * whereas the check below does not guarantee it; it is just a
1519 	 * heuristic and could be changed at any time.
1520 	 */
1521 	if (order >= pageblock_order)
1522 		return true;
1523 
1524 	if (order >= pageblock_order / 2 ||
1525 		start_mt == MIGRATE_RECLAIMABLE ||
1526 		start_mt == MIGRATE_UNMOVABLE ||
1527 		page_group_by_mobility_disabled)
1528 		return true;
1529 
1530 	return false;
1531 }
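/*
 * E.g. with pageblock_order == 9: any fallback of order >= 9 steals
 * the whole pageblock outright; MOVABLE fallbacks may steal from
 * order >= 4 (9 / 2), while RECLAIMABLE and UNMOVABLE fallbacks may
 * steal at any order.
 */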
1532 
1533 /*
1534  * This function implements the actual steal behaviour. If the order is large
1535  * enough, we can steal the whole pageblock. If not, we first move the
1536  * freepages in this pageblock and check whether at least half of its pages
1537  * were moved. If so, we can change the migratetype of the pageblock and
1538  * permanently use its pages for the requested migratetype in the future.
1539  */
1540 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1541 							  int start_type)
1542 {
1543 	int current_order = page_order(page);
1544 	int pages;
1545 
1546 	/* Take ownership for orders >= pageblock_order */
1547 	if (current_order >= pageblock_order) {
1548 		change_pageblock_range(page, current_order, start_type);
1549 		return;
1550 	}
1551 
1552 	pages = move_freepages_block(zone, page, start_type);
1553 
1554 	/* Claim the whole block if over half of it is free */
1555 	if (pages >= (1 << (pageblock_order-1)) ||
1556 			page_group_by_mobility_disabled)
1557 		set_pageblock_migratetype(page, start_type);
1558 }
1559 
1560 /*
1561  * Check whether there is a suitable fallback freepage with requested order.
1562  * If only_stealable is true, this function returns fallback_mt only if
1563  * we can steal the other freepages altogether. This helps to reduce
1564  * fragmentation due to mixed migratetype pages in one pageblock.
1565  */
1566 int find_suitable_fallback(struct free_area *area, unsigned int order,
1567 			int migratetype, bool only_stealable, bool *can_steal)
1568 {
1569 	int i;
1570 	int fallback_mt;
1571 
1572 	if (area->nr_free == 0)
1573 		return -1;
1574 
1575 	*can_steal = false;
1576 	for (i = 0;; i++) {
1577 		fallback_mt = fallbacks[migratetype][i];
1578 		if (fallback_mt == MIGRATE_RESERVE)
1579 			break;
1580 
1581 		if (list_empty(&area->free_list[fallback_mt]))
1582 			continue;
1583 
1584 		if (can_steal_fallback(order, migratetype))
1585 			*can_steal = true;
1586 
1587 		if (!only_stealable)
1588 			return fallback_mt;
1589 
1590 		if (*can_steal)
1591 			return fallback_mt;
1592 	}
1593 
1594 	return -1;
1595 }
1596 
1597 /* Remove an element from the buddy allocator from the fallback list */
1598 static inline struct page *
1599 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1600 {
1601 	struct free_area *area;
1602 	unsigned int current_order;
1603 	struct page *page;
1604 	int fallback_mt;
1605 	bool can_steal;
1606 
1607 	/* Find the largest possible block of pages in the other list */
1608 	for (current_order = MAX_ORDER-1;
1609 				current_order >= order && current_order <= MAX_ORDER-1;
1610 				--current_order) {
1611 		area = &(zone->free_area[current_order]);
1612 		fallback_mt = find_suitable_fallback(area, current_order,
1613 				start_migratetype, false, &can_steal);
1614 		if (fallback_mt == -1)
1615 			continue;
1616 
1617 		page = list_entry(area->free_list[fallback_mt].next,
1618 						struct page, lru);
1619 		if (can_steal)
1620 			steal_suitable_fallback(zone, page, start_migratetype);
1621 
1622 		/* Remove the page from the freelists */
1623 		area->nr_free--;
1624 		list_del(&page->lru);
1625 		rmv_page_order(page);
1626 
1627 		expand(zone, page, order, current_order, area,
1628 					start_migratetype);
1629 		/*
1630 		 * The freepage_migratetype may differ from pageblock's
1631 		 * migratetype depending on the decisions in
1632 		 * steal_suitable_fallback(). This is OK as long as it
1633 		 * does not differ for MIGRATE_CMA pageblocks. For CMA
1634 		 * we need to make sure unallocated pages flushed from
1635 		 * pcp lists are returned to the correct freelist.
1636 		 */
1637 		set_freepage_migratetype(page, start_migratetype);
1638 
1639 		trace_mm_page_alloc_extfrag(page, order, current_order,
1640 			start_migratetype, fallback_mt);
1641 
1642 		return page;
1643 	}
1644 
1645 	return NULL;
1646 }
1647 
1648 /*
1649  * Do the hard work of removing an element from the buddy allocator.
1650  * Call me with the zone->lock already held.
1651  */
1652 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1653 						int migratetype)
1654 {
1655 	struct page *page;
1656 
1657 retry_reserve:
1658 	page = __rmqueue_smallest(zone, order, migratetype);
1659 
1660 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1661 		if (migratetype == MIGRATE_MOVABLE)
1662 			page = __rmqueue_cma_fallback(zone, order);
1663 
1664 		if (!page)
1665 			page = __rmqueue_fallback(zone, order, migratetype);
1666 
1667 		/*
1668 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1669 		 * is used because __rmqueue_smallest is an inline function
1670 		 * and we want just one call site
1671 		 */
1672 		if (!page) {
1673 			migratetype = MIGRATE_RESERVE;
1674 			goto retry_reserve;
1675 		}
1676 	}
1677 
1678 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1679 	return page;
1680 }
1681 
1682 /*
1683  * Obtain a specified number of elements from the buddy allocator, all under
1684  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1685  * Returns the number of new pages which were placed at *list.
1686  */
1687 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1688 			unsigned long count, struct list_head *list,
1689 			int migratetype, bool cold)
1690 {
1691 	int i;
1692 
1693 	spin_lock(&zone->lock);
1694 	for (i = 0; i < count; ++i) {
1695 		struct page *page = __rmqueue(zone, order, migratetype);
1696 		if (unlikely(page == NULL))
1697 			break;
1698 
1699 		/*
1700 		 * Split buddy pages returned by expand() are received here
1701 		 * in physical page order. The page is added to the caller's
1702 		 * list and the list head then moves forward. From the caller's
1703 		 * perspective, the linked list is ordered by page number under
1704 		 * some conditions. This is useful for IO devices that can
1705 		 * merge IO requests if the physical pages are ordered
1706 		 * properly.
1707 		 */
1708 		if (likely(!cold))
1709 			list_add(&page->lru, list);
1710 		else
1711 			list_add_tail(&page->lru, list);
1712 		list = &page->lru;
1713 		if (is_migrate_cma(get_freepage_migratetype(page)))
1714 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1715 					      -(1 << order));
1716 	}
1717 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1718 	spin_unlock(&zone->lock);
1719 	return i;
1720 }
1721 
1722 #ifdef CONFIG_NUMA
1723 /*
1724  * Called from the vmstat counter updater to drain pagesets of this
1725  * currently executing processor on remote nodes after they have
1726  * expired.
1727  *
1728  * Note that this function must be called with the thread pinned to
1729  * a single processor.
1730  */
1731 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1732 {
1733 	unsigned long flags;
1734 	int to_drain, batch;
1735 
1736 	local_irq_save(flags);
1737 	batch = READ_ONCE(pcp->batch);
1738 	to_drain = min(pcp->count, batch);
1739 	if (to_drain > 0) {
1740 		free_pcppages_bulk(zone, to_drain, pcp);
1741 		pcp->count -= to_drain;
1742 	}
1743 	local_irq_restore(flags);
1744 }
1745 #endif
1746 
1747 /*
1748  * Drain pcplists of the indicated processor and zone.
1749  *
1750  * The processor must either be the current processor and the
1751  * thread pinned to the current processor or a processor that
1752  * is not online.
1753  */
1754 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1755 {
1756 	unsigned long flags;
1757 	struct per_cpu_pageset *pset;
1758 	struct per_cpu_pages *pcp;
1759 
1760 	local_irq_save(flags);
1761 	pset = per_cpu_ptr(zone->pageset, cpu);
1762 
1763 	pcp = &pset->pcp;
1764 	if (pcp->count) {
1765 		free_pcppages_bulk(zone, pcp->count, pcp);
1766 		pcp->count = 0;
1767 	}
1768 	local_irq_restore(flags);
1769 }
1770 
1771 /*
1772  * Drain pcplists of all zones on the indicated processor.
1773  *
1774  * The processor must either be the current processor and the
1775  * thread pinned to the current processor or a processor that
1776  * is not online.
1777  */
1778 static void drain_pages(unsigned int cpu)
1779 {
1780 	struct zone *zone;
1781 
1782 	for_each_populated_zone(zone) {
1783 		drain_pages_zone(cpu, zone);
1784 	}
1785 }
1786 
1787 /*
1788  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1789  *
1790  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1791  * the single zone's pages.
1792  */
1793 void drain_local_pages(struct zone *zone)
1794 {
1795 	int cpu = smp_processor_id();
1796 
1797 	if (zone)
1798 		drain_pages_zone(cpu, zone);
1799 	else
1800 		drain_pages(cpu);
1801 }
1802 
1803 /*
1804  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1805  *
1806  * When zone parameter is non-NULL, spill just the single zone's pages.
1807  *
1808  * Note that this code is protected against sending an IPI to an offline
1809  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1810  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1811  * nothing keeps CPUs from showing up after we populated the cpumask and
1812  * before the call to on_each_cpu_mask().
1813  */
1814 void drain_all_pages(struct zone *zone)
1815 {
1816 	int cpu;
1817 
1818 	/*
1819 	 * Allocate in the BSS so we won't require allocation in
1820 	 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1821 	 */
1822 	static cpumask_t cpus_with_pcps;
1823 
1824 	/*
1825 	 * We don't care about racing with CPU hotplug events:
1826 	 * the offline notification will cause the notified
1827 	 * CPU to drain its own pcplists, and on_each_cpu_mask()
1828 	 * disables preemption as part of its processing.
1829 	 */
1830 	for_each_online_cpu(cpu) {
1831 		struct per_cpu_pageset *pcp;
1832 		struct zone *z;
1833 		bool has_pcps = false;
1834 
1835 		if (zone) {
1836 			pcp = per_cpu_ptr(zone->pageset, cpu);
1837 			if (pcp->pcp.count)
1838 				has_pcps = true;
1839 		} else {
1840 			for_each_populated_zone(z) {
1841 				pcp = per_cpu_ptr(z->pageset, cpu);
1842 				if (pcp->pcp.count) {
1843 					has_pcps = true;
1844 					break;
1845 				}
1846 			}
1847 		}
1848 
1849 		if (has_pcps)
1850 			cpumask_set_cpu(cpu, &cpus_with_pcps);
1851 		else
1852 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
1853 	}
1854 	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
1855 								zone, 1);
1856 }
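
/*
 * Usage note (illustrative, not from the original source): direct
 * reclaim calls drain_all_pages(NULL) to flush every zone's pcplists
 * (see __alloc_pages_direct_reclaim() below), while callers that only
 * care about one zone, such as memory offlining, pass that zone to
 * limit the IPI work.
 */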
1857 
1858 #ifdef CONFIG_HIBERNATION
1859 
1860 void mark_free_pages(struct zone *zone)
1861 {
1862 	unsigned long pfn, max_zone_pfn;
1863 	unsigned long flags;
1864 	unsigned int order, t;
1865 	struct list_head *curr;
1866 
1867 	if (zone_is_empty(zone))
1868 		return;
1869 
1870 	spin_lock_irqsave(&zone->lock, flags);
1871 
1872 	max_zone_pfn = zone_end_pfn(zone);
1873 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1874 		if (pfn_valid(pfn)) {
1875 			struct page *page = pfn_to_page(pfn);
1876 
1877 			if (!swsusp_page_is_forbidden(page))
1878 				swsusp_unset_page_free(page);
1879 		}
1880 
1881 	for_each_migratetype_order(order, t) {
1882 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1883 			unsigned long i;
1884 
1885 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1886 			for (i = 0; i < (1UL << order); i++)
1887 				swsusp_set_page_free(pfn_to_page(pfn + i));
1888 		}
1889 	}
1890 	spin_unlock_irqrestore(&zone->lock, flags);
1891 }
1892 #endif /* CONFIG_HIBERNATION */
1893 
1894 /*
1895  * Free a 0-order page
1896  * cold == true ? free a cold page : free a hot page
1897  */
1898 void free_hot_cold_page(struct page *page, bool cold)
1899 {
1900 	struct zone *zone = page_zone(page);
1901 	struct per_cpu_pages *pcp;
1902 	unsigned long flags;
1903 	unsigned long pfn = page_to_pfn(page);
1904 	int migratetype;
1905 
1906 	if (!free_pages_prepare(page, 0))
1907 		return;
1908 
1909 	migratetype = get_pfnblock_migratetype(page, pfn);
1910 	set_freepage_migratetype(page, migratetype);
1911 	local_irq_save(flags);
1912 	__count_vm_event(PGFREE);
1913 
1914 	/*
1915 	 * We only track unmovable, reclaimable and movable on pcp lists.
1916 	 * Free ISOLATE pages back to the allocator because they are being
1917 	 * offlined, but treat RESERVE as movable pages so we can get those
1918 	 * areas back if necessary. Otherwise, we may have to free
1919 	 * excessively into the page allocator.
1920 	 */
1921 	if (migratetype >= MIGRATE_PCPTYPES) {
1922 		if (unlikely(is_migrate_isolate(migratetype))) {
1923 			free_one_page(zone, page, pfn, 0, migratetype);
1924 			goto out;
1925 		}
1926 		migratetype = MIGRATE_MOVABLE;
1927 	}
1928 
1929 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1930 	if (!cold)
1931 		list_add(&page->lru, &pcp->lists[migratetype]);
1932 	else
1933 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1934 	pcp->count++;
1935 	if (pcp->count >= pcp->high) {
1936 		unsigned long batch = READ_ONCE(pcp->batch);
1937 		free_pcppages_bulk(zone, batch, pcp);
1938 		pcp->count -= batch;
1939 	}
1940 
1941 out:
1942 	local_irq_restore(flags);
1943 }
1944 
1945 /*
1946  * Free a list of 0-order pages
1947  */
1948 void free_hot_cold_page_list(struct list_head *list, bool cold)
1949 {
1950 	struct page *page, *next;
1951 
1952 	list_for_each_entry_safe(page, next, list, lru) {
1953 		trace_mm_page_free_batched(page, cold);
1954 		free_hot_cold_page(page, cold);
1955 	}
1956 }
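
/*
 * Illustrative sketch (not part of the original file): batching frees
 * through a private list, as in-tree callers such as release_pages()
 * do.  Assumes each page's reference count has already dropped to
 * zero; the function and list names are hypothetical.  Guarded out so
 * it has no effect on the build.
 */
#if 0
static void example_free_page_batch(struct page **pages, int nr)
{
	LIST_HEAD(pages_to_free);
	int i;

	/* Chain the pages together through their lru members... */
	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &pages_to_free);

	/* ...then free them all with a single list walk */
	free_hot_cold_page_list(&pages_to_free, false);
}
#endif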
1957 
1958 /*
1959  * split_page takes a non-compound higher-order page, and splits it into
1960  * n (1<<order) sub-pages: page[0..n-1]
1961  * Each sub-page must be freed individually.
1962  *
1963  * Note: this is probably too low level an operation for use in drivers.
1964  * Please consult with lkml before using this in your driver.
1965  */
1966 void split_page(struct page *page, unsigned int order)
1967 {
1968 	int i;
1969 	gfp_t gfp_mask;
1970 
1971 	VM_BUG_ON_PAGE(PageCompound(page), page);
1972 	VM_BUG_ON_PAGE(!page_count(page), page);
1973 
1974 #ifdef CONFIG_KMEMCHECK
1975 	/*
1976 	 * Split shadow pages too, because free(page[0]) would
1977 	 * otherwise free the whole shadow.
1978 	 */
1979 	if (kmemcheck_page_is_tracked(page))
1980 		split_page(virt_to_page(page[0].shadow), order);
1981 #endif
1982 
1983 	gfp_mask = get_page_owner_gfp(page);
1984 	set_page_owner(page, 0, gfp_mask);
1985 	for (i = 1; i < (1 << order); i++) {
1986 		set_page_refcounted(page + i);
1987 		set_page_owner(page + i, 0, gfp_mask);
1988 	}
1989 }
1990 EXPORT_SYMBOL_GPL(split_page);
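
/*
 * Illustrative sketch (not part of the original file): a caller that
 * splits an order-2 allocation so the four sub-pages can be freed
 * independently.  The function name is hypothetical; guarded out so it
 * has no effect on the build.
 */
#if 0
static struct page *example_split_alloc(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;

	split_page(page, 2);	/* page[0..3] are now separately refcounted */
	__free_page(page + 3);	/* so each sub-page may be freed on its own */
	return page;		/* caller keeps page[0..2] */
}
#endif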
1991 
1992 int __isolate_free_page(struct page *page, unsigned int order)
1993 {
1994 	unsigned long watermark;
1995 	struct zone *zone;
1996 	int mt;
1997 
1998 	BUG_ON(!PageBuddy(page));
1999 
2000 	zone = page_zone(page);
2001 	mt = get_pageblock_migratetype(page);
2002 
2003 	if (!is_migrate_isolate(mt)) {
2004 		/* Obey watermarks as if the page was being allocated */
2005 		watermark = low_wmark_pages(zone) + (1 << order);
2006 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2007 			return 0;
2008 
2009 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
2010 	}
2011 
2012 	/* Remove page from free list */
2013 	list_del(&page->lru);
2014 	zone->free_area[order].nr_free--;
2015 	rmv_page_order(page);
2016 
2017 	set_page_owner(page, order, __GFP_MOVABLE);
2018 
2019 	/* Set the pageblock if the isolated page is at least half of a pageblock */
2020 	if (order >= pageblock_order - 1) {
2021 		struct page *endpage = page + (1 << order) - 1;
2022 		for (; page < endpage; page += pageblock_nr_pages) {
2023 			int mt = get_pageblock_migratetype(page);
2024 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
2025 				set_pageblock_migratetype(page,
2026 							  MIGRATE_MOVABLE);
2027 		}
2028 	}
2029 
2030 
2031 	return 1UL << order;
2032 }
2033 
2034 /*
2035  * Similar to split_page except the page is already free. As this is only
2036  * being used for migration, the migratetype of the block also changes.
2037  * As this is called with interrupts disabled, the caller is responsible
2038  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2039  * are enabled.
2040  *
2041  * Note: this is probably too low level an operation for use in drivers.
2042  * Please consult with lkml before using this in your driver.
2043  */
2044 int split_free_page(struct page *page)
2045 {
2046 	unsigned int order;
2047 	int nr_pages;
2048 
2049 	order = page_order(page);
2050 
2051 	nr_pages = __isolate_free_page(page, order);
2052 	if (!nr_pages)
2053 		return 0;
2054 
2055 	/* Split into individual pages */
2056 	set_page_refcounted(page);
2057 	split_page(page, order);
2058 	return nr_pages;
2059 }
2060 
2061 /*
2062  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2063  */
2064 static inline
2065 struct page *buffered_rmqueue(struct zone *preferred_zone,
2066 			struct zone *zone, unsigned int order,
2067 			gfp_t gfp_flags, int migratetype)
2068 {
2069 	unsigned long flags;
2070 	struct page *page;
2071 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
2072 
2073 	if (likely(order == 0)) {
2074 		struct per_cpu_pages *pcp;
2075 		struct list_head *list;
2076 
2077 		local_irq_save(flags);
2078 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
2079 		list = &pcp->lists[migratetype];
2080 		if (list_empty(list)) {
2081 			pcp->count += rmqueue_bulk(zone, 0,
2082 					pcp->batch, list,
2083 					migratetype, cold);
2084 			if (unlikely(list_empty(list)))
2085 				goto failed;
2086 		}
2087 
2088 		if (cold)
2089 			page = list_entry(list->prev, struct page, lru);
2090 		else
2091 			page = list_entry(list->next, struct page, lru);
2092 
2093 		list_del(&page->lru);
2094 		pcp->count--;
2095 	} else {
2096 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
2097 			/*
2098 			 * __GFP_NOFAIL is not to be used in new code.
2099 			 *
2100 			 * All __GFP_NOFAIL callers should be fixed so that they
2101 			 * properly detect and handle allocation failures.
2102 			 *
2103 			 * We most definitely don't want callers attempting to
2104 			 * allocate greater than order-1 page units with
2105 			 * __GFP_NOFAIL.
2106 			 */
2107 			WARN_ON_ONCE(order > 1);
2108 		}
2109 		spin_lock_irqsave(&zone->lock, flags);
2110 		page = __rmqueue(zone, order, migratetype);
2111 		spin_unlock(&zone->lock);
2112 		if (!page)
2113 			goto failed;
2114 		__mod_zone_freepage_state(zone, -(1 << order),
2115 					  get_freepage_migratetype(page));
2116 	}
2117 
2118 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
2119 	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
2120 	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2121 		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2122 
2123 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
2124 	zone_statistics(preferred_zone, zone, gfp_flags);
2125 	local_irq_restore(flags);
2126 
2127 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
2128 	return page;
2129 
2130 failed:
2131 	local_irq_restore(flags);
2132 	return NULL;
2133 }
2134 
2135 #ifdef CONFIG_FAIL_PAGE_ALLOC
2136 
2137 static struct {
2138 	struct fault_attr attr;
2139 
2140 	u32 ignore_gfp_highmem;
2141 	u32 ignore_gfp_wait;
2142 	u32 min_order;
2143 } fail_page_alloc = {
2144 	.attr = FAULT_ATTR_INITIALIZER,
2145 	.ignore_gfp_wait = 1,
2146 	.ignore_gfp_highmem = 1,
2147 	.min_order = 1,
2148 };
2149 
2150 static int __init setup_fail_page_alloc(char *str)
2151 {
2152 	return setup_fault_attr(&fail_page_alloc.attr, str);
2153 }
2154 __setup("fail_page_alloc=", setup_fail_page_alloc);
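
/*
 * Example (illustrative; assumes the usual setup_fault_attr() format
 * of <interval>,<probability>,<space>,<times>):
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * on the kernel command line would fail roughly 10% of the eligible
 * allocations, with no space decay and no limit on the failure count.
 */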
2155 
2156 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2157 {
2158 	if (order < fail_page_alloc.min_order)
2159 		return false;
2160 	if (gfp_mask & __GFP_NOFAIL)
2161 		return false;
2162 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2163 		return false;
2164 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
2165 		return false;
2166 
2167 	return should_fail(&fail_page_alloc.attr, 1 << order);
2168 }
2169 
2170 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2171 
2172 static int __init fail_page_alloc_debugfs(void)
2173 {
2174 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2175 	struct dentry *dir;
2176 
2177 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2178 					&fail_page_alloc.attr);
2179 	if (IS_ERR(dir))
2180 		return PTR_ERR(dir);
2181 
2182 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2183 				&fail_page_alloc.ignore_gfp_wait))
2184 		goto fail;
2185 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2186 				&fail_page_alloc.ignore_gfp_highmem))
2187 		goto fail;
2188 	if (!debugfs_create_u32("min-order", mode, dir,
2189 				&fail_page_alloc.min_order))
2190 		goto fail;
2191 
2192 	return 0;
2193 fail:
2194 	debugfs_remove_recursive(dir);
2195 
2196 	return -ENOMEM;
2197 }
2198 
2199 late_initcall(fail_page_alloc_debugfs);
2200 
2201 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2202 
2203 #else /* CONFIG_FAIL_PAGE_ALLOC */
2204 
2205 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2206 {
2207 	return false;
2208 }
2209 
2210 #endif /* CONFIG_FAIL_PAGE_ALLOC */
2211 
2212 /*
2213  * Return true if free pages are above 'mark'. This takes into account the order
2214  * of the allocation.
2215  */
2216 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2217 			unsigned long mark, int classzone_idx, int alloc_flags,
2218 			long free_pages)
2219 {
2220 	/* free_pages may go negative - that's OK */
2221 	long min = mark;
2222 	int o;
2223 	long free_cma = 0;
2224 
2225 	free_pages -= (1 << order) - 1;
2226 	if (alloc_flags & ALLOC_HIGH)
2227 		min -= min / 2;
2228 	if (alloc_flags & ALLOC_HARDER)
2229 		min -= min / 4;
2230 #ifdef CONFIG_CMA
2231 	/* If allocation can't use CMA areas don't use free CMA pages */
2232 	if (!(alloc_flags & ALLOC_CMA))
2233 		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
2234 #endif
2235 
2236 	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
2237 		return false;
2238 	for (o = 0; o < order; o++) {
2239 		/* At the next order, this order's pages become unavailable */
2240 		free_pages -= z->free_area[o].nr_free << o;
2241 
2242 		/* Require fewer higher order pages to be free */
2243 		min >>= 1;
2244 
2245 		if (free_pages <= min)
2246 			return false;
2247 	}
2248 	return true;
2249 }
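
/*
 * Worked example with illustrative numbers: an order-2 request against
 * mark = 128, no ALLOC_HIGH/ALLOC_HARDER/ALLOC_CMA, 200 free pages and
 * no lowmem reserve.  First, free_pages = 200 - (4 - 1) = 197, which
 * is above min = 128.  Then at o = 0 the order-0 pages are discounted
 * and min becomes 64; at o = 1 the order-1 pages are discounted and
 * min becomes 32.  The request succeeds only if enough free memory
 * remains at order >= 2 after each of those discounts.
 */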
2250 
2251 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2252 		      int classzone_idx, int alloc_flags)
2253 {
2254 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2255 					zone_page_state(z, NR_FREE_PAGES));
2256 }
2257 
2258 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2259 			unsigned long mark, int classzone_idx, int alloc_flags)
2260 {
2261 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
2262 
2263 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2264 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2265 
2266 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2267 								free_pages);
2268 }
2269 
2270 #ifdef CONFIG_NUMA
2271 /*
2272  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
2273  * skip over zones that are not allowed by the cpuset, or that have
2274  * been recently (in last second) found to be nearly full.  See further
2275  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
2276  * that have to skip over a lot of full or unallowed zones.
2277  *
2278  * If the zonelist cache is present in the passed zonelist, then
2279  * returns a pointer to the allowed node mask (either the current
2280  * task's mems_allowed, or node_states[N_MEMORY].)
2281  *
2282  * If the zonelist cache is not available for this zonelist, does
2283  * nothing and returns NULL.
2284  *
2285  * If the fullzones BITMAP in the zonelist cache is stale (more than
2286  * a second since last zap'd) then we zap it out (clear its bits.)
2287  *
2288  * We hold off even calling zlc_setup, until after we've checked the
2289  * first zone in the zonelist, on the theory that most allocations will
2290  * be satisfied from that first zone, so best to examine that zone as
2291  * quickly as we can.
2292  */
2293 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
2294 {
2295 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2296 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
2297 
2298 	zlc = zonelist->zlcache_ptr;
2299 	if (!zlc)
2300 		return NULL;
2301 
2302 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
2303 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2304 		zlc->last_full_zap = jiffies;
2305 	}
2306 
2307 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
2308 					&cpuset_current_mems_allowed :
2309 					&node_states[N_MEMORY];
2310 	return allowednodes;
2311 }
2312 
2313 /*
2314  * Given 'z' scanning a zonelist, run a couple of quick checks to see
2315  * if it is worth looking at further for free memory:
2316  *  1) Check that the zone isn't thought to be full (doesn't have its
2317  *     bit set in the zonelist_cache fullzones BITMAP).
2318  *  2) Check that the zone's node (obtained from the zonelist_cache
2319  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
2320  * Return true (non-zero) if zone is worth looking at further, or
2321  * else return false (zero) if it is not.
2322  *
2323  * This check -ignores- the distinction between various watermarks,
2324  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
2325  * found to be full for any variation of these watermarks, it will
2326  * be considered full for up to one second by all requests, unless
2327  * we are so low on memory on all allowed nodes that we are forced
2328  * into the second scan of the zonelist.
2329  *
2330  * In the second scan we ignore this zonelist cache and exactly
2331  * apply the watermarks to all zones, even if it is slower to do so.
2332  * We are low on memory in the second scan, and should leave no stone
2333  * unturned looking for a free page.
2334  */
2335 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
2336 						nodemask_t *allowednodes)
2337 {
2338 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2339 	int i;				/* index of *z in zonelist zones */
2340 	int n;				/* node that zone *z is on */
2341 
2342 	zlc = zonelist->zlcache_ptr;
2343 	if (!zlc)
2344 		return 1;
2345 
2346 	i = z - zonelist->_zonerefs;
2347 	n = zlc->z_to_n[i];
2348 
2349 	/* This zone is worth trying if it is allowed but not full */
2350 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
2351 }
2352 
2353 /*
2354  * Given 'z' scanning a zonelist, set the corresponding bit in
2355  * zlc->fullzones, so that subsequent attempts to allocate a page
2356  * from that zone don't waste time re-examining it.
2357  */
2358 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2359 {
2360 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2361 	int i;				/* index of *z in zonelist zones */
2362 
2363 	zlc = zonelist->zlcache_ptr;
2364 	if (!zlc)
2365 		return;
2366 
2367 	i = z - zonelist->_zonerefs;
2368 
2369 	set_bit(i, zlc->fullzones);
2370 }
2371 
2372 /*
2373  * clear all zones full, called after direct reclaim makes progress so that
2374  * a zone that was recently full is not skipped over for up to a second
2375  */
2376 static void zlc_clear_zones_full(struct zonelist *zonelist)
2377 {
2378 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2379 
2380 	zlc = zonelist->zlcache_ptr;
2381 	if (!zlc)
2382 		return;
2383 
2384 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2385 }
2386 
2387 static bool zone_local(struct zone *local_zone, struct zone *zone)
2388 {
2389 	return local_zone->node == zone->node;
2390 }
2391 
2392 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2393 {
2394 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2395 				RECLAIM_DISTANCE;
2396 }
2397 
2398 #else	/* CONFIG_NUMA */
2399 
2400 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
2401 {
2402 	return NULL;
2403 }
2404 
2405 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
2406 				nodemask_t *allowednodes)
2407 {
2408 	return 1;
2409 }
2410 
2411 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2412 {
2413 }
2414 
2415 static void zlc_clear_zones_full(struct zonelist *zonelist)
2416 {
2417 }
2418 
2419 static bool zone_local(struct zone *local_zone, struct zone *zone)
2420 {
2421 	return true;
2422 }
2423 
2424 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2425 {
2426 	return true;
2427 }
2428 
2429 #endif	/* CONFIG_NUMA */
2430 
2431 static void reset_alloc_batches(struct zone *preferred_zone)
2432 {
2433 	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2434 
2435 	do {
2436 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
2437 			high_wmark_pages(zone) - low_wmark_pages(zone) -
2438 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
2439 		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2440 	} while (zone++ != preferred_zone);
2441 }
2442 
2443 /*
2444  * get_page_from_freelist goes through the zonelist trying to allocate
2445  * a page.
2446  */
2447 static struct page *
2448 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2449 						const struct alloc_context *ac)
2450 {
2451 	struct zonelist *zonelist = ac->zonelist;
2452 	struct zoneref *z;
2453 	struct page *page = NULL;
2454 	struct zone *zone;
2455 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
2456 	int zlc_active = 0;		/* set if using zonelist_cache */
2457 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
2458 	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
2459 				(gfp_mask & __GFP_WRITE);
2460 	int nr_fair_skipped = 0;
2461 	bool zonelist_rescan;
2462 
2463 zonelist_scan:
2464 	zonelist_rescan = false;
2465 
2466 	/*
2467 	 * Scan zonelist, looking for a zone with enough free.
2468 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
2469 	 */
2470 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2471 								ac->nodemask) {
2472 		unsigned long mark;
2473 
2474 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2475 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
2476 				continue;
2477 		if (cpusets_enabled() &&
2478 			(alloc_flags & ALLOC_CPUSET) &&
2479 			!cpuset_zone_allowed(zone, gfp_mask))
2480 				continue;
2481 		/*
2482 		 * Distribute pages in proportion to the individual
2483 		 * zone size to ensure fair page aging.  The zone a
2484 		 * page was allocated in should have no effect on the
2485 		 * time the page spends in memory before being reclaimed.
2486 		 */
2487 		if (alloc_flags & ALLOC_FAIR) {
2488 			if (!zone_local(ac->preferred_zone, zone))
2489 				break;
2490 			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2491 				nr_fair_skipped++;
2492 				continue;
2493 			}
2494 		}
2495 		/*
2496 		 * When allocating a page cache page for writing, we
2497 		 * want to get it from a zone that is within its dirty
2498 		 * limit, such that no single zone holds more than its
2499 		 * proportional share of globally allowed dirty pages.
2500 		 * The dirty limits take into account the zone's
2501 		 * lowmem reserves and high watermark so that kswapd
2502 		 * should be able to balance it without having to
2503 		 * write pages from its LRU list.
2504 		 *
2505 		 * This may look like it could increase pressure on
2506 		 * lower zones by failing allocations in higher zones
2507 		 * before they are full.  But the pages that do spill
2508 		 * over are limited as the lower zones are protected
2509 		 * by this very same mechanism.  It should not become
2510 		 * a practical burden to them.
2511 		 *
2512 		 * XXX: For now, allow allocations to potentially
2513 		 * exceed the per-zone dirty limit in the slowpath
2514 		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
2515 		 * which is important when on a NUMA setup the allowed
2516 		 * zones are together not big enough to reach the
2517 		 * global limit.  The proper fix for these situations
2518 		 * will require awareness of zones in the
2519 		 * dirty-throttling and the flusher threads.
2520 		 */
2521 		if (consider_zone_dirty && !zone_dirty_ok(zone))
2522 			continue;
2523 
2524 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2525 		if (!zone_watermark_ok(zone, order, mark,
2526 				       ac->classzone_idx, alloc_flags)) {
2527 			int ret;
2528 
2529 			/* Checked here to keep the fast path fast */
2530 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2531 			if (alloc_flags & ALLOC_NO_WATERMARKS)
2532 				goto try_this_zone;
2533 
2534 			if (IS_ENABLED(CONFIG_NUMA) &&
2535 					!did_zlc_setup && nr_online_nodes > 1) {
2536 				/*
2537 				 * we do zlc_setup if there are multiple nodes
2538 				 * and before considering the first zone allowed
2539 				 * by the cpuset.
2540 				 */
2541 				allowednodes = zlc_setup(zonelist, alloc_flags);
2542 				zlc_active = 1;
2543 				did_zlc_setup = 1;
2544 			}
2545 
2546 			if (zone_reclaim_mode == 0 ||
2547 			    !zone_allows_reclaim(ac->preferred_zone, zone))
2548 				goto this_zone_full;
2549 
2550 			/*
2551 			 * As we may have just activated ZLC, check if the first
2552 			 * eligible zone has failed zone_reclaim recently.
2553 			 */
2554 			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2555 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
2556 				continue;
2557 
2558 			ret = zone_reclaim(zone, gfp_mask, order);
2559 			switch (ret) {
2560 			case ZONE_RECLAIM_NOSCAN:
2561 				/* did not scan */
2562 				continue;
2563 			case ZONE_RECLAIM_FULL:
2564 				/* scanned but unreclaimable */
2565 				continue;
2566 			default:
2567 				/* did we reclaim enough */
2568 				if (zone_watermark_ok(zone, order, mark,
2569 						ac->classzone_idx, alloc_flags))
2570 					goto try_this_zone;
2571 
2572 				/*
2573 				 * Failed to reclaim enough to meet watermark.
2574 				 * Only mark the zone full if checking the min
2575 				 * watermark or if we failed to reclaim just
2576 				 * 1<<order pages or else the page allocator
2577 				 * fastpath will prematurely mark zones full
2578 				 * when the watermark is between the low and
2579 				 * min watermarks.
2580 				 */
2581 				if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
2582 				    ret == ZONE_RECLAIM_SOME)
2583 					goto this_zone_full;
2584 
2585 				continue;
2586 			}
2587 		}
2588 
2589 try_this_zone:
2590 		page = buffered_rmqueue(ac->preferred_zone, zone, order,
2591 						gfp_mask, ac->migratetype);
2592 		if (page) {
2593 			if (prep_new_page(page, order, gfp_mask, alloc_flags))
2594 				goto try_this_zone;
2595 			return page;
2596 		}
2597 this_zone_full:
2598 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2599 			zlc_mark_zone_full(zonelist, z);
2600 	}
2601 
2602 	/*
2603 	 * The first pass makes sure allocations are spread fairly within the
2604 	 * local node.  However, the local node might have free pages left
2605 	 * after the fairness batches are exhausted, and remote zones haven't
2606 	 * even been considered yet.  Try once more without fairness, and
2607 	 * include remote zones now, before entering the slowpath and waking
2608 	 * kswapd: prefer spilling to a remote zone over swapping locally.
2609 	 */
2610 	if (alloc_flags & ALLOC_FAIR) {
2611 		alloc_flags &= ~ALLOC_FAIR;
2612 		if (nr_fair_skipped) {
2613 			zonelist_rescan = true;
2614 			reset_alloc_batches(ac->preferred_zone);
2615 		}
2616 		if (nr_online_nodes > 1)
2617 			zonelist_rescan = true;
2618 	}
2619 
2620 	if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
2621 		/* Disable zlc cache for second zonelist scan */
2622 		zlc_active = 0;
2623 		zonelist_rescan = true;
2624 	}
2625 
2626 	if (zonelist_rescan)
2627 		goto zonelist_scan;
2628 
2629 	return NULL;
2630 }
2631 
2632 /*
2633  * Large machines with many possible nodes should not always dump per-node
2634  * meminfo in irq context.
2635  */
2636 static inline bool should_suppress_show_mem(void)
2637 {
2638 	bool ret = false;
2639 
2640 #if NODES_SHIFT > 8
2641 	ret = in_interrupt();
2642 #endif
2643 	return ret;
2644 }
2645 
2646 static DEFINE_RATELIMIT_STATE(nopage_rs,
2647 		DEFAULT_RATELIMIT_INTERVAL,
2648 		DEFAULT_RATELIMIT_BURST);
2649 
2650 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2651 {
2652 	unsigned int filter = SHOW_MEM_FILTER_NODES;
2653 
2654 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2655 	    debug_guardpage_minorder() > 0)
2656 		return;
2657 
2658 	/*
2659 	 * This documents exceptions given to allocations in certain
2660 	 * contexts that are allowed to allocate outside current's set
2661 	 * of allowed nodes.
2662 	 */
2663 	if (!(gfp_mask & __GFP_NOMEMALLOC))
2664 		if (test_thread_flag(TIF_MEMDIE) ||
2665 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
2666 			filter &= ~SHOW_MEM_FILTER_NODES;
2667 	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2668 		filter &= ~SHOW_MEM_FILTER_NODES;
2669 
2670 	if (fmt) {
2671 		struct va_format vaf;
2672 		va_list args;
2673 
2674 		va_start(args, fmt);
2675 
2676 		vaf.fmt = fmt;
2677 		vaf.va = &args;
2678 
2679 		pr_warn("%pV", &vaf);
2680 
2681 		va_end(args);
2682 	}
2683 
2684 	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2685 		current->comm, order, gfp_mask);
2686 
2687 	dump_stack();
2688 	if (!should_suppress_show_mem())
2689 		show_mem(filter);
2690 }
2691 
2692 static inline struct page *
2693 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2694 	const struct alloc_context *ac, unsigned long *did_some_progress)
2695 {
2696 	struct page *page;
2697 
2698 	*did_some_progress = 0;
2699 
2700 	/*
2701 	 * Acquire the oom lock.  If that fails, somebody else is
2702 	 * making progress for us.
2703 	 */
2704 	if (!mutex_trylock(&oom_lock)) {
2705 		*did_some_progress = 1;
2706 		schedule_timeout_uninterruptible(1);
2707 		return NULL;
2708 	}
2709 
2710 	/*
2711 	 * Go through the zonelist yet one more time, keep very high watermark
2712 	 * here, this is only to catch a parallel oom killing, we must fail if
2713 	 * we're still under heavy pressure.
2714 	 */
2715 	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2716 					ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
2717 	if (page)
2718 		goto out;
2719 
2720 	if (!(gfp_mask & __GFP_NOFAIL)) {
2721 		/* Coredumps can quickly deplete all memory reserves */
2722 		if (current->flags & PF_DUMPCORE)
2723 			goto out;
2724 		/* The OOM killer will not help higher order allocs */
2725 		if (order > PAGE_ALLOC_COSTLY_ORDER)
2726 			goto out;
2727 		/* The OOM killer does not needlessly kill tasks for lowmem */
2728 		if (ac->high_zoneidx < ZONE_NORMAL)
2729 			goto out;
2730 		/* The OOM killer does not compensate for IO-less reclaim */
2731 		if (!(gfp_mask & __GFP_FS)) {
2732 			/*
2733 			 * XXX: Page reclaim didn't yield anything,
2734 			 * and the OOM killer can't be invoked, but
2735 			 * keep looping as per tradition.
2736 			 */
2737 			*did_some_progress = 1;
2738 			goto out;
2739 		}
2740 		if (pm_suspended_storage())
2741 			goto out;
2742 		/* The OOM killer may not free memory on a specific node */
2743 		if (gfp_mask & __GFP_THISNODE)
2744 			goto out;
2745 	}
2746 	/* Exhausted what can be done so it's blamo time */
2747 	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
2748 			|| WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
2749 		*did_some_progress = 1;
2750 out:
2751 	mutex_unlock(&oom_lock);
2752 	return page;
2753 }
2754 
2755 #ifdef CONFIG_COMPACTION
2756 /* Try memory compaction for high-order allocations before reclaim */
2757 static struct page *
2758 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2759 		int alloc_flags, const struct alloc_context *ac,
2760 		enum migrate_mode mode, int *contended_compaction,
2761 		bool *deferred_compaction)
2762 {
2763 	unsigned long compact_result;
2764 	struct page *page;
2765 
2766 	if (!order)
2767 		return NULL;
2768 
2769 	current->flags |= PF_MEMALLOC;
2770 	compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2771 						mode, contended_compaction);
2772 	current->flags &= ~PF_MEMALLOC;
2773 
2774 	switch (compact_result) {
2775 	case COMPACT_DEFERRED:
2776 		*deferred_compaction = true;
2777 		/* fall-through */
2778 	case COMPACT_SKIPPED:
2779 		return NULL;
2780 	default:
2781 		break;
2782 	}
2783 
2784 	/*
2785 	 * At least in one zone compaction wasn't deferred or skipped, so let's
2786 	 * count a compaction stall
2787 	 */
2788 	count_vm_event(COMPACTSTALL);
2789 
2790 	page = get_page_from_freelist(gfp_mask, order,
2791 					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2792 
2793 	if (page) {
2794 		struct zone *zone = page_zone(page);
2795 
2796 		zone->compact_blockskip_flush = false;
2797 		compaction_defer_reset(zone, order, true);
2798 		count_vm_event(COMPACTSUCCESS);
2799 		return page;
2800 	}
2801 
2802 	/*
2803 	 * It's bad if a compaction run occurs and fails. The most likely reason
2804 	 * is that pages exist, but not enough to satisfy watermarks.
2805 	 */
2806 	count_vm_event(COMPACTFAIL);
2807 
2808 	cond_resched();
2809 
2810 	return NULL;
2811 }
2812 #else
2813 static inline struct page *
2814 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2815 		int alloc_flags, const struct alloc_context *ac,
2816 		enum migrate_mode mode, int *contended_compaction,
2817 		bool *deferred_compaction)
2818 {
2819 	return NULL;
2820 }
2821 #endif /* CONFIG_COMPACTION */
2822 
2823 /* Perform direct synchronous page reclaim */
2824 static int
2825 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
2826 					const struct alloc_context *ac)
2827 {
2828 	struct reclaim_state reclaim_state;
2829 	int progress;
2830 
2831 	cond_resched();
2832 
2833 	/* We now go into synchronous reclaim */
2834 	cpuset_memory_pressure_bump();
2835 	current->flags |= PF_MEMALLOC;
2836 	lockdep_set_current_reclaim_state(gfp_mask);
2837 	reclaim_state.reclaimed_slab = 0;
2838 	current->reclaim_state = &reclaim_state;
2839 
2840 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2841 								ac->nodemask);
2842 
2843 	current->reclaim_state = NULL;
2844 	lockdep_clear_current_reclaim_state();
2845 	current->flags &= ~PF_MEMALLOC;
2846 
2847 	cond_resched();
2848 
2849 	return progress;
2850 }
2851 
2852 /* The really slow allocator path where we enter direct reclaim */
2853 static inline struct page *
2854 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2855 		int alloc_flags, const struct alloc_context *ac,
2856 		unsigned long *did_some_progress)
2857 {
2858 	struct page *page = NULL;
2859 	bool drained = false;
2860 
2861 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
2862 	if (unlikely(!(*did_some_progress)))
2863 		return NULL;
2864 
2865 	/* After successful reclaim, reconsider all zones for allocation */
2866 	if (IS_ENABLED(CONFIG_NUMA))
2867 		zlc_clear_zones_full(ac->zonelist);
2868 
2869 retry:
2870 	page = get_page_from_freelist(gfp_mask, order,
2871 					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2872 
2873 	/*
2874 	 * If an allocation failed after direct reclaim, it could be because
2875 	 * pages are pinned on the per-cpu lists. Drain them and try again
2876 	 */
2877 	if (!page && !drained) {
2878 		drain_all_pages(NULL);
2879 		drained = true;
2880 		goto retry;
2881 	}
2882 
2883 	return page;
2884 }
2885 
2886 /*
2887  * This is called in the allocator slow-path if the allocation request is of
2888  * sufficient urgency to ignore watermarks and take other desperate measures
2889  */
2890 static inline struct page *
2891 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2892 				const struct alloc_context *ac)
2893 {
2894 	struct page *page;
2895 
2896 	do {
2897 		page = get_page_from_freelist(gfp_mask, order,
2898 						ALLOC_NO_WATERMARKS, ac);
2899 
2900 		if (!page && gfp_mask & __GFP_NOFAIL)
2901 			wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
2902 									HZ/50);
2903 	} while (!page && (gfp_mask & __GFP_NOFAIL));
2904 
2905 	return page;
2906 }
2907 
2908 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
2909 {
2910 	struct zoneref *z;
2911 	struct zone *zone;
2912 
2913 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2914 						ac->high_zoneidx, ac->nodemask)
2915 		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
2916 }
2917 
2918 static inline int
2919 gfp_to_alloc_flags(gfp_t gfp_mask)
2920 {
2921 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2922 	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2923 
2924 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2925 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2926 
2927 	/*
2928 	 * The caller may dip into page reserves a bit more if the caller
2929 	 * cannot run direct reclaim, or if the caller has realtime scheduling
2930 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2931 	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2932 	 */
2933 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2934 
2935 	if (atomic) {
2936 		/*
2937 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2938 		 * if it can't schedule.
2939 		 */
2940 		if (!(gfp_mask & __GFP_NOMEMALLOC))
2941 			alloc_flags |= ALLOC_HARDER;
2942 		/*
2943 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2944 		 * comment for __cpuset_node_allowed().
2945 		 */
2946 		alloc_flags &= ~ALLOC_CPUSET;
2947 	} else if (unlikely(rt_task(current)) && !in_interrupt())
2948 		alloc_flags |= ALLOC_HARDER;
2949 
2950 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2951 		if (gfp_mask & __GFP_MEMALLOC)
2952 			alloc_flags |= ALLOC_NO_WATERMARKS;
2953 		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2954 			alloc_flags |= ALLOC_NO_WATERMARKS;
2955 		else if (!in_interrupt() &&
2956 				((current->flags & PF_MEMALLOC) ||
2957 				 unlikely(test_thread_flag(TIF_MEMDIE))))
2958 			alloc_flags |= ALLOC_NO_WATERMARKS;
2959 	}
2960 #ifdef CONFIG_CMA
2961 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2962 		alloc_flags |= ALLOC_CMA;
2963 #endif
2964 	return alloc_flags;
2965 }
2966 
2967 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2968 {
2969 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2970 }
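
/*
 * Illustrative trace of gfp_to_alloc_flags() (assuming this kernel's
 * GFP_ATOMIC, which has __GFP_HIGH set and __GFP_WAIT clear): the
 * result starts as ALLOC_WMARK_MIN | ALLOC_CPUSET, gains ALLOC_HIGH
 * from __GFP_HIGH, gains ALLOC_HARDER because the request cannot enter
 * direct reclaim, and drops ALLOC_CPUSET, ending up as
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */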
2971 
2972 static inline struct page *
2973 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2974 						struct alloc_context *ac)
2975 {
2976 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2977 	struct page *page = NULL;
2978 	int alloc_flags;
2979 	unsigned long pages_reclaimed = 0;
2980 	unsigned long did_some_progress;
2981 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
2982 	bool deferred_compaction = false;
2983 	int contended_compaction = COMPACT_CONTENDED_NONE;
2984 
2985 	/*
2986 	 * In the slowpath, we sanity check order to avoid ever trying to
2987 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2988 	 * be using allocators in order of preference for an area that is
2989 	 * too large.
2990 	 */
2991 	if (order >= MAX_ORDER) {
2992 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2993 		return NULL;
2994 	}
2995 
2996 	/*
2997 	 * If this allocation cannot block and it is for a specific node, then
2998 	 * fail early.  There's no need to wakeup kswapd or retry for a
2999 	 * speculative node-specific allocation.
3000 	 */
3001 	if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
3002 		goto nopage;
3003 
3004 retry:
3005 	if (!(gfp_mask & __GFP_NO_KSWAPD))
3006 		wake_all_kswapds(order, ac);
3007 
3008 	/*
3009 	 * OK, we're below the kswapd watermark and have kicked background
3010 	 * reclaim. Now things get more complex, so set up alloc_flags according
3011 	 * to how we want to proceed.
3012 	 */
3013 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
3014 
3015 	/*
3016 	 * Find the true preferred zone if the allocation is unconstrained by
3017 	 * cpusets.
3018 	 */
3019 	if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
3020 		struct zoneref *preferred_zoneref;
3021 		preferred_zoneref = first_zones_zonelist(ac->zonelist,
3022 				ac->high_zoneidx, NULL, &ac->preferred_zone);
3023 		ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
3024 	}
3025 
3026 	/* This is the last chance, in general, before the goto nopage. */
3027 	page = get_page_from_freelist(gfp_mask, order,
3028 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
3029 	if (page)
3030 		goto got_pg;
3031 
3032 	/* Allocate without watermarks if the context allows */
3033 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
3034 		/*
3035 		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3036 		 * the allocation is high priority and this type of
3037 		 * allocation is system rather than user oriented.
3038 		 */
3039 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3040 
3041 		page = __alloc_pages_high_priority(gfp_mask, order, ac);
3042 
3043 		if (page) {
3044 			goto got_pg;
3045 		}
3046 	}
3047 
3048 	/* Atomic allocations - we can't balance anything */
3049 	if (!wait) {
3050 		/*
3051 		 * All existing users of the deprecated __GFP_NOFAIL are
3052 		 * blockable, so warn of any new users that actually allow this
3053 		 * type of allocation to fail.
3054 		 */
3055 		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
3056 		goto nopage;
3057 	}
3058 
3059 	/* Avoid recursion of direct reclaim */
3060 	if (current->flags & PF_MEMALLOC)
3061 		goto nopage;
3062 
3063 	/* Avoid allocations with no watermarks from looping endlessly */
3064 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3065 		goto nopage;
3066 
3067 	/*
3068 	 * Try direct compaction. The first pass is asynchronous. Subsequent
3069 	 * attempts after direct reclaim are synchronous
3070 	 */
3071 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3072 					migration_mode,
3073 					&contended_compaction,
3074 					&deferred_compaction);
3075 	if (page)
3076 		goto got_pg;
3077 
3078 	/* Checks for THP-specific high-order allocations */
3079 	if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
3080 		/*
3081 		 * If compaction is deferred for high-order allocations, it is
3082 		 * because sync compaction recently failed. If this is the case
3083 		 * and the caller requested a THP allocation, we do not want
3084 		 * to heavily disrupt the system, so we fail the allocation
3085 		 * instead of entering direct reclaim.
3086 		 */
3087 		if (deferred_compaction)
3088 			goto nopage;
3089 
3090 		/*
3091 		 * In all zones where compaction was attempted (and not
3092 		 * deferred or skipped), lock contention has been detected.
3093 		 * For THP allocation we do not want to disrupt the others
3094 		 * so we fallback to base pages instead.
3095 		 */
3096 		if (contended_compaction == COMPACT_CONTENDED_LOCK)
3097 			goto nopage;
3098 
3099 		/*
3100 		 * If compaction was aborted due to need_resched(), we do not
3101 		 * want to further increase allocation latency, unless it is
3102 		 * khugepaged trying to collapse.
3103 		 */
3104 		if (contended_compaction == COMPACT_CONTENDED_SCHED
3105 			&& !(current->flags & PF_KTHREAD))
3106 			goto nopage;
3107 	}
3108 
3109 	/*
3110 	 * It can become very expensive to allocate transparent hugepages at
3111 	 * fault, so use asynchronous memory compaction for THP unless it is
3112 	 * khugepaged trying to collapse.
3113 	 */
3114 	if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
3115 						(current->flags & PF_KTHREAD))
3116 		migration_mode = MIGRATE_SYNC_LIGHT;
3117 
3118 	/* Try direct reclaim and then allocating */
3119 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3120 							&did_some_progress);
3121 	if (page)
3122 		goto got_pg;
3123 
3124 	/* Do not loop if specifically requested */
3125 	if (gfp_mask & __GFP_NORETRY)
3126 		goto noretry;
3127 
3128 	/* Keep reclaiming pages as long as there is reasonable progress */
3129 	pages_reclaimed += did_some_progress;
3130 	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
3131 	    ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
3132 		/* Wait for some write requests to complete then retry */
3133 		wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
3134 		goto retry;
3135 	}
3136 
3137 	/* Reclaim has failed us, start killing things */
3138 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3139 	if (page)
3140 		goto got_pg;
3141 
3142 	/* Retry as long as the OOM killer is making progress */
3143 	if (did_some_progress)
3144 		goto retry;
3145 
3146 noretry:
3147 	/*
3148 	 * High-order allocations do not necessarily loop after
3149 	 * direct reclaim, and reclaim/compaction depends on compaction
3150 	 * being called after reclaim, so call it directly if necessary.
3151 	 */
3152 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3153 					    ac, migration_mode,
3154 					    &contended_compaction,
3155 					    &deferred_compaction);
3156 	if (page)
3157 		goto got_pg;
3158 nopage:
3159 	warn_alloc_failed(gfp_mask, order, NULL);
3160 got_pg:
3161 	return page;
3162 }
3163 
3164 /*
3165  * This is the 'heart' of the zoned buddy allocator.
3166  */
3167 struct page *
3168 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3169 			struct zonelist *zonelist, nodemask_t *nodemask)
3170 {
3171 	struct zoneref *preferred_zoneref;
3172 	struct page *page = NULL;
3173 	unsigned int cpuset_mems_cookie;
3174 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
3175 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
3176 	struct alloc_context ac = {
3177 		.high_zoneidx = gfp_zone(gfp_mask),
3178 		.nodemask = nodemask,
3179 		.migratetype = gfpflags_to_migratetype(gfp_mask),
3180 	};
3181 
3182 	gfp_mask &= gfp_allowed_mask;
3183 
3184 	lockdep_trace_alloc(gfp_mask);
3185 
3186 	might_sleep_if(gfp_mask & __GFP_WAIT);
3187 
3188 	if (should_fail_alloc_page(gfp_mask, order))
3189 		return NULL;
3190 
3191 	/*
3192 	 * Check the zones suitable for the gfp_mask contain at least one
3193 	 * valid zone. It's possible to have an empty zonelist as a result
3194 	 * of __GFP_THISNODE and a memoryless node
3195 	 */
3196 	if (unlikely(!zonelist->_zonerefs->zone))
3197 		return NULL;
3198 
3199 	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3200 		alloc_flags |= ALLOC_CMA;
3201 
3202 retry_cpuset:
3203 	cpuset_mems_cookie = read_mems_allowed_begin();
3204 
3205 	/* We set it here, as __alloc_pages_slowpath might have changed it */
3206 	ac.zonelist = zonelist;
3207 	/* The preferred zone is used for statistics later */
3208 	preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3209 				ac.nodemask ? : &cpuset_current_mems_allowed,
3210 				&ac.preferred_zone);
3211 	if (!ac.preferred_zone)
3212 		goto out;
3213 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
3214 
3215 	/* First allocation attempt */
3216 	alloc_mask = gfp_mask|__GFP_HARDWALL;
3217 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
3218 	if (unlikely(!page)) {
3219 		/*
3220 		 * Runtime PM, block IO and its error handling path
3221 		 * can deadlock because I/O on the device might not
3222 		 * complete.
3223 		 */
3224 		alloc_mask = memalloc_noio_flags(gfp_mask);
3225 
3226 		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3227 	}
3228 
3229 	if (kmemcheck_enabled && page)
3230 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3231 
3232 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3233 
3234 out:
3235 	/*
3236 	 * When updating a task's mems_allowed, it is possible to race with
3237 	 * parallel threads in such a way that an allocation can fail while
3238 	 * the mask is being updated. If a page allocation is about to fail,
3239 	 * check if the cpuset changed during allocation and if so, retry.
3240 	 */
3241 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
3242 		goto retry_cpuset;
3243 
3244 	return page;
3245 }
3246 EXPORT_SYMBOL(__alloc_pages_nodemask);
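
/*
 * Illustrative sketch (not part of the original file): most callers
 * reach __alloc_pages_nodemask() through wrappers such as
 * alloc_pages(), which supply the local node's zonelist and a NULL
 * nodemask.  The function name below is hypothetical; guarded out so
 * it has no effect on the build.
 */
#if 0
static void *example_alloc_two_pages(void)
{
	/* order-1: two physically contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	return page ? page_address(page) : NULL;
}
#endif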
3247 
3248 /*
3249  * Common helper functions.
3250  */
3251 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
3252 {
3253 	struct page *page;
3254 
3255 	/*
3256 	 * __get_free_pages() returns a 32-bit address, which cannot represent
3257 	 * a highmem page
3258 	 */
3259 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3260 
3261 	page = alloc_pages(gfp_mask, order);
3262 	if (!page)
3263 		return 0;
3264 	return (unsigned long) page_address(page);
3265 }
3266 EXPORT_SYMBOL(__get_free_pages);
3267 
3268 unsigned long get_zeroed_page(gfp_t gfp_mask)
3269 {
3270 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
3271 }
3272 EXPORT_SYMBOL(get_zeroed_page);
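
/*
 * Illustrative sketch (not part of the original file): the usual
 * pairing of the helpers above with free_pages().  Hypothetical
 * function name; guarded out so it has no effect on the build.
 */
#if 0
static int example_use_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the zero-filled page at (void *)addr ... */
	free_pages(addr, 0);
	return 0;
}
#endif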
3273 
3274 void __free_pages(struct page *page, unsigned int order)
3275 {
3276 	if (put_page_testzero(page)) {
3277 		if (order == 0)
3278 			free_hot_cold_page(page, false);
3279 		else
3280 			__free_pages_ok(page, order);
3281 	}
3282 }
3283 
3284 EXPORT_SYMBOL(__free_pages);
3285 
3286 void free_pages(unsigned long addr, unsigned int order)
3287 {
3288 	if (addr != 0) {
3289 		VM_BUG_ON(!virt_addr_valid((void *)addr));
3290 		__free_pages(virt_to_page((void *)addr), order);
3291 	}
3292 }
3293 
3294 EXPORT_SYMBOL(free_pages);
3295 
3296 /*
3297  * Page Fragment:
3298  *  An arbitrary-length arbitrary-offset area of memory which resides
3299  *  within a 0 or higher order page.  Multiple fragments within that page
3300  *  are individually refcounted, in the page's reference counter.
3301  *
3302  * The page_frag functions below provide a simple allocation framework for
3303  * page fragments.  This is used by the network stack and network device
3304  * drivers to provide a backing region of memory for use as either an
3305  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3306  */
3307 static struct page *__page_frag_refill(struct page_frag_cache *nc,
3308 				       gfp_t gfp_mask)
3309 {
3310 	struct page *page = NULL;
3311 	gfp_t gfp = gfp_mask;
3312 
3313 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3314 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3315 		    __GFP_NOMEMALLOC;
3316 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3317 				PAGE_FRAG_CACHE_MAX_ORDER);
3318 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3319 #endif
3320 	if (unlikely(!page))
3321 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3322 
3323 	nc->va = page ? page_address(page) : NULL;
3324 
3325 	return page;
3326 }
3327 
3328 void *__alloc_page_frag(struct page_frag_cache *nc,
3329 			unsigned int fragsz, gfp_t gfp_mask)
3330 {
3331 	unsigned int size = PAGE_SIZE;
3332 	struct page *page;
3333 	int offset;
3334 
3335 	if (unlikely(!nc->va)) {
3336 refill:
3337 		page = __page_frag_refill(nc, gfp_mask);
3338 		if (!page)
3339 			return NULL;
3340 
3341 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3342 		/* if size can vary use size else just use PAGE_SIZE */
3343 		size = nc->size;
3344 #endif
3345 		/* Even if we own the page, we do not use atomic_set().
3346 		 * This would break get_page_unless_zero() users.
3347 		 */
3348 		atomic_add(size - 1, &page->_count);
3349 
3350 		/* reset page count bias and offset to start of new frag */
3351 		nc->pfmemalloc = page_is_pfmemalloc(page);
3352 		nc->pagecnt_bias = size;
3353 		nc->offset = size;
3354 	}
3355 
3356 	offset = nc->offset - fragsz;
3357 	if (unlikely(offset < 0)) {
3358 		page = virt_to_page(nc->va);
3359 
3360 		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3361 			goto refill;
3362 
3363 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3364 		/* if size can vary use size else just use PAGE_SIZE */
3365 		size = nc->size;
3366 #endif
3367 		/* OK, page count is 0, we can safely set it */
3368 		atomic_set(&page->_count, size);
3369 
3370 		/* reset page count bias and offset to start of new frag */
3371 		nc->pagecnt_bias = size;
3372 		offset = size - fragsz;
3373 	}
3374 
3375 	nc->pagecnt_bias--;
3376 	nc->offset = offset;
3377 
3378 	return nc->va + offset;
3379 }
3380 EXPORT_SYMBOL(__alloc_page_frag);
3381 
3382 /*
3383  * Frees a page fragment allocated out of either a compound or order 0 page.
3384  */
3385 void __free_page_frag(void *addr)
3386 {
3387 	struct page *page = virt_to_head_page(addr);
3388 
3389 	if (unlikely(put_page_testzero(page)))
3390 		__free_pages_ok(page, compound_order(page));
3391 }
3392 EXPORT_SYMBOL(__free_page_frag);
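
/*
 * Illustrative sketch (not part of the original file): how a network
 * driver might carve small receive buffers out of a page_frag_cache
 * and release them again.  The cache, sizes and function names are
 * hypothetical; guarded out so it has no effect on the build.
 */
#if 0
static struct page_frag_cache example_frag_cache;

static void *example_rx_buf_alloc(unsigned int len)
{
	/* successive calls share one backing page until it is exhausted */
	return __alloc_page_frag(&example_frag_cache, len, GFP_ATOMIC);
}

static void example_rx_buf_free(void *buf)
{
	__free_page_frag(buf);
}
#endif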
3393 
3394 /*
3395  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
3396  * of the current memory cgroup.
3397  *
3398  * It should be used when the caller would like to use kmalloc, but since the
3399  * allocation is large, it has to fall back to the page allocator.
3400  */
3401 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3402 {
3403 	struct page *page;
3404 	struct mem_cgroup *memcg = NULL;
3405 
3406 	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3407 		return NULL;
3408 	page = alloc_pages(gfp_mask, order);
3409 	memcg_kmem_commit_charge(page, memcg, order);
3410 	return page;
3411 }
3412 
3413 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3414 {
3415 	struct page *page;
3416 	struct mem_cgroup *memcg = NULL;
3417 
3418 	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3419 		return NULL;
3420 	page = alloc_pages_node(nid, gfp_mask, order);
3421 	memcg_kmem_commit_charge(page, memcg, order);
3422 	return page;
3423 }
3424 
3425 /*
3426  * __free_kmem_pages and free_kmem_pages will free pages allocated with
3427  * alloc_kmem_pages.
3428  */
3429 void __free_kmem_pages(struct page *page, unsigned int order)
3430 {
3431 	memcg_kmem_uncharge_pages(page, order);
3432 	__free_pages(page, order);
3433 }
3434 
3435 void free_kmem_pages(unsigned long addr, unsigned int order)
3436 {
3437 	if (addr != 0) {
3438 		VM_BUG_ON(!virt_addr_valid((void *)addr));
3439 		__free_kmem_pages(virt_to_page((void *)addr), order);
3440 	}
3441 }
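
/*
 * Illustrative sketch (not part of the original file): pairing
 * alloc_kmem_pages() with free_kmem_pages() so the memcg kmem charge
 * is released together with the pages.  Hypothetical function names;
 * guarded out so it has no effect on the build.
 */
#if 0
static unsigned long example_kmem_buffer(void)
{
	struct page *page = alloc_kmem_pages(GFP_KERNEL, 2);

	return page ? (unsigned long)page_address(page) : 0;
}

static void example_kmem_buffer_free(unsigned long addr)
{
	free_kmem_pages(addr, 2);
}
#endif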
3442 
3443 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
3444 {
3445 	if (addr) {
3446 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
3447 		unsigned long used = addr + PAGE_ALIGN(size);
3448 
3449 		split_page(virt_to_page((void *)addr), order);
3450 		while (used < alloc_end) {
3451 			free_page(used);
3452 			used += PAGE_SIZE;
3453 		}
3454 	}
3455 	return (void *)addr;
3456 }
3457 
3458 /**
3459  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3460  * @size: the number of bytes to allocate
3461  * @gfp_mask: GFP flags for the allocation
3462  *
3463  * This function is similar to alloc_pages(), except that it allocates the
3464  * minimum number of pages to satisfy the request.  alloc_pages() can only
3465  * allocate memory in power-of-two pages.
3466  *
3467  * This function is also limited by MAX_ORDER.
3468  *
3469  * Memory allocated by this function must be released by free_pages_exact().
3470  */
3471 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3472 {
3473 	unsigned int order = get_order(size);
3474 	unsigned long addr;
3475 
3476 	addr = __get_free_pages(gfp_mask, order);
3477 	return make_alloc_exact(addr, order, size);
3478 }
3479 EXPORT_SYMBOL(alloc_pages_exact);
3480 
3481 /**
3482  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3483  *			   pages on a node.
3484  * @nid: the preferred node ID where memory should be allocated
3485  * @size: the number of bytes to allocate
3486  * @gfp_mask: GFP flags for the allocation
3487  *
3488  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3489  * back.
3490  * Note this is not alloc_pages_exact_node() which allocates on a specific node,
3491  * but is not exact.
3492  */
3493 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3494 {
3495 	unsigned order = get_order(size);
3496 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
3497 	if (!p)
3498 		return NULL;
3499 	return make_alloc_exact((unsigned long)page_address(p), order, size);
3500 }
3501 
3502 /**
3503  * free_pages_exact - release memory allocated via alloc_pages_exact()
3504  * @virt: the value returned by alloc_pages_exact.
3505  * @size: size of allocation, same value as passed to alloc_pages_exact().
3506  *
3507  * Release the memory allocated by a previous call to alloc_pages_exact.
3508  */
3509 void free_pages_exact(void *virt, size_t size)
3510 {
3511 	unsigned long addr = (unsigned long)virt;
3512 	unsigned long end = addr + PAGE_ALIGN(size);
3513 
3514 	while (addr < end) {
3515 		free_page(addr);
3516 		addr += PAGE_SIZE;
3517 	}
3518 }
3519 EXPORT_SYMBOL(free_pages_exact);
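
/*
 * Illustrative sketch (not part of the original file): allocating a
 * buffer that is not a power-of-two number of pages.  With 4 KB pages,
 * 40 KB rounds up to an order-4 (64 KB) allocation and the unused
 * 24 KB tail is returned immediately.  Hypothetical function names;
 * guarded out so it has no effect on the build.
 */
#if 0
static void *example_exact_alloc(void)
{
	return alloc_pages_exact(40 * 1024, GFP_KERNEL);
}

static void example_exact_free(void *buf)
{
	free_pages_exact(buf, 40 * 1024);
}
#endif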
3520 
3521 /**
3522  * nr_free_zone_pages - count number of pages beyond high watermark
3523  * @offset: The zone index of the highest zone
3524  *
3525  * nr_free_zone_pages() counts the number of pages which are beyond the
3526  * high watermark within all zones at or below a given zone index.  For each
3527  * zone, the number of pages is calculated as:
3528  *     managed_pages - high_pages
3529  */
3530 static unsigned long nr_free_zone_pages(int offset)
3531 {
3532 	struct zoneref *z;
3533 	struct zone *zone;
3534 
3535 	/* Just pick one node, since fallback list is circular */
3536 	unsigned long sum = 0;
3537 
3538 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3539 
3540 	for_each_zone_zonelist(zone, z, zonelist, offset) {
3541 		unsigned long size = zone->managed_pages;
3542 		unsigned long high = high_wmark_pages(zone);
3543 		if (size > high)
3544 			sum += size - high;
3545 	}
3546 
3547 	return sum;
3548 }
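
/*
 * Illustration: with two zones at or below @offset, one with 100000
 * managed pages and a high watermark of 2000 and another with 50000
 * managed pages and a high watermark of 1000, the result is
 * (100000 - 2000) + (50000 - 1000) = 147000 pages.
 */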
3549 
3550 /**
3551  * nr_free_buffer_pages - count number of pages beyond high watermark
3552  *
3553  * nr_free_buffer_pages() counts the number of pages which are beyond the high
3554  * watermark within ZONE_DMA and ZONE_NORMAL.
3555  */
3556 unsigned long nr_free_buffer_pages(void)
3557 {
3558 	return nr_free_zone_pages(gfp_zone(GFP_USER));
3559 }
3560 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3561 
3562 /**
3563  * nr_free_pagecache_pages - count number of pages beyond high watermark
3564  *
3565  * nr_free_pagecache_pages() counts the number of pages which are beyond the
3566  * high watermark within all zones.
3567  */
3568 unsigned long nr_free_pagecache_pages(void)
3569 {
3570 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
3571 }
3572 
3573 static inline void show_node(struct zone *zone)
3574 {
3575 	if (IS_ENABLED(CONFIG_NUMA))
3576 		printk("Node %d ", zone_to_nid(zone));
3577 }
3578 
3579 void si_meminfo(struct sysinfo *val)
3580 {
3581 	val->totalram = totalram_pages;
3582 	val->sharedram = global_page_state(NR_SHMEM);
3583 	val->freeram = global_page_state(NR_FREE_PAGES);
3584 	val->bufferram = nr_blockdev_pages();
3585 	val->totalhigh = totalhigh_pages;
3586 	val->freehigh = nr_free_highpages();
3587 	val->mem_unit = PAGE_SIZE;
3588 }
3589 
3590 EXPORT_SYMBOL(si_meminfo);
3591 
3592 #ifdef CONFIG_NUMA
3593 void si_meminfo_node(struct sysinfo *val, int nid)
3594 {
3595 	int zone_type;		/* needs to be signed */
3596 	unsigned long managed_pages = 0;
3597 	pg_data_t *pgdat = NODE_DATA(nid);
3598 
3599 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3600 		managed_pages += pgdat->node_zones[zone_type].managed_pages;
3601 	val->totalram = managed_pages;
3602 	val->sharedram = node_page_state(nid, NR_SHMEM);
3603 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
3604 #ifdef CONFIG_HIGHMEM
3605 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
3606 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3607 			NR_FREE_PAGES);
3608 #else
3609 	val->totalhigh = 0;
3610 	val->freehigh = 0;
3611 #endif
3612 	val->mem_unit = PAGE_SIZE;
3613 }
3614 #endif
3615 
3616 /*
3617  * Determine whether the node should be displayed or not, depending on whether
3618  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
3619  */
3620 bool skip_free_areas_node(unsigned int flags, int nid)
3621 {
3622 	bool ret = false;
3623 	unsigned int cpuset_mems_cookie;
3624 
3625 	if (!(flags & SHOW_MEM_FILTER_NODES))
3626 		goto out;
3627 
3628 	do {
3629 		cpuset_mems_cookie = read_mems_allowed_begin();
3630 		ret = !node_isset(nid, cpuset_current_mems_allowed);
3631 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
3632 out:
3633 	return ret;
3634 }
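
/*
 * The read_mems_allowed_begin()/read_mems_allowed_retry() pair above is
 * a seqcount-style loop: if the task's cpuset mems_allowed changes
 * while it is being sampled, the node test is simply redone against the
 * updated mask.
 */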
3635 
3636 #define K(x) ((x) << (PAGE_SHIFT-10))
3637 
3638 static void show_migration_types(unsigned char type)
3639 {
3640 	static const char types[MIGRATE_TYPES] = {
3641 		[MIGRATE_UNMOVABLE]	= 'U',
3642 		[MIGRATE_RECLAIMABLE]	= 'E',
3643 		[MIGRATE_MOVABLE]	= 'M',
3644 		[MIGRATE_RESERVE]	= 'R',
3645 #ifdef CONFIG_CMA
3646 		[MIGRATE_CMA]		= 'C',
3647 #endif
3648 #ifdef CONFIG_MEMORY_ISOLATION
3649 		[MIGRATE_ISOLATE]	= 'I',
3650 #endif
3651 	};
3652 	char tmp[MIGRATE_TYPES + 1];
3653 	char *p = tmp;
3654 	int i;
3655 
3656 	for (i = 0; i < MIGRATE_TYPES; i++) {
3657 		if (type & (1 << i))
3658 			*p++ = types[i];
3659 	}
3660 
3661 	*p = '\0';
3662 	printk("(%s) ", tmp);
3663 }
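
/*
 * Example: a @type bitmask with the MIGRATE_UNMOVABLE and
 * MIGRATE_MOVABLE bits set prints "(UM) ", one letter for each
 * migratetype whose free list is non-empty at that order.
 */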
3664 
3665 /*
3666  * Show free area list (used inside shift_scroll-lock stuff)
3667  * We also calculate the percentage fragmentation. We do this by counting the
3668  * memory on each free list with the exception of the first item on the list.
3669  *
3670  * Bits in @filter:
3671  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3672  *   cpuset.
3673  */
3674 void show_free_areas(unsigned int filter)
3675 {
3676 	unsigned long free_pcp = 0;
3677 	int cpu;
3678 	struct zone *zone;
3679 
3680 	for_each_populated_zone(zone) {
3681 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3682 			continue;
3683 
3684 		for_each_online_cpu(cpu)
3685 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3686 	}
3687 
3688 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3689 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
3690 		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3691 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
3692 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3693 		" free:%lu free_pcp:%lu free_cma:%lu\n",
3694 		global_page_state(NR_ACTIVE_ANON),
3695 		global_page_state(NR_INACTIVE_ANON),
3696 		global_page_state(NR_ISOLATED_ANON),
3697 		global_page_state(NR_ACTIVE_FILE),
3698 		global_page_state(NR_INACTIVE_FILE),
3699 		global_page_state(NR_ISOLATED_FILE),
3700 		global_page_state(NR_UNEVICTABLE),
3701 		global_page_state(NR_FILE_DIRTY),
3702 		global_page_state(NR_WRITEBACK),
3703 		global_page_state(NR_UNSTABLE_NFS),
3704 		global_page_state(NR_SLAB_RECLAIMABLE),
3705 		global_page_state(NR_SLAB_UNRECLAIMABLE),
3706 		global_page_state(NR_FILE_MAPPED),
3707 		global_page_state(NR_SHMEM),
3708 		global_page_state(NR_PAGETABLE),
3709 		global_page_state(NR_BOUNCE),
3710 		global_page_state(NR_FREE_PAGES),
3711 		free_pcp,
3712 		global_page_state(NR_FREE_CMA_PAGES));
3713 
3714 	for_each_populated_zone(zone) {
3715 		int i;
3716 
3717 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3718 			continue;
3719 
3720 		free_pcp = 0;
3721 		for_each_online_cpu(cpu)
3722 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3723 
3724 		show_node(zone);
3725 		printk("%s"
3726 			" free:%lukB"
3727 			" min:%lukB"
3728 			" low:%lukB"
3729 			" high:%lukB"
3730 			" active_anon:%lukB"
3731 			" inactive_anon:%lukB"
3732 			" active_file:%lukB"
3733 			" inactive_file:%lukB"
3734 			" unevictable:%lukB"
3735 			" isolated(anon):%lukB"
3736 			" isolated(file):%lukB"
3737 			" present:%lukB"
3738 			" managed:%lukB"
3739 			" mlocked:%lukB"
3740 			" dirty:%lukB"
3741 			" writeback:%lukB"
3742 			" mapped:%lukB"
3743 			" shmem:%lukB"
3744 			" slab_reclaimable:%lukB"
3745 			" slab_unreclaimable:%lukB"
3746 			" kernel_stack:%lukB"
3747 			" pagetables:%lukB"
3748 			" unstable:%lukB"
3749 			" bounce:%lukB"
3750 			" free_pcp:%lukB"
3751 			" local_pcp:%ukB"
3752 			" free_cma:%lukB"
3753 			" writeback_tmp:%lukB"
3754 			" pages_scanned:%lu"
3755 			" all_unreclaimable? %s"
3756 			"\n",
3757 			zone->name,
3758 			K(zone_page_state(zone, NR_FREE_PAGES)),
3759 			K(min_wmark_pages(zone)),
3760 			K(low_wmark_pages(zone)),
3761 			K(high_wmark_pages(zone)),
3762 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
3763 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
3764 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
3765 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
3766 			K(zone_page_state(zone, NR_UNEVICTABLE)),
3767 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
3768 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
3769 			K(zone->present_pages),
3770 			K(zone->managed_pages),
3771 			K(zone_page_state(zone, NR_MLOCK)),
3772 			K(zone_page_state(zone, NR_FILE_DIRTY)),
3773 			K(zone_page_state(zone, NR_WRITEBACK)),
3774 			K(zone_page_state(zone, NR_FILE_MAPPED)),
3775 			K(zone_page_state(zone, NR_SHMEM)),
3776 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3777 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3778 			zone_page_state(zone, NR_KERNEL_STACK) *
3779 				THREAD_SIZE / 1024,
3780 			K(zone_page_state(zone, NR_PAGETABLE)),
3781 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3782 			K(zone_page_state(zone, NR_BOUNCE)),
3783 			K(free_pcp),
3784 			K(this_cpu_read(zone->pageset->pcp.count)),
3785 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3786 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3787 			K(zone_page_state(zone, NR_PAGES_SCANNED)),
3788 			(!zone_reclaimable(zone) ? "yes" : "no")
3789 			);
3790 		printk("lowmem_reserve[]:");
3791 		for (i = 0; i < MAX_NR_ZONES; i++)
3792 			printk(" %ld", zone->lowmem_reserve[i]);
3793 		printk("\n");
3794 	}
3795 
3796 	for_each_populated_zone(zone) {
3797 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
3798 		unsigned char types[MAX_ORDER];
3799 
3800 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3801 			continue;
3802 		show_node(zone);
3803 		printk("%s: ", zone->name);
3804 
3805 		spin_lock_irqsave(&zone->lock, flags);
3806 		for (order = 0; order < MAX_ORDER; order++) {
3807 			struct free_area *area = &zone->free_area[order];
3808 			int type;
3809 
3810 			nr[order] = area->nr_free;
3811 			total += nr[order] << order;
3812 
3813 			types[order] = 0;
3814 			for (type = 0; type < MIGRATE_TYPES; type++) {
3815 				if (!list_empty(&area->free_list[type]))
3816 					types[order] |= 1 << type;
3817 			}
3818 		}
3819 		spin_unlock_irqrestore(&zone->lock, flags);
3820 		for (order = 0; order < MAX_ORDER; order++) {
3821 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
3822 			if (nr[order])
3823 				show_migration_types(types[order]);
3824 		}
3825 		printk("= %lukB\n", K(total));
3826 	}
3827 
3828 	hugetlb_show_meminfo();
3829 
3830 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3831 
3832 	show_swap_cache_info();
3833 }
3834 
3835 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3836 {
3837 	zoneref->zone = zone;
3838 	zoneref->zone_idx = zone_idx(zone);
3839 }
3840 
3841 /*
3842  * Builds allocation fallback zone lists.
3843  *
3844  * Add all populated zones of a node to the zonelist.
3845  */
3846 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3847 				int nr_zones)
3848 {
3849 	struct zone *zone;
3850 	enum zone_type zone_type = MAX_NR_ZONES;
3851 
3852 	do {
3853 		zone_type--;
3854 		zone = pgdat->node_zones + zone_type;
3855 		if (populated_zone(zone)) {
3856 			zoneref_set_zone(zone,
3857 				&zonelist->_zonerefs[nr_zones++]);
3858 			check_highest_zone(zone_type);
3859 		}
3860 	} while (zone_type);
3861 
3862 	return nr_zones;
3863 }
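
/*
 * Illustration: for a node with populated DMA, Normal and HighMem
 * zones, the loop above appends zonerefs in the order HighMem, Normal,
 * DMA, so allocations spill into the more precious low zones only when
 * the higher ones are exhausted.
 */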
3864 
3865 
3866 /*
3867  *  zonelist_order:
3868  *  0 = automatic detection of better ordering.
3869  *  1 = order by ([node] distance, -zonetype)
3870  *  2 = order by (-zonetype, [node] distance)
3871  *
3872  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3873  *  the same zonelist. So only NUMA can configure this param.
3874  */
3875 #define ZONELIST_ORDER_DEFAULT  0
3876 #define ZONELIST_ORDER_NODE     1
3877 #define ZONELIST_ORDER_ZONE     2
3878 
3879 /* zonelist order in the kernel.
3880  * set_zonelist_order() will set this to NODE or ZONE.
3881  */
3882 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3883 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3884 
3885 
3886 #ifdef CONFIG_NUMA
3887 /* The ordering the user specified, via boot parameter or sysctl */
3888 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3889 /* string for sysctl */
3890 #define NUMA_ZONELIST_ORDER_LEN	16
3891 char numa_zonelist_order[16] = "default";
3892 
3893 /*
3894  * interface for configuring zonelist ordering.
3895  * command line option "numa_zonelist_order"
3896  *	= "[dD]efault"	- default, automatic configuration.
3897  *	= "[nN]ode"	- order by node locality, then by zone within node
3898  *	= "[zZ]one"	- order by zone, then by locality within zone
3899  */
3900 
3901 static int __parse_numa_zonelist_order(char *s)
3902 {
3903 	if (*s == 'd' || *s == 'D') {
3904 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3905 	} else if (*s == 'n' || *s == 'N') {
3906 		user_zonelist_order = ZONELIST_ORDER_NODE;
3907 	} else if (*s == 'z' || *s == 'Z') {
3908 		user_zonelist_order = ZONELIST_ORDER_ZONE;
3909 	} else {
3910 		printk(KERN_WARNING
3911 			"Ignoring invalid numa_zonelist_order value:  "
3912 			"%s\n", s);
3913 		return -EINVAL;
3914 	}
3915 	return 0;
3916 }
3917 
3918 static __init int setup_numa_zonelist_order(char *s)
3919 {
3920 	int ret;
3921 
3922 	if (!s)
3923 		return 0;
3924 
3925 	ret = __parse_numa_zonelist_order(s);
3926 	if (ret == 0)
3927 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3928 
3929 	return ret;
3930 }
3931 early_param("numa_zonelist_order", setup_numa_zonelist_order);
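
/*
 * Boot-time usage example: passing "numa_zonelist_order=zone" on the
 * kernel command line selects ZONELIST_ORDER_ZONE.  The same knob is
 * also exposed at runtime via the numa_zonelist_order sysctl handled
 * below.
 */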
3932 
3933 /*
3934  * sysctl handler for numa_zonelist_order
3935  */
3936 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3937 		void __user *buffer, size_t *length,
3938 		loff_t *ppos)
3939 {
3940 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
3941 	int ret;
3942 	static DEFINE_MUTEX(zl_order_mutex);
3943 
3944 	mutex_lock(&zl_order_mutex);
3945 	if (write) {
3946 		if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3947 			ret = -EINVAL;
3948 			goto out;
3949 		}
3950 		strcpy(saved_string, (char *)table->data);
3951 	}
3952 	ret = proc_dostring(table, write, buffer, length, ppos);
3953 	if (ret)
3954 		goto out;
3955 	if (write) {
3956 		int oldval = user_zonelist_order;
3957 
3958 		ret = __parse_numa_zonelist_order((char *)table->data);
3959 		if (ret) {
3960 			/*
3961 			 * bogus value.  restore saved string
3962 			 */
3963 			strncpy((char *)table->data, saved_string,
3964 				NUMA_ZONELIST_ORDER_LEN);
3965 			user_zonelist_order = oldval;
3966 		} else if (oldval != user_zonelist_order) {
3967 			mutex_lock(&zonelists_mutex);
3968 			build_all_zonelists(NULL, NULL);
3969 			mutex_unlock(&zonelists_mutex);
3970 		}
3971 	}
3972 out:
3973 	mutex_unlock(&zl_order_mutex);
3974 	return ret;
3975 }
3976 
3977 
3978 #define MAX_NODE_LOAD (nr_online_nodes)
3979 static int node_load[MAX_NUMNODES];
3980 
3981 /**
3982  * find_next_best_node - find the next node that should appear in a given node's fallback list
3983  * @node: node whose fallback list we're appending
3984  * @used_node_mask: nodemask_t of already used nodes
3985  *
3986  * We use a number of factors to determine which is the next node that should
3987  * appear on a given node's fallback list.  The node should not have appeared
3988  * already in @node's fallback list, and it should be the next closest node
3989  * according to the distance array (which contains arbitrary distance values
3990  * from each node to each node in the system), and should also prefer nodes
3991  * with no CPUs, since presumably they'll have very little allocation pressure
3992  * on them otherwise.
3993  * It returns -1 if no node is found.
3994  */
3995 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3996 {
3997 	int n, val;
3998 	int min_val = INT_MAX;
3999 	int best_node = NUMA_NO_NODE;
4000 	const struct cpumask *tmp = cpumask_of_node(0);
4001 
4002 	/* Use the local node if we haven't already */
4003 	if (!node_isset(node, *used_node_mask)) {
4004 		node_set(node, *used_node_mask);
4005 		return node;
4006 	}
4007 
4008 	for_each_node_state(n, N_MEMORY) {
4009 
4010 		/* Don't want a node to appear more than once */
4011 		if (node_isset(n, *used_node_mask))
4012 			continue;
4013 
4014 		/* Use the distance array to find the distance */
4015 		val = node_distance(node, n);
4016 
4017 		/* Penalize nodes under us ("prefer the next node") */
4018 		val += (n < node);
4019 
4020 		/* Give preference to headless and unused nodes */
4021 		tmp = cpumask_of_node(n);
4022 		if (!cpumask_empty(tmp))
4023 			val += PENALTY_FOR_NODE_WITH_CPUS;
4024 
4025 		/* Slight preference for less loaded node */
4026 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4027 		val += node_load[n];
4028 
4029 		if (val < min_val) {
4030 			min_val = val;
4031 			best_node = n;
4032 		}
4033 	}
4034 
4035 	if (best_node >= 0)
4036 		node_set(best_node, *used_node_mask);
4037 
4038 	return best_node;
4039 }
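
/*
 * Note on the scoring above: the distance-based value (including the
 * small CPU and position penalties) is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is added, so
 * node_load[] can only break ties between otherwise equally attractive
 * candidates; it can never override a genuinely closer node.
 */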
4040 
4041 
4042 /*
4043  * Build zonelists ordered by node and zones within node.
4044  * This results in maximum locality--normal zone overflows into local
4045  * DMA zone, if any--but risks exhausting DMA zone.
4046  */
4047 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4048 {
4049 	int j;
4050 	struct zonelist *zonelist;
4051 
4052 	zonelist = &pgdat->node_zonelists[0];
4053 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4054 		;
4055 	j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4056 	zonelist->_zonerefs[j].zone = NULL;
4057 	zonelist->_zonerefs[j].zone_idx = 0;
4058 }
4059 
4060 /*
4061  * Build gfp_thisnode zonelists
4062  */
4063 static void build_thisnode_zonelists(pg_data_t *pgdat)
4064 {
4065 	int j;
4066 	struct zonelist *zonelist;
4067 
4068 	zonelist = &pgdat->node_zonelists[1];
4069 	j = build_zonelists_node(pgdat, zonelist, 0);
4070 	zonelist->_zonerefs[j].zone = NULL;
4071 	zonelist->_zonerefs[j].zone_idx = 0;
4072 }
4073 
4074 /*
4075  * Build zonelists ordered by zone and nodes within zones.
4076  * This results in conserving DMA zone[s] until all Normal memory is
4077  * exhausted, but results in overflowing to remote node while memory
4078  * may still exist in local DMA zone.
4079  */
4080 static int node_order[MAX_NUMNODES];
4081 
4082 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4083 {
4084 	int pos, j, node;
4085 	int zone_type;		/* needs to be signed */
4086 	struct zone *z;
4087 	struct zonelist *zonelist;
4088 
4089 	zonelist = &pgdat->node_zonelists[0];
4090 	pos = 0;
4091 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4092 		for (j = 0; j < nr_nodes; j++) {
4093 			node = node_order[j];
4094 			z = &NODE_DATA(node)->node_zones[zone_type];
4095 			if (populated_zone(z)) {
4096 				zoneref_set_zone(z,
4097 					&zonelist->_zonerefs[pos++]);
4098 				check_highest_zone(zone_type);
4099 			}
4100 		}
4101 	}
4102 	zonelist->_zonerefs[pos].zone = NULL;
4103 	zonelist->_zonerefs[pos].zone_idx = 0;
4104 }
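
/*
 * Illustration: with node_order = {0, 1} and Normal plus DMA populated
 * on both nodes, the resulting zonelist is Normal(0), Normal(1),
 * DMA(0), DMA(1): every node's instance of a zone type is tried before
 * falling back to the next-lower zone type anywhere.
 */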
4105 
4106 #if defined(CONFIG_64BIT)
4107 /*
4108  * Devices that require DMA32/DMA are relatively rare and do not justify a
4109  * penalty to every machine for the rare case where it applies. Default
4110  * to Node-ordering on 64-bit NUMA machines.
4111  */
4112 static int default_zonelist_order(void)
4113 {
4114 	return ZONELIST_ORDER_NODE;
4115 }
4116 #else
4117 /*
4118  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4119  * by the kernel. If processes running on node 0 deplete the low memory zone
4120  * then reclaim will occur more frequently, increasing stalls and making
4121  * an OOM more likely if a large percentage of the zone is under writeback or
4122  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4123  * Hence, default to zone ordering on 32-bit.
4124  */
4125 static int default_zonelist_order(void)
4126 {
4127 	return ZONELIST_ORDER_ZONE;
4128 }
4129 #endif /* CONFIG_64BIT */
4130 
4131 static void set_zonelist_order(void)
4132 {
4133 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4134 		current_zonelist_order = default_zonelist_order();
4135 	else
4136 		current_zonelist_order = user_zonelist_order;
4137 }
4138 
4139 static void build_zonelists(pg_data_t *pgdat)
4140 {
4141 	int j, node, load;
4142 	enum zone_type i;
4143 	nodemask_t used_mask;
4144 	int local_node, prev_node;
4145 	struct zonelist *zonelist;
4146 	int order = current_zonelist_order;
4147 
4148 	/* initialize zonelists */
4149 	for (i = 0; i < MAX_ZONELISTS; i++) {
4150 		zonelist = pgdat->node_zonelists + i;
4151 		zonelist->_zonerefs[0].zone = NULL;
4152 		zonelist->_zonerefs[0].zone_idx = 0;
4153 	}
4154 
4155 	/* NUMA-aware ordering of nodes */
4156 	local_node = pgdat->node_id;
4157 	load = nr_online_nodes;
4158 	prev_node = local_node;
4159 	nodes_clear(used_mask);
4160 
4161 	memset(node_order, 0, sizeof(node_order));
4162 	j = 0;
4163 
4164 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4165 		/*
4166 		 * We don't want to pressure a particular node.
4167 		 * So add a penalty to the first node in the same
4168 		 * distance group to make it round-robin.
4169 		 */
4170 		if (node_distance(local_node, node) !=
4171 		    node_distance(local_node, prev_node))
4172 			node_load[node] = load;
4173 
4174 		prev_node = node;
4175 		load--;
4176 		if (order == ZONELIST_ORDER_NODE)
4177 			build_zonelists_in_node_order(pgdat, node);
4178 		else
4179 			node_order[j++] = node;	/* remember order */
4180 	}
4181 
4182 	if (order == ZONELIST_ORDER_ZONE) {
4183 		/* calculate node order -- i.e., DMA last! */
4184 		build_zonelists_in_zone_order(pgdat, j);
4185 	}
4186 
4187 	build_thisnode_zonelists(pgdat);
4188 }
4189 
4190 /* Construct the zonelist performance cache - see further mmzone.h */
4191 static void build_zonelist_cache(pg_data_t *pgdat)
4192 {
4193 	struct zonelist *zonelist;
4194 	struct zonelist_cache *zlc;
4195 	struct zoneref *z;
4196 
4197 	zonelist = &pgdat->node_zonelists[0];
4198 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
4199 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
4200 	for (z = zonelist->_zonerefs; z->zone; z++)
4201 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
4202 }
4203 
4204 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
4205 /*
4206  * Return node id of node used for "local" allocations.
4207  * I.e., first node id of first zone in arg node's generic zonelist.
4208  * Used for initializing percpu 'numa_mem', which is used primarily
4209  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4210  */
4211 int local_memory_node(int node)
4212 {
4213 	struct zone *zone;
4214 
4215 	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4216 				   gfp_zone(GFP_KERNEL),
4217 				   NULL,
4218 				   &zone);
4219 	return zone->node;
4220 }
4221 #endif
4222 
4223 #else	/* CONFIG_NUMA */
4224 
4225 static void set_zonelist_order(void)
4226 {
4227 	current_zonelist_order = ZONELIST_ORDER_ZONE;
4228 }
4229 
4230 static void build_zonelists(pg_data_t *pgdat)
4231 {
4232 	int node, local_node;
4233 	enum zone_type j;
4234 	struct zonelist *zonelist;
4235 
4236 	local_node = pgdat->node_id;
4237 
4238 	zonelist = &pgdat->node_zonelists[0];
4239 	j = build_zonelists_node(pgdat, zonelist, 0);
4240 
4241 	/*
4242 	 * Now we build the zonelist so that it contains the zones
4243 	 * of all the other nodes.
4244 	 * We don't want to pressure a particular node, so when
4245 	 * building the zones for node N, we make sure that the
4246 	 * zones coming right after the local ones are those from
4247 	 * node N+1 (modulo the number of nodes)
4248 	 */
4249 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4250 		if (!node_online(node))
4251 			continue;
4252 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4253 	}
4254 	for (node = 0; node < local_node; node++) {
4255 		if (!node_online(node))
4256 			continue;
4257 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4258 	}
4259 
4260 	zonelist->_zonerefs[j].zone = NULL;
4261 	zonelist->_zonerefs[j].zone_idx = 0;
4262 }
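
/*
 * Illustration: with four online nodes and local_node == 2, the two
 * loops above append the remote nodes in the order 3, 0, 1, producing
 * the rotated sequence described in the comment.
 */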
4263 
4264 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
4265 static void build_zonelist_cache(pg_data_t *pgdat)
4266 {
4267 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
4268 }
4269 
4270 #endif	/* CONFIG_NUMA */
4271 
4272 /*
4273  * Boot pageset table. One per cpu which is going to be used for all
4274  * zones and all nodes. The parameters will be set in such a way
4275  * that an item put on a list will immediately be handed over to
4276  * the buddy list. This is safe since pageset manipulation is done
4277  * with interrupts disabled.
4278  *
4279  * The boot_pagesets must be kept even after bootup is complete for
4280  * unused processors and/or zones. They do play a role for bootstrapping
4281  * hotplugged processors.
4282  *
4283  * zoneinfo_show() and maybe other functions do
4284  * not check if the processor is online before following the pageset pointer.
4285  * Other parts of the kernel may not check if the zone is available.
4286  */
4287 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4288 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
4289 static void setup_zone_pageset(struct zone *zone);
4290 
4291 /*
4292  * Global mutex to protect against size modification of zonelists
4293  * as well as to serialize pageset setup for the new populated zone.
4294  */
4295 DEFINE_MUTEX(zonelists_mutex);
4296 
4297 /* The return value is int just to match stop_machine()'s expected signature */
4298 static int __build_all_zonelists(void *data)
4299 {
4300 	int nid;
4301 	int cpu;
4302 	pg_data_t *self = data;
4303 
4304 #ifdef CONFIG_NUMA
4305 	memset(node_load, 0, sizeof(node_load));
4306 #endif
4307 
4308 	if (self && !node_online(self->node_id)) {
4309 		build_zonelists(self);
4310 		build_zonelist_cache(self);
4311 	}
4312 
4313 	for_each_online_node(nid) {
4314 		pg_data_t *pgdat = NODE_DATA(nid);
4315 
4316 		build_zonelists(pgdat);
4317 		build_zonelist_cache(pgdat);
4318 	}
4319 
4320 	/*
4321 	 * Initialize the boot_pagesets that are going to be used
4322 	 * for bootstrapping processors. The real pagesets for
4323 	 * each zone will be allocated later when the per cpu
4324 	 * allocator is available.
4325 	 *
4326 	 * boot_pagesets are used also for bootstrapping offline
4327 	 * cpus if the system is already booted because the pagesets
4328 	 * are needed to initialize allocators on a specific cpu too.
4329 	 * F.e. the percpu allocator needs the page allocator which
4330 	 * needs the percpu allocator in order to allocate its pagesets
4331 	 * (a chicken-egg dilemma).
4332 	 */
4333 	for_each_possible_cpu(cpu) {
4334 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4335 
4336 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
4337 		/*
4338 		 * We now know the "local memory node" for each node--
4339 		 * i.e., the node of the first zone in the generic zonelist.
4340 		 * Set up numa_mem percpu variable for on-line cpus.  During
4341 		 * boot, only the boot cpu should be on-line;  we'll init the
4342 		 * secondary cpus' numa_mem as they come on-line.  During
4343 		 * node/memory hotplug, we'll fixup all on-line cpus.
4344 		 */
4345 		if (cpu_online(cpu))
4346 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4347 #endif
4348 	}
4349 
4350 	return 0;
4351 }
4352 
4353 static noinline void __init
4354 build_all_zonelists_init(void)
4355 {
4356 	__build_all_zonelists(NULL);
4357 	mminit_verify_zonelist();
4358 	cpuset_init_current_mems_allowed();
4359 }
4360 
4361 /*
4362  * Called with zonelists_mutex held always
4363  * unless system_state == SYSTEM_BOOTING.
4364  *
4365  * __ref due to (1) call of __meminit annotated setup_zone_pageset
4366  * [we're only called with non-NULL zone through __meminit paths] and
4367  * (2) call of __init annotated helper build_all_zonelists_init
4368  * [protected by SYSTEM_BOOTING].
4369  */
4370 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
4371 {
4372 	set_zonelist_order();
4373 
4374 	if (system_state == SYSTEM_BOOTING) {
4375 		build_all_zonelists_init();
4376 	} else {
4377 #ifdef CONFIG_MEMORY_HOTPLUG
4378 		if (zone)
4379 			setup_zone_pageset(zone);
4380 #endif
4381 		/* we have to stop all cpus to guarantee there is no user
4382 		   of zonelist */
4383 		stop_machine(__build_all_zonelists, pgdat, NULL);
4384 		/* cpuset refresh routine should be here */
4385 	}
4386 	vm_total_pages = nr_free_pagecache_pages();
4387 	/*
4388 	 * Disable grouping by mobility if the number of pages in the
4389 	 * system is too low to allow the mechanism to work. It would be
4390 	 * more accurate, but expensive to check per-zone. This check is
4391 	 * made on memory-hotadd so a system can start with mobility
4392 	 * disabled and enable it later
4393 	 */
4394 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
4395 		page_group_by_mobility_disabled = 1;
4396 	else
4397 		page_group_by_mobility_disabled = 0;
4398 
4399 	pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
4400 		"Total pages: %ld\n",
4401 			nr_online_nodes,
4402 			zonelist_order_name[current_zonelist_order],
4403 			page_group_by_mobility_disabled ? "off" : "on",
4404 			vm_total_pages);
4405 #ifdef CONFIG_NUMA
4406 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
4407 #endif
4408 }
4409 
4410 /*
4411  * Helper functions to size the waitqueue hash table.
4412  * Essentially these want to choose hash table sizes sufficiently
4413  * large so that collisions trying to wait on pages are rare.
4414  * But in fact, the number of active page waitqueues on typical
4415  * systems is ridiculously low, less than 200. So this is still
4416  * conservative, even though it seems large.
4417  *
4418  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4419  * waitqueues, i.e. the size of the waitq table given the number of pages.
4420  */
4421 #define PAGES_PER_WAITQUEUE	256
4422 
4423 #ifndef CONFIG_MEMORY_HOTPLUG
4424 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4425 {
4426 	unsigned long size = 1;
4427 
4428 	pages /= PAGES_PER_WAITQUEUE;
4429 
4430 	while (size < pages)
4431 		size <<= 1;
4432 
4433 	/*
4434 	 * Once we have dozens or even hundreds of threads sleeping
4435 	 * on IO we've got bigger problems than wait queue collision.
4436 	 * Limit the size of the wait table to a reasonable size.
4437 	 */
4438 	size = min(size, 4096UL);
4439 
4440 	return max(size, 4UL);
4441 }
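
/*
 * Worked example: a zone of 1048576 pages targets 1048576 / 256 = 4096
 * entries; the power-of-two round-up leaves that at 4096, which also
 * matches the clamp, so the zone gets 4096 wait queue heads.
 */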
4442 #else
4443 /*
4444  * A zone's size might be changed by hot-add, so it is not possible to determine
4445  * a suitable size for its wait_table.  So we use the maximum size now.
4446  *
4447  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
4448  *
4449  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
4450  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4451  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
4452  *
4453  * The maximum number of entries is used once a zone's memory reaches
4454  * (512K + 256) pages or more by the traditional calculation above. It equals:
4455  *
4456  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
4457  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
4458  *    powerpc (64K page size)             : =  (32G +16M)byte.
4459  */
4460 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4461 {
4462 	return 4096UL;
4463 }
4464 #endif
4465 
4466 /*
4467  * This is an integer logarithm so that shifts can be used later
4468  * to extract the more random high bits from the multiplicative
4469  * hash function before the remainder is taken.
4470  */
4471 static inline unsigned long wait_table_bits(unsigned long size)
4472 {
4473 	return ffz(~size);
4474 }
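
/*
 * Example: for a power-of-two table size of 4096, ffz(~4096) returns
 * 12, the number of bits the waitqueue hash needs.
 */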
4475 
4476 /*
4477  * Check if a pageblock contains reserved pages
4478  */
4479 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4480 {
4481 	unsigned long pfn;
4482 
4483 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4484 		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4485 			return 1;
4486 	}
4487 	return 0;
4488 }
4489 
4490 /*
4491  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
4492  * of blocks reserved is based on min_wmark_pages(zone). The memory within
4493  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4494  * higher will lead to a bigger reserve which will get freed as contiguous
4495  * blocks as reclaim kicks in.
4496  */
4497 static void setup_zone_migrate_reserve(struct zone *zone)
4498 {
4499 	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
4500 	struct page *page;
4501 	unsigned long block_migratetype;
4502 	int reserve;
4503 	int old_reserve;
4504 
4505 	/*
4506 	 * Get the start pfn, end pfn and the number of blocks to reserve
4507 	 * We have to be careful to be aligned to pageblock_nr_pages to
4508 	 * make sure that we always check pfn_valid for the first page in
4509 	 * the block.
4510 	 */
4511 	start_pfn = zone->zone_start_pfn;
4512 	end_pfn = zone_end_pfn(zone);
4513 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
4514 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
4515 							pageblock_order;
4516 
4517 	/*
4518 	 * Reserve blocks are generally in place to help high-order atomic
4519 	 * allocations that are short-lived. A min_free_kbytes value that
4520 	 * would result in more than 2 reserve blocks for atomic allocations
4521 	 * is assumed to be in place to help anti-fragmentation for the
4522 	 * future allocation of hugepages at runtime.
4523 	 */
4524 	reserve = min(2, reserve);
4525 	old_reserve = zone->nr_migrate_reserve_block;
4526 
4527 	/* On memory hot-add, we almost always need to do nothing */
4528 	if (reserve == old_reserve)
4529 		return;
4530 	zone->nr_migrate_reserve_block = reserve;
4531 
4532 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4533 		if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
4534 			return;
4535 
4536 		if (!pfn_valid(pfn))
4537 			continue;
4538 		page = pfn_to_page(pfn);
4539 
4540 		/* Watch out for overlapping nodes */
4541 		if (page_to_nid(page) != zone_to_nid(zone))
4542 			continue;
4543 
4544 		block_migratetype = get_pageblock_migratetype(page);
4545 
4546 		/* Only test what is necessary when the reserves are not met */
4547 		if (reserve > 0) {
4548 			/*
4549 			 * Blocks with reserved pages will never be freed, skip
4550 			 * them.
4551 			 */
4552 			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4553 			if (pageblock_is_reserved(pfn, block_end_pfn))
4554 				continue;
4555 
4556 			/* If this block is reserved, account for it */
4557 			if (block_migratetype == MIGRATE_RESERVE) {
4558 				reserve--;
4559 				continue;
4560 			}
4561 
4562 			/* Suitable for reserving if this block is movable */
4563 			if (block_migratetype == MIGRATE_MOVABLE) {
4564 				set_pageblock_migratetype(page,
4565 							MIGRATE_RESERVE);
4566 				move_freepages_block(zone, page,
4567 							MIGRATE_RESERVE);
4568 				reserve--;
4569 				continue;
4570 			}
4571 		} else if (!old_reserve) {
4572 			/*
4573 			 * At boot time we don't need to scan the whole zone
4574 			 * for turning off MIGRATE_RESERVE.
4575 			 */
4576 			break;
4577 		}
4578 
4579 		/*
4580 		 * If the reserve is met and this is a previous reserved block,
4581 		 * take it back
4582 		 */
4583 		if (block_migratetype == MIGRATE_RESERVE) {
4584 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4585 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
4586 		}
4587 	}
4588 }
4589 
4590 /*
4591  * Initially all pages are reserved - free ones are freed
4592  * up by free_all_bootmem() once the early boot process is
4593  * done. Non-atomic initialization, single-pass.
4594  */
4595 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4596 		unsigned long start_pfn, enum memmap_context context)
4597 {
4598 	pg_data_t *pgdat = NODE_DATA(nid);
4599 	unsigned long end_pfn = start_pfn + size;
4600 	unsigned long pfn;
4601 	struct zone *z;
4602 	unsigned long nr_initialised = 0;
4603 
4604 	if (highest_memmap_pfn < end_pfn - 1)
4605 		highest_memmap_pfn = end_pfn - 1;
4606 
4607 	z = &pgdat->node_zones[zone];
4608 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4609 		/*
4610 		 * There can be holes in boot-time mem_map[]s
4611 		 * handed to this function.  They do not
4612 		 * exist on hotplugged memory.
4613 		 */
4614 		if (context == MEMMAP_EARLY) {
4615 			if (!early_pfn_valid(pfn))
4616 				continue;
4617 			if (!early_pfn_in_nid(pfn, nid))
4618 				continue;
4619 			if (!update_defer_init(pgdat, pfn, end_pfn,
4620 						&nr_initialised))
4621 				break;
4622 		}
4623 
4624 		/*
4625 		 * Mark the block movable so that blocks are reserved for
4626 		 * movable at startup. This will force kernel allocations
4627 		 * to reserve their blocks rather than leaking throughout
4628 		 * the address space during boot when many long-lived
4629 		 * kernel allocations are made. Later some blocks near
4630 		 * the start are marked MIGRATE_RESERVE by
4631 		 * setup_zone_migrate_reserve()
4632 		 *
4633 		 * bitmap is created for zone's valid pfn range. but memmap
4634 		 * can be created for invalid pages (for alignment)
4635 		 * check here not to call set_pageblock_migratetype() against
4636 		 * pfn out of zone.
4637 		 */
4638 		if (!(pfn & (pageblock_nr_pages - 1))) {
4639 			struct page *page = pfn_to_page(pfn);
4640 
4641 			__init_single_page(page, pfn, zone, nid);
4642 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4643 		} else {
4644 			__init_single_pfn(pfn, zone, nid);
4645 		}
4646 	}
4647 }
4648 
4649 static void __meminit zone_init_free_lists(struct zone *zone)
4650 {
4651 	unsigned int order, t;
4652 	for_each_migratetype_order(order, t) {
4653 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
4654 		zone->free_area[order].nr_free = 0;
4655 	}
4656 }
4657 
4658 #ifndef __HAVE_ARCH_MEMMAP_INIT
4659 #define memmap_init(size, nid, zone, start_pfn) \
4660 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4661 #endif
4662 
4663 static int zone_batchsize(struct zone *zone)
4664 {
4665 #ifdef CONFIG_MMU
4666 	int batch;
4667 
4668 	/*
4669 	 * The per-cpu-pages pools are set to around 1000th of the
4670 	 * size of the zone.  But no more than 1/2 of a meg.
4671 	 *
4672 	 * OK, so we don't know how big the cache is.  So guess.
4673 	 */
4674 	batch = zone->managed_pages / 1024;
4675 	if (batch * PAGE_SIZE > 512 * 1024)
4676 		batch = (512 * 1024) / PAGE_SIZE;
4677 	batch /= 4;		/* We effectively *= 4 below */
4678 	if (batch < 1)
4679 		batch = 1;
4680 
4681 	/*
4682 	 * Clamp the batch to a 2^n - 1 value. Having a power
4683 	 * of 2 value was found to be more likely to have
4684 	 * suboptimal cache aliasing properties in some cases.
4685 	 *
4686 	 * For example if 2 tasks are alternately allocating
4687 	 * batches of pages, one task can end up with a lot
4688 	 * of pages of one half of the possible page colors
4689 	 * and the other with pages of the other colors.
4690 	 */
4691 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
4692 
4693 	return batch;
4694 
4695 #else
4696 	/* The deferral and batching of frees should be suppressed under NOMMU
4697 	 * conditions.
4698 	 *
4699 	 * The problem is that NOMMU needs to be able to allocate large chunks
4700 	 * of contiguous memory as there's no hardware page translation to
4701 	 * assemble apparent contiguous memory from discontiguous pages.
4702 	 *
4703 	 * Queueing large contiguous runs of pages for batching, however,
4704 	 * causes the pages to actually be freed in smaller chunks.  As there
4705 	 * can be a significant delay between the individual batches being
4706 	 * recycled, this leads to the once large chunks of space being
4707 	 * fragmented and becoming unavailable for high-order allocations.
4708 	 */
4709 	return 0;
4710 #endif
4711 }
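
/*
 * Worked example (4K pages): a 1GB zone has 262144 managed pages, so
 * batch starts at 262144 / 1024 = 256; since 256 pages exceed 512KB it
 * is clamped to 128, divided by 4 to give 32, and finally becomes
 * rounddown_pow_of_two(32 + 16) - 1 = 31.
 */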
4712 
4713 /*
4714  * pcp->high and pcp->batch values are related and dependent on one another:
4715  * ->batch must never be higher than ->high.
4716  * The following function updates them in a safe manner without read side
4717  * locking.
4718  *
4719  * Any new users of pcp->batch and pcp->high should ensure they can cope with
4720  * those fields changing asynchronously (acording the the above rule).
4721  *
4722  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4723  * outside of boot time (or some other assurance that no concurrent updaters
4724  * exist).
4725  */
4726 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4727 		unsigned long batch)
4728 {
4729 	/* start with a fail-safe value for batch */
4730 	pcp->batch = 1;
4731 	smp_wmb();
4732 
4733 	/* Update high, then batch, in order */
4734 	pcp->high = high;
4735 	smp_wmb();
4736 
4737 	pcp->batch = batch;
4738 }
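
/*
 * The intent of the store order above, for lock-free readers of
 * pcp->high and pcp->batch, is that a reader should at worst observe a
 * batch of 1, never a batch that exceeds high, preserving the invariant
 * documented above without read-side locking.
 */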
4739 
4740 /* a companion to pageset_set_high() */
4741 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4742 {
4743 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4744 }
4745 
4746 static void pageset_init(struct per_cpu_pageset *p)
4747 {
4748 	struct per_cpu_pages *pcp;
4749 	int migratetype;
4750 
4751 	memset(p, 0, sizeof(*p));
4752 
4753 	pcp = &p->pcp;
4754 	pcp->count = 0;
4755 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4756 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
4757 }
4758 
4759 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4760 {
4761 	pageset_init(p);
4762 	pageset_set_batch(p, batch);
4763 }
4764 
4765 /*
4766  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
4767  * to the value high for the pageset p.
4768  */
4769 static void pageset_set_high(struct per_cpu_pageset *p,
4770 				unsigned long high)
4771 {
4772 	unsigned long batch = max(1UL, high / 4);
4773 	if ((high / 4) > (PAGE_SHIFT * 8))
4774 		batch = PAGE_SHIFT * 8;
4775 
4776 	pageset_update(&p->pcp, high, batch);
4777 }
4778 
4779 static void pageset_set_high_and_batch(struct zone *zone,
4780 				       struct per_cpu_pageset *pcp)
4781 {
4782 	if (percpu_pagelist_fraction)
4783 		pageset_set_high(pcp,
4784 			(zone->managed_pages /
4785 				percpu_pagelist_fraction));
4786 	else
4787 		pageset_set_batch(pcp, zone_batchsize(zone));
4788 }
4789 
4790 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4791 {
4792 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4793 
4794 	pageset_init(pcp);
4795 	pageset_set_high_and_batch(zone, pcp);
4796 }
4797 
4798 static void __meminit setup_zone_pageset(struct zone *zone)
4799 {
4800 	int cpu;
4801 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
4802 	for_each_possible_cpu(cpu)
4803 		zone_pageset_init(zone, cpu);
4804 }
4805 
4806 /*
4807  * Allocate per cpu pagesets and initialize them.
4808  * Before this call only boot pagesets were available.
4809  */
4810 void __init setup_per_cpu_pageset(void)
4811 {
4812 	struct zone *zone;
4813 
4814 	for_each_populated_zone(zone)
4815 		setup_zone_pageset(zone);
4816 }
4817 
4818 static noinline __init_refok
4819 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4820 {
4821 	int i;
4822 	size_t alloc_size;
4823 
4824 	/*
4825 	 * The per-page waitqueue mechanism uses hashed waitqueues
4826 	 * per zone.
4827 	 */
4828 	zone->wait_table_hash_nr_entries =
4829 		 wait_table_hash_nr_entries(zone_size_pages);
4830 	zone->wait_table_bits =
4831 		wait_table_bits(zone->wait_table_hash_nr_entries);
4832 	alloc_size = zone->wait_table_hash_nr_entries
4833 					* sizeof(wait_queue_head_t);
4834 
4835 	if (!slab_is_available()) {
4836 		zone->wait_table = (wait_queue_head_t *)
4837 			memblock_virt_alloc_node_nopanic(
4838 				alloc_size, zone->zone_pgdat->node_id);
4839 	} else {
4840 		/*
4841 		 * This case means that a zone whose size was 0 gets new memory
4842 		 * via memory hot-add.
4843 		 * But it may be the case that a new node was hot-added.  In
4844 		 * this case vmalloc() will not be able to use this new node's
4845 		 * memory - this wait_table must be initialized to use this new
4846 		 * node itself as well.
4847 		 * To use this new node's memory, further consideration will be
4848 		 * necessary.
4849 		 */
4850 		zone->wait_table = vmalloc(alloc_size);
4851 	}
4852 	if (!zone->wait_table)
4853 		return -ENOMEM;
4854 
4855 	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4856 		init_waitqueue_head(zone->wait_table + i);
4857 
4858 	return 0;
4859 }
4860 
4861 static __meminit void zone_pcp_init(struct zone *zone)
4862 {
4863 	/*
4864 	 * per cpu subsystem is not up at this point. The following code
4865 	 * relies on the ability of the linker to provide the
4866 	 * offset of a (static) per cpu variable into the per cpu area.
4867 	 */
4868 	zone->pageset = &boot_pageset;
4869 
4870 	if (populated_zone(zone))
4871 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4872 			zone->name, zone->present_pages,
4873 					 zone_batchsize(zone));
4874 }
4875 
4876 int __meminit init_currently_empty_zone(struct zone *zone,
4877 					unsigned long zone_start_pfn,
4878 					unsigned long size,
4879 					enum memmap_context context)
4880 {
4881 	struct pglist_data *pgdat = zone->zone_pgdat;
4882 	int ret;
4883 	ret = zone_wait_table_init(zone, size);
4884 	if (ret)
4885 		return ret;
4886 	pgdat->nr_zones = zone_idx(zone) + 1;
4887 
4888 	zone->zone_start_pfn = zone_start_pfn;
4889 
4890 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
4891 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
4892 			pgdat->node_id,
4893 			(unsigned long)zone_idx(zone),
4894 			zone_start_pfn, (zone_start_pfn + size));
4895 
4896 	zone_init_free_lists(zone);
4897 
4898 	return 0;
4899 }
4900 
4901 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4902 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4903 
4904 /*
4905  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4906  */
4907 int __meminit __early_pfn_to_nid(unsigned long pfn,
4908 					struct mminit_pfnnid_cache *state)
4909 {
4910 	unsigned long start_pfn, end_pfn;
4911 	int nid;
4912 
4913 	if (state->last_start <= pfn && pfn < state->last_end)
4914 		return state->last_nid;
4915 
4916 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4917 	if (nid != -1) {
4918 		state->last_start = start_pfn;
4919 		state->last_end = end_pfn;
4920 		state->last_nid = nid;
4921 	}
4922 
4923 	return nid;
4924 }
4925 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4926 
4927 /**
4928  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
4929  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4930  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4931  *
4932  * If an architecture guarantees that all ranges registered contain no holes
4933  * and may be freed, this this function may be used instead of calling
4934  * memblock_free_early_nid() manually.
4935  */
4936 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4937 {
4938 	unsigned long start_pfn, end_pfn;
4939 	int i, this_nid;
4940 
4941 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4942 		start_pfn = min(start_pfn, max_low_pfn);
4943 		end_pfn = min(end_pfn, max_low_pfn);
4944 
4945 		if (start_pfn < end_pfn)
4946 			memblock_free_early_nid(PFN_PHYS(start_pfn),
4947 					(end_pfn - start_pfn) << PAGE_SHIFT,
4948 					this_nid);
4949 	}
4950 }
4951 
4952 /**
4953  * sparse_memory_present_with_active_regions - Call memory_present for each active range
4954  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4955  *
4956  * If an architecture guarantees that all ranges registered contain no holes and may
4957  * be freed, this function may be used instead of calling memory_present() manually.
4958  */
4959 void __init sparse_memory_present_with_active_regions(int nid)
4960 {
4961 	unsigned long start_pfn, end_pfn;
4962 	int i, this_nid;
4963 
4964 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4965 		memory_present(this_nid, start_pfn, end_pfn);
4966 }
4967 
4968 /**
4969  * get_pfn_range_for_nid - Return the start and end page frames for a node
4970  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4971  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4972  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4973  *
4974  * It returns the start and end page frame of a node based on information
4975  * provided by memblock_set_node(). If called for a node
4976  * with no available memory, a warning is printed and the start and end
4977  * PFNs will be 0.
4978  */
4979 void __meminit get_pfn_range_for_nid(unsigned int nid,
4980 			unsigned long *start_pfn, unsigned long *end_pfn)
4981 {
4982 	unsigned long this_start_pfn, this_end_pfn;
4983 	int i;
4984 
4985 	*start_pfn = -1UL;
4986 	*end_pfn = 0;
4987 
4988 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4989 		*start_pfn = min(*start_pfn, this_start_pfn);
4990 		*end_pfn = max(*end_pfn, this_end_pfn);
4991 	}
4992 
4993 	if (*start_pfn == -1UL)
4994 		*start_pfn = 0;
4995 }
4996 
4997 /*
4998  * This finds a zone that can be used for ZONE_MOVABLE pages. The
4999  * assumption is made that zones within a node are ordered in monotonically
5000  * increasing memory addresses so that the "highest" populated zone is used
5001  */
5002 static void __init find_usable_zone_for_movable(void)
5003 {
5004 	int zone_index;
5005 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5006 		if (zone_index == ZONE_MOVABLE)
5007 			continue;
5008 
5009 		if (arch_zone_highest_possible_pfn[zone_index] >
5010 				arch_zone_lowest_possible_pfn[zone_index])
5011 			break;
5012 	}
5013 
5014 	VM_BUG_ON(zone_index == -1);
5015 	movable_zone = zone_index;
5016 }
5017 
5018 /*
5019  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
5020  * because it is sized independent of architecture. Unlike the other zones,
5021  * the starting point for ZONE_MOVABLE is not fixed. It may be different
5022  * in each node depending on the size of each node and how evenly kernelcore
5023  * is distributed. This helper function adjusts the zone ranges
5024  * provided by the architecture for a given node by using the end of the
5025  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5026  * zones within a node are in order of monotonic increases memory addresses
5027  */
5028 static void __meminit adjust_zone_range_for_zone_movable(int nid,
5029 					unsigned long zone_type,
5030 					unsigned long node_start_pfn,
5031 					unsigned long node_end_pfn,
5032 					unsigned long *zone_start_pfn,
5033 					unsigned long *zone_end_pfn)
5034 {
5035 	/* Only adjust if ZONE_MOVABLE is on this node */
5036 	if (zone_movable_pfn[nid]) {
5037 		/* Size ZONE_MOVABLE */
5038 		if (zone_type == ZONE_MOVABLE) {
5039 			*zone_start_pfn = zone_movable_pfn[nid];
5040 			*zone_end_pfn = min(node_end_pfn,
5041 				arch_zone_highest_possible_pfn[movable_zone]);
5042 
5043 		/* Adjust for ZONE_MOVABLE starting within this range */
5044 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
5045 				*zone_end_pfn > zone_movable_pfn[nid]) {
5046 			*zone_end_pfn = zone_movable_pfn[nid];
5047 
5048 		/* Check if this whole range is within ZONE_MOVABLE */
5049 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
5050 			*zone_start_pfn = *zone_end_pfn;
5051 	}
5052 }
5053 
5054 /*
5055  * Return the number of pages a zone spans in a node, including holes
5056  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5057  */
5058 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5059 					unsigned long zone_type,
5060 					unsigned long node_start_pfn,
5061 					unsigned long node_end_pfn,
5062 					unsigned long *ignored)
5063 {
5064 	unsigned long zone_start_pfn, zone_end_pfn;
5065 
5066 	/* When hot-adding a new node, the node should be empty */
5067 	if (!node_start_pfn && !node_end_pfn)
5068 		return 0;
5069 
5070 	/* Get the start and end of the zone */
5071 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5072 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5073 	adjust_zone_range_for_zone_movable(nid, zone_type,
5074 				node_start_pfn, node_end_pfn,
5075 				&zone_start_pfn, &zone_end_pfn);
5076 
5077 	/* Check that this node has pages within the zone's required range */
5078 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
5079 		return 0;
5080 
5081 	/* Move the zone boundaries inside the node if necessary */
5082 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
5083 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
5084 
5085 	/* Return the spanned pages */
5086 	return zone_end_pfn - zone_start_pfn;
5087 }
5088 
5089 /*
5090  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5091  * then all holes in the requested range will be accounted for.
5092  */
5093 unsigned long __meminit __absent_pages_in_range(int nid,
5094 				unsigned long range_start_pfn,
5095 				unsigned long range_end_pfn)
5096 {
5097 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
5098 	unsigned long start_pfn, end_pfn;
5099 	int i;
5100 
5101 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5102 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5103 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5104 		nr_absent -= end_pfn - start_pfn;
5105 	}
5106 	return nr_absent;
5107 }
5108 
5109 /**
5110  * absent_pages_in_range - Return number of page frames in holes within a range
5111  * @start_pfn: The start PFN to start searching for holes
5112  * @end_pfn: The end PFN to stop searching for holes
5113  *
5114  * It returns the number of page frames in memory holes within a range.
5115  */
5116 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5117 							unsigned long end_pfn)
5118 {
5119 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5120 }
5121 
5122 /* Return the number of page frames in holes in a zone on a node */
5123 static unsigned long __meminit zone_absent_pages_in_node(int nid,
5124 					unsigned long zone_type,
5125 					unsigned long node_start_pfn,
5126 					unsigned long node_end_pfn,
5127 					unsigned long *ignored)
5128 {
5129 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5130 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5131 	unsigned long zone_start_pfn, zone_end_pfn;
5132 
5133 	/* When hot-adding a new node, the node should be empty */
5134 	if (!node_start_pfn && !node_end_pfn)
5135 		return 0;
5136 
5137 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5138 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5139 
5140 	adjust_zone_range_for_zone_movable(nid, zone_type,
5141 			node_start_pfn, node_end_pfn,
5142 			&zone_start_pfn, &zone_end_pfn);
5143 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5144 }
5145 
5146 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5147 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5148 					unsigned long zone_type,
5149 					unsigned long node_start_pfn,
5150 					unsigned long node_end_pfn,
5151 					unsigned long *zones_size)
5152 {
5153 	return zones_size[zone_type];
5154 }
5155 
5156 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5157 						unsigned long zone_type,
5158 						unsigned long node_start_pfn,
5159 						unsigned long node_end_pfn,
5160 						unsigned long *zholes_size)
5161 {
5162 	if (!zholes_size)
5163 		return 0;
5164 
5165 	return zholes_size[zone_type];
5166 }
5167 
5168 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5169 
5170 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5171 						unsigned long node_start_pfn,
5172 						unsigned long node_end_pfn,
5173 						unsigned long *zones_size,
5174 						unsigned long *zholes_size)
5175 {
5176 	unsigned long realtotalpages = 0, totalpages = 0;
5177 	enum zone_type i;
5178 
5179 	for (i = 0; i < MAX_NR_ZONES; i++) {
5180 		struct zone *zone = pgdat->node_zones + i;
5181 		unsigned long size, real_size;
5182 
5183 		size = zone_spanned_pages_in_node(pgdat->node_id, i,
5184 						  node_start_pfn,
5185 						  node_end_pfn,
5186 						  zones_size);
5187 		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5188 						  node_start_pfn, node_end_pfn,
5189 						  zholes_size);
5190 		zone->spanned_pages = size;
5191 		zone->present_pages = real_size;
5192 
5193 		totalpages += size;
5194 		realtotalpages += real_size;
5195 	}
5196 
5197 	pgdat->node_spanned_pages = totalpages;
5198 	pgdat->node_present_pages = realtotalpages;
5199 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5200 							realtotalpages);
5201 }
5202 
5203 #ifndef CONFIG_SPARSEMEM
5204 /*
5205  * Calculate the size of the zone->blockflags rounded to an unsigned long.
5206  * Start by making sure zonesize is a multiple of pageblock_order by rounding
5207  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
5208  * round what is now in bits up to the nearest long, then return it in
5209  * bytes.
5210  */
5211 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
5212 {
5213 	unsigned long usemapsize;
5214 
5215 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
5216 	usemapsize = roundup(zonesize, pageblock_nr_pages);
5217 	usemapsize = usemapsize >> pageblock_order;
5218 	usemapsize *= NR_PAGEBLOCK_BITS;
5219 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5220 
5221 	return usemapsize / 8;
5222 }
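
/*
 * Worked example (editorial annotation, not part of the original source;
 * assumes 4KiB pages, pageblock_order == 10 and NR_PAGEBLOCK_BITS == 4):
 * a zone of 262144 pages starting on a pageblock boundary covers
 * 262144 / 1024 = 256 pageblocks, needing 256 * 4 = 1024 bits; rounding
 * up to longs on a 64-bit machine leaves 1024 bits, so usemap_size()
 * returns 1024 / 8 = 128 bytes.
 */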
5223 
5224 static void __init setup_usemap(struct pglist_data *pgdat,
5225 				struct zone *zone,
5226 				unsigned long zone_start_pfn,
5227 				unsigned long zonesize)
5228 {
5229 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
5230 	zone->pageblock_flags = NULL;
5231 	if (usemapsize)
5232 		zone->pageblock_flags =
5233 			memblock_virt_alloc_node_nopanic(usemapsize,
5234 							 pgdat->node_id);
5235 }
5236 #else
5237 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5238 				unsigned long zone_start_pfn, unsigned long zonesize) {}
5239 #endif /* CONFIG_SPARSEMEM */
5240 
5241 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
5242 
5243 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
5244 void __paginginit set_pageblock_order(void)
5245 {
5246 	unsigned int order;
5247 
5248 	/* Check that pageblock_nr_pages has not already been set up */
5249 	if (pageblock_order)
5250 		return;
5251 
5252 	if (HPAGE_SHIFT > PAGE_SHIFT)
5253 		order = HUGETLB_PAGE_ORDER;
5254 	else
5255 		order = MAX_ORDER - 1;
5256 
5257 	/*
5258 	 * Assume the largest contiguous order of interest is a huge page.
5259 	 * This value may be variable depending on boot parameters on IA64 and
5260 	 * powerpc.
5261 	 */
5262 	pageblock_order = order;
5263 }
5264 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5265 
5266 /*
5267  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
5268  * is unused as pageblock_order is set at compile-time. See
5269  * include/linux/pageblock-flags.h for the values of pageblock_order based on
5270  * the kernel config
5271  */
5272 void __paginginit set_pageblock_order(void)
5273 {
5274 }
5275 
5276 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5277 
5278 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5279 						   unsigned long present_pages)
5280 {
5281 	unsigned long pages = spanned_pages;
5282 
5283 	/*
5284 	 * Provide a more accurate estimation if there are holes within
5285 	 * the zone and SPARSEMEM is in use. If there are holes within the
5286 	 * zone, each populated memory region may cost us one or two extra
5287 	 * memmap pages due to alignment because the memmap pages for each
5288 	 * populated region may not be naturally aligned on page boundaries.
5289 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5290 	 */
5291 	if (spanned_pages > present_pages + (present_pages >> 4) &&
5292 	    IS_ENABLED(CONFIG_SPARSEMEM))
5293 		pages = present_pages;
5294 
5295 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5296 }
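
/*
 * Illustrative numbers (editorial annotation; assumes 4KiB pages and a
 * 64-byte struct page): a zone spanning 1048576 pfns with only 786432
 * present satisfies spanned > present + (present >> 4), i.e.
 * 1048576 > 835584, so with SPARSEMEM the estimate uses present_pages:
 * PAGE_ALIGN(786432 * 64) >> PAGE_SHIFT = 12288 memmap pages.
 */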
5297 
5298 /*
5299  * Set up the zone data structures:
5300  *   - mark all pages reserved
5301  *   - mark all memory queues empty
5302  *   - clear the memory bitmaps
5303  *
5304  * NOTE: pgdat should get zeroed by caller.
5305  */
5306 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
5307 		unsigned long node_start_pfn, unsigned long node_end_pfn)
5308 {
5309 	enum zone_type j;
5310 	int nid = pgdat->node_id;
5311 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
5312 	int ret;
5313 
5314 	pgdat_resize_init(pgdat);
5315 #ifdef CONFIG_NUMA_BALANCING
5316 	spin_lock_init(&pgdat->numabalancing_migrate_lock);
5317 	pgdat->numabalancing_migrate_nr_pages = 0;
5318 	pgdat->numabalancing_migrate_next_window = jiffies;
5319 #endif
5320 	init_waitqueue_head(&pgdat->kswapd_wait);
5321 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
5322 	pgdat_page_ext_init(pgdat);
5323 
5324 	for (j = 0; j < MAX_NR_ZONES; j++) {
5325 		struct zone *zone = pgdat->node_zones + j;
5326 		unsigned long size, realsize, freesize, memmap_pages;
5327 
5328 		size = zone->spanned_pages;
5329 		realsize = freesize = zone->present_pages;
5330 
5331 		/*
5332 		 * Adjust freesize so that it accounts for how much memory
5333 		 * is used by this zone for memmap. This affects the watermark
5334 		 * and per-cpu initialisations
5335 		 */
5336 		memmap_pages = calc_memmap_size(size, realsize);
5337 		if (!is_highmem_idx(j)) {
5338 			if (freesize >= memmap_pages) {
5339 				freesize -= memmap_pages;
5340 				if (memmap_pages)
5341 					printk(KERN_DEBUG
5342 					       "  %s zone: %lu pages used for memmap\n",
5343 					       zone_names[j], memmap_pages);
5344 			} else
5345 				printk(KERN_WARNING
5346 					"  %s zone: %lu pages exceeds freesize %lu\n",
5347 					zone_names[j], memmap_pages, freesize);
5348 		}
5349 
5350 		/* Account for reserved pages */
5351 		if (j == 0 && freesize > dma_reserve) {
5352 			freesize -= dma_reserve;
5353 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
5354 					zone_names[0], dma_reserve);
5355 		}
5356 
5357 		if (!is_highmem_idx(j))
5358 			nr_kernel_pages += freesize;
5359 		/* Charge for highmem memmap if there are enough kernel pages */
5360 		else if (nr_kernel_pages > memmap_pages * 2)
5361 			nr_kernel_pages -= memmap_pages;
5362 		nr_all_pages += freesize;
5363 
5364 		/*
5365 		 * Set an approximate value for lowmem here, it will be adjusted
5366 		 * when the bootmem allocator frees pages into the buddy system.
5367 		 * And all highmem pages will be managed by the buddy system.
5368 		 */
5369 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
5370 #ifdef CONFIG_NUMA
5371 		zone->node = nid;
5372 		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
5373 						/ 100;
5374 		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
5375 #endif
5376 		zone->name = zone_names[j];
5377 		spin_lock_init(&zone->lock);
5378 		spin_lock_init(&zone->lru_lock);
5379 		zone_seqlock_init(zone);
5380 		zone->zone_pgdat = pgdat;
5381 		zone_pcp_init(zone);
5382 
5383 		/* For bootup, initialized properly in watermark setup */
5384 		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5385 
5386 		lruvec_init(&zone->lruvec);
5387 		if (!size)
5388 			continue;
5389 
5390 		set_pageblock_order();
5391 		setup_usemap(pgdat, zone, zone_start_pfn, size);
5392 		ret = init_currently_empty_zone(zone, zone_start_pfn,
5393 						size, MEMMAP_EARLY);
5394 		BUG_ON(ret);
5395 		memmap_init(size, nid, j, zone_start_pfn);
5396 		zone_start_pfn += size;
5397 	}
5398 }
5399 
5400 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
5401 {
5402 	/* Skip empty nodes */
5403 	if (!pgdat->node_spanned_pages)
5404 		return;
5405 
5406 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5407 	/* ia64 gets its own node_mem_map, before this, without bootmem */
5408 	if (!pgdat->node_mem_map) {
5409 		unsigned long size, start, end;
5410 		struct page *map;
5411 
5412 		/*
5413 		 * The zone's endpoints aren't required to be MAX_ORDER
5414 		 * aligned but the node_mem_map endpoints must be in order
5415 		 * for the buddy allocator to function correctly.
5416 		 */
5417 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5418 		end = pgdat_end_pfn(pgdat);
5419 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
5420 		size =  (end - start) * sizeof(struct page);
5421 		map = alloc_remap(pgdat->node_id, size);
5422 		if (!map)
5423 			map = memblock_virt_alloc_node_nopanic(size,
5424 							       pgdat->node_id);
5425 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
5426 	}
5427 #ifndef CONFIG_NEED_MULTIPLE_NODES
5428 	/*
5429 	 * With no DISCONTIG, the global mem_map is just set as node 0's
5430 	 */
5431 	if (pgdat == NODE_DATA(0)) {
5432 		mem_map = NODE_DATA(0)->node_mem_map;
5433 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5434 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
5435 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
5436 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5437 	}
5438 #endif
5439 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
5440 }
5441 
5442 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5443 		unsigned long node_start_pfn, unsigned long *zholes_size)
5444 {
5445 	pg_data_t *pgdat = NODE_DATA(nid);
5446 	unsigned long start_pfn = 0;
5447 	unsigned long end_pfn = 0;
5448 
5449 	/* pg_data_t should be reset to zero when it's allocated */
5450 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
5451 
5452 	reset_deferred_meminit(pgdat);
5453 	pgdat->node_id = nid;
5454 	pgdat->node_start_pfn = node_start_pfn;
5455 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5456 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5457 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
5458 		(u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
5459 #endif
5460 	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5461 				  zones_size, zholes_size);
5462 
5463 	alloc_node_mem_map(pgdat);
5464 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5465 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5466 		nid, (unsigned long)pgdat,
5467 		(unsigned long)pgdat->node_mem_map);
5468 #endif
5469 
5470 	free_area_init_core(pgdat, start_pfn, end_pfn);
5471 }
5472 
5473 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5474 
5475 #if MAX_NUMNODES > 1
5476 /*
5477  * Figure out the number of possible node ids.
5478  */
5479 void __init setup_nr_node_ids(void)
5480 {
5481 	unsigned int node;
5482 	unsigned int highest = 0;
5483 
5484 	for_each_node_mask(node, node_possible_map)
5485 		highest = node;
5486 	nr_node_ids = highest + 1;
5487 }
5488 #endif
5489 
5490 /**
5491  * node_map_pfn_alignment - determine the maximum internode alignment
5492  *
5493  * This function should be called after node map is populated and sorted.
5494  * It calculates the maximum power of two alignment which can distinguish
5495  * all the nodes.
5496  *
5497  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5498  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
5499  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
5500  * shifted, 1GiB is enough and this function will indicate so.
5501  *
5502  * This is used to test whether pfn -> nid mapping of the chosen memory
5503  * model has fine enough granularity to avoid incorrect mapping for the
5504  * populated node map.
5505  *
5506  * Returns the determined alignment in pfn's.  0 if there is no alignment
5507  * requirement (single node).
5508  */
5509 unsigned long __init node_map_pfn_alignment(void)
5510 {
5511 	unsigned long accl_mask = 0, last_end = 0;
5512 	unsigned long start, end, mask;
5513 	int last_nid = -1;
5514 	int i, nid;
5515 
5516 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5517 		if (!start || last_nid < 0 || last_nid == nid) {
5518 			last_nid = nid;
5519 			last_end = end;
5520 			continue;
5521 		}
5522 
5523 		/*
5524 		 * Start with a mask granular enough to pin-point to the
5525 		 * start pfn and tick off bits one-by-one until it becomes
5526 		 * too coarse to separate the current node from the last.
5527 		 */
5528 		mask = ~((1 << __ffs(start)) - 1);
5529 		while (mask && last_end <= (start & (mask << 1)))
5530 			mask <<= 1;
5531 
5532 		/* accumulate all internode masks */
5533 		accl_mask |= mask;
5534 	}
5535 
5536 	/* convert mask to number of pages */
5537 	return ~accl_mask + 1;
5538 }
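
/*
 * Worked example (editorial annotation): for two nodes covering pfns
 * [0, 0x40000) and [0x50000, 0x90000), the second range starts at
 * 0x50000 whose lowest set bit is bit 16, so mask starts as ~0xffff.
 * The loop widens it while last_end (0x40000) <= (start & (mask << 1)),
 * stopping at mask == ~0x3ffff, and the function returns
 * ~(~0x3ffff) + 1 = 0x40000 pfns -- 1GiB with 4KiB pages.
 */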
5539 
5540 /* Find the lowest pfn for a node */
5541 static unsigned long __init find_min_pfn_for_node(int nid)
5542 {
5543 	unsigned long min_pfn = ULONG_MAX;
5544 	unsigned long start_pfn;
5545 	int i;
5546 
5547 	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5548 		min_pfn = min(min_pfn, start_pfn);
5549 
5550 	if (min_pfn == ULONG_MAX) {
5551 		printk(KERN_WARNING
5552 			"Could not find start_pfn for node %d\n", nid);
5553 		return 0;
5554 	}
5555 
5556 	return min_pfn;
5557 }
5558 
5559 /**
5560  * find_min_pfn_with_active_regions - Find the minimum PFN registered
5561  *
5562  * It returns the minimum PFN based on information provided via
5563  * memblock_set_node().
5564  */
5565 unsigned long __init find_min_pfn_with_active_regions(void)
5566 {
5567 	return find_min_pfn_for_node(MAX_NUMNODES);
5568 }
5569 
5570 /*
5571  * early_calculate_totalpages()
5572  * Sum pages in active regions for movable zone.
5573  * Populate N_MEMORY for calculating usable_nodes.
5574  */
5575 static unsigned long __init early_calculate_totalpages(void)
5576 {
5577 	unsigned long totalpages = 0;
5578 	unsigned long start_pfn, end_pfn;
5579 	int i, nid;
5580 
5581 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5582 		unsigned long pages = end_pfn - start_pfn;
5583 
5584 		totalpages += pages;
5585 		if (pages)
5586 			node_set_state(nid, N_MEMORY);
5587 	}
5588 	return totalpages;
5589 }
5590 
5591 /*
5592  * Find the PFN the Movable zone begins in each node. Kernel memory
5593  * is spread evenly between nodes as long as the nodes have enough
5594  * memory. When they don't, some nodes will have more kernelcore than
5595  * others
5596  */
5597 static void __init find_zone_movable_pfns_for_nodes(void)
5598 {
5599 	int i, nid;
5600 	unsigned long usable_startpfn;
5601 	unsigned long kernelcore_node, kernelcore_remaining;
5602 	/* save the state before borrowing the nodemask */
5603 	nodemask_t saved_node_state = node_states[N_MEMORY];
5604 	unsigned long totalpages = early_calculate_totalpages();
5605 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
5606 	struct memblock_region *r;
5607 
5608 	/* Need to find movable_zone earlier when movable_node is specified. */
5609 	find_usable_zone_for_movable();
5610 
5611 	/*
5612 	 * If movable_node is specified, ignore kernelcore and movablecore
5613 	 * options.
5614 	 */
5615 	if (movable_node_is_enabled()) {
5616 		for_each_memblock(memory, r) {
5617 			if (!memblock_is_hotpluggable(r))
5618 				continue;
5619 
5620 			nid = r->nid;
5621 
5622 			usable_startpfn = PFN_DOWN(r->base);
5623 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5624 				min(usable_startpfn, zone_movable_pfn[nid]) :
5625 				usable_startpfn;
5626 		}
5627 
5628 		goto out2;
5629 	}
5630 
5631 	/*
5632 	 * If movablecore=nn[KMG] was specified, calculate the size of
5633 	 * kernelcore it corresponds to so that memory usable for
5634 	 * any allocation type is evenly spread. If both kernelcore
5635 	 * and movablecore are specified, then the value of kernelcore
5636 	 * will be used for required_kernelcore if it's greater than
5637 	 * what movablecore would have allowed.
5638 	 */
5639 	if (required_movablecore) {
5640 		unsigned long corepages;
5641 
5642 		/*
5643 		 * Round-up so that ZONE_MOVABLE is at least as large as what
5644 		 * was requested by the user
5645 		 */
5646 		required_movablecore =
5647 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
5648 		corepages = totalpages - required_movablecore;
5649 
5650 		required_kernelcore = max(required_kernelcore, corepages);
5651 	}
5652 
5653 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
5654 	if (!required_kernelcore)
5655 		goto out;
5656 
5657 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
5658 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5659 
5660 restart:
5661 	/* Spread kernelcore memory as evenly as possible throughout nodes */
5662 	kernelcore_node = required_kernelcore / usable_nodes;
5663 	for_each_node_state(nid, N_MEMORY) {
5664 		unsigned long start_pfn, end_pfn;
5665 
5666 		/*
5667 		 * Recalculate kernelcore_node if the division per node
5668 		 * now exceeds what is necessary to satisfy the requested
5669 		 * amount of memory for the kernel
5670 		 */
5671 		if (required_kernelcore < kernelcore_node)
5672 			kernelcore_node = required_kernelcore / usable_nodes;
5673 
5674 		/*
5675 		 * As the map is walked, we track how much memory is usable
5676 		 * by the kernel using kernelcore_remaining. When it is
5677 		 * 0, the rest of the node is usable by ZONE_MOVABLE
5678 		 */
5679 		kernelcore_remaining = kernelcore_node;
5680 
5681 		/* Go through each range of PFNs within this node */
5682 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5683 			unsigned long size_pages;
5684 
5685 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
5686 			if (start_pfn >= end_pfn)
5687 				continue;
5688 
5689 			/* Account for what is only usable for kernelcore */
5690 			if (start_pfn < usable_startpfn) {
5691 				unsigned long kernel_pages;
5692 				kernel_pages = min(end_pfn, usable_startpfn)
5693 								- start_pfn;
5694 
5695 				kernelcore_remaining -= min(kernel_pages,
5696 							kernelcore_remaining);
5697 				required_kernelcore -= min(kernel_pages,
5698 							required_kernelcore);
5699 
5700 				/* Continue if range is now fully accounted */
5701 				if (end_pfn <= usable_startpfn) {
5702 
5703 					/*
5704 					 * Push zone_movable_pfn to the end so
5705 					 * that if we have to rebalance
5706 					 * kernelcore across nodes, we will
5707 					 * not double account here
5708 					 */
5709 					zone_movable_pfn[nid] = end_pfn;
5710 					continue;
5711 				}
5712 				start_pfn = usable_startpfn;
5713 			}
5714 
5715 			/*
5716 			 * The usable PFN range for ZONE_MOVABLE is from
5717 			 * start_pfn->end_pfn. Calculate size_pages as the
5718 			 * number of pages used as kernelcore
5719 			 */
5720 			size_pages = end_pfn - start_pfn;
5721 			if (size_pages > kernelcore_remaining)
5722 				size_pages = kernelcore_remaining;
5723 			zone_movable_pfn[nid] = start_pfn + size_pages;
5724 
5725 			/*
5726 			 * Some kernelcore has been met, update counts and
5727 			 * break if the kernelcore for this node has been
5728 			 * satisfied
5729 			 */
5730 			required_kernelcore -= min(required_kernelcore,
5731 								size_pages);
5732 			kernelcore_remaining -= size_pages;
5733 			if (!kernelcore_remaining)
5734 				break;
5735 		}
5736 	}
5737 
5738 	/*
5739 	 * If there is still required_kernelcore, we do another pass with one
5740 	 * less node in the count. This will push zone_movable_pfn[nid] further
5741 	 * along on the nodes that still have memory until kernelcore is
5742 	 * satisfied
5743 	 */
5744 	usable_nodes--;
5745 	if (usable_nodes && required_kernelcore > usable_nodes)
5746 		goto restart;
5747 
5748 out2:
5749 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5750 	for (nid = 0; nid < MAX_NUMNODES; nid++)
5751 		zone_movable_pfn[nid] =
5752 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5753 
5754 out:
5755 	/* restore the node_state */
5756 	node_states[N_MEMORY] = saved_node_state;
5757 }
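
/*
 * Worked example (editorial annotation): booting with kernelcore=2G on a
 * two-node machine with 2G of memory per node gives usable_nodes == 2 and
 * kernelcore_node == 1G worth of pages per node, so zone_movable_pfn[]
 * lands 1G into each node and the upper half of each node becomes
 * ZONE_MOVABLE.
 */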
5758 
5759 /* Any regular or high memory on that node? */
5760 static void check_for_memory(pg_data_t *pgdat, int nid)
5761 {
5762 	enum zone_type zone_type;
5763 
5764 	if (N_MEMORY == N_NORMAL_MEMORY)
5765 		return;
5766 
5767 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5768 		struct zone *zone = &pgdat->node_zones[zone_type];
5769 		if (populated_zone(zone)) {
5770 			node_set_state(nid, N_HIGH_MEMORY);
5771 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5772 			    zone_type <= ZONE_NORMAL)
5773 				node_set_state(nid, N_NORMAL_MEMORY);
5774 			break;
5775 		}
5776 	}
5777 }
5778 
5779 /**
5780  * free_area_init_nodes - Initialise all pg_data_t and zone data
5781  * @max_zone_pfn: an array of max PFNs for each zone
5782  *
5783  * This will call free_area_init_node() for each active node in the system.
5784  * Using the page ranges provided by memblock_set_node(), the size of each
5785  * zone in each node and their holes is calculated. If the maximum PFN
5786  * between two adjacent zones match, it is assumed that the zone is empty.
5787  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5788  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5789  * starts where the previous one ended. For example, ZONE_DMA32 starts
5790  * at arch_max_dma_pfn.
5791  */
5792 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5793 {
5794 	unsigned long start_pfn, end_pfn;
5795 	int i, nid;
5796 
5797 	/* Record where the zone boundaries are */
5798 	memset(arch_zone_lowest_possible_pfn, 0,
5799 				sizeof(arch_zone_lowest_possible_pfn));
5800 	memset(arch_zone_highest_possible_pfn, 0,
5801 				sizeof(arch_zone_highest_possible_pfn));
5802 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5803 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5804 	for (i = 1; i < MAX_NR_ZONES; i++) {
5805 		if (i == ZONE_MOVABLE)
5806 			continue;
5807 		arch_zone_lowest_possible_pfn[i] =
5808 			arch_zone_highest_possible_pfn[i-1];
5809 		arch_zone_highest_possible_pfn[i] =
5810 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5811 	}
5812 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5813 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5814 
5815 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
5816 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5817 	find_zone_movable_pfns_for_nodes();
5818 
5819 	/* Print out the zone ranges */
5820 	pr_info("Zone ranges:\n");
5821 	for (i = 0; i < MAX_NR_ZONES; i++) {
5822 		if (i == ZONE_MOVABLE)
5823 			continue;
5824 		pr_info("  %-8s ", zone_names[i]);
5825 		if (arch_zone_lowest_possible_pfn[i] ==
5826 				arch_zone_highest_possible_pfn[i])
5827 			pr_cont("empty\n");
5828 		else
5829 			pr_cont("[mem %#018Lx-%#018Lx]\n",
5830 				(u64)arch_zone_lowest_possible_pfn[i]
5831 					<< PAGE_SHIFT,
5832 				((u64)arch_zone_highest_possible_pfn[i]
5833 					<< PAGE_SHIFT) - 1);
5834 	}
5835 
5836 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
5837 	pr_info("Movable zone start for each node\n");
5838 	for (i = 0; i < MAX_NUMNODES; i++) {
5839 		if (zone_movable_pfn[i])
5840 			pr_info("  Node %d: %#018Lx\n", i,
5841 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
5842 	}
5843 
5844 	/* Print out the early node map */
5845 	pr_info("Early memory node ranges\n");
5846 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5847 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5848 			(u64)start_pfn << PAGE_SHIFT,
5849 			((u64)end_pfn << PAGE_SHIFT) - 1);
5850 
5851 	/* Initialise every node */
5852 	mminit_verify_pageflags_layout();
5853 	setup_nr_node_ids();
5854 	for_each_online_node(nid) {
5855 		pg_data_t *pgdat = NODE_DATA(nid);
5856 		free_area_init_node(nid, NULL,
5857 				find_min_pfn_for_node(nid), NULL);
5858 
5859 		/* Any memory on that node */
5860 		if (pgdat->node_present_pages)
5861 			node_set_state(nid, N_MEMORY);
5862 		check_for_memory(pgdat, nid);
5863 	}
5864 }
5865 
5866 static int __init cmdline_parse_core(char *p, unsigned long *core)
5867 {
5868 	unsigned long long coremem;
5869 	if (!p)
5870 		return -EINVAL;
5871 
5872 	coremem = memparse(p, &p);
5873 	*core = coremem >> PAGE_SHIFT;
5874 
5875 	/* Paranoid check that UL is enough for the coremem value */
5876 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5877 
5878 	return 0;
5879 }
5880 
5881 /*
5882  * kernelcore=size sets the amount of memory for use for allocations that
5883  * cannot be reclaimed or migrated.
5884  */
5885 static int __init cmdline_parse_kernelcore(char *p)
5886 {
5887 	return cmdline_parse_core(p, &required_kernelcore);
5888 }
5889 
5890 /*
5891  * movablecore=size sets the amount of memory for use for allocations that
5892  * can be reclaimed or migrated.
5893  */
5894 static int __init cmdline_parse_movablecore(char *p)
5895 {
5896 	return cmdline_parse_core(p, &required_movablecore);
5897 }
5898 
5899 early_param("kernelcore", cmdline_parse_kernelcore);
5900 early_param("movablecore", cmdline_parse_movablecore);
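
/*
 * Example (editorial annotation, assuming 4KiB pages): booting with
 * "kernelcore=512M" makes memparse() return 536870912, so
 * required_kernelcore becomes 536870912 >> PAGE_SHIFT = 131072 pages.
 */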
5901 
5902 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5903 
5904 void adjust_managed_page_count(struct page *page, long count)
5905 {
5906 	spin_lock(&managed_page_count_lock);
5907 	page_zone(page)->managed_pages += count;
5908 	totalram_pages += count;
5909 #ifdef CONFIG_HIGHMEM
5910 	if (PageHighMem(page))
5911 		totalhigh_pages += count;
5912 #endif
5913 	spin_unlock(&managed_page_count_lock);
5914 }
5915 EXPORT_SYMBOL(adjust_managed_page_count);
5916 
5917 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5918 {
5919 	void *pos;
5920 	unsigned long pages = 0;
5921 
5922 	start = (void *)PAGE_ALIGN((unsigned long)start);
5923 	end = (void *)((unsigned long)end & PAGE_MASK);
5924 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5925 		if ((unsigned int)poison <= 0xFF)
5926 			memset(pos, poison, PAGE_SIZE);
5927 		free_reserved_page(virt_to_page(pos));
5928 	}
5929 
5930 	if (pages && s)
5931 		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
5932 			s, pages << (PAGE_SHIFT - 10), start, end);
5933 
5934 	return pages;
5935 }
5936 EXPORT_SYMBOL(free_reserved_area);
5937 
5938 #ifdef	CONFIG_HIGHMEM
5939 void free_highmem_page(struct page *page)
5940 {
5941 	__free_reserved_page(page);
5942 	totalram_pages++;
5943 	page_zone(page)->managed_pages++;
5944 	totalhigh_pages++;
5945 }
5946 #endif
5947 
5948 
5949 void __init mem_init_print_info(const char *str)
5950 {
5951 	unsigned long physpages, codesize, datasize, rosize, bss_size;
5952 	unsigned long init_code_size, init_data_size;
5953 
5954 	physpages = get_num_physpages();
5955 	codesize = _etext - _stext;
5956 	datasize = _edata - _sdata;
5957 	rosize = __end_rodata - __start_rodata;
5958 	bss_size = __bss_stop - __bss_start;
5959 	init_data_size = __init_end - __init_begin;
5960 	init_code_size = _einittext - _sinittext;
5961 
5962 	/*
5963 	 * Detect special cases and adjust section sizes accordingly:
5964 	 * 1) .init.* may be embedded into .data sections
5965 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
5966 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
5967 	 * 3) .rodata.* may be embedded into .text or .data sections.
5968 	 */
5969 #define adj_init_size(start, end, size, pos, adj) \
5970 	do { \
5971 		if (start <= pos && pos < end && size > adj) \
5972 			size -= adj; \
5973 	} while (0)
5974 
5975 	adj_init_size(__init_begin, __init_end, init_data_size,
5976 		     _sinittext, init_code_size);
5977 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5978 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5979 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5980 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5981 
5982 #undef	adj_init_size
5983 
5984 	pr_info("Memory: %luK/%luK available "
5985 	       "(%luK kernel code, %luK rwdata, %luK rodata, "
5986 	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
5987 #ifdef	CONFIG_HIGHMEM
5988 	       ", %luK highmem"
5989 #endif
5990 	       "%s%s)\n",
5991 	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5992 	       codesize >> 10, datasize >> 10, rosize >> 10,
5993 	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
5994 	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5995 	       totalcma_pages << (PAGE_SHIFT-10),
5996 #ifdef	CONFIG_HIGHMEM
5997 	       totalhigh_pages << (PAGE_SHIFT-10),
5998 #endif
5999 	       str ? ", " : "", str ? str : "");
6000 }
6001 
6002 /**
6003  * set_dma_reserve - set the specified number of pages reserved in the first zone
6004  * @new_dma_reserve: The number of pages to mark reserved
6005  *
6006  * The per-cpu batchsize and zone watermarks are determined by present_pages.
6007  * In the DMA zone, a significant percentage may be consumed by kernel image
6008  * and other unfreeable allocations which can skew the watermarks badly. This
6009  * function may optionally be used to account for unfreeable pages in the
6010  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6011  * smaller per-cpu batchsize.
6012  */
6013 void __init set_dma_reserve(unsigned long new_dma_reserve)
6014 {
6015 	dma_reserve = new_dma_reserve;
6016 }
6017 
6018 void __init free_area_init(unsigned long *zones_size)
6019 {
6020 	free_area_init_node(0, zones_size,
6021 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6022 }
6023 
6024 static int page_alloc_cpu_notify(struct notifier_block *self,
6025 				 unsigned long action, void *hcpu)
6026 {
6027 	int cpu = (unsigned long)hcpu;
6028 
6029 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
6030 		lru_add_drain_cpu(cpu);
6031 		drain_pages(cpu);
6032 
6033 		/*
6034 		 * Spill the event counters of the dead processor
6035 		 * into the current processor's event counters.
6036 		 * This artificially elevates the count of the current
6037 		 * processor.
6038 		 */
6039 		vm_events_fold_cpu(cpu);
6040 
6041 		/*
6042 		 * Zero the differential counters of the dead processor
6043 		 * so that the vm statistics are consistent.
6044 		 *
6045 		 * This is only okay since the processor is dead and cannot
6046 		 * race with what we are doing.
6047 		 */
6048 		cpu_vm_stats_fold(cpu);
6049 	}
6050 	return NOTIFY_OK;
6051 }
6052 
6053 void __init page_alloc_init(void)
6054 {
6055 	hotcpu_notifier(page_alloc_cpu_notify, 0);
6056 }
6057 
6058 /*
6059  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6060  *	or min_free_kbytes changes.
6061  */
6062 static void calculate_totalreserve_pages(void)
6063 {
6064 	struct pglist_data *pgdat;
6065 	unsigned long reserve_pages = 0;
6066 	enum zone_type i, j;
6067 
6068 	for_each_online_pgdat(pgdat) {
6069 		for (i = 0; i < MAX_NR_ZONES; i++) {
6070 			struct zone *zone = pgdat->node_zones + i;
6071 			long max = 0;
6072 
6073 			/* Find valid and maximum lowmem_reserve in the zone */
6074 			for (j = i; j < MAX_NR_ZONES; j++) {
6075 				if (zone->lowmem_reserve[j] > max)
6076 					max = zone->lowmem_reserve[j];
6077 			}
6078 
6079 			/* we treat the high watermark as reserved pages. */
6080 			max += high_wmark_pages(zone);
6081 
6082 			if (max > zone->managed_pages)
6083 				max = zone->managed_pages;
6084 			reserve_pages += max;
6085 			/*
6086 			 * Lowmem reserves are not available to
6087 			 * GFP_HIGHUSER page cache allocations and
6088 			 * kswapd tries to balance zones to their high
6089 			 * watermark.  As a result, neither should be
6090 			 * regarded as dirtyable memory, to prevent a
6091 			 * situation where reclaim has to clean pages
6092 			 * in order to balance the zones.
6093 			 */
6094 			zone->dirty_balance_reserve = max;
6095 		}
6096 	}
6097 	dirty_balance_reserve = reserve_pages;
6098 	totalreserve_pages = reserve_pages;
6099 }
6100 
6101 /*
6102  * setup_per_zone_lowmem_reserve - called whenever
6103  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
6104  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
6105  *	pages are left in the zone after a successful __alloc_pages().
6106  */
6107 static void setup_per_zone_lowmem_reserve(void)
6108 {
6109 	struct pglist_data *pgdat;
6110 	enum zone_type j, idx;
6111 
6112 	for_each_online_pgdat(pgdat) {
6113 		for (j = 0; j < MAX_NR_ZONES; j++) {
6114 			struct zone *zone = pgdat->node_zones + j;
6115 			unsigned long managed_pages = zone->managed_pages;
6116 
6117 			zone->lowmem_reserve[j] = 0;
6118 
6119 			idx = j;
6120 			while (idx) {
6121 				struct zone *lower_zone;
6122 
6123 				idx--;
6124 
6125 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
6126 					sysctl_lowmem_reserve_ratio[idx] = 1;
6127 
6128 				lower_zone = pgdat->node_zones + idx;
6129 				lower_zone->lowmem_reserve[j] = managed_pages /
6130 					sysctl_lowmem_reserve_ratio[idx];
6131 				managed_pages += lower_zone->managed_pages;
6132 			}
6133 		}
6134 	}
6135 
6136 	/* update totalreserve_pages */
6137 	calculate_totalreserve_pages();
6138 }
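
/*
 * Worked example (editorial annotation): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and ZONE_NORMAL managing
 * 1000000 pages, the pass for j == ZONE_NORMAL sets ZONE_DMA's
 * lowmem_reserve[ZONE_NORMAL] to 1000000 / 256 = 3906 pages, so a
 * GFP_KERNEL allocation may fall back to ZONE_DMA only while it keeps
 * roughly that many DMA pages free.
 */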
6139 
6140 static void __setup_per_zone_wmarks(void)
6141 {
6142 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6143 	unsigned long lowmem_pages = 0;
6144 	struct zone *zone;
6145 	unsigned long flags;
6146 
6147 	/* Calculate total number of !ZONE_HIGHMEM pages */
6148 	for_each_zone(zone) {
6149 		if (!is_highmem(zone))
6150 			lowmem_pages += zone->managed_pages;
6151 	}
6152 
6153 	for_each_zone(zone) {
6154 		u64 tmp;
6155 
6156 		spin_lock_irqsave(&zone->lock, flags);
6157 		tmp = (u64)pages_min * zone->managed_pages;
6158 		do_div(tmp, lowmem_pages);
6159 		if (is_highmem(zone)) {
6160 			/*
6161 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6162 			 * need highmem pages, so cap pages_min to a small
6163 			 * value here.
6164 			 *
6165 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6166 			 * deltas control asynchronous page reclaim, and so should
6167 			 * not be capped for highmem.
6168 			 */
6169 			unsigned long min_pages;
6170 
6171 			min_pages = zone->managed_pages / 1024;
6172 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6173 			zone->watermark[WMARK_MIN] = min_pages;
6174 		} else {
6175 			/*
6176 			 * If it's a lowmem zone, reserve a number of pages
6177 			 * proportionate to the zone's size.
6178 			 */
6179 			zone->watermark[WMARK_MIN] = tmp;
6180 		}
6181 
6182 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
6183 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
6184 
6185 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
6186 			high_wmark_pages(zone) - low_wmark_pages(zone) -
6187 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
6188 
6189 		setup_zone_migrate_reserve(zone);
6190 		spin_unlock_irqrestore(&zone->lock, flags);
6191 	}
6192 
6193 	/* update totalreserve_pages */
6194 	calculate_totalreserve_pages();
6195 }
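
/*
 * Worked example (editorial annotation, assuming 4KiB pages): with
 * min_free_kbytes == 4096, pages_min == 1024.  A lowmem zone holding
 * half of all lowmem gets tmp == 512, so WMARK_MIN == 512,
 * WMARK_LOW == 512 + (512 >> 2) == 640 and
 * WMARK_HIGH == 512 + (512 >> 1) == 768 pages.
 */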
6196 
6197 /**
6198  * setup_per_zone_wmarks - called when min_free_kbytes changes
6199  * or when memory is hot-{added|removed}
6200  *
6201  * Ensures that the watermark[min,low,high] values for each zone are set
6202  * correctly with respect to min_free_kbytes.
6203  */
6204 void setup_per_zone_wmarks(void)
6205 {
6206 	mutex_lock(&zonelists_mutex);
6207 	__setup_per_zone_wmarks();
6208 	mutex_unlock(&zonelists_mutex);
6209 }
6210 
6211 /*
6212  * The inactive anon list should be small enough that the VM never has to
6213  * do too much work, but large enough that each inactive page has a chance
6214  * to be referenced again before it is swapped out.
6215  *
6216  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6217  * INACTIVE_ANON pages on this zone's LRU, maintained by the
6218  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6219  * the anonymous pages are kept on the inactive list.
6220  *
6221  * total     target    max
6222  * memory    ratio     inactive anon
6223  * -------------------------------------
6224  *   10MB       1         5MB
6225  *  100MB       1        50MB
6226  *    1GB       3       250MB
6227  *   10GB      10       0.9GB
6228  *  100GB      31         3GB
6229  *    1TB     101        10GB
6230  *   10TB     320        32GB
6231  */
6232 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
6233 {
6234 	unsigned int gb, ratio;
6235 
6236 	/* Zone size in gigabytes */
6237 	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
6238 	if (gb)
6239 		ratio = int_sqrt(10 * gb);
6240 	else
6241 		ratio = 1;
6242 
6243 	zone->inactive_ratio = ratio;
6244 }
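
/*
 * Example (editorial annotation): a zone managing 4GiB of 4KiB pages has
 * gb == 4, so the target ratio is int_sqrt(10 * 4) == 6, i.e. roughly
 * 1 in 7 anonymous pages is kept on the inactive list.
 */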
6245 
6246 static void __meminit setup_per_zone_inactive_ratio(void)
6247 {
6248 	struct zone *zone;
6249 
6250 	for_each_zone(zone)
6251 		calculate_zone_inactive_ratio(zone);
6252 }
6253 
6254 /*
6255  * Initialise min_free_kbytes.
6256  *
6257  * For small machines we want it small (128k min).  For large machines
6258  * we want it large (64MB max).  But it is not linear, because network
6259  * bandwidth does not increase linearly with machine size.  We use
6260  *
6261  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6262  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
6263  *
6264  * which yields
6265  *
6266  * 16MB:	512k
6267  * 32MB:	724k
6268  * 64MB:	1024k
6269  * 128MB:	1448k
6270  * 256MB:	2048k
6271  * 512MB:	2896k
6272  * 1024MB:	4096k
6273  * 2048MB:	5792k
6274  * 4096MB:	8192k
6275  * 8192MB:	11584k
6276  * 16384MB:	16384k
6277  */
6278 int __meminit init_per_zone_wmark_min(void)
6279 {
6280 	unsigned long lowmem_kbytes;
6281 	int new_min_free_kbytes;
6282 
6283 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6284 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6285 
6286 	if (new_min_free_kbytes > user_min_free_kbytes) {
6287 		min_free_kbytes = new_min_free_kbytes;
6288 		if (min_free_kbytes < 128)
6289 			min_free_kbytes = 128;
6290 		if (min_free_kbytes > 65536)
6291 			min_free_kbytes = 65536;
6292 	} else {
6293 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6294 				new_min_free_kbytes, user_min_free_kbytes);
6295 	}
6296 	setup_per_zone_wmarks();
6297 	refresh_zone_stat_thresholds();
6298 	setup_per_zone_lowmem_reserve();
6299 	setup_per_zone_inactive_ratio();
6300 	return 0;
6301 }
6302 module_init(init_per_zone_wmark_min)
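
/*
 * Worked example (editorial annotation): on a machine with 4GiB of
 * lowmem, lowmem_kbytes == 4194304 and int_sqrt(4194304 * 16) == 8192,
 * matching the 4096MB: 8192k row in the table above.
 */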
6303 
6304 /*
6305  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6306  *	that we can call two helper functions whenever min_free_kbytes
6307  *	changes.
6308  */
6309 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
6310 	void __user *buffer, size_t *length, loff_t *ppos)
6311 {
6312 	int rc;
6313 
6314 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6315 	if (rc)
6316 		return rc;
6317 
6318 	if (write) {
6319 		user_min_free_kbytes = min_free_kbytes;
6320 		setup_per_zone_wmarks();
6321 	}
6322 	return 0;
6323 }
6324 
6325 #ifdef CONFIG_NUMA
6326 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
6327 	void __user *buffer, size_t *length, loff_t *ppos)
6328 {
6329 	struct zone *zone;
6330 	int rc;
6331 
6332 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6333 	if (rc)
6334 		return rc;
6335 
6336 	for_each_zone(zone)
6337 		zone->min_unmapped_pages = (zone->managed_pages *
6338 				sysctl_min_unmapped_ratio) / 100;
6339 	return 0;
6340 }
6341 
6342 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6343 	void __user *buffer, size_t *length, loff_t *ppos)
6344 {
6345 	struct zone *zone;
6346 	int rc;
6347 
6348 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6349 	if (rc)
6350 		return rc;
6351 
6352 	for_each_zone(zone)
6353 		zone->min_slab_pages = (zone->managed_pages *
6354 				sysctl_min_slab_ratio) / 100;
6355 	return 0;
6356 }
6357 #endif
6358 
6359 /*
6360  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6361  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6362  *	whenever sysctl_lowmem_reserve_ratio changes.
6363  *
6364  * The reserve ratio has no relation to the minimum watermarks. The
6365  * lowmem reserve ratio is only meaningful as a function of the
6366  * boot-time zone sizes.
6367  */
6368 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
6369 	void __user *buffer, size_t *length, loff_t *ppos)
6370 {
6371 	proc_dointvec_minmax(table, write, buffer, length, ppos);
6372 	setup_per_zone_lowmem_reserve();
6373 	return 0;
6374 }
6375 
6376 /*
6377  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
6378  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
6379  * pagelist can have before it gets flushed back to the buddy allocator.
6380  */
6381 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
6382 	void __user *buffer, size_t *length, loff_t *ppos)
6383 {
6384 	struct zone *zone;
6385 	int old_percpu_pagelist_fraction;
6386 	int ret;
6387 
6388 	mutex_lock(&pcp_batch_high_lock);
6389 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6390 
6391 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6392 	if (!write || ret < 0)
6393 		goto out;
6394 
6395 	/* Sanity checking to avoid pcp imbalance */
6396 	if (percpu_pagelist_fraction &&
6397 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6398 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6399 		ret = -EINVAL;
6400 		goto out;
6401 	}
6402 
6403 	/* No change? */
6404 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6405 		goto out;
6406 
6407 	for_each_populated_zone(zone) {
6408 		unsigned int cpu;
6409 
6410 		for_each_possible_cpu(cpu)
6411 			pageset_set_high_and_batch(zone,
6412 					per_cpu_ptr(zone->pageset, cpu));
6413 	}
6414 out:
6415 	mutex_unlock(&pcp_batch_high_lock);
6416 	return ret;
6417 }
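
/*
 * Example (editorial annotation, sketching the expected effect): writing
 * 8 to vm.percpu_pagelist_fraction on a zone managing 262144 pages lets
 * pageset_set_high_and_batch() set pcp->high to 262144 / 8 = 32768 pages
 * per cpu; values between 1 and MIN_PERCPU_PAGELIST_FRACTION - 1 are
 * rejected with -EINVAL above.
 */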
6418 
6419 #ifdef CONFIG_NUMA
6420 int hashdist = HASHDIST_DEFAULT;
6421 
6422 static int __init set_hashdist(char *str)
6423 {
6424 	if (!str)
6425 		return 0;
6426 	hashdist = simple_strtoul(str, &str, 0);
6427 	return 1;
6428 }
6429 __setup("hashdist=", set_hashdist);
6430 #endif
6431 
6432 /*
6433  * allocate a large system hash table from bootmem
6434  * - it is assumed that the hash table must contain an exact power-of-2
6435  *   quantity of entries
6436  * - limit is the number of hash buckets, not the total allocation size
6437  */
6438 void *__init alloc_large_system_hash(const char *tablename,
6439 				     unsigned long bucketsize,
6440 				     unsigned long numentries,
6441 				     int scale,
6442 				     int flags,
6443 				     unsigned int *_hash_shift,
6444 				     unsigned int *_hash_mask,
6445 				     unsigned long low_limit,
6446 				     unsigned long high_limit)
6447 {
6448 	unsigned long long max = high_limit;
6449 	unsigned long log2qty, size;
6450 	void *table = NULL;
6451 
6452 	/* allow the kernel cmdline to have a say */
6453 	if (!numentries) {
6454 		/* round applicable memory size up to nearest megabyte */
6455 		numentries = nr_kernel_pages;
6456 
6457 		/* It isn't necessary when PAGE_SIZE >= 1MB */
6458 		if (PAGE_SHIFT < 20)
6459 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6460 
6461 		/* limit to 1 bucket per 2^scale bytes of low memory */
6462 		if (scale > PAGE_SHIFT)
6463 			numentries >>= (scale - PAGE_SHIFT);
6464 		else
6465 			numentries <<= (PAGE_SHIFT - scale);
6466 
6467 		/* Make sure we've got at least a 0-order allocation.. */
6468 		if (unlikely(flags & HASH_SMALL)) {
6469 			/* Makes no sense without HASH_EARLY */
6470 			WARN_ON(!(flags & HASH_EARLY));
6471 			if (!(numentries >> *_hash_shift)) {
6472 				numentries = 1UL << *_hash_shift;
6473 				BUG_ON(!numentries);
6474 			}
6475 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6476 			numentries = PAGE_SIZE / bucketsize;
6477 	}
6478 	numentries = roundup_pow_of_two(numentries);
6479 
6480 	/* limit allocation size to 1/16 total memory by default */
6481 	if (max == 0) {
6482 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6483 		do_div(max, bucketsize);
6484 	}
6485 	max = min(max, 0x80000000ULL);
6486 
6487 	if (numentries < low_limit)
6488 		numentries = low_limit;
6489 	if (numentries > max)
6490 		numentries = max;
6491 
6492 	log2qty = ilog2(numentries);
6493 
6494 	do {
6495 		size = bucketsize << log2qty;
6496 		if (flags & HASH_EARLY)
6497 			table = memblock_virt_alloc_nopanic(size, 0);
6498 		else if (hashdist)
6499 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6500 		else {
6501 			/*
6502 			 * If bucketsize is not a power-of-two, we may free
6503 			 * some pages at the end of the hash table, which
6504 			 * alloc_pages_exact() does automatically.
6505 			 */
6506 			if (get_order(size) < MAX_ORDER) {
6507 				table = alloc_pages_exact(size, GFP_ATOMIC);
6508 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6509 			}
6510 		}
6511 	} while (!table && size > PAGE_SIZE && --log2qty);
6512 
6513 	if (!table)
6514 		panic("Failed to allocate %s hash table\n", tablename);
6515 
6516 	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
6517 	       tablename,
6518 	       (1UL << log2qty),
6519 	       ilog2(size) - PAGE_SHIFT,
6520 	       size);
6521 
6522 	if (_hash_shift)
6523 		*_hash_shift = log2qty;
6524 	if (_hash_mask)
6525 		*_hash_mask = (1 << log2qty) - 1;
6526 
6527 	return table;
6528 }
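
/*
 * Usage sketch (editorial annotation; the table name and variables are
 * hypothetical, but the call shape mirrors real callers such as the
 * inode and dentry cache setup):
 *
 *	example_hash = alloc_large_system_hash("Example",
 *					       sizeof(struct hlist_head),
 *					       0, 14, HASH_EARLY,
 *					       &example_shift,
 *					       &example_mask, 0, 0);
 *
 * Here numentries == 0 sizes the table from nr_kernel_pages, rounded up
 * to a power of two and capped at 1/16 of memory by default, and
 * scale == 14 limits it to one bucket per 16KB of low memory.
 */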
6529 
6530 /* Return a pointer to the bitmap storing bits affecting a block of pages */
6531 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6532 							unsigned long pfn)
6533 {
6534 #ifdef CONFIG_SPARSEMEM
6535 	return __pfn_to_section(pfn)->pageblock_flags;
6536 #else
6537 	return zone->pageblock_flags;
6538 #endif /* CONFIG_SPARSEMEM */
6539 }
6540 
6541 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6542 {
6543 #ifdef CONFIG_SPARSEMEM
6544 	pfn &= (PAGES_PER_SECTION-1);
6545 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6546 #else
6547 	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
6548 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6549 #endif /* CONFIG_SPARSEMEM */
6550 }
6551 
6552 /**
6553  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6554  * @page: The page within the block of interest
6555  * @pfn: The target page frame number
6556  * @end_bitidx: The last bit of interest to retrieve
6557  * @mask: mask of bits that the caller is interested in
6558  *
6559  * Return: pageblock_bits flags
6560  */
6561 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6562 					unsigned long end_bitidx,
6563 					unsigned long mask)
6564 {
6565 	struct zone *zone;
6566 	unsigned long *bitmap;
6567 	unsigned long bitidx, word_bitidx;
6568 	unsigned long word;
6569 
6570 	zone = page_zone(page);
6571 	bitmap = get_pageblock_bitmap(zone, pfn);
6572 	bitidx = pfn_to_bitidx(zone, pfn);
6573 	word_bitidx = bitidx / BITS_PER_LONG;
6574 	bitidx &= (BITS_PER_LONG-1);
6575 
6576 	word = bitmap[word_bitidx];
6577 	bitidx += end_bitidx;
6578 	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
6579 }
6580 
6581 /**
6582  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6583  * @page: The page within the block of interest
6584  * @flags: The flags to set
6585  * @pfn: The target page frame number
6586  * @end_bitidx: The last bit of interest
6587  * @mask: mask of bits that the caller is interested in
6588  */
6589 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6590 					unsigned long pfn,
6591 					unsigned long end_bitidx,
6592 					unsigned long mask)
6593 {
6594 	struct zone *zone;
6595 	unsigned long *bitmap;
6596 	unsigned long bitidx, word_bitidx;
6597 	unsigned long old_word, word;
6598 
6599 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
6600 
6601 	zone = page_zone(page);
6602 	bitmap = get_pageblock_bitmap(zone, pfn);
6603 	bitidx = pfn_to_bitidx(zone, pfn);
6604 	word_bitidx = bitidx / BITS_PER_LONG;
6605 	bitidx &= (BITS_PER_LONG-1);
6606 
6607 	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
6608 
6609 	bitidx += end_bitidx;
6610 	mask <<= (BITS_PER_LONG - bitidx - 1);
6611 	flags <<= (BITS_PER_LONG - bitidx - 1);
6612 
6613 	word = READ_ONCE(bitmap[word_bitidx]);
6614 	for (;;) {
6615 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6616 		if (word == old_word)
6617 			break;
6618 		word = old_word;
6619 	}
6620 }
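
/*
 * Example (editorial annotation): get_pageblock_migratetype() is a
 * wrapper that passes end_bitidx == PB_migrate_end and
 * mask == MIGRATETYPE_MASK to get_pfnblock_flags_mask() above.  The
 * cmpxchg() loop in the setter lets concurrent updates to different
 * pageblocks that share one bitmap word proceed without a lock.
 */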
6621 
6622 /*
6623  * This function checks whether pageblock includes unmovable pages or not.
6624  * If @count is not zero, it is okay to include less @count unmovable pages
6625  *
6626  * PageLRU check without isolation or lru_lock could race so that
6627  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
6628  * expect this function should be exact.
6629  */
6630 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6631 			 bool skip_hwpoisoned_pages)
6632 {
6633 	unsigned long pfn, iter, found;
6634 	int mt;
6635 
6636 	/*
6637 	 * To avoid noise, lru_add_drain_all() should be called first.
6638 	 * A ZONE_MOVABLE zone never contains unmovable pages.
6639 	 */
6640 	if (zone_idx(zone) == ZONE_MOVABLE)
6641 		return false;
6642 	mt = get_pageblock_migratetype(page);
6643 	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
6644 		return false;
6645 
6646 	pfn = page_to_pfn(page);
6647 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6648 		unsigned long check = pfn + iter;
6649 
6650 		if (!pfn_valid_within(check))
6651 			continue;
6652 
6653 		page = pfn_to_page(check);
6654 
6655 		/*
6656 		 * Hugepages are not in LRU lists, but they're movable.
6657 		 * We need not scan over tail pages because we don't
6658 		 * handle each tail page individually in migration.
6659 		 */
6660 		if (PageHuge(page)) {
6661 			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6662 			continue;
6663 		}
6664 
6665 		/*
6666 		 * We can't use page_count without pinning the page
6667 		 * because another CPU can free the compound page.
6668 		 * This check already skips compound tails of THP
6669 		 * because their page->_count is zero at all times.
6670 		 */
6671 		if (!atomic_read(&page->_count)) {
6672 			if (PageBuddy(page))
6673 				iter += (1 << page_order(page)) - 1;
6674 			continue;
6675 		}
6676 
6677 		/*
6678 		 * The HWPoisoned page may not be in the buddy system, and
6679 		 * page_count() is not 0.
6680 		 */
6681 		if (skip_hwpoisoned_pages && PageHWPoison(page))
6682 			continue;
6683 
6684 		if (!PageLRU(page))
6685 			found++;
6686 		/*
6687 		 * If there are RECLAIMABLE pages, we need to check
6688 		 * them.  But for now, memory offline itself doesn't call
6689 		 * shrink_node_slabs(), and this still needs to be fixed.
6690 		 */
6691 		/*
6692 		 * If the page is not RAM, page_count() should be 0, so
6693 		 * no further check is needed; it is a _used_, non-movable page.
6694 		 *
6695 		 * The problematic thing here is PG_reserved pages. PG_reserved
6696 		 * is set on both memory hole pages and _used_ kernel
6697 		 * pages at boot.
6698 		 */
6699 		if (found > count)
6700 			return true;
6701 	}
6702 	return false;
6703 }
6704 
6705 bool is_pageblock_removable_nolock(struct page *page)
6706 {
6707 	struct zone *zone;
6708 	unsigned long pfn;
6709 
6710 	/*
6711 	 * We have to be careful here because we are iterating over memory
6712 	 * sections which are not zone aware so we might end up outside of
6713 	 * the zone but still within the section.
6714 	 * We have to take care about the node as well. If the node is offline
6715 	 * its NODE_DATA will be NULL - see page_zone.
6716 	 */
6717 	if (!node_online(page_to_nid(page)))
6718 		return false;
6719 
6720 	zone = page_zone(page);
6721 	pfn = page_to_pfn(page);
6722 	if (!zone_spans_pfn(zone, pfn))
6723 		return false;
6724 
6725 	return !has_unmovable_pages(zone, page, 0, true);
6726 }
6727 
6728 #ifdef CONFIG_CMA
6729 
6730 static unsigned long pfn_max_align_down(unsigned long pfn)
6731 {
6732 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6733 			     pageblock_nr_pages) - 1);
6734 }
6735 
6736 static unsigned long pfn_max_align_up(unsigned long pfn)
6737 {
6738 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6739 				pageblock_nr_pages));
6740 }
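
/*
 * Example (editorial annotation, assuming 4KiB pages and
 * MAX_ORDER_NR_PAGES == 1024 > pageblock_nr_pages):
 * pfn_max_align_down(0x12345) == 0x12000 and
 * pfn_max_align_up(0x12345) == 0x12400, bracketing the pfn to 4MiB
 * boundaries.
 */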
6741 
6742 /* [start, end) must belong to a single zone. */
6743 static int __alloc_contig_migrate_range(struct compact_control *cc,
6744 					unsigned long start, unsigned long end)
6745 {
6746 	/* This function is based on compact_zone() from compaction.c. */
6747 	unsigned long nr_reclaimed;
6748 	unsigned long pfn = start;
6749 	unsigned int tries = 0;
6750 	int ret = 0;
6751 
6752 	migrate_prep();
6753 
6754 	while (pfn < end || !list_empty(&cc->migratepages)) {
6755 		if (fatal_signal_pending(current)) {
6756 			ret = -EINTR;
6757 			break;
6758 		}
6759 
6760 		if (list_empty(&cc->migratepages)) {
6761 			cc->nr_migratepages = 0;
6762 			pfn = isolate_migratepages_range(cc, pfn, end);
6763 			if (!pfn) {
6764 				ret = -EINTR;
6765 				break;
6766 			}
6767 			tries = 0;
6768 		} else if (++tries == 5) {
6769 			ret = ret < 0 ? ret : -EBUSY;
6770 			break;
6771 		}
6772 
6773 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6774 							&cc->migratepages);
6775 		cc->nr_migratepages -= nr_reclaimed;
6776 
6777 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6778 				    NULL, 0, cc->mode, MR_CMA);
6779 	}
6780 	if (ret < 0) {
6781 		putback_movable_pages(&cc->migratepages);
6782 		return ret;
6783 	}
6784 	return 0;
6785 }
6786 
6787 /**
6788  * alloc_contig_range() -- tries to allocate given range of pages
6789  * @start:	start PFN to allocate
6790  * @end:	one-past-the-last PFN to allocate
6791  * @migratetype:	migratetype of the underlying pageblocks (either
6792  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
6793  *			in range must have the same migratetype and it must
6794  *			be either of the two.
6795  *
6796  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6797  * aligned, however it's the caller's responsibility to guarantee that
6798  * we are the only thread that changes migrate type of pageblocks the
6799  * pages fall in.
6800  *
6801  * The PFN range must belong to a single zone.
6802  *
6803  * Returns zero on success or negative error code.  On success all
6804  * pages whose PFN is in [start, end) are allocated for the caller and
6805  * need to be freed with free_contig_range().
6806  */
6807 int alloc_contig_range(unsigned long start, unsigned long end,
6808 		       unsigned migratetype)
6809 {
6810 	unsigned long outer_start, outer_end;
6811 	int ret = 0, order;
6812 
6813 	struct compact_control cc = {
6814 		.nr_migratepages = 0,
6815 		.order = -1,
6816 		.zone = page_zone(pfn_to_page(start)),
6817 		.mode = MIGRATE_SYNC,
6818 		.ignore_skip_hint = true,
6819 	};
6820 	INIT_LIST_HEAD(&cc.migratepages);
6821 
6822 	/*
6823 	 * What we do here is mark all pageblocks in the range as
6824 	 * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
6825 	 * have different sizes, and due to the way the page allocator
6826 	 * works, we align the range to the bigger of the two so
6827 	 * that the page allocator won't try to merge buddies from
6828 	 * different pageblocks and change MIGRATE_ISOLATE to some
6829 	 * other migration type.
6830 	 *
6831 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6832 	 * migrate the pages from an unaligned range (i.e. pages that
6833 	 * we are interested in).  This will put all the pages in
6834 	 * range back to page allocator as MIGRATE_ISOLATE.
6835 	 *
6836 	 * When this is done, we take the pages in range from page
6837 	 * allocator removing them from the buddy system.  This way
6838 	 * page allocator will never consider using them.
6839 	 *
6840 	 * This lets us mark the pageblocks back as
6841 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6842 	 * aligned range but not in the unaligned, original range are
6843 	 * put back to page allocator so that buddy can use them.
6844 	 */
6845 
6846 	ret = start_isolate_page_range(pfn_max_align_down(start),
6847 				       pfn_max_align_up(end), migratetype,
6848 				       false);
6849 	if (ret)
6850 		return ret;
6851 
6852 	ret = __alloc_contig_migrate_range(&cc, start, end);
6853 	if (ret)
6854 		goto done;
6855 
6856 	/*
6857 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6858 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
6859 	 * more, all pages in [start, end) are free in page allocator.
6860 	 * What we are going to do is to allocate all pages from
6861 	 * [start, end) (that is remove them from page allocator).
6862 	 *
6863 	 * The only problem is that pages at the beginning and at the
6864 	 * end of the interesting range may not be aligned with pages that
6865 	 * the page allocator holds, i.e. they can be part of higher-order
6866 	 * pages.  Because of this, we reserve the bigger range and
6867 	 * once this is done free the pages we are not interested in.
6868 	 *
6869 	 * We don't have to hold zone->lock here because the pages are
6870 	 * isolated and thus won't get removed from the buddy allocator.
6871 	 */
6872 
6873 	lru_add_drain_all();
6874 	drain_all_pages(cc.zone);
6875 
6876 	order = 0;
6877 	outer_start = start;
6878 	while (!PageBuddy(pfn_to_page(outer_start))) {
6879 		if (++order >= MAX_ORDER) {
6880 			ret = -EBUSY;
6881 			goto done;
6882 		}
6883 		outer_start &= ~0UL << order;
6884 	}
6885 
6886 	/* Make sure the range is really isolated. */
6887 	if (test_pages_isolated(outer_start, end, false)) {
6888 		pr_info("%s: [%lx, %lx) PFNs busy\n",
6889 			__func__, outer_start, end);
6890 		ret = -EBUSY;
6891 		goto done;
6892 	}
6893 
6894 	/* Grab isolated pages from freelists. */
6895 	outer_end = isolate_freepages_range(&cc, outer_start, end);
6896 	if (!outer_end) {
6897 		ret = -EBUSY;
6898 		goto done;
6899 	}
6900 
6901 	/* Free head and tail (if any) */
6902 	if (start != outer_start)
6903 		free_contig_range(outer_start, start - outer_start);
6904 	if (end != outer_end)
6905 		free_contig_range(end, outer_end - end);
6906 
6907 done:
6908 	undo_isolate_page_range(pfn_max_align_down(start),
6909 				pfn_max_align_up(end), migratetype);
6910 	return ret;
6911 }
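
/*
 * Minimal usage sketch (illustrative; cma_alloc_range() is a
 * hypothetical helper, not a kernel symbol).  A CMA-style caller,
 * built with CONFIG_CMA, could obtain and release a physically
 * contiguous range like this:
 *
 *	static struct page *cma_alloc_range(unsigned long base_pfn,
 *					    unsigned long nr_pages)
 *	{
 *		if (alloc_contig_range(base_pfn, base_pfn + nr_pages,
 *				       MIGRATE_CMA))
 *			return NULL;
 *		return pfn_to_page(base_pfn);
 *	}
 *
 * and later undo the allocation with:
 *
 *	free_contig_range(base_pfn, nr_pages);
 */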
6912 
6913 void free_contig_range(unsigned long pfn, unsigned nr_pages)
6914 {
6915 	unsigned int count = 0;
6916 
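	/*
	 * Each page obtained from alloc_contig_range() should be held
	 * only by us, i.e. have a reference count of exactly one here;
	 * count anything else and warn about it below.
	 */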
6917 	for (; nr_pages--; pfn++) {
6918 		struct page *page = pfn_to_page(pfn);
6919 
6920 		count += page_count(page) != 1;
6921 		__free_page(page);
6922 	}
6923 	WARN(count != 0, "%d pages are still in use!\n", count);
6924 }
6925 #endif
6926 
6927 #ifdef CONFIG_MEMORY_HOTPLUG
6928 /*
6929  * The zone indicated has a new number of managed_pages; batch sizes and percpu
6930  * page high values need to be recalculated.
6931  */
6932 void __meminit zone_pcp_update(struct zone *zone)
6933 {
6934 	unsigned int cpu;
6935 	mutex_lock(&pcp_batch_high_lock);
6936 	for_each_possible_cpu(cpu)
6937 		pageset_set_high_and_batch(zone,
6938 				per_cpu_ptr(zone->pageset, cpu));
6939 	mutex_unlock(&pcp_batch_high_lock);
6940 }
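
/*
 * Sketch of the effect, assuming the default path where
 * percpu_pagelist_fraction == 0 and each pageset's high mark is
 * derived from its batch size (high is six times batch in this
 * version): after hotplug grows zone->managed_pages, every possible
 * CPU's pageset is refreshed, so e.g. a recomputed batch of 31 pages
 * yields a high watermark of 6 * 31 == 186 pages.
 */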
6941 #endif
6942 
6943 void zone_pcp_reset(struct zone *zone)
6944 {
6945 	unsigned long flags;
6946 	int cpu;
6947 	struct per_cpu_pageset *pset;
6948 
6949 	/* avoid races with drain_pages() */
6950 	local_irq_save(flags);
6951 	if (zone->pageset != &boot_pageset) {
6952 		for_each_online_cpu(cpu) {
6953 			pset = per_cpu_ptr(zone->pageset, cpu);
6954 			drain_zonestat(zone, pset);
6955 		}
6956 		free_percpu(zone->pageset);
6957 		zone->pageset = &boot_pageset;
6958 	}
6959 	local_irq_restore(flags);
6960 }
6961 
6962 #ifdef CONFIG_MEMORY_HOTREMOVE
6963 /*
6964  * All pages in the range must be isolated before calling this.
6965  */
6966 void
6967 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6968 {
6969 	struct page *page;
6970 	struct zone *zone;
6971 	unsigned int order, i;
6972 	unsigned long pfn;
6973 	unsigned long flags;
6974 	/* find the first valid pfn */
6975 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
6976 		if (pfn_valid(pfn))
6977 			break;
6978 	if (pfn == end_pfn)
6979 		return;
6980 	zone = page_zone(pfn_to_page(pfn));
6981 	spin_lock_irqsave(&zone->lock, flags);
6982 	pfn = start_pfn;
6983 	while (pfn < end_pfn) {
6984 		if (!pfn_valid(pfn)) {
6985 			pfn++;
6986 			continue;
6987 		}
6988 		page = pfn_to_page(pfn);
6989 		/*
6990 		 * The HWPoisoned page may not be in the buddy system,
6991 		 * and its page_count() may not be 0.
6992 		 */
6993 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6994 			pfn++;
6995 			SetPageReserved(page);
6996 			continue;
6997 		}
6998 
6999 		BUG_ON(page_count(page));
7000 		BUG_ON(!PageBuddy(page));
7001 		order = page_order(page);
7002 #ifdef CONFIG_DEBUG_VM
7003 		pr_info("remove from free list %lx %d %lx\n",
7004 			pfn, 1 << order, end_pfn);
7005 #endif
7006 		list_del(&page->lru);
7007 		rmv_page_order(page);
7008 		zone->free_area[order].nr_free--;
7009 		for (i = 0; i < (1 << order); i++)
7010 			SetPageReserved((page+i));
7011 		pfn += (1 << order);
7012 	}
7013 	spin_unlock_irqrestore(&zone->lock, flags);
7014 }
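
/*
 * Worked example (illustrative): if PFN 0x2000 heads a free order-3
 * block, one pass of the loop above unlinks that single buddy entry,
 * marks all eight pages 0x2000-0x2007 PageReserved, and advances pfn
 * by 1 << 3 so the next iteration starts at 0x2008.
 */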
7015 #endif
7016 
7017 #ifdef CONFIG_MEMORY_FAILURE
7018 bool is_free_buddy_page(struct page *page)
7019 {
7020 	struct zone *zone = page_zone(page);
7021 	unsigned long pfn = page_to_pfn(page);
7022 	unsigned long flags;
7023 	unsigned int order;
7024 
7025 	spin_lock_irqsave(&zone->lock, flags);
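	/*
	 * For each order, compute the head of the 2^order aligned block
	 * containing this page by clearing the low 'order' bits of its
	 * PFN.  If that head is a free buddy of at least that order,
	 * the page lies inside a free block.  E.g. for pfn 0x1237 at
	 * order 3 the candidate head is pfn 0x1230.
	 */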
7026 	for (order = 0; order < MAX_ORDER; order++) {
7027 		struct page *page_head = page - (pfn & ((1 << order) - 1));
7028 
7029 		if (PageBuddy(page_head) && page_order(page_head) >= order)
7030 			break;
7031 	}
7032 	spin_unlock_irqrestore(&zone->lock, flags);
7033 
7034 	return order < MAX_ORDER;
7035 }
7036 #endif
7037