xref: /openbmc/linux/mm/page_alloc.c (revision 179dd8c0)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/rwsem.h>
22 #include <linux/pagemap.h>
23 #include <linux/jiffies.h>
24 #include <linux/bootmem.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kmemcheck.h>
29 #include <linux/kasan.h>
30 #include <linux/module.h>
31 #include <linux/suspend.h>
32 #include <linux/pagevec.h>
33 #include <linux/blkdev.h>
34 #include <linux/slab.h>
35 #include <linux/ratelimit.h>
36 #include <linux/oom.h>
37 #include <linux/notifier.h>
38 #include <linux/topology.h>
39 #include <linux/sysctl.h>
40 #include <linux/cpu.h>
41 #include <linux/cpuset.h>
42 #include <linux/memory_hotplug.h>
43 #include <linux/nodemask.h>
44 #include <linux/vmalloc.h>
45 #include <linux/vmstat.h>
46 #include <linux/mempolicy.h>
47 #include <linux/stop_machine.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/page_ext.h>
54 #include <linux/debugobjects.h>
55 #include <linux/kmemleak.h>
56 #include <linux/compaction.h>
57 #include <trace/events/kmem.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/page_owner.h>
65 #include <linux/kthread.h>
66 
67 #include <asm/sections.h>
68 #include <asm/tlbflush.h>
69 #include <asm/div64.h>
70 #include "internal.h"
71 
72 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
73 static DEFINE_MUTEX(pcp_batch_high_lock);
74 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
75 
76 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
77 DEFINE_PER_CPU(int, numa_node);
78 EXPORT_PER_CPU_SYMBOL(numa_node);
79 #endif
80 
81 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
82 /*
83  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
84  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
85  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
86  * defined in <linux/topology.h>.
87  */
88 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
89 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
90 int _node_numa_mem_[MAX_NUMNODES];
91 #endif
92 
93 /*
94  * Array of node states.
95  */
96 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
97 	[N_POSSIBLE] = NODE_MASK_ALL,
98 	[N_ONLINE] = { { [0] = 1UL } },
99 #ifndef CONFIG_NUMA
100 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
101 #ifdef CONFIG_HIGHMEM
102 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
103 #endif
104 #ifdef CONFIG_MOVABLE_NODE
105 	[N_MEMORY] = { { [0] = 1UL } },
106 #endif
107 	[N_CPU] = { { [0] = 1UL } },
108 #endif	/* NUMA */
109 };
110 EXPORT_SYMBOL(node_states);
111 
112 /* Protect totalram_pages and zone->managed_pages */
113 static DEFINE_SPINLOCK(managed_page_count_lock);
114 
115 unsigned long totalram_pages __read_mostly;
116 unsigned long totalreserve_pages __read_mostly;
117 unsigned long totalcma_pages __read_mostly;
118 /*
119  * When calculating the number of globally allowed dirty pages, there
120  * is a certain number of per-zone reserves that should not be
121  * considered dirtyable memory.  This is the sum of those reserves
122  * over all existing zones that contribute dirtyable memory.
123  */
124 unsigned long dirty_balance_reserve __read_mostly;
125 
126 int percpu_pagelist_fraction;
127 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
128 
129 #ifdef CONFIG_PM_SLEEP
130 /*
131  * The following functions are used by the suspend/hibernate code to temporarily
132  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
133  * while devices are suspended.  To avoid races with the suspend/hibernate code,
134  * they should always be called with pm_mutex held (gfp_allowed_mask also should
135  * only be modified with pm_mutex held, unless the suspend/hibernate code is
136  * guaranteed not to run in parallel with that modification).
137  */
138 
139 static gfp_t saved_gfp_mask;
140 
141 void pm_restore_gfp_mask(void)
142 {
143 	WARN_ON(!mutex_is_locked(&pm_mutex));
144 	if (saved_gfp_mask) {
145 		gfp_allowed_mask = saved_gfp_mask;
146 		saved_gfp_mask = 0;
147 	}
148 }
149 
150 void pm_restrict_gfp_mask(void)
151 {
152 	WARN_ON(!mutex_is_locked(&pm_mutex));
153 	WARN_ON(saved_gfp_mask);
154 	saved_gfp_mask = gfp_allowed_mask;
155 	gfp_allowed_mask &= ~GFP_IOFS;
156 }
157 
158 bool pm_suspended_storage(void)
159 {
160 	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
161 		return false;
162 	return true;
163 }
164 #endif /* CONFIG_PM_SLEEP */
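/*
 * A minimal userspace sketch (not part of the build) of the
 * save/restrict/restore sequence above, with the gfp bits reduced to
 * plain flags; the FAKE_GFP_* names are hypothetical stand-ins:
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define FAKE_GFP_IO	0x1
#define FAKE_GFP_FS	0x2
#define FAKE_GFP_BOOT	0x4

static unsigned int allowed_mask = FAKE_GFP_BOOT | FAKE_GFP_IO | FAKE_GFP_FS;
static unsigned int saved_mask;

static void restrict_mask(void)		/* models pm_restrict_gfp_mask() */
{
	assert(saved_mask == 0);	/* mirrors WARN_ON(saved_gfp_mask) */
	saved_mask = allowed_mask;
	allowed_mask &= ~(FAKE_GFP_IO | FAKE_GFP_FS);
}

static void restore_mask(void)		/* models pm_restore_gfp_mask() */
{
	if (saved_mask) {
		allowed_mask = saved_mask;
		saved_mask = 0;
	}
}

int main(void)
{
	restrict_mask();
	printf("suspended: IO/FS allowed? %s\n",
	       (allowed_mask & (FAKE_GFP_IO | FAKE_GFP_FS)) ? "yes" : "no");
	restore_mask();
	printf("resumed:   IO/FS allowed? %s\n",
	       (allowed_mask & (FAKE_GFP_IO | FAKE_GFP_FS)) ? "yes" : "no");
	return 0;
}
#endif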
165 
166 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
167 int pageblock_order __read_mostly;
168 #endif
169 
170 static void __free_pages_ok(struct page *page, unsigned int order);
171 
172 /*
173  * results with 256, 32 in the lowmem_reserve sysctl:
174  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
175  *	1G machine -> (16M dma, 784M normal, 224M high)
176  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
177  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
178  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
179  *
180  * TBD: should special case ZONE_DMA32 machines here - in those we normally
181  * don't need any ZONE_NORMAL reservation
182  */
183 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
184 #ifdef CONFIG_ZONE_DMA
185 	 256,
186 #endif
187 #ifdef CONFIG_ZONE_DMA32
188 	 256,
189 #endif
190 #ifdef CONFIG_HIGHMEM
191 	 32,
192 #endif
193 	 32,
194 };
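/*
 * A userspace sketch (not part of the build) of the reserve arithmetic
 * described above: against an allocation that may use higher zones, each
 * lower zone holds back (pages managed by the higher zones) / ratio.
 * The figures model the 1G example (16M DMA, 784M Normal, 224M HighMem),
 * assuming 4K pages:
 */
#if 0
#include <stdio.h>

#define MB_PAGES(mb)	((mb) * 256UL)	/* 256 4K pages per MiB */

int main(void)
{
	unsigned long normal = MB_PAGES(784);
	unsigned long high = MB_PAGES(224);

	/* NORMAL allocation: ZONE_DMA holds back 784M/256 */
	printf("DMA reserve vs NORMAL alloc:  %lu pages (~%luM)\n",
	       normal / 256, normal / 256 / 256);
	/* HIGHMEM allocation: ZONE_NORMAL holds back 224M/32 */
	printf("NORMAL reserve vs HIGH alloc: %lu pages (~%luM)\n",
	       high / 32, high / 32 / 256);
	/* HIGHMEM allocation: ZONE_DMA holds back (224M+784M)/256 */
	printf("DMA reserve vs HIGH alloc:    %lu pages (~%luM)\n",
	       (normal + high) / 256, (normal + high) / 256 / 256);
	return 0;
}
#endif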
195 
196 EXPORT_SYMBOL(totalram_pages);
197 
198 static char * const zone_names[MAX_NR_ZONES] = {
199 #ifdef CONFIG_ZONE_DMA
200 	 "DMA",
201 #endif
202 #ifdef CONFIG_ZONE_DMA32
203 	 "DMA32",
204 #endif
205 	 "Normal",
206 #ifdef CONFIG_HIGHMEM
207 	 "HighMem",
208 #endif
209 	 "Movable",
210 };
211 
212 int min_free_kbytes = 1024;
213 int user_min_free_kbytes = -1;
214 
215 static unsigned long __meminitdata nr_kernel_pages;
216 static unsigned long __meminitdata nr_all_pages;
217 static unsigned long __meminitdata dma_reserve;
218 
219 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
220 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
221 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
222 static unsigned long __initdata required_kernelcore;
223 static unsigned long __initdata required_movablecore;
224 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
225 
226 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
227 int movable_zone;
228 EXPORT_SYMBOL(movable_zone);
229 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
230 
231 #if MAX_NUMNODES > 1
232 int nr_node_ids __read_mostly = MAX_NUMNODES;
233 int nr_online_nodes __read_mostly = 1;
234 EXPORT_SYMBOL(nr_node_ids);
235 EXPORT_SYMBOL(nr_online_nodes);
236 #endif
237 
238 int page_group_by_mobility_disabled __read_mostly;
239 
240 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
241 static inline void reset_deferred_meminit(pg_data_t *pgdat)
242 {
243 	pgdat->first_deferred_pfn = ULONG_MAX;
244 }
245 
246 /* Returns true if the struct page for the pfn is uninitialised */
247 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
248 {
249 	int nid = early_pfn_to_nid(pfn);
250 
251 	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
252 		return true;
253 
254 	return false;
255 }
256 
257 static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
258 {
259 	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
260 		return true;
261 
262 	return false;
263 }
264 
265 /*
266  * Returns false when the remaining initialisation should be deferred until
267  * later in the boot cycle when it can be parallelised.
268  */
269 static inline bool update_defer_init(pg_data_t *pgdat,
270 				unsigned long pfn, unsigned long zone_end,
271 				unsigned long *nr_initialised)
272 {
273 	/* Always populate low zones for address-constrained allocations */
274 	if (zone_end < pgdat_end_pfn(pgdat))
275 		return true;
276 
277 	/* Initialise at least 2G of the highest zone */
278 	(*nr_initialised)++;
279 	if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
280 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
281 		pgdat->first_deferred_pfn = pfn;
282 		return false;
283 	}
284 
285 	return true;
286 }
287 #else
288 static inline void reset_deferred_meminit(pg_data_t *pgdat)
289 {
290 }
291 
292 static inline bool early_page_uninitialised(unsigned long pfn)
293 {
294 	return false;
295 }
296 
297 static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
298 {
299 	return false;
300 }
301 
302 static inline bool update_defer_init(pg_data_t *pgdat,
303 				unsigned long pfn, unsigned long zone_end,
304 				unsigned long *nr_initialised)
305 {
306 	return true;
307 }
308 #endif
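/*
 * A quick check (not part of the build) of the deferral threshold in
 * update_defer_init() above: 2UL << (30 - PAGE_SHIFT) is exactly the
 * page count of 2GiB, assuming 4K pages:
 */
#if 0
#include <stdio.h>

#define SHIFT_4K 12	/* stand-in for PAGE_SHIFT */

int main(void)
{
	unsigned long nr = 2UL << (30 - SHIFT_4K);	/* 2 * 2^18 pages */

	printf("threshold: %lu pages = %lu GiB\n",
	       nr, (nr << SHIFT_4K) >> 30);
	return 0;
}
#endif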
309 
310 
311 void set_pageblock_migratetype(struct page *page, int migratetype)
312 {
313 	if (unlikely(page_group_by_mobility_disabled &&
314 		     migratetype < MIGRATE_PCPTYPES))
315 		migratetype = MIGRATE_UNMOVABLE;
316 
317 	set_pageblock_flags_group(page, (unsigned long)migratetype,
318 					PB_migrate, PB_migrate_end);
319 }
320 
321 #ifdef CONFIG_DEBUG_VM
322 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
323 {
324 	int ret = 0;
325 	unsigned seq;
326 	unsigned long pfn = page_to_pfn(page);
327 	unsigned long sp, start_pfn;
328 
329 	do {
330 		seq = zone_span_seqbegin(zone);
331 		start_pfn = zone->zone_start_pfn;
332 		sp = zone->spanned_pages;
333 		if (!zone_spans_pfn(zone, pfn))
334 			ret = 1;
335 	} while (zone_span_seqretry(zone, seq));
336 
337 	if (ret)
338 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
339 			pfn, zone_to_nid(zone), zone->name,
340 			start_pfn, start_pfn + sp);
341 
342 	return ret;
343 }
344 
345 static int page_is_consistent(struct zone *zone, struct page *page)
346 {
347 	if (!pfn_valid_within(page_to_pfn(page)))
348 		return 0;
349 	if (zone != page_zone(page))
350 		return 0;
351 
352 	return 1;
353 }
354 /*
355  * Temporary debugging check for pages not lying within a given zone.
356  */
357 static int bad_range(struct zone *zone, struct page *page)
358 {
359 	if (page_outside_zone_boundaries(zone, page))
360 		return 1;
361 	if (!page_is_consistent(zone, page))
362 		return 1;
363 
364 	return 0;
365 }
366 #else
367 static inline int bad_range(struct zone *zone, struct page *page)
368 {
369 	return 0;
370 }
371 #endif
372 
373 static void bad_page(struct page *page, const char *reason,
374 		unsigned long bad_flags)
375 {
376 	static unsigned long resume;
377 	static unsigned long nr_shown;
378 	static unsigned long nr_unshown;
379 
380 	/* Don't complain about poisoned pages */
381 	if (PageHWPoison(page)) {
382 		page_mapcount_reset(page); /* remove PageBuddy */
383 		return;
384 	}
385 
386 	/*
387 	 * Allow a burst of 60 reports, then keep quiet for that minute;
388 	 * or allow a steady drip of one report per second.
389 	 */
390 	if (nr_shown == 60) {
391 		if (time_before(jiffies, resume)) {
392 			nr_unshown++;
393 			goto out;
394 		}
395 		if (nr_unshown) {
396 			printk(KERN_ALERT
397 			      "BUG: Bad page state: %lu messages suppressed\n",
398 				nr_unshown);
399 			nr_unshown = 0;
400 		}
401 		nr_shown = 0;
402 	}
403 	if (nr_shown++ == 0)
404 		resume = jiffies + 60 * HZ;
405 
406 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
407 		current->comm, page_to_pfn(page));
408 	dump_page_badflags(page, reason, bad_flags);
409 
410 	print_modules();
411 	dump_stack();
412 out:
413 	/* Leave bad fields for debug, except PageBuddy could make trouble */
414 	page_mapcount_reset(page); /* remove PageBuddy */
415 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
416 }
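/*
 * A userspace model (not part of the build) of the report ratelimiting
 * in bad_page() above: a burst of 60 reports is allowed, then further
 * reports are suppressed until a minute after the first of the burst.
 * "now" stands in for jiffies with a pretend HZ of 1:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define BURST	60
#define MINUTE	60	/* pretend HZ == 1 */

static unsigned long resume_at;
static unsigned long nr_shown, nr_unshown;

static bool should_report(unsigned long now)
{
	if (nr_shown == BURST) {
		if (now < resume_at) {	/* still inside the quiet minute */
			nr_unshown++;
			return false;
		}
		if (nr_unshown) {
			printf("%lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume_at = now + MINUTE;
	return true;
}

int main(void)
{
	unsigned long now, i, reported = 0;

	for (now = 0; now < 10; now++)		/* 100 events in 10 "seconds" */
		for (i = 0; i < 10; i++)
			if (should_report(now))
				reported++;
	printf("reported %lu of 100\n", reported);
	return 0;
}
#endif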
417 
418 /*
419  * Higher-order pages are called "compound pages".  They are structured thusly:
420  *
421  * The first PAGE_SIZE page is called the "head page".
422  *
423  * The remaining PAGE_SIZE pages are called "tail pages".
424  *
425  * All pages have PG_compound set.  All tail pages have their ->first_page
426  * pointing at the head page.
427  *
428  * The first tail page's ->lru.next holds the address of the compound page's
429  * put_page() function.  Its ->lru.prev holds the order of allocation.
430  * This usage means that zero-order pages may not be compound.
431  */
432 
433 static void free_compound_page(struct page *page)
434 {
435 	__free_pages_ok(page, compound_order(page));
436 }
437 
438 void prep_compound_page(struct page *page, unsigned long order)
439 {
440 	int i;
441 	int nr_pages = 1 << order;
442 
443 	set_compound_page_dtor(page, free_compound_page);
444 	set_compound_order(page, order);
445 	__SetPageHead(page);
446 	for (i = 1; i < nr_pages; i++) {
447 		struct page *p = page + i;
448 		set_page_count(p, 0);
449 		p->first_page = page;
450 		/* Make sure p->first_page is always valid for PageTail() */
451 		smp_wmb();
452 		__SetPageTail(p);
453 	}
454 }
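/*
 * A toy userspace model (not part of the build) of the head/tail layout
 * prep_compound_page() sets up above: page 0 is the head and every tail
 * points back at it, so any tail can recover its head in O(1).
 * struct toy_page is a hypothetical stand-in for struct page:
 */
#if 0
#include <assert.h>
#include <stdio.h>

struct toy_page {
	int head;			/* models the head-page flag */
	int tail;			/* models the tail-page flag */
	struct toy_page *first_page;	/* models page->first_page */
};

static void toy_prep_compound(struct toy_page *page, unsigned int order)
{
	unsigned long i, nr = 1UL << order;

	page->head = 1;
	for (i = 1; i < nr; i++) {
		page[i].tail = 1;
		page[i].first_page = page;
	}
}

int main(void)
{
	struct toy_page pages[8] = { { 0 } };

	toy_prep_compound(pages, 3);	/* order-3: one head, seven tails */
	assert(pages[0].head && !pages[0].tail);
	assert(pages[5].tail && pages[5].first_page == &pages[0]);
	printf("tail 5 -> head at index %ld\n",
	       (long)(pages[5].first_page - pages));
	return 0;
}
#endif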
455 
456 #ifdef CONFIG_DEBUG_PAGEALLOC
457 unsigned int _debug_guardpage_minorder;
458 bool _debug_pagealloc_enabled __read_mostly;
459 bool _debug_guardpage_enabled __read_mostly;
460 
461 static int __init early_debug_pagealloc(char *buf)
462 {
463 	if (!buf)
464 		return -EINVAL;
465 
466 	if (strcmp(buf, "on") == 0)
467 		_debug_pagealloc_enabled = true;
468 
469 	return 0;
470 }
471 early_param("debug_pagealloc", early_debug_pagealloc);
472 
473 static bool need_debug_guardpage(void)
474 {
475 	/* If we don't use debug_pagealloc, we don't need a guard page */
476 	if (!debug_pagealloc_enabled())
477 		return false;
478 
479 	return true;
480 }
481 
482 static void init_debug_guardpage(void)
483 {
484 	if (!debug_pagealloc_enabled())
485 		return;
486 
487 	_debug_guardpage_enabled = true;
488 }
489 
490 struct page_ext_operations debug_guardpage_ops = {
491 	.need = need_debug_guardpage,
492 	.init = init_debug_guardpage,
493 };
494 
495 static int __init debug_guardpage_minorder_setup(char *buf)
496 {
497 	unsigned long res;
498 
499 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
500 		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
501 		return 0;
502 	}
503 	_debug_guardpage_minorder = res;
504 	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
505 	return 0;
506 }
507 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
508 
509 static inline void set_page_guard(struct zone *zone, struct page *page,
510 				unsigned int order, int migratetype)
511 {
512 	struct page_ext *page_ext;
513 
514 	if (!debug_guardpage_enabled())
515 		return;
516 
517 	page_ext = lookup_page_ext(page);
518 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
519 
520 	INIT_LIST_HEAD(&page->lru);
521 	set_page_private(page, order);
522 	/* Guard pages are not available for any usage */
523 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
524 }
525 
526 static inline void clear_page_guard(struct zone *zone, struct page *page,
527 				unsigned int order, int migratetype)
528 {
529 	struct page_ext *page_ext;
530 
531 	if (!debug_guardpage_enabled())
532 		return;
533 
534 	page_ext = lookup_page_ext(page);
535 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
536 
537 	set_page_private(page, 0);
538 	if (!is_migrate_isolate(migratetype))
539 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
540 }
541 #else
542 struct page_ext_operations debug_guardpage_ops = { NULL, };
543 static inline void set_page_guard(struct zone *zone, struct page *page,
544 				unsigned int order, int migratetype) {}
545 static inline void clear_page_guard(struct zone *zone, struct page *page,
546 				unsigned int order, int migratetype) {}
547 #endif
548 
549 static inline void set_page_order(struct page *page, unsigned int order)
550 {
551 	set_page_private(page, order);
552 	__SetPageBuddy(page);
553 }
554 
555 static inline void rmv_page_order(struct page *page)
556 {
557 	__ClearPageBuddy(page);
558 	set_page_private(page, 0);
559 }
560 
561 /*
562  * This function checks whether a page is free && is the buddy.
563  * We can coalesce a page and its buddy if
564  * (a) the buddy is not in a hole &&
565  * (b) the buddy is in the buddy system &&
566  * (c) a page and its buddy have the same order &&
567  * (d) a page and its buddy are in the same zone.
568  *
569  * For recording whether a page is in the buddy system, we set ->_mapcount
570  * to PAGE_BUDDY_MAPCOUNT_VALUE.
571  * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE
572  * are serialized by zone->lock.
573  *
574  * For recording page's order, we use page_private(page).
575  */
576 static inline int page_is_buddy(struct page *page, struct page *buddy,
577 							unsigned int order)
578 {
579 	if (!pfn_valid_within(page_to_pfn(buddy)))
580 		return 0;
581 
582 	if (page_is_guard(buddy) && page_order(buddy) == order) {
583 		if (page_zone_id(page) != page_zone_id(buddy))
584 			return 0;
585 
586 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
587 
588 		return 1;
589 	}
590 
591 	if (PageBuddy(buddy) && page_order(buddy) == order) {
592 		/*
593 		 * zone check is done late to avoid uselessly
594 		 * calculating zone/node ids for pages that could
595 		 * never merge.
596 		 */
597 		if (page_zone_id(page) != page_zone_id(buddy))
598 			return 0;
599 
600 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
601 
602 		return 1;
603 	}
604 	return 0;
605 }
606 
607 /*
608  * Freeing function for a buddy system allocator.
609  *
610  * The concept of a buddy system is to maintain a direct-mapped table
611  * (containing bit values) for memory blocks of various "orders".
612  * The bottom level table contains the map for the smallest allocatable
613  * units of memory (here, pages), and each level above it describes
614  * pairs of units from the levels below, hence, "buddies".
615  * At a high level, all that happens here is marking the table entry
616  * at the bottom level available, and propagating the changes upward
617  * as necessary, plus some accounting needed to play nicely with other
618  * parts of the VM system.
619  * At each level, we keep a list of pages, which are heads of contiguous
620  * runs of free pages of length (1 << order), marked with _mapcount
621  * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
622  * page_private(page) field.
623  * So when we are allocating or freeing one, we can derive the state of the
624  * other.  That is, if we allocate a small block, and both were
625  * free, the remainder of the region must be split into blocks.
626  * If a block is freed, and its buddy is also free, then this
627  * triggers coalescing into a block of larger size.
628  *
629  * -- nyc
630  */
631 
632 static inline void __free_one_page(struct page *page,
633 		unsigned long pfn,
634 		struct zone *zone, unsigned int order,
635 		int migratetype)
636 {
637 	unsigned long page_idx;
638 	unsigned long combined_idx;
639 	unsigned long uninitialized_var(buddy_idx);
640 	struct page *buddy;
641 	int max_order = MAX_ORDER;
642 
643 	VM_BUG_ON(!zone_is_initialized(zone));
644 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
645 
646 	VM_BUG_ON(migratetype == -1);
647 	if (is_migrate_isolate(migratetype)) {
648 		/*
649 		 * We restrict the max order of merging to prevent merging
650 		 * between free pages on an isolated pageblock and a normal
651 		 * pageblock. Without this, pageblock isolation
652 		 * could cause incorrect freepage accounting.
653 		 */
654 		max_order = min(MAX_ORDER, pageblock_order + 1);
655 	} else {
656 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
657 	}
658 
659 	page_idx = pfn & ((1 << max_order) - 1);
660 
661 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
662 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
663 
664 	while (order < max_order - 1) {
665 		buddy_idx = __find_buddy_index(page_idx, order);
666 		buddy = page + (buddy_idx - page_idx);
667 		if (!page_is_buddy(page, buddy, order))
668 			break;
669 		/*
670 		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
671 		 * merge with it and move up one order.
672 		 */
673 		if (page_is_guard(buddy)) {
674 			clear_page_guard(zone, buddy, order, migratetype);
675 		} else {
676 			list_del(&buddy->lru);
677 			zone->free_area[order].nr_free--;
678 			rmv_page_order(buddy);
679 		}
680 		combined_idx = buddy_idx & page_idx;
681 		page = page + (combined_idx - page_idx);
682 		page_idx = combined_idx;
683 		order++;
684 	}
685 	set_page_order(page, order);
686 
687 	/*
688 	 * If this is not the largest possible page, check if the buddy
689 	 * of the next-highest order is free. If it is, it's possible
690 	 * that pages are being freed that will coalesce soon. In case,
691 	 * that is happening, add the free page to the tail of the list
692 	 * so it's less likely to be used soon and more likely to be merged
693 	 * as a higher order page
694 	 */
695 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
696 		struct page *higher_page, *higher_buddy;
697 		combined_idx = buddy_idx & page_idx;
698 		higher_page = page + (combined_idx - page_idx);
699 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
700 		higher_buddy = higher_page + (buddy_idx - combined_idx);
701 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
702 			list_add_tail(&page->lru,
703 				&zone->free_area[order].free_list[migratetype]);
704 			goto out;
705 		}
706 	}
707 
708 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
709 out:
710 	zone->free_area[order].nr_free++;
711 }
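/*
 * A sketch (not part of the build) of the index arithmetic the merge
 * loop above relies on: __find_buddy_index() computes
 * page_idx ^ (1 << order), so flipping bit 'order' yields the buddy,
 * and masking it off (buddy_idx & page_idx) yields the combined parent
 * block:
 */
#if 0
#include <stdio.h>

static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned long page_idx = 12;	/* 0b1100 */
	unsigned int order;

	for (order = 0; order < 3; order++) {
		unsigned long buddy = buddy_index(page_idx, order);
		unsigned long combined = buddy & page_idx;

		printf("order %u: idx %2lu buddy %2lu combined %2lu\n",
		       order, page_idx, buddy, combined);
		page_idx = combined;	/* pretend the merge succeeded */
	}
	return 0;
}
#endif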
712 
713 static inline int free_pages_check(struct page *page)
714 {
715 	const char *bad_reason = NULL;
716 	unsigned long bad_flags = 0;
717 
718 	if (unlikely(page_mapcount(page)))
719 		bad_reason = "nonzero mapcount";
720 	if (unlikely(page->mapping != NULL))
721 		bad_reason = "non-NULL mapping";
722 	if (unlikely(atomic_read(&page->_count) != 0))
723 		bad_reason = "nonzero _count";
724 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
725 		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
726 		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
727 	}
728 #ifdef CONFIG_MEMCG
729 	if (unlikely(page->mem_cgroup))
730 		bad_reason = "page still charged to cgroup";
731 #endif
732 	if (unlikely(bad_reason)) {
733 		bad_page(page, bad_reason, bad_flags);
734 		return 1;
735 	}
736 	page_cpupid_reset_last(page);
737 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
738 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
739 	return 0;
740 }
741 
742 /*
743  * Frees a number of pages from the PCP lists.
744  * Assumes all pages on the list are in the same zone and of the same order.
745  * count is the number of pages to free.
746  *
747  * If the zone was previously in an "all pages pinned" state then look to
748  * see if this freeing clears that state.
749  *
750  * And clear the zone's pages_scanned counter, to hold off the "all pages are
751  * pinned" detection logic.
752  */
753 static void free_pcppages_bulk(struct zone *zone, int count,
754 					struct per_cpu_pages *pcp)
755 {
756 	int migratetype = 0;
757 	int batch_free = 0;
758 	int to_free = count;
759 	unsigned long nr_scanned;
760 
761 	spin_lock(&zone->lock);
762 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
763 	if (nr_scanned)
764 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
765 
766 	while (to_free) {
767 		struct page *page;
768 		struct list_head *list;
769 
770 		/*
771 		 * Remove pages from lists in a round-robin fashion. A
772 		 * batch_free count is maintained that is incremented when an
773 		 * empty list is encountered.  This is so more pages are freed
774 		 * off fuller lists instead of spinning excessively around empty
775 		 * lists
776 		 */
777 		do {
778 			batch_free++;
779 			if (++migratetype == MIGRATE_PCPTYPES)
780 				migratetype = 0;
781 			list = &pcp->lists[migratetype];
782 		} while (list_empty(list));
783 
784 		/* This is the only non-empty list. Free them all. */
785 		if (batch_free == MIGRATE_PCPTYPES)
786 			batch_free = to_free;
787 
788 		do {
789 			int mt;	/* migratetype of the to-be-freed page */
790 
791 			page = list_entry(list->prev, struct page, lru);
792 			/* must delete, as __free_one_page() manipulates the list */
793 			list_del(&page->lru);
794 			mt = get_freepage_migratetype(page);
795 			if (unlikely(has_isolate_pageblock(zone)))
796 				mt = get_pageblock_migratetype(page);
797 
798 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
799 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
800 			trace_mm_page_pcpu_drain(page, 0, mt);
801 		} while (--to_free && --batch_free && !list_empty(list));
802 	}
803 	spin_unlock(&zone->lock);
804 }
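/*
 * A userspace model (not part of the build) of the round-robin list
 * selection in free_pcppages_bulk() above: batch_free grows by one for
 * each empty list skipped, so fuller lists give up proportionally more
 * pages per visit. The "only non-empty list" fast path is omitted:
 */
#if 0
#include <stdio.h>

#define NTYPES 3	/* stand-in for MIGRATE_PCPTYPES */

int main(void)
{
	int lists[NTYPES] = { 8, 0, 2 };	/* pages per migratetype list */
	int to_free = 10, mt = 0;

	while (to_free) {
		int batch_free = 0;

		do {	/* skip empties, counting how many were skipped */
			batch_free++;
			if (++mt == NTYPES)
				mt = 0;
		} while (lists[mt] == 0);

		while (to_free && batch_free-- && lists[mt]) {
			lists[mt]--;
			to_free--;
			printf("freed one page from list %d\n", mt);
		}
	}
	return 0;
}
#endif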
805 
806 static void free_one_page(struct zone *zone,
807 				struct page *page, unsigned long pfn,
808 				unsigned int order,
809 				int migratetype)
810 {
811 	unsigned long nr_scanned;
812 	spin_lock(&zone->lock);
813 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
814 	if (nr_scanned)
815 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
816 
817 	if (unlikely(has_isolate_pageblock(zone) ||
818 		is_migrate_isolate(migratetype))) {
819 		migratetype = get_pfnblock_migratetype(page, pfn);
820 	}
821 	__free_one_page(page, pfn, zone, order, migratetype);
822 	spin_unlock(&zone->lock);
823 }
824 
825 static int free_tail_pages_check(struct page *head_page, struct page *page)
826 {
827 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
828 		return 0;
829 	if (unlikely(!PageTail(page))) {
830 		bad_page(page, "PageTail not set", 0);
831 		return 1;
832 	}
833 	if (unlikely(page->first_page != head_page)) {
834 		bad_page(page, "first_page not consistent", 0);
835 		return 1;
836 	}
837 	return 0;
838 }
839 
840 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
841 				unsigned long zone, int nid)
842 {
843 	set_page_links(page, zone, nid, pfn);
844 	init_page_count(page);
845 	page_mapcount_reset(page);
846 	page_cpupid_reset_last(page);
847 
848 	INIT_LIST_HEAD(&page->lru);
849 #ifdef WANT_PAGE_VIRTUAL
850 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
851 	if (!is_highmem_idx(zone))
852 		set_page_address(page, __va(pfn << PAGE_SHIFT));
853 #endif
854 }
855 
856 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
857 					int nid)
858 {
859 	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
860 }
861 
862 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
863 static void init_reserved_page(unsigned long pfn)
864 {
865 	pg_data_t *pgdat;
866 	int nid, zid;
867 
868 	if (!early_page_uninitialised(pfn))
869 		return;
870 
871 	nid = early_pfn_to_nid(pfn);
872 	pgdat = NODE_DATA(nid);
873 
874 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
875 		struct zone *zone = &pgdat->node_zones[zid];
876 
877 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
878 			break;
879 	}
880 	__init_single_pfn(pfn, zid, nid);
881 }
882 #else
883 static inline void init_reserved_page(unsigned long pfn)
884 {
885 }
886 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
887 
888 /*
889  * Initialised pages do not have PageReserved set. This function is
890  * called for each range allocated by the bootmem allocator and
891  * marks the pages PageReserved. The remaining valid pages are later
892  * sent to the buddy page allocator.
893  */
894 void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
895 {
896 	unsigned long start_pfn = PFN_DOWN(start);
897 	unsigned long end_pfn = PFN_UP(end);
898 
899 	for (; start_pfn < end_pfn; start_pfn++) {
900 		if (pfn_valid(start_pfn)) {
901 			struct page *page = pfn_to_page(start_pfn);
902 
903 			init_reserved_page(start_pfn);
904 			SetPageReserved(page);
905 		}
906 	}
907 }
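/*
 * A sketch (not part of the build) of the PFN_DOWN/PFN_UP rounding used
 * by reserve_bootmem_region() above: the start is rounded down to a page
 * boundary and the end rounded up, so every byte of [start, end) lands
 * inside the reserved pfn range. Assumes 4K pages:
 */
#if 0
#include <stdio.h>

#define PAGE_SHIFT_4K	12
#define PAGE_SIZE_4K	(1UL << PAGE_SHIFT_4K)

#define PFN_DOWN(x)	((x) >> PAGE_SHIFT_4K)
#define PFN_UP(x)	(((x) + PAGE_SIZE_4K - 1) >> PAGE_SHIFT_4K)

int main(void)
{
	unsigned long start = 0x1234, end = 0x5678;

	printf("bytes [%#lx, %#lx) -> pfns [%lu, %lu)\n",
	       start, end, PFN_DOWN(start), PFN_UP(end));
	return 0;
}
#endif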
908 
909 static bool free_pages_prepare(struct page *page, unsigned int order)
910 {
911 	bool compound = PageCompound(page);
912 	int i, bad = 0;
913 
914 	VM_BUG_ON_PAGE(PageTail(page), page);
915 	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
916 
917 	trace_mm_page_free(page, order);
918 	kmemcheck_free_shadow(page, order);
919 	kasan_free_pages(page, order);
920 
921 	if (PageAnon(page))
922 		page->mapping = NULL;
923 	bad += free_pages_check(page);
924 	for (i = 1; i < (1 << order); i++) {
925 		if (compound)
926 			bad += free_tail_pages_check(page, page + i);
927 		bad += free_pages_check(page + i);
928 	}
929 	if (bad)
930 		return false;
931 
932 	reset_page_owner(page, order);
933 
934 	if (!PageHighMem(page)) {
935 		debug_check_no_locks_freed(page_address(page),
936 					   PAGE_SIZE << order);
937 		debug_check_no_obj_freed(page_address(page),
938 					   PAGE_SIZE << order);
939 	}
940 	arch_free_page(page, order);
941 	kernel_map_pages(page, 1 << order, 0);
942 
943 	return true;
944 }
945 
946 static void __free_pages_ok(struct page *page, unsigned int order)
947 {
948 	unsigned long flags;
949 	int migratetype;
950 	unsigned long pfn = page_to_pfn(page);
951 
952 	if (!free_pages_prepare(page, order))
953 		return;
954 
955 	migratetype = get_pfnblock_migratetype(page, pfn);
956 	local_irq_save(flags);
957 	__count_vm_events(PGFREE, 1 << order);
958 	set_freepage_migratetype(page, migratetype);
959 	free_one_page(page_zone(page), page, pfn, order, migratetype);
960 	local_irq_restore(flags);
961 }
962 
963 static void __init __free_pages_boot_core(struct page *page,
964 					unsigned long pfn, unsigned int order)
965 {
966 	unsigned int nr_pages = 1 << order;
967 	struct page *p = page;
968 	unsigned int loop;
969 
970 	prefetchw(p);
971 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
972 		prefetchw(p + 1);
973 		__ClearPageReserved(p);
974 		set_page_count(p, 0);
975 	}
976 	__ClearPageReserved(p);
977 	set_page_count(p, 0);
978 
979 	page_zone(page)->managed_pages += nr_pages;
980 	set_page_refcounted(page);
981 	__free_pages(page, order);
982 }
983 
984 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
985 	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
986 /* Only safe to use early in boot when initialisation is single-threaded */
987 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
988 
989 int __meminit early_pfn_to_nid(unsigned long pfn)
990 {
991 	int nid;
992 
993 	/* The system will behave unpredictably otherwise */
994 	BUG_ON(system_state != SYSTEM_BOOTING);
995 
996 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
997 	if (nid >= 0)
998 		return nid;
999 	/* lookup failed; just return node 0 */
1000 	return 0;
1001 }
1002 #endif
1003 
1004 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
1005 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1006 					struct mminit_pfnnid_cache *state)
1007 {
1008 	int nid;
1009 
1010 	nid = __early_pfn_to_nid(pfn, state);
1011 	if (nid >= 0 && nid != node)
1012 		return false;
1013 	return true;
1014 }
1015 
1016 /* Only safe to use early in boot when initialisation is single-threaded */
1017 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1018 {
1019 	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1020 }
1021 
1022 #else
1023 
1024 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1025 {
1026 	return true;
1027 }
1028 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1029 					struct mminit_pfnnid_cache *state)
1030 {
1031 	return true;
1032 }
1033 #endif
1034 
1035 
1036 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1037 							unsigned int order)
1038 {
1039 	if (early_page_uninitialised(pfn))
1040 		return;
1041 	return __free_pages_boot_core(page, pfn, order);
1042 }
1043 
1044 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1045 static void __init deferred_free_range(struct page *page,
1046 					unsigned long pfn, int nr_pages)
1047 {
1048 	int i;
1049 
1050 	if (!page)
1051 		return;
1052 
1053 	/* Free a large naturally-aligned chunk if possible */
1054 	if (nr_pages == MAX_ORDER_NR_PAGES &&
1055 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
1056 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1057 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
1058 		return;
1059 	}
1060 
1061 	for (i = 0; i < nr_pages; i++, page++, pfn++)
1062 		__free_pages_boot_core(page, pfn, 0);
1063 }
1064 
1065 static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
1066 
1067 /* Initialise remaining memory on a node */
1068 static int __init deferred_init_memmap(void *data)
1069 {
1070 	pg_data_t *pgdat = data;
1071 	int nid = pgdat->node_id;
1072 	struct mminit_pfnnid_cache nid_init_state = { };
1073 	unsigned long start = jiffies;
1074 	unsigned long nr_pages = 0;
1075 	unsigned long walk_start, walk_end;
1076 	int i, zid;
1077 	struct zone *zone;
1078 	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1079 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1080 
1081 	if (first_init_pfn == ULONG_MAX) {
1082 		up_read(&pgdat_init_rwsem);
1083 		return 0;
1084 	}
1085 
1086 	/* Bind memory initialisation thread to a local node if possible */
1087 	if (!cpumask_empty(cpumask))
1088 		set_cpus_allowed_ptr(current, cpumask);
1089 
1090 	/* Sanity check boundaries */
1091 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1092 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1093 	pgdat->first_deferred_pfn = ULONG_MAX;
1094 
1095 	/* Only the highest zone is deferred so find it */
1096 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1097 		zone = pgdat->node_zones + zid;
1098 		if (first_init_pfn < zone_end_pfn(zone))
1099 			break;
1100 	}
1101 
1102 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1103 		unsigned long pfn, end_pfn;
1104 		struct page *page = NULL;
1105 		struct page *free_base_page = NULL;
1106 		unsigned long free_base_pfn = 0;
1107 		int nr_to_free = 0;
1108 
1109 		end_pfn = min(walk_end, zone_end_pfn(zone));
1110 		pfn = first_init_pfn;
1111 		if (pfn < walk_start)
1112 			pfn = walk_start;
1113 		if (pfn < zone->zone_start_pfn)
1114 			pfn = zone->zone_start_pfn;
1115 
1116 		for (; pfn < end_pfn; pfn++) {
1117 			if (!pfn_valid_within(pfn))
1118 				goto free_range;
1119 
1120 			/*
1121 			 * Ensure pfn_valid is checked every
1122 			 * MAX_ORDER_NR_PAGES for memory holes
1123 			 */
1124 			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1125 				if (!pfn_valid(pfn)) {
1126 					page = NULL;
1127 					goto free_range;
1128 				}
1129 			}
1130 
1131 			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1132 				page = NULL;
1133 				goto free_range;
1134 			}
1135 
1136 			/* Minimise pfn page lookups and scheduler checks */
1137 			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1138 				page++;
1139 			} else {
1140 				nr_pages += nr_to_free;
1141 				deferred_free_range(free_base_page,
1142 						free_base_pfn, nr_to_free);
1143 				free_base_page = NULL;
1144 				free_base_pfn = nr_to_free = 0;
1145 
1146 				page = pfn_to_page(pfn);
1147 				cond_resched();
1148 			}
1149 
1150 			if (page->flags) {
1151 				VM_BUG_ON(page_zone(page) != zone);
1152 				goto free_range;
1153 			}
1154 
1155 			__init_single_page(page, pfn, zid, nid);
1156 			if (!free_base_page) {
1157 				free_base_page = page;
1158 				free_base_pfn = pfn;
1159 				nr_to_free = 0;
1160 			}
1161 			nr_to_free++;
1162 
1163 			/* Where possible, batch up pages for a single free */
1164 			continue;
1165 free_range:
1166 			/* Free the current block of pages to allocator */
1167 			nr_pages += nr_to_free;
1168 			deferred_free_range(free_base_page, free_base_pfn,
1169 								nr_to_free);
1170 			free_base_page = NULL;
1171 			free_base_pfn = nr_to_free = 0;
1172 		}
1173 
1174 		first_init_pfn = max(end_pfn, first_init_pfn);
1175 	}
1176 
1177 	/* Sanity check that the next zone really is unpopulated */
1178 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1179 
1180 	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1181 					jiffies_to_msecs(jiffies - start));
1182 	up_read(&pgdat_init_rwsem);
1183 	return 0;
1184 }
1185 
1186 void __init page_alloc_init_late(void)
1187 {
1188 	int nid;
1189 
1190 	for_each_node_state(nid, N_MEMORY) {
1191 		down_read(&pgdat_init_rwsem);
1192 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1193 	}
1194 
1195 	/* Block until all are initialised */
1196 	down_write(&pgdat_init_rwsem);
1197 	up_write(&pgdat_init_rwsem);
1198 }
1199 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1200 
1201 #ifdef CONFIG_CMA
1202 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1203 void __init init_cma_reserved_pageblock(struct page *page)
1204 {
1205 	unsigned i = pageblock_nr_pages;
1206 	struct page *p = page;
1207 
1208 	do {
1209 		__ClearPageReserved(p);
1210 		set_page_count(p, 0);
1211 	} while (++p, --i);
1212 
1213 	set_pageblock_migratetype(page, MIGRATE_CMA);
1214 
1215 	if (pageblock_order >= MAX_ORDER) {
1216 		i = pageblock_nr_pages;
1217 		p = page;
1218 		do {
1219 			set_page_refcounted(p);
1220 			__free_pages(p, MAX_ORDER - 1);
1221 			p += MAX_ORDER_NR_PAGES;
1222 		} while (i -= MAX_ORDER_NR_PAGES);
1223 	} else {
1224 		set_page_refcounted(page);
1225 		__free_pages(page, pageblock_order);
1226 	}
1227 
1228 	adjust_managed_page_count(page, pageblock_nr_pages);
1229 }
1230 #endif
1231 
1232 /*
1233  * The order of subdivision here is critical for the IO subsystem.
1234  * Please do not alter this order without good reasons and regression
1235  * testing. Specifically, as large blocks of memory are subdivided,
1236  * the order in which smaller blocks are delivered depends on the order
1237  * they're subdivided in this function. This is the primary factor
1238  * influencing the order in which pages are delivered to the IO
1239  * subsystem according to empirical testing, and this is also justified
1240  * by considering the behavior of a buddy system containing a single
1241  * large block of memory acted on by a series of small allocations.
1242  * This behavior is a critical factor in sglist merging's success.
1243  *
1244  * -- nyc
1245  */
1246 static inline void expand(struct zone *zone, struct page *page,
1247 	int low, int high, struct free_area *area,
1248 	int migratetype)
1249 {
1250 	unsigned long size = 1 << high;
1251 
1252 	while (high > low) {
1253 		area--;
1254 		high--;
1255 		size >>= 1;
1256 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1257 
1258 		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
1259 			debug_guardpage_enabled() &&
1260 			high < debug_guardpage_minorder()) {
1261 			/*
1262 			 * Mark as guard pages (or a guard page) so they can
1263 			 * merge back into the allocator when the buddy is freed.
1264 			 * The corresponding page table entries are not touched;
1265 			 * the pages stay not-present in the virtual address space.
1266 			 */
1267 			set_page_guard(zone, &page[size], high, migratetype);
1268 			continue;
1269 		}
1270 		list_add(&page[size].lru, &area->free_list[migratetype]);
1271 		area->nr_free++;
1272 		set_page_order(&page[size], high);
1273 	}
1274 }
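/*
 * A sketch (not part of the build) of the subdivision expand() performs
 * above: carving an order-'low' allocation out of an order-'high' block
 * leaves exactly one free block of every order in [low, high):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int low = 0, high = 4;
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("free remainder: order %u (%lu pages)\n", high, size);
	}
	printf("returned: order %u (%lu page(s))\n", low, 1UL << low);
	return 0;
}
#endif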
1275 
1276 /*
1277  * This page is about to be returned from the page allocator
1278  */
1279 static inline int check_new_page(struct page *page)
1280 {
1281 	const char *bad_reason = NULL;
1282 	unsigned long bad_flags = 0;
1283 
1284 	if (unlikely(page_mapcount(page)))
1285 		bad_reason = "nonzero mapcount";
1286 	if (unlikely(page->mapping != NULL))
1287 		bad_reason = "non-NULL mapping";
1288 	if (unlikely(atomic_read(&page->_count) != 0))
1289 		bad_reason = "nonzero _count";
1290 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1291 		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1292 		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1293 	}
1294 #ifdef CONFIG_MEMCG
1295 	if (unlikely(page->mem_cgroup))
1296 		bad_reason = "page still charged to cgroup";
1297 #endif
1298 	if (unlikely(bad_reason)) {
1299 		bad_page(page, bad_reason, bad_flags);
1300 		return 1;
1301 	}
1302 	return 0;
1303 }
1304 
1305 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1306 								int alloc_flags)
1307 {
1308 	int i;
1309 
1310 	for (i = 0; i < (1 << order); i++) {
1311 		struct page *p = page + i;
1312 		if (unlikely(check_new_page(p)))
1313 			return 1;
1314 	}
1315 
1316 	set_page_private(page, 0);
1317 	set_page_refcounted(page);
1318 
1319 	arch_alloc_page(page, order);
1320 	kernel_map_pages(page, 1 << order, 1);
1321 	kasan_alloc_pages(page, order);
1322 
1323 	if (gfp_flags & __GFP_ZERO)
1324 		for (i = 0; i < (1 << order); i++)
1325 			clear_highpage(page + i);
1326 
1327 	if (order && (gfp_flags & __GFP_COMP))
1328 		prep_compound_page(page, order);
1329 
1330 	set_page_owner(page, order, gfp_flags);
1331 
1332 	/*
1333 	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
1334 	 * allocate the page. The expectation is that the caller is taking
1335 	 * steps that will free more memory. The caller should avoid the page
1336 	 * being used for !PFMEMALLOC purposes.
1337 	 */
1338 	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1339 
1340 	return 0;
1341 }
1342 
1343 /*
1344  * Go through the free lists for the given migratetype and remove
1345  * the smallest available page from the freelists
1346  */
1347 static inline
1348 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1349 						int migratetype)
1350 {
1351 	unsigned int current_order;
1352 	struct free_area *area;
1353 	struct page *page;
1354 
1355 	/* Find a page of the appropriate size in the preferred list */
1356 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1357 		area = &(zone->free_area[current_order]);
1358 		if (list_empty(&area->free_list[migratetype]))
1359 			continue;
1360 
1361 		page = list_entry(area->free_list[migratetype].next,
1362 							struct page, lru);
1363 		list_del(&page->lru);
1364 		rmv_page_order(page);
1365 		area->nr_free--;
1366 		expand(zone, page, order, current_order, area, migratetype);
1367 		set_freepage_migratetype(page, migratetype);
1368 		return page;
1369 	}
1370 
1371 	return NULL;
1372 }
1373 
1374 
1375 /*
1376  * This array describes the order in which lists are fallen back to when
1377  * the free lists for the desired migratetype are depleted.
1378  */
1379 static int fallbacks[MIGRATE_TYPES][4] = {
1380 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1381 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1382 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
1383 #ifdef CONFIG_CMA
1384 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
1385 #endif
1386 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
1387 #ifdef CONFIG_MEMORY_ISOLATION
1388 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
1389 #endif
1390 };
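/*
 * A userspace sketch (not part of the build) of how the fallbacks[]
 * table above is walked for a depleted MIGRATE_UNMOVABLE request:
 * RECLAIMABLE is tried first, then MOVABLE, stopping at the
 * MIGRATE_RESERVE sentinel. The list occupancy below is made up:
 */
#if 0
#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, NTYPES };

static const char *names[NTYPES] = {
	"UNMOVABLE", "RECLAIMABLE", "MOVABLE", "RESERVE"
};
static const int fallbacks[NTYPES][3] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE, RESERVE },
	[RECLAIMABLE] = { UNMOVABLE,   MOVABLE, RESERVE },
	[MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
	[RESERVE]     = { RESERVE },
};

int main(void)
{
	int free_count[NTYPES] = { 0, 0, 7, 1 };	/* made-up occupancy */
	int i, mt;

	for (i = 0; (mt = fallbacks[UNMOVABLE][i]) != RESERVE; i++) {
		if (free_count[mt]) {
			printf("falling back to %s\n", names[mt]);
			return 0;
		}
	}
	printf("no fallback; using %s\n", names[RESERVE]);
	return 0;
}
#endif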
1391 
1392 #ifdef CONFIG_CMA
1393 static struct page *__rmqueue_cma_fallback(struct zone *zone,
1394 					unsigned int order)
1395 {
1396 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1397 }
1398 #else
1399 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1400 					unsigned int order) { return NULL; }
1401 #endif
1402 
1403 /*
1404  * Move the free pages in a range to the free lists of the requested type.
1405  * Note that start_page and end_page are not aligned on a pageblock
1406  * boundary. If alignment is required, use move_freepages_block().
1407  */
1408 int move_freepages(struct zone *zone,
1409 			  struct page *start_page, struct page *end_page,
1410 			  int migratetype)
1411 {
1412 	struct page *page;
1413 	unsigned long order;
1414 	int pages_moved = 0;
1415 
1416 #ifndef CONFIG_HOLES_IN_ZONE
1417 	/*
1418 	 * page_zone is not safe to call in this context when
1419 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1420 	 * anyway as we check zone boundaries in move_freepages_block().
1421 	 * Remove at a later date when no bug reports exist related to
1422 	 * grouping pages by mobility
1423 	 */
1424 	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1425 #endif
1426 
1427 	for (page = start_page; page <= end_page;) {
1428 		/* Make sure we are not inadvertently changing nodes */
1429 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1430 
1431 		if (!pfn_valid_within(page_to_pfn(page))) {
1432 			page++;
1433 			continue;
1434 		}
1435 
1436 		if (!PageBuddy(page)) {
1437 			page++;
1438 			continue;
1439 		}
1440 
1441 		order = page_order(page);
1442 		list_move(&page->lru,
1443 			  &zone->free_area[order].free_list[migratetype]);
1444 		set_freepage_migratetype(page, migratetype);
1445 		page += 1 << order;
1446 		pages_moved += 1 << order;
1447 	}
1448 
1449 	return pages_moved;
1450 }
1451 
1452 int move_freepages_block(struct zone *zone, struct page *page,
1453 				int migratetype)
1454 {
1455 	unsigned long start_pfn, end_pfn;
1456 	struct page *start_page, *end_page;
1457 
1458 	start_pfn = page_to_pfn(page);
1459 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1460 	start_page = pfn_to_page(start_pfn);
1461 	end_page = start_page + pageblock_nr_pages - 1;
1462 	end_pfn = start_pfn + pageblock_nr_pages - 1;
1463 
1464 	/* Do not cross zone boundaries */
1465 	if (!zone_spans_pfn(zone, start_pfn))
1466 		start_page = page;
1467 	if (!zone_spans_pfn(zone, end_pfn))
1468 		return 0;
1469 
1470 	return move_freepages(zone, start_page, end_page, migratetype);
1471 }
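/*
 * A sketch (not part of the build) of the pageblock alignment in
 * move_freepages_block() above; pageblock_nr_pages of 512 is just an
 * example value (order-9 pageblocks, as with 4K pages and 2M hugepages):
 */
#if 0
#include <stdio.h>

#define PB_NR_PAGES 512UL	/* example pageblock_nr_pages */

int main(void)
{
	unsigned long pfn = 1234;
	unsigned long start_pfn = pfn & ~(PB_NR_PAGES - 1);
	unsigned long end_pfn = start_pfn + PB_NR_PAGES - 1;

	printf("pfn %lu -> pageblock [%lu, %lu]\n", pfn, start_pfn, end_pfn);
	return 0;
}
#endif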
1472 
1473 static void change_pageblock_range(struct page *pageblock_page,
1474 					int start_order, int migratetype)
1475 {
1476 	int nr_pageblocks = 1 << (start_order - pageblock_order);
1477 
1478 	while (nr_pageblocks--) {
1479 		set_pageblock_migratetype(pageblock_page, migratetype);
1480 		pageblock_page += pageblock_nr_pages;
1481 	}
1482 }
1483 
1484 /*
1485  * When we are falling back to another migratetype during allocation, try to
1486  * steal extra free pages from the same pageblocks to satisfy further
1487  * allocations, instead of polluting multiple pageblocks.
1488  *
1489  * If we are stealing a relatively large buddy page, it is likely there will
1490  * be more free pages in the pageblock, so try to steal them all. For
1491  * reclaimable and unmovable allocations, we steal regardless of page size,
1492  * as fragmentation caused by those allocations polluting movable pageblocks
1493  * is worse than movable allocations stealing from unmovable and reclaimable
1494  * pageblocks.
1495  */
1496 static bool can_steal_fallback(unsigned int order, int start_mt)
1497 {
1498 	/*
1499 	 * Leaving this order check is intended, although there is
1500 	 * relaxed order check in next check. The reason is that
1501 	 * we can actually steal whole pageblock if this condition met,
1502 	 * but, below check doesn't guarantee it and that is just heuristic
1503 	 * so could be changed anytime.
1504 	 */
1505 	if (order >= pageblock_order)
1506 		return true;
1507 
1508 	if (order >= pageblock_order / 2 ||
1509 		start_mt == MIGRATE_RECLAIMABLE ||
1510 		start_mt == MIGRATE_UNMOVABLE ||
1511 		page_group_by_mobility_disabled)
1512 		return true;
1513 
1514 	return false;
1515 }
1516 
1517 /*
1518  * This function implements the actual steal behaviour. If the order is
1519  * large enough, we can steal the whole pageblock. If not, we first move
1520  * the free pages in this pageblock and check whether at least half of the
1521  * pages were moved. If so, we can change the migratetype of the pageblock
1522  * and permanently use its pages as the requested migratetype in the future.
1523  */
1524 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1525 							  int start_type)
1526 {
1527 	int current_order = page_order(page);
1528 	int pages;
1529 
1530 	/* Take ownership for orders >= pageblock_order */
1531 	if (current_order >= pageblock_order) {
1532 		change_pageblock_range(page, current_order, start_type);
1533 		return;
1534 	}
1535 
1536 	pages = move_freepages_block(zone, page, start_type);
1537 
1538 	/* Claim the whole block if over half of it is free */
1539 	if (pages >= (1 << (pageblock_order-1)) ||
1540 			page_group_by_mobility_disabled)
1541 		set_pageblock_migratetype(page, start_type);
1542 }
1543 
1544 /*
1545  * Check whether there is a suitable fallback freepage with the requested
1546  * order. If only_stealable is true, this function returns fallback_mt only
1547  * if we can steal the other freepages all together. This helps to reduce
1548  * fragmentation due to mixed-migratetype pages in one pageblock.
1549  */
1550 int find_suitable_fallback(struct free_area *area, unsigned int order,
1551 			int migratetype, bool only_stealable, bool *can_steal)
1552 {
1553 	int i;
1554 	int fallback_mt;
1555 
1556 	if (area->nr_free == 0)
1557 		return -1;
1558 
1559 	*can_steal = false;
1560 	for (i = 0;; i++) {
1561 		fallback_mt = fallbacks[migratetype][i];
1562 		if (fallback_mt == MIGRATE_RESERVE)
1563 			break;
1564 
1565 		if (list_empty(&area->free_list[fallback_mt]))
1566 			continue;
1567 
1568 		if (can_steal_fallback(order, migratetype))
1569 			*can_steal = true;
1570 
1571 		if (!only_stealable)
1572 			return fallback_mt;
1573 
1574 		if (*can_steal)
1575 			return fallback_mt;
1576 	}
1577 
1578 	return -1;
1579 }
1580 
1581 /* Remove an element from the buddy allocator from the fallback list */
1582 static inline struct page *
1583 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1584 {
1585 	struct free_area *area;
1586 	unsigned int current_order;
1587 	struct page *page;
1588 	int fallback_mt;
1589 	bool can_steal;
1590 
1591 	/* Find the largest possible block of pages in the other list */
1592 	for (current_order = MAX_ORDER-1;
1593 				current_order >= order && current_order <= MAX_ORDER-1;
1594 				--current_order) {
1595 		area = &(zone->free_area[current_order]);
1596 		fallback_mt = find_suitable_fallback(area, current_order,
1597 				start_migratetype, false, &can_steal);
1598 		if (fallback_mt == -1)
1599 			continue;
1600 
1601 		page = list_entry(area->free_list[fallback_mt].next,
1602 						struct page, lru);
1603 		if (can_steal)
1604 			steal_suitable_fallback(zone, page, start_migratetype);
1605 
1606 		/* Remove the page from the freelists */
1607 		area->nr_free--;
1608 		list_del(&page->lru);
1609 		rmv_page_order(page);
1610 
1611 		expand(zone, page, order, current_order, area,
1612 					start_migratetype);
1613 		/*
1614 		 * The freepage_migratetype may differ from pageblock's
1615 		 * migratetype depending on the decisions in
1616 		 * try_to_steal_freepages(). This is OK as long as it
1617 		 * does not differ for MIGRATE_CMA pageblocks. For CMA
1618 		 * we need to make sure unallocated pages flushed from
1619 		 * pcp lists are returned to the correct freelist.
1620 		 */
1621 		set_freepage_migratetype(page, start_migratetype);
1622 
1623 		trace_mm_page_alloc_extfrag(page, order, current_order,
1624 			start_migratetype, fallback_mt);
1625 
1626 		return page;
1627 	}
1628 
1629 	return NULL;
1630 }
1631 
1632 /*
1633  * Do the hard work of removing an element from the buddy allocator.
1634  * Call me with the zone->lock already held.
1635  */
1636 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1637 						int migratetype)
1638 {
1639 	struct page *page;
1640 
1641 retry_reserve:
1642 	page = __rmqueue_smallest(zone, order, migratetype);
1643 
1644 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1645 		if (migratetype == MIGRATE_MOVABLE)
1646 			page = __rmqueue_cma_fallback(zone, order);
1647 
1648 		if (!page)
1649 			page = __rmqueue_fallback(zone, order, migratetype);
1650 
1651 		/*
1652 		 * Use MIGRATE_RESERVE rather than failing the allocation. A goto
1653 		 * is used because __rmqueue_smallest is an inline function
1654 		 * and we want just one call site.
1655 		 */
1656 		if (!page) {
1657 			migratetype = MIGRATE_RESERVE;
1658 			goto retry_reserve;
1659 		}
1660 	}
1661 
1662 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1663 	return page;
1664 }
1665 
1666 /*
1667  * Obtain a specified number of elements from the buddy allocator, all under
1668  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1669  * Returns the number of new pages which were placed at *list.
1670  */
1671 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1672 			unsigned long count, struct list_head *list,
1673 			int migratetype, bool cold)
1674 {
1675 	int i;
1676 
1677 	spin_lock(&zone->lock);
1678 	for (i = 0; i < count; ++i) {
1679 		struct page *page = __rmqueue(zone, order, migratetype);
1680 		if (unlikely(page == NULL))
1681 			break;
1682 
1683 		/*
1684 		 * Split buddy pages returned by expand() are received here
1685 		 * in physical page order. The page is added to the callers and
1686 		 * list and the list head then moves forward. From the callers
1687 		 * perspective, the linked list is ordered by page number in
1688 		 * some conditions. This is useful for IO devices that can
1689 		 * merge IO requests if the physical pages are ordered
1690 		 * properly.
1691 		 */
1692 		if (likely(!cold))
1693 			list_add(&page->lru, list);
1694 		else
1695 			list_add_tail(&page->lru, list);
1696 		list = &page->lru;
1697 		if (is_migrate_cma(get_freepage_migratetype(page)))
1698 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1699 					      -(1 << order));
1700 	}
1701 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1702 	spin_unlock(&zone->lock);
1703 	return i;
1704 }
1705 
1706 #ifdef CONFIG_NUMA
1707 /*
1708  * Called from the vmstat counter updater to drain the pagesets of the
1709  * currently executing processor on remote nodes after they have
1710  * expired.
1711  *
1712  * Note that this function must be called with the thread pinned to
1713  * a single processor.
1714  */
1715 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1716 {
1717 	unsigned long flags;
1718 	int to_drain, batch;
1719 
1720 	local_irq_save(flags);
1721 	batch = READ_ONCE(pcp->batch);
1722 	to_drain = min(pcp->count, batch);
1723 	if (to_drain > 0) {
1724 		free_pcppages_bulk(zone, to_drain, pcp);
1725 		pcp->count -= to_drain;
1726 	}
1727 	local_irq_restore(flags);
1728 }
1729 #endif
1730 
1731 /*
1732  * Drain pcplists of the indicated processor and zone.
1733  *
1734  * The processor must either be the current processor and the
1735  * thread pinned to the current processor or a processor that
1736  * is not online.
1737  */
1738 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1739 {
1740 	unsigned long flags;
1741 	struct per_cpu_pageset *pset;
1742 	struct per_cpu_pages *pcp;
1743 
1744 	local_irq_save(flags);
1745 	pset = per_cpu_ptr(zone->pageset, cpu);
1746 
1747 	pcp = &pset->pcp;
1748 	if (pcp->count) {
1749 		free_pcppages_bulk(zone, pcp->count, pcp);
1750 		pcp->count = 0;
1751 	}
1752 	local_irq_restore(flags);
1753 }
1754 
1755 /*
1756  * Drain pcplists of all zones on the indicated processor.
1757  *
1758  * The processor must either be the current processor and the
1759  * thread pinned to the current processor or a processor that
1760  * is not online.
1761  */
1762 static void drain_pages(unsigned int cpu)
1763 {
1764 	struct zone *zone;
1765 
1766 	for_each_populated_zone(zone) {
1767 		drain_pages_zone(cpu, zone);
1768 	}
1769 }
1770 
1771 /*
1772  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1773  *
1774  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1775  * the single zone's pages.
1776  */
1777 void drain_local_pages(struct zone *zone)
1778 {
1779 	int cpu = smp_processor_id();
1780 
1781 	if (zone)
1782 		drain_pages_zone(cpu, zone);
1783 	else
1784 		drain_pages(cpu);
1785 }
1786 
1787 /*
1788  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1789  *
1790  * When zone parameter is non-NULL, spill just the single zone's pages.
1791  *
1792  * Note that this code is protected against sending an IPI to an offline
1793  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1794  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1795  * nothing keeps CPUs from showing up after we populated the cpumask and
1796  * before the call to on_each_cpu_mask().
1797  */
1798 void drain_all_pages(struct zone *zone)
1799 {
1800 	int cpu;
1801 
1802 	/*
1803 	 * Allocate in the BSS so we won't require allocation in
1804 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1805 	 */
1806 	static cpumask_t cpus_with_pcps;
1807 
1808 	/*
1809 	 * We don't care about racing with a CPU hotplug event,
1810 	 * as the offline notification will cause the notified
1811 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
1812 	 * disables preemption as part of its processing.
1813 	 */
1814 	for_each_online_cpu(cpu) {
1815 		struct per_cpu_pageset *pcp;
1816 		struct zone *z;
1817 		bool has_pcps = false;
1818 
1819 		if (zone) {
1820 			pcp = per_cpu_ptr(zone->pageset, cpu);
1821 			if (pcp->pcp.count)
1822 				has_pcps = true;
1823 		} else {
1824 			for_each_populated_zone(z) {
1825 				pcp = per_cpu_ptr(z->pageset, cpu);
1826 				if (pcp->pcp.count) {
1827 					has_pcps = true;
1828 					break;
1829 				}
1830 			}
1831 		}
1832 
1833 		if (has_pcps)
1834 			cpumask_set_cpu(cpu, &cpus_with_pcps);
1835 		else
1836 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
1837 	}
1838 	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
1839 								zone, 1);
1840 }
1841 
1842 #ifdef CONFIG_HIBERNATION
1843 
1844 void mark_free_pages(struct zone *zone)
1845 {
1846 	unsigned long pfn, max_zone_pfn;
1847 	unsigned long flags;
1848 	unsigned int order, t;
1849 	struct list_head *curr;
1850 
1851 	if (zone_is_empty(zone))
1852 		return;
1853 
1854 	spin_lock_irqsave(&zone->lock, flags);
1855 
1856 	max_zone_pfn = zone_end_pfn(zone);
1857 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1858 		if (pfn_valid(pfn)) {
1859 			struct page *page = pfn_to_page(pfn);
1860 
1861 			if (!swsusp_page_is_forbidden(page))
1862 				swsusp_unset_page_free(page);
1863 		}
1864 
1865 	for_each_migratetype_order(order, t) {
1866 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1867 			unsigned long i;
1868 
1869 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1870 			for (i = 0; i < (1UL << order); i++)
1871 				swsusp_set_page_free(pfn_to_page(pfn + i));
1872 		}
1873 	}
1874 	spin_unlock_irqrestore(&zone->lock, flags);
1875 }
1876 #endif /* CONFIG_HIBERNATION */
1877 
1878 /*
1879  * Free a 0-order page
1880  * cold == true ? free a cold page : free a hot page
1881  */
1882 void free_hot_cold_page(struct page *page, bool cold)
1883 {
1884 	struct zone *zone = page_zone(page);
1885 	struct per_cpu_pages *pcp;
1886 	unsigned long flags;
1887 	unsigned long pfn = page_to_pfn(page);
1888 	int migratetype;
1889 
1890 	if (!free_pages_prepare(page, 0))
1891 		return;
1892 
1893 	migratetype = get_pfnblock_migratetype(page, pfn);
1894 	set_freepage_migratetype(page, migratetype);
1895 	local_irq_save(flags);
1896 	__count_vm_event(PGFREE);
1897 
1898 	/*
1899 	 * We only track unmovable, reclaimable and movable on pcp lists.
1900 	 * Free ISOLATE pages back to the allocator because they are being
1901 	 * offlined but treat RESERVE as movable pages so we can get those
1902 	 * areas back if necessary. Otherwise, we may have to free
1903 	 * excessively into the page allocator.
1904 	 */
1905 	if (migratetype >= MIGRATE_PCPTYPES) {
1906 		if (unlikely(is_migrate_isolate(migratetype))) {
1907 			free_one_page(zone, page, pfn, 0, migratetype);
1908 			goto out;
1909 		}
1910 		migratetype = MIGRATE_MOVABLE;
1911 	}
1912 
1913 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1914 	if (!cold)
1915 		list_add(&page->lru, &pcp->lists[migratetype]);
1916 	else
1917 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1918 	pcp->count++;
1919 	if (pcp->count >= pcp->high) {
1920 		unsigned long batch = READ_ONCE(pcp->batch);
1921 		free_pcppages_bulk(zone, batch, pcp);
1922 		pcp->count -= batch;
1923 	}
1924 
1925 out:
1926 	local_irq_restore(flags);
1927 }
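
/*
 * Worked example (illustrative numbers, not taken from this source):
 * with pcp->high == 186 and pcp->batch == 31, the pcp list absorbs
 * frees until the free that takes pcp->count to 186; that free triggers
 * free_pcppages_bulk() of one batch (31 pages), dropping pcp->count
 * back to 155 while the rest stays cached for cheap re-allocation.
 */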
1928 
1929 /*
1930  * Free a list of 0-order pages
1931  */
1932 void free_hot_cold_page_list(struct list_head *list, bool cold)
1933 {
1934 	struct page *page, *next;
1935 
1936 	list_for_each_entry_safe(page, next, list, lru) {
1937 		trace_mm_page_free_batched(page, cold);
1938 		free_hot_cold_page(page, cold);
1939 	}
1940 }
1941 
1942 /*
1943  * split_page takes a non-compound higher-order page, and splits it into
1944  * n (1<<order) sub-pages: page[0..n-1]
1945  * Each sub-page must be freed individually.
1946  *
1947  * Note: this is probably too low level an operation for use in drivers.
1948  * Please consult with lkml before using this in your driver.
1949  */
1950 void split_page(struct page *page, unsigned int order)
1951 {
1952 	int i;
1953 
1954 	VM_BUG_ON_PAGE(PageCompound(page), page);
1955 	VM_BUG_ON_PAGE(!page_count(page), page);
1956 
1957 #ifdef CONFIG_KMEMCHECK
1958 	/*
1959 	 * Split shadow pages too, because free(page[0]) would
1960 	 * otherwise free the whole shadow.
1961 	 */
1962 	if (kmemcheck_page_is_tracked(page))
1963 		split_page(virt_to_page(page[0].shadow), order);
1964 #endif
1965 
1966 	set_page_owner(page, 0, 0);
1967 	for (i = 1; i < (1 << order); i++) {
1968 		set_page_refcounted(page + i);
1969 		set_page_owner(page + i, 0, 0);
1970 	}
1971 }
1972 EXPORT_SYMBOL_GPL(split_page);
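
/*
 * Editorial sketch: how a hypothetical caller (example_split_alloc() is
 * not kernel API) could use split_page() to hand out individually
 * freeable order-0 pages carved from one higher-order allocation.
 */
static struct page *example_split_alloc(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	/* each of the 1 << order sub-pages is now refcounted on its own */
	split_page(page, order);
	return page;	/* free the sub-pages one by one with __free_page() */
}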
1973 
1974 int __isolate_free_page(struct page *page, unsigned int order)
1975 {
1976 	unsigned long watermark;
1977 	struct zone *zone;
1978 	int mt;
1979 
1980 	BUG_ON(!PageBuddy(page));
1981 
1982 	zone = page_zone(page);
1983 	mt = get_pageblock_migratetype(page);
1984 
1985 	if (!is_migrate_isolate(mt)) {
1986 		/* Obey watermarks as if the page was being allocated */
1987 		watermark = low_wmark_pages(zone) + (1 << order);
1988 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1989 			return 0;
1990 
1991 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
1992 	}
1993 
1994 	/* Remove page from free list */
1995 	list_del(&page->lru);
1996 	zone->free_area[order].nr_free--;
1997 	rmv_page_order(page);
1998 
1999 	/* Set the pageblock if the isolated page is at least a pageblock */
2000 	if (order >= pageblock_order - 1) {
2001 		struct page *endpage = page + (1 << order) - 1;
2002 		for (; page < endpage; page += pageblock_nr_pages) {
2003 			int mt = get_pageblock_migratetype(page);
2004 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
2005 				set_pageblock_migratetype(page,
2006 							  MIGRATE_MOVABLE);
2007 		}
2008 	}
2009 
2010 	set_page_owner(page, order, 0);
2011 	return 1UL << order;
2012 }
2013 
2014 /*
2015  * Similar to split_page except the page is already free. As this is only
2016  * being used for migration, the migratetype of the block also changes.
2017  * As this is called with interrupts disabled, the caller is responsible
2018  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2019  * are enabled.
2020  *
2021  * Note: this is probably too low level an operation for use in drivers.
2022  * Please consult with lkml before using this in your driver.
2023  */
2024 int split_free_page(struct page *page)
2025 {
2026 	unsigned int order;
2027 	int nr_pages;
2028 
2029 	order = page_order(page);
2030 
2031 	nr_pages = __isolate_free_page(page, order);
2032 	if (!nr_pages)
2033 		return 0;
2034 
2035 	/* Split into individual pages */
2036 	set_page_refcounted(page);
2037 	split_page(page, order);
2038 	return nr_pages;
2039 }
2040 
2041 /*
2042  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2043  */
2044 static inline
2045 struct page *buffered_rmqueue(struct zone *preferred_zone,
2046 			struct zone *zone, unsigned int order,
2047 			gfp_t gfp_flags, int migratetype)
2048 {
2049 	unsigned long flags;
2050 	struct page *page;
2051 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
2052 
2053 	if (likely(order == 0)) {
2054 		struct per_cpu_pages *pcp;
2055 		struct list_head *list;
2056 
2057 		local_irq_save(flags);
2058 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
2059 		list = &pcp->lists[migratetype];
2060 		if (list_empty(list)) {
2061 			pcp->count += rmqueue_bulk(zone, 0,
2062 					pcp->batch, list,
2063 					migratetype, cold);
2064 			if (unlikely(list_empty(list)))
2065 				goto failed;
2066 		}
2067 
2068 		if (cold)
2069 			page = list_entry(list->prev, struct page, lru);
2070 		else
2071 			page = list_entry(list->next, struct page, lru);
2072 
2073 		list_del(&page->lru);
2074 		pcp->count--;
2075 	} else {
2076 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
2077 			/*
2078 			 * __GFP_NOFAIL is not to be used in new code.
2079 			 *
2080 			 * All __GFP_NOFAIL callers should be fixed so that they
2081 			 * properly detect and handle allocation failures.
2082 			 *
2083 			 * We most definitely don't want callers attempting to
2084 			 * allocate greater than order-1 page units with
2085 			 * __GFP_NOFAIL.
2086 			 */
2087 			WARN_ON_ONCE(order > 1);
2088 		}
2089 		spin_lock_irqsave(&zone->lock, flags);
2090 		page = __rmqueue(zone, order, migratetype);
2091 		spin_unlock(&zone->lock);
2092 		if (!page)
2093 			goto failed;
2094 		__mod_zone_freepage_state(zone, -(1 << order),
2095 					  get_freepage_migratetype(page));
2096 	}
2097 
2098 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
2099 	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
2100 	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2101 		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2102 
2103 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
2104 	zone_statistics(preferred_zone, zone, gfp_flags);
2105 	local_irq_restore(flags);
2106 
2107 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
2108 	return page;
2109 
2110 failed:
2111 	local_irq_restore(flags);
2112 	return NULL;
2113 }
2114 
2115 #ifdef CONFIG_FAIL_PAGE_ALLOC
2116 
2117 static struct {
2118 	struct fault_attr attr;
2119 
2120 	u32 ignore_gfp_highmem;
2121 	u32 ignore_gfp_wait;
2122 	u32 min_order;
2123 } fail_page_alloc = {
2124 	.attr = FAULT_ATTR_INITIALIZER,
2125 	.ignore_gfp_wait = 1,
2126 	.ignore_gfp_highmem = 1,
2127 	.min_order = 1,
2128 };
2129 
2130 static int __init setup_fail_page_alloc(char *str)
2131 {
2132 	return setup_fault_attr(&fail_page_alloc.attr, str);
2133 }
2134 __setup("fail_page_alloc=", setup_fail_page_alloc);
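
/*
 * Usage note: per Documentation/fault-injection, the boot parameter
 * takes the generic fault_attr tuple
 * <interval>,<probability>,<space>,<times>, e.g.
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * meaning every allocation is considered, 10% fail, no size filter,
 * and no limit on the number of failures; the debugfs knobs below
 * further restrict which allocations may fail.
 */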
2135 
2136 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2137 {
2138 	if (order < fail_page_alloc.min_order)
2139 		return false;
2140 	if (gfp_mask & __GFP_NOFAIL)
2141 		return false;
2142 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2143 		return false;
2144 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
2145 		return false;
2146 
2147 	return should_fail(&fail_page_alloc.attr, 1 << order);
2148 }
2149 
2150 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2151 
2152 static int __init fail_page_alloc_debugfs(void)
2153 {
2154 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2155 	struct dentry *dir;
2156 
2157 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2158 					&fail_page_alloc.attr);
2159 	if (IS_ERR(dir))
2160 		return PTR_ERR(dir);
2161 
2162 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2163 				&fail_page_alloc.ignore_gfp_wait))
2164 		goto fail;
2165 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2166 				&fail_page_alloc.ignore_gfp_highmem))
2167 		goto fail;
2168 	if (!debugfs_create_u32("min-order", mode, dir,
2169 				&fail_page_alloc.min_order))
2170 		goto fail;
2171 
2172 	return 0;
2173 fail:
2174 	debugfs_remove_recursive(dir);
2175 
2176 	return -ENOMEM;
2177 }
2178 
2179 late_initcall(fail_page_alloc_debugfs);
2180 
2181 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2182 
2183 #else /* CONFIG_FAIL_PAGE_ALLOC */
2184 
2185 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2186 {
2187 	return false;
2188 }
2189 
2190 #endif /* CONFIG_FAIL_PAGE_ALLOC */
2191 
2192 /*
2193  * Return true if free pages are above 'mark'. This takes into account the order
2194  * of the allocation.
2195  */
2196 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2197 			unsigned long mark, int classzone_idx, int alloc_flags,
2198 			long free_pages)
2199 {
2200 	/* free_pages may go negative - that's OK */
2201 	long min = mark;
2202 	int o;
2203 	long free_cma = 0;
2204 
2205 	free_pages -= (1 << order) - 1;
2206 	if (alloc_flags & ALLOC_HIGH)
2207 		min -= min / 2;
2208 	if (alloc_flags & ALLOC_HARDER)
2209 		min -= min / 4;
2210 #ifdef CONFIG_CMA
2211 	/* If allocation can't use CMA areas don't use free CMA pages */
2212 	if (!(alloc_flags & ALLOC_CMA))
2213 		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
2214 #endif
2215 
2216 	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
2217 		return false;
2218 	for (o = 0; o < order; o++) {
2219 		/* At the next order, this order's pages become unavailable */
2220 		free_pages -= z->free_area[o].nr_free << o;
2221 
2222 		/* Require fewer higher order pages to be free */
2223 		min >>= 1;
2224 
2225 		if (free_pages <= min)
2226 			return false;
2227 	}
2228 	return true;
2229 }
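
/*
 * Worked example (illustrative numbers): order = 2, mark = 128, no
 * lowmem reserve or CMA, free_pages = 200, nr_free[0] = 40 and
 * nr_free[1] = 20. First free_pages -= (1 << 2) - 1 gives 197 > 128,
 * so the base check passes. Then o = 0: 197 - (40 << 0) = 157 is above
 * (min >>= 1) == 64; o = 1: 157 - (20 << 1) = 117 is above
 * (min >>= 1) == 32. All checks hold, so an order-2 request meets the
 * watermark.
 */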
2230 
2231 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2232 		      int classzone_idx, int alloc_flags)
2233 {
2234 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2235 					zone_page_state(z, NR_FREE_PAGES));
2236 }
2237 
2238 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2239 			unsigned long mark, int classzone_idx, int alloc_flags)
2240 {
2241 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
2242 
2243 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2244 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2245 
2246 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2247 								free_pages);
2248 }
2249 
2250 #ifdef CONFIG_NUMA
2251 /*
2252  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
2253  * skip over zones that are not allowed by the cpuset, or that have
2254  * been recently (in last second) found to be nearly full.  See further
2255  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
2256  * that have to skip over a lot of full or unallowed zones.
2257  *
2258  * If the zonelist cache is present in the passed zonelist, then
2259  * returns a pointer to the allowed node mask (either the current
2260  * task's mems_allowed, or node_states[N_MEMORY].)
2261  *
2262  * If the zonelist cache is not available for this zonelist, does
2263  * nothing and returns NULL.
2264  *
2265  * If the fullzones BITMAP in the zonelist cache is stale (more than
2266  * a second since last zap'd) then we zap it out (clear its bits.)
2267  *
2268  * We hold off even calling zlc_setup, until after we've checked the
2269  * first zone in the zonelist, on the theory that most allocations will
2270  * be satisfied from that first zone, so best to examine that zone as
2271  * quickly as we can.
2272  */
2273 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
2274 {
2275 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2276 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
2277 
2278 	zlc = zonelist->zlcache_ptr;
2279 	if (!zlc)
2280 		return NULL;
2281 
2282 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
2283 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2284 		zlc->last_full_zap = jiffies;
2285 	}
2286 
2287 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
2288 					&cpuset_current_mems_allowed :
2289 					&node_states[N_MEMORY];
2290 	return allowednodes;
2291 }
2292 
2293 /*
2294  * Given 'z' scanning a zonelist, run a couple of quick checks to see
2295  * if it is worth looking at further for free memory:
2296  *  1) Check that the zone isn't thought to be full (doesn't have its
2297  *     bit set in the zonelist_cache fullzones BITMAP).
2298  *  2) Check that the zone's node (obtained from the zonelist_cache
2299  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
2300  * Return true (non-zero) if zone is worth looking at further, or
2301  * else return false (zero) if it is not.
2302  *
2303  * This check -ignores- the distinction between various watermarks,
2304  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
2305  * found to be full for any variation of these watermarks, it will
2306  * be considered full for up to one second by all requests, unless
2307  * we are so low on memory on all allowed nodes that we are forced
2308  * into the second scan of the zonelist.
2309  *
2310  * In the second scan we ignore this zonelist cache and exactly
2311  * apply the watermarks to all zones, even if it is slower to do so.
2312  * We are low on memory in the second scan, and should leave no stone
2313  * unturned looking for a free page.
2314  */
2315 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
2316 						nodemask_t *allowednodes)
2317 {
2318 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2319 	int i;				/* index of *z in zonelist zones */
2320 	int n;				/* node that zone *z is on */
2321 
2322 	zlc = zonelist->zlcache_ptr;
2323 	if (!zlc)
2324 		return 1;
2325 
2326 	i = z - zonelist->_zonerefs;
2327 	n = zlc->z_to_n[i];
2328 
2329 	/* This zone is worth trying if it is allowed but not full */
2330 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
2331 }
2332 
2333 /*
2334  * Given 'z' scanning a zonelist, set the corresponding bit in
2335  * zlc->fullzones, so that subsequent attempts to allocate a page
2336  * from that zone don't waste time re-examining it.
2337  */
2338 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2339 {
2340 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2341 	int i;				/* index of *z in zonelist zones */
2342 
2343 	zlc = zonelist->zlcache_ptr;
2344 	if (!zlc)
2345 		return;
2346 
2347 	i = z - zonelist->_zonerefs;
2348 
2349 	set_bit(i, zlc->fullzones);
2350 }
2351 
2352 /*
2353  * clear all zones full, called after direct reclaim makes progress so that
2354  * a zone that was recently full is not skipped over for up to a second
2355  */
2356 static void zlc_clear_zones_full(struct zonelist *zonelist)
2357 {
2358 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
2359 
2360 	zlc = zonelist->zlcache_ptr;
2361 	if (!zlc)
2362 		return;
2363 
2364 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2365 }
2366 
2367 static bool zone_local(struct zone *local_zone, struct zone *zone)
2368 {
2369 	return local_zone->node == zone->node;
2370 }
2371 
2372 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2373 {
2374 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2375 				RECLAIM_DISTANCE;
2376 }
2377 
2378 #else	/* CONFIG_NUMA */
2379 
2380 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
2381 {
2382 	return NULL;
2383 }
2384 
2385 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
2386 				nodemask_t *allowednodes)
2387 {
2388 	return 1;
2389 }
2390 
2391 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2392 {
2393 }
2394 
2395 static void zlc_clear_zones_full(struct zonelist *zonelist)
2396 {
2397 }
2398 
2399 static bool zone_local(struct zone *local_zone, struct zone *zone)
2400 {
2401 	return true;
2402 }
2403 
2404 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2405 {
2406 	return true;
2407 }
2408 
2409 #endif	/* CONFIG_NUMA */
2410 
2411 static void reset_alloc_batches(struct zone *preferred_zone)
2412 {
2413 	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2414 
2415 	do {
2416 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
2417 			high_wmark_pages(zone) - low_wmark_pages(zone) -
2418 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
2419 		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2420 	} while (zone++ != preferred_zone);
2421 }
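
/*
 * Example of the reset arithmetic (illustrative numbers): if
 * high_wmark - low_wmark == 1024 pages and NR_ALLOC_BATCH has been
 * driven down to -37 by allocations, the mod_zone_page_state() delta
 * is 1024 - (-37) = 1061, restoring the counter to exactly
 * high_wmark - low_wmark for the next fair pass.
 */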
2422 
2423 /*
2424  * get_page_from_freelist goes through the zonelist trying to allocate
2425  * a page.
2426  */
2427 static struct page *
2428 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2429 						const struct alloc_context *ac)
2430 {
2431 	struct zonelist *zonelist = ac->zonelist;
2432 	struct zoneref *z;
2433 	struct page *page = NULL;
2434 	struct zone *zone;
2435 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
2436 	int zlc_active = 0;		/* set if using zonelist_cache */
2437 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
2438 	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
2439 				(gfp_mask & __GFP_WRITE);
2440 	int nr_fair_skipped = 0;
2441 	bool zonelist_rescan;
2442 
2443 zonelist_scan:
2444 	zonelist_rescan = false;
2445 
2446 	/*
2447 	 * Scan zonelist, looking for a zone with enough free.
2448 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
2449 	 */
2450 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2451 								ac->nodemask) {
2452 		unsigned long mark;
2453 
2454 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2455 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
2456 				continue;
2457 		if (cpusets_enabled() &&
2458 			(alloc_flags & ALLOC_CPUSET) &&
2459 			!cpuset_zone_allowed(zone, gfp_mask))
2460 				continue;
2461 		/*
2462 		 * Distribute pages in proportion to the individual
2463 		 * zone size to ensure fair page aging.  The zone a
2464 		 * page was allocated in should have no effect on the
2465 		 * time the page has in memory before being reclaimed.
2466 		 */
2467 		if (alloc_flags & ALLOC_FAIR) {
2468 			if (!zone_local(ac->preferred_zone, zone))
2469 				break;
2470 			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2471 				nr_fair_skipped++;
2472 				continue;
2473 			}
2474 		}
2475 		/*
2476 		 * When allocating a page cache page for writing, we
2477 		 * want to get it from a zone that is within its dirty
2478 		 * limit, such that no single zone holds more than its
2479 		 * proportional share of globally allowed dirty pages.
2480 		 * The dirty limits take into account the zone's
2481 		 * lowmem reserves and high watermark so that kswapd
2482 		 * should be able to balance it without having to
2483 		 * write pages from its LRU list.
2484 		 *
2485 		 * This may look like it could increase pressure on
2486 		 * lower zones by failing allocations in higher zones
2487 		 * before they are full.  But the pages that do spill
2488 		 * over are limited as the lower zones are protected
2489 		 * by this very same mechanism.  It should not become
2490 		 * a practical burden to them.
2491 		 *
2492 		 * XXX: For now, allow allocations to potentially
2493 		 * exceed the per-zone dirty limit in the slowpath
2494 		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
2495 		 * which is important when on a NUMA setup the allowed
2496 		 * zones are together not big enough to reach the
2497 		 * global limit.  The proper fix for these situations
2498 		 * will require awareness of zones in the
2499 		 * dirty-throttling and the flusher threads.
2500 		 */
2501 		if (consider_zone_dirty && !zone_dirty_ok(zone))
2502 			continue;
2503 
2504 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2505 		if (!zone_watermark_ok(zone, order, mark,
2506 				       ac->classzone_idx, alloc_flags)) {
2507 			int ret;
2508 
2509 			/* Checked here to keep the fast path fast */
2510 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2511 			if (alloc_flags & ALLOC_NO_WATERMARKS)
2512 				goto try_this_zone;
2513 
2514 			if (IS_ENABLED(CONFIG_NUMA) &&
2515 					!did_zlc_setup && nr_online_nodes > 1) {
2516 				/*
2517 				 * We do zlc_setup() only when there are
2518 				 * multiple nodes, and only after the first
2519 				 * zone allowed by the cpuset has been checked.
2520 				 */
2521 				allowednodes = zlc_setup(zonelist, alloc_flags);
2522 				zlc_active = 1;
2523 				did_zlc_setup = 1;
2524 			}
2525 
2526 			if (zone_reclaim_mode == 0 ||
2527 			    !zone_allows_reclaim(ac->preferred_zone, zone))
2528 				goto this_zone_full;
2529 
2530 			/*
2531 			 * As we may have just activated ZLC, check if the first
2532 			 * eligible zone has failed zone_reclaim recently.
2533 			 */
2534 			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2535 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
2536 				continue;
2537 
2538 			ret = zone_reclaim(zone, gfp_mask, order);
2539 			switch (ret) {
2540 			case ZONE_RECLAIM_NOSCAN:
2541 				/* did not scan */
2542 				continue;
2543 			case ZONE_RECLAIM_FULL:
2544 				/* scanned but unreclaimable */
2545 				continue;
2546 			default:
2547 			/* did we reclaim enough? */
2548 				if (zone_watermark_ok(zone, order, mark,
2549 						ac->classzone_idx, alloc_flags))
2550 					goto try_this_zone;
2551 
2552 				/*
2553 				 * Failed to reclaim enough to meet watermark.
2554 				 * Only mark the zone full if checking the min
2555 				 * watermark or if we failed to reclaim just
2556 				 * 1<<order pages or else the page allocator
2557 				 * fastpath will prematurely mark zones full
2558 				 * when the watermark is between the low and
2559 				 * min watermarks.
2560 				 */
2561 				if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
2562 				    ret == ZONE_RECLAIM_SOME)
2563 					goto this_zone_full;
2564 
2565 				continue;
2566 			}
2567 		}
2568 
2569 try_this_zone:
2570 		page = buffered_rmqueue(ac->preferred_zone, zone, order,
2571 						gfp_mask, ac->migratetype);
2572 		if (page) {
2573 			if (prep_new_page(page, order, gfp_mask, alloc_flags))
2574 				goto try_this_zone;
2575 			return page;
2576 		}
2577 this_zone_full:
2578 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2579 			zlc_mark_zone_full(zonelist, z);
2580 	}
2581 
2582 	/*
2583 	 * The first pass makes sure allocations are spread fairly within the
2584 	 * local node.  However, the local node might have free pages left
2585 	 * after the fairness batches are exhausted, and remote zones haven't
2586 	 * even been considered yet.  Try once more without fairness, and
2587 	 * include remote zones now, before entering the slowpath and waking
2588 	 * kswapd: prefer spilling to a remote zone over swapping locally.
2589 	 */
2590 	if (alloc_flags & ALLOC_FAIR) {
2591 		alloc_flags &= ~ALLOC_FAIR;
2592 		if (nr_fair_skipped) {
2593 			zonelist_rescan = true;
2594 			reset_alloc_batches(ac->preferred_zone);
2595 		}
2596 		if (nr_online_nodes > 1)
2597 			zonelist_rescan = true;
2598 	}
2599 
2600 	if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
2601 		/* Disable zlc cache for second zonelist scan */
2602 		zlc_active = 0;
2603 		zonelist_rescan = true;
2604 	}
2605 
2606 	if (zonelist_rescan)
2607 		goto zonelist_scan;
2608 
2609 	return NULL;
2610 }
2611 
2612 /*
2613  * Large machines with many possible nodes should not always dump per-node
2614  * meminfo in irq context.
2615  */
2616 static inline bool should_suppress_show_mem(void)
2617 {
2618 	bool ret = false;
2619 
2620 #if NODES_SHIFT > 8
2621 	ret = in_interrupt();
2622 #endif
2623 	return ret;
2624 }
2625 
2626 static DEFINE_RATELIMIT_STATE(nopage_rs,
2627 		DEFAULT_RATELIMIT_INTERVAL,
2628 		DEFAULT_RATELIMIT_BURST);
2629 
2630 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2631 {
2632 	unsigned int filter = SHOW_MEM_FILTER_NODES;
2633 
2634 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2635 	    debug_guardpage_minorder() > 0)
2636 		return;
2637 
2638 	/*
2639 	 * This documents exceptions given to allocations in certain
2640 	 * contexts that are allowed to allocate outside current's set
2641 	 * of allowed nodes.
2642 	 */
2643 	if (!(gfp_mask & __GFP_NOMEMALLOC))
2644 		if (test_thread_flag(TIF_MEMDIE) ||
2645 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
2646 			filter &= ~SHOW_MEM_FILTER_NODES;
2647 	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2648 		filter &= ~SHOW_MEM_FILTER_NODES;
2649 
2650 	if (fmt) {
2651 		struct va_format vaf;
2652 		va_list args;
2653 
2654 		va_start(args, fmt);
2655 
2656 		vaf.fmt = fmt;
2657 		vaf.va = &args;
2658 
2659 		pr_warn("%pV", &vaf);
2660 
2661 		va_end(args);
2662 	}
2663 
2664 	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2665 		current->comm, order, gfp_mask);
2666 
2667 	dump_stack();
2668 	if (!should_suppress_show_mem())
2669 		show_mem(filter);
2670 }
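
/*
 * Editorial sketch: a hypothetical caller passing extra context through
 * the printf-style fmt argument (a NULL fmt, as used elsewhere in this
 * file, skips the custom line):
 *
 *	warn_alloc_failed(gfp_mask, order,
 *			  "example: backing allocation failed\n");
 */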
2671 
2672 static inline struct page *
2673 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2674 	const struct alloc_context *ac, unsigned long *did_some_progress)
2675 {
2676 	struct page *page;
2677 
2678 	*did_some_progress = 0;
2679 
2680 	/*
2681 	 * Acquire the oom lock.  If that fails, somebody else is
2682 	 * making progress for us.
2683 	 */
2684 	if (!mutex_trylock(&oom_lock)) {
2685 		*did_some_progress = 1;
2686 		schedule_timeout_uninterruptible(1);
2687 		return NULL;
2688 	}
2689 
2690 	/*
2691 	 * Go through the zonelist yet one more time, keep very high watermark
2692 	 * here, this is only to catch a parallel oom killing, we must fail if
2693 	 * we're still under heavy pressure.
2694 	 */
2695 	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2696 					ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
2697 	if (page)
2698 		goto out;
2699 
2700 	if (!(gfp_mask & __GFP_NOFAIL)) {
2701 		/* Coredumps can quickly deplete all memory reserves */
2702 		if (current->flags & PF_DUMPCORE)
2703 			goto out;
2704 		/* The OOM killer will not help higher order allocs */
2705 		if (order > PAGE_ALLOC_COSTLY_ORDER)
2706 			goto out;
2707 		/* The OOM killer does not needlessly kill tasks for lowmem */
2708 		if (ac->high_zoneidx < ZONE_NORMAL)
2709 			goto out;
2710 		/* The OOM killer does not compensate for IO-less reclaim */
2711 		if (!(gfp_mask & __GFP_FS)) {
2712 			/*
2713 			 * XXX: Page reclaim didn't yield anything,
2714 			 * and the OOM killer can't be invoked, but
2715 			 * keep looping as per tradition.
2716 			 */
2717 			*did_some_progress = 1;
2718 			goto out;
2719 		}
2720 		if (pm_suspended_storage())
2721 			goto out;
2722 		/* The OOM killer may not free memory on a specific node */
2723 		if (gfp_mask & __GFP_THISNODE)
2724 			goto out;
2725 	}
2726 	/* Exhausted what can be done so it's blamo time */
2727 	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
2728 			|| WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
2729 		*did_some_progress = 1;
2730 out:
2731 	mutex_unlock(&oom_lock);
2732 	return page;
2733 }
2734 
2735 #ifdef CONFIG_COMPACTION
2736 /* Try memory compaction for high-order allocations before reclaim */
2737 static struct page *
2738 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2739 		int alloc_flags, const struct alloc_context *ac,
2740 		enum migrate_mode mode, int *contended_compaction,
2741 		bool *deferred_compaction)
2742 {
2743 	unsigned long compact_result;
2744 	struct page *page;
2745 
2746 	if (!order)
2747 		return NULL;
2748 
2749 	current->flags |= PF_MEMALLOC;
2750 	compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2751 						mode, contended_compaction);
2752 	current->flags &= ~PF_MEMALLOC;
2753 
2754 	switch (compact_result) {
2755 	case COMPACT_DEFERRED:
2756 		*deferred_compaction = true;
2757 		/* fall-through */
2758 	case COMPACT_SKIPPED:
2759 		return NULL;
2760 	default:
2761 		break;
2762 	}
2763 
2764 	/*
2765 	 * At least in one zone compaction wasn't deferred or skipped, so let's
2766 	 * count a compaction stall
2767 	 */
2768 	count_vm_event(COMPACTSTALL);
2769 
2770 	page = get_page_from_freelist(gfp_mask, order,
2771 					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2772 
2773 	if (page) {
2774 		struct zone *zone = page_zone(page);
2775 
2776 		zone->compact_blockskip_flush = false;
2777 		compaction_defer_reset(zone, order, true);
2778 		count_vm_event(COMPACTSUCCESS);
2779 		return page;
2780 	}
2781 
2782 	/*
2783 	 * It's bad if a compaction run occurs and fails. The most likely reason
2784 	 * is that pages exist, but not enough to satisfy watermarks.
2785 	 */
2786 	count_vm_event(COMPACTFAIL);
2787 
2788 	cond_resched();
2789 
2790 	return NULL;
2791 }
2792 #else
2793 static inline struct page *
2794 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2795 		int alloc_flags, const struct alloc_context *ac,
2796 		enum migrate_mode mode, int *contended_compaction,
2797 		bool *deferred_compaction)
2798 {
2799 	return NULL;
2800 }
2801 #endif /* CONFIG_COMPACTION */
2802 
2803 /* Perform direct synchronous page reclaim */
2804 static int
2805 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
2806 					const struct alloc_context *ac)
2807 {
2808 	struct reclaim_state reclaim_state;
2809 	int progress;
2810 
2811 	cond_resched();
2812 
2813 	/* We now go into synchronous reclaim */
2814 	cpuset_memory_pressure_bump();
2815 	current->flags |= PF_MEMALLOC;
2816 	lockdep_set_current_reclaim_state(gfp_mask);
2817 	reclaim_state.reclaimed_slab = 0;
2818 	current->reclaim_state = &reclaim_state;
2819 
2820 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2821 								ac->nodemask);
2822 
2823 	current->reclaim_state = NULL;
2824 	lockdep_clear_current_reclaim_state();
2825 	current->flags &= ~PF_MEMALLOC;
2826 
2827 	cond_resched();
2828 
2829 	return progress;
2830 }
2831 
2832 /* The really slow allocator path where we enter direct reclaim */
2833 static inline struct page *
2834 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2835 		int alloc_flags, const struct alloc_context *ac,
2836 		unsigned long *did_some_progress)
2837 {
2838 	struct page *page = NULL;
2839 	bool drained = false;
2840 
2841 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
2842 	if (unlikely(!(*did_some_progress)))
2843 		return NULL;
2844 
2845 	/* After successful reclaim, reconsider all zones for allocation */
2846 	if (IS_ENABLED(CONFIG_NUMA))
2847 		zlc_clear_zones_full(ac->zonelist);
2848 
2849 retry:
2850 	page = get_page_from_freelist(gfp_mask, order,
2851 					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2852 
2853 	/*
2854 	 * If an allocation failed after direct reclaim, it could be because
2855 	 * pages are pinned on the per-cpu lists. Drain them and try again
2856 	 */
2857 	if (!page && !drained) {
2858 		drain_all_pages(NULL);
2859 		drained = true;
2860 		goto retry;
2861 	}
2862 
2863 	return page;
2864 }
2865 
2866 /*
2867  * This is called in the allocator slow-path if the allocation request is of
2868  * sufficient urgency to ignore watermarks and take other desperate measures
2869  */
2870 static inline struct page *
2871 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2872 				const struct alloc_context *ac)
2873 {
2874 	struct page *page;
2875 
2876 	do {
2877 		page = get_page_from_freelist(gfp_mask, order,
2878 						ALLOC_NO_WATERMARKS, ac);
2879 
2880 		if (!page && gfp_mask & __GFP_NOFAIL)
2881 			wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
2882 									HZ/50);
2883 	} while (!page && (gfp_mask & __GFP_NOFAIL));
2884 
2885 	return page;
2886 }
2887 
2888 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
2889 {
2890 	struct zoneref *z;
2891 	struct zone *zone;
2892 
2893 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2894 						ac->high_zoneidx, ac->nodemask)
2895 		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
2896 }
2897 
2898 static inline int
2899 gfp_to_alloc_flags(gfp_t gfp_mask)
2900 {
2901 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2902 	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2903 
2904 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2905 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2906 
2907 	/*
2908 	 * The caller may dip into page reserves a bit more if the caller
2909 	 * cannot run direct reclaim, or if the caller has realtime scheduling
2910 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2911 	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2912 	 */
2913 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2914 
2915 	if (atomic) {
2916 		/*
2917 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2918 		 * if it can't schedule.
2919 		 */
2920 		if (!(gfp_mask & __GFP_NOMEMALLOC))
2921 			alloc_flags |= ALLOC_HARDER;
2922 		/*
2923 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2924 		 * comment for __cpuset_node_allowed().
2925 		 */
2926 		alloc_flags &= ~ALLOC_CPUSET;
2927 	} else if (unlikely(rt_task(current)) && !in_interrupt())
2928 		alloc_flags |= ALLOC_HARDER;
2929 
2930 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2931 		if (gfp_mask & __GFP_MEMALLOC)
2932 			alloc_flags |= ALLOC_NO_WATERMARKS;
2933 		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2934 			alloc_flags |= ALLOC_NO_WATERMARKS;
2935 		else if (!in_interrupt() &&
2936 				((current->flags & PF_MEMALLOC) ||
2937 				 unlikely(test_thread_flag(TIF_MEMDIE))))
2938 			alloc_flags |= ALLOC_NO_WATERMARKS;
2939 	}
2940 #ifdef CONFIG_CMA
2941 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2942 		alloc_flags |= ALLOC_CMA;
2943 #endif
2944 	return alloc_flags;
2945 }
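
/*
 * Worked example: GFP_ATOMIC is __GFP_HIGH without __GFP_WAIT, so
 * "atomic" above is true and the result is
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET
 * cleared. GFP_KERNEL includes __GFP_WAIT, so it keeps just
 * ALLOC_WMARK_MIN | ALLOC_CPUSET (plus ALLOC_CMA when the mask maps
 * to MIGRATE_MOVABLE).
 */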
2946 
2947 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2948 {
2949 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2950 }
2951 
2952 static inline struct page *
2953 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2954 						struct alloc_context *ac)
2955 {
2956 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2957 	struct page *page = NULL;
2958 	int alloc_flags;
2959 	unsigned long pages_reclaimed = 0;
2960 	unsigned long did_some_progress;
2961 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
2962 	bool deferred_compaction = false;
2963 	int contended_compaction = COMPACT_CONTENDED_NONE;
2964 
2965 	/*
2966 	 * In the slowpath, we sanity check order to avoid ever trying to
2967 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2968 	 * be using allocators in order of preference for an area that is
2969 	 * too large.
2970 	 */
2971 	if (order >= MAX_ORDER) {
2972 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2973 		return NULL;
2974 	}
2975 
2976 	/*
2977 	 * If this allocation cannot block and it is for a specific node, then
2978 	 * fail early.  There's no need to wakeup kswapd or retry for a
2979 	 * speculative node-specific allocation.
2980 	 */
2981 	if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
2982 		goto nopage;
2983 
2984 retry:
2985 	if (!(gfp_mask & __GFP_NO_KSWAPD))
2986 		wake_all_kswapds(order, ac);
2987 
2988 	/*
2989 	 * OK, we're below the kswapd watermark and have kicked background
2990 	 * reclaim. Now things get more complex, so set up alloc_flags according
2991 	 * to how we want to proceed.
2992 	 */
2993 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2994 
2995 	/*
2996 	 * Find the true preferred zone if the allocation is unconstrained by
2997 	 * cpusets.
2998 	 */
2999 	if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
3000 		struct zoneref *preferred_zoneref;
3001 		preferred_zoneref = first_zones_zonelist(ac->zonelist,
3002 				ac->high_zoneidx, NULL, &ac->preferred_zone);
3003 		ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
3004 	}
3005 
3006 	/* This is the last chance, in general, before the goto nopage. */
3007 	page = get_page_from_freelist(gfp_mask, order,
3008 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
3009 	if (page)
3010 		goto got_pg;
3011 
3012 	/* Allocate without watermarks if the context allows */
3013 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
3014 		/*
3015 		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3016 		 * the allocation is high priority and this type of
3017 		 * allocation is system rather than user oriented.
3018 		 */
3019 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3020 
3021 		page = __alloc_pages_high_priority(gfp_mask, order, ac);
3022 
3023 		if (page)
3024 			goto got_pg;
3026 	}
3027 
3028 	/* Atomic allocations - we can't balance anything */
3029 	if (!wait) {
3030 		/*
3031 		 * All existing users of the deprecated __GFP_NOFAIL are
3032 		 * blockable, so warn of any new users that actually allow this
3033 		 * type of allocation to fail.
3034 		 */
3035 		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
3036 		goto nopage;
3037 	}
3038 
3039 	/* Avoid recursion of direct reclaim */
3040 	if (current->flags & PF_MEMALLOC)
3041 		goto nopage;
3042 
3043 	/* Avoid allocations with no watermarks from looping endlessly */
3044 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3045 		goto nopage;
3046 
3047 	/*
3048 	 * Try direct compaction. The first pass is asynchronous. Subsequent
3049 	 * attempts after direct reclaim are synchronous
3050 	 */
3051 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3052 					migration_mode,
3053 					&contended_compaction,
3054 					&deferred_compaction);
3055 	if (page)
3056 		goto got_pg;
3057 
3058 	/* Checks for THP-specific high-order allocations */
3059 	if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
3060 		/*
3061 		 * If compaction is deferred for high-order allocations, it is
3062 		 * because sync compaction recently failed. If this is the case
3063 		 * and the caller requested a THP allocation, we do not want
3064 		 * to heavily disrupt the system, so we fail the allocation
3065 		 * instead of entering direct reclaim.
3066 		 */
3067 		if (deferred_compaction)
3068 			goto nopage;
3069 
3070 		/*
3071 		 * In all zones where compaction was attempted (and not
3072 		 * deferred or skipped), lock contention has been detected.
3073 		 * For THP allocation we do not want to disrupt the others
3074 		 * so we fallback to base pages instead.
3075 		 */
3076 		if (contended_compaction == COMPACT_CONTENDED_LOCK)
3077 			goto nopage;
3078 
3079 		/*
3080 		 * If compaction was aborted due to need_resched(), we do not
3081 		 * want to further increase allocation latency, unless it is
3082 		 * khugepaged trying to collapse.
3083 		 */
3084 		if (contended_compaction == COMPACT_CONTENDED_SCHED
3085 			&& !(current->flags & PF_KTHREAD))
3086 			goto nopage;
3087 	}
3088 
3089 	/*
3090 	 * It can become very expensive to allocate transparent hugepages at
3091 	 * fault, so use asynchronous memory compaction for THP unless it is
3092 	 * khugepaged trying to collapse.
3093 	 */
3094 	if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
3095 						(current->flags & PF_KTHREAD))
3096 		migration_mode = MIGRATE_SYNC_LIGHT;
3097 
3098 	/* Try direct reclaim and then allocating */
3099 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3100 							&did_some_progress);
3101 	if (page)
3102 		goto got_pg;
3103 
3104 	/* Do not loop if specifically requested */
3105 	if (gfp_mask & __GFP_NORETRY)
3106 		goto noretry;
3107 
3108 	/* Keep reclaiming pages as long as there is reasonable progress */
3109 	pages_reclaimed += did_some_progress;
3110 	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
3111 	    ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
3112 		/* Wait for some write requests to complete then retry */
3113 		wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
3114 		goto retry;
3115 	}
3116 
3117 	/* Reclaim has failed us, start killing things */
3118 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3119 	if (page)
3120 		goto got_pg;
3121 
3122 	/* Retry as long as the OOM killer is making progress */
3123 	if (did_some_progress)
3124 		goto retry;
3125 
3126 noretry:
3127 	/*
3128 	 * High-order allocations do not necessarily loop after direct
3129 	 * reclaim, and reclaim/compaction depends on compaction being
3130 	 * called after reclaim, so call it directly if necessary.
3131 	 */
3132 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3133 					    ac, migration_mode,
3134 					    &contended_compaction,
3135 					    &deferred_compaction);
3136 	if (page)
3137 		goto got_pg;
3138 nopage:
3139 	warn_alloc_failed(gfp_mask, order, NULL);
3140 got_pg:
3141 	return page;
3142 }
3143 
3144 /*
3145  * This is the 'heart' of the zoned buddy allocator.
3146  */
3147 struct page *
3148 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3149 			struct zonelist *zonelist, nodemask_t *nodemask)
3150 {
3151 	struct zoneref *preferred_zoneref;
3152 	struct page *page = NULL;
3153 	unsigned int cpuset_mems_cookie;
3154 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
3155 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
3156 	struct alloc_context ac = {
3157 		.high_zoneidx = gfp_zone(gfp_mask),
3158 		.nodemask = nodemask,
3159 		.migratetype = gfpflags_to_migratetype(gfp_mask),
3160 	};
3161 
3162 	gfp_mask &= gfp_allowed_mask;
3163 
3164 	lockdep_trace_alloc(gfp_mask);
3165 
3166 	might_sleep_if(gfp_mask & __GFP_WAIT);
3167 
3168 	if (should_fail_alloc_page(gfp_mask, order))
3169 		return NULL;
3170 
3171 	/*
3172 	 * Check the zones suitable for the gfp_mask contain at least one
3173 	 * valid zone. It's possible to have an empty zonelist as a result
3174 	 * of __GFP_THISNODE and a memoryless node
3175 	 */
3176 	if (unlikely(!zonelist->_zonerefs->zone))
3177 		return NULL;
3178 
3179 	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3180 		alloc_flags |= ALLOC_CMA;
3181 
3182 retry_cpuset:
3183 	cpuset_mems_cookie = read_mems_allowed_begin();
3184 
3185 	/* We set it here, as __alloc_pages_slowpath might have changed it */
3186 	ac.zonelist = zonelist;
3187 	/* The preferred zone is used for statistics later */
3188 	preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3189 				ac.nodemask ? : &cpuset_current_mems_allowed,
3190 				&ac.preferred_zone);
3191 	if (!ac.preferred_zone)
3192 		goto out;
3193 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
3194 
3195 	/* First allocation attempt */
3196 	alloc_mask = gfp_mask|__GFP_HARDWALL;
3197 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
3198 	if (unlikely(!page)) {
3199 		/*
3200 		 * Runtime PM, block IO and its error handling path
3201 		 * can deadlock because I/O on the device might not
3202 		 * complete.
3203 		 */
3204 		alloc_mask = memalloc_noio_flags(gfp_mask);
3205 
3206 		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3207 	}
3208 
3209 	if (kmemcheck_enabled && page)
3210 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3211 
3212 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3213 
3214 out:
3215 	/*
3216 	 * When updating a task's mems_allowed, it is possible to race with
3217 	 * parallel threads in such a way that an allocation can fail while
3218 	 * the mask is being updated. If a page allocation is about to fail,
3219 	 * check if the cpuset changed during allocation and if so, retry.
3220 	 */
3221 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
3222 		goto retry_cpuset;
3223 
3224 	return page;
3225 }
3226 EXPORT_SYMBOL(__alloc_pages_nodemask);
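
/*
 * Editorial note: the familiar wrappers funnel into this function; on a
 * kernel of this vintage, alloc_pages(GFP_KERNEL, 0) is roughly
 * (ignoring NUMA mempolicy)
 *
 *	__alloc_pages_nodemask(GFP_KERNEL, 0,
 *			       node_zonelist(numa_node_id(), GFP_KERNEL),
 *			       NULL);
 *
 * i.e. the zonelist comes from the current node and no nodemask is set.
 */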
3227 
3228 /*
3229  * Common helper functions.
3230  */
3231 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
3232 {
3233 	struct page *page;
3234 
3235 	/*
3236 	 * __get_free_pages() returns a kernel virtual address, which cannot
3237 	 * represent a highmem page.
3238 	 */
3239 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3240 
3241 	page = alloc_pages(gfp_mask, order);
3242 	if (!page)
3243 		return 0;
3244 	return (unsigned long) page_address(page);
3245 }
3246 EXPORT_SYMBOL(__get_free_pages);
3247 
3248 unsigned long get_zeroed_page(gfp_t gfp_mask)
3249 {
3250 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
3251 }
3252 EXPORT_SYMBOL(get_zeroed_page);
3253 
3254 void __free_pages(struct page *page, unsigned int order)
3255 {
3256 	if (put_page_testzero(page)) {
3257 		if (order == 0)
3258 			free_hot_cold_page(page, false);
3259 		else
3260 			__free_pages_ok(page, order);
3261 	}
3262 }
3263 
3264 EXPORT_SYMBOL(__free_pages);
3265 
3266 void free_pages(unsigned long addr, unsigned int order)
3267 {
3268 	if (addr != 0) {
3269 		VM_BUG_ON(!virt_addr_valid((void *)addr));
3270 		__free_pages(virt_to_page((void *)addr), order);
3271 	}
3272 }
3273 
3274 EXPORT_SYMBOL(free_pages);
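
/*
 * Editorial sketch: the usual pairing of these helpers, here for a
 * hypothetical two-page scratch buffer (example_scratch_buffer() is
 * not kernel API).
 */
static int example_scratch_buffer(void)
{
	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */

	if (!buf)
		return -ENOMEM;
	memset((void *)buf, 0, 2 * PAGE_SIZE);
	/* ... use the buffer ... */
	free_pages(buf, 1);	/* order must match the allocation */
	return 0;
}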
3275 
3276 /*
3277  * Page Fragment:
3278  *  An arbitrary-length arbitrary-offset area of memory which resides
3279  *  within a 0 or higher order page.  Multiple fragments within that page
3280  *  are individually refcounted, in the page's reference counter.
3281  *
3282  * The page_frag functions below provide a simple allocation framework for
3283  * page fragments.  This is used by the network stack and network device
3284  * drivers to provide a backing region of memory for use as either an
3285  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3286  */
3287 static struct page *__page_frag_refill(struct page_frag_cache *nc,
3288 				       gfp_t gfp_mask)
3289 {
3290 	struct page *page = NULL;
3291 	gfp_t gfp = gfp_mask;
3292 
3293 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3294 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3295 		    __GFP_NOMEMALLOC;
3296 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3297 				PAGE_FRAG_CACHE_MAX_ORDER);
3298 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3299 #endif
3300 	if (unlikely(!page))
3301 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3302 
3303 	nc->va = page ? page_address(page) : NULL;
3304 
3305 	return page;
3306 }
3307 
3308 void *__alloc_page_frag(struct page_frag_cache *nc,
3309 			unsigned int fragsz, gfp_t gfp_mask)
3310 {
3311 	unsigned int size = PAGE_SIZE;
3312 	struct page *page;
3313 	int offset;
3314 
3315 	if (unlikely(!nc->va)) {
3316 refill:
3317 		page = __page_frag_refill(nc, gfp_mask);
3318 		if (!page)
3319 			return NULL;
3320 
3321 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3322 		/* if size can vary use size else just use PAGE_SIZE */
3323 		size = nc->size;
3324 #endif
3325 		/* Even if we own the page, we do not use atomic_set().
3326 		 * This would break get_page_unless_zero() users.
3327 		 */
3328 		atomic_add(size - 1, &page->_count);
3329 
3330 		/* reset page count bias and offset to start of new frag */
3331 		nc->pfmemalloc = page->pfmemalloc;
3332 		nc->pagecnt_bias = size;
3333 		nc->offset = size;
3334 	}
3335 
3336 	offset = nc->offset - fragsz;
3337 	if (unlikely(offset < 0)) {
3338 		page = virt_to_page(nc->va);
3339 
3340 		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3341 			goto refill;
3342 
3343 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3344 		/* if size can vary use size else just use PAGE_SIZE */
3345 		size = nc->size;
3346 #endif
3347 		/* OK, page count is 0, we can safely set it */
3348 		atomic_set(&page->_count, size);
3349 
3350 		/* reset page count bias and offset to start of new frag */
3351 		nc->pagecnt_bias = size;
3352 		offset = size - fragsz;
3353 	}
3354 
3355 	nc->pagecnt_bias--;
3356 	nc->offset = offset;
3357 
3358 	return nc->va + offset;
3359 }
3360 EXPORT_SYMBOL(__alloc_page_frag);
3361 
3362 /*
3363  * Frees a page fragment allocated out of either a compound or order 0 page.
3364  */
3365 void __free_page_frag(void *addr)
3366 {
3367 	struct page *page = virt_to_head_page(addr);
3368 
3369 	if (unlikely(put_page_testzero(page)))
3370 		__free_pages_ok(page, compound_order(page));
3371 }
3372 EXPORT_SYMBOL(__free_page_frag);
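
/*
 * Editorial sketch: a typical page-fragment lifecycle as used by
 * networking code (example_frag_alloc() is hypothetical; real users
 * keep a per-CPU struct page_frag_cache).
 */
static void *example_frag_alloc(struct page_frag_cache *nc)
{
	void *data = __alloc_page_frag(nc, 256, GFP_ATOMIC);

	if (!data)
		return NULL;
	/* ... fill in up to 256 bytes ... */
	return data;	/* released later with __free_page_frag(data) */
}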
3373 
3374 /*
3375  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
3376  * of the current memory cgroup.
3377  *
3378  * It should be used when the caller would like to use kmalloc, but since the
3379  * allocation is large, it has to fall back to the page allocator.
3380  */
3381 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3382 {
3383 	struct page *page;
3384 	struct mem_cgroup *memcg = NULL;
3385 
3386 	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3387 		return NULL;
3388 	page = alloc_pages(gfp_mask, order);
3389 	memcg_kmem_commit_charge(page, memcg, order);
3390 	return page;
3391 }
3392 
3393 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3394 {
3395 	struct page *page;
3396 	struct mem_cgroup *memcg = NULL;
3397 
3398 	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3399 		return NULL;
3400 	page = alloc_pages_node(nid, gfp_mask, order);
3401 	memcg_kmem_commit_charge(page, memcg, order);
3402 	return page;
3403 }
3404 
3405 /*
3406  * __free_kmem_pages and free_kmem_pages will free pages allocated with
3407  * alloc_kmem_pages.
3408  */
3409 void __free_kmem_pages(struct page *page, unsigned int order)
3410 {
3411 	memcg_kmem_uncharge_pages(page, order);
3412 	__free_pages(page, order);
3413 }
3414 
3415 void free_kmem_pages(unsigned long addr, unsigned int order)
3416 {
3417 	if (addr != 0) {
3418 		VM_BUG_ON(!virt_addr_valid((void *)addr));
3419 		__free_kmem_pages(virt_to_page((void *)addr), order);
3420 	}
3421 }
3422 
3423 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
3424 {
3425 	if (addr) {
3426 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
3427 		unsigned long used = addr + PAGE_ALIGN(size);
3428 
3429 		split_page(virt_to_page((void *)addr), order);
3430 		while (used < alloc_end) {
3431 			free_page(used);
3432 			used += PAGE_SIZE;
3433 		}
3434 	}
3435 	return (void *)addr;
3436 }
3437 
3438 /**
3439  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
3440  * @size: the number of bytes to allocate
3441  * @gfp_mask: GFP flags for the allocation
3442  *
3443  * This function is similar to alloc_pages(), except that it allocates the
3444  * minimum number of pages to satisfy the request.  alloc_pages() can only
3445  * allocate memory in power-of-two numbers of pages.
3446  *
3447  * This function is also limited by MAX_ORDER.
3448  *
3449  * Memory allocated by this function must be released by free_pages_exact().
3450  */
3451 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3452 {
3453 	unsigned int order = get_order(size);
3454 	unsigned long addr;
3455 
3456 	addr = __get_free_pages(gfp_mask, order);
3457 	return make_alloc_exact(addr, order, size);
3458 }
3459 EXPORT_SYMBOL(alloc_pages_exact);
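
/*
 * Worked example: alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL) computes
 * order = 3 (8 pages), allocates the block, splits it via
 * make_alloc_exact(), and immediately frees the trailing 3 pages, so
 * exactly 5 pages stay allocated. free_pages_exact(p, 5 * PAGE_SIZE)
 * later releases them page by page.
 */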
3460 
3461 /**
3462  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3463  *			   pages on a node.
3464  * @nid: the preferred node ID where memory should be allocated
3465  * @size: the number of bytes to allocate
3466  * @gfp_mask: GFP flags for the allocation
3467  *
3468  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3469  * back.
3470  * Note this is not alloc_pages_exact_node() which allocates on a specific node,
3471  * but is not exact.
3472  */
3473 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3474 {
3475 	unsigned order = get_order(size);
3476 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
3477 	if (!p)
3478 		return NULL;
3479 	return make_alloc_exact((unsigned long)page_address(p), order, size);
3480 }
3481 
3482 /**
3483  * free_pages_exact - release memory allocated via alloc_pages_exact()
3484  * @virt: the value returned by alloc_pages_exact.
3485  * @size: size of allocation, same value as passed to alloc_pages_exact().
3486  *
3487  * Release the memory allocated by a previous call to alloc_pages_exact.
3488  */
3489 void free_pages_exact(void *virt, size_t size)
3490 {
3491 	unsigned long addr = (unsigned long)virt;
3492 	unsigned long end = addr + PAGE_ALIGN(size);
3493 
3494 	while (addr < end) {
3495 		free_page(addr);
3496 		addr += PAGE_SIZE;
3497 	}
3498 }
3499 EXPORT_SYMBOL(free_pages_exact);
3500 
3501 /**
3502  * nr_free_zone_pages - count number of pages beyond high watermark
3503  * @offset: The zone index of the highest zone
3504  *
3505  * nr_free_zone_pages() counts the number of pages which are beyond the
3506  * high watermark within all zones at or below a given zone index.  For each
3507  * zone, the number of pages is calculated as:
3508  *     managed_pages - high_pages
3509  */
3510 static unsigned long nr_free_zone_pages(int offset)
3511 {
3512 	struct zoneref *z;
3513 	struct zone *zone;
3514 
3515 	/* Just pick one node, since fallback list is circular */
3516 	unsigned long sum = 0;
3517 
3518 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3519 
3520 	for_each_zone_zonelist(zone, z, zonelist, offset) {
3521 		unsigned long size = zone->managed_pages;
3522 		unsigned long high = high_wmark_pages(zone);
3523 		if (size > high)
3524 			sum += size - high;
3525 	}
3526 
3527 	return sum;
3528 }
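
/*
 * For example, with hypothetical zones DMA (managed_pages = 4096,
 * high watermark = 128) and Normal (managed_pages = 200000, high
 * watermark = 4000), an @offset covering both returns
 * (4096 - 128) + (200000 - 4000) = 199968 pages.
 */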
3529 
3530 /**
3531  * nr_free_buffer_pages - count number of pages beyond high watermark
3532  *
3533  * nr_free_buffer_pages() counts the number of pages which are beyond the high
3534  * watermark within ZONE_DMA and ZONE_NORMAL.
3535  */
3536 unsigned long nr_free_buffer_pages(void)
3537 {
3538 	return nr_free_zone_pages(gfp_zone(GFP_USER));
3539 }
3540 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3541 
3542 /**
3543  * nr_free_pagecache_pages - count number of pages beyond high watermark
3544  *
3545  * nr_free_pagecache_pages() counts the number of pages which are beyond the
3546  * high watermark within all zones.
3547  */
3548 unsigned long nr_free_pagecache_pages(void)
3549 {
3550 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
3551 }
3552 
3553 static inline void show_node(struct zone *zone)
3554 {
3555 	if (IS_ENABLED(CONFIG_NUMA))
3556 		printk("Node %d ", zone_to_nid(zone));
3557 }
3558 
3559 void si_meminfo(struct sysinfo *val)
3560 {
3561 	val->totalram = totalram_pages;
3562 	val->sharedram = global_page_state(NR_SHMEM);
3563 	val->freeram = global_page_state(NR_FREE_PAGES);
3564 	val->bufferram = nr_blockdev_pages();
3565 	val->totalhigh = totalhigh_pages;
3566 	val->freehigh = nr_free_highpages();
3567 	val->mem_unit = PAGE_SIZE;
3568 }
3569 
3570 EXPORT_SYMBOL(si_meminfo);
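
/*
 * A minimal caller sketch; all page counts are reported in units of
 * val->mem_unit (PAGE_SIZE) bytes:
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	pr_info("free: %lu kB\n", si.freeram * (si.mem_unit / 1024));
 */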
3571 
3572 #ifdef CONFIG_NUMA
3573 void si_meminfo_node(struct sysinfo *val, int nid)
3574 {
3575 	int zone_type;		/* needs to be signed */
3576 	unsigned long managed_pages = 0;
3577 	pg_data_t *pgdat = NODE_DATA(nid);
3578 
3579 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3580 		managed_pages += pgdat->node_zones[zone_type].managed_pages;
3581 	val->totalram = managed_pages;
3582 	val->sharedram = node_page_state(nid, NR_SHMEM);
3583 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
3584 #ifdef CONFIG_HIGHMEM
3585 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
3586 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3587 			NR_FREE_PAGES);
3588 #else
3589 	val->totalhigh = 0;
3590 	val->freehigh = 0;
3591 #endif
3592 	val->mem_unit = PAGE_SIZE;
3593 }
3594 #endif
3595 
3596 /*
3597  * Determine whether the node should be displayed or not, depending on whether
3598  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
3599  */
3600 bool skip_free_areas_node(unsigned int flags, int nid)
3601 {
3602 	bool ret = false;
3603 	unsigned int cpuset_mems_cookie;
3604 
3605 	if (!(flags & SHOW_MEM_FILTER_NODES))
3606 		goto out;
3607 
3608 	do {
3609 		cpuset_mems_cookie = read_mems_allowed_begin();
3610 		ret = !node_isset(nid, cpuset_current_mems_allowed);
3611 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
3612 out:
3613 	return ret;
3614 }
3615 
3616 #define K(x) ((x) << (PAGE_SHIFT-10))
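
/* e.g. with 4K pages (PAGE_SHIFT == 12), K(256) == 256 << 2 == 1024kB */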
3617 
3618 static void show_migration_types(unsigned char type)
3619 {
3620 	static const char types[MIGRATE_TYPES] = {
3621 		[MIGRATE_UNMOVABLE]	= 'U',
3622 		[MIGRATE_RECLAIMABLE]	= 'E',
3623 		[MIGRATE_MOVABLE]	= 'M',
3624 		[MIGRATE_RESERVE]	= 'R',
3625 #ifdef CONFIG_CMA
3626 		[MIGRATE_CMA]		= 'C',
3627 #endif
3628 #ifdef CONFIG_MEMORY_ISOLATION
3629 		[MIGRATE_ISOLATE]	= 'I',
3630 #endif
3631 	};
3632 	char tmp[MIGRATE_TYPES + 1];
3633 	char *p = tmp;
3634 	int i;
3635 
3636 	for (i = 0; i < MIGRATE_TYPES; i++) {
3637 		if (type & (1 << i))
3638 			*p++ = types[i];
3639 	}
3640 
3641 	*p = '\0';
3642 	printk("(%s) ", tmp);
3643 }
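
/*
 * For instance, a @type with the MIGRATE_UNMOVABLE and MIGRATE_MOVABLE
 * bits set prints "(UM) "; the letters always follow the migratetype
 * enum order, not the order in which the bits were set.
 */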
3644 
3645 /*
3646  * Show free area list (used e.g. by the Shift+Scroll Lock console key).
3647  * We also calculate the percentage fragmentation. We do this by counting the
3648  * memory on each free list with the exception of the first item on the list.
3649  *
3650  * Bits in @filter:
3651  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3652  *   cpuset.
3653  */
3654 void show_free_areas(unsigned int filter)
3655 {
3656 	unsigned long free_pcp = 0;
3657 	int cpu;
3658 	struct zone *zone;
3659 
3660 	for_each_populated_zone(zone) {
3661 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3662 			continue;
3663 
3664 		for_each_online_cpu(cpu)
3665 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3666 	}
3667 
3668 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3669 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
3670 		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3671 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
3672 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3673 		" free:%lu free_pcp:%lu free_cma:%lu\n",
3674 		global_page_state(NR_ACTIVE_ANON),
3675 		global_page_state(NR_INACTIVE_ANON),
3676 		global_page_state(NR_ISOLATED_ANON),
3677 		global_page_state(NR_ACTIVE_FILE),
3678 		global_page_state(NR_INACTIVE_FILE),
3679 		global_page_state(NR_ISOLATED_FILE),
3680 		global_page_state(NR_UNEVICTABLE),
3681 		global_page_state(NR_FILE_DIRTY),
3682 		global_page_state(NR_WRITEBACK),
3683 		global_page_state(NR_UNSTABLE_NFS),
3684 		global_page_state(NR_SLAB_RECLAIMABLE),
3685 		global_page_state(NR_SLAB_UNRECLAIMABLE),
3686 		global_page_state(NR_FILE_MAPPED),
3687 		global_page_state(NR_SHMEM),
3688 		global_page_state(NR_PAGETABLE),
3689 		global_page_state(NR_BOUNCE),
3690 		global_page_state(NR_FREE_PAGES),
3691 		free_pcp,
3692 		global_page_state(NR_FREE_CMA_PAGES));
3693 
3694 	for_each_populated_zone(zone) {
3695 		int i;
3696 
3697 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3698 			continue;
3699 
3700 		free_pcp = 0;
3701 		for_each_online_cpu(cpu)
3702 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3703 
3704 		show_node(zone);
3705 		printk("%s"
3706 			" free:%lukB"
3707 			" min:%lukB"
3708 			" low:%lukB"
3709 			" high:%lukB"
3710 			" active_anon:%lukB"
3711 			" inactive_anon:%lukB"
3712 			" active_file:%lukB"
3713 			" inactive_file:%lukB"
3714 			" unevictable:%lukB"
3715 			" isolated(anon):%lukB"
3716 			" isolated(file):%lukB"
3717 			" present:%lukB"
3718 			" managed:%lukB"
3719 			" mlocked:%lukB"
3720 			" dirty:%lukB"
3721 			" writeback:%lukB"
3722 			" mapped:%lukB"
3723 			" shmem:%lukB"
3724 			" slab_reclaimable:%lukB"
3725 			" slab_unreclaimable:%lukB"
3726 			" kernel_stack:%lukB"
3727 			" pagetables:%lukB"
3728 			" unstable:%lukB"
3729 			" bounce:%lukB"
3730 			" free_pcp:%lukB"
3731 			" local_pcp:%ukB"
3732 			" free_cma:%lukB"
3733 			" writeback_tmp:%lukB"
3734 			" pages_scanned:%lu"
3735 			" all_unreclaimable? %s"
3736 			"\n",
3737 			zone->name,
3738 			K(zone_page_state(zone, NR_FREE_PAGES)),
3739 			K(min_wmark_pages(zone)),
3740 			K(low_wmark_pages(zone)),
3741 			K(high_wmark_pages(zone)),
3742 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
3743 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
3744 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
3745 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
3746 			K(zone_page_state(zone, NR_UNEVICTABLE)),
3747 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
3748 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
3749 			K(zone->present_pages),
3750 			K(zone->managed_pages),
3751 			K(zone_page_state(zone, NR_MLOCK)),
3752 			K(zone_page_state(zone, NR_FILE_DIRTY)),
3753 			K(zone_page_state(zone, NR_WRITEBACK)),
3754 			K(zone_page_state(zone, NR_FILE_MAPPED)),
3755 			K(zone_page_state(zone, NR_SHMEM)),
3756 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3757 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3758 			zone_page_state(zone, NR_KERNEL_STACK) *
3759 				THREAD_SIZE / 1024,
3760 			K(zone_page_state(zone, NR_PAGETABLE)),
3761 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3762 			K(zone_page_state(zone, NR_BOUNCE)),
3763 			K(free_pcp),
3764 			K(this_cpu_read(zone->pageset->pcp.count)),
3765 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3766 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3767 			K(zone_page_state(zone, NR_PAGES_SCANNED)),
3768 			(!zone_reclaimable(zone) ? "yes" : "no")
3769 			);
3770 		printk("lowmem_reserve[]:");
3771 		for (i = 0; i < MAX_NR_ZONES; i++)
3772 			printk(" %ld", zone->lowmem_reserve[i]);
3773 		printk("\n");
3774 	}
3775 
3776 	for_each_populated_zone(zone) {
3777 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
3778 		unsigned char types[MAX_ORDER];
3779 
3780 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
3781 			continue;
3782 		show_node(zone);
3783 		printk("%s: ", zone->name);
3784 
3785 		spin_lock_irqsave(&zone->lock, flags);
3786 		for (order = 0; order < MAX_ORDER; order++) {
3787 			struct free_area *area = &zone->free_area[order];
3788 			int type;
3789 
3790 			nr[order] = area->nr_free;
3791 			total += nr[order] << order;
3792 
3793 			types[order] = 0;
3794 			for (type = 0; type < MIGRATE_TYPES; type++) {
3795 				if (!list_empty(&area->free_list[type]))
3796 					types[order] |= 1 << type;
3797 			}
3798 		}
3799 		spin_unlock_irqrestore(&zone->lock, flags);
3800 		for (order = 0; order < MAX_ORDER; order++) {
3801 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
3802 			if (nr[order])
3803 				show_migration_types(types[order]);
3804 		}
3805 		printk("= %lukB\n", K(total));
3806 	}
3807 
3808 	hugetlb_show_meminfo();
3809 
3810 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3811 
3812 	show_swap_cache_info();
3813 }
3814 
3815 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3816 {
3817 	zoneref->zone = zone;
3818 	zoneref->zone_idx = zone_idx(zone);
3819 }
3820 
3821 /*
3822  * Builds allocation fallback zone lists.
3823  *
3824  * Add all populated zones of a node to the zonelist.
3825  */
3826 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3827 				int nr_zones)
3828 {
3829 	struct zone *zone;
3830 	enum zone_type zone_type = MAX_NR_ZONES;
3831 
3832 	do {
3833 		zone_type--;
3834 		zone = pgdat->node_zones + zone_type;
3835 		if (populated_zone(zone)) {
3836 			zoneref_set_zone(zone,
3837 				&zonelist->_zonerefs[nr_zones++]);
3838 			check_highest_zone(zone_type);
3839 		}
3840 	} while (zone_type);
3841 
3842 	return nr_zones;
3843 }
3844 
3845 
3846 /*
3847  *  zonelist_order:
3848  *  0 = automatic detection of better ordering.
3849  *  1 = order by ([node] distance, -zonetype)
3850  *  2 = order by (-zonetype, [node] distance)
3851  *
3852  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3853  *  the same zonelist. So only NUMA can configure this param.
3854  */
3855 #define ZONELIST_ORDER_DEFAULT  0
3856 #define ZONELIST_ORDER_NODE     1
3857 #define ZONELIST_ORDER_ZONE     2
3858 
3859 /* zonelist order in the kernel.
3860  * set_zonelist_order() will set this to NODE or ZONE.
3861  */
3862 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3863 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3864 
3865 
3866 #ifdef CONFIG_NUMA
3867 /* The value the user specified, possibly changed by config */
3868 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3869 /* string for sysctl */
3870 #define NUMA_ZONELIST_ORDER_LEN	16
3871 char numa_zonelist_order[16] = "default";
3872 
3873 /*
3874  * interface for configuring zonelist ordering.
3875  * command line option "numa_zonelist_order"
3876  *	= "[dD]efault"	- default, automatic configuration.
3877  *	= "[nN]ode"	- order by node locality, then by zone within node
3878  *	= "[zZ]one"	- order by zone, then by locality within zone
3879  */
3880 
3881 static int __parse_numa_zonelist_order(char *s)
3882 {
3883 	if (*s == 'd' || *s == 'D') {
3884 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3885 	} else if (*s == 'n' || *s == 'N') {
3886 		user_zonelist_order = ZONELIST_ORDER_NODE;
3887 	} else if (*s == 'z' || *s == 'Z') {
3888 		user_zonelist_order = ZONELIST_ORDER_ZONE;
3889 	} else {
3890 		printk(KERN_WARNING
3891 			"Ignoring invalid numa_zonelist_order value:  "
3892 			"%s\n", s);
3893 		return -EINVAL;
3894 	}
3895 	return 0;
3896 }
3897 
3898 static __init int setup_numa_zonelist_order(char *s)
3899 {
3900 	int ret;
3901 
3902 	if (!s)
3903 		return 0;
3904 
3905 	ret = __parse_numa_zonelist_order(s);
3906 	if (ret == 0)
3907 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3908 
3909 	return ret;
3910 }
3911 early_param("numa_zonelist_order", setup_numa_zonelist_order);
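
/*
 * For example, booting with "numa_zonelist_order=zone" (or just "=z")
 * selects zone ordering; only the first character of the value is
 * examined by __parse_numa_zonelist_order().
 */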
3912 
3913 /*
3914  * sysctl handler for numa_zonelist_order
3915  */
3916 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3917 		void __user *buffer, size_t *length,
3918 		loff_t *ppos)
3919 {
3920 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
3921 	int ret;
3922 	static DEFINE_MUTEX(zl_order_mutex);
3923 
3924 	mutex_lock(&zl_order_mutex);
3925 	if (write) {
3926 		if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3927 			ret = -EINVAL;
3928 			goto out;
3929 		}
3930 		strcpy(saved_string, (char *)table->data);
3931 	}
3932 	ret = proc_dostring(table, write, buffer, length, ppos);
3933 	if (ret)
3934 		goto out;
3935 	if (write) {
3936 		int oldval = user_zonelist_order;
3937 
3938 		ret = __parse_numa_zonelist_order((char *)table->data);
3939 		if (ret) {
3940 			/*
3941 			 * bogus value.  restore saved string
3942 			 */
3943 			strncpy((char *)table->data, saved_string,
3944 				NUMA_ZONELIST_ORDER_LEN);
3945 			user_zonelist_order = oldval;
3946 		} else if (oldval != user_zonelist_order) {
3947 			mutex_lock(&zonelists_mutex);
3948 			build_all_zonelists(NULL, NULL);
3949 			mutex_unlock(&zonelists_mutex);
3950 		}
3951 	}
3952 out:
3953 	mutex_unlock(&zl_order_mutex);
3954 	return ret;
3955 }
3956 
3957 
3958 #define MAX_NODE_LOAD (nr_online_nodes)
3959 static int node_load[MAX_NUMNODES];
3960 
3961 /**
3962  * find_next_best_node - find the next node that should appear in a given node's fallback list
3963  * @node: node whose fallback list we're appending
3964  * @used_node_mask: nodemask_t of already used nodes
3965  *
3966  * We use a number of factors to determine which is the next node that should
3967  * appear on a given node's fallback list.  The node should not have appeared
3968  * already in @node's fallback list, and it should be the next closest node
3969  * according to the distance array (which contains arbitrary distance values
3970  * from each node to each node in the system), and should also prefer nodes
3971  * with no CPUs, since presumably they'll have very little allocation pressure
3972  * on them otherwise.
3973  * It returns -1 if no node is found.
3974  */
3975 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3976 {
3977 	int n, val;
3978 	int min_val = INT_MAX;
3979 	int best_node = NUMA_NO_NODE;
3980 	const struct cpumask *tmp = cpumask_of_node(0);
3981 
3982 	/* Use the local node if we haven't already */
3983 	if (!node_isset(node, *used_node_mask)) {
3984 		node_set(node, *used_node_mask);
3985 		return node;
3986 	}
3987 
3988 	for_each_node_state(n, N_MEMORY) {
3989 
3990 		/* Don't want a node to appear more than once */
3991 		if (node_isset(n, *used_node_mask))
3992 			continue;
3993 
3994 		/* Use the distance array to find the distance */
3995 		val = node_distance(node, n);
3996 
3997 		/* Penalize nodes under us ("prefer the next node") */
3998 		val += (n < node);
3999 
4000 		/* Give preference to headless and unused nodes */
4001 		tmp = cpumask_of_node(n);
4002 		if (!cpumask_empty(tmp))
4003 			val += PENALTY_FOR_NODE_WITH_CPUS;
4004 
4005 		/* Slight preference for less loaded node */
4006 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4007 		val += node_load[n];
4008 
4009 		if (val < min_val) {
4010 			min_val = val;
4011 			best_node = n;
4012 		}
4013 	}
4014 
4015 	if (best_node >= 0)
4016 		node_set(best_node, *used_node_mask);
4017 
4018 	return best_node;
4019 }
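
/*
 * A worked example with made-up distances: from node 0, candidates 1
 * and 2 at distances 20 and 10, both with CPUs (penalty 1) and zero
 * node_load, score 21 and 11 respectively before scaling, so node 2
 * is picked.  Because the distance term is multiplied by
 * MAX_NODE_LOAD * MAX_NUMNODES, node_load only breaks ties between
 * otherwise equally attractive candidates.
 */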
4020 
4021 
4022 /*
4023  * Build zonelists ordered by node and zones within node.
4024  * This results in maximum locality--normal zone overflows into local
4025  * DMA zone, if any--but risks exhausting DMA zone.
4026  */
4027 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4028 {
4029 	int j;
4030 	struct zonelist *zonelist;
4031 
4032 	zonelist = &pgdat->node_zonelists[0];
4033 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4034 		;
4035 	j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4036 	zonelist->_zonerefs[j].zone = NULL;
4037 	zonelist->_zonerefs[j].zone_idx = 0;
4038 }
4039 
4040 /*
4041  * Build gfp_thisnode zonelists
4042  */
4043 static void build_thisnode_zonelists(pg_data_t *pgdat)
4044 {
4045 	int j;
4046 	struct zonelist *zonelist;
4047 
4048 	zonelist = &pgdat->node_zonelists[1];
4049 	j = build_zonelists_node(pgdat, zonelist, 0);
4050 	zonelist->_zonerefs[j].zone = NULL;
4051 	zonelist->_zonerefs[j].zone_idx = 0;
4052 }
4053 
4054 /*
4055  * Build zonelists ordered by zone and nodes within zones.
4056  * This results in conserving DMA zone[s] until all Normal memory is
4057  * exhausted, but results in overflowing to remote node while memory
4058  * may still exist in local DMA zone.
4059  */
4060 static int node_order[MAX_NUMNODES];
4061 
4062 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4063 {
4064 	int pos, j, node;
4065 	int zone_type;		/* needs to be signed */
4066 	struct zone *z;
4067 	struct zonelist *zonelist;
4068 
4069 	zonelist = &pgdat->node_zonelists[0];
4070 	pos = 0;
4071 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4072 		for (j = 0; j < nr_nodes; j++) {
4073 			node = node_order[j];
4074 			z = &NODE_DATA(node)->node_zones[zone_type];
4075 			if (populated_zone(z)) {
4076 				zoneref_set_zone(z,
4077 					&zonelist->_zonerefs[pos++]);
4078 				check_highest_zone(zone_type);
4079 			}
4080 		}
4081 	}
4082 	zonelist->_zonerefs[pos].zone = NULL;
4083 	zonelist->_zonerefs[pos].zone_idx = 0;
4084 }
4085 
4086 #if defined(CONFIG_64BIT)
4087 /*
4088  * Devices that require DMA32/DMA are relatively rare and do not justify a
4089  * penalty to every machine in case the specialised case applies. Default
4090  * to Node-ordering on 64-bit NUMA machines.
4091  */
4092 static int default_zonelist_order(void)
4093 {
4094 	return ZONELIST_ORDER_NODE;
4095 }
4096 #else
4097 /*
4098  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4099  * by the kernel. If processes running on node 0 deplete the low memory zone
4100  * then reclaim will occur more frequently, increasing stalls, and it will
4101  * potentially be easier to OOM if a large percentage of the zone is under writeback or
4102  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4103  * Hence, default to zone ordering on 32-bit.
4104  */
4105 static int default_zonelist_order(void)
4106 {
4107 	return ZONELIST_ORDER_ZONE;
4108 }
4109 #endif /* CONFIG_64BIT */
4110 
4111 static void set_zonelist_order(void)
4112 {
4113 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4114 		current_zonelist_order = default_zonelist_order();
4115 	else
4116 		current_zonelist_order = user_zonelist_order;
4117 }
4118 
4119 static void build_zonelists(pg_data_t *pgdat)
4120 {
4121 	int j, node, load;
4122 	enum zone_type i;
4123 	nodemask_t used_mask;
4124 	int local_node, prev_node;
4125 	struct zonelist *zonelist;
4126 	int order = current_zonelist_order;
4127 
4128 	/* initialize zonelists */
4129 	for (i = 0; i < MAX_ZONELISTS; i++) {
4130 		zonelist = pgdat->node_zonelists + i;
4131 		zonelist->_zonerefs[0].zone = NULL;
4132 		zonelist->_zonerefs[0].zone_idx = 0;
4133 	}
4134 
4135 	/* NUMA-aware ordering of nodes */
4136 	local_node = pgdat->node_id;
4137 	load = nr_online_nodes;
4138 	prev_node = local_node;
4139 	nodes_clear(used_mask);
4140 
4141 	memset(node_order, 0, sizeof(node_order));
4142 	j = 0;
4143 
4144 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4145 		/*
4146 		 * We don't want to pressure a particular node.
4147 		 * So we add a penalty to the first node in the same
4148 		 * distance group to make the selection round-robin.
4149 		 */
4150 		if (node_distance(local_node, node) !=
4151 		    node_distance(local_node, prev_node))
4152 			node_load[node] = load;
4153 
4154 		prev_node = node;
4155 		load--;
4156 		if (order == ZONELIST_ORDER_NODE)
4157 			build_zonelists_in_node_order(pgdat, node);
4158 		else
4159 			node_order[j++] = node;	/* remember order */
4160 	}
4161 
4162 	if (order == ZONELIST_ORDER_ZONE) {
4163 		/* calculate node order -- i.e., DMA last! */
4164 		build_zonelists_in_zone_order(pgdat, j);
4165 	}
4166 
4167 	build_thisnode_zonelists(pgdat);
4168 }
4169 
4170 /* Construct the zonelist performance cache - see further mmzone.h */
4171 static void build_zonelist_cache(pg_data_t *pgdat)
4172 {
4173 	struct zonelist *zonelist;
4174 	struct zonelist_cache *zlc;
4175 	struct zoneref *z;
4176 
4177 	zonelist = &pgdat->node_zonelists[0];
4178 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
4179 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
4180 	for (z = zonelist->_zonerefs; z->zone; z++)
4181 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
4182 }
4183 
4184 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
4185 /*
4186  * Return node id of node used for "local" allocations.
4187  * I.e., the node id of the first zone in the given node's generic zonelist.
4188  * Used for initializing percpu 'numa_mem', which is used primarily
4189  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4190  */
4191 int local_memory_node(int node)
4192 {
4193 	struct zone *zone;
4194 
4195 	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4196 				   gfp_zone(GFP_KERNEL),
4197 				   NULL,
4198 				   &zone);
4199 	return zone->node;
4200 }
4201 #endif
4202 
4203 #else	/* CONFIG_NUMA */
4204 
4205 static void set_zonelist_order(void)
4206 {
4207 	current_zonelist_order = ZONELIST_ORDER_ZONE;
4208 }
4209 
4210 static void build_zonelists(pg_data_t *pgdat)
4211 {
4212 	int node, local_node;
4213 	enum zone_type j;
4214 	struct zonelist *zonelist;
4215 
4216 	local_node = pgdat->node_id;
4217 
4218 	zonelist = &pgdat->node_zonelists[0];
4219 	j = build_zonelists_node(pgdat, zonelist, 0);
4220 
4221 	/*
4222 	 * Now we build the zonelist so that it contains the zones
4223 	 * of all the other nodes.
4224 	 * We don't want to pressure a particular node, so when
4225 	 * building the zones for node N, we make sure that the
4226 	 * zones coming right after the local ones are those from
4227 	 * node N+1 (modulo N)
4228 	 */
4229 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4230 		if (!node_online(node))
4231 			continue;
4232 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4233 	}
4234 	for (node = 0; node < local_node; node++) {
4235 		if (!node_online(node))
4236 			continue;
4237 		j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4238 	}
4239 
4240 	zonelist->_zonerefs[j].zone = NULL;
4241 	zonelist->_zonerefs[j].zone_idx = 0;
4242 }
4243 
4244 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
4245 static void build_zonelist_cache(pg_data_t *pgdat)
4246 {
4247 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
4248 }
4249 
4250 #endif	/* CONFIG_NUMA */
4251 
4252 /*
4253  * Boot pageset table. One per cpu which is going to be used for all
4254  * zones and all nodes. The parameters will be set in such a way
4255  * that an item put on a list will immediately be handed over to
4256  * the buddy list. This is safe since pageset manipulation is done
4257  * with interrupts disabled.
4258  *
4259  * The boot_pagesets must be kept even after bootup is complete for
4260  * unused processors and/or zones. They do play a role for bootstrapping
4261  * hotplugged processors.
4262  *
4263  * zoneinfo_show() and maybe other functions do
4264  * not check if the processor is online before following the pageset pointer.
4265  * Other parts of the kernel may not check if the zone is available.
4266  */
4267 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4268 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
4269 static void setup_zone_pageset(struct zone *zone);
4270 
4271 /*
4272  * Global mutex to protect against size modification of zonelists
4273  * as well as to serialize pageset setup for the new populated zone.
4274  */
4275 DEFINE_MUTEX(zonelists_mutex);
4276 
4277 /* The return value is int just to match stop_machine()'s signature */
4278 static int __build_all_zonelists(void *data)
4279 {
4280 	int nid;
4281 	int cpu;
4282 	pg_data_t *self = data;
4283 
4284 #ifdef CONFIG_NUMA
4285 	memset(node_load, 0, sizeof(node_load));
4286 #endif
4287 
4288 	if (self && !node_online(self->node_id)) {
4289 		build_zonelists(self);
4290 		build_zonelist_cache(self);
4291 	}
4292 
4293 	for_each_online_node(nid) {
4294 		pg_data_t *pgdat = NODE_DATA(nid);
4295 
4296 		build_zonelists(pgdat);
4297 		build_zonelist_cache(pgdat);
4298 	}
4299 
4300 	/*
4301 	 * Initialize the boot_pagesets that are going to be used
4302 	 * for bootstrapping processors. The real pagesets for
4303 	 * each zone will be allocated later when the per cpu
4304 	 * allocator is available.
4305 	 *
4306 	 * boot_pagesets are also used for bootstrapping offline
4307 	 * cpus if the system is already booted because the pagesets
4308 	 * are needed to initialize allocators on a specific cpu too.
4309 	 * E.g. the percpu allocator needs the page allocator, which
4310 	 * needs the percpu allocator in order to allocate its pagesets
4311 	 * (a chicken-egg dilemma).
4312 	 */
4313 	for_each_possible_cpu(cpu) {
4314 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4315 
4316 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
4317 		/*
4318 		 * We now know the "local memory node" for each node--
4319 		 * i.e., the node of the first zone in the generic zonelist.
4320 		 * Set up numa_mem percpu variable for on-line cpus.  During
4321 		 * boot, only the boot cpu should be on-line;  we'll init the
4322 		 * secondary cpus' numa_mem as they come on-line.  During
4323 		 * node/memory hotplug, we'll fixup all on-line cpus.
4324 		 */
4325 		if (cpu_online(cpu))
4326 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4327 #endif
4328 	}
4329 
4330 	return 0;
4331 }
4332 
4333 static noinline void __init
4334 build_all_zonelists_init(void)
4335 {
4336 	__build_all_zonelists(NULL);
4337 	mminit_verify_zonelist();
4338 	cpuset_init_current_mems_allowed();
4339 }
4340 
4341 /*
4342  * Called with zonelists_mutex held always
4343  * unless system_state == SYSTEM_BOOTING.
4344  *
4345  * __ref due to (1) call of __meminit annotated setup_zone_pageset
4346  * [we're only called with non-NULL zone through __meminit paths] and
4347  * (2) call of __init annotated helper build_all_zonelists_init
4348  * [protected by SYSTEM_BOOTING].
4349  */
4350 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
4351 {
4352 	set_zonelist_order();
4353 
4354 	if (system_state == SYSTEM_BOOTING) {
4355 		build_all_zonelists_init();
4356 	} else {
4357 #ifdef CONFIG_MEMORY_HOTPLUG
4358 		if (zone)
4359 			setup_zone_pageset(zone);
4360 #endif
4361 		/* we have to stop all cpus to guarantee there is no user
4362 		   of zonelist */
4363 		stop_machine(__build_all_zonelists, pgdat, NULL);
4364 		/* cpuset refresh routine should be here */
4365 	}
4366 	vm_total_pages = nr_free_pagecache_pages();
4367 	/*
4368 	 * Disable grouping by mobility if the number of pages in the
4369 	 * system is too low to allow the mechanism to work. It would be
4370 	 * more accurate, but expensive to check per-zone. This check is
4371 	 * made on memory-hotadd so a system can start with mobility
4372 	 * disabled and enable it later
4373 	 */
4374 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
4375 		page_group_by_mobility_disabled = 1;
4376 	else
4377 		page_group_by_mobility_disabled = 0;
4378 
4379 	pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
4380 		"Total pages: %ld\n",
4381 			nr_online_nodes,
4382 			zonelist_order_name[current_zonelist_order],
4383 			page_group_by_mobility_disabled ? "off" : "on",
4384 			vm_total_pages);
4385 #ifdef CONFIG_NUMA
4386 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
4387 #endif
4388 }
4389 
4390 /*
4391  * Helper functions to size the waitqueue hash table.
4392  * Essentially these want to choose hash table sizes sufficiently
4393  * large so that collisions trying to wait on pages are rare.
4394  * But in fact, the number of active page waitqueues on typical
4395  * systems is ridiculously low, less than 200. So even this is
4396  * conservative, though it seems large.
4397  *
4398  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4399  * waitqueues, i.e. the size of the waitq table given the number of pages.
4400  */
4401 #define PAGES_PER_WAITQUEUE	256
4402 
4403 #ifndef CONFIG_MEMORY_HOTPLUG
4404 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4405 {
4406 	unsigned long size = 1;
4407 
4408 	pages /= PAGES_PER_WAITQUEUE;
4409 
4410 	while (size < pages)
4411 		size <<= 1;
4412 
4413 	/*
4414 	 * Once we have dozens or even hundreds of threads sleeping
4415 	 * on IO we've got bigger problems than wait queue collision.
4416 	 * Limit the size of the wait table to a reasonable size.
4417 	 */
4418 	size = min(size, 4096UL);
4419 
4420 	return max(size, 4UL);
4421 }
4422 #else
4423 /*
4424  * A zone's size might be changed by hot-add, so it is not possible to determine
4425  * a suitable size for its wait_table.  So we use the maximum size now.
4426  *
4427  * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
4428  *
4429  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
4430  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4431  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
4432  *
4433  * The maximum number of entries is reached when a zone's memory is (512K + 256)
4434  * pages or more, computed the traditional way (see above).  It equals:
4435  *
4436  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
4437  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
4438  *    powerpc (64K page size)             : =  (32G +16M)byte.
4439  */
4440 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4441 {
4442 	return 4096UL;
4443 }
4444 #endif
4445 
4446 /*
4447  * This is an integer logarithm so that shifts can be used later
4448  * to extract the more random high bits from the multiplicative
4449  * hash function before the remainder is taken.
4450  */
4451 static inline unsigned long wait_table_bits(unsigned long size)
4452 {
4453 	return ffz(~size);
4454 }
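
/*
 * Worked example (the non-hotplug case, assuming 4K pages): a 4GB
 * zone has 1048576 pages, so pages / PAGES_PER_WAITQUEUE = 4096;
 * that is already a power of two, so wait_table_hash_nr_entries()
 * returns 4096 and wait_table_bits(4096) = ffz(~4096) = 12.
 */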
4455 
4456 /*
4457  * Check if a pageblock contains reserved pages
4458  */
4459 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4460 {
4461 	unsigned long pfn;
4462 
4463 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4464 		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4465 			return 1;
4466 	}
4467 	return 0;
4468 }
4469 
4470 /*
4471  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
4472  * of blocks reserved is based on min_wmark_pages(zone). The memory within
4473  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4474  * higher will lead to a bigger reserve, which will get freed as contiguous
4475  * blocks as reclaim kicks in.
4476  */
4477 static void setup_zone_migrate_reserve(struct zone *zone)
4478 {
4479 	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
4480 	struct page *page;
4481 	unsigned long block_migratetype;
4482 	int reserve;
4483 	int old_reserve;
4484 
4485 	/*
4486 	 * Get the start pfn, end pfn and the number of blocks to reserve.
4487 	 * We have to be careful to be aligned to pageblock_nr_pages to
4488 	 * make sure that we always check pfn_valid for the first page in
4489 	 * the block.
4490 	 */
4491 	start_pfn = zone->zone_start_pfn;
4492 	end_pfn = zone_end_pfn(zone);
4493 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
4494 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
4495 							pageblock_order;
4496 
4497 	/*
4498 	 * Reserve blocks are generally in place to help high-order atomic
4499 	 * allocations that are short-lived. A min_free_kbytes value that
4500 	 * would result in more than 2 reserve blocks for atomic allocations
4501 	 * is assumed to be in place to help anti-fragmentation for the
4502 	 * future allocation of hugepages at runtime.
4503 	 */
4504 	reserve = min(2, reserve);
4505 	old_reserve = zone->nr_migrate_reserve_block;
4506 
4507 	/* On memory hot-add, we almost always need to do nothing */
4508 	if (reserve == old_reserve)
4509 		return;
4510 	zone->nr_migrate_reserve_block = reserve;
4511 
4512 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4513 		if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
4514 			return;
4515 
4516 		if (!pfn_valid(pfn))
4517 			continue;
4518 		page = pfn_to_page(pfn);
4519 
4520 		/* Watch out for overlapping nodes */
4521 		if (page_to_nid(page) != zone_to_nid(zone))
4522 			continue;
4523 
4524 		block_migratetype = get_pageblock_migratetype(page);
4525 
4526 		/* Only test what is necessary when the reserves are not met */
4527 		if (reserve > 0) {
4528 			/*
4529 			 * Blocks with reserved pages will never be freed,
4530 			 * skip them.
4531 			 */
4532 			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4533 			if (pageblock_is_reserved(pfn, block_end_pfn))
4534 				continue;
4535 
4536 			/* If this block is reserved, account for it */
4537 			if (block_migratetype == MIGRATE_RESERVE) {
4538 				reserve--;
4539 				continue;
4540 			}
4541 
4542 			/* Suitable for reserving if this block is movable */
4543 			if (block_migratetype == MIGRATE_MOVABLE) {
4544 				set_pageblock_migratetype(page,
4545 							MIGRATE_RESERVE);
4546 				move_freepages_block(zone, page,
4547 							MIGRATE_RESERVE);
4548 				reserve--;
4549 				continue;
4550 			}
4551 		} else if (!old_reserve) {
4552 			/*
4553 			 * At boot time we don't need to scan the whole zone
4554 			 * for turning off MIGRATE_RESERVE.
4555 			 */
4556 			break;
4557 		}
4558 
4559 		/*
4560 		 * If the reserve is met and this is a previous reserved block,
4561 		 * take it back
4562 		 */
4563 		if (block_migratetype == MIGRATE_RESERVE) {
4564 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4565 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
4566 		}
4567 	}
4568 }
4569 
4570 /*
4571  * Initially all pages are reserved - free ones are freed
4572  * up by free_all_bootmem() once the early boot process is
4573  * done. Non-atomic initialization, single-pass.
4574  */
4575 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4576 		unsigned long start_pfn, enum memmap_context context)
4577 {
4578 	pg_data_t *pgdat = NODE_DATA(nid);
4579 	unsigned long end_pfn = start_pfn + size;
4580 	unsigned long pfn;
4581 	struct zone *z;
4582 	unsigned long nr_initialised = 0;
4583 
4584 	if (highest_memmap_pfn < end_pfn - 1)
4585 		highest_memmap_pfn = end_pfn - 1;
4586 
4587 	z = &pgdat->node_zones[zone];
4588 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4589 		/*
4590 		 * There can be holes in boot-time mem_map[]s
4591 		 * handed to this function.  They do not
4592 		 * exist on hotplugged memory.
4593 		 */
4594 		if (context == MEMMAP_EARLY) {
4595 			if (!early_pfn_valid(pfn))
4596 				continue;
4597 			if (!early_pfn_in_nid(pfn, nid))
4598 				continue;
4599 			if (!update_defer_init(pgdat, pfn, end_pfn,
4600 						&nr_initialised))
4601 				break;
4602 		}
4603 
4604 		/*
4605 		 * Mark the block movable so that blocks are reserved for
4606 		 * movable at startup. This will force kernel allocations
4607 		 * to reserve their blocks rather than leaking throughout
4608 		 * the address space during boot when many long-lived
4609 		 * kernel allocations are made. Later some blocks near
4610 		 * the start are marked MIGRATE_RESERVE by
4611 		 * setup_zone_migrate_reserve()
4612 		 *
4613 		 * The bitmap is created for the zone's valid pfn range, but
4614 		 * the memmap can be created for invalid pages (for alignment),
4615 		 * so check here that we do not call set_pageblock_migratetype()
4616 		 * against a pfn outside the zone.
4617 		 */
4618 		if (!(pfn & (pageblock_nr_pages - 1))) {
4619 			struct page *page = pfn_to_page(pfn);
4620 
4621 			__init_single_page(page, pfn, zone, nid);
4622 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4623 		} else {
4624 			__init_single_pfn(pfn, zone, nid);
4625 		}
4626 	}
4627 }
4628 
4629 static void __meminit zone_init_free_lists(struct zone *zone)
4630 {
4631 	unsigned int order, t;
4632 	for_each_migratetype_order(order, t) {
4633 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
4634 		zone->free_area[order].nr_free = 0;
4635 	}
4636 }
4637 
4638 #ifndef __HAVE_ARCH_MEMMAP_INIT
4639 #define memmap_init(size, nid, zone, start_pfn) \
4640 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4641 #endif
4642 
4643 static int zone_batchsize(struct zone *zone)
4644 {
4645 #ifdef CONFIG_MMU
4646 	int batch;
4647 
4648 	/*
4649 	 * The per-cpu-pages pools are set to around 1/1000th of the
4650 	 * size of the zone, but no more than half a megabyte.
4651 	 *
4652 	 * OK, so we don't know how big the cache is.  So guess.
4653 	 */
4654 	batch = zone->managed_pages / 1024;
4655 	if (batch * PAGE_SIZE > 512 * 1024)
4656 		batch = (512 * 1024) / PAGE_SIZE;
4657 	batch /= 4;		/* We effectively *= 4 below */
4658 	if (batch < 1)
4659 		batch = 1;
4660 
4661 	/*
4662 	 * Clamp the batch to a 2^n - 1 value. Having a power
4663 	 * of 2 value was found to be more likely to have
4664 	 * suboptimal cache aliasing properties in some cases.
4665 	 *
4666 	 * For example if 2 tasks are alternately allocating
4667 	 * batches of pages, one task can end up with a lot
4668 	 * of pages of one half of the possible page colors
4669 	 * and the other with pages of the other colors.
4670 	 */
4671 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
4672 
4673 	return batch;
4674 
4675 #else
4676 	/* The deferral and batching of frees should be suppressed under NOMMU
4677 	 * conditions.
4678 	 *
4679 	 * The problem is that NOMMU needs to be able to allocate large chunks
4680 	 * of contiguous memory as there's no hardware page translation to
4681 	 * assemble apparent contiguous memory from discontiguous pages.
4682 	 *
4683 	 * Queueing large contiguous runs of pages for batching, however,
4684 	 * causes the pages to actually be freed in smaller chunks.  As there
4685 	 * can be a significant delay between the individual batches being
4686 	 * recycled, this leads to the once large chunks of space being
4687 	 * fragmented and becoming unavailable for high-order allocations.
4688 	 */
4689 	return 0;
4690 #endif
4691 }
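
/*
 * Worked example (MMU case, assuming 4K pages): a 1GB zone has
 * managed_pages = 262144, so batch starts at 256; 256 pages is 1MB,
 * which exceeds 512K, clamping batch to 128, then /4 gives 32, and
 * rounddown_pow_of_two(32 + 16) - 1 yields a final batch of 31.
 */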
4692 
4693 /*
4694  * pcp->high and pcp->batch values are related and dependent on one another:
4695  * ->batch must never be higher than ->high.
4696  * The following function updates them in a safe manner without read side
4697  * locking.
4698  *
4699  * Any new users of pcp->batch and pcp->high should ensure they can cope with
4700  * those fields changing asynchronously (according to the above rule).
4701  *
4702  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4703  * outside of boot time (or some other assurance that no concurrent updaters
4704  * exist).
4705  */
4706 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4707 		unsigned long batch)
4708 {
4709 	/* Start with a fail-safe value for batch */
4710 	pcp->batch = 1;
4711 	smp_wmb();
4712 
4713 	/* Update high, then batch, in order */
4714 	pcp->high = high;
4715 	smp_wmb();
4716 
4717 	pcp->batch = batch;
4718 }
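
/*
 * E.g. while moving from (high = 186, batch = 31) to
 * (high = 372, batch = 62), a lockless reader may transiently see
 * (186, 1), (372, 1) or (372, 62) -- every combination satisfies
 * batch <= high, which is the invariant the write ordering protects.
 */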
4719 
4720 /* a companion to pageset_set_high() */
4721 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4722 {
4723 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4724 }
4725 
4726 static void pageset_init(struct per_cpu_pageset *p)
4727 {
4728 	struct per_cpu_pages *pcp;
4729 	int migratetype;
4730 
4731 	memset(p, 0, sizeof(*p));
4732 
4733 	pcp = &p->pcp;
4734 	pcp->count = 0;
4735 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4736 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
4737 }
4738 
4739 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4740 {
4741 	pageset_init(p);
4742 	pageset_set_batch(p, batch);
4743 }
4744 
4745 /*
4746  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
4747  * to the value high for the pageset p.
4748  */
4749 static void pageset_set_high(struct per_cpu_pageset *p,
4750 				unsigned long high)
4751 {
4752 	unsigned long batch = max(1UL, high / 4);
4753 	if ((high / 4) > (PAGE_SHIFT * 8))
4754 		batch = PAGE_SHIFT * 8;
4755 
4756 	pageset_update(&p->pcp, high, batch);
4757 }
4758 
4759 static void pageset_set_high_and_batch(struct zone *zone,
4760 				       struct per_cpu_pageset *pcp)
4761 {
4762 	if (percpu_pagelist_fraction)
4763 		pageset_set_high(pcp,
4764 			(zone->managed_pages /
4765 				percpu_pagelist_fraction));
4766 	else
4767 		pageset_set_batch(pcp, zone_batchsize(zone));
4768 }
4769 
4770 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4771 {
4772 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4773 
4774 	pageset_init(pcp);
4775 	pageset_set_high_and_batch(zone, pcp);
4776 }
4777 
4778 static void __meminit setup_zone_pageset(struct zone *zone)
4779 {
4780 	int cpu;
4781 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
4782 	for_each_possible_cpu(cpu)
4783 		zone_pageset_init(zone, cpu);
4784 }
4785 
4786 /*
4787  * Allocate per cpu pagesets and initialize them.
4788  * Before this call only boot pagesets were available.
4789  */
4790 void __init setup_per_cpu_pageset(void)
4791 {
4792 	struct zone *zone;
4793 
4794 	for_each_populated_zone(zone)
4795 		setup_zone_pageset(zone);
4796 }
4797 
4798 static noinline __init_refok
4799 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4800 {
4801 	int i;
4802 	size_t alloc_size;
4803 
4804 	/*
4805 	 * The per-page waitqueue mechanism uses hashed waitqueues
4806 	 * per zone.
4807 	 */
4808 	zone->wait_table_hash_nr_entries =
4809 		 wait_table_hash_nr_entries(zone_size_pages);
4810 	zone->wait_table_bits =
4811 		wait_table_bits(zone->wait_table_hash_nr_entries);
4812 	alloc_size = zone->wait_table_hash_nr_entries
4813 					* sizeof(wait_queue_head_t);
4814 
4815 	if (!slab_is_available()) {
4816 		zone->wait_table = (wait_queue_head_t *)
4817 			memblock_virt_alloc_node_nopanic(
4818 				alloc_size, zone->zone_pgdat->node_id);
4819 	} else {
4820 		/*
4821 		 * This case means that a zone whose size was 0 gets new memory
4822 		 * via memory hot-add.
4823 		 * But it may be the case that a new node was hot-added.  In
4824 		 * this case vmalloc() will not be able to use this new node's
4825 		 * memory - this wait_table must be initialized to use this new
4826 		 * node itself as well.
4827 		 * To use this new node's memory, further consideration will be
4828 		 * necessary.
4829 		 */
4830 		zone->wait_table = vmalloc(alloc_size);
4831 	}
4832 	if (!zone->wait_table)
4833 		return -ENOMEM;
4834 
4835 	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4836 		init_waitqueue_head(zone->wait_table + i);
4837 
4838 	return 0;
4839 }
4840 
4841 static __meminit void zone_pcp_init(struct zone *zone)
4842 {
4843 	/*
4844 	 * per cpu subsystem is not up at this point. The following code
4845 	 * relies on the ability of the linker to provide the
4846 	 * offset of a (static) per cpu variable into the per cpu area.
4847 	 */
4848 	zone->pageset = &boot_pageset;
4849 
4850 	if (populated_zone(zone))
4851 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4852 			zone->name, zone->present_pages,
4853 					 zone_batchsize(zone));
4854 }
4855 
4856 int __meminit init_currently_empty_zone(struct zone *zone,
4857 					unsigned long zone_start_pfn,
4858 					unsigned long size,
4859 					enum memmap_context context)
4860 {
4861 	struct pglist_data *pgdat = zone->zone_pgdat;
4862 	int ret;
4863 	ret = zone_wait_table_init(zone, size);
4864 	if (ret)
4865 		return ret;
4866 	pgdat->nr_zones = zone_idx(zone) + 1;
4867 
4868 	zone->zone_start_pfn = zone_start_pfn;
4869 
4870 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
4871 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
4872 			pgdat->node_id,
4873 			(unsigned long)zone_idx(zone),
4874 			zone_start_pfn, (zone_start_pfn + size));
4875 
4876 	zone_init_free_lists(zone);
4877 
4878 	return 0;
4879 }
4880 
4881 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4882 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4883 
4884 /*
4885  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4886  */
4887 int __meminit __early_pfn_to_nid(unsigned long pfn,
4888 					struct mminit_pfnnid_cache *state)
4889 {
4890 	unsigned long start_pfn, end_pfn;
4891 	int nid;
4892 
4893 	if (state->last_start <= pfn && pfn < state->last_end)
4894 		return state->last_nid;
4895 
4896 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4897 	if (nid != -1) {
4898 		state->last_start = start_pfn;
4899 		state->last_end = end_pfn;
4900 		state->last_nid = nid;
4901 	}
4902 
4903 	return nid;
4904 }
4905 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4906 
4907 /**
4908  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
4909  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4910  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4911  *
4912  * If an architecture guarantees that all ranges registered contain no holes
4913  * and may be freed, this function may be used instead of calling
4914  * memblock_free_early_nid() manually.
4915  */
4916 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4917 {
4918 	unsigned long start_pfn, end_pfn;
4919 	int i, this_nid;
4920 
4921 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4922 		start_pfn = min(start_pfn, max_low_pfn);
4923 		end_pfn = min(end_pfn, max_low_pfn);
4924 
4925 		if (start_pfn < end_pfn)
4926 			memblock_free_early_nid(PFN_PHYS(start_pfn),
4927 					(end_pfn - start_pfn) << PAGE_SHIFT,
4928 					this_nid);
4929 	}
4930 }
4931 
4932 /**
4933  * sparse_memory_present_with_active_regions - Call memory_present for each active range
4934  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4935  *
4936  * If an architecture guarantees that all ranges registered contain no holes and may
4937  * be freed, this function may be used instead of calling memory_present() manually.
4938  */
4939 void __init sparse_memory_present_with_active_regions(int nid)
4940 {
4941 	unsigned long start_pfn, end_pfn;
4942 	int i, this_nid;
4943 
4944 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4945 		memory_present(this_nid, start_pfn, end_pfn);
4946 }
4947 
4948 /**
4949  * get_pfn_range_for_nid - Return the start and end page frames for a node
4950  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4951  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4952  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4953  *
4954  * It returns the start and end page frame of a node based on information
4955  * provided by memblock_set_node(). If called for a node
4956  * with no available memory, a warning is printed and the start and end
4957  * PFNs will be 0.
4958  */
4959 void __meminit get_pfn_range_for_nid(unsigned int nid,
4960 			unsigned long *start_pfn, unsigned long *end_pfn)
4961 {
4962 	unsigned long this_start_pfn, this_end_pfn;
4963 	int i;
4964 
4965 	*start_pfn = -1UL;
4966 	*end_pfn = 0;
4967 
4968 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4969 		*start_pfn = min(*start_pfn, this_start_pfn);
4970 		*end_pfn = max(*end_pfn, this_end_pfn);
4971 	}
4972 
4973 	if (*start_pfn == -1UL)
4974 		*start_pfn = 0;
4975 }
4976 
4977 /*
4978  * This finds a zone that can be used for ZONE_MOVABLE pages. The
4979  * assumption is made that zones within a node are ordered in monotonic
4980  * increasing memory addresses so that the "highest" populated zone is used
4981  */
4982 static void __init find_usable_zone_for_movable(void)
4983 {
4984 	int zone_index;
4985 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4986 		if (zone_index == ZONE_MOVABLE)
4987 			continue;
4988 
4989 		if (arch_zone_highest_possible_pfn[zone_index] >
4990 				arch_zone_lowest_possible_pfn[zone_index])
4991 			break;
4992 	}
4993 
4994 	VM_BUG_ON(zone_index == -1);
4995 	movable_zone = zone_index;
4996 }
4997 
4998 /*
4999  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
5000  * because it is sized independent of architecture. Unlike the other zones,
5001  * the starting point for ZONE_MOVABLE is not fixed. It may be different
5002  * in each node depending on the size of each node and how evenly kernelcore
5003  * is distributed. This helper function adjusts the zone ranges
5004  * provided by the architecture for a given node by using the end of the
5005  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5006  * zones within a node are in order of monotonically increasing memory addresses.
5007  */
5008 static void __meminit adjust_zone_range_for_zone_movable(int nid,
5009 					unsigned long zone_type,
5010 					unsigned long node_start_pfn,
5011 					unsigned long node_end_pfn,
5012 					unsigned long *zone_start_pfn,
5013 					unsigned long *zone_end_pfn)
5014 {
5015 	/* Only adjust if ZONE_MOVABLE is on this node */
5016 	if (zone_movable_pfn[nid]) {
5017 		/* Size ZONE_MOVABLE */
5018 		if (zone_type == ZONE_MOVABLE) {
5019 			*zone_start_pfn = zone_movable_pfn[nid];
5020 			*zone_end_pfn = min(node_end_pfn,
5021 				arch_zone_highest_possible_pfn[movable_zone]);
5022 
5023 		/* Adjust for ZONE_MOVABLE starting within this range */
5024 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
5025 				*zone_end_pfn > zone_movable_pfn[nid]) {
5026 			*zone_end_pfn = zone_movable_pfn[nid];
5027 
5028 		/* Check if this whole range is within ZONE_MOVABLE */
5029 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
5030 			*zone_start_pfn = *zone_end_pfn;
5031 	}
5032 }
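
/*
 * Example with hypothetical PFNs: on a node spanning 0..1048576 with
 * zone_movable_pfn[nid] = 786432 (and the highest usable zone
 * covering the node), a ZONE_NORMAL range of 262144..1048576 is
 * truncated to end at 786432, ZONE_MOVABLE itself becomes
 * 786432..1048576, and any zone starting at or above 786432
 * collapses to empty.
 */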
5033 
5034 /*
5035  * Return the number of pages a zone spans in a node, including holes
5036  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5037  */
5038 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5039 					unsigned long zone_type,
5040 					unsigned long node_start_pfn,
5041 					unsigned long node_end_pfn,
5042 					unsigned long *ignored)
5043 {
5044 	unsigned long zone_start_pfn, zone_end_pfn;
5045 
5046 	/* Get the start and end of the zone */
5047 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5048 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5049 	adjust_zone_range_for_zone_movable(nid, zone_type,
5050 				node_start_pfn, node_end_pfn,
5051 				&zone_start_pfn, &zone_end_pfn);
5052 
5053 	/* Check that this node has pages within the zone's required range */
5054 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
5055 		return 0;
5056 
5057 	/* Move the zone boundaries inside the node if necessary */
5058 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
5059 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
5060 
5061 	/* Return the spanned pages */
5062 	return zone_end_pfn - zone_start_pfn;
5063 }
5064 
5065 /*
5066  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5067  * then all holes in the requested range will be accounted for.
5068  */
5069 unsigned long __meminit __absent_pages_in_range(int nid,
5070 				unsigned long range_start_pfn,
5071 				unsigned long range_end_pfn)
5072 {
5073 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
5074 	unsigned long start_pfn, end_pfn;
5075 	int i;
5076 
5077 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5078 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5079 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5080 		nr_absent -= end_pfn - start_pfn;
5081 	}
5082 	return nr_absent;
5083 }
5084 
5085 /**
5086  * absent_pages_in_range - Return number of page frames in holes within a range
5087  * @start_pfn: The start PFN to start searching for holes
5088  * @end_pfn: The end PFN to stop searching for holes
5089  *
5090  * It returns the number of page frames in memory holes within a range.
5091  */
5092 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5093 							unsigned long end_pfn)
5094 {
5095 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5096 }
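
/*
 * For example, if memblock has registered PFN ranges 0..200 and
 * 500..1000, then absent_pages_in_range(0, 1000) starts from
 * nr_absent = 1000, subtracts the 200 and 500 pages covered by the
 * two ranges, and reports a hole of 300 pages.
 */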
5097 
5098 /* Return the number of page frames in holes in a zone on a node */
5099 static unsigned long __meminit zone_absent_pages_in_node(int nid,
5100 					unsigned long zone_type,
5101 					unsigned long node_start_pfn,
5102 					unsigned long node_end_pfn,
5103 					unsigned long *ignored)
5104 {
5105 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5106 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5107 	unsigned long zone_start_pfn, zone_end_pfn;
5108 
5109 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5110 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5111 
5112 	adjust_zone_range_for_zone_movable(nid, zone_type,
5113 			node_start_pfn, node_end_pfn,
5114 			&zone_start_pfn, &zone_end_pfn);
5115 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5116 }
5117 
5118 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5119 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5120 					unsigned long zone_type,
5121 					unsigned long node_start_pfn,
5122 					unsigned long node_end_pfn,
5123 					unsigned long *zones_size)
5124 {
5125 	return zones_size[zone_type];
5126 }
5127 
5128 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5129 						unsigned long zone_type,
5130 						unsigned long node_start_pfn,
5131 						unsigned long node_end_pfn,
5132 						unsigned long *zholes_size)
5133 {
5134 	if (!zholes_size)
5135 		return 0;
5136 
5137 	return zholes_size[zone_type];
5138 }
5139 
5140 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5141 
5142 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5143 						unsigned long node_start_pfn,
5144 						unsigned long node_end_pfn,
5145 						unsigned long *zones_size,
5146 						unsigned long *zholes_size)
5147 {
5148 	unsigned long realtotalpages = 0, totalpages = 0;
5149 	enum zone_type i;
5150 
5151 	for (i = 0; i < MAX_NR_ZONES; i++) {
5152 		struct zone *zone = pgdat->node_zones + i;
5153 		unsigned long size, real_size;
5154 
5155 		size = zone_spanned_pages_in_node(pgdat->node_id, i,
5156 						  node_start_pfn,
5157 						  node_end_pfn,
5158 						  zones_size);
5159 		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5160 						  node_start_pfn, node_end_pfn,
5161 						  zholes_size);
5162 		zone->spanned_pages = size;
5163 		zone->present_pages = real_size;
5164 
5165 		totalpages += size;
5166 		realtotalpages += real_size;
5167 	}
5168 
5169 	pgdat->node_spanned_pages = totalpages;
5170 	pgdat->node_present_pages = realtotalpages;
5171 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5172 							realtotalpages);
5173 }
5174 
5175 #ifndef CONFIG_SPARSEMEM
5176 /*
5177  * Calculate the size of the zone->blockflags rounded to an unsigned long.
5178  * Start by making sure zonesize is a multiple of pageblock_nr_pages by
5179  * rounding up, then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
5180  * round what is now in bits up to the nearest long in bits, and finally
5181  * return it in bytes.
5182  */
5183 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
5184 {
5185 	unsigned long usemapsize;
5186 
5187 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
5188 	usemapsize = roundup(zonesize, pageblock_nr_pages);
5189 	usemapsize = usemapsize >> pageblock_order;
5190 	usemapsize *= NR_PAGEBLOCK_BITS;
5191 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5192 
5193 	return usemapsize / 8;
5194 }
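
/*
 * Worked example (assuming NR_PAGEBLOCK_BITS == 4 and
 * pageblock_order == 9): zone_start_pfn = 0 and zonesize = 262144
 * give 512 pageblocks, i.e. 2048 bits; that is already a multiple of
 * 64 bits, so usemap_size() returns 2048 / 8 = 256 bytes.
 */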
5195 
5196 static void __init setup_usemap(struct pglist_data *pgdat,
5197 				struct zone *zone,
5198 				unsigned long zone_start_pfn,
5199 				unsigned long zonesize)
5200 {
5201 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
5202 	zone->pageblock_flags = NULL;
5203 	if (usemapsize)
5204 		zone->pageblock_flags =
5205 			memblock_virt_alloc_node_nopanic(usemapsize,
5206 							 pgdat->node_id);
5207 }
5208 #else
5209 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5210 				unsigned long zone_start_pfn, unsigned long zonesize) {}
5211 #endif /* CONFIG_SPARSEMEM */
5212 
5213 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
5214 
5215 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
5216 void __paginginit set_pageblock_order(void)
5217 {
5218 	unsigned int order;
5219 
5220 	/* Check that pageblock_order has not already been set up */
5221 	if (pageblock_order)
5222 		return;
5223 
5224 	if (HPAGE_SHIFT > PAGE_SHIFT)
5225 		order = HUGETLB_PAGE_ORDER;
5226 	else
5227 		order = MAX_ORDER - 1;
5228 
5229 	/*
5230 	 * Assume the largest contiguous order of interest is a huge page.
5231 	 * This value may be variable depending on boot parameters on IA64 and
5232 	 * powerpc.
5233 	 */
5234 	pageblock_order = order;
5235 }
5236 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5237 
5238 /*
5239  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
5240  * is unused as pageblock_order is set at compile-time. See
5241  * include/linux/pageblock-flags.h for the values of pageblock_order based on
5242  * the kernel config
5243  */
5244 void __paginginit set_pageblock_order(void)
5245 {
5246 }
5247 
5248 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5249 
5250 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5251 						   unsigned long present_pages)
5252 {
5253 	unsigned long pages = spanned_pages;
5254 
5255 	/*
5256 	 * Provide a more accurate estimation if there are holes within
5257 	 * the zone and SPARSEMEM is in use. If there are holes within the
5258 	 * zone, each populated memory region may cost us one or two extra
5259 	 * memmap pages due to alignment because memmap pages for each
5260 	 * populated region may not be naturally aligned on a page boundary.
5261 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5262 	 */
5263 	if (spanned_pages > present_pages + (present_pages >> 4) &&
5264 	    IS_ENABLED(CONFIG_SPARSEMEM))
5265 		pages = present_pages;
5266 
5267 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5268 }
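
/*
 * A worked example of the heuristic above (a sketch assuming 4KiB
 * pages and a 64-byte struct page):
 *
 *	spanned_pages = 1048576, present_pages = 917504
 *	917504 + (917504 >> 4) = 974848 < 1048576
 *
 * so with SPARSEMEM the estimate is based on present_pages:
 * PAGE_ALIGN(917504 * 64) >> PAGE_SHIFT == 14336 memmap pages.
 */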
5269 
5270 /*
5271  * Set up the zone data structures:
5272  *   - mark all pages reserved
5273  *   - mark all memory queues empty
5274  *   - clear the memory bitmaps
5275  *
5276  * NOTE: pgdat should get zeroed by caller.
5277  */
5278 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
5279 		unsigned long node_start_pfn, unsigned long node_end_pfn)
5280 {
5281 	enum zone_type j;
5282 	int nid = pgdat->node_id;
5283 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
5284 	int ret;
5285 
5286 	pgdat_resize_init(pgdat);
5287 #ifdef CONFIG_NUMA_BALANCING
5288 	spin_lock_init(&pgdat->numabalancing_migrate_lock);
5289 	pgdat->numabalancing_migrate_nr_pages = 0;
5290 	pgdat->numabalancing_migrate_next_window = jiffies;
5291 #endif
5292 	init_waitqueue_head(&pgdat->kswapd_wait);
5293 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
5294 	pgdat_page_ext_init(pgdat);
5295 
5296 	for (j = 0; j < MAX_NR_ZONES; j++) {
5297 		struct zone *zone = pgdat->node_zones + j;
5298 		unsigned long size, realsize, freesize, memmap_pages;
5299 
5300 		size = zone->spanned_pages;
5301 		realsize = freesize = zone->present_pages;
5302 
5303 		/*
5304 		 * Adjust freesize so that it accounts for how much memory
5305 		 * is used by this zone for memmap. This affects the watermark
5306 		 * and per-cpu initialisations
5307 		 */
5308 		memmap_pages = calc_memmap_size(size, realsize);
5309 		if (!is_highmem_idx(j)) {
5310 			if (freesize >= memmap_pages) {
5311 				freesize -= memmap_pages;
5312 				if (memmap_pages)
5313 					printk(KERN_DEBUG
5314 					       "  %s zone: %lu pages used for memmap\n",
5315 					       zone_names[j], memmap_pages);
5316 			} else
5317 				printk(KERN_WARNING
5318 					"  %s zone: %lu pages exceeds freesize %lu\n",
5319 					zone_names[j], memmap_pages, freesize);
5320 		}
5321 
5322 		/* Account for reserved pages */
5323 		if (j == 0 && freesize > dma_reserve) {
5324 			freesize -= dma_reserve;
5325 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
5326 					zone_names[0], dma_reserve);
5327 		}
5328 
5329 		if (!is_highmem_idx(j))
5330 			nr_kernel_pages += freesize;
5331 		/* Charge for highmem memmap if there are enough kernel pages */
5332 		else if (nr_kernel_pages > memmap_pages * 2)
5333 			nr_kernel_pages -= memmap_pages;
5334 		nr_all_pages += freesize;
5335 
5336 		/*
5337 		 * Set an approximate value for lowmem here; it will be adjusted
5338 		 * when the bootmem allocator frees pages into the buddy system.
5339 		 * And all highmem pages will be managed by the buddy system.
5340 		 */
5341 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
5342 #ifdef CONFIG_NUMA
5343 		zone->node = nid;
5344 		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
5345 						/ 100;
5346 		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
5347 #endif
5348 		zone->name = zone_names[j];
5349 		spin_lock_init(&zone->lock);
5350 		spin_lock_init(&zone->lru_lock);
5351 		zone_seqlock_init(zone);
5352 		zone->zone_pgdat = pgdat;
5353 		zone_pcp_init(zone);
5354 
5355 		/* For bootup, initialized properly in watermark setup */
5356 		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5357 
5358 		lruvec_init(&zone->lruvec);
5359 		if (!size)
5360 			continue;
5361 
5362 		set_pageblock_order();
5363 		setup_usemap(pgdat, zone, zone_start_pfn, size);
5364 		ret = init_currently_empty_zone(zone, zone_start_pfn,
5365 						size, MEMMAP_EARLY);
5366 		BUG_ON(ret);
5367 		memmap_init(size, nid, j, zone_start_pfn);
5368 		zone_start_pfn += size;
5369 	}
5370 }
5371 
5372 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
5373 {
5374 	/* Skip empty nodes */
5375 	if (!pgdat->node_spanned_pages)
5376 		return;
5377 
5378 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5379 	/* ia64 gets its own node_mem_map, before this, without bootmem */
5380 	if (!pgdat->node_mem_map) {
5381 		unsigned long size, start, end;
5382 		struct page *map;
5383 
5384 		/*
5385 		 * The zone's endpoints aren't required to be MAX_ORDER
5386 		 * aligned, but the node_mem_map endpoints must be, in order
5387 		 * for the buddy allocator to function correctly.
5388 		 */
5389 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5390 		end = pgdat_end_pfn(pgdat);
5391 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
5392 		size =  (end - start) * sizeof(struct page);
5393 		map = alloc_remap(pgdat->node_id, size);
5394 		if (!map)
5395 			map = memblock_virt_alloc_node_nopanic(size,
5396 							       pgdat->node_id);
5397 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
5398 	}
5399 #ifndef CONFIG_NEED_MULTIPLE_NODES
5400 	/*
5401 	 * With no DISCONTIG, the global mem_map is just set as node 0's
5402 	 */
5403 	if (pgdat == NODE_DATA(0)) {
5404 		mem_map = NODE_DATA(0)->node_mem_map;
5405 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5406 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
5407 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
5408 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5409 	}
5410 #endif
5411 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
5412 }
5413 
5414 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5415 		unsigned long node_start_pfn, unsigned long *zholes_size)
5416 {
5417 	pg_data_t *pgdat = NODE_DATA(nid);
5418 	unsigned long start_pfn = 0;
5419 	unsigned long end_pfn = 0;
5420 
5421 	/* pg_data_t should be reset to zero when it's allocated */
5422 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
5423 
5424 	reset_deferred_meminit(pgdat);
5425 	pgdat->node_id = nid;
5426 	pgdat->node_start_pfn = node_start_pfn;
5427 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5428 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5429 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
5430 		(u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
5431 #endif
5432 	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5433 				  zones_size, zholes_size);
5434 
5435 	alloc_node_mem_map(pgdat);
5436 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5437 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5438 		nid, (unsigned long)pgdat,
5439 		(unsigned long)pgdat->node_mem_map);
5440 #endif
5441 
5442 	free_area_init_core(pgdat, start_pfn, end_pfn);
5443 }
5444 
5445 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5446 
5447 #if MAX_NUMNODES > 1
5448 /*
5449  * Figure out the number of possible node ids.
5450  */
5451 void __init setup_nr_node_ids(void)
5452 {
5453 	unsigned int node;
5454 	unsigned int highest = 0;
5455 
5456 	for_each_node_mask(node, node_possible_map)
5457 		highest = node;
5458 	nr_node_ids = highest + 1;
5459 }
5460 #endif
5461 
5462 /**
5463  * node_map_pfn_alignment - determine the maximum internode alignment
5464  *
5465  * This function should be called after node map is populated and sorted.
5466  * It calculates the maximum power of two alignment which can distinguish
5467  * all the nodes.
5468  *
5469  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5470  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
5471  * nodes are shifted by 256MiB, 256MiB is returned.  Note that if only the
5472  * last node is shifted, 1GiB is enough and this function will indicate so.
5473  *
5474  * This is used to test whether pfn -> nid mapping of the chosen memory
5475  * model has fine enough granularity to avoid incorrect mapping for the
5476  * populated node map.
5477  *
5478  * Returns the determined alignment in pfn's.  0 if there is no alignment
5479  * requirement (single node).
5480  */
5481 unsigned long __init node_map_pfn_alignment(void)
5482 {
5483 	unsigned long accl_mask = 0, last_end = 0;
5484 	unsigned long start, end, mask;
5485 	int last_nid = -1;
5486 	int i, nid;
5487 
5488 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5489 		if (!start || last_nid < 0 || last_nid == nid) {
5490 			last_nid = nid;
5491 			last_end = end;
5492 			continue;
5493 		}
5494 
5495 		/*
5496 		 * Start with a mask granular enough to pin-point to the
5497 		 * start pfn and tick off bits one-by-one until it becomes
5498 		 * too coarse to separate the current node from the last.
5499 		 */
5500 		mask = ~((1 << __ffs(start)) - 1);
5501 		while (mask && last_end <= (start & (mask << 1)))
5502 			mask <<= 1;
5503 
5504 		/* accumulate all internode masks */
5505 		accl_mask |= mask;
5506 	}
5507 
5508 	/* convert mask to number of pages */
5509 	return ~accl_mask + 1;
5510 }
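
/*
 * A worked example of the loop above (a sketch with two hypothetical
 * nodes): node 0 spans pfns [0x00000, 0x40000) and node 1 starts at
 * pfn 0x48000.  __ffs(0x48000) == 15, so the initial mask pins down
 * 0x8000-pfn granularity; the mask is then widened while
 * last_end (0x40000) <= (0x48000 & (mask << 1)) holds, which stops
 * at 0x40000-pfn granularity.  ~accl_mask + 1 == 0x40000, i.e. a
 * 1GiB alignment (with 4KiB pages) distinguishes the two nodes.
 */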
5511 
5512 /* Find the lowest pfn for a node */
5513 static unsigned long __init find_min_pfn_for_node(int nid)
5514 {
5515 	unsigned long min_pfn = ULONG_MAX;
5516 	unsigned long start_pfn;
5517 	int i;
5518 
5519 	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5520 		min_pfn = min(min_pfn, start_pfn);
5521 
5522 	if (min_pfn == ULONG_MAX) {
5523 		printk(KERN_WARNING
5524 			"Could not find start_pfn for node %d\n", nid);
5525 		return 0;
5526 	}
5527 
5528 	return min_pfn;
5529 }
5530 
5531 /**
5532  * find_min_pfn_with_active_regions - Find the minimum PFN registered
5533  *
5534  * It returns the minimum PFN based on information provided via
5535  * memblock_set_node().
5536  */
5537 unsigned long __init find_min_pfn_with_active_regions(void)
5538 {
5539 	return find_min_pfn_for_node(MAX_NUMNODES);
5540 }
5541 
5542 /*
5543  * early_calculate_totalpages()
5544  * Sums pages in active regions for the movable zone.
5545  * Populates N_MEMORY for calculating usable_nodes.
5546  */
5547 static unsigned long __init early_calculate_totalpages(void)
5548 {
5549 	unsigned long totalpages = 0;
5550 	unsigned long start_pfn, end_pfn;
5551 	int i, nid;
5552 
5553 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5554 		unsigned long pages = end_pfn - start_pfn;
5555 
5556 		totalpages += pages;
5557 		if (pages)
5558 			node_set_state(nid, N_MEMORY);
5559 	}
5560 	return totalpages;
5561 }
5562 
5563 /*
5564  * Find the PFN the Movable zone begins in each node. Kernel memory
5565  * is spread evenly between nodes as long as the nodes have enough
5566  * memory. When they don't, some nodes will have more kernelcore than
5567  * others
5568  */
5569 static void __init find_zone_movable_pfns_for_nodes(void)
5570 {
5571 	int i, nid;
5572 	unsigned long usable_startpfn;
5573 	unsigned long kernelcore_node, kernelcore_remaining;
5574 	/* save the state before borrow the nodemask */
5575 	nodemask_t saved_node_state = node_states[N_MEMORY];
5576 	unsigned long totalpages = early_calculate_totalpages();
5577 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
5578 	struct memblock_region *r;
5579 
5580 	/* Need to find movable_zone earlier when movable_node is specified. */
5581 	find_usable_zone_for_movable();
5582 
5583 	/*
5584 	 * If movable_node is specified, ignore kernelcore and movablecore
5585 	 * options.
5586 	 */
5587 	if (movable_node_is_enabled()) {
5588 		for_each_memblock(memory, r) {
5589 			if (!memblock_is_hotpluggable(r))
5590 				continue;
5591 
5592 			nid = r->nid;
5593 
5594 			usable_startpfn = PFN_DOWN(r->base);
5595 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5596 				min(usable_startpfn, zone_movable_pfn[nid]) :
5597 				usable_startpfn;
5598 		}
5599 
5600 		goto out2;
5601 	}
5602 
5603 	/*
5604 	 * If movablecore=nn[KMG] was specified, calculate what size of
5605 	 * kernelcore that corresponds so that memory usable for
5606 	 * any allocation type is evenly spread. If both kernelcore
5607 	 * and movablecore are specified, then the value of kernelcore
5608 	 * will be used for required_kernelcore if it's greater than
5609 	 * what movablecore would have allowed.
5610 	 */
5611 	if (required_movablecore) {
5612 		unsigned long corepages;
5613 
5614 		/*
5615 		 * Round-up so that ZONE_MOVABLE is at least as large as what
5616 		 * was requested by the user
5617 		 */
5618 		required_movablecore =
5619 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
5620 		corepages = totalpages - required_movablecore;
5621 
5622 		required_kernelcore = max(required_kernelcore, corepages);
5623 	}
5624 
5625 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
5626 	if (!required_kernelcore)
5627 		goto out;
5628 
5629 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
5630 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5631 
5632 restart:
5633 	/* Spread kernelcore memory as evenly as possible throughout nodes */
5634 	kernelcore_node = required_kernelcore / usable_nodes;
5635 	for_each_node_state(nid, N_MEMORY) {
5636 		unsigned long start_pfn, end_pfn;
5637 
5638 		/*
5639 		 * Recalculate kernelcore_node if the division per node
5640 		 * now exceeds what is necessary to satisfy the requested
5641 		 * amount of memory for the kernel
5642 		 */
5643 		if (required_kernelcore < kernelcore_node)
5644 			kernelcore_node = required_kernelcore / usable_nodes;
5645 
5646 		/*
5647 		 * As the map is walked, we track how much memory is usable
5648 		 * by the kernel using kernelcore_remaining. When it is
5649 		 * 0, the rest of the node is usable by ZONE_MOVABLE
5650 		 */
5651 		kernelcore_remaining = kernelcore_node;
5652 
5653 		/* Go through each range of PFNs within this node */
5654 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5655 			unsigned long size_pages;
5656 
5657 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
5658 			if (start_pfn >= end_pfn)
5659 				continue;
5660 
5661 			/* Account for what is only usable for kernelcore */
5662 			if (start_pfn < usable_startpfn) {
5663 				unsigned long kernel_pages;
5664 				kernel_pages = min(end_pfn, usable_startpfn)
5665 								- start_pfn;
5666 
5667 				kernelcore_remaining -= min(kernel_pages,
5668 							kernelcore_remaining);
5669 				required_kernelcore -= min(kernel_pages,
5670 							required_kernelcore);
5671 
5672 				/* Continue if range is now fully accounted */
5673 				if (end_pfn <= usable_startpfn) {
5674 
5675 					/*
5676 					 * Push zone_movable_pfn to the end so
5677 					 * that if we have to rebalance
5678 					 * kernelcore across nodes, we will
5679 					 * not double account here
5680 					 */
5681 					zone_movable_pfn[nid] = end_pfn;
5682 					continue;
5683 				}
5684 				start_pfn = usable_startpfn;
5685 			}
5686 
5687 			/*
5688 			 * The usable PFN range for ZONE_MOVABLE is from
5689 			 * start_pfn->end_pfn. Calculate size_pages as the
5690 			 * number of pages used as kernelcore
5691 			 */
5692 			size_pages = end_pfn - start_pfn;
5693 			if (size_pages > kernelcore_remaining)
5694 				size_pages = kernelcore_remaining;
5695 			zone_movable_pfn[nid] = start_pfn + size_pages;
5696 
5697 			/*
5698 			 * Some kernelcore has been met, update counts and
5699 			 * break if the kernelcore for this node has been
5700 			 * satisfied
5701 			 */
5702 			required_kernelcore -= min(required_kernelcore,
5703 								size_pages);
5704 			kernelcore_remaining -= size_pages;
5705 			if (!kernelcore_remaining)
5706 				break;
5707 		}
5708 	}
5709 
5710 	/*
5711 	 * If there is still required_kernelcore, we do another pass with one
5712 	 * less node in the count. This will push zone_movable_pfn[nid] further
5713 	 * along on the nodes that still have memory until kernelcore is
5714 	 * satisfied
5715 	 */
5716 	usable_nodes--;
5717 	if (usable_nodes && required_kernelcore > usable_nodes)
5718 		goto restart;
5719 
5720 out2:
5721 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5722 	for (nid = 0; nid < MAX_NUMNODES; nid++)
5723 		zone_movable_pfn[nid] =
5724 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5725 
5726 out:
5727 	/* restore the node_state */
5728 	node_states[N_MEMORY] = saved_node_state;
5729 }
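
/*
 * A worked example of the spreading above (a sketch with illustrative
 * numbers): booting with kernelcore=2G on four nodes of 4GiB each
 * gives usable_nodes == 4, so kernelcore_node starts at 512MiB worth
 * of pages per node.  Each node donates its first 512MiB of pfns to
 * the kernel zones, zone_movable_pfn[nid] lands 512MiB into each
 * node, and the remaining 3.5GiB per node goes to ZONE_MOVABLE.
 */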
5730 
5731 /* Any regular or high memory on that node ? */
5732 static void check_for_memory(pg_data_t *pgdat, int nid)
5733 {
5734 	enum zone_type zone_type;
5735 
5736 	if (N_MEMORY == N_NORMAL_MEMORY)
5737 		return;
5738 
5739 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5740 		struct zone *zone = &pgdat->node_zones[zone_type];
5741 		if (populated_zone(zone)) {
5742 			node_set_state(nid, N_HIGH_MEMORY);
5743 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5744 			    zone_type <= ZONE_NORMAL)
5745 				node_set_state(nid, N_NORMAL_MEMORY);
5746 			break;
5747 		}
5748 	}
5749 }
5750 
5751 /**
5752  * free_area_init_nodes - Initialise all pg_data_t and zone data
5753  * @max_zone_pfn: an array of max PFNs for each zone
5754  *
5755  * This will call free_area_init_node() for each active node in the system.
5756  * Using the page ranges provided by memblock_set_node(), the size of each
5757  * zone in each node and its holes is calculated. If the maximum PFNs
5758  * of two adjacent zones match, the higher zone is assumed to be empty.
5759  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5760  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5761  * starts where the previous one ended. For example, ZONE_DMA32 starts
5762  * at arch_max_dma_pfn.
5763  */
5764 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5765 {
5766 	unsigned long start_pfn, end_pfn;
5767 	int i, nid;
5768 
5769 	/* Record where the zone boundaries are */
5770 	memset(arch_zone_lowest_possible_pfn, 0,
5771 				sizeof(arch_zone_lowest_possible_pfn));
5772 	memset(arch_zone_highest_possible_pfn, 0,
5773 				sizeof(arch_zone_highest_possible_pfn));
5774 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5775 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5776 	for (i = 1; i < MAX_NR_ZONES; i++) {
5777 		if (i == ZONE_MOVABLE)
5778 			continue;
5779 		arch_zone_lowest_possible_pfn[i] =
5780 			arch_zone_highest_possible_pfn[i-1];
5781 		arch_zone_highest_possible_pfn[i] =
5782 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5783 	}
5784 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5785 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5786 
5787 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
5788 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5789 	find_zone_movable_pfns_for_nodes();
5790 
5791 	/* Print out the zone ranges */
5792 	pr_info("Zone ranges:\n");
5793 	for (i = 0; i < MAX_NR_ZONES; i++) {
5794 		if (i == ZONE_MOVABLE)
5795 			continue;
5796 		pr_info("  %-8s ", zone_names[i]);
5797 		if (arch_zone_lowest_possible_pfn[i] ==
5798 				arch_zone_highest_possible_pfn[i])
5799 			pr_cont("empty\n");
5800 		else
5801 			pr_cont("[mem %#018Lx-%#018Lx]\n",
5802 				(u64)arch_zone_lowest_possible_pfn[i]
5803 					<< PAGE_SHIFT,
5804 				((u64)arch_zone_highest_possible_pfn[i]
5805 					<< PAGE_SHIFT) - 1);
5806 	}
5807 
5808 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
5809 	pr_info("Movable zone start for each node\n");
5810 	for (i = 0; i < MAX_NUMNODES; i++) {
5811 		if (zone_movable_pfn[i])
5812 			pr_info("  Node %d: %#018Lx\n", i,
5813 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
5814 	}
5815 
5816 	/* Print out the early node map */
5817 	pr_info("Early memory node ranges\n");
5818 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5819 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5820 			(u64)start_pfn << PAGE_SHIFT,
5821 			((u64)end_pfn << PAGE_SHIFT) - 1);
5822 
5823 	/* Initialise every node */
5824 	mminit_verify_pageflags_layout();
5825 	setup_nr_node_ids();
5826 	for_each_online_node(nid) {
5827 		pg_data_t *pgdat = NODE_DATA(nid);
5828 		free_area_init_node(nid, NULL,
5829 				find_min_pfn_for_node(nid), NULL);
5830 
5831 		/* Any memory on that node */
5832 		if (pgdat->node_present_pages)
5833 			node_set_state(nid, N_MEMORY);
5834 		check_for_memory(pgdat, nid);
5835 	}
5836 }
5837 
5838 static int __init cmdline_parse_core(char *p, unsigned long *core)
5839 {
5840 	unsigned long long coremem;
5841 	if (!p)
5842 		return -EINVAL;
5843 
5844 	coremem = memparse(p, &p);
5845 	*core = coremem >> PAGE_SHIFT;
5846 
5847 	/* Paranoid check that UL is enough for the coremem value */
5848 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5849 
5850 	return 0;
5851 }
5852 
5853 /*
5854  * kernelcore=size sets the amount of memory for use for allocations that
5855  * cannot be reclaimed or migrated.
5856  */
5857 static int __init cmdline_parse_kernelcore(char *p)
5858 {
5859 	return cmdline_parse_core(p, &required_kernelcore);
5860 }
5861 
5862 /*
5863  * movablecore=size sets the amount of memory for use for allocations that
5864  * can be reclaimed or migrated.
5865  */
5866 static int __init cmdline_parse_movablecore(char *p)
5867 {
5868 	return cmdline_parse_core(p, &required_movablecore);
5869 }
5870 
5871 early_param("kernelcore", cmdline_parse_kernelcore);
5872 early_param("movablecore", cmdline_parse_movablecore);
5873 
5874 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5875 
5876 void adjust_managed_page_count(struct page *page, long count)
5877 {
5878 	spin_lock(&managed_page_count_lock);
5879 	page_zone(page)->managed_pages += count;
5880 	totalram_pages += count;
5881 #ifdef CONFIG_HIGHMEM
5882 	if (PageHighMem(page))
5883 		totalhigh_pages += count;
5884 #endif
5885 	spin_unlock(&managed_page_count_lock);
5886 }
5887 EXPORT_SYMBOL(adjust_managed_page_count);
5888 
5889 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5890 {
5891 	void *pos;
5892 	unsigned long pages = 0;
5893 
5894 	start = (void *)PAGE_ALIGN((unsigned long)start);
5895 	end = (void *)((unsigned long)end & PAGE_MASK);
5896 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5897 		if ((unsigned int)poison <= 0xFF)
5898 			memset(pos, poison, PAGE_SIZE);
5899 		free_reserved_page(virt_to_page(pos));
5900 	}
5901 
5902 	if (pages && s)
5903 		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
5904 			s, pages << (PAGE_SHIFT - 10), start, end);
5905 
5906 	return pages;
5907 }
5908 EXPORT_SYMBOL(free_reserved_area);
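
/*
 * The typical use is freeing the kernel's .init sections once boot
 * has finished; a minimal sketch (the poison value and label are
 * illustrative):
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * Each page of the page-aligned range is optionally poisoned and
 * then handed to the buddy allocator via free_reserved_page().
 */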
5909 
5910 #ifdef	CONFIG_HIGHMEM
5911 void free_highmem_page(struct page *page)
5912 {
5913 	__free_reserved_page(page);
5914 	totalram_pages++;
5915 	page_zone(page)->managed_pages++;
5916 	totalhigh_pages++;
5917 }
5918 #endif
5919 
5920 
5921 void __init mem_init_print_info(const char *str)
5922 {
5923 	unsigned long physpages, codesize, datasize, rosize, bss_size;
5924 	unsigned long init_code_size, init_data_size;
5925 
5926 	physpages = get_num_physpages();
5927 	codesize = _etext - _stext;
5928 	datasize = _edata - _sdata;
5929 	rosize = __end_rodata - __start_rodata;
5930 	bss_size = __bss_stop - __bss_start;
5931 	init_data_size = __init_end - __init_begin;
5932 	init_code_size = _einittext - _sinittext;
5933 
5934 	/*
5935 	 * Detect special cases and adjust section sizes accordingly:
5936 	 * 1) .init.* may be embedded into .data sections
5937 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
5938 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
5939 	 * 3) .rodata.* may be embedded into .text or .data sections.
5940 	 */
5941 #define adj_init_size(start, end, size, pos, adj) \
5942 	do { \
5943 		if (start <= pos && pos < end && size > adj) \
5944 			size -= adj; \
5945 	} while (0)
5946 
5947 	adj_init_size(__init_begin, __init_end, init_data_size,
5948 		     _sinittext, init_code_size);
5949 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5950 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5951 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5952 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5953 
5954 #undef	adj_init_size
5955 
5956 	pr_info("Memory: %luK/%luK available "
5957 	       "(%luK kernel code, %luK rwdata, %luK rodata, "
5958 	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
5959 #ifdef	CONFIG_HIGHMEM
5960 	       ", %luK highmem"
5961 #endif
5962 	       "%s%s)\n",
5963 	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5964 	       codesize >> 10, datasize >> 10, rosize >> 10,
5965 	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
5966 	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5967 	       totalcma_pages << (PAGE_SHIFT-10),
5968 #ifdef	CONFIG_HIGHMEM
5969 	       totalhigh_pages << (PAGE_SHIFT-10),
5970 #endif
5971 	       str ? ", " : "", str ? str : "");
5972 }
5973 
5974 /**
5975  * set_dma_reserve - set the specified number of pages reserved in the first zone
5976  * @new_dma_reserve: The number of pages to mark reserved
5977  *
5978  * The per-cpu batchsize and zone watermarks are determined by present_pages.
5979  * In the DMA zone, a significant percentage may be consumed by kernel image
5980  * and other unfreeable allocations which can skew the watermarks badly. This
5981  * function may optionally be used to account for unfreeable pages in the
5982  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5983  * smaller per-cpu batchsize.
5984  */
5985 void __init set_dma_reserve(unsigned long new_dma_reserve)
5986 {
5987 	dma_reserve = new_dma_reserve;
5988 }
5989 
5990 void __init free_area_init(unsigned long *zones_size)
5991 {
5992 	free_area_init_node(0, zones_size,
5993 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5994 }
5995 
5996 static int page_alloc_cpu_notify(struct notifier_block *self,
5997 				 unsigned long action, void *hcpu)
5998 {
5999 	int cpu = (unsigned long)hcpu;
6000 
6001 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
6002 		lru_add_drain_cpu(cpu);
6003 		drain_pages(cpu);
6004 
6005 		/*
6006 		 * Spill the event counters of the dead processor
6007 		 * into the current processor's event counters.
6008 		 * This artificially elevates the count of the current
6009 		 * processor.
6010 		 */
6011 		vm_events_fold_cpu(cpu);
6012 
6013 		/*
6014 		 * Zero the differential counters of the dead processor
6015 		 * so that the vm statistics are consistent.
6016 		 *
6017 		 * This is only okay since the processor is dead and cannot
6018 		 * race with what we are doing.
6019 		 */
6020 		cpu_vm_stats_fold(cpu);
6021 	}
6022 	return NOTIFY_OK;
6023 }
6024 
6025 void __init page_alloc_init(void)
6026 {
6027 	hotcpu_notifier(page_alloc_cpu_notify, 0);
6028 }
6029 
6030 /*
6031  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6032  *	or min_free_kbytes changes.
6033  */
6034 static void calculate_totalreserve_pages(void)
6035 {
6036 	struct pglist_data *pgdat;
6037 	unsigned long reserve_pages = 0;
6038 	enum zone_type i, j;
6039 
6040 	for_each_online_pgdat(pgdat) {
6041 		for (i = 0; i < MAX_NR_ZONES; i++) {
6042 			struct zone *zone = pgdat->node_zones + i;
6043 			long max = 0;
6044 
6045 			/* Find valid and maximum lowmem_reserve in the zone */
6046 			for (j = i; j < MAX_NR_ZONES; j++) {
6047 				if (zone->lowmem_reserve[j] > max)
6048 					max = zone->lowmem_reserve[j];
6049 			}
6050 
6051 			/* we treat the high watermark as reserved pages. */
6052 			max += high_wmark_pages(zone);
6053 
6054 			if (max > zone->managed_pages)
6055 				max = zone->managed_pages;
6056 			reserve_pages += max;
6057 			/*
6058 			 * Lowmem reserves are not available to
6059 			 * GFP_HIGHUSER page cache allocations and
6060 			 * kswapd tries to balance zones to their high
6061 			 * watermark.  As a result, neither should be
6062 			 * regarded as dirtyable memory, to prevent a
6063 			 * situation where reclaim has to clean pages
6064 			 * in order to balance the zones.
6065 			 */
6066 			zone->dirty_balance_reserve = max;
6067 		}
6068 	}
6069 	dirty_balance_reserve = reserve_pages;
6070 	totalreserve_pages = reserve_pages;
6071 }
6072 
6073 /*
6074  * setup_per_zone_lowmem_reserve - called whenever
6075  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
6076  *	has a correct reserved-pages value, so an adequate number of
6077  *	pages are left in the zone after a successful __alloc_pages().
6078  */
6079 static void setup_per_zone_lowmem_reserve(void)
6080 {
6081 	struct pglist_data *pgdat;
6082 	enum zone_type j, idx;
6083 
6084 	for_each_online_pgdat(pgdat) {
6085 		for (j = 0; j < MAX_NR_ZONES; j++) {
6086 			struct zone *zone = pgdat->node_zones + j;
6087 			unsigned long managed_pages = zone->managed_pages;
6088 
6089 			zone->lowmem_reserve[j] = 0;
6090 
6091 			idx = j;
6092 			while (idx) {
6093 				struct zone *lower_zone;
6094 
6095 				idx--;
6096 
6097 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
6098 					sysctl_lowmem_reserve_ratio[idx] = 1;
6099 
6100 				lower_zone = pgdat->node_zones + idx;
6101 				lower_zone->lowmem_reserve[j] = managed_pages /
6102 					sysctl_lowmem_reserve_ratio[idx];
6103 				managed_pages += lower_zone->managed_pages;
6104 			}
6105 		}
6106 	}
6107 
6108 	/* update totalreserve_pages */
6109 	calculate_totalreserve_pages();
6110 }
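
/*
 * A worked example of the loop above (a sketch): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and a 4GiB ZONE_NORMAL
 * (1048576 pages) above a small ZONE_DMA, ZONE_DMA ends up with
 * lowmem_reserve[ZONE_NORMAL] == 1048576 / 256 == 4096 pages, i.e.
 * 16MiB of DMA memory is kept out of reach of allocations that could
 * have been satisfied from ZONE_NORMAL.
 */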
6111 
6112 static void __setup_per_zone_wmarks(void)
6113 {
6114 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6115 	unsigned long lowmem_pages = 0;
6116 	struct zone *zone;
6117 	unsigned long flags;
6118 
6119 	/* Calculate total number of !ZONE_HIGHMEM pages */
6120 	for_each_zone(zone) {
6121 		if (!is_highmem(zone))
6122 			lowmem_pages += zone->managed_pages;
6123 	}
6124 
6125 	for_each_zone(zone) {
6126 		u64 tmp;
6127 
6128 		spin_lock_irqsave(&zone->lock, flags);
6129 		tmp = (u64)pages_min * zone->managed_pages;
6130 		do_div(tmp, lowmem_pages);
6131 		if (is_highmem(zone)) {
6132 			/*
6133 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6134 			 * need highmem pages, so cap pages_min to a small
6135 			 * value here.
6136 			 *
6137 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6138 			 * deltas control asynchronous page reclaim, and so should
6139 			 * not be capped for highmem.
6140 			 */
6141 			unsigned long min_pages;
6142 
6143 			min_pages = zone->managed_pages / 1024;
6144 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6145 			zone->watermark[WMARK_MIN] = min_pages;
6146 		} else {
6147 			/*
6148 			 * If it's a lowmem zone, reserve a number of pages
6149 			 * proportionate to the zone's size.
6150 			 */
6151 			zone->watermark[WMARK_MIN] = tmp;
6152 		}
6153 
6154 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
6155 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
6156 
6157 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
6158 			high_wmark_pages(zone) - low_wmark_pages(zone) -
6159 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
6160 
6161 		setup_zone_migrate_reserve(zone);
6162 		spin_unlock_irqrestore(&zone->lock, flags);
6163 	}
6164 
6165 	/* update totalreserve_pages */
6166 	calculate_totalreserve_pages();
6167 }
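
/*
 * A worked example of the per-zone arithmetic above (a sketch):
 * with min_free_kbytes == 4096 and 4KiB pages, pages_min == 1024.
 * For a lowmem zone holding half of all lowmem pages, tmp == 512:
 *
 *	WMARK_MIN  = 512
 *	WMARK_LOW  = 512 + (512 >> 2) = 640
 *	WMARK_HIGH = 512 + (512 >> 1) = 768
 */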
6168 
6169 /**
6170  * setup_per_zone_wmarks - called when min_free_kbytes changes
6171  * or when memory is hot-{added|removed}
6172  *
6173  * Ensures that the watermark[min,low,high] values for each zone are set
6174  * correctly with respect to min_free_kbytes.
6175  */
6176 void setup_per_zone_wmarks(void)
6177 {
6178 	mutex_lock(&zonelists_mutex);
6179 	__setup_per_zone_wmarks();
6180 	mutex_unlock(&zonelists_mutex);
6181 }
6182 
6183 /*
6184  * The inactive anon list should be small enough that the VM never has to
6185  * do too much work, but large enough that each inactive page has a chance
6186  * to be referenced again before it is swapped out.
6187  *
6188  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6189  * INACTIVE_ANON pages on this zone's LRU, maintained by the
6190  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6191  * the anonymous pages are kept on the inactive list.
6192  *
6193  * total     target    max
6194  * memory    ratio     inactive anon
6195  * -------------------------------------
6196  *   10MB       1         5MB
6197  *  100MB       1        50MB
6198  *    1GB       3       250MB
6199  *   10GB      10       0.9GB
6200  *  100GB      31         3GB
6201  *    1TB     101        10GB
6202  *   10TB     320        32GB
6203  */
6204 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
6205 {
6206 	unsigned int gb, ratio;
6207 
6208 	/* Zone size in gigabytes */
6209 	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
6210 	if (gb)
6211 		ratio = int_sqrt(10 * gb);
6212 	else
6213 		ratio = 1;
6214 
6215 	zone->inactive_ratio = ratio;
6216 }
6217 
6218 static void __meminit setup_per_zone_inactive_ratio(void)
6219 {
6220 	struct zone *zone;
6221 
6222 	for_each_zone(zone)
6223 		calculate_zone_inactive_ratio(zone);
6224 }
6225 
6226 /*
6227  * Initialise min_free_kbytes.
6228  *
6229  * For small machines we want it small (128k min).  For large machines
6230  * we want it large (64MB max).  But it is not linear, because network
6231  * bandwidth does not increase linearly with machine size.  We use
6232  *
6233  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6234  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
6235  *
6236  * which yields
6237  *
6238  * 16MB:	512k
6239  * 32MB:	724k
6240  * 64MB:	1024k
6241  * 128MB:	1448k
6242  * 256MB:	2048k
6243  * 512MB:	2896k
6244  * 1024MB:	4096k
6245  * 2048MB:	5792k
6246  * 4096MB:	8192k
6247  * 8192MB:	11584k
6248  * 16384MB:	16384k
6249  */
6250 int __meminit init_per_zone_wmark_min(void)
6251 {
6252 	unsigned long lowmem_kbytes;
6253 	int new_min_free_kbytes;
6254 
6255 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6256 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6257 
6258 	if (new_min_free_kbytes > user_min_free_kbytes) {
6259 		min_free_kbytes = new_min_free_kbytes;
6260 		if (min_free_kbytes < 128)
6261 			min_free_kbytes = 128;
6262 		if (min_free_kbytes > 65536)
6263 			min_free_kbytes = 65536;
6264 	} else {
6265 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6266 				new_min_free_kbytes, user_min_free_kbytes);
6267 	}
6268 	setup_per_zone_wmarks();
6269 	refresh_zone_stat_thresholds();
6270 	setup_per_zone_lowmem_reserve();
6271 	setup_per_zone_inactive_ratio();
6272 	return 0;
6273 }
6274 module_init(init_per_zone_wmark_min)
6275 
6276 /*
6277  * min_free_kbytes_sysctl_handler - just a wrapper around
6278  *	proc_dointvec_minmax() so that we can rebuild the zone watermarks
6279  *	whenever min_free_kbytes changes.
6280  */
6281 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
6282 	void __user *buffer, size_t *length, loff_t *ppos)
6283 {
6284 	int rc;
6285 
6286 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6287 	if (rc)
6288 		return rc;
6289 
6290 	if (write) {
6291 		user_min_free_kbytes = min_free_kbytes;
6292 		setup_per_zone_wmarks();
6293 	}
6294 	return 0;
6295 }
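
/*
 * The handler above services /proc/sys/vm/min_free_kbytes, e.g.
 * (illustrative value):
 *
 *	# echo 65536 > /proc/sys/vm/min_free_kbytes
 *
 * which records the value in user_min_free_kbytes and immediately
 * recomputes every zone's min/low/high watermarks.
 */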
6296 
6297 #ifdef CONFIG_NUMA
6298 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
6299 	void __user *buffer, size_t *length, loff_t *ppos)
6300 {
6301 	struct zone *zone;
6302 	int rc;
6303 
6304 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6305 	if (rc)
6306 		return rc;
6307 
6308 	for_each_zone(zone)
6309 		zone->min_unmapped_pages = (zone->managed_pages *
6310 				sysctl_min_unmapped_ratio) / 100;
6311 	return 0;
6312 }
6313 
6314 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6315 	void __user *buffer, size_t *length, loff_t *ppos)
6316 {
6317 	struct zone *zone;
6318 	int rc;
6319 
6320 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6321 	if (rc)
6322 		return rc;
6323 
6324 	for_each_zone(zone)
6325 		zone->min_slab_pages = (zone->managed_pages *
6326 				sysctl_min_slab_ratio) / 100;
6327 	return 0;
6328 }
6329 #endif
6330 
6331 /*
6332  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6333  *	proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
6334  *	whenever sysctl_lowmem_reserve_ratio changes.
6335  *
6336  * The reserve ratio obviously has absolutely no relation with the
6337  * minimum watermarks. The lowmem reserve ratio only makes sense
6338  * as a function of the boot-time zone sizes.
6339  */
6340 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
6341 	void __user *buffer, size_t *length, loff_t *ppos)
6342 {
6343 	proc_dointvec_minmax(table, write, buffer, length, ppos);
6344 	setup_per_zone_lowmem_reserve();
6345 	return 0;
6346 }
6347 
6348 /*
6349  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
6350  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
6351  * pagelist can have before it gets flushed back to buddy allocator.
6352  */
6353 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
6354 	void __user *buffer, size_t *length, loff_t *ppos)
6355 {
6356 	struct zone *zone;
6357 	int old_percpu_pagelist_fraction;
6358 	int ret;
6359 
6360 	mutex_lock(&pcp_batch_high_lock);
6361 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6362 
6363 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6364 	if (!write || ret < 0)
6365 		goto out;
6366 
6367 	/* Sanity checking to avoid pcp imbalance */
6368 	if (percpu_pagelist_fraction &&
6369 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6370 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6371 		ret = -EINVAL;
6372 		goto out;
6373 	}
6374 
6375 	/* No change? */
6376 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6377 		goto out;
6378 
6379 	for_each_populated_zone(zone) {
6380 		unsigned int cpu;
6381 
6382 		for_each_possible_cpu(cpu)
6383 			pageset_set_high_and_batch(zone,
6384 					per_cpu_ptr(zone->pageset, cpu));
6385 	}
6386 out:
6387 	mutex_unlock(&pcp_batch_high_lock);
6388 	return ret;
6389 }
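
/*
 * The handler above services /proc/sys/vm/percpu_pagelist_fraction,
 * e.g. (illustrative value):
 *
 *	# echo 8 > /proc/sys/vm/percpu_pagelist_fraction
 *
 * which caps each per-cpu pagelist at 1/8th of its zone - the
 * smallest fraction MIN_PERCPU_PAGELIST_FRACTION permits.
 */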
6390 
6391 #ifdef CONFIG_NUMA
6392 int hashdist = HASHDIST_DEFAULT;
6393 
6394 static int __init set_hashdist(char *str)
6395 {
6396 	if (!str)
6397 		return 0;
6398 	hashdist = simple_strtoul(str, &str, 0);
6399 	return 1;
6400 }
6401 __setup("hashdist=", set_hashdist);
6402 #endif
6403 
6404 /*
6405  * allocate a large system hash table from bootmem
6406  * - it is assumed that the hash table must contain an exact power-of-2
6407  *   quantity of entries
6408  * - limit is the number of hash buckets, not the total allocation size
6409  */
6410 void *__init alloc_large_system_hash(const char *tablename,
6411 				     unsigned long bucketsize,
6412 				     unsigned long numentries,
6413 				     int scale,
6414 				     int flags,
6415 				     unsigned int *_hash_shift,
6416 				     unsigned int *_hash_mask,
6417 				     unsigned long low_limit,
6418 				     unsigned long high_limit)
6419 {
6420 	unsigned long long max = high_limit;
6421 	unsigned long log2qty, size;
6422 	void *table = NULL;
6423 
6424 	/* allow the kernel cmdline to have a say */
6425 	if (!numentries) {
6426 		/* round applicable memory size up to nearest megabyte */
6427 		numentries = nr_kernel_pages;
6428 
6429 		/* It isn't necessary when PAGE_SIZE >= 1MB */
6430 		if (PAGE_SHIFT < 20)
6431 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6432 
6433 		/* limit to 1 bucket per 2^scale bytes of low memory */
6434 		if (scale > PAGE_SHIFT)
6435 			numentries >>= (scale - PAGE_SHIFT);
6436 		else
6437 			numentries <<= (PAGE_SHIFT - scale);
6438 
6439 		/* Make sure we've got at least a 0-order allocation. */
6440 		if (unlikely(flags & HASH_SMALL)) {
6441 			/* Makes no sense without HASH_EARLY */
6442 			WARN_ON(!(flags & HASH_EARLY));
6443 			if (!(numentries >> *_hash_shift)) {
6444 				numentries = 1UL << *_hash_shift;
6445 				BUG_ON(!numentries);
6446 			}
6447 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6448 			numentries = PAGE_SIZE / bucketsize;
6449 	}
6450 	numentries = roundup_pow_of_two(numentries);
6451 
6452 	/* limit allocation size to 1/16 total memory by default */
6453 	if (max == 0) {
6454 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6455 		do_div(max, bucketsize);
6456 	}
6457 	max = min(max, 0x80000000ULL);
6458 
6459 	if (numentries < low_limit)
6460 		numentries = low_limit;
6461 	if (numentries > max)
6462 		numentries = max;
6463 
6464 	log2qty = ilog2(numentries);
6465 
6466 	do {
6467 		size = bucketsize << log2qty;
6468 		if (flags & HASH_EARLY)
6469 			table = memblock_virt_alloc_nopanic(size, 0);
6470 		else if (hashdist)
6471 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6472 		else {
6473 			/*
6474 			 * If bucketsize is not a power-of-two, we may free
6475 			 * some pages at the end of the hash table, which
6476 			 * alloc_pages_exact() does automatically.
6477 			 */
6478 			if (get_order(size) < MAX_ORDER) {
6479 				table = alloc_pages_exact(size, GFP_ATOMIC);
6480 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6481 			}
6482 		}
6483 	} while (!table && size > PAGE_SIZE && --log2qty);
6484 
6485 	if (!table)
6486 		panic("Failed to allocate %s hash table\n", tablename);
6487 
6488 	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
6489 	       tablename,
6490 	       (1UL << log2qty),
6491 	       ilog2(size) - PAGE_SHIFT,
6492 	       size);
6493 
6494 	if (_hash_shift)
6495 		*_hash_shift = log2qty;
6496 	if (_hash_mask)
6497 		*_hash_mask = (1 << log2qty) - 1;
6498 
6499 	return table;
6500 }
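
/*
 * A sketch of a typical early-boot caller, modeled on the dentry
 * hash setup (the exact arguments here are illustrative):
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13, HASH_EARLY,
 *					&d_hash_shift, &d_hash_mask, 0, 0);
 *
 * scale == 13 limits the table to one bucket per 8KiB of low memory,
 * and the returned shift/mask feed the caller's hash function.
 */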
6501 
6502 /* Return a pointer to the bitmap storing bits affecting a block of pages */
6503 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6504 							unsigned long pfn)
6505 {
6506 #ifdef CONFIG_SPARSEMEM
6507 	return __pfn_to_section(pfn)->pageblock_flags;
6508 #else
6509 	return zone->pageblock_flags;
6510 #endif /* CONFIG_SPARSEMEM */
6511 }
6512 
6513 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6514 {
6515 #ifdef CONFIG_SPARSEMEM
6516 	pfn &= (PAGES_PER_SECTION-1);
6517 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6518 #else
6519 	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
6520 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6521 #endif /* CONFIG_SPARSEMEM */
6522 }
6523 
6524 /**
6525  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6526  * @page: The page within the block of interest
6527  * @pfn: The target page frame number
6528  * @end_bitidx: The last bit of interest to retrieve
6529  * @mask: mask of bits that the caller is interested in
6530  *
6531  * Return: pageblock_bits flags
6532  */
6533 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6534 					unsigned long end_bitidx,
6535 					unsigned long mask)
6536 {
6537 	struct zone *zone;
6538 	unsigned long *bitmap;
6539 	unsigned long bitidx, word_bitidx;
6540 	unsigned long word;
6541 
6542 	zone = page_zone(page);
6543 	bitmap = get_pageblock_bitmap(zone, pfn);
6544 	bitidx = pfn_to_bitidx(zone, pfn);
6545 	word_bitidx = bitidx / BITS_PER_LONG;
6546 	bitidx &= (BITS_PER_LONG-1);
6547 
6548 	word = bitmap[word_bitidx];
6549 	bitidx += end_bitidx;
6550 	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
6551 }
6552 
6553 /**
6554  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6555  * @page: The page within the block of interest
6556  * @flags: The flags to set
6557  * @pfn: The target page frame number
6558  * @end_bitidx: The last bit of interest
6559  * @mask: mask of bits that the caller is interested in
6560  */
6561 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6562 					unsigned long pfn,
6563 					unsigned long end_bitidx,
6564 					unsigned long mask)
6565 {
6566 	struct zone *zone;
6567 	unsigned long *bitmap;
6568 	unsigned long bitidx, word_bitidx;
6569 	unsigned long old_word, word;
6570 
6571 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
6572 
6573 	zone = page_zone(page);
6574 	bitmap = get_pageblock_bitmap(zone, pfn);
6575 	bitidx = pfn_to_bitidx(zone, pfn);
6576 	word_bitidx = bitidx / BITS_PER_LONG;
6577 	bitidx &= (BITS_PER_LONG-1);
6578 
6579 	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
6580 
6581 	bitidx += end_bitidx;
6582 	mask <<= (BITS_PER_LONG - bitidx - 1);
6583 	flags <<= (BITS_PER_LONG - bitidx - 1);
6584 
6585 	word = READ_ONCE(bitmap[word_bitidx]);
6586 	for (;;) {
6587 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6588 		if (word == old_word)
6589 			break;
6590 		word = old_word;
6591 	}
6592 }
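
/*
 * A worked example of the bit arithmetic shared by the two helpers
 * above (a sketch assuming pageblock_order == 9, NR_PAGEBLOCK_BITS
 * == 4, 64-bit longs and no SPARSEMEM): for pfn 0x2600 in a zone
 * starting at pfn 0,
 *
 *	bitidx      = (0x2600 >> 9) * 4 = 76
 *	word_bitidx = 76 / 64 = 1
 *	bitidx     &= 63	-> 12
 *
 * so the block's four flag bits live in bitmap[1], and the
 * end_bitidx/mask shifts then extract (or cmpxchg-update) only the
 * requested bits of that word.
 */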
6593 
6594 /*
6595  * This function checks whether a pageblock includes unmovable pages or not.
6596  * If @count is not zero, it is okay to include up to @count unmovable pages.
6597  *
6598  * The PageLRU check without isolation or lru_lock could race, so a
6599  * MIGRATE_MOVABLE block might include unmovable pages. This means you can't
6600  * expect this function to be exact.
6601  */
6602 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6603 			 bool skip_hwpoisoned_pages)
6604 {
6605 	unsigned long pfn, iter, found;
6606 	int mt;
6607 
6608 	/*
6609 	 * To avoid noisy data, lru_add_drain_all() should be called first.
6610 	 * If this is ZONE_MOVABLE, the zone never contains unmovable pages.
6611 	 */
6612 	if (zone_idx(zone) == ZONE_MOVABLE)
6613 		return false;
6614 	mt = get_pageblock_migratetype(page);
6615 	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
6616 		return false;
6617 
6618 	pfn = page_to_pfn(page);
6619 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6620 		unsigned long check = pfn + iter;
6621 
6622 		if (!pfn_valid_within(check))
6623 			continue;
6624 
6625 		page = pfn_to_page(check);
6626 
6627 		/*
6628 		 * Hugepages are not in LRU lists, but they're movable.
6629 		 * We need not scan over tail pages because we don't
6630 		 * handle each tail page individually in migration.
6631 		 */
6632 		if (PageHuge(page)) {
6633 			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6634 			continue;
6635 		}
6636 
6637 		/*
6638 		 * We can't use page_count without pinning the page
6639 		 * because another CPU can free the compound page.
6640 		 * This check already skips compound tails of THP
6641 		 * because their page->_count is zero at all times.
6642 		 */
6643 		if (!atomic_read(&page->_count)) {
6644 			if (PageBuddy(page))
6645 				iter += (1 << page_order(page)) - 1;
6646 			continue;
6647 		}
6648 
6649 		/*
6650 		 * The HWPoisoned page may not be in the buddy system, and
6651 		 * page_count() is not 0.
6652 		 */
6653 		if (skip_hwpoisoned_pages && PageHWPoison(page))
6654 			continue;
6655 
6656 		if (!PageLRU(page))
6657 			found++;
6658 		/*
6659 		 * If there are RECLAIMABLE pages, we need to check
6660 		 * them too.  But memory offline itself doesn't call
6661 		 * shrink_node_slabs() yet, and this still needs to be fixed.
6662 		 */
6663 		/*
6664 		 * If the page is not RAM, page_count() should be 0 and
6665 		 * we need no further checks: this is a _used_, non-movable page.
6666 		 *
6667 		 * The problematic thing here is PG_reserved pages. PG_reserved
6668 		 * is set on both memory hole pages and _used_ kernel
6669 		 * pages at boot.
6670 		 */
6671 		if (found > count)
6672 			return true;
6673 	}
6674 	return false;
6675 }
6676 
6677 bool is_pageblock_removable_nolock(struct page *page)
6678 {
6679 	struct zone *zone;
6680 	unsigned long pfn;
6681 
6682 	/*
6683 	 * We have to be careful here because we are iterating over memory
6684 	 * sections which are not zone-aware, so we might end up outside
6685 	 * the zone but still within the section.
6686 	 * We also have to take care about the node: if the node is offline,
6687 	 * its NODE_DATA will be NULL - see page_zone().
6688 	 */
6689 	if (!node_online(page_to_nid(page)))
6690 		return false;
6691 
6692 	zone = page_zone(page);
6693 	pfn = page_to_pfn(page);
6694 	if (!zone_spans_pfn(zone, pfn))
6695 		return false;
6696 
6697 	return !has_unmovable_pages(zone, page, 0, true);
6698 }
6699 
6700 #ifdef CONFIG_CMA
6701 
6702 static unsigned long pfn_max_align_down(unsigned long pfn)
6703 {
6704 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6705 			     pageblock_nr_pages) - 1);
6706 }
6707 
6708 static unsigned long pfn_max_align_up(unsigned long pfn)
6709 {
6710 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6711 				pageblock_nr_pages));
6712 }
6713 
6714 /* [start, end) must belong to a single zone. */
6715 static int __alloc_contig_migrate_range(struct compact_control *cc,
6716 					unsigned long start, unsigned long end)
6717 {
6718 	/* This function is based on compact_zone() from compaction.c. */
6719 	unsigned long nr_reclaimed;
6720 	unsigned long pfn = start;
6721 	unsigned int tries = 0;
6722 	int ret = 0;
6723 
6724 	migrate_prep();
6725 
6726 	while (pfn < end || !list_empty(&cc->migratepages)) {
6727 		if (fatal_signal_pending(current)) {
6728 			ret = -EINTR;
6729 			break;
6730 		}
6731 
6732 		if (list_empty(&cc->migratepages)) {
6733 			cc->nr_migratepages = 0;
6734 			pfn = isolate_migratepages_range(cc, pfn, end);
6735 			if (!pfn) {
6736 				ret = -EINTR;
6737 				break;
6738 			}
6739 			tries = 0;
6740 		} else if (++tries == 5) {
6741 			ret = ret < 0 ? ret : -EBUSY;
6742 			break;
6743 		}
6744 
6745 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6746 							&cc->migratepages);
6747 		cc->nr_migratepages -= nr_reclaimed;
6748 
6749 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6750 				    NULL, 0, cc->mode, MR_CMA);
6751 	}
6752 	if (ret < 0) {
6753 		putback_movable_pages(&cc->migratepages);
6754 		return ret;
6755 	}
6756 	return 0;
6757 }
6758 
6759 /**
6760  * alloc_contig_range() -- tries to allocate given range of pages
6761  * @start:	start PFN to allocate
6762  * @end:	one-past-the-last PFN to allocate
6763  * @migratetype:	migratetype of the underlying pageblocks (either
6764  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
6765  *			in range must have the same migratetype and it must
6766  *			be either of the two.
6767  *
6768  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6769  * aligned, however it's the caller's responsibility to guarantee that
6770  * we are the only thread that changes migrate type of pageblocks the
6771  * pages fall in.
6772  *
6773  * The PFN range must belong to a single zone.
6774  *
6775  * Returns zero on success or negative error code.  On success all
6776  * pages whose PFN is in [start, end) are allocated for the caller and
6777  * need to be freed with free_contig_range().
6778  */
6779 int alloc_contig_range(unsigned long start, unsigned long end,
6780 		       unsigned migratetype)
6781 {
6782 	unsigned long outer_start, outer_end;
6783 	int ret = 0, order;
6784 
6785 	struct compact_control cc = {
6786 		.nr_migratepages = 0,
6787 		.order = -1,
6788 		.zone = page_zone(pfn_to_page(start)),
6789 		.mode = MIGRATE_SYNC,
6790 		.ignore_skip_hint = true,
6791 	};
6792 	INIT_LIST_HEAD(&cc.migratepages);
6793 
6794 	/*
6795 	 * What we do here is mark all pageblocks in the range as
6796 	 * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
6797 	 * have different sizes, and due to the way the page allocator
6798 	 * works, we align the range to the bigger of the two so
6799 	 * that the page allocator won't try to merge buddies from
6800 	 * different pageblocks and change MIGRATE_ISOLATE to some
6801 	 * other migration type.
6802 	 *
6803 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6804 	 * migrate the pages from the unaligned range (i.e. the pages
6805 	 * we are actually interested in).  This puts all the pages in
6806 	 * the range back into the page allocator as MIGRATE_ISOLATE.
6807 	 *
6808 	 * When this is done, we take the pages in the range from the
6809 	 * page allocator, removing them from the buddy system.  This
6810 	 * way the page allocator will never consider using them.
6811 	 *
6812 	 * This lets us mark the pageblocks back as
6813 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6814 	 * aligned range but not in the unaligned, original range are
6815 	 * put back into the page allocator for the buddy system to use.
6816 	 */
6817 
6818 	ret = start_isolate_page_range(pfn_max_align_down(start),
6819 				       pfn_max_align_up(end), migratetype,
6820 				       false);
6821 	if (ret)
6822 		return ret;
6823 
6824 	ret = __alloc_contig_migrate_range(&cc, start, end);
6825 	if (ret)
6826 		goto done;
6827 
6828 	/*
6829 	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
6830 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
6831 	 * more, all pages in [start, end) are free in the page allocator.
6832 	 * What we are going to do is allocate all pages from
6833 	 * [start, end) (that is, remove them from the page allocator).
6834 	 *
6835 	 * The only problem is that pages at the beginning and at the
6836 	 * end of the range of interest may not be aligned with the pages
6837 	 * the page allocator holds, i.e. they can be part of higher-order
6838 	 * pages.  Because of this, we reserve the bigger range and,
6839 	 * once this is done, free the pages we are not interested in.
6840 	 *
6841 	 * We don't have to hold zone->lock here because the pages are
6842 	 * isolated and thus won't get removed from the buddy system.
6843 	 */
6844 
6845 	lru_add_drain_all();
6846 	drain_all_pages(cc.zone);
6847 
6848 	order = 0;
6849 	outer_start = start;
6850 	while (!PageBuddy(pfn_to_page(outer_start))) {
6851 		if (++order >= MAX_ORDER) {
6852 			ret = -EBUSY;
6853 			goto done;
6854 		}
6855 		outer_start &= ~0UL << order;
6856 	}
6857 
6858 	/* Make sure the range is really isolated. */
6859 	if (test_pages_isolated(outer_start, end, false)) {
6860 		pr_info("%s: [%lx, %lx) PFNs busy\n",
6861 			__func__, outer_start, end);
6862 		ret = -EBUSY;
6863 		goto done;
6864 	}
6865 
6866 	/* Grab isolated pages from freelists. */
6867 	outer_end = isolate_freepages_range(&cc, outer_start, end);
6868 	if (!outer_end) {
6869 		ret = -EBUSY;
6870 		goto done;
6871 	}
6872 
6873 	/* Free head and tail (if any) */
6874 	if (start != outer_start)
6875 		free_contig_range(outer_start, start - outer_start);
6876 	if (end != outer_end)
6877 		free_contig_range(end, outer_end - end);
6878 
6879 done:
6880 	undo_isolate_page_range(pfn_max_align_down(start),
6881 				pfn_max_align_up(end), migratetype);
6882 	return ret;
6883 }
6884 
6885 void free_contig_range(unsigned long pfn, unsigned nr_pages)
6886 {
6887 	unsigned int count = 0;
6888 
6889 	for (; nr_pages--; pfn++) {
6890 		struct page *page = pfn_to_page(pfn);
6891 
6892 		count += page_count(page) != 1;
6893 		__free_page(page);
6894 	}
6895 	WARN(count != 0, "%d pages are still in use!\n", count);
6896 }
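
/*
 * A sketch of how a CMA-style user pairs the two calls above (pfn
 * and count are illustrative):
 *
 *	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 *	if (!ret) {
 *		... use the pages ...
 *		free_contig_range(pfn, count);
 *	}
 *
 * The whole [pfn, pfn + count) range must lie in one zone and in
 * pageblocks whose migratetype is already MIGRATE_CMA.
 */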
6897 #endif
6898 
6899 #ifdef CONFIG_MEMORY_HOTPLUG
6900 /*
6901  * The zone indicated has a new number of managed_pages; batch sizes and percpu
6902  * page high values need to be recalculated.
6903  */
6904 void __meminit zone_pcp_update(struct zone *zone)
6905 {
6906 	unsigned cpu;
6907 	mutex_lock(&pcp_batch_high_lock);
6908 	for_each_possible_cpu(cpu)
6909 		pageset_set_high_and_batch(zone,
6910 				per_cpu_ptr(zone->pageset, cpu));
6911 	mutex_unlock(&pcp_batch_high_lock);
6912 }
6913 #endif
6914 
6915 void zone_pcp_reset(struct zone *zone)
6916 {
6917 	unsigned long flags;
6918 	int cpu;
6919 	struct per_cpu_pageset *pset;
6920 
6921 	/* avoid races with drain_pages()  */
6922 	local_irq_save(flags);
6923 	if (zone->pageset != &boot_pageset) {
6924 		for_each_online_cpu(cpu) {
6925 			pset = per_cpu_ptr(zone->pageset, cpu);
6926 			drain_zonestat(zone, pset);
6927 		}
6928 		free_percpu(zone->pageset);
6929 		zone->pageset = &boot_pageset;
6930 	}
6931 	local_irq_restore(flags);
6932 }
6933 
6934 #ifdef CONFIG_MEMORY_HOTREMOVE
6935 /*
6936  * All pages in the range must be isolated before calling this.
6937  */
6938 void
6939 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6940 {
6941 	struct page *page;
6942 	struct zone *zone;
6943 	unsigned int order, i;
6944 	unsigned long pfn;
6945 	unsigned long flags;
6946 	/* find the first valid pfn */
6947 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
6948 		if (pfn_valid(pfn))
6949 			break;
6950 	if (pfn == end_pfn)
6951 		return;
6952 	zone = page_zone(pfn_to_page(pfn));
6953 	spin_lock_irqsave(&zone->lock, flags);
6954 	pfn = start_pfn;
6955 	while (pfn < end_pfn) {
6956 		if (!pfn_valid(pfn)) {
6957 			pfn++;
6958 			continue;
6959 		}
6960 		page = pfn_to_page(pfn);
6961 		/*
6962 		 * The HWPoisoned page may not be in the buddy system, and
6963 		 * page_count() is not 0.
6964 		 */
6965 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6966 			pfn++;
6967 			SetPageReserved(page);
6968 			continue;
6969 		}
6970 
6971 		BUG_ON(page_count(page));
6972 		BUG_ON(!PageBuddy(page));
6973 		order = page_order(page);
6974 #ifdef CONFIG_DEBUG_VM
6975 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
6976 		       pfn, 1 << order, end_pfn);
6977 #endif
6978 		list_del(&page->lru);
6979 		rmv_page_order(page);
6980 		zone->free_area[order].nr_free--;
6981 		for (i = 0; i < (1 << order); i++)
6982 			SetPageReserved((page+i));
6983 		pfn += (1 << order);
6984 	}
6985 	spin_unlock_irqrestore(&zone->lock, flags);
6986 }
6987 #endif
6988 
6989 #ifdef CONFIG_MEMORY_FAILURE
6990 bool is_free_buddy_page(struct page *page)
6991 {
6992 	struct zone *zone = page_zone(page);
6993 	unsigned long pfn = page_to_pfn(page);
6994 	unsigned long flags;
6995 	unsigned int order;
6996 
6997 	spin_lock_irqsave(&zone->lock, flags);
6998 	for (order = 0; order < MAX_ORDER; order++) {
6999 		struct page *page_head = page - (pfn & ((1 << order) - 1));
7000 
7001 		if (PageBuddy(page_head) && page_order(page_head) >= order)
7002 			break;
7003 	}
7004 	spin_unlock_irqrestore(&zone->lock, flags);
7005 
7006 	return order < MAX_ORDER;
7007 }
7008 #endif
7009