xref: /openbmc/linux/mm/page_alloc.c (revision 9b133f8d)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/module.h>
29 #include <linux/suspend.h>
30 #include <linux/pagevec.h>
31 #include <linux/blkdev.h>
32 #include <linux/slab.h>
33 #include <linux/ratelimit.h>
34 #include <linux/oom.h>
35 #include <linux/notifier.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/stop_machine.h>
46 #include <linux/sort.h>
47 #include <linux/pfn.h>
48 #include <linux/backing-dev.h>
49 #include <linux/fault-inject.h>
50 #include <linux/page-isolation.h>
51 #include <linux/page_cgroup.h>
52 #include <linux/debugobjects.h>
53 #include <linux/kmemleak.h>
54 #include <linux/memory.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <linux/ftrace_event.h>
58 #include <linux/memcontrol.h>
59 #include <linux/prefetch.h>
60 
61 #include <asm/tlbflush.h>
62 #include <asm/div64.h>
63 #include "internal.h"
64 
65 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
66 DEFINE_PER_CPU(int, numa_node);
67 EXPORT_PER_CPU_SYMBOL(numa_node);
68 #endif
69 
70 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
71 /*
72  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
73  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
74  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
75  * defined in <linux/topology.h>.
76  */
77 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
78 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
79 #endif
80 
81 /*
82  * Array of node states.
83  */
84 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
85 	[N_POSSIBLE] = NODE_MASK_ALL,
86 	[N_ONLINE] = { { [0] = 1UL } },
87 #ifndef CONFIG_NUMA
88 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
89 #ifdef CONFIG_HIGHMEM
90 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
91 #endif
92 	[N_CPU] = { { [0] = 1UL } },
93 #endif	/* NUMA */
94 };
95 EXPORT_SYMBOL(node_states);
96 
97 unsigned long totalram_pages __read_mostly;
98 unsigned long totalreserve_pages __read_mostly;
99 int percpu_pagelist_fraction;
100 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
101 
102 #ifdef CONFIG_PM_SLEEP
103 /*
104  * The following functions are used by the suspend/hibernate code to temporarily
105  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
106  * while devices are suspended.  To avoid races with the suspend/hibernate code,
107  * they should always be called with pm_mutex held (gfp_allowed_mask also should
108  * only be modified with pm_mutex held, unless the suspend/hibernate code is
109  * guaranteed not to run in parallel with that modification).
110  */
111 
112 static gfp_t saved_gfp_mask;
113 
114 void pm_restore_gfp_mask(void)
115 {
116 	WARN_ON(!mutex_is_locked(&pm_mutex));
117 	if (saved_gfp_mask) {
118 		gfp_allowed_mask = saved_gfp_mask;
119 		saved_gfp_mask = 0;
120 	}
121 }
122 
123 void pm_restrict_gfp_mask(void)
124 {
125 	WARN_ON(!mutex_is_locked(&pm_mutex));
126 	WARN_ON(saved_gfp_mask);
127 	saved_gfp_mask = gfp_allowed_mask;
128 	gfp_allowed_mask &= ~GFP_IOFS;
129 }
130 #endif /* CONFIG_PM_SLEEP */
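/*
 * Illustrative call pattern for the pair above (a sketch, not code from
 * this file): the suspend path is expected to bracket the no-I/O window
 * with these helpers, holding pm_mutex across both calls:
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();		allocations lose GFP_IOFS here
 *	... suspend devices; allocations cannot start new I/O ...
 *	pm_restore_gfp_mask();		the saved gfp_allowed_mask returns
 *	mutex_unlock(&pm_mutex);
 */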
131 
132 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
133 int pageblock_order __read_mostly;
134 #endif
135 
136 static void __free_pages_ok(struct page *page, unsigned int order);
137 
138 /*
139  * results with 256, 32 in the lowmem_reserve sysctl:
140  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
141  *	1G machine -> (16M dma, 784M normal, 224M high)
142  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
143  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
144  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
145  *
146  * TBD: should special case ZONE_DMA32 machines here - in those we normally
147  * don't need any ZONE_NORMAL reservation
148  */
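/*
 * Worked example of the ratio arithmetic above (illustrative numbers):
 * with a ratio of 256 for ZONE_DMA, a NORMAL allocation on the 1G
 * machine must leave roughly 784M/256 ~= 3M of ZONE_DMA free, and a
 * HIGHMEM allocation must leave (224M+784M)/256 ~= 4M of ZONE_DMA and
 * 224M/32 = 7M of ZONE_NORMAL free. A smaller ratio therefore means a
 * larger reserve.
 */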
149 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
150 #ifdef CONFIG_ZONE_DMA
151 	 256,
152 #endif
153 #ifdef CONFIG_ZONE_DMA32
154 	 256,
155 #endif
156 #ifdef CONFIG_HIGHMEM
157 	 32,
158 #endif
159 	 32,
160 };
161 
162 EXPORT_SYMBOL(totalram_pages);
163 
164 static char * const zone_names[MAX_NR_ZONES] = {
165 #ifdef CONFIG_ZONE_DMA
166 	 "DMA",
167 #endif
168 #ifdef CONFIG_ZONE_DMA32
169 	 "DMA32",
170 #endif
171 	 "Normal",
172 #ifdef CONFIG_HIGHMEM
173 	 "HighMem",
174 #endif
175 	 "Movable",
176 };
177 
178 int min_free_kbytes = 1024;
179 
180 static unsigned long __meminitdata nr_kernel_pages;
181 static unsigned long __meminitdata nr_all_pages;
182 static unsigned long __meminitdata dma_reserve;
183 
184 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
185   /*
186    * MAX_ACTIVE_REGIONS determines the maximum number of distinct
187    * ranges of memory (RAM) that may be registered with add_active_range().
188    * Ranges passed to add_active_range() will be merged if possible
189    * so the number of times add_active_range() can be called is
190    * related to the number of nodes and the number of holes
191    */
192   #ifdef CONFIG_MAX_ACTIVE_REGIONS
193     /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
194     #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
195   #else
196     #if MAX_NUMNODES >= 32
197       /* If there can be many nodes, allow up to 50 holes per node */
198       #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
199     #else
200       /* By default, allow up to 256 distinct regions */
201       #define MAX_ACTIVE_REGIONS 256
202     #endif
203   #endif
204 
205   static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
206   static int __meminitdata nr_nodemap_entries;
207   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
208   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
209   static unsigned long __initdata required_kernelcore;
210   static unsigned long __initdata required_movablecore;
211   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
212 
213   /* movable_zone is the "real" zone that pages in ZONE_MOVABLE are taken from */
214   int movable_zone;
215   EXPORT_SYMBOL(movable_zone);
216 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
217 
218 #if MAX_NUMNODES > 1
219 int nr_node_ids __read_mostly = MAX_NUMNODES;
220 int nr_online_nodes __read_mostly = 1;
221 EXPORT_SYMBOL(nr_node_ids);
222 EXPORT_SYMBOL(nr_online_nodes);
223 #endif
224 
225 int page_group_by_mobility_disabled __read_mostly;
226 
227 static void set_pageblock_migratetype(struct page *page, int migratetype)
228 {
229 
230 	if (unlikely(page_group_by_mobility_disabled))
231 		migratetype = MIGRATE_UNMOVABLE;
232 
233 	set_pageblock_flags_group(page, (unsigned long)migratetype,
234 					PB_migrate, PB_migrate_end);
235 }
236 
237 bool oom_killer_disabled __read_mostly;
238 
239 #ifdef CONFIG_DEBUG_VM
240 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
241 {
242 	int ret = 0;
243 	unsigned seq;
244 	unsigned long pfn = page_to_pfn(page);
245 
246 	do {
247 		seq = zone_span_seqbegin(zone);
248 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
249 			ret = 1;
250 		else if (pfn < zone->zone_start_pfn)
251 			ret = 1;
252 	} while (zone_span_seqretry(zone, seq));
253 
254 	return ret;
255 }
256 
257 static int page_is_consistent(struct zone *zone, struct page *page)
258 {
259 	if (!pfn_valid_within(page_to_pfn(page)))
260 		return 0;
261 	if (zone != page_zone(page))
262 		return 0;
263 
264 	return 1;
265 }
266 /*
267  * Temporary debugging check for pages not lying within a given zone.
268  */
269 static int bad_range(struct zone *zone, struct page *page)
270 {
271 	if (page_outside_zone_boundaries(zone, page))
272 		return 1;
273 	if (!page_is_consistent(zone, page))
274 		return 1;
275 
276 	return 0;
277 }
278 #else
279 static inline int bad_range(struct zone *zone, struct page *page)
280 {
281 	return 0;
282 }
283 #endif
284 
285 static void bad_page(struct page *page)
286 {
287 	static unsigned long resume;
288 	static unsigned long nr_shown;
289 	static unsigned long nr_unshown;
290 
291 	/* Don't complain about poisoned pages */
292 	if (PageHWPoison(page)) {
293 		reset_page_mapcount(page); /* remove PageBuddy */
294 		return;
295 	}
296 
297 	/*
298 	 * Allow a burst of 60 reports, then keep quiet for that minute;
299 	 * or allow a steady drip of one report per second.
300 	 */
301 	if (nr_shown == 60) {
302 		if (time_before(jiffies, resume)) {
303 			nr_unshown++;
304 			goto out;
305 		}
306 		if (nr_unshown) {
307 			printk(KERN_ALERT
308 			      "BUG: Bad page state: %lu messages suppressed\n",
309 				nr_unshown);
310 			nr_unshown = 0;
311 		}
312 		nr_shown = 0;
313 	}
314 	if (nr_shown++ == 0)
315 		resume = jiffies + 60 * HZ;
316 
317 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
318 		current->comm, page_to_pfn(page));
319 	dump_page(page);
320 
321 	dump_stack();
322 out:
323 	/* Leave bad fields for debug, except PageBuddy could make trouble */
324 	reset_page_mapcount(page); /* remove PageBuddy */
325 	add_taint(TAINT_BAD_PAGE);
326 }
327 
328 /*
329  * Higher-order pages are called "compound pages".  They are structured thusly:
330  *
331  * The first PAGE_SIZE page is called the "head page".
332  *
333  * The remaining PAGE_SIZE pages are called "tail pages".
334  *
335  * All pages have PG_compound set.  All pages have their ->private pointing at
336  * the head page (even the head page has this).
337  *
338  * The first tail page's ->lru.next holds the address of the compound page's
339  * put_page() function.  Its ->lru.prev holds the order of allocation.
340  * This usage means that zero-order pages may not be compound.
341  */
342 
343 static void free_compound_page(struct page *page)
344 {
345 	__free_pages_ok(page, compound_order(page));
346 }
347 
348 void prep_compound_page(struct page *page, unsigned long order)
349 {
350 	int i;
351 	int nr_pages = 1 << order;
352 
353 	set_compound_page_dtor(page, free_compound_page);
354 	set_compound_order(page, order);
355 	__SetPageHead(page);
356 	for (i = 1; i < nr_pages; i++) {
357 		struct page *p = page + i;
358 
359 		__SetPageTail(p);
360 		p->first_page = page;
361 	}
362 }
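/*
 * Illustrative result of prep_compound_page(page, 2), following the
 * comment block above: page[0] has PG_head set and the compound order
 * and destructor recorded against it, while page[1..3] have PG_tail
 * set and their first_page pointing back at page[0].
 */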
363 
364 /* update __split_huge_page_refcount if you change this function */
365 static int destroy_compound_page(struct page *page, unsigned long order)
366 {
367 	int i;
368 	int nr_pages = 1 << order;
369 	int bad = 0;
370 
371 	if (unlikely(compound_order(page) != order) ||
372 	    unlikely(!PageHead(page))) {
373 		bad_page(page);
374 		bad++;
375 	}
376 
377 	__ClearPageHead(page);
378 
379 	for (i = 1; i < nr_pages; i++) {
380 		struct page *p = page + i;
381 
382 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
383 			bad_page(page);
384 			bad++;
385 		}
386 		__ClearPageTail(p);
387 	}
388 
389 	return bad;
390 }
391 
392 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
393 {
394 	int i;
395 
396 	/*
397 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
398 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
399 	 */
400 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
401 	for (i = 0; i < (1 << order); i++)
402 		clear_highpage(page + i);
403 }
404 
405 static inline void set_page_order(struct page *page, int order)
406 {
407 	set_page_private(page, order);
408 	__SetPageBuddy(page);
409 }
410 
411 static inline void rmv_page_order(struct page *page)
412 {
413 	__ClearPageBuddy(page);
414 	set_page_private(page, 0);
415 }
416 
417 /*
418  * Locate the struct page for both the matching buddy in our
419  * pair (buddy1) and the combined O(n+1) page they form (page).
420  *
421  * 1) Any buddy B1 will have an order O twin B2 which satisfies
422  * the following equation:
423  *     B2 = B1 ^ (1 << O)
424  * For example, if the starting buddy (B1) is #8, its order
425  * 1 buddy is #10:
426  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
427  *
428  * 2) Any buddy B will have an order O+1 parent P which
429  * satisfies the following equation:
430  *     P = B & ~(1 << O)
431  *
432  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
433  */
434 static inline unsigned long
435 __find_buddy_index(unsigned long page_idx, unsigned int order)
436 {
437 	return page_idx ^ (1 << order);
438 }
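/*
 * Worked example (illustrative indices): at order 1, page_idx 8 gives
 *
 *	buddy_idx    = 8 ^ (1 << 1) = 10
 *	combined_idx = buddy_idx & page_idx = 10 & 8 = 8
 *
 * i.e. blocks 8 and 10 merge into the order-2 block starting at index
 * 8, matching P = B & ~(1 << O) from the comment above.
 */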
439 
440 /*
441  * This function checks whether a page is free && is the buddy.
442  * We can coalesce a page and its buddy if
443  * (a) the buddy is not in a hole &&
444  * (b) the buddy is in the buddy system &&
445  * (c) a page and its buddy have the same order &&
446  * (d) a page and its buddy are in the same zone.
447  *
448  * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
449  * Setting, clearing, and testing _mapcount == -2 is serialized by zone->lock.
450  *
451  * For recording page's order, we use page_private(page).
452  */
453 static inline int page_is_buddy(struct page *page, struct page *buddy,
454 								int order)
455 {
456 	if (!pfn_valid_within(page_to_pfn(buddy)))
457 		return 0;
458 
459 	if (page_zone_id(page) != page_zone_id(buddy))
460 		return 0;
461 
462 	if (PageBuddy(buddy) && page_order(buddy) == order) {
463 		VM_BUG_ON(page_count(buddy) != 0);
464 		return 1;
465 	}
466 	return 0;
467 }
468 
469 /*
470  * Freeing function for a buddy system allocator.
471  *
472  * The concept of a buddy system is to maintain a direct-mapped table
473  * (containing bit values) for memory blocks of various "orders".
474  * The bottom level table contains the map for the smallest allocatable
475  * units of memory (here, pages), and each level above it describes
476  * pairs of units from the levels below, hence, "buddies".
477  * At a high level, all that happens here is marking the table entry
478  * at the bottom level available, and propagating the changes upward
479  * as necessary, plus some accounting needed to play nicely with other
480  * parts of the VM system.
481  * At each level, we keep a list of pages, which are heads of contiguous
482  * free runs of (1 << order) pages, marked with a _mapcount of -2. The page's
483  * order is recorded in the page_private(page) field.
484  * So when we are allocating or freeing one, we can derive the state of the
485  * other.  That is, if we allocate a small block, and both were
486  * free, the remainder of the region must be split into blocks.
487  * If a block is freed, and its buddy is also free, then this
488  * triggers coalescing into a block of larger size.
489  *
490  * -- wli
491  */
492 
493 static inline void __free_one_page(struct page *page,
494 		struct zone *zone, unsigned int order,
495 		int migratetype)
496 {
497 	unsigned long page_idx;
498 	unsigned long combined_idx;
499 	unsigned long uninitialized_var(buddy_idx);
500 	struct page *buddy;
501 
502 	if (unlikely(PageCompound(page)))
503 		if (unlikely(destroy_compound_page(page, order)))
504 			return;
505 
506 	VM_BUG_ON(migratetype == -1);
507 
508 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
509 
510 	VM_BUG_ON(page_idx & ((1 << order) - 1));
511 	VM_BUG_ON(bad_range(zone, page));
512 
513 	while (order < MAX_ORDER-1) {
514 		buddy_idx = __find_buddy_index(page_idx, order);
515 		buddy = page + (buddy_idx - page_idx);
516 		if (!page_is_buddy(page, buddy, order))
517 			break;
518 
519 		/* Our buddy is free, merge with it and move up one order. */
520 		list_del(&buddy->lru);
521 		zone->free_area[order].nr_free--;
522 		rmv_page_order(buddy);
523 		combined_idx = buddy_idx & page_idx;
524 		page = page + (combined_idx - page_idx);
525 		page_idx = combined_idx;
526 		order++;
527 	}
528 	set_page_order(page, order);
529 
530 	/*
531 	 * If this is not the largest possible page, check if the buddy
532 	 * of the next-highest order is free. If it is, it's possible
533 	 * that pages are being freed that will coalesce soon. In case
534 	 * that is happening, add the free page to the tail of the list
535 	 * so it's less likely to be used soon and more likely to be merged
536 	 * as a higher order page
537 	 */
538 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
539 		struct page *higher_page, *higher_buddy;
540 		combined_idx = buddy_idx & page_idx;
541 		higher_page = page + (combined_idx - page_idx);
542 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
543 		higher_buddy = page + (buddy_idx - combined_idx);
544 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
545 			list_add_tail(&page->lru,
546 				&zone->free_area[order].free_list[migratetype]);
547 			goto out;
548 		}
549 	}
550 
551 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
552 out:
553 	zone->free_area[order].nr_free++;
554 }
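/*
 * Illustrative merge cascade for __free_one_page() (hypothetical
 * indices): freeing order-0 page 5 while page 4 and the order-1 block
 * at 6 are free proceeds as
 *
 *	order 0: buddy of 5 is 4 (free)   -> merge, block starts at 4
 *	order 1: buddy of 4 is 6 (free)   -> merge, block starts at 4
 *	order 2: buddy of 4 is 0 (in use) -> stop, list the block at 4
 *
 * with each step deleting the buddy from its free list before moving
 * up one order, exactly as the loop above does.
 */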
555 
556 /*
557  * free_page_mlock() -- clean up attempts to free an mlocked page.
558  * Page should not be on lru, so no need to fix that up.
559  * free_pages_check() will verify...
560  */
561 static inline void free_page_mlock(struct page *page)
562 {
563 	__dec_zone_page_state(page, NR_MLOCK);
564 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
565 }
566 
567 static inline int free_pages_check(struct page *page)
568 {
569 	if (unlikely(page_mapcount(page) |
570 		(page->mapping != NULL)  |
571 		(atomic_read(&page->_count) != 0) |
572 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
573 		(mem_cgroup_bad_page_check(page)))) {
574 		bad_page(page);
575 		return 1;
576 	}
577 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
578 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
579 	return 0;
580 }
581 
582 /*
583  * Frees a number of pages from the PCP lists.
584  * Assumes all pages on the list are in the same zone, and of the same order.
585  * count is the number of pages to free.
586  *
587  * If the zone was previously in an "all pages pinned" state then look to
588  * see if this freeing clears that state.
589  *
590  * And clear the zone's pages_scanned counter, to hold off the "all pages are
591  * pinned" detection logic.
592  */
593 static void free_pcppages_bulk(struct zone *zone, int count,
594 					struct per_cpu_pages *pcp)
595 {
596 	int migratetype = 0;
597 	int batch_free = 0;
598 	int to_free = count;
599 
600 	spin_lock(&zone->lock);
601 	zone->all_unreclaimable = 0;
602 	zone->pages_scanned = 0;
603 
604 	while (to_free) {
605 		struct page *page;
606 		struct list_head *list;
607 
608 		/*
609 		 * Remove pages from lists in a round-robin fashion. A
610 		 * batch_free count is maintained that is incremented when an
611 		 * empty list is encountered.  This is so more pages are freed
612 		 * off fuller lists instead of spinning excessively around empty
613 		 * lists
614 		 */
615 		do {
616 			batch_free++;
617 			if (++migratetype == MIGRATE_PCPTYPES)
618 				migratetype = 0;
619 			list = &pcp->lists[migratetype];
620 		} while (list_empty(list));
621 
622 		/* This is the only non-empty list. Free them all. */
623 		if (batch_free == MIGRATE_PCPTYPES)
624 			batch_free = to_free;
625 
626 		do {
627 			page = list_entry(list->prev, struct page, lru);
628 			/* must delete as __free_one_page manipulates the list */
629 			list_del(&page->lru);
630 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
631 			__free_one_page(page, zone, 0, page_private(page));
632 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
633 		} while (--to_free && --batch_free && !list_empty(list));
634 	}
635 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
636 	spin_unlock(&zone->lock);
637 }
638 
639 static void free_one_page(struct zone *zone, struct page *page, int order,
640 				int migratetype)
641 {
642 	spin_lock(&zone->lock);
643 	zone->all_unreclaimable = 0;
644 	zone->pages_scanned = 0;
645 
646 	__free_one_page(page, zone, order, migratetype);
647 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
648 	spin_unlock(&zone->lock);
649 }
650 
651 static bool free_pages_prepare(struct page *page, unsigned int order)
652 {
653 	int i;
654 	int bad = 0;
655 
656 	trace_mm_page_free_direct(page, order);
657 	kmemcheck_free_shadow(page, order);
658 
659 	if (PageAnon(page))
660 		page->mapping = NULL;
661 	for (i = 0; i < (1 << order); i++)
662 		bad += free_pages_check(page + i);
663 	if (bad)
664 		return false;
665 
666 	if (!PageHighMem(page)) {
667 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
668 		debug_check_no_obj_freed(page_address(page),
669 					   PAGE_SIZE << order);
670 	}
671 	arch_free_page(page, order);
672 	kernel_map_pages(page, 1 << order, 0);
673 
674 	return true;
675 }
676 
677 static void __free_pages_ok(struct page *page, unsigned int order)
678 {
679 	unsigned long flags;
680 	int wasMlocked = __TestClearPageMlocked(page);
681 
682 	if (!free_pages_prepare(page, order))
683 		return;
684 
685 	local_irq_save(flags);
686 	if (unlikely(wasMlocked))
687 		free_page_mlock(page);
688 	__count_vm_events(PGFREE, 1 << order);
689 	free_one_page(page_zone(page), page, order,
690 					get_pageblock_migratetype(page));
691 	local_irq_restore(flags);
692 }
693 
694 /*
695  * permit the bootmem allocator to evade page validation on high-order frees
696  */
697 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
698 {
699 	if (order == 0) {
700 		__ClearPageReserved(page);
701 		set_page_count(page, 0);
702 		set_page_refcounted(page);
703 		__free_page(page);
704 	} else {
705 		int loop;
706 
707 		prefetchw(page);
708 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
709 			struct page *p = &page[loop];
710 
711 			if (loop + 1 < BITS_PER_LONG)
712 				prefetchw(p + 1);
713 			__ClearPageReserved(p);
714 			set_page_count(p, 0);
715 		}
716 
717 		set_page_refcounted(page);
718 		__free_pages(page, order);
719 	}
720 }
721 
722 
723 /*
724  * The order of subdivision here is critical for the IO subsystem.
725  * Please do not alter this order without good reasons and regression
726  * testing. Specifically, as large blocks of memory are subdivided,
727  * the order in which smaller blocks are delivered depends on the order
728  * they're subdivided in this function. This is the primary factor
729  * influencing the order in which pages are delivered to the IO
730  * subsystem according to empirical testing, and this is also justified
731  * by considering the behavior of a buddy system containing a single
732  * large block of memory acted on by a series of small allocations.
733  * This behavior is a critical factor in sglist merging's success.
734  *
735  * -- wli
736  */
737 static inline void expand(struct zone *zone, struct page *page,
738 	int low, int high, struct free_area *area,
739 	int migratetype)
740 {
741 	unsigned long size = 1 << high;
742 
743 	while (high > low) {
744 		area--;
745 		high--;
746 		size >>= 1;
747 		VM_BUG_ON(bad_range(zone, &page[size]));
748 		list_add(&page[size].lru, &area->free_list[migratetype]);
749 		area->nr_free++;
750 		set_page_order(&page[size], high);
751 	}
752 }
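/*
 * Worked example for expand() (illustrative): satisfying an order-0
 * request (low = 0) from an order-3 block (high = 3) splits off the
 * upper half at each step:
 *
 *	high 3 -> 2: pages [4..7] go to the order-2 free list
 *	high 2 -> 1: pages [2..3] go to the order-1 free list
 *	high 1 -> 0: page  [1]    goes to the order-0 free list
 *
 * leaving page [0] to be returned to the caller.
 */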
753 
754 /*
755  * This page is about to be returned from the page allocator
756  */
757 static inline int check_new_page(struct page *page)
758 {
759 	if (unlikely(page_mapcount(page) |
760 		(page->mapping != NULL)  |
761 		(atomic_read(&page->_count) != 0)  |
762 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
763 		(mem_cgroup_bad_page_check(page)))) {
764 		bad_page(page);
765 		return 1;
766 	}
767 	return 0;
768 }
769 
770 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
771 {
772 	int i;
773 
774 	for (i = 0; i < (1 << order); i++) {
775 		struct page *p = page + i;
776 		if (unlikely(check_new_page(p)))
777 			return 1;
778 	}
779 
780 	set_page_private(page, 0);
781 	set_page_refcounted(page);
782 
783 	arch_alloc_page(page, order);
784 	kernel_map_pages(page, 1 << order, 1);
785 
786 	if (gfp_flags & __GFP_ZERO)
787 		prep_zero_page(page, order, gfp_flags);
788 
789 	if (order && (gfp_flags & __GFP_COMP))
790 		prep_compound_page(page, order);
791 
792 	return 0;
793 }
794 
795 /*
796  * Go through the free lists for the given migratetype and remove
797  * the smallest available page from the freelists
798  */
799 static inline
800 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
801 						int migratetype)
802 {
803 	unsigned int current_order;
804 	struct free_area * area;
805 	struct page *page;
806 
807 	/* Find a page of the appropriate size in the preferred list */
808 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
809 		area = &(zone->free_area[current_order]);
810 		if (list_empty(&area->free_list[migratetype]))
811 			continue;
812 
813 		page = list_entry(area->free_list[migratetype].next,
814 							struct page, lru);
815 		list_del(&page->lru);
816 		rmv_page_order(page);
817 		area->nr_free--;
818 		expand(zone, page, order, current_order, area, migratetype);
819 		return page;
820 	}
821 
822 	return NULL;
823 }
824 
825 
826 /*
827  * This array describes the order lists are fallen back to when
828  * the free lists for the desirable migrate type are depleted
829  */
830 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
831 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
832 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
833 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
834 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
835 };
836 
837 /*
838  * Move the free pages in a range to the free lists of the requested type.
839  * Note that start_page and end_page are not aligned on a pageblock
840  * boundary. If alignment is required, use move_freepages_block()
841  */
842 static int move_freepages(struct zone *zone,
843 			  struct page *start_page, struct page *end_page,
844 			  int migratetype)
845 {
846 	struct page *page;
847 	unsigned long order;
848 	int pages_moved = 0;
849 
850 #ifndef CONFIG_HOLES_IN_ZONE
851 	/*
852 	 * page_zone is not safe to call in this context when
853 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
854 	 * anyway as we check zone boundaries in move_freepages_block().
855 	 * Remove at a later date when no bug reports exist related to
856 	 * grouping pages by mobility
857 	 */
858 	BUG_ON(page_zone(start_page) != page_zone(end_page));
859 #endif
860 
861 	for (page = start_page; page <= end_page;) {
862 		/* Make sure we are not inadvertently changing nodes */
863 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
864 
865 		if (!pfn_valid_within(page_to_pfn(page))) {
866 			page++;
867 			continue;
868 		}
869 
870 		if (!PageBuddy(page)) {
871 			page++;
872 			continue;
873 		}
874 
875 		order = page_order(page);
876 		list_move(&page->lru,
877 			  &zone->free_area[order].free_list[migratetype]);
878 		page += 1 << order;
879 		pages_moved += 1 << order;
880 	}
881 
882 	return pages_moved;
883 }
884 
885 static int move_freepages_block(struct zone *zone, struct page *page,
886 				int migratetype)
887 {
888 	unsigned long start_pfn, end_pfn;
889 	struct page *start_page, *end_page;
890 
891 	start_pfn = page_to_pfn(page);
892 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
893 	start_page = pfn_to_page(start_pfn);
894 	end_page = start_page + pageblock_nr_pages - 1;
895 	end_pfn = start_pfn + pageblock_nr_pages - 1;
896 
897 	/* Do not cross zone boundaries */
898 	if (start_pfn < zone->zone_start_pfn)
899 		start_page = page;
900 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
901 		return 0;
902 
903 	return move_freepages(zone, start_page, end_page, migratetype);
904 }
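/*
 * Illustrative alignment (assuming pageblock_nr_pages == 1024): for a
 * page at pfn 5000, start_pfn rounds down to 4096, so the whole block
 * spanning pfns [4096, 5119] is moved as a unit unless it would cross
 * the zone boundary checks above.
 */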
905 
906 static void change_pageblock_range(struct page *pageblock_page,
907 					int start_order, int migratetype)
908 {
909 	int nr_pageblocks = 1 << (start_order - pageblock_order);
910 
911 	while (nr_pageblocks--) {
912 		set_pageblock_migratetype(pageblock_page, migratetype);
913 		pageblock_page += pageblock_nr_pages;
914 	}
915 }
916 
917 /* Remove an element from the buddy allocator from the fallback list */
918 static inline struct page *
919 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
920 {
921 	struct free_area * area;
922 	int current_order;
923 	struct page *page;
924 	int migratetype, i;
925 
926 	/* Find the largest possible block of pages in the other list */
927 	for (current_order = MAX_ORDER-1; current_order >= order;
928 						--current_order) {
929 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
930 			migratetype = fallbacks[start_migratetype][i];
931 
932 			/* MIGRATE_RESERVE handled later if necessary */
933 			if (migratetype == MIGRATE_RESERVE)
934 				continue;
935 
936 			area = &(zone->free_area[current_order]);
937 			if (list_empty(&area->free_list[migratetype]))
938 				continue;
939 
940 			page = list_entry(area->free_list[migratetype].next,
941 					struct page, lru);
942 			area->nr_free--;
943 
944 			/*
945 			 * If breaking a large block of pages, move all free
946 			 * pages to the preferred allocation list. If falling
947 			 * back for a reclaimable kernel allocation, be more
948 			 * aggressive about taking ownership of free pages
949 			 */
950 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
951 					start_migratetype == MIGRATE_RECLAIMABLE ||
952 					page_group_by_mobility_disabled) {
953 				unsigned long pages;
954 				pages = move_freepages_block(zone, page,
955 								start_migratetype);
956 
957 				/* Claim the whole block if over half of it is free */
958 				if (pages >= (1 << (pageblock_order-1)) ||
959 						page_group_by_mobility_disabled)
960 					set_pageblock_migratetype(page,
961 								start_migratetype);
962 
963 				migratetype = start_migratetype;
964 			}
965 
966 			/* Remove the page from the freelists */
967 			list_del(&page->lru);
968 			rmv_page_order(page);
969 
970 			/* Take ownership for orders >= pageblock_order */
971 			if (current_order >= pageblock_order)
972 				change_pageblock_range(page, current_order,
973 							start_migratetype);
974 
975 			expand(zone, page, order, current_order, area, migratetype);
976 
977 			trace_mm_page_alloc_extfrag(page, order, current_order,
978 				start_migratetype, migratetype);
979 
980 			return page;
981 		}
982 	}
983 
984 	return NULL;
985 }
986 
987 /*
988  * Do the hard work of removing an element from the buddy allocator.
989  * Call me with the zone->lock already held.
990  */
991 static struct page *__rmqueue(struct zone *zone, unsigned int order,
992 						int migratetype)
993 {
994 	struct page *page;
995 
996 retry_reserve:
997 	page = __rmqueue_smallest(zone, order, migratetype);
998 
999 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1000 		page = __rmqueue_fallback(zone, order, migratetype);
1001 
1002 		/*
1003 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1004 		 * is used because __rmqueue_smallest is an inline function
1005 		 * and we want just one call site
1006 		 */
1007 		if (!page) {
1008 			migratetype = MIGRATE_RESERVE;
1009 			goto retry_reserve;
1010 		}
1011 	}
1012 
1013 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
1014 	return page;
1015 }
1016 
1017 /*
1018  * Obtain a specified number of elements from the buddy allocator, all under
1019  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1020  * Returns the number of new pages which were placed at *list.
1021  */
1022 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1023 			unsigned long count, struct list_head *list,
1024 			int migratetype, int cold)
1025 {
1026 	int i;
1027 
1028 	spin_lock(&zone->lock);
1029 	for (i = 0; i < count; ++i) {
1030 		struct page *page = __rmqueue(zone, order, migratetype);
1031 		if (unlikely(page == NULL))
1032 			break;
1033 
1034 		/*
1035 		 * Split buddy pages returned by expand() are received here
1036 		 * in physical page order. The page is added to the caller's
1037 		 * list and the list head then moves forward. From the caller's
1038 		 * perspective, the linked list is ordered by page number in
1039 		 * some conditions. This is useful for IO devices that can
1040 		 * merge IO requests if the physical pages are ordered
1041 		 * properly.
1042 		 */
1043 		if (likely(cold == 0))
1044 			list_add(&page->lru, list);
1045 		else
1046 			list_add_tail(&page->lru, list);
1047 		set_page_private(page, migratetype);
1048 		list = &page->lru;
1049 	}
1050 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1051 	spin_unlock(&zone->lock);
1052 	return i;
1053 }
1054 
1055 #ifdef CONFIG_NUMA
1056 /*
1057  * Called from the vmstat counter updater to drain pagesets of this
1058  * currently executing processor on remote nodes after they have
1059  * expired.
1060  *
1061  * Note that this function must be called with the thread pinned to
1062  * a single processor.
1063  */
1064 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1065 {
1066 	unsigned long flags;
1067 	int to_drain;
1068 
1069 	local_irq_save(flags);
1070 	if (pcp->count >= pcp->batch)
1071 		to_drain = pcp->batch;
1072 	else
1073 		to_drain = pcp->count;
1074 	free_pcppages_bulk(zone, to_drain, pcp);
1075 	pcp->count -= to_drain;
1076 	local_irq_restore(flags);
1077 }
1078 #endif
1079 
1080 /*
1081  * Drain pages of the indicated processor.
1082  *
1083  * The processor must either be the current processor and the
1084  * thread pinned to the current processor or a processor that
1085  * is not online.
1086  */
1087 static void drain_pages(unsigned int cpu)
1088 {
1089 	unsigned long flags;
1090 	struct zone *zone;
1091 
1092 	for_each_populated_zone(zone) {
1093 		struct per_cpu_pageset *pset;
1094 		struct per_cpu_pages *pcp;
1095 
1096 		local_irq_save(flags);
1097 		pset = per_cpu_ptr(zone->pageset, cpu);
1098 
1099 		pcp = &pset->pcp;
1100 		if (pcp->count) {
1101 			free_pcppages_bulk(zone, pcp->count, pcp);
1102 			pcp->count = 0;
1103 		}
1104 		local_irq_restore(flags);
1105 	}
1106 }
1107 
1108 /*
1109  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1110  */
1111 void drain_local_pages(void *arg)
1112 {
1113 	drain_pages(smp_processor_id());
1114 }
1115 
1116 /*
1117  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1118  */
1119 void drain_all_pages(void)
1120 {
1121 	on_each_cpu(drain_local_pages, NULL, 1);
1122 }
1123 
1124 #ifdef CONFIG_HIBERNATION
1125 
1126 void mark_free_pages(struct zone *zone)
1127 {
1128 	unsigned long pfn, max_zone_pfn;
1129 	unsigned long flags;
1130 	int order, t;
1131 	struct list_head *curr;
1132 
1133 	if (!zone->spanned_pages)
1134 		return;
1135 
1136 	spin_lock_irqsave(&zone->lock, flags);
1137 
1138 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1139 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1140 		if (pfn_valid(pfn)) {
1141 			struct page *page = pfn_to_page(pfn);
1142 
1143 			if (!swsusp_page_is_forbidden(page))
1144 				swsusp_unset_page_free(page);
1145 		}
1146 
1147 	for_each_migratetype_order(order, t) {
1148 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1149 			unsigned long i;
1150 
1151 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1152 			for (i = 0; i < (1UL << order); i++)
1153 				swsusp_set_page_free(pfn_to_page(pfn + i));
1154 		}
1155 	}
1156 	spin_unlock_irqrestore(&zone->lock, flags);
1157 }
1158 #endif /* CONFIG_HIBERNATION */
1159 
1160 /*
1161  * Free a 0-order page
1162  * cold == 1 ? free a cold page : free a hot page
1163  */
1164 void free_hot_cold_page(struct page *page, int cold)
1165 {
1166 	struct zone *zone = page_zone(page);
1167 	struct per_cpu_pages *pcp;
1168 	unsigned long flags;
1169 	int migratetype;
1170 	int wasMlocked = __TestClearPageMlocked(page);
1171 
1172 	if (!free_pages_prepare(page, 0))
1173 		return;
1174 
1175 	migratetype = get_pageblock_migratetype(page);
1176 	set_page_private(page, migratetype);
1177 	local_irq_save(flags);
1178 	if (unlikely(wasMlocked))
1179 		free_page_mlock(page);
1180 	__count_vm_event(PGFREE);
1181 
1182 	/*
1183 	 * We only track unmovable, reclaimable and movable on pcp lists.
1184 	 * Free ISOLATE pages back to the allocator because they are being
1185 	 * offlined, but treat RESERVE as movable pages so we can get those
1186 	 * areas back if necessary. Otherwise, we may have to free
1187 	 * excessively into the page allocator.
1188 	 */
1189 	if (migratetype >= MIGRATE_PCPTYPES) {
1190 		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1191 			free_one_page(zone, page, 0, migratetype);
1192 			goto out;
1193 		}
1194 		migratetype = MIGRATE_MOVABLE;
1195 	}
1196 
1197 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1198 	if (cold)
1199 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1200 	else
1201 		list_add(&page->lru, &pcp->lists[migratetype]);
1202 	pcp->count++;
1203 	if (pcp->count >= pcp->high) {
1204 		free_pcppages_bulk(zone, pcp->batch, pcp);
1205 		pcp->count -= pcp->batch;
1206 	}
1207 
1208 out:
1209 	local_irq_restore(flags);
1210 }
1211 
1212 /*
1213  * split_page takes a non-compound higher-order page, and splits it into
1214  * n (1 << order) sub-pages: page[0] .. page[n - 1].
1215  * Each sub-page must be freed individually.
1216  *
1217  * Note: this is probably too low level an operation for use in drivers.
1218  * Please consult with lkml before using this in your driver.
1219  */
1220 void split_page(struct page *page, unsigned int order)
1221 {
1222 	int i;
1223 
1224 	VM_BUG_ON(PageCompound(page));
1225 	VM_BUG_ON(!page_count(page));
1226 
1227 #ifdef CONFIG_KMEMCHECK
1228 	/*
1229 	 * Split shadow pages too, because free(page[0]) would
1230 	 * otherwise free the whole shadow.
1231 	 */
1232 	if (kmemcheck_page_is_tracked(page))
1233 		split_page(virt_to_page(page[0].shadow), order);
1234 #endif
1235 
1236 	for (i = 1; i < (1 << order); i++)
1237 		set_page_refcounted(page + i);
1238 }
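/*
 * Usage sketch (hypothetical caller, not from this file): after the
 * split, each sub-page carries its own reference and is freed on its
 * own:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *		__free_page(page + 2);
 *		__free_page(page + 1);
 *		__free_page(page);
 *	}
 */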
1239 
1240 /*
1241  * Similar to split_page except the page is already free. As this is only
1242  * being used for migration, the migratetype of the block also changes.
1243  * As this is called with interrupts disabled, the caller is responsible
1244  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1245  * are enabled.
1246  *
1247  * Note: this is probably too low level an operation for use in drivers.
1248  * Please consult with lkml before using this in your driver.
1249  */
1250 int split_free_page(struct page *page)
1251 {
1252 	unsigned int order;
1253 	unsigned long watermark;
1254 	struct zone *zone;
1255 
1256 	BUG_ON(!PageBuddy(page));
1257 
1258 	zone = page_zone(page);
1259 	order = page_order(page);
1260 
1261 	/* Obey watermarks as if the page was being allocated */
1262 	watermark = low_wmark_pages(zone) + (1 << order);
1263 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1264 		return 0;
1265 
1266 	/* Remove page from free list */
1267 	list_del(&page->lru);
1268 	zone->free_area[order].nr_free--;
1269 	rmv_page_order(page);
1270 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1271 
1272 	/* Split into individual pages */
1273 	set_page_refcounted(page);
1274 	split_page(page, order);
1275 
1276 	if (order >= pageblock_order - 1) {
1277 		struct page *endpage = page + (1 << order) - 1;
1278 		for (; page < endpage; page += pageblock_nr_pages)
1279 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1280 	}
1281 
1282 	return 1 << order;
1283 }
1284 
1285 /*
1286  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1287  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1288  * or two.
1289  */
1290 static inline
1291 struct page *buffered_rmqueue(struct zone *preferred_zone,
1292 			struct zone *zone, int order, gfp_t gfp_flags,
1293 			int migratetype)
1294 {
1295 	unsigned long flags;
1296 	struct page *page;
1297 	int cold = !!(gfp_flags & __GFP_COLD);
1298 
1299 again:
1300 	if (likely(order == 0)) {
1301 		struct per_cpu_pages *pcp;
1302 		struct list_head *list;
1303 
1304 		local_irq_save(flags);
1305 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1306 		list = &pcp->lists[migratetype];
1307 		if (list_empty(list)) {
1308 			pcp->count += rmqueue_bulk(zone, 0,
1309 					pcp->batch, list,
1310 					migratetype, cold);
1311 			if (unlikely(list_empty(list)))
1312 				goto failed;
1313 		}
1314 
1315 		if (cold)
1316 			page = list_entry(list->prev, struct page, lru);
1317 		else
1318 			page = list_entry(list->next, struct page, lru);
1319 
1320 		list_del(&page->lru);
1321 		pcp->count--;
1322 	} else {
1323 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1324 			/*
1325 			 * __GFP_NOFAIL is not to be used in new code.
1326 			 *
1327 			 * All __GFP_NOFAIL callers should be fixed so that they
1328 			 * properly detect and handle allocation failures.
1329 			 *
1330 			 * We most definitely don't want callers attempting to
1331 			 * allocate greater than order-1 page units with
1332 			 * __GFP_NOFAIL.
1333 			 */
1334 			WARN_ON_ONCE(order > 1);
1335 		}
1336 		spin_lock_irqsave(&zone->lock, flags);
1337 		page = __rmqueue(zone, order, migratetype);
1338 		spin_unlock(&zone->lock);
1339 		if (!page)
1340 			goto failed;
1341 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1342 	}
1343 
1344 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1345 	zone_statistics(preferred_zone, zone, gfp_flags);
1346 	local_irq_restore(flags);
1347 
1348 	VM_BUG_ON(bad_range(zone, page));
1349 	if (prep_new_page(page, order, gfp_flags))
1350 		goto again;
1351 	return page;
1352 
1353 failed:
1354 	local_irq_restore(flags);
1355 	return NULL;
1356 }
1357 
1358 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1359 #define ALLOC_WMARK_MIN		WMARK_MIN
1360 #define ALLOC_WMARK_LOW		WMARK_LOW
1361 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1362 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1363 
1364 /* Mask to get the watermark bits */
1365 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1366 
1367 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1368 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1369 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1370 
1371 #ifdef CONFIG_FAIL_PAGE_ALLOC
1372 
1373 static struct {
1374 	struct fault_attr attr;
1375 
1376 	u32 ignore_gfp_highmem;
1377 	u32 ignore_gfp_wait;
1378 	u32 min_order;
1379 } fail_page_alloc = {
1380 	.attr = FAULT_ATTR_INITIALIZER,
1381 	.ignore_gfp_wait = 1,
1382 	.ignore_gfp_highmem = 1,
1383 	.min_order = 1,
1384 };
1385 
1386 static int __init setup_fail_page_alloc(char *str)
1387 {
1388 	return setup_fault_attr(&fail_page_alloc.attr, str);
1389 }
1390 __setup("fail_page_alloc=", setup_fail_page_alloc);
1391 
1392 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1393 {
1394 	if (order < fail_page_alloc.min_order)
1395 		return 0;
1396 	if (gfp_mask & __GFP_NOFAIL)
1397 		return 0;
1398 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1399 		return 0;
1400 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1401 		return 0;
1402 
1403 	return should_fail(&fail_page_alloc.attr, 1 << order);
1404 }
1405 
1406 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1407 
1408 static int __init fail_page_alloc_debugfs(void)
1409 {
1410 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1411 	struct dentry *dir;
1412 
1413 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1414 					&fail_page_alloc.attr);
1415 	if (IS_ERR(dir))
1416 		return PTR_ERR(dir);
1417 
1418 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1419 				&fail_page_alloc.ignore_gfp_wait))
1420 		goto fail;
1421 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1422 				&fail_page_alloc.ignore_gfp_highmem))
1423 		goto fail;
1424 	if (!debugfs_create_u32("min-order", mode, dir,
1425 				&fail_page_alloc.min_order))
1426 		goto fail;
1427 
1428 	return 0;
1429 fail:
1430 	debugfs_remove_recursive(dir);
1431 
1432 	return -ENOMEM;
1433 }
1434 
1435 late_initcall(fail_page_alloc_debugfs);
1436 
1437 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1438 
1439 #else /* CONFIG_FAIL_PAGE_ALLOC */
1440 
1441 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1442 {
1443 	return 0;
1444 }
1445 
1446 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1447 
1448 /*
1449  * Return true if free pages are above 'mark'. This takes into account the order
1450  * of the allocation.
1451  */
1452 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1453 		      int classzone_idx, int alloc_flags, long free_pages)
1454 {
1455 	/* free_pages may go negative - that's OK */
1456 	long min = mark;
1457 	int o;
1458 
1459 	free_pages -= (1 << order) - 1;
1460 	if (alloc_flags & ALLOC_HIGH)
1461 		min -= min / 2;
1462 	if (alloc_flags & ALLOC_HARDER)
1463 		min -= min / 4;
1464 
1465 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1466 		return false;
1467 	for (o = 0; o < order; o++) {
1468 		/* At the next order, this order's pages become unavailable */
1469 		free_pages -= z->free_area[o].nr_free << o;
1470 
1471 		/* Require fewer higher order pages to be free */
1472 		min >>= 1;
1473 
1474 		if (free_pages <= min)
1475 			return false;
1476 	}
1477 	return true;
1478 }
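/*
 * Worked example (illustrative numbers): for an order-2 request with
 * mark = 128 pages, no ALLOC_HIGH/ALLOC_HARDER boost and an empty
 * lowmem_reserve, the loop above additionally demands
 *
 *	o = 0: free pages in blocks of order >= 1 must exceed 64
 *	o = 1: free pages in blocks of order >= 2 must exceed 32
 *
 * so an abundance of order-0 pages alone cannot satisfy a
 * higher-order watermark; some free memory must sit in larger blocks.
 */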
1479 
1480 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1481 		      int classzone_idx, int alloc_flags)
1482 {
1483 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1484 					zone_page_state(z, NR_FREE_PAGES));
1485 }
1486 
1487 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1488 		      int classzone_idx, int alloc_flags)
1489 {
1490 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
1491 
1492 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1493 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1494 
1495 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1496 								free_pages);
1497 }
1498 
1499 #ifdef CONFIG_NUMA
1500 /*
1501  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1502  * skip over zones that are not allowed by the cpuset, or that have
1503  * been recently (in the last second) found to be nearly full.  See further
1504  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1505  * that have to skip over a lot of full or unallowed zones.
1506  *
1507  * If the zonelist cache is present in the passed in zonelist, then
1508  * returns a pointer to the allowed node mask (either the current
1509  * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1510  *
1511  * If the zonelist cache is not available for this zonelist, does
1512  * nothing and returns NULL.
1513  *
1514  * If the fullzones BITMAP in the zonelist cache is stale (more than
1515  * a second since last zap'd) then we zap it out (clear its bits.)
1516  *
1517  * We hold off even calling zlc_setup, until after we've checked the
1518  * first zone in the zonelist, on the theory that most allocations will
1519  * be satisfied from that first zone, so best to examine that zone as
1520  * quickly as we can.
1521  */
1522 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1523 {
1524 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1525 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1526 
1527 	zlc = zonelist->zlcache_ptr;
1528 	if (!zlc)
1529 		return NULL;
1530 
1531 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1532 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1533 		zlc->last_full_zap = jiffies;
1534 	}
1535 
1536 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1537 					&cpuset_current_mems_allowed :
1538 					&node_states[N_HIGH_MEMORY];
1539 	return allowednodes;
1540 }
1541 
1542 /*
1543  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1544  * if it is worth looking at further for free memory:
1545  *  1) Check that the zone isn't thought to be full (doesn't have its
1546  *     bit set in the zonelist_cache fullzones BITMAP).
1547  *  2) Check that the zone's node (obtained from the zonelist_cache
1548  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1549  * Return true (non-zero) if zone is worth looking at further, or
1550  * else return false (zero) if it is not.
1551  *
1552  * This check -ignores- the distinction between various watermarks,
1553  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1554  * found to be full for any variation of these watermarks, it will
1555  * be considered full for up to one second by all requests, unless
1556  * we are so low on memory on all allowed nodes that we are forced
1557  * into the second scan of the zonelist.
1558  *
1559  * In the second scan we ignore this zonelist cache and exactly
1560  * apply the watermarks to all zones, even if it is slower to do so.
1561  * We are low on memory in the second scan, and should leave no stone
1562  * unturned looking for a free page.
1563  */
1564 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1565 						nodemask_t *allowednodes)
1566 {
1567 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1568 	int i;				/* index of *z in zonelist zones */
1569 	int n;				/* node that zone *z is on */
1570 
1571 	zlc = zonelist->zlcache_ptr;
1572 	if (!zlc)
1573 		return 1;
1574 
1575 	i = z - zonelist->_zonerefs;
1576 	n = zlc->z_to_n[i];
1577 
1578 	/* This zone is worth trying if it is allowed but not full */
1579 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1580 }
1581 
1582 /*
1583  * Given 'z' scanning a zonelist, set the corresponding bit in
1584  * zlc->fullzones, so that subsequent attempts to allocate a page
1585  * from that zone don't waste time re-examining it.
1586  */
1587 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1588 {
1589 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1590 	int i;				/* index of *z in zonelist zones */
1591 
1592 	zlc = zonelist->zlcache_ptr;
1593 	if (!zlc)
1594 		return;
1595 
1596 	i = z - zonelist->_zonerefs;
1597 
1598 	set_bit(i, zlc->fullzones);
1599 }
1600 
1601 /*
1602  * clear all zones full, called after direct reclaim makes progress so that
1603  * a zone that was recently full is not skipped over for up to a second
1604  */
1605 static void zlc_clear_zones_full(struct zonelist *zonelist)
1606 {
1607 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1608 
1609 	zlc = zonelist->zlcache_ptr;
1610 	if (!zlc)
1611 		return;
1612 
1613 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1614 }
1615 
1616 #else	/* CONFIG_NUMA */
1617 
1618 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1619 {
1620 	return NULL;
1621 }
1622 
1623 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1624 				nodemask_t *allowednodes)
1625 {
1626 	return 1;
1627 }
1628 
1629 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1630 {
1631 }
1632 
1633 static void zlc_clear_zones_full(struct zonelist *zonelist)
1634 {
1635 }
1636 #endif	/* CONFIG_NUMA */
1637 
1638 /*
1639  * get_page_from_freelist goes through the zonelist trying to allocate
1640  * a page.
1641  */
1642 static struct page *
1643 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1644 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1645 		struct zone *preferred_zone, int migratetype)
1646 {
1647 	struct zoneref *z;
1648 	struct page *page = NULL;
1649 	int classzone_idx;
1650 	struct zone *zone;
1651 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1652 	int zlc_active = 0;		/* set if using zonelist_cache */
1653 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1654 
1655 	classzone_idx = zone_idx(preferred_zone);
1656 zonelist_scan:
1657 	/*
1658 	 * Scan zonelist, looking for a zone with enough free.
1659 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1660 	 */
1661 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1662 						high_zoneidx, nodemask) {
1663 		if (NUMA_BUILD && zlc_active &&
1664 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1665 				continue;
1666 		if ((alloc_flags & ALLOC_CPUSET) &&
1667 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1668 				continue;
1669 
1670 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1671 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1672 			unsigned long mark;
1673 			int ret;
1674 
1675 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1676 			if (zone_watermark_ok(zone, order, mark,
1677 				    classzone_idx, alloc_flags))
1678 				goto try_this_zone;
1679 
1680 			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1681 				/*
1682 				 * we do zlc_setup if there are multiple nodes
1683 				 * and before considering the first zone allowed
1684 				 * by the cpuset.
1685 				 */
1686 				allowednodes = zlc_setup(zonelist, alloc_flags);
1687 				zlc_active = 1;
1688 				did_zlc_setup = 1;
1689 			}
1690 
1691 			if (zone_reclaim_mode == 0)
1692 				goto this_zone_full;
1693 
1694 			/*
1695 			 * As we may have just activated ZLC, check if the first
1696 			 * eligible zone has failed zone_reclaim recently.
1697 			 */
1698 			if (NUMA_BUILD && zlc_active &&
1699 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
1700 				continue;
1701 
1702 			ret = zone_reclaim(zone, gfp_mask, order);
1703 			switch (ret) {
1704 			case ZONE_RECLAIM_NOSCAN:
1705 				/* did not scan */
1706 				continue;
1707 			case ZONE_RECLAIM_FULL:
1708 				/* scanned but unreclaimable */
1709 				continue;
1710 			default:
1711 				/* did we reclaim enough */
1712 				if (!zone_watermark_ok(zone, order, mark,
1713 						classzone_idx, alloc_flags))
1714 					goto this_zone_full;
1715 			}
1716 		}
1717 
1718 try_this_zone:
1719 		page = buffered_rmqueue(preferred_zone, zone, order,
1720 						gfp_mask, migratetype);
1721 		if (page)
1722 			break;
1723 this_zone_full:
1724 		if (NUMA_BUILD)
1725 			zlc_mark_zone_full(zonelist, z);
1726 	}
1727 
1728 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1729 		/* Disable zlc cache for second zonelist scan */
1730 		zlc_active = 0;
1731 		goto zonelist_scan;
1732 	}
1733 	return page;
1734 }
1735 
1736 /*
1737  * Large machines with many possible nodes should not always dump per-node
1738  * meminfo in irq context.
1739  */
1740 static inline bool should_suppress_show_mem(void)
1741 {
1742 	bool ret = false;
1743 
1744 #if NODES_SHIFT > 8
1745 	ret = in_interrupt();
1746 #endif
1747 	return ret;
1748 }
1749 
1750 static DEFINE_RATELIMIT_STATE(nopage_rs,
1751 		DEFAULT_RATELIMIT_INTERVAL,
1752 		DEFAULT_RATELIMIT_BURST);
1753 
1754 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1755 {
1756 	va_list args;
1757 	unsigned int filter = SHOW_MEM_FILTER_NODES;
1758 
1759 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
1760 		return;
1761 
1762 	/*
1763 	 * This documents exceptions given to allocations in certain
1764 	 * contexts that are allowed to allocate outside current's set
1765 	 * of allowed nodes.
1766 	 */
1767 	if (!(gfp_mask & __GFP_NOMEMALLOC))
1768 		if (test_thread_flag(TIF_MEMDIE) ||
1769 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
1770 			filter &= ~SHOW_MEM_FILTER_NODES;
1771 	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1772 		filter &= ~SHOW_MEM_FILTER_NODES;
1773 
1774 	if (fmt) {
1775 		printk(KERN_WARNING);
1776 		va_start(args, fmt);
1777 		vprintk(fmt, args);
1778 		va_end(args);
1779 	}
1780 
1781 	pr_warning("%s: page allocation failure: order:%d, mode:0x%x\n",
1782 		   current->comm, order, gfp_mask);
1783 
1784 	dump_stack();
1785 	if (!should_suppress_show_mem())
1786 		show_mem(filter);
1787 }
1788 
1789 static inline int
1790 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1791 				unsigned long pages_reclaimed)
1792 {
1793 	/* Do not loop if specifically requested */
1794 	if (gfp_mask & __GFP_NORETRY)
1795 		return 0;
1796 
1797 	/*
1798 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1799 	 * means __GFP_NOFAIL, but that may not be true in other
1800 	 * implementations.
1801 	 */
1802 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1803 		return 1;
1804 
1805 	/*
1806 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1807 	 * specified, then we retry until we no longer reclaim any pages
1808 	 * (above), or we've reclaimed an order of pages at least as
1809 	 * large as the allocation's order. In both cases, if the
1810 	 * allocation still fails, we stop retrying.
1811 	 */
1812 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1813 		return 1;
1814 
1815 	/*
1816 	 * Don't let big-order allocations loop unless the caller
1817 	 * explicitly requests that.
1818 	 */
1819 	if (gfp_mask & __GFP_NOFAIL)
1820 		return 1;
1821 
1822 	return 0;
1823 }
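
/*
 * Usage sketch (illustrative; this helper is hypothetical and not part
 * of the kernel): how callers typically exercise the retry policy above.
 */
static inline struct page *example_high_order_alloc(unsigned int order)
{
	/*
	 * __GFP_NORETRY makes should_alloc_retry() return 0 at once, so
	 * the slowpath fails fast after a single reclaim attempt.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, order);

	/*
	 * __GFP_REPEAT keeps retrying while reclaim makes progress, until
	 * roughly 1 << order pages have been reclaimed.
	 */
	if (!page)
		page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, order);
	return page;
}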
1824 
1825 static inline struct page *
1826 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1827 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1828 	nodemask_t *nodemask, struct zone *preferred_zone,
1829 	int migratetype)
1830 {
1831 	struct page *page;
1832 
1833 	/* Acquire the OOM killer lock for the zones in zonelist */
1834 	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1835 		schedule_timeout_uninterruptible(1);
1836 		return NULL;
1837 	}
1838 
1839 	/*
1840 	 * Go through the zonelist yet one more time, keeping a very high
1841 	 * watermark here. This is only to catch a parallel oom killing; we
1842 	 * must fail if we're still under heavy pressure.
1843 	 */
1844 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1845 		order, zonelist, high_zoneidx,
1846 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1847 		preferred_zone, migratetype);
1848 	if (page)
1849 		goto out;
1850 
1851 	if (!(gfp_mask & __GFP_NOFAIL)) {
1852 		/* The OOM killer will not help higher order allocs */
1853 		if (order > PAGE_ALLOC_COSTLY_ORDER)
1854 			goto out;
1855 		/* The OOM killer does not needlessly kill tasks for lowmem */
1856 		if (high_zoneidx < ZONE_NORMAL)
1857 			goto out;
1858 		/*
1859 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1860 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1861 		 * The caller should handle page allocation failure by itself if
1862 		 * it specifies __GFP_THISNODE.
1863 		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1864 		 */
1865 		if (gfp_mask & __GFP_THISNODE)
1866 			goto out;
1867 	}
1868 	/* Exhausted what can be done so it's blamo time */
1869 	out_of_memory(zonelist, gfp_mask, order, nodemask);
1870 
1871 out:
1872 	clear_zonelist_oom(zonelist, gfp_mask);
1873 	return page;
1874 }
1875 
1876 #ifdef CONFIG_COMPACTION
1877 /* Try memory compaction for high-order allocations before reclaim */
1878 static struct page *
1879 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1880 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1881 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1882 	int migratetype, unsigned long *did_some_progress,
1883 	bool sync_migration)
1884 {
1885 	struct page *page;
1886 
1887 	if (!order || compaction_deferred(preferred_zone))
1888 		return NULL;
1889 
1890 	current->flags |= PF_MEMALLOC;
1891 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1892 						nodemask, sync_migration);
1893 	current->flags &= ~PF_MEMALLOC;
1894 	if (*did_some_progress != COMPACT_SKIPPED) {
1895 
1896 		/* Page migration frees to the PCP lists but we want merging */
1897 		drain_pages(get_cpu());
1898 		put_cpu();
1899 
1900 		page = get_page_from_freelist(gfp_mask, nodemask,
1901 				order, zonelist, high_zoneidx,
1902 				alloc_flags, preferred_zone,
1903 				migratetype);
1904 		if (page) {
1905 			preferred_zone->compact_considered = 0;
1906 			preferred_zone->compact_defer_shift = 0;
1907 			count_vm_event(COMPACTSUCCESS);
1908 			return page;
1909 		}
1910 
1911 		/*
1912 		 * It's bad if a compaction run occurs and fails.
1913 		 * The most likely reason is that pages exist,
1914 		 * but not enough to satisfy watermarks.
1915 		 */
1916 		count_vm_event(COMPACTFAIL);
1917 		defer_compaction(preferred_zone);
1918 
1919 		cond_resched();
1920 	}
1921 
1922 	return NULL;
1923 }
1924 #else
1925 static inline struct page *
1926 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1927 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1928 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1929 	int migratetype, unsigned long *did_some_progress,
1930 	bool sync_migration)
1931 {
1932 	return NULL;
1933 }
1934 #endif /* CONFIG_COMPACTION */
1935 
1936 /* The really slow allocator path where we enter direct reclaim */
1937 static inline struct page *
1938 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1939 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1940 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1941 	int migratetype, unsigned long *did_some_progress)
1942 {
1943 	struct page *page = NULL;
1944 	struct reclaim_state reclaim_state;
1945 	bool drained = false;
1946 
1947 	cond_resched();
1948 
1949 	/* We now go into synchronous reclaim */
1950 	cpuset_memory_pressure_bump();
1951 	current->flags |= PF_MEMALLOC;
1952 	lockdep_set_current_reclaim_state(gfp_mask);
1953 	reclaim_state.reclaimed_slab = 0;
1954 	current->reclaim_state = &reclaim_state;
1955 
1956 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1957 
1958 	current->reclaim_state = NULL;
1959 	lockdep_clear_current_reclaim_state();
1960 	current->flags &= ~PF_MEMALLOC;
1961 
1962 	cond_resched();
1963 
1964 	if (unlikely(!(*did_some_progress)))
1965 		return NULL;
1966 
1967 	/* After successful reclaim, reconsider all zones for allocation */
1968 	if (NUMA_BUILD)
1969 		zlc_clear_zones_full(zonelist);
1970 
1971 retry:
1972 	page = get_page_from_freelist(gfp_mask, nodemask, order,
1973 					zonelist, high_zoneidx,
1974 					alloc_flags, preferred_zone,
1975 					migratetype);
1976 
1977 	/*
1978 	 * If an allocation failed after direct reclaim, it could be because
1979 	 * pages are pinned on the per-cpu lists. Drain them and try again
1980 	 */
1981 	if (!page && !drained) {
1982 		drain_all_pages();
1983 		drained = true;
1984 		goto retry;
1985 	}
1986 
1987 	return page;
1988 }
1989 
1990 /*
1991  * This is called in the allocator slow-path if the allocation request is of
1992  * sufficient urgency to ignore watermarks and take other desperate measures
1993  */
1994 static inline struct page *
1995 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1996 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1997 	nodemask_t *nodemask, struct zone *preferred_zone,
1998 	int migratetype)
1999 {
2000 	struct page *page;
2001 
2002 	do {
2003 		page = get_page_from_freelist(gfp_mask, nodemask, order,
2004 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2005 			preferred_zone, migratetype);
2006 
2007 		if (!page && gfp_mask & __GFP_NOFAIL)
2008 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2009 	} while (!page && (gfp_mask & __GFP_NOFAIL));
2010 
2011 	return page;
2012 }
2013 
2014 static inline
2015 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2016 						enum zone_type high_zoneidx,
2017 						enum zone_type classzone_idx)
2018 {
2019 	struct zoneref *z;
2020 	struct zone *zone;
2021 
2022 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2023 		wakeup_kswapd(zone, order, classzone_idx);
2024 }
2025 
2026 static inline int
2027 gfp_to_alloc_flags(gfp_t gfp_mask)
2028 {
2029 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2030 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2031 
2032 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2033 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2034 
2035 	/*
2036 	 * The caller may dip into page reserves a bit more if the caller
2037 	 * cannot run direct reclaim, or if the caller has realtime scheduling
2038 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2039 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2040 	 */
2041 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2042 
2043 	if (!wait) {
2044 		/*
2045 		 * Not worth trying to allocate harder for
2046 		 * __GFP_NOMEMALLOC even if it can't schedule.
2047 		 */
2048 		if  (!(gfp_mask & __GFP_NOMEMALLOC))
2049 			alloc_flags |= ALLOC_HARDER;
2050 		/*
2051 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2052 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2053 		 */
2054 		alloc_flags &= ~ALLOC_CPUSET;
2055 	} else if (unlikely(rt_task(current)) && !in_interrupt())
2056 		alloc_flags |= ALLOC_HARDER;
2057 
2058 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2059 		if (!in_interrupt() &&
2060 		    ((current->flags & PF_MEMALLOC) ||
2061 		     unlikely(test_thread_flag(TIF_MEMDIE))))
2062 			alloc_flags |= ALLOC_NO_WATERMARKS;
2063 	}
2064 
2065 	return alloc_flags;
2066 }
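
/*
 * Illustrative self-check (hypothetical, not part of the kernel):
 * demonstrates the translation above for the common GFP_ATOMIC case,
 * which is __GFP_HIGH without __GFP_WAIT.
 */
static inline void example_check_atomic_alloc_flags(void)
{
	int flags = gfp_to_alloc_flags(GFP_ATOMIC);

	/* !wait implies ALLOC_HARDER and drops the cpuset restriction. */
	WARN_ON(!(flags & ALLOC_HARDER));
	WARN_ON(flags & ALLOC_CPUSET);
	/* __GFP_HIGH maps bit-for-bit onto ALLOC_HIGH. */
	WARN_ON(!(flags & ALLOC_HIGH));
}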
2067 
2068 static inline struct page *
2069 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2070 	struct zonelist *zonelist, enum zone_type high_zoneidx,
2071 	nodemask_t *nodemask, struct zone *preferred_zone,
2072 	int migratetype)
2073 {
2074 	const gfp_t wait = gfp_mask & __GFP_WAIT;
2075 	struct page *page = NULL;
2076 	int alloc_flags;
2077 	unsigned long pages_reclaimed = 0;
2078 	unsigned long did_some_progress;
2079 	bool sync_migration = false;
2080 
2081 	/*
2082 	 * In the slowpath, we sanity check order to avoid ever trying to
2083 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2084 	 * be using allocators in order of preference for an area that is
2085 	 * too large.
2086 	 */
2087 	if (order >= MAX_ORDER) {
2088 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2089 		return NULL;
2090 	}
2091 
2092 	/*
2093 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2094 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2095 	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
2096 	 * using a larger set of nodes after it has established that the
2097 	 * allowed per-node queues are empty and that nodes are
2098 	 * over-allocated.
2099 	 */
2100 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2101 		goto nopage;
2102 
2103 restart:
2104 	if (!(gfp_mask & __GFP_NO_KSWAPD))
2105 		wake_all_kswapd(order, zonelist, high_zoneidx,
2106 						zone_idx(preferred_zone));
2107 
2108 	/*
2109 	 * OK, we're below the kswapd watermark and have kicked background
2110 	 * reclaim. Now things get more complex, so set up alloc_flags according
2111 	 * to how we want to proceed.
2112 	 */
2113 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
2114 
2115 	/*
2116 	 * Find the true preferred zone if the allocation is unconstrained by
2117 	 * cpusets.
2118 	 */
2119 	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2120 		first_zones_zonelist(zonelist, high_zoneidx, NULL,
2121 					&preferred_zone);
2122 
2123 rebalance:
2124 	/* This is the last chance, in general, before the goto nopage. */
2125 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2126 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2127 			preferred_zone, migratetype);
2128 	if (page)
2129 		goto got_pg;
2130 
2131 	/* Allocate without watermarks if the context allows */
2132 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
2133 		page = __alloc_pages_high_priority(gfp_mask, order,
2134 				zonelist, high_zoneidx, nodemask,
2135 				preferred_zone, migratetype);
2136 		if (page)
2137 			goto got_pg;
2138 	}
2139 
2140 	/* Atomic allocations - we can't balance anything */
2141 	if (!wait)
2142 		goto nopage;
2143 
2144 	/* Avoid recursion of direct reclaim */
2145 	if (current->flags & PF_MEMALLOC)
2146 		goto nopage;
2147 
2148 	/* Avoid allocations with no watermarks from looping endlessly */
2149 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2150 		goto nopage;
2151 
2152 	/*
2153 	 * Try direct compaction. The first pass is asynchronous. Subsequent
2154 	 * attempts after direct reclaim are synchronous.
2155 	 */
2156 	page = __alloc_pages_direct_compact(gfp_mask, order,
2157 					zonelist, high_zoneidx,
2158 					nodemask,
2159 					alloc_flags, preferred_zone,
2160 					migratetype, &did_some_progress,
2161 					sync_migration);
2162 	if (page)
2163 		goto got_pg;
2164 	sync_migration = true;
2165 
2166 	/* Try direct reclaim and then allocating */
2167 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
2168 					zonelist, high_zoneidx,
2169 					nodemask,
2170 					alloc_flags, preferred_zone,
2171 					migratetype, &did_some_progress);
2172 	if (page)
2173 		goto got_pg;
2174 
2175 	/*
2176 	 * If we failed to make any progress reclaiming, then we are
2177 	 * running out of options and have to consider going OOM
2178 	 */
2179 	if (!did_some_progress) {
2180 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2181 			if (oom_killer_disabled)
2182 				goto nopage;
2183 			page = __alloc_pages_may_oom(gfp_mask, order,
2184 					zonelist, high_zoneidx,
2185 					nodemask, preferred_zone,
2186 					migratetype);
2187 			if (page)
2188 				goto got_pg;
2189 
2190 			if (!(gfp_mask & __GFP_NOFAIL)) {
2191 				/*
2192 				 * The oom killer is not called for high-order
2193 				 * allocations that may fail, so if no progress
2194 				 * is being made, there are no other options and
2195 				 * retrying is unlikely to help.
2196 				 */
2197 				if (order > PAGE_ALLOC_COSTLY_ORDER)
2198 					goto nopage;
2199 				/*
2200 				 * The oom killer is not called for lowmem
2201 				 * allocations to prevent needlessly killing
2202 				 * innocent tasks.
2203 				 */
2204 				if (high_zoneidx < ZONE_NORMAL)
2205 					goto nopage;
2206 			}
2207 
2208 			goto restart;
2209 		}
2210 	}
2211 
2212 	/* Check if we should retry the allocation */
2213 	pages_reclaimed += did_some_progress;
2214 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2215 		/* Wait for some write requests to complete then retry */
2216 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2217 		goto rebalance;
2218 	} else {
2219 		/*
2220 		 * High-order allocations do not necessarily loop after
2221 		 * direct reclaim, and reclaim/compaction depends on compaction
2222 		 * being called after reclaim, so call it directly if necessary.
2223 		 */
2224 		page = __alloc_pages_direct_compact(gfp_mask, order,
2225 					zonelist, high_zoneidx,
2226 					nodemask,
2227 					alloc_flags, preferred_zone,
2228 					migratetype, &did_some_progress,
2229 					sync_migration);
2230 		if (page)
2231 			goto got_pg;
2232 	}
2233 
2234 nopage:
2235 	warn_alloc_failed(gfp_mask, order, NULL);
2236 	return page;
2237 got_pg:
2238 	if (kmemcheck_enabled)
2239 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2240 	return page;
2241 
2242 }
2243 
2244 /*
2245  * This is the 'heart' of the zoned buddy allocator.
2246  */
2247 struct page *
2248 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2249 			struct zonelist *zonelist, nodemask_t *nodemask)
2250 {
2251 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2252 	struct zone *preferred_zone;
2253 	struct page *page;
2254 	int migratetype = allocflags_to_migratetype(gfp_mask);
2255 
2256 	gfp_mask &= gfp_allowed_mask;
2257 
2258 	lockdep_trace_alloc(gfp_mask);
2259 
2260 	might_sleep_if(gfp_mask & __GFP_WAIT);
2261 
2262 	if (should_fail_alloc_page(gfp_mask, order))
2263 		return NULL;
2264 
2265 	/*
2266 	 * Check that the zones suitable for the gfp_mask contain at least
2267 	 * one valid zone. It's possible to have an empty zonelist as a
2268 	 * result of GFP_THISNODE and a memoryless node.
2269 	 */
2270 	if (unlikely(!zonelist->_zonerefs->zone))
2271 		return NULL;
2272 
2273 	get_mems_allowed();
2274 	/* The preferred zone is used for statistics later */
2275 	first_zones_zonelist(zonelist, high_zoneidx,
2276 				nodemask ? : &cpuset_current_mems_allowed,
2277 				&preferred_zone);
2278 	if (!preferred_zone) {
2279 		put_mems_allowed();
2280 		return NULL;
2281 	}
2282 
2283 	/* First allocation attempt */
2284 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2285 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2286 			preferred_zone, migratetype);
2287 	if (unlikely(!page))
2288 		page = __alloc_pages_slowpath(gfp_mask, order,
2289 				zonelist, high_zoneidx, nodemask,
2290 				preferred_zone, migratetype);
2291 	put_mems_allowed();
2292 
2293 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2294 	return page;
2295 }
2296 EXPORT_SYMBOL(__alloc_pages_nodemask);
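
/*
 * Illustrative sketch (hypothetical helper): the familiar alloc_pages()
 * interface in <linux/gfp.h> is a thin wrapper that funnels into
 * __alloc_pages_nodemask() above, roughly like this:
 */
static inline struct page *example_alloc_one_page(void)
{
	/* Comparable to alloc_pages(GFP_KERNEL, 0) with no nodemask. */
	return __alloc_pages_nodemask(GFP_KERNEL, 0,
			node_zonelist(numa_node_id(), GFP_KERNEL), NULL);
}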
2297 
2298 /*
2299  * Common helper functions.
2300  */
2301 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2302 {
2303 	struct page *page;
2304 
2305 	/*
2306 	 * __get_free_pages() returns a directly-mapped kernel address, which
2307 	 * cannot represent a highmem page.
2308 	 */
2309 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2310 
2311 	page = alloc_pages(gfp_mask, order);
2312 	if (!page)
2313 		return 0;
2314 	return (unsigned long) page_address(page);
2315 }
2316 EXPORT_SYMBOL(__get_free_pages);
2317 
2318 unsigned long get_zeroed_page(gfp_t gfp_mask)
2319 {
2320 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2321 }
2322 EXPORT_SYMBOL(get_zeroed_page);
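
/*
 * Usage sketch (illustrative, hypothetical helper): these interfaces
 * hand back a kernel virtual address rather than a struct page, which
 * is why __get_free_pages() rejects __GFP_HIGHMEM above.
 */
static inline int example_scratch_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the zeroed page at 'addr' ... */
	free_page(addr);	/* pairs with get_zeroed_page() */
	return 0;
}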
2323 
2324 void __pagevec_free(struct pagevec *pvec)
2325 {
2326 	int i = pagevec_count(pvec);
2327 
2328 	while (--i >= 0) {
2329 		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2330 		free_hot_cold_page(pvec->pages[i], pvec->cold);
2331 	}
2332 }
2333 
2334 void __free_pages(struct page *page, unsigned int order)
2335 {
2336 	if (put_page_testzero(page)) {
2337 		if (order == 0)
2338 			free_hot_cold_page(page, 0);
2339 		else
2340 			__free_pages_ok(page, order);
2341 	}
2342 }
2343 
2344 EXPORT_SYMBOL(__free_pages);
2345 
2346 void free_pages(unsigned long addr, unsigned int order)
2347 {
2348 	if (addr != 0) {
2349 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2350 		__free_pages(virt_to_page((void *)addr), order);
2351 	}
2352 }
2353 
2354 EXPORT_SYMBOL(free_pages);
2355 
2356 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2357 {
2358 	if (addr) {
2359 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2360 		unsigned long used = addr + PAGE_ALIGN(size);
2361 
2362 		split_page(virt_to_page((void *)addr), order);
2363 		while (used < alloc_end) {
2364 			free_page(used);
2365 			used += PAGE_SIZE;
2366 		}
2367 	}
2368 	return (void *)addr;
2369 }
2370 
2371 /**
2372  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2373  * @size: the number of bytes to allocate
2374  * @gfp_mask: GFP flags for the allocation
2375  *
2376  * This function is similar to alloc_pages(), except that it allocates the
2377  * minimum number of pages to satisfy the request.  alloc_pages() can only
2378  * allocate memory in power-of-two pages.
2379  *
2380  * This function is also limited by MAX_ORDER.
2381  *
2382  * Memory allocated by this function must be released by free_pages_exact().
2383  */
2384 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2385 {
2386 	unsigned int order = get_order(size);
2387 	unsigned long addr;
2388 
2389 	addr = __get_free_pages(gfp_mask, order);
2390 	return make_alloc_exact(addr, order, size);
2391 }
2392 EXPORT_SYMBOL(alloc_pages_exact);
2393 
2394 /**
2395  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2396  *			   pages on a node.
2397  * @nid: the preferred node ID where memory should be allocated
2398  * @size: the number of bytes to allocate
2399  * @gfp_mask: GFP flags for the allocation
2400  *
2401  * Like alloc_pages_exact(), but try to allocate on node nid first before
2402  * falling back.
2403  * Note this is not alloc_pages_exact_node(), which allocates on a specific
2404  * node but is not exact.
2405  */
2406 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2407 {
2408 	unsigned order = get_order(size);
2409 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
2410 	if (!p)
2411 		return NULL;
2412 	return make_alloc_exact((unsigned long)page_address(p), order, size);
2413 }
2414 EXPORT_SYMBOL(alloc_pages_exact_nid);
2415 
2416 /**
2417  * free_pages_exact - release memory allocated via alloc_pages_exact()
2418  * @virt: the value returned by alloc_pages_exact.
2419  * @size: size of allocation, same value as passed to alloc_pages_exact().
2420  *
2421  * Release the memory allocated by a previous call to alloc_pages_exact.
2422  */
2423 void free_pages_exact(void *virt, size_t size)
2424 {
2425 	unsigned long addr = (unsigned long)virt;
2426 	unsigned long end = addr + PAGE_ALIGN(size);
2427 
2428 	while (addr < end) {
2429 		free_page(addr);
2430 		addr += PAGE_SIZE;
2431 	}
2432 }
2433 EXPORT_SYMBOL(free_pages_exact);
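
/*
 * Usage sketch (illustrative; this helper is hypothetical): on a
 * 4KB-page system, alloc_pages_exact(20 * 1024, ...) takes an order-3
 * block (8 pages), splits it, and immediately frees the 3 unused tail
 * pages, so only the 5 pages backing the buffer stay allocated.
 */
static inline int example_use_exact_alloc(void)
{
	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the 20KB physically-contiguous buffer ... */
	free_pages_exact(buf, 20 * 1024);
	return 0;
}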
2434 
2435 static unsigned int nr_free_zone_pages(int offset)
2436 {
2437 	struct zoneref *z;
2438 	struct zone *zone;
2439 
2440 	/* Just pick one node, since fallback list is circular */
2441 	unsigned int sum = 0;
2442 
2443 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2444 
2445 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2446 		unsigned long size = zone->present_pages;
2447 		unsigned long high = high_wmark_pages(zone);
2448 		if (size > high)
2449 			sum += size - high;
2450 	}
2451 
2452 	return sum;
2453 }
2454 
2455 /*
2456  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2457  */
2458 unsigned int nr_free_buffer_pages(void)
2459 {
2460 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2461 }
2462 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2463 
2464 /*
2465  * Amount of free RAM allocatable within all zones
2466  */
2467 unsigned int nr_free_pagecache_pages(void)
2468 {
2469 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2470 }
2471 
2472 static inline void show_node(struct zone *zone)
2473 {
2474 	if (NUMA_BUILD)
2475 		printk("Node %d ", zone_to_nid(zone));
2476 }
2477 
2478 void si_meminfo(struct sysinfo *val)
2479 {
2480 	val->totalram = totalram_pages;
2481 	val->sharedram = 0;
2482 	val->freeram = global_page_state(NR_FREE_PAGES);
2483 	val->bufferram = nr_blockdev_pages();
2484 	val->totalhigh = totalhigh_pages;
2485 	val->freehigh = nr_free_highpages();
2486 	val->mem_unit = PAGE_SIZE;
2487 }
2488 
2489 EXPORT_SYMBOL(si_meminfo);
2490 
2491 #ifdef CONFIG_NUMA
2492 void si_meminfo_node(struct sysinfo *val, int nid)
2493 {
2494 	pg_data_t *pgdat = NODE_DATA(nid);
2495 
2496 	val->totalram = pgdat->node_present_pages;
2497 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2498 #ifdef CONFIG_HIGHMEM
2499 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2500 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2501 			NR_FREE_PAGES);
2502 #else
2503 	val->totalhigh = 0;
2504 	val->freehigh = 0;
2505 #endif
2506 	val->mem_unit = PAGE_SIZE;
2507 }
2508 #endif
2509 
2510 /*
2511  * Determine whether the node should be displayed or not, depending on whether
2512  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2513  */
2514 bool skip_free_areas_node(unsigned int flags, int nid)
2515 {
2516 	bool ret = false;
2517 
2518 	if (!(flags & SHOW_MEM_FILTER_NODES))
2519 		goto out;
2520 
2521 	get_mems_allowed();
2522 	ret = !node_isset(nid, cpuset_current_mems_allowed);
2523 	put_mems_allowed();
2524 out:
2525 	return ret;
2526 }
2527 
2528 #define K(x) ((x) << (PAGE_SHIFT-10))
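
/*
 * Example (illustrative): with 4KB pages, PAGE_SHIFT is 12, so
 * K(x) == x << 2 and K(256) pages prints as 1024kB.
 */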
2529 
2530 /*
2531  * Show the free area list (used by show_mem() and the SysRq-m handler).
2532  * We also calculate the percentage fragmentation. We do this by counting the
2533  * memory on each free list with the exception of the first item on the list.
2534  * Suppresses nodes that are not allowed by current's cpuset if
2535  * SHOW_MEM_FILTER_NODES is passed.
2536  */
2537 void show_free_areas(unsigned int filter)
2538 {
2539 	int cpu;
2540 	struct zone *zone;
2541 
2542 	for_each_populated_zone(zone) {
2543 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2544 			continue;
2545 		show_node(zone);
2546 		printk("%s per-cpu:\n", zone->name);
2547 
2548 		for_each_online_cpu(cpu) {
2549 			struct per_cpu_pageset *pageset;
2550 
2551 			pageset = per_cpu_ptr(zone->pageset, cpu);
2552 
2553 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2554 			       cpu, pageset->pcp.high,
2555 			       pageset->pcp.batch, pageset->pcp.count);
2556 		}
2557 	}
2558 
2559 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2560 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2561 		" unevictable:%lu"
2562 		" dirty:%lu writeback:%lu unstable:%lu\n"
2563 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2564 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2565 		global_page_state(NR_ACTIVE_ANON),
2566 		global_page_state(NR_INACTIVE_ANON),
2567 		global_page_state(NR_ISOLATED_ANON),
2568 		global_page_state(NR_ACTIVE_FILE),
2569 		global_page_state(NR_INACTIVE_FILE),
2570 		global_page_state(NR_ISOLATED_FILE),
2571 		global_page_state(NR_UNEVICTABLE),
2572 		global_page_state(NR_FILE_DIRTY),
2573 		global_page_state(NR_WRITEBACK),
2574 		global_page_state(NR_UNSTABLE_NFS),
2575 		global_page_state(NR_FREE_PAGES),
2576 		global_page_state(NR_SLAB_RECLAIMABLE),
2577 		global_page_state(NR_SLAB_UNRECLAIMABLE),
2578 		global_page_state(NR_FILE_MAPPED),
2579 		global_page_state(NR_SHMEM),
2580 		global_page_state(NR_PAGETABLE),
2581 		global_page_state(NR_BOUNCE));
2582 
2583 	for_each_populated_zone(zone) {
2584 		int i;
2585 
2586 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2587 			continue;
2588 		show_node(zone);
2589 		printk("%s"
2590 			" free:%lukB"
2591 			" min:%lukB"
2592 			" low:%lukB"
2593 			" high:%lukB"
2594 			" active_anon:%lukB"
2595 			" inactive_anon:%lukB"
2596 			" active_file:%lukB"
2597 			" inactive_file:%lukB"
2598 			" unevictable:%lukB"
2599 			" isolated(anon):%lukB"
2600 			" isolated(file):%lukB"
2601 			" present:%lukB"
2602 			" mlocked:%lukB"
2603 			" dirty:%lukB"
2604 			" writeback:%lukB"
2605 			" mapped:%lukB"
2606 			" shmem:%lukB"
2607 			" slab_reclaimable:%lukB"
2608 			" slab_unreclaimable:%lukB"
2609 			" kernel_stack:%lukB"
2610 			" pagetables:%lukB"
2611 			" unstable:%lukB"
2612 			" bounce:%lukB"
2613 			" writeback_tmp:%lukB"
2614 			" pages_scanned:%lu"
2615 			" all_unreclaimable? %s"
2616 			"\n",
2617 			zone->name,
2618 			K(zone_page_state(zone, NR_FREE_PAGES)),
2619 			K(min_wmark_pages(zone)),
2620 			K(low_wmark_pages(zone)),
2621 			K(high_wmark_pages(zone)),
2622 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2623 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2624 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2625 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2626 			K(zone_page_state(zone, NR_UNEVICTABLE)),
2627 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2628 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2629 			K(zone->present_pages),
2630 			K(zone_page_state(zone, NR_MLOCK)),
2631 			K(zone_page_state(zone, NR_FILE_DIRTY)),
2632 			K(zone_page_state(zone, NR_WRITEBACK)),
2633 			K(zone_page_state(zone, NR_FILE_MAPPED)),
2634 			K(zone_page_state(zone, NR_SHMEM)),
2635 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2636 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2637 			zone_page_state(zone, NR_KERNEL_STACK) *
2638 				THREAD_SIZE / 1024,
2639 			K(zone_page_state(zone, NR_PAGETABLE)),
2640 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2641 			K(zone_page_state(zone, NR_BOUNCE)),
2642 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2643 			zone->pages_scanned,
2644 			(zone->all_unreclaimable ? "yes" : "no")
2645 			);
2646 		printk("lowmem_reserve[]:");
2647 		for (i = 0; i < MAX_NR_ZONES; i++)
2648 			printk(" %lu", zone->lowmem_reserve[i]);
2649 		printk("\n");
2650 	}
2651 
2652 	for_each_populated_zone(zone) {
2653 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2654 
2655 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
2656 			continue;
2657 		show_node(zone);
2658 		printk("%s: ", zone->name);
2659 
2660 		spin_lock_irqsave(&zone->lock, flags);
2661 		for (order = 0; order < MAX_ORDER; order++) {
2662 			nr[order] = zone->free_area[order].nr_free;
2663 			total += nr[order] << order;
2664 		}
2665 		spin_unlock_irqrestore(&zone->lock, flags);
2666 		for (order = 0; order < MAX_ORDER; order++)
2667 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2668 		printk("= %lukB\n", K(total));
2669 	}
2670 
2671 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2672 
2673 	show_swap_cache_info();
2674 }
2675 
2676 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2677 {
2678 	zoneref->zone = zone;
2679 	zoneref->zone_idx = zone_idx(zone);
2680 }
2681 
2682 /*
2683  * Builds allocation fallback zone lists.
2684  *
2685  * Add all populated zones of a node to the zonelist.
2686  */
2687 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2688 				int nr_zones, enum zone_type zone_type)
2689 {
2690 	struct zone *zone;
2691 
2692 	BUG_ON(zone_type >= MAX_NR_ZONES);
2693 	zone_type++;
2694 
2695 	do {
2696 		zone_type--;
2697 		zone = pgdat->node_zones + zone_type;
2698 		if (populated_zone(zone)) {
2699 			zoneref_set_zone(zone,
2700 				&zonelist->_zonerefs[nr_zones++]);
2701 			check_highest_zone(zone_type);
2702 		}
2703 
2704 	} while (zone_type);
2705 	return nr_zones;
2706 }
2707 
2708 
2709 /*
2710  *  zonelist_order:
2711  *  0 = automatic detection of better ordering.
2712  *  1 = order by ([node] distance, -zonetype)
2713  *  2 = order by (-zonetype, [node] distance)
2714  *
2715  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2716  *  the same zonelist. So only NUMA can configure this param.
2717  */
2718 #define ZONELIST_ORDER_DEFAULT  0
2719 #define ZONELIST_ORDER_NODE     1
2720 #define ZONELIST_ORDER_ZONE     2
2721 
2722 /* zonelist order in the kernel.
2723  * set_zonelist_order() will set this to NODE or ZONE.
2724  */
2725 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2726 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2727 
2728 
2729 #ifdef CONFIG_NUMA
2730 /* The user-specified value, as set via early_param or the sysctl */
2731 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2732 /* string for sysctl */
2733 #define NUMA_ZONELIST_ORDER_LEN	16
2734 char numa_zonelist_order[16] = "default";
2735 
2736 /*
2737  * Interface for configuring zonelist ordering.
2738  * Command line option "numa_zonelist_order"
2739  *	= "[dD]efault"	- default, automatic configuration.
2740  *	= "[nN]ode"	- order by node locality, then by zone within node
2741  *	= "[zZ]one"	- order by zone, then by locality within zone
2742  */
2743 
2744 static int __parse_numa_zonelist_order(char *s)
2745 {
2746 	if (*s == 'd' || *s == 'D') {
2747 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2748 	} else if (*s == 'n' || *s == 'N') {
2749 		user_zonelist_order = ZONELIST_ORDER_NODE;
2750 	} else if (*s == 'z' || *s == 'Z') {
2751 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2752 	} else {
2753 		printk(KERN_WARNING
2754 			"Ignoring invalid numa_zonelist_order value:  "
2755 			"%s\n", s);
2756 		return -EINVAL;
2757 	}
2758 	return 0;
2759 }
2760 
2761 static __init int setup_numa_zonelist_order(char *s)
2762 {
2763 	int ret;
2764 
2765 	if (!s)
2766 		return 0;
2767 
2768 	ret = __parse_numa_zonelist_order(s);
2769 	if (ret == 0)
2770 		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2771 
2772 	return ret;
2773 }
2774 early_param("numa_zonelist_order", setup_numa_zonelist_order);
2775 
2776 /*
2777  * sysctl handler for numa_zonelist_order
2778  */
2779 int numa_zonelist_order_handler(ctl_table *table, int write,
2780 		void __user *buffer, size_t *length,
2781 		loff_t *ppos)
2782 {
2783 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2784 	int ret;
2785 	static DEFINE_MUTEX(zl_order_mutex);
2786 
2787 	mutex_lock(&zl_order_mutex);
2788 	if (write)
2789 		strcpy(saved_string, (char*)table->data);
2790 	ret = proc_dostring(table, write, buffer, length, ppos);
2791 	if (ret)
2792 		goto out;
2793 	if (write) {
2794 		int oldval = user_zonelist_order;
2795 		if (__parse_numa_zonelist_order((char*)table->data)) {
2796 			/*
2797 			 * Bogus value.  Restore the saved string.
2798 			 */
2799 			strncpy((char*)table->data, saved_string,
2800 				NUMA_ZONELIST_ORDER_LEN);
2801 			user_zonelist_order = oldval;
2802 		} else if (oldval != user_zonelist_order) {
2803 			mutex_lock(&zonelists_mutex);
2804 			build_all_zonelists(NULL);
2805 			mutex_unlock(&zonelists_mutex);
2806 		}
2807 	}
2808 out:
2809 	mutex_unlock(&zl_order_mutex);
2810 	return ret;
2811 }
2812 
2813 
2814 #define MAX_NODE_LOAD (nr_online_nodes)
2815 static int node_load[MAX_NUMNODES];
2816 
2817 /**
2818  * find_next_best_node - find the next node that should appear in a given node's fallback list
2819  * @node: node whose fallback list we're appending
2820  * @used_node_mask: nodemask_t of already used nodes
2821  *
2822  * We use a number of factors to determine which is the next node that should
2823  * appear on a given node's fallback list.  The node should not have appeared
2824  * already in @node's fallback list, and it should be the next closest node
2825  * according to the distance array (which contains arbitrary distance values
2826  * from each node to each node in the system), and should also prefer nodes
2827  * with no CPUs, since presumably they'll have very little allocation pressure
2828  * on them otherwise.
2829  * It returns -1 if no node is found.
2830  */
2831 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2832 {
2833 	int n, val;
2834 	int min_val = INT_MAX;
2835 	int best_node = -1;
2836 	const struct cpumask *tmp = cpumask_of_node(0);
2837 
2838 	/* Use the local node if we haven't already */
2839 	if (!node_isset(node, *used_node_mask)) {
2840 		node_set(node, *used_node_mask);
2841 		return node;
2842 	}
2843 
2844 	for_each_node_state(n, N_HIGH_MEMORY) {
2845 
2846 		/* Don't want a node to appear more than once */
2847 		if (node_isset(n, *used_node_mask))
2848 			continue;
2849 
2850 		/* Use the distance array to find the distance */
2851 		val = node_distance(node, n);
2852 
2853 		/* Penalize nodes under us ("prefer the next node") */
2854 		val += (n < node);
2855 
2856 		/* Give preference to headless and unused nodes */
2857 		tmp = cpumask_of_node(n);
2858 		if (!cpumask_empty(tmp))
2859 			val += PENALTY_FOR_NODE_WITH_CPUS;
2860 
2861 		/* Slight preference for less loaded node */
2862 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2863 		val += node_load[n];
2864 
2865 		if (val < min_val) {
2866 			min_val = val;
2867 			best_node = n;
2868 		}
2869 	}
2870 
2871 	if (best_node >= 0)
2872 		node_set(best_node, *used_node_mask);
2873 
2874 	return best_node;
2875 }
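
/*
 * Worked example (illustrative): suppose node 0 is already used and
 * nodes 1 and 2 are both at node_distance 20 from it.  If node 2 is
 * headless, node 1 pays PENALTY_FOR_NODE_WITH_CPUS before the multiply
 * by MAX_NODE_LOAD * MAX_NUMNODES, so node 2 wins the min_val
 * comparison and is picked first.
 */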
2876 
2877 
2878 /*
2879  * Build zonelists ordered by node and zones within node.
2880  * This results in maximum locality--normal zone overflows into local
2881  * DMA zone, if any--but risks exhausting DMA zone.
2882  */
2883 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2884 {
2885 	int j;
2886 	struct zonelist *zonelist;
2887 
2888 	zonelist = &pgdat->node_zonelists[0];
2889 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2890 		;
2891 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2892 							MAX_NR_ZONES - 1);
2893 	zonelist->_zonerefs[j].zone = NULL;
2894 	zonelist->_zonerefs[j].zone_idx = 0;
2895 }
2896 
2897 /*
2898  * Build gfp_thisnode zonelists
2899  */
2900 static void build_thisnode_zonelists(pg_data_t *pgdat)
2901 {
2902 	int j;
2903 	struct zonelist *zonelist;
2904 
2905 	zonelist = &pgdat->node_zonelists[1];
2906 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2907 	zonelist->_zonerefs[j].zone = NULL;
2908 	zonelist->_zonerefs[j].zone_idx = 0;
2909 }
2910 
2911 /*
2912  * Build zonelists ordered by zone and nodes within zones.
2913  * This results in conserving DMA zone[s] until all Normal memory is
2914  * exhausted, but it overflows to a remote node while memory
2915  * may still exist in the local DMA zone.
2916  */
2917 static int node_order[MAX_NUMNODES];
2918 
2919 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2920 {
2921 	int pos, j, node;
2922 	int zone_type;		/* needs to be signed */
2923 	struct zone *z;
2924 	struct zonelist *zonelist;
2925 
2926 	zonelist = &pgdat->node_zonelists[0];
2927 	pos = 0;
2928 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2929 		for (j = 0; j < nr_nodes; j++) {
2930 			node = node_order[j];
2931 			z = &NODE_DATA(node)->node_zones[zone_type];
2932 			if (populated_zone(z)) {
2933 				zoneref_set_zone(z,
2934 					&zonelist->_zonerefs[pos++]);
2935 				check_highest_zone(zone_type);
2936 			}
2937 		}
2938 	}
2939 	zonelist->_zonerefs[pos].zone = NULL;
2940 	zonelist->_zonerefs[pos].zone_idx = 0;
2941 }
2942 
2943 static int default_zonelist_order(void)
2944 {
2945 	int nid, zone_type;
2946 	unsigned long low_kmem_size, total_size;
2947 	struct zone *z;
2948 	int average_size;
2949 	/*
2950 	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2951 	 * If they are really small and used heavily, the system can fall
2952 	 * into OOM very easily.
2953 	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
2954 	 */
2955 	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
2956 	low_kmem_size = 0;
2957 	total_size = 0;
2958 	for_each_online_node(nid) {
2959 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2960 			z = &NODE_DATA(nid)->node_zones[zone_type];
2961 			if (populated_zone(z)) {
2962 				if (zone_type < ZONE_NORMAL)
2963 					low_kmem_size += z->present_pages;
2964 				total_size += z->present_pages;
2965 			} else if (zone_type == ZONE_NORMAL) {
2966 				/*
2967 				 * If any node has only lowmem, then node order
2968 				 * is preferred to allow kernel allocations
2969 				 * locally; otherwise, they can easily infringe
2970 				 * on other nodes when there is an abundance of
2971 				 * lowmem available to allocate from.
2972 				 */
2973 				return ZONELIST_ORDER_NODE;
2974 			}
2975 		}
2976 	}
2977 	if (!low_kmem_size ||  /* there is no DMA area. */
2978 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2979 		return ZONELIST_ORDER_NODE;
2980 	/*
2981 	 * Look into each node's config.
2982 	 * If there is a node where DMA/DMA32 memory makes up a very large
2983 	 * share of local memory, NODE_ORDER may be suitable.
2984 	 */
2985 	average_size = total_size /
2986 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2987 	for_each_online_node(nid) {
2988 		low_kmem_size = 0;
2989 		total_size = 0;
2990 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2991 			z = &NODE_DATA(nid)->node_zones[zone_type];
2992 			if (populated_zone(z)) {
2993 				if (zone_type < ZONE_NORMAL)
2994 					low_kmem_size += z->present_pages;
2995 				total_size += z->present_pages;
2996 			}
2997 		}
2998 		if (low_kmem_size &&
2999 		    total_size > average_size && /* ignore small node */
3000 		    low_kmem_size > total_size * 70/100)
3001 			return ZONELIST_ORDER_NODE;
3002 	}
3003 	return ZONELIST_ORDER_ZONE;
3004 }
3005 
3006 static void set_zonelist_order(void)
3007 {
3008 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3009 		current_zonelist_order = default_zonelist_order();
3010 	else
3011 		current_zonelist_order = user_zonelist_order;
3012 }
3013 
3014 static void build_zonelists(pg_data_t *pgdat)
3015 {
3016 	int j, node, load;
3017 	enum zone_type i;
3018 	nodemask_t used_mask;
3019 	int local_node, prev_node;
3020 	struct zonelist *zonelist;
3021 	int order = current_zonelist_order;
3022 
3023 	/* initialize zonelists */
3024 	for (i = 0; i < MAX_ZONELISTS; i++) {
3025 		zonelist = pgdat->node_zonelists + i;
3026 		zonelist->_zonerefs[0].zone = NULL;
3027 		zonelist->_zonerefs[0].zone_idx = 0;
3028 	}
3029 
3030 	/* NUMA-aware ordering of nodes */
3031 	local_node = pgdat->node_id;
3032 	load = nr_online_nodes;
3033 	prev_node = local_node;
3034 	nodes_clear(used_mask);
3035 
3036 	memset(node_order, 0, sizeof(node_order));
3037 	j = 0;
3038 
3039 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3040 		int distance = node_distance(local_node, node);
3041 
3042 		/*
3043 		 * If another node is sufficiently far away then it is better
3044 		 * to reclaim pages in a zone before going off node.
3045 		 */
3046 		if (distance > RECLAIM_DISTANCE)
3047 			zone_reclaim_mode = 1;
3048 
3049 		/*
3050 		 * We don't want to pressure a particular node,
3051 		 * so add a penalty to the first node in the same
3052 		 * distance group to make the selection round-robin.
3053 		 */
3054 		if (distance != node_distance(local_node, prev_node))
3055 			node_load[node] = load;
3056 
3057 		prev_node = node;
3058 		load--;
3059 		if (order == ZONELIST_ORDER_NODE)
3060 			build_zonelists_in_node_order(pgdat, node);
3061 		else
3062 			node_order[j++] = node;	/* remember order */
3063 	}
3064 
3065 	if (order == ZONELIST_ORDER_ZONE) {
3066 		/* calculate node order -- i.e., DMA last! */
3067 		build_zonelists_in_zone_order(pgdat, j);
3068 	}
3069 
3070 	build_thisnode_zonelists(pgdat);
3071 }
3072 
3073 /* Construct the zonelist performance cache - see further mmzone.h */
3074 static void build_zonelist_cache(pg_data_t *pgdat)
3075 {
3076 	struct zonelist *zonelist;
3077 	struct zonelist_cache *zlc;
3078 	struct zoneref *z;
3079 
3080 	zonelist = &pgdat->node_zonelists[0];
3081 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3082 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3083 	for (z = zonelist->_zonerefs; z->zone; z++)
3084 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3085 }
3086 
3087 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3088 /*
3089  * Return node id of node used for "local" allocations.
3090  * I.e., first node id of first zone in arg node's generic zonelist.
3091  * Used for initializing percpu 'numa_mem', which is used primarily
3092  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3093  */
3094 int local_memory_node(int node)
3095 {
3096 	struct zone *zone;
3097 
3098 	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3099 				   gfp_zone(GFP_KERNEL),
3100 				   NULL,
3101 				   &zone);
3102 	return zone->node;
3103 }
3104 #endif
3105 
3106 #else	/* CONFIG_NUMA */
3107 
3108 static void set_zonelist_order(void)
3109 {
3110 	current_zonelist_order = ZONELIST_ORDER_ZONE;
3111 }
3112 
3113 static void build_zonelists(pg_data_t *pgdat)
3114 {
3115 	int node, local_node;
3116 	enum zone_type j;
3117 	struct zonelist *zonelist;
3118 
3119 	local_node = pgdat->node_id;
3120 
3121 	zonelist = &pgdat->node_zonelists[0];
3122 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3123 
3124 	/*
3125 	 * Now we build the zonelist so that it contains the zones
3126 	 * of all the other nodes.
3127 	 * We don't want to pressure a particular node, so when
3128 	 * building the zones for node N, we make sure that the
3129 	 * zones coming right after the local ones are those from
3130 	 * node N+1 (modulo N)
3131 	 */
3132 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3133 		if (!node_online(node))
3134 			continue;
3135 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3136 							MAX_NR_ZONES - 1);
3137 	}
3138 	for (node = 0; node < local_node; node++) {
3139 		if (!node_online(node))
3140 			continue;
3141 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3142 							MAX_NR_ZONES - 1);
3143 	}
3144 
3145 	zonelist->_zonerefs[j].zone = NULL;
3146 	zonelist->_zonerefs[j].zone_idx = 0;
3147 }
3148 
3149 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3150 static void build_zonelist_cache(pg_data_t *pgdat)
3151 {
3152 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
3153 }
3154 
3155 #endif	/* CONFIG_NUMA */
3156 
3157 /*
3158  * Boot pageset table. One per cpu which is going to be used for all
3159  * zones and all nodes. The parameters will be set in such a way
3160  * that an item put on a list will immediately be handed over to
3161  * the buddy list. This is safe since pageset manipulation is done
3162  * with interrupts disabled.
3163  *
3164  * The boot_pagesets must be kept even after bootup is complete for
3165  * unused processors and/or zones. They do play a role for bootstrapping
3166  * hotplugged processors.
3167  *
3168  * zoneinfo_show() and maybe other functions do
3169  * not check if the processor is online before following the pageset pointer.
3170  * Other parts of the kernel may not check if the zone is available.
3171  */
3172 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3173 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3174 static void setup_zone_pageset(struct zone *zone);
3175 
3176 /*
3177  * Global mutex to protect against size modification of zonelists
3178  * as well as to serialize pageset setup for the new populated zone.
3179  */
3180 DEFINE_MUTEX(zonelists_mutex);
3181 
3182 /* The return type is int just to match the stop_machine() callback */
3183 static __init_refok int __build_all_zonelists(void *data)
3184 {
3185 	int nid;
3186 	int cpu;
3187 
3188 #ifdef CONFIG_NUMA
3189 	memset(node_load, 0, sizeof(node_load));
3190 #endif
3191 	for_each_online_node(nid) {
3192 		pg_data_t *pgdat = NODE_DATA(nid);
3193 
3194 		build_zonelists(pgdat);
3195 		build_zonelist_cache(pgdat);
3196 	}
3197 
3198 	/*
3199 	 * Initialize the boot_pagesets that are going to be used
3200 	 * for bootstrapping processors. The real pagesets for
3201 	 * each zone will be allocated later when the per cpu
3202 	 * allocator is available.
3203 	 *
3204 	 * boot_pagesets are used also for bootstrapping offline
3205 	 * cpus if the system is already booted because the pagesets
3206 	 * are needed to initialize allocators on a specific cpu too.
3207 	 * F.e. the percpu allocator needs the page allocator which
3208 	 * needs the percpu allocator in order to allocate its pagesets
3209 	 * (a chicken-egg dilemma).
3210 	 */
3211 	for_each_possible_cpu(cpu) {
3212 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3213 
3214 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3215 		/*
3216 		 * We now know the "local memory node" for each node--
3217 		 * i.e., the node of the first zone in the generic zonelist.
3218 		 * Set up numa_mem percpu variable for on-line cpus.  During
3219 		 * boot, only the boot cpu should be on-line;  we'll init the
3220 		 * secondary cpus' numa_mem as they come on-line.  During
3221 		 * node/memory hotplug, we'll fixup all on-line cpus.
3222 		 */
3223 		if (cpu_online(cpu))
3224 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3225 #endif
3226 	}
3227 
3228 	return 0;
3229 }
3230 
3231 /*
3232  * Called with zonelists_mutex held always
3233  * unless system_state == SYSTEM_BOOTING.
3234  */
3235 void __ref build_all_zonelists(void *data)
3236 {
3237 	set_zonelist_order();
3238 
3239 	if (system_state == SYSTEM_BOOTING) {
3240 		__build_all_zonelists(NULL);
3241 		mminit_verify_zonelist();
3242 		cpuset_init_current_mems_allowed();
3243 	} else {
3244 		/* We have to stop all cpus to guarantee there is no user
3245 		   of the zonelists. */
3246 #ifdef CONFIG_MEMORY_HOTPLUG
3247 		if (data)
3248 			setup_zone_pageset((struct zone *)data);
3249 #endif
3250 		stop_machine(__build_all_zonelists, NULL, NULL);
3251 		/* cpuset refresh routine should be here */
3252 	}
3253 	vm_total_pages = nr_free_pagecache_pages();
3254 	/*
3255 	 * Disable grouping by mobility if the number of pages in the
3256 	 * system is too low to allow the mechanism to work. It would be
3257 	 * more accurate, but expensive to check per-zone. This check is
3258 	 * made on memory-hotadd so a system can start with mobility
3259 	 * disabled and enable it later
3260 	 */
3261 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3262 		page_group_by_mobility_disabled = 1;
3263 	else
3264 		page_group_by_mobility_disabled = 0;
3265 
3266 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
3267 		"Total pages: %ld\n",
3268 			nr_online_nodes,
3269 			zonelist_order_name[current_zonelist_order],
3270 			page_group_by_mobility_disabled ? "off" : "on",
3271 			vm_total_pages);
3272 #ifdef CONFIG_NUMA
3273 	printk("Policy zone: %s\n", zone_names[policy_zone]);
3274 #endif
3275 }
3276 
3277 /*
3278  * Helper functions to size the waitqueue hash table.
3279  * Essentially these want to choose hash table sizes sufficiently
3280  * large so that collisions trying to wait on pages are rare.
3281  * But in fact, the number of active page waitqueues on typical
3282  * systems is ridiculously low, less than 200. So this is even
3283  * conservative, even though it seems large.
3284  *
3285  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3286  * waitqueues, i.e. the size of the waitq table given the number of pages.
3287  */
3288 #define PAGES_PER_WAITQUEUE	256
3289 
3290 #ifndef CONFIG_MEMORY_HOTPLUG
3291 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3292 {
3293 	unsigned long size = 1;
3294 
3295 	pages /= PAGES_PER_WAITQUEUE;
3296 
3297 	while (size < pages)
3298 		size <<= 1;
3299 
3300 	/*
3301 	 * Once we have dozens or even hundreds of threads sleeping
3302 	 * on IO we've got bigger problems than wait queue collision.
3303 	 * Limit the size of the wait table to a reasonable size.
3304 	 */
3305 	size = min(size, 4096UL);
3306 
3307 	return max(size, 4UL);
3308 }
3309 #else
3310 /*
3311  * A zone's size might be changed by hot-add, so it is not possible to determine
3312  * a suitable size for its wait_table.  So we use the maximum size now.
3313  *
3314  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3315  *
3316  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3317  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3318  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3319  *
3320  * By the traditional sizing above, the maximum number of entries is reached
3321  * when a zone's memory is (512K + 256) pages or more.  That equals:
3322  *
3323  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3324  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3325  *    powerpc (64K page size)             : =  (32G +16M)byte.
3326  */
3327 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3328 {
3329 	return 4096UL;
3330 }
3331 #endif
3332 
3333 /*
3334  * This is an integer logarithm so that shifts can be used later
3335  * to extract the more random high bits from the multiplicative
3336  * hash function before the remainder is taken.
3337  */
3338 static inline unsigned long wait_table_bits(unsigned long size)
3339 {
3340 	return ffz(~size);
3341 }
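
/*
 * Example (illustrative): table sizes are powers of two, so for
 * size == 4096, ~size has its first zero bit at position 12 and
 * wait_table_bits() returns log2(4096) == 12.
 */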
3342 
3343 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3344 
3345 /*
3346  * Check if a pageblock contains reserved pages
3347  */
3348 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3349 {
3350 	unsigned long pfn;
3351 
3352 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3353 		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3354 			return 1;
3355 	}
3356 	return 0;
3357 }
3358 
3359 /*
3360  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3361  * of blocks reserved is based on min_wmark_pages(zone). The memory within
3362  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3363  * higher will lead to a bigger reserve which will get freed as contiguous
3364  * blocks as reclaim kicks in
3365  * blocks as reclaim kicks in.
3366 static void setup_zone_migrate_reserve(struct zone *zone)
3367 {
3368 	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3369 	struct page *page;
3370 	unsigned long block_migratetype;
3371 	int reserve;
3372 
3373 	/* Get the start pfn, end pfn and the number of blocks to reserve */
3374 	start_pfn = zone->zone_start_pfn;
3375 	end_pfn = start_pfn + zone->spanned_pages;
3376 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3377 							pageblock_order;
3378 
3379 	/*
3380 	 * Reserve blocks are generally in place to help high-order atomic
3381 	 * allocations that are short-lived. A min_free_kbytes value that
3382 	 * would result in more than 2 reserve blocks for atomic allocations
3383 	 * is assumed to be in place to help anti-fragmentation for the
3384 	 * future allocation of hugepages at runtime.
3385 	 */
3386 	reserve = min(2, reserve);
3387 
3388 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3389 		if (!pfn_valid(pfn))
3390 			continue;
3391 		page = pfn_to_page(pfn);
3392 
3393 		/* Watch out for overlapping nodes */
3394 		if (page_to_nid(page) != zone_to_nid(zone))
3395 			continue;
3396 
3397 		/* Blocks with reserved pages will never be freed, skip them. */
3398 		block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3399 		if (pageblock_is_reserved(pfn, block_end_pfn))
3400 			continue;
3401 
3402 		block_migratetype = get_pageblock_migratetype(page);
3403 
3404 		/* If this block is reserved, account for it */
3405 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3406 			reserve--;
3407 			continue;
3408 		}
3409 
3410 		/* Suitable for reserving if this block is movable */
3411 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3412 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
3413 			move_freepages_block(zone, page, MIGRATE_RESERVE);
3414 			reserve--;
3415 			continue;
3416 		}
3417 
3418 		/*
3419 		 * If the reserve is met and this is a previously reserved block,
3420 		 * take it back
3421 		 */
3422 		if (block_migratetype == MIGRATE_RESERVE) {
3423 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3424 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
3425 		}
3426 	}
3427 }
3428 
3429 /*
3430  * Initially all pages are reserved - free ones are freed
3431  * up by free_all_bootmem() once the early boot process is
3432  * done. Non-atomic initialization, single-pass.
3433  */
3434 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3435 		unsigned long start_pfn, enum memmap_context context)
3436 {
3437 	struct page *page;
3438 	unsigned long end_pfn = start_pfn + size;
3439 	unsigned long pfn;
3440 	struct zone *z;
3441 
3442 	if (highest_memmap_pfn < end_pfn - 1)
3443 		highest_memmap_pfn = end_pfn - 1;
3444 
3445 	z = &NODE_DATA(nid)->node_zones[zone];
3446 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3447 		/*
3448 		 * There can be holes in boot-time mem_map[]s
3449 		 * handed to this function.  They do not
3450 		 * exist on hotplugged memory.
3451 		 */
3452 		if (context == MEMMAP_EARLY) {
3453 			if (!early_pfn_valid(pfn))
3454 				continue;
3455 			if (!early_pfn_in_nid(pfn, nid))
3456 				continue;
3457 		}
3458 		page = pfn_to_page(pfn);
3459 		set_page_links(page, zone, nid, pfn);
3460 		mminit_verify_page_links(page, zone, nid, pfn);
3461 		init_page_count(page);
3462 		reset_page_mapcount(page);
3463 		SetPageReserved(page);
3464 		/*
3465 		 * Mark the block movable so that blocks are reserved for
3466 		 * movable at startup. This will force kernel allocations
3467 		 * to reserve their blocks rather than leaking throughout
3468 		 * the address space during boot when many long-lived
3469 		 * kernel allocations are made. Later some blocks near
3470 		 * the start are marked MIGRATE_RESERVE by
3471 		 * setup_zone_migrate_reserve()
3472 		 *
3473 		 * The pageblock bitmap is created for the zone's valid pfn
3474 		 * range, but the memmap can be created for invalid pages
3475 		 * (for alignment). Check here so we don't call
3476 		 * set_pageblock_migratetype() against a pfn out of zone.
3477 		 */
3478 		if ((z->zone_start_pfn <= pfn)
3479 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3480 		    && !(pfn & (pageblock_nr_pages - 1)))
3481 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3482 
3483 		INIT_LIST_HEAD(&page->lru);
3484 #ifdef WANT_PAGE_VIRTUAL
3485 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3486 		if (!is_highmem_idx(zone))
3487 			set_page_address(page, __va(pfn << PAGE_SHIFT));
3488 #endif
3489 	}
3490 }
3491 
3492 static void __meminit zone_init_free_lists(struct zone *zone)
3493 {
3494 	int order, t;
3495 	for_each_migratetype_order(order, t) {
3496 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3497 		zone->free_area[order].nr_free = 0;
3498 	}
3499 }
3500 
3501 #ifndef __HAVE_ARCH_MEMMAP_INIT
3502 #define memmap_init(size, nid, zone, start_pfn) \
3503 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3504 #endif
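
/*
 * For illustration, free_area_init_core() below invokes this for each
 * populated zone.  A hypothetical zone of 1GiB starting at pfn 0 on
 * node 0 would be set up with something like:
 *
 *	memmap_init(1UL << (30 - PAGE_SHIFT), 0, ZONE_NORMAL, 0);
 *
 * which marks every page reserved, links it to its zone and node, and
 * flags each pageblock-aligned page MIGRATE_MOVABLE.
 */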
3505 
3506 static int zone_batchsize(struct zone *zone)
3507 {
3508 #ifdef CONFIG_MMU
3509 	int batch;
3510 
3511 	/*
3512 	 * The per-cpu-pages pools are set to around 1/1000th of the
3513 	 * size of the zone, but no more than 1/2 of a meg.
3514 	 *
3515 	 * OK, so we don't know how big the cache is.  So guess.
3516 	 */
3517 	batch = zone->present_pages / 1024;
3518 	if (batch * PAGE_SIZE > 512 * 1024)
3519 		batch = (512 * 1024) / PAGE_SIZE;
3520 	batch /= 4;		/* We effectively *= 4 below */
3521 	if (batch < 1)
3522 		batch = 1;
3523 
3524 	/*
3525 	 * Clamp the batch to a 2^n - 1 value. Having a power
3526 	 * of 2 value was found to be more likely to have
3527 	 * suboptimal cache aliasing properties in some cases.
3528 	 *
3529 	 * For example if 2 tasks are alternately allocating
3530 	 * batches of pages, one task can end up with a lot
3531 	 * of pages of one half of the possible page colors
3532 	 * and the other with pages of the other colors.
3533 	 */
3534 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3535 
3536 	return batch;
3537 
3538 #else
3539 	/* The deferral and batching of frees should be suppressed under NOMMU
3540 	 * conditions.
3541 	 *
3542 	 * The problem is that NOMMU needs to be able to allocate large chunks
3543 	 * of contiguous memory as there's no hardware page translation to
3544 	 * assemble apparent contiguous memory from discontiguous pages.
3545 	 *
3546 	 * Queueing large contiguous runs of pages for batching, however,
3547 	 * causes the pages to actually be freed in smaller chunks.  As there
3548 	 * can be a significant delay between the individual batches being
3549 	 * recycled, this leads to the once large chunks of space being
3550 	 * fragmented and becoming unavailable for high-order allocations.
3551 	 */
3552 	return 0;
3553 #endif
3554 }
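
/*
 * Worked example with hypothetical numbers: a 1GiB zone with 4KiB pages
 * has present_pages = 262144, so batch starts at 256.  256 pages is
 * 1MiB, above the 512KiB cap, so batch becomes 128, then 32 after the
 * division by 4.  rounddown_pow_of_two(32 + 16) - 1 = 31, so the
 * per-cpu lists exchange pages with the buddy lists 31 at a time.
 */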
3555 
3556 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3557 {
3558 	struct per_cpu_pages *pcp;
3559 	int migratetype;
3560 
3561 	memset(p, 0, sizeof(*p));
3562 
3563 	pcp = &p->pcp;
3564 	pcp->count = 0;
3565 	pcp->high = 6 * batch;
3566 	pcp->batch = max(1UL, 1 * batch);
3567 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3568 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3569 }
3570 
3571 /*
3572  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3573  * to the value high for the pageset p.
3574  */
3575 
3576 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3577 				unsigned long high)
3578 {
3579 	struct per_cpu_pages *pcp;
3580 
3581 	pcp = &p->pcp;
3582 	pcp->high = high;
3583 	pcp->batch = max(1UL, high/4);
3584 	if ((high/4) > (PAGE_SHIFT * 8))
3585 		pcp->batch = PAGE_SHIFT * 8;
3586 }
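
/*
 * For illustration: with the percpu_pagelist_fraction sysctl set to 8
 * on a zone of 262144 present pages, setup_zone_pageset() below passes
 * high = 32768.  high/4 = 8192 exceeds PAGE_SHIFT * 8 (96 with 4KiB
 * pages), so the batch is clamped to 96.
 */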
3587 
3588 static void setup_zone_pageset(struct zone *zone)
3589 {
3590 	int cpu;
3591 
3592 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
3593 
3594 	for_each_possible_cpu(cpu) {
3595 		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3596 
3597 		setup_pageset(pcp, zone_batchsize(zone));
3598 
3599 		if (percpu_pagelist_fraction)
3600 			setup_pagelist_highmark(pcp,
3601 				(zone->present_pages /
3602 					percpu_pagelist_fraction));
3603 	}
3604 }
3605 
3606 /*
3607  * Allocate per cpu pagesets and initialize them.
3608  * Before this call only boot pagesets were available.
3609  */
3610 void __init setup_per_cpu_pageset(void)
3611 {
3612 	struct zone *zone;
3613 
3614 	for_each_populated_zone(zone)
3615 		setup_zone_pageset(zone);
3616 }
3617 
3618 static noinline __init_refok
3619 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3620 {
3621 	int i;
3622 	struct pglist_data *pgdat = zone->zone_pgdat;
3623 	size_t alloc_size;
3624 
3625 	/*
3626 	 * The per-page waitqueue mechanism uses hashed waitqueues
3627 	 * per zone.
3628 	 */
3629 	zone->wait_table_hash_nr_entries =
3630 		 wait_table_hash_nr_entries(zone_size_pages);
3631 	zone->wait_table_bits =
3632 		wait_table_bits(zone->wait_table_hash_nr_entries);
3633 	alloc_size = zone->wait_table_hash_nr_entries
3634 					* sizeof(wait_queue_head_t);
3635 
3636 	if (!slab_is_available()) {
3637 		zone->wait_table = (wait_queue_head_t *)
3638 			alloc_bootmem_node_nopanic(pgdat, alloc_size);
3639 	} else {
3640 		/*
3641 		 * This case means that a zone whose size was 0 gains new
3642 		 * memory via memory hot-add.
3643 		 * But it may also be that an entire new node was hot-added.
3644 		 * In that case vmalloc() cannot yet use the new node's
3645 		 * memory, so this wait_table is allocated from memory that
3646 		 * is already online rather than from the new node itself.
3647 		 * Making it use the new node's own memory would require
3648 		 * further work.
3649 		 */
3650 		zone->wait_table = vmalloc(alloc_size);
3651 	}
3652 	if (!zone->wait_table)
3653 		return -ENOMEM;
3654 
3655 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3656 		init_waitqueue_head(zone->wait_table + i);
3657 
3658 	return 0;
3659 }
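
/*
 * The table built above is consumed by the page waitqueue hash; a
 * simplified sketch of the lookup (see page_waitqueue() in filemap.c
 * for the real thing) is roughly:
 *
 *	wait_queue_head_t *wq = &zone->wait_table[
 *			hash_ptr(page, zone->wait_table_bits)];
 */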
3660 
3661 static int __zone_pcp_update(void *data)
3662 {
3663 	struct zone *zone = data;
3664 	int cpu;
3665 	unsigned long batch = zone_batchsize(zone), flags;
3666 
3667 	for_each_possible_cpu(cpu) {
3668 		struct per_cpu_pageset *pset;
3669 		struct per_cpu_pages *pcp;
3670 
3671 		pset = per_cpu_ptr(zone->pageset, cpu);
3672 		pcp = &pset->pcp;
3673 
3674 		local_irq_save(flags);
3675 		free_pcppages_bulk(zone, pcp->count, pcp);
3676 		setup_pageset(pset, batch);
3677 		local_irq_restore(flags);
3678 	}
3679 	return 0;
3680 }
3681 
3682 void zone_pcp_update(struct zone *zone)
3683 {
3684 	stop_machine(__zone_pcp_update, zone, NULL);
3685 }
3686 
3687 static __meminit void zone_pcp_init(struct zone *zone)
3688 {
3689 	/*
3690 	 * per cpu subsystem is not up at this point. The following code
3691 	 * relies on the ability of the linker to provide the
3692 	 * offset of a (static) per cpu variable into the per cpu area.
3693 	 */
3694 	zone->pageset = &boot_pageset;
3695 
3696 	if (zone->present_pages)
3697 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3698 			zone->name, zone->present_pages,
3699 					 zone_batchsize(zone));
3700 }
3701 
3702 __meminit int init_currently_empty_zone(struct zone *zone,
3703 					unsigned long zone_start_pfn,
3704 					unsigned long size,
3705 					enum memmap_context context)
3706 {
3707 	struct pglist_data *pgdat = zone->zone_pgdat;
3708 	int ret;
3709 	ret = zone_wait_table_init(zone, size);
3710 	if (ret)
3711 		return ret;
3712 	pgdat->nr_zones = zone_idx(zone) + 1;
3713 
3714 	zone->zone_start_pfn = zone_start_pfn;
3715 
3716 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3717 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3718 			pgdat->node_id,
3719 			(unsigned long)zone_idx(zone),
3720 			zone_start_pfn, (zone_start_pfn + size));
3721 
3722 	zone_init_free_lists(zone);
3723 
3724 	return 0;
3725 }
3726 
3727 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3728 /*
3729  * Basic iterator support. Return the first range of PFNs for a node
3730  * Note: nid == MAX_NUMNODES returns first region regardless of node
3731  */
3732 static int __meminit first_active_region_index_in_nid(int nid)
3733 {
3734 	int i;
3735 
3736 	for (i = 0; i < nr_nodemap_entries; i++)
3737 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3738 			return i;
3739 
3740 	return -1;
3741 }
3742 
3743 /*
3744  * Basic iterator support. Return the next active range of PFNs for a node
3745  * Note: nid == MAX_NUMNODES returns next region regardless of node
3746  */
3747 static int __meminit next_active_region_index_in_nid(int index, int nid)
3748 {
3749 	for (index = index + 1; index < nr_nodemap_entries; index++)
3750 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3751 			return index;
3752 
3753 	return -1;
3754 }
3755 
3756 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3757 /*
3758  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3759  * Architectures may implement their own version but if add_active_range()
3760  * was used and there are no special requirements, this is a convenient
3761  * alternative
3762  */
3763 int __meminit __early_pfn_to_nid(unsigned long pfn)
3764 {
3765 	int i;
3766 
3767 	for (i = 0; i < nr_nodemap_entries; i++) {
3768 		unsigned long start_pfn = early_node_map[i].start_pfn;
3769 		unsigned long end_pfn = early_node_map[i].end_pfn;
3770 
3771 		if (start_pfn <= pfn && pfn < end_pfn)
3772 			return early_node_map[i].nid;
3773 	}
3774 	/* This is a memory hole */
3775 	return -1;
3776 }
3777 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3778 
3779 int __meminit early_pfn_to_nid(unsigned long pfn)
3780 {
3781 	int nid;
3782 
3783 	nid = __early_pfn_to_nid(pfn);
3784 	if (nid >= 0)
3785 		return nid;
3786 	/* just returns 0 */
3787 	return 0;
3788 }
3789 
3790 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3791 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3792 {
3793 	int nid;
3794 
3795 	nid = __early_pfn_to_nid(pfn);
3796 	if (nid >= 0 && nid != node)
3797 		return false;
3798 	return true;
3799 }
3800 #endif
3801 
3802 /* Basic iterator support to walk early_node_map[] */
3803 #define for_each_active_range_index_in_nid(i, nid) \
3804 	for (i = first_active_region_index_in_nid(nid); i != -1; \
3805 				i = next_active_region_index_in_nid(i, nid))
3806 
3807 /**
3808  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3809  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3810  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3811  *
3812  * If an architecture guarantees that all ranges registered with
3813  * add_active_ranges() contain no holes and may be freed, this
3814  * function may be used instead of calling free_bootmem() manually.
3815  */
3816 void __init free_bootmem_with_active_regions(int nid,
3817 						unsigned long max_low_pfn)
3818 {
3819 	int i;
3820 
3821 	for_each_active_range_index_in_nid(i, nid) {
3822 		unsigned long size_pages = 0;
3823 		unsigned long end_pfn = early_node_map[i].end_pfn;
3824 
3825 		if (early_node_map[i].start_pfn >= max_low_pfn)
3826 			continue;
3827 
3828 		if (end_pfn > max_low_pfn)
3829 			end_pfn = max_low_pfn;
3830 
3831 		size_pages = end_pfn - early_node_map[i].start_pfn;
3832 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3833 				PFN_PHYS(early_node_map[i].start_pfn),
3834 				size_pages << PAGE_SHIFT);
3835 	}
3836 }
3837 
3838 #ifdef CONFIG_HAVE_MEMBLOCK
3839 /*
3840  * Basic iterator support. Return the last range of PFNs for a node
3841  * Note: nid == MAX_NUMNODES returns last region regardless of node
3842  */
3843 static int __meminit last_active_region_index_in_nid(int nid)
3844 {
3845 	int i;
3846 
3847 	for (i = nr_nodemap_entries - 1; i >= 0; i--)
3848 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3849 			return i;
3850 
3851 	return -1;
3852 }
3853 
3854 /*
3855  * Basic iterator support. Return the previous active range of PFNs for a node
3856  * Note: nid == MAX_NUMNODES returns previous region regardless of node
3857  */
3858 static int __meminit previous_active_region_index_in_nid(int index, int nid)
3859 {
3860 	for (index = index - 1; index >= 0; index--)
3861 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3862 			return index;
3863 
3864 	return -1;
3865 }
3866 
3867 #define for_each_active_range_index_in_nid_reverse(i, nid) \
3868 	for (i = last_active_region_index_in_nid(nid); i != -1; \
3869 				i = previous_active_region_index_in_nid(i, nid))
3870 
3871 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3872 					u64 goal, u64 limit)
3873 {
3874 	int i;
3875 
3876 	/* Need to go over early_node_map to find out good range for node */
3877 	for_each_active_range_index_in_nid_reverse(i, nid) {
3878 		u64 addr;
3879 		u64 ei_start, ei_last;
3880 		u64 final_start, final_end;
3881 
3882 		ei_last = early_node_map[i].end_pfn;
3883 		ei_last <<= PAGE_SHIFT;
3884 		ei_start = early_node_map[i].start_pfn;
3885 		ei_start <<= PAGE_SHIFT;
3886 
3887 		final_start = max(ei_start, goal);
3888 		final_end = min(ei_last, limit);
3889 
3890 		if (final_start >= final_end)
3891 			continue;
3892 
3893 		addr = memblock_find_in_range(final_start, final_end, size, align);
3894 
3895 		if (addr == MEMBLOCK_ERROR)
3896 			continue;
3897 
3898 		return addr;
3899 	}
3900 
3901 	return MEMBLOCK_ERROR;
3902 }
3903 #endif
3904 
3905 int __init add_from_early_node_map(struct range *range, int az,
3906 				   int nr_range, int nid)
3907 {
3908 	int i;
3909 	u64 start, end;
3910 
3911 	/* need to go over early_node_map to find out good range for node */
3912 	for_each_active_range_index_in_nid(i, nid) {
3913 		start = early_node_map[i].start_pfn;
3914 		end = early_node_map[i].end_pfn;
3915 		nr_range = add_range(range, az, nr_range, start, end);
3916 	}
3917 	return nr_range;
3918 }
3919 
3920 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3921 {
3922 	int i;
3923 	int ret;
3924 
3925 	for_each_active_range_index_in_nid(i, nid) {
3926 		ret = work_fn(early_node_map[i].start_pfn,
3927 			      early_node_map[i].end_pfn, data);
3928 		if (ret)
3929 			break;
3930 	}
3931 }
3932 /**
3933  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3934  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3935  *
3936  * If an architecture guarantees that all ranges registered with
3937  * add_active_ranges() contain no holes and may be freed, this
3938  * function may be used instead of calling memory_present() manually.
3939  */
3940 void __init sparse_memory_present_with_active_regions(int nid)
3941 {
3942 	int i;
3943 
3944 	for_each_active_range_index_in_nid(i, nid)
3945 		memory_present(early_node_map[i].nid,
3946 				early_node_map[i].start_pfn,
3947 				early_node_map[i].end_pfn);
3948 }
3949 
3950 /**
3951  * get_pfn_range_for_nid - Return the start and end page frames for a node
3952  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3953  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3954  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3955  *
3956  * It returns the start and end page frame of a node based on information
3957  * provided by an arch calling add_active_range(). If called for a node
3958  * with no available memory, a warning is printed and the start and end
3959  * PFNs will be 0.
3960  */
3961 void __meminit get_pfn_range_for_nid(unsigned int nid,
3962 			unsigned long *start_pfn, unsigned long *end_pfn)
3963 {
3964 	int i;
3965 	*start_pfn = -1UL;
3966 	*end_pfn = 0;
3967 
3968 	for_each_active_range_index_in_nid(i, nid) {
3969 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3970 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3971 	}
3972 
3973 	if (*start_pfn == -1UL)
3974 		*start_pfn = 0;
3975 }
3976 
3977 /*
3978  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3979  * assumption is made that zones within a node are ordered in monotonic
3980  * increasing memory addresses so that the "highest" populated zone is used
3981  */
3982 static void __init find_usable_zone_for_movable(void)
3983 {
3984 	int zone_index;
3985 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3986 		if (zone_index == ZONE_MOVABLE)
3987 			continue;
3988 
3989 		if (arch_zone_highest_possible_pfn[zone_index] >
3990 				arch_zone_lowest_possible_pfn[zone_index])
3991 			break;
3992 	}
3993 
3994 	VM_BUG_ON(zone_index == -1);
3995 	movable_zone = zone_index;
3996 }
3997 
3998 /*
3999  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4000  * because it is sized independently of architecture. Unlike the other zones,
4001  * the starting point for ZONE_MOVABLE is not fixed. It may be different
4002  * in each node depending on the size of each node and how evenly kernelcore
4003  * is distributed. This helper function adjusts the zone ranges
4004  * provided by the architecture for a given node by using the end of the
4005  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4006  * zones within a node are ordered by monotonically increasing memory addresses
4007  */
4008 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4009 					unsigned long zone_type,
4010 					unsigned long node_start_pfn,
4011 					unsigned long node_end_pfn,
4012 					unsigned long *zone_start_pfn,
4013 					unsigned long *zone_end_pfn)
4014 {
4015 	/* Only adjust if ZONE_MOVABLE is on this node */
4016 	if (zone_movable_pfn[nid]) {
4017 		/* Size ZONE_MOVABLE */
4018 		if (zone_type == ZONE_MOVABLE) {
4019 			*zone_start_pfn = zone_movable_pfn[nid];
4020 			*zone_end_pfn = min(node_end_pfn,
4021 				arch_zone_highest_possible_pfn[movable_zone]);
4022 
4023 		/* Adjust for ZONE_MOVABLE starting within this range */
4024 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4025 				*zone_end_pfn > zone_movable_pfn[nid]) {
4026 			*zone_end_pfn = zone_movable_pfn[nid];
4027 
4028 		/* Check if this whole range is within ZONE_MOVABLE */
4029 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
4030 			*zone_start_pfn = *zone_end_pfn;
4031 	}
4032 }
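
/*
 * For illustration, with hypothetical pfns: suppose node 0 spans
 * [0, 0x100000) and zone_movable_pfn[0] = 0xc0000.  A ZONE_NORMAL
 * range of [0x40000, 0x100000) is clipped to end at 0xc0000, while the
 * node's ZONE_MOVABLE range becomes [0xc0000, 0x100000), assuming the
 * highest usable zone ends at or above the node.
 */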
4033 
4034 /*
4035  * Return the number of pages a zone spans in a node, including holes
4036  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4037  */
4038 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4039 					unsigned long zone_type,
4040 					unsigned long *ignored)
4041 {
4042 	unsigned long node_start_pfn, node_end_pfn;
4043 	unsigned long zone_start_pfn, zone_end_pfn;
4044 
4045 	/* Get the start and end of the node and zone */
4046 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4047 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4048 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4049 	adjust_zone_range_for_zone_movable(nid, zone_type,
4050 				node_start_pfn, node_end_pfn,
4051 				&zone_start_pfn, &zone_end_pfn);
4052 
4053 	/* Check that this node has pages within the zone's required range */
4054 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4055 		return 0;
4056 
4057 	/* Move the zone boundaries inside the node if necessary */
4058 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4059 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4060 
4061 	/* Return the spanned pages */
4062 	return zone_end_pfn - zone_start_pfn;
4063 }
4064 
4065 /*
4066  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4067  * then all holes in the requested range will be accounted for.
4068  */
4069 unsigned long __meminit __absent_pages_in_range(int nid,
4070 				unsigned long range_start_pfn,
4071 				unsigned long range_end_pfn)
4072 {
4073 	int i = 0;
4074 	unsigned long prev_end_pfn = 0, hole_pages = 0;
4075 	unsigned long start_pfn;
4076 
4077 	/* Find the end_pfn of the first active range of pfns in the node */
4078 	i = first_active_region_index_in_nid(nid);
4079 	if (i == -1)
4080 		return 0;
4081 
4082 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4083 
4084 	/* Account for ranges before physical memory on this node */
4085 	if (early_node_map[i].start_pfn > range_start_pfn)
4086 		hole_pages = prev_end_pfn - range_start_pfn;
4087 
4088 	/* Find all holes for the zone within the node */
4089 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
4090 
4091 		/* No need to continue if prev_end_pfn is outside the zone */
4092 		if (prev_end_pfn >= range_end_pfn)
4093 			break;
4094 
4095 		/* Make sure the end of the zone is not within the hole */
4096 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4097 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
4098 
4099 		/* Update the hole size count and move on */
4100 		if (start_pfn > range_start_pfn) {
4101 			BUG_ON(prev_end_pfn > start_pfn);
4102 			hole_pages += start_pfn - prev_end_pfn;
4103 		}
4104 		prev_end_pfn = early_node_map[i].end_pfn;
4105 	}
4106 
4107 	/* Account for ranges past physical memory on this node */
4108 	if (range_end_pfn > prev_end_pfn)
4109 		hole_pages += range_end_pfn -
4110 				max(range_start_pfn, prev_end_pfn);
4111 
4112 	return hole_pages;
4113 }
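
/*
 * Worked example with hypothetical ranges: if early_node_map holds
 * [64, 128) and [256, 512) for the node, a query over [0, 512) counts
 * 64 hole pages before the first range plus 128 between the two
 * ranges, returning 192.
 */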
4114 
4115 /**
4116  * absent_pages_in_range - Return number of page frames in holes within a range
4117  * @start_pfn: The start PFN to start searching for holes
4118  * @end_pfn: The end PFN to stop searching for holes
4119  *
4120  * It returns the number of page frames in memory holes within a range.
4121  */
4122 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4123 							unsigned long end_pfn)
4124 {
4125 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4126 }
4127 
4128 /* Return the number of page frames in holes in a zone on a node */
4129 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4130 					unsigned long zone_type,
4131 					unsigned long *ignored)
4132 {
4133 	unsigned long node_start_pfn, node_end_pfn;
4134 	unsigned long zone_start_pfn, zone_end_pfn;
4135 
4136 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4137 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
4138 							node_start_pfn);
4139 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
4140 							node_end_pfn);
4141 
4142 	adjust_zone_range_for_zone_movable(nid, zone_type,
4143 			node_start_pfn, node_end_pfn,
4144 			&zone_start_pfn, &zone_end_pfn);
4145 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4146 }
4147 
4148 #else
4149 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4150 					unsigned long zone_type,
4151 					unsigned long *zones_size)
4152 {
4153 	return zones_size[zone_type];
4154 }
4155 
4156 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4157 						unsigned long zone_type,
4158 						unsigned long *zholes_size)
4159 {
4160 	if (!zholes_size)
4161 		return 0;
4162 
4163 	return zholes_size[zone_type];
4164 }
4165 
4166 #endif
4167 
4168 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4169 		unsigned long *zones_size, unsigned long *zholes_size)
4170 {
4171 	unsigned long realtotalpages, totalpages = 0;
4172 	enum zone_type i;
4173 
4174 	for (i = 0; i < MAX_NR_ZONES; i++)
4175 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4176 								zones_size);
4177 	pgdat->node_spanned_pages = totalpages;
4178 
4179 	realtotalpages = totalpages;
4180 	for (i = 0; i < MAX_NR_ZONES; i++)
4181 		realtotalpages -=
4182 			zone_absent_pages_in_node(pgdat->node_id, i,
4183 								zholes_size);
4184 	pgdat->node_present_pages = realtotalpages;
4185 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4186 							realtotalpages);
4187 }
4188 
4189 #ifndef CONFIG_SPARSEMEM
4190 /*
4191  * Calculate the size of the zone->blockflags rounded to an unsigned long.
4192  * Start by making sure zonesize is a multiple of pageblock_order by rounding
4193  * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round the
4194  * result up to the nearest multiple of a long in bits, and return it in
4195  * bytes.
4196  */
4197 static unsigned long __init usemap_size(unsigned long zonesize)
4198 {
4199 	unsigned long usemapsize;
4200 
4201 	usemapsize = roundup(zonesize, pageblock_nr_pages);
4202 	usemapsize = usemapsize >> pageblock_order;
4203 	usemapsize *= NR_PAGEBLOCK_BITS;
4204 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4205 
4206 	return usemapsize / 8;
4207 }
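
/*
 * Worked example, assuming pageblock_nr_pages = 512 and
 * NR_PAGEBLOCK_BITS = 4 (both configuration dependent): a 1GiB zone of
 * 262144 pages has 512 pageblocks needing 2048 bits, already a
 * multiple of a 64-bit long, so usemap_size() returns 256 bytes.
 */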
4208 
4209 static void __init setup_usemap(struct pglist_data *pgdat,
4210 				struct zone *zone, unsigned long zonesize)
4211 {
4212 	unsigned long usemapsize = usemap_size(zonesize);
4213 	zone->pageblock_flags = NULL;
4214 	if (usemapsize)
4215 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4216 								   usemapsize);
4217 }
4218 #else
4219 static inline void setup_usemap(struct pglist_data *pgdat,
4220 				struct zone *zone, unsigned long zonesize) {}
4221 #endif /* CONFIG_SPARSEMEM */
4222 
4223 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4224 
4225 /* Return a sensible default order for the pageblock size. */
4226 static inline int pageblock_default_order(void)
4227 {
4228 	if (HPAGE_SHIFT > PAGE_SHIFT)
4229 		return HUGETLB_PAGE_ORDER;
4230 
4231 	return MAX_ORDER-1;
4232 }
4233 
4234 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4235 static inline void __init set_pageblock_order(unsigned int order)
4236 {
4237 	/* Check that pageblock_nr_pages has not already been setup */
4238 	if (pageblock_order)
4239 		return;
4240 
4241 	/*
4242 	 * Assume the largest contiguous order of interest is a huge page.
4243 	 * This value may be variable depending on boot parameters on IA64
4244 	 */
4245 	pageblock_order = order;
4246 }
4247 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4248 
4249 /*
4250  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4251  * and pageblock_default_order() are unused as pageblock_order is set
4252  * at compile-time. See include/linux/pageblock-flags.h for the values of
4253  * pageblock_order based on the kernel config
4254  */
4255 static inline int pageblock_default_order(unsigned int order)
4256 {
4257 	return MAX_ORDER-1;
4258 }
4259 #define set_pageblock_order(x)	do {} while (0)
4260 
4261 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4262 
4263 /*
4264  * Set up the zone data structures:
4265  *   - mark all pages reserved
4266  *   - mark all memory queues empty
4267  *   - clear the memory bitmaps
4268  */
4269 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4270 		unsigned long *zones_size, unsigned long *zholes_size)
4271 {
4272 	enum zone_type j;
4273 	int nid = pgdat->node_id;
4274 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
4275 	int ret;
4276 
4277 	pgdat_resize_init(pgdat);
4278 	pgdat->nr_zones = 0;
4279 	init_waitqueue_head(&pgdat->kswapd_wait);
4280 	pgdat->kswapd_max_order = 0;
4281 	pgdat_page_cgroup_init(pgdat);
4282 
4283 	for (j = 0; j < MAX_NR_ZONES; j++) {
4284 		struct zone *zone = pgdat->node_zones + j;
4285 		unsigned long size, realsize, memmap_pages;
4286 		enum lru_list l;
4287 
4288 		size = zone_spanned_pages_in_node(nid, j, zones_size);
4289 		realsize = size - zone_absent_pages_in_node(nid, j,
4290 								zholes_size);
4291 
4292 		/*
4293 		 * Adjust realsize so that it accounts for how much memory
4294 		 * is used by this zone for memmap. This affects the watermark
4295 		 * and per-cpu initialisations
4296 		 */
4297 		memmap_pages =
4298 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4299 		if (realsize >= memmap_pages) {
4300 			realsize -= memmap_pages;
4301 			if (memmap_pages)
4302 				printk(KERN_DEBUG
4303 				       "  %s zone: %lu pages used for memmap\n",
4304 				       zone_names[j], memmap_pages);
4305 		} else
4306 			printk(KERN_WARNING
4307 				"  %s zone: %lu pages exceeds realsize %lu\n",
4308 				zone_names[j], memmap_pages, realsize);
4309 
4310 		/* Account for reserved pages */
4311 		if (j == 0 && realsize > dma_reserve) {
4312 			realsize -= dma_reserve;
4313 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4314 					zone_names[0], dma_reserve);
4315 		}
4316 
4317 		if (!is_highmem_idx(j))
4318 			nr_kernel_pages += realsize;
4319 		nr_all_pages += realsize;
4320 
4321 		zone->spanned_pages = size;
4322 		zone->present_pages = realsize;
4323 #ifdef CONFIG_NUMA
4324 		zone->node = nid;
4325 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4326 						/ 100;
4327 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4328 #endif
4329 		zone->name = zone_names[j];
4330 		spin_lock_init(&zone->lock);
4331 		spin_lock_init(&zone->lru_lock);
4332 		zone_seqlock_init(zone);
4333 		zone->zone_pgdat = pgdat;
4334 
4335 		zone_pcp_init(zone);
4336 		for_each_lru(l)
4337 			INIT_LIST_HEAD(&zone->lru[l].list);
4338 		zone->reclaim_stat.recent_rotated[0] = 0;
4339 		zone->reclaim_stat.recent_rotated[1] = 0;
4340 		zone->reclaim_stat.recent_scanned[0] = 0;
4341 		zone->reclaim_stat.recent_scanned[1] = 0;
4342 		zap_zone_vm_stats(zone);
4343 		zone->flags = 0;
4344 		if (!size)
4345 			continue;
4346 
4347 		set_pageblock_order(pageblock_default_order());
4348 		setup_usemap(pgdat, zone, size);
4349 		ret = init_currently_empty_zone(zone, zone_start_pfn,
4350 						size, MEMMAP_EARLY);
4351 		BUG_ON(ret);
4352 		memmap_init(size, nid, j, zone_start_pfn);
4353 		zone_start_pfn += size;
4354 	}
4355 }
4356 
4357 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4358 {
4359 	/* Skip empty nodes */
4360 	if (!pgdat->node_spanned_pages)
4361 		return;
4362 
4363 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4364 	/* ia64 gets its own node_mem_map, before this, without bootmem */
4365 	if (!pgdat->node_mem_map) {
4366 		unsigned long size, start, end;
4367 		struct page *map;
4368 
4369 		/*
4370 		 * The zone's endpoints aren't required to be MAX_ORDER
4371 		 * aligned but the node_mem_map endpoints must be in order
4372 		 * for the buddy allocator to function correctly.
4373 		 */
4374 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4375 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4376 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
4377 		size =  (end - start) * sizeof(struct page);
4378 		map = alloc_remap(pgdat->node_id, size);
4379 		if (!map)
4380 			map = alloc_bootmem_node_nopanic(pgdat, size);
4381 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4382 	}
4383 #ifndef CONFIG_NEED_MULTIPLE_NODES
4384 	/*
4385 	 * With no DISCONTIG, the global mem_map is just set as node 0's
4386 	 */
4387 	if (pgdat == NODE_DATA(0)) {
4388 		mem_map = NODE_DATA(0)->node_mem_map;
4389 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4390 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4391 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4392 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4393 	}
4394 #endif
4395 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
4396 }
4397 
4398 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4399 		unsigned long node_start_pfn, unsigned long *zholes_size)
4400 {
4401 	pg_data_t *pgdat = NODE_DATA(nid);
4402 
4403 	pgdat->node_id = nid;
4404 	pgdat->node_start_pfn = node_start_pfn;
4405 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
4406 
4407 	alloc_node_mem_map(pgdat);
4408 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4409 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4410 		nid, (unsigned long)pgdat,
4411 		(unsigned long)pgdat->node_mem_map);
4412 #endif
4413 
4414 	free_area_init_core(pgdat, zones_size, zholes_size);
4415 }
4416 
4417 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4418 
4419 #if MAX_NUMNODES > 1
4420 /*
4421  * Figure out the number of possible node ids.
4422  */
4423 static void __init setup_nr_node_ids(void)
4424 {
4425 	unsigned int node;
4426 	unsigned int highest = 0;
4427 
4428 	for_each_node_mask(node, node_possible_map)
4429 		highest = node;
4430 	nr_node_ids = highest + 1;
4431 }
4432 #else
4433 static inline void setup_nr_node_ids(void)
4434 {
4435 }
4436 #endif
4437 
4438 /**
4439  * add_active_range - Register a range of PFNs backed by physical memory
4440  * @nid: The node ID the range resides on
4441  * @start_pfn: The start PFN of the available physical memory
4442  * @end_pfn: The end PFN of the available physical memory
4443  *
4444  * These ranges are stored in early_node_map[] and later used by
4445  * free_area_init_nodes() to calculate zone sizes and holes. If the
4446  * range spans a memory hole, it is up to the architecture to ensure
4447  * the memory is not freed by the bootmem allocator. If possible
4448  * the range being registered will be merged with existing ranges.
4449  */
4450 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4451 						unsigned long end_pfn)
4452 {
4453 	int i;
4454 
4455 	mminit_dprintk(MMINIT_TRACE, "memory_register",
4456 			"Entering add_active_range(%d, %#lx, %#lx) "
4457 			"%d entries of %d used\n",
4458 			nid, start_pfn, end_pfn,
4459 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4460 
4461 	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4462 
4463 	/* Merge with existing active regions if possible */
4464 	for (i = 0; i < nr_nodemap_entries; i++) {
4465 		if (early_node_map[i].nid != nid)
4466 			continue;
4467 
4468 		/* Skip if an existing region covers this new one */
4469 		if (start_pfn >= early_node_map[i].start_pfn &&
4470 				end_pfn <= early_node_map[i].end_pfn)
4471 			return;
4472 
4473 		/* Merge forward if suitable */
4474 		if (start_pfn <= early_node_map[i].end_pfn &&
4475 				end_pfn > early_node_map[i].end_pfn) {
4476 			early_node_map[i].end_pfn = end_pfn;
4477 			return;
4478 		}
4479 
4480 		/* Merge backward if suitable */
4481 		if (start_pfn < early_node_map[i].start_pfn &&
4482 				end_pfn >= early_node_map[i].start_pfn) {
4483 			early_node_map[i].start_pfn = start_pfn;
4484 			return;
4485 		}
4486 	}
4487 
4488 	/* Check that early_node_map is large enough */
4489 	if (i >= MAX_ACTIVE_REGIONS) {
4490 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4491 							MAX_ACTIVE_REGIONS);
4492 		return;
4493 	}
4494 
4495 	early_node_map[i].nid = nid;
4496 	early_node_map[i].start_pfn = start_pfn;
4497 	early_node_map[i].end_pfn = end_pfn;
4498 	nr_nodemap_entries = i + 1;
4499 }
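
/*
 * For illustration, a hypothetical arch registering two abutting
 * ranges:
 *
 *	add_active_range(0, 0, 0x1000);
 *	add_active_range(0, 0x1000, 0x8000);
 *
 * ends up with a single entry [0, 0x8000) in early_node_map, because
 * the second call takes the merge-forward path above.
 */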
4500 
4501 /**
4502  * remove_active_range - Shrink an existing registered range of PFNs
4503  * @nid: The node id the range to be shrunk is on
4504  * @start_pfn: The start PFN of the range to remove
4505  * @end_pfn: The end PFN of the range to remove
4506  *
4507  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
4508  * The map is kept near the end of the physical page range that has
4509  * already been registered. This function allows an arch to shrink an
4510  * existing registered range.
4511  */
4512 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4513 				unsigned long end_pfn)
4514 {
4515 	int i, j;
4516 	int removed = 0;
4517 
4518 	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4519 			  nid, start_pfn, end_pfn);
4520 
4521 	/* Find the old active region end and shrink */
4522 	for_each_active_range_index_in_nid(i, nid) {
4523 		if (early_node_map[i].start_pfn >= start_pfn &&
4524 		    early_node_map[i].end_pfn <= end_pfn) {
4525 			/* clear it */
4526 			early_node_map[i].start_pfn = 0;
4527 			early_node_map[i].end_pfn = 0;
4528 			removed = 1;
4529 			continue;
4530 		}
4531 		if (early_node_map[i].start_pfn < start_pfn &&
4532 		    early_node_map[i].end_pfn > start_pfn) {
4533 			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4534 			early_node_map[i].end_pfn = start_pfn;
4535 			if (temp_end_pfn > end_pfn)
4536 				add_active_range(nid, end_pfn, temp_end_pfn);
4537 			continue;
4538 		}
4539 		if (early_node_map[i].start_pfn >= start_pfn &&
4540 		    early_node_map[i].end_pfn > end_pfn &&
4541 		    early_node_map[i].start_pfn < end_pfn) {
4542 			early_node_map[i].start_pfn = end_pfn;
4543 			continue;
4544 		}
4545 	}
4546 
4547 	if (!removed)
4548 		return;
4549 
4550 	/* remove the blank ones */
4551 	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4552 		if (early_node_map[i].nid != nid)
4553 			continue;
4554 		if (early_node_map[i].end_pfn)
4555 			continue;
4556 		/* we found it, get rid of it */
4557 		for (j = i; j < nr_nodemap_entries - 1; j++)
4558 			memcpy(&early_node_map[j], &early_node_map[j+1],
4559 				sizeof(early_node_map[j]));
4560 		j = nr_nodemap_entries - 1;
4561 		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4562 		nr_nodemap_entries--;
4563 	}
4564 }
4565 
4566 /**
4567  * remove_all_active_ranges - Remove all currently registered regions
4568  *
4569  * During discovery, it may be found that a table like SRAT is invalid
4570  * and an alternative discovery method must be used. This function removes
4571  * all currently registered regions.
4572  */
4573 void __init remove_all_active_ranges(void)
4574 {
4575 	memset(early_node_map, 0, sizeof(early_node_map));
4576 	nr_nodemap_entries = 0;
4577 }
4578 
4579 /* Compare two active node_active_regions */
4580 static int __init cmp_node_active_region(const void *a, const void *b)
4581 {
4582 	struct node_active_region *arange = (struct node_active_region *)a;
4583 	struct node_active_region *brange = (struct node_active_region *)b;
4584 
4585 	/* Done this way to avoid overflows */
4586 	if (arange->start_pfn > brange->start_pfn)
4587 		return 1;
4588 	if (arange->start_pfn < brange->start_pfn)
4589 		return -1;
4590 
4591 	return 0;
4592 }
4593 
4594 /* sort the node_map by start_pfn */
4595 void __init sort_node_map(void)
4596 {
4597 	sort(early_node_map, (size_t)nr_nodemap_entries,
4598 			sizeof(struct node_active_region),
4599 			cmp_node_active_region, NULL);
4600 }
4601 
4602 /**
4603  * node_map_pfn_alignment - determine the maximum internode alignment
4604  *
4605  * This function should be called after node map is populated and sorted.
4606  * This function should be called after the node map is populated and sorted.
4607  * all the nodes.
4608  *
4609  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4610  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4611  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
4612  * nodes are shifted by 256MiB, the result is 256MiB.  Note that if only the
4613  * last node is shifted, 1GiB is enough and this function will indicate so.
4614  * This is used to test whether pfn -> nid mapping of the chosen memory
4615  * model has fine enough granularity to avoid incorrect mapping for the
4616  * populated node map.
4617  *
4618  * Returns the determined alignment in pfn's.  0 if there is no alignment
4619  * requirement (single node).
4620  */
4621 unsigned long __init node_map_pfn_alignment(void)
4622 {
4623 	unsigned long accl_mask = 0, last_end = 0;
4624 	int last_nid = -1;
4625 	int i;
4626 
4627 	for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
4628 		int nid = early_node_map[i].nid;
4629 		unsigned long start = early_node_map[i].start_pfn;
4630 		unsigned long end = early_node_map[i].end_pfn;
4631 		unsigned long mask;
4632 
4633 		if (!start || last_nid < 0 || last_nid == nid) {
4634 			last_nid = nid;
4635 			last_end = end;
4636 			continue;
4637 		}
4638 
4639 		/*
4640 		 * Start with a mask granular enough to pin-point to the
4641 		 * start pfn and tick off bits one-by-one until it becomes
4642 		 * too coarse to separate the current node from the last.
4643 		 */
4644 		mask = ~((1 << __ffs(start)) - 1);
4645 		while (mask && last_end <= (start & (mask << 1)))
4646 			mask <<= 1;
4647 
4648 		/* accumulate all internode masks */
4649 		accl_mask |= mask;
4650 	}
4651 
4652 	/* convert mask to number of pages */
4653 	return ~accl_mask + 1;
4654 }
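
/*
 * Worked example with hypothetical pfns: two nodes covering
 * [0x40000, 0x80000) and [0x80000, 0xc0000).  When the second entry is
 * visited, start = 0x80000 gives mask = ~0x7ffff; widening the mask
 * once would put both nodes in the same block (start & (mask << 1) is
 * 0, below last_end), so the mask is kept and the function returns
 * 0x80000 pages, i.e. 2GiB internode alignment with 4KiB pages.
 */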
4655 
4656 /* Find the lowest pfn for a node */
4657 static unsigned long __init find_min_pfn_for_node(int nid)
4658 {
4659 	int i;
4660 	unsigned long min_pfn = ULONG_MAX;
4661 
4662 	/* Assuming a sorted map, the first range found has the starting pfn */
4663 	for_each_active_range_index_in_nid(i, nid)
4664 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4665 
4666 	if (min_pfn == ULONG_MAX) {
4667 		printk(KERN_WARNING
4668 			"Could not find start_pfn for node %d\n", nid);
4669 		return 0;
4670 	}
4671 
4672 	return min_pfn;
4673 }
4674 
4675 /**
4676  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4677  *
4678  * It returns the minimum PFN based on information provided via
4679  * add_active_range().
4680  */
4681 unsigned long __init find_min_pfn_with_active_regions(void)
4682 {
4683 	return find_min_pfn_for_node(MAX_NUMNODES);
4684 }
4685 
4686 /*
4687  * early_calculate_totalpages()
4688  * Sum pages in active regions for movable zone.
4689  * Populate N_HIGH_MEMORY for calculating usable_nodes.
4690  */
4691 static unsigned long __init early_calculate_totalpages(void)
4692 {
4693 	int i;
4694 	unsigned long totalpages = 0;
4695 
4696 	for (i = 0; i < nr_nodemap_entries; i++) {
4697 		unsigned long pages = early_node_map[i].end_pfn -
4698 						early_node_map[i].start_pfn;
4699 		totalpages += pages;
4700 		if (pages)
4701 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4702 	}
4703 	return totalpages;
4704 }
4705 
4706 /*
4707  * Find the PFN the Movable zone begins in each node. Kernel memory
4708  * is spread evenly between nodes as long as the nodes have enough
4709  * memory. When they don't, some nodes will have more kernelcore than
4710  * others
4711  */
4712 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4713 {
4714 	int i, nid;
4715 	unsigned long usable_startpfn;
4716 	unsigned long kernelcore_node, kernelcore_remaining;
4717 	/* save the state before borrowing the nodemask */
4718 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4719 	unsigned long totalpages = early_calculate_totalpages();
4720 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4721 
4722 	/*
4723 	 * If movablecore was specified, calculate the corresponding
4724 	 * size of kernelcore so that memory usable for
4725 	 * any allocation type is evenly spread. If both kernelcore
4726 	 * and movablecore are specified, then the value of kernelcore
4727 	 * will be used for required_kernelcore if it's greater than
4728 	 * what movablecore would have allowed.
4729 	 */
4730 	if (required_movablecore) {
4731 		unsigned long corepages;
4732 
4733 		/*
4734 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4735 		 * was requested by the user
4736 		 */
4737 		required_movablecore =
4738 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4739 		corepages = totalpages - required_movablecore;
4740 
4741 		required_kernelcore = max(required_kernelcore, corepages);
4742 	}
4743 
4744 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4745 	if (!required_kernelcore)
4746 		goto out;
4747 
4748 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4749 	find_usable_zone_for_movable();
4750 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4751 
4752 restart:
4753 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4754 	kernelcore_node = required_kernelcore / usable_nodes;
4755 	for_each_node_state(nid, N_HIGH_MEMORY) {
4756 		/*
4757 		 * Recalculate kernelcore_node if the division per node
4758 		 * now exceeds what is necessary to satisfy the requested
4759 		 * amount of memory for the kernel
4760 		 */
4761 		if (required_kernelcore < kernelcore_node)
4762 			kernelcore_node = required_kernelcore / usable_nodes;
4763 
4764 		/*
4765 		 * As the map is walked, we track how much memory is usable
4766 		 * by the kernel using kernelcore_remaining. When it is
4767 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4768 		 */
4769 		kernelcore_remaining = kernelcore_node;
4770 
4771 		/* Go through each range of PFNs within this node */
4772 		for_each_active_range_index_in_nid(i, nid) {
4773 			unsigned long start_pfn, end_pfn;
4774 			unsigned long size_pages;
4775 
4776 			start_pfn = max(early_node_map[i].start_pfn,
4777 						zone_movable_pfn[nid]);
4778 			end_pfn = early_node_map[i].end_pfn;
4779 			if (start_pfn >= end_pfn)
4780 				continue;
4781 
4782 			/* Account for what is only usable for kernelcore */
4783 			if (start_pfn < usable_startpfn) {
4784 				unsigned long kernel_pages;
4785 				kernel_pages = min(end_pfn, usable_startpfn)
4786 								- start_pfn;
4787 
4788 				kernelcore_remaining -= min(kernel_pages,
4789 							kernelcore_remaining);
4790 				required_kernelcore -= min(kernel_pages,
4791 							required_kernelcore);
4792 
4793 				/* Continue if range is now fully accounted */
4794 				if (end_pfn <= usable_startpfn) {
4795 
4796 					/*
4797 					 * Push zone_movable_pfn to the end so
4798 					 * that if we have to rebalance
4799 					 * kernelcore across nodes, we will
4800 					 * not double account here
4801 					 */
4802 					zone_movable_pfn[nid] = end_pfn;
4803 					continue;
4804 				}
4805 				start_pfn = usable_startpfn;
4806 			}
4807 
4808 			/*
4809 			 * The usable PFN range for ZONE_MOVABLE is from
4810 			 * start_pfn->end_pfn. Calculate size_pages as the
4811 			 * number of pages used as kernelcore
4812 			 */
4813 			size_pages = end_pfn - start_pfn;
4814 			if (size_pages > kernelcore_remaining)
4815 				size_pages = kernelcore_remaining;
4816 			zone_movable_pfn[nid] = start_pfn + size_pages;
4817 
4818 			/*
4819 			 * Some kernelcore has been met, update counts and
4820 			 * break if the kernelcore for this node has been
4821 			 * satisfied
4822 			 */
4823 			required_kernelcore -= min(required_kernelcore,
4824 								size_pages);
4825 			kernelcore_remaining -= size_pages;
4826 			if (!kernelcore_remaining)
4827 				break;
4828 		}
4829 	}
4830 
4831 	/*
4832 	 * If there is still required_kernelcore, we do another pass with one
4833 	 * less node in the count. This will push zone_movable_pfn[nid] further
4834 	 * along on the nodes that still have memory until kernelcore is
4835  * satisfied
4836 	 */
4837 	usable_nodes--;
4838 	if (usable_nodes && required_kernelcore > usable_nodes)
4839 		goto restart;
4840 
4841 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4842 	for (nid = 0; nid < MAX_NUMNODES; nid++)
4843 		zone_movable_pfn[nid] =
4844 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4845 
4846 out:
4847 	/* restore the node_state */
4848 	node_states[N_HIGH_MEMORY] = saved_node_state;
4849 }
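
/*
 * For illustration: on a hypothetical two-node machine with 1GiB per
 * node, booting with kernelcore=1G gives kernelcore_node = 131072
 * pages per pass, so zone_movable_pfn[] lands roughly halfway into
 * each node and about 512MiB per node ends up in ZONE_MOVABLE.
 */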
4850 
4851 /* Any regular memory on that node? */
4852 static void check_for_regular_memory(pg_data_t *pgdat)
4853 {
4854 #ifdef CONFIG_HIGHMEM
4855 	enum zone_type zone_type;
4856 
4857 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4858 		struct zone *zone = &pgdat->node_zones[zone_type];
4859 		if (zone->present_pages)
4860 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4861 	}
4862 #endif
4863 }
4864 
4865 /**
4866  * free_area_init_nodes - Initialise all pg_data_t and zone data
4867  * @max_zone_pfn: an array of max PFNs for each zone
4868  *
4869  * This will call free_area_init_node() for each active node in the system.
4870  * Using the page ranges provided by add_active_range(), the size of each
4871  * zone in each node and their holes are calculated. If the maximum PFNs
4872  * of two adjacent zones match, it is assumed that the higher zone is empty.
4873  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4874  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4875  * starts where the previous one ended. For example, ZONE_DMA32 starts
4876  * at arch_max_dma_pfn.
4877  */
4878 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4879 {
4880 	unsigned long nid;
4881 	int i;
4882 
4883 	/* Sort early_node_map as initialisation assumes it is sorted */
4884 	sort_node_map();
4885 
4886 	/* Record where the zone boundaries are */
4887 	memset(arch_zone_lowest_possible_pfn, 0,
4888 				sizeof(arch_zone_lowest_possible_pfn));
4889 	memset(arch_zone_highest_possible_pfn, 0,
4890 				sizeof(arch_zone_highest_possible_pfn));
4891 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4892 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4893 	for (i = 1; i < MAX_NR_ZONES; i++) {
4894 		if (i == ZONE_MOVABLE)
4895 			continue;
4896 		arch_zone_lowest_possible_pfn[i] =
4897 			arch_zone_highest_possible_pfn[i-1];
4898 		arch_zone_highest_possible_pfn[i] =
4899 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4900 	}
4901 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4902 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4903 
4904 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4905 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4906 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4907 
4908 	/* Print out the zone ranges */
4909 	printk("Zone PFN ranges:\n");
4910 	for (i = 0; i < MAX_NR_ZONES; i++) {
4911 		if (i == ZONE_MOVABLE)
4912 			continue;
4913 		printk("  %-8s ", zone_names[i]);
4914 		if (arch_zone_lowest_possible_pfn[i] ==
4915 				arch_zone_highest_possible_pfn[i])
4916 			printk("empty\n");
4917 		else
4918 			printk("%0#10lx -> %0#10lx\n",
4919 				arch_zone_lowest_possible_pfn[i],
4920 				arch_zone_highest_possible_pfn[i]);
4921 	}
4922 
4923 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4924 	printk("Movable zone start PFN for each node\n");
4925 	for (i = 0; i < MAX_NUMNODES; i++) {
4926 		if (zone_movable_pfn[i])
4927 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4928 	}
4929 
4930 	/* Print out the early_node_map[] */
4931 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4932 	for (i = 0; i < nr_nodemap_entries; i++)
4933 		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4934 						early_node_map[i].start_pfn,
4935 						early_node_map[i].end_pfn);
4936 
4937 	/* Initialise every node */
4938 	mminit_verify_pageflags_layout();
4939 	setup_nr_node_ids();
4940 	for_each_online_node(nid) {
4941 		pg_data_t *pgdat = NODE_DATA(nid);
4942 		free_area_init_node(nid, NULL,
4943 				find_min_pfn_for_node(nid), NULL);
4944 
4945 		/* Any memory on that node */
4946 		if (pgdat->node_present_pages)
4947 			node_set_state(nid, N_HIGH_MEMORY);
4948 		check_for_regular_memory(pgdat);
4949 	}
4950 }
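
/*
 * A hypothetical arch with 16MiB of ZONE_DMA and the rest in
 * ZONE_NORMAL would call this as:
 *
 *	unsigned long max_zone_pfn[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfn, 0, sizeof(max_zone_pfn));
 *	max_zone_pfn[ZONE_DMA] = 16UL << (20 - PAGE_SHIFT);
 *	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfn);
 *
 * after registering its memory with add_active_range().
 */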
4951 
4952 static int __init cmdline_parse_core(char *p, unsigned long *core)
4953 {
4954 	unsigned long long coremem;
4955 	if (!p)
4956 		return -EINVAL;
4957 
4958 	coremem = memparse(p, &p);
4959 	*core = coremem >> PAGE_SHIFT;
4960 
4961 	/* Paranoid check that UL is enough for the coremem value */
4962 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4963 
4964 	return 0;
4965 }
4966 
4967 /*
4968  * kernelcore=size sets the amount of memory for use for allocations that
4969  * cannot be reclaimed or migrated.
4970  */
4971 static int __init cmdline_parse_kernelcore(char *p)
4972 {
4973 	return cmdline_parse_core(p, &required_kernelcore);
4974 }
4975 
4976 /*
4977  * movablecore=size sets the amount of memory for use for allocations that
4978  * can be reclaimed or migrated.
4979  */
4980 static int __init cmdline_parse_movablecore(char *p)
4981 {
4982 	return cmdline_parse_core(p, &required_movablecore);
4983 }
4984 
4985 early_param("kernelcore", cmdline_parse_kernelcore);
4986 early_param("movablecore", cmdline_parse_movablecore);
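
/*
 * For illustration: booting with "kernelcore=512M" parses to
 * required_kernelcore = 131072 pages with 4KiB pages, spread across
 * nodes for unmovable allocations, with the remainder of each node
 * going to ZONE_MOVABLE; "movablecore=2G" approaches the same split
 * from the other direction.
 */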
4987 
4988 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4989 
4990 /**
4991  * set_dma_reserve - set the specified number of pages reserved in the first zone
4992  * @new_dma_reserve: The number of pages to mark reserved
4993  *
4994  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4995  * In the DMA zone, a significant percentage may be consumed by kernel image
4996  * and other unfreeable allocations which can skew the watermarks badly. This
4997  * function may optionally be used to account for unfreeable pages in the
4998  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4999  * smaller per-cpu batchsize.
5000  */
5001 void __init set_dma_reserve(unsigned long new_dma_reserve)
5002 {
5003 	dma_reserve = new_dma_reserve;
5004 }
5005 
5006 void __init free_area_init(unsigned long *zones_size)
5007 {
5008 	free_area_init_node(0, zones_size,
5009 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5010 }
5011 
5012 static int page_alloc_cpu_notify(struct notifier_block *self,
5013 				 unsigned long action, void *hcpu)
5014 {
5015 	int cpu = (unsigned long)hcpu;
5016 
5017 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5018 		drain_pages(cpu);
5019 
5020 		/*
5021 		 * Spill the event counters of the dead processor
5022 		 * into the current processor's event counters.
5023 		 * This artificially elevates the count of the current
5024 		 * processor.
5025 		 */
5026 		vm_events_fold_cpu(cpu);
5027 
5028 		/*
5029 		 * Zero the differential counters of the dead processor
5030 		 * so that the vm statistics are consistent.
5031 		 *
5032 		 * This is only okay since the processor is dead and cannot
5033 		 * race with what we are doing.
5034 		 */
5035 		refresh_cpu_vm_stats(cpu);
5036 	}
5037 	return NOTIFY_OK;
5038 }
5039 
5040 void __init page_alloc_init(void)
5041 {
5042 	hotcpu_notifier(page_alloc_cpu_notify, 0);
5043 }
5044 
5045 /*
5046  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5047  *	or min_free_kbytes changes.
5048  */
5049 static void calculate_totalreserve_pages(void)
5050 {
5051 	struct pglist_data *pgdat;
5052 	unsigned long reserve_pages = 0;
5053 	enum zone_type i, j;
5054 
5055 	for_each_online_pgdat(pgdat) {
5056 		for (i = 0; i < MAX_NR_ZONES; i++) {
5057 			struct zone *zone = pgdat->node_zones + i;
5058 			unsigned long max = 0;
5059 
5060 			/* Find valid and maximum lowmem_reserve in the zone */
5061 			for (j = i; j < MAX_NR_ZONES; j++) {
5062 				if (zone->lowmem_reserve[j] > max)
5063 					max = zone->lowmem_reserve[j];
5064 			}
5065 
5066 			/* we treat the high watermark as reserved pages. */
5067 			max += high_wmark_pages(zone);
5068 
5069 			if (max > zone->present_pages)
5070 				max = zone->present_pages;
5071 			reserve_pages += max;
5072 		}
5073 	}
5074 	totalreserve_pages = reserve_pages;
5075 }
5076 
5077 /*
5078  * setup_per_zone_lowmem_reserve - called whenever
5079  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
5080  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
5081  *	pages are left in the zone after a successful __alloc_pages().
5082  */
5083 static void setup_per_zone_lowmem_reserve(void)
5084 {
5085 	struct pglist_data *pgdat;
5086 	enum zone_type j, idx;
5087 
5088 	for_each_online_pgdat(pgdat) {
5089 		for (j = 0; j < MAX_NR_ZONES; j++) {
5090 			struct zone *zone = pgdat->node_zones + j;
5091 			unsigned long present_pages = zone->present_pages;
5092 
5093 			zone->lowmem_reserve[j] = 0;
5094 
5095 			idx = j;
5096 			while (idx) {
5097 				struct zone *lower_zone;
5098 
5099 				idx--;
5100 
5101 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
5102 					sysctl_lowmem_reserve_ratio[idx] = 1;
5103 
5104 				lower_zone = pgdat->node_zones + idx;
5105 				lower_zone->lowmem_reserve[j] = present_pages /
5106 					sysctl_lowmem_reserve_ratio[idx];
5107 				present_pages += lower_zone->present_pages;
5108 			}
5109 		}
5110 	}
5111 
5112 	/* update totalreserve_pages */
5113 	calculate_totalreserve_pages();
5114 }
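
/*
 * Worked example with hypothetical sizes: with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] = 256 and a 1GiB ZONE_NORMAL
 * (262144 pages), ZONE_DMA gets lowmem_reserve[ZONE_NORMAL] =
 * 262144 / 256 = 1024 pages, so GFP_KERNEL allocations may only fall
 * back to ZONE_DMA while it keeps 1024 pages free above the watermark.
 */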
5115 
5116 /**
5117  * setup_per_zone_wmarks - called when min_free_kbytes changes
5118  * or when memory is hot-{added|removed}
5119  *
5120  * Ensures that the watermark[min,low,high] values for each zone are set
5121  * correctly with respect to min_free_kbytes.
5122  */
5123 void setup_per_zone_wmarks(void)
5124 {
5125 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5126 	unsigned long lowmem_pages = 0;
5127 	struct zone *zone;
5128 	unsigned long flags;
5129 
5130 	/* Calculate total number of !ZONE_HIGHMEM pages */
5131 	for_each_zone(zone) {
5132 		if (!is_highmem(zone))
5133 			lowmem_pages += zone->present_pages;
5134 	}
5135 
5136 	for_each_zone(zone) {
5137 		u64 tmp;
5138 
5139 		spin_lock_irqsave(&zone->lock, flags);
5140 		tmp = (u64)pages_min * zone->present_pages;
5141 		do_div(tmp, lowmem_pages);
5142 		if (is_highmem(zone)) {
5143 			/*
5144 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5145 			 * need highmem pages, so cap pages_min to a small
5146 			 * value here.
5147 			 *
5148 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5149 			 * deltas control async page reclaim, and so should
5150 			 * not be capped for highmem.
5151 			 */
5152 			int min_pages;
5153 
5154 			min_pages = zone->present_pages / 1024;
5155 			if (min_pages < SWAP_CLUSTER_MAX)
5156 				min_pages = SWAP_CLUSTER_MAX;
5157 			if (min_pages > 128)
5158 				min_pages = 128;
5159 			zone->watermark[WMARK_MIN] = min_pages;
5160 		} else {
5161 			/*
5162 			 * If it's a lowmem zone, reserve a number of pages
5163 			 * proportionate to the zone's size.
5164 			 */
5165 			zone->watermark[WMARK_MIN] = tmp;
5166 		}
5167 
5168 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5169 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5170 		setup_zone_migrate_reserve(zone);
5171 		spin_unlock_irqrestore(&zone->lock, flags);
5172 	}
5173 
5174 	/* update totalreserve_pages */
5175 	calculate_totalreserve_pages();
5176 }
5177 
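/*
 * A minimal sketch of the watermark spacing set above (hypothetical names;
 * not part of this file): WMARK_LOW and WMARK_HIGH sit 25% and 50% of the
 * zone's proportional min share ("tmp" above) beyond WMARK_MIN.
 */
static inline unsigned long sketch_wmark_low(unsigned long wmark_min,
					     unsigned long min_share)
{
	return wmark_min + (min_share >> 2);	/* min + 25% of the share */
}

static inline unsigned long sketch_wmark_high(unsigned long wmark_min,
					      unsigned long min_share)
{
	return wmark_min + (min_share >> 1);	/* min + 50% of the share */
}
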
5178 /*
5179  * The inactive anon list should be small enough that the VM never has to
5180  * do too much work, but large enough that each inactive page has a chance
5181  * to be referenced again before it is swapped out.
5182  *
5183  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5184  * INACTIVE_ANON pages on this zone's LRU, maintained by the
5185  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5186  * the anonymous pages are kept on the inactive list.
5187  *
5188  * total     target    max
5189  * memory    ratio     inactive anon
5190  * -------------------------------------
5191  *   10MB       1         5MB
5192  *  100MB       1        50MB
5193  *    1GB       3       250MB
5194  *   10GB      10       0.9GB
5195  *  100GB      31         3GB
5196  *    1TB     101        10GB
5197  *   10TB     320        32GB
5198  */
5199 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5200 {
5201 	unsigned int gb, ratio;
5202 
5203 	/* Zone size in gigabytes */
5204 	gb = zone->present_pages >> (30 - PAGE_SHIFT);
5205 	if (gb)
5206 		ratio = int_sqrt(10 * gb);
5207 	else
5208 		ratio = 1;
5209 
5210 	zone->inactive_ratio = ratio;
5211 }
5212 
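/*
 * Spot-checking the table above against the formula: for a 1TB zone,
 * gb = 1024 and int_sqrt(10 * 1024) = int_sqrt(10240) = 101 (since
 * 101^2 = 10201 <= 10240 < 102^2), matching the "1TB / 101" row.
 */
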
5213 static void __meminit setup_per_zone_inactive_ratio(void)
5214 {
5215 	struct zone *zone;
5216 
5217 	for_each_zone(zone)
5218 		calculate_zone_inactive_ratio(zone);
5219 }
5220 
5221 /*
5222  * Initialise min_free_kbytes.
5223  *
5224  * For small machines we want it small (128k min).  For large machines
5225  * we want it large (64MB max).  But it is not linear, because network
5226  * bandwidth does not increase linearly with machine size.  We use
5227  *
5228  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), computed for better
5229  *	integer accuracy as: min_free_kbytes = sqrt(lowmem_kbytes * 16)
5230  *
5231  * which yields
5232  *
5233  * 16MB:	512k
5234  * 32MB:	724k
5235  * 64MB:	1024k
5236  * 128MB:	1448k
5237  * 256MB:	2048k
5238  * 512MB:	2896k
5239  * 1024MB:	4096k
5240  * 2048MB:	5792k
5241  * 4096MB:	8192k
5242  * 8192MB:	11584k
5243  * 16384MB:	16384k
5244  */
5245 int __meminit init_per_zone_wmark_min(void)
5246 {
5247 	unsigned long lowmem_kbytes;
5248 
5249 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5250 
5251 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5252 	if (min_free_kbytes < 128)
5253 		min_free_kbytes = 128;
5254 	if (min_free_kbytes > 65536)
5255 		min_free_kbytes = 65536;
5256 	setup_per_zone_wmarks();
5257 	refresh_zone_stat_thresholds();
5258 	setup_per_zone_lowmem_reserve();
5259 	setup_per_zone_inactive_ratio();
5260 	return 0;
5261 }
5262 module_init(init_per_zone_wmark_min)
5263 
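/*
 * An illustrative check of the sizing formula (the helper name is
 * hypothetical, not part of this file): sketch_min_free_kbytes(1048576)
 * returns int_sqrt(16777216) = 4096, matching the "1024MB: 4096k" row
 * of the table above; the 128k/65536k clamps then bound the result.
 */
static unsigned long sketch_min_free_kbytes(unsigned long lowmem_kbytes)
{
	unsigned long kbytes = int_sqrt(lowmem_kbytes * 16);

	return clamp(kbytes, 128UL, 65536UL);
}
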
5264 /*
5265  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5266  *	that we can call setup_per_zone_wmarks() whenever min_free_kbytes
5267  *	changes.
5268  */
5269 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5270 	void __user *buffer, size_t *length, loff_t *ppos)
5271 {
5272 	proc_dointvec(table, write, buffer, length, ppos);
5273 	if (write)
5274 		setup_per_zone_wmarks();
5275 	return 0;
5276 }
5277 
5278 #ifdef CONFIG_NUMA
5279 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5280 	void __user *buffer, size_t *length, loff_t *ppos)
5281 {
5282 	struct zone *zone;
5283 	int rc;
5284 
5285 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5286 	if (rc)
5287 		return rc;
5288 
5289 	for_each_zone(zone)
5290 		zone->min_unmapped_pages = (zone->present_pages *
5291 				sysctl_min_unmapped_ratio) / 100;
5292 	return 0;
5293 }
5294 
5295 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5296 	void __user *buffer, size_t *length, loff_t *ppos)
5297 {
5298 	struct zone *zone;
5299 	int rc;
5300 
5301 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5302 	if (rc)
5303 		return rc;
5304 
5305 	for_each_zone(zone)
5306 		zone->min_slab_pages = (zone->present_pages *
5307 				sysctl_min_slab_ratio) / 100;
5308 	return 0;
5309 }
5310 #endif
5311 
5312 /*
5313  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5314  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5315  *	whenever sysctl_lowmem_reserve_ratio changes.
5316  *
5317  * The reserve ratio has no relation to the minimum watermarks. The
5318  * lowmem reserve ratio is only meaningful as a function of the
5319  * boot-time zone sizes.
5320  */
5321 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5322 	void __user *buffer, size_t *length, loff_t *ppos)
5323 {
5324 	proc_dointvec_minmax(table, write, buffer, length, ppos);
5325 	setup_per_zone_lowmem_reserve();
5326 	return 0;
5327 }
5328 
5329 /*
5330  * percpu_pagelist_fraction - changes pcp->high for each zone on each
5331  * cpu.  It is the fraction of a zone's total pages that a hot per-cpu
5332  * pagelist may hold before it gets flushed back to the buddy allocator.
5333  */
5334 
5335 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5336 	void __user *buffer, size_t *length, loff_t *ppos)
5337 {
5338 	struct zone *zone;
5339 	unsigned int cpu;
5340 	int ret;
5341 
5342 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5343 	if (!write || (ret == -EINVAL))
5344 		return ret;
5345 	for_each_populated_zone(zone) {
5346 		for_each_possible_cpu(cpu) {
5347 			unsigned long  high;
5348 			high = zone->present_pages / percpu_pagelist_fraction;
5349 			setup_pagelist_highmark(
5350 				per_cpu_ptr(zone->pageset, cpu), high);
5351 		}
5352 	}
5353 	return 0;
5354 }
5355 
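/*
 * The handler above sets every per-cpu high mark to a fixed fraction of
 * its zone. A one-line sketch with hypothetical names (fraction is
 * assumed to be at least 1; the sysctl enforces a minimum elsewhere):
 */
static inline unsigned long sketch_pcp_high(unsigned long zone_pages,
					    int fraction)
{
	return zone_pages / fraction;	/* e.g. 8 => each list holds 1/8 */
}
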
5356 int hashdist = HASHDIST_DEFAULT;
5357 
5358 #ifdef CONFIG_NUMA
5359 static int __init set_hashdist(char *str)
5360 {
5361 	if (!str)
5362 		return 0;
5363 	hashdist = simple_strtoul(str, &str, 0);
5364 	return 1;
5365 }
5366 __setup("hashdist=", set_hashdist);
5367 #endif
5368 
5369 /*
5370  * allocate a large system hash table from bootmem
5371  * - it is assumed that the hash table must contain an exact power-of-2
5372  *   quantity of entries
5373  * - limit is the number of hash buckets, not the total allocation size
5374  */
5375 void *__init alloc_large_system_hash(const char *tablename,
5376 				     unsigned long bucketsize,
5377 				     unsigned long numentries,
5378 				     int scale,
5379 				     int flags,
5380 				     unsigned int *_hash_shift,
5381 				     unsigned int *_hash_mask,
5382 				     unsigned long limit)
5383 {
5384 	unsigned long long max = limit;
5385 	unsigned long log2qty, size;
5386 	void *table = NULL;
5387 
5388 	/* allow the kernel cmdline to have a say */
5389 	if (!numentries) {
5390 		/* round applicable memory size up to nearest megabyte */
5391 		numentries = nr_kernel_pages;
5392 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5393 		numentries >>= 20 - PAGE_SHIFT;
5394 		numentries <<= 20 - PAGE_SHIFT;
5395 
5396 		/* limit to 1 bucket per 2^scale bytes of low memory */
5397 		if (scale > PAGE_SHIFT)
5398 			numentries >>= (scale - PAGE_SHIFT);
5399 		else
5400 			numentries <<= (PAGE_SHIFT - scale);
5401 
5402 		/* Make sure we've got at least a 0-order allocation. */
5403 		if (unlikely(flags & HASH_SMALL)) {
5404 			/* Makes no sense without HASH_EARLY */
5405 			WARN_ON(!(flags & HASH_EARLY));
5406 			if (!(numentries >> *_hash_shift)) {
5407 				numentries = 1UL << *_hash_shift;
5408 				BUG_ON(!numentries);
5409 			}
5410 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5411 			numentries = PAGE_SIZE / bucketsize;
5412 	}
5413 	numentries = roundup_pow_of_two(numentries);
5414 
5415 	/* limit allocation size to 1/16 total memory by default */
5416 	if (max == 0) {
5417 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5418 		do_div(max, bucketsize);
5419 	}
5420 
5421 	if (numentries > max)
5422 		numentries = max;
5423 
5424 	log2qty = ilog2(numentries);
5425 
5426 	do {
5427 		size = bucketsize << log2qty;
5428 		if (flags & HASH_EARLY)
5429 			table = alloc_bootmem_nopanic(size);
5430 		else if (hashdist)
5431 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5432 		else {
5433 			/*
5434 			 * If bucketsize is not a power of two, we may free
5435 			 * some pages at the end of the hash table, which
5436 			 * alloc_pages_exact() does automatically.
5437 			 */
5438 			if (get_order(size) < MAX_ORDER) {
5439 				table = alloc_pages_exact(size, GFP_ATOMIC);
5440 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5441 			}
5442 		}
5443 	} while (!table && size > PAGE_SIZE && --log2qty);
5444 
5445 	if (!table)
5446 		panic("Failed to allocate %s hash table\n", tablename);
5447 
5448 	printk(KERN_INFO "%s hash table entries: %lu (order: %d, %lu bytes)\n",
5449 	       tablename,
5450 	       (1UL << log2qty),
5451 	       ilog2(size) - PAGE_SHIFT,
5452 	       size);
5453 
5454 	if (_hash_shift)
5455 		*_hash_shift = log2qty;
5456 	if (_hash_mask)
5457 		*_hash_mask = (1 << log2qty) - 1;
5458 
5459 	return table;
5460 }
5461 
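/*
 * A hedged sketch of the default bucket-count sizing above (names are
 * illustrative only): round low-memory pages up to a whole number of
 * megabytes, allow one bucket per 2^scale bytes, then round up to the
 * power of two the table is required to be.
 */
static unsigned long sketch_hash_entries(unsigned long nr_pages, int scale)
{
	unsigned long entries = nr_pages;

	/* round up to the nearest megabyte's worth of pages */
	entries += (1UL << (20 - PAGE_SHIFT)) - 1;
	entries >>= 20 - PAGE_SHIFT;
	entries <<= 20 - PAGE_SHIFT;

	/* one bucket per 2^scale bytes of low memory */
	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);
	else
		entries <<= (PAGE_SHIFT - scale);

	return roundup_pow_of_two(entries);
}
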
5462 /* Return a pointer to the bitmap storing bits affecting a block of pages */
5463 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5464 							unsigned long pfn)
5465 {
5466 #ifdef CONFIG_SPARSEMEM
5467 	return __pfn_to_section(pfn)->pageblock_flags;
5468 #else
5469 	return zone->pageblock_flags;
5470 #endif /* CONFIG_SPARSEMEM */
5471 }
5472 
5473 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5474 {
5475 #ifdef CONFIG_SPARSEMEM
5476 	pfn &= (PAGES_PER_SECTION-1);
5477 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5478 #else
5479 	pfn = pfn - zone->zone_start_pfn;
5480 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5481 #endif /* CONFIG_SPARSEMEM */
5482 }
5483 
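/*
 * Worked example (assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4;
 * both are configuration dependent): a pfn offset of 0x2800 within its
 * section/zone lies in pageblock 0x2800 >> 9 = 0x14, so its flag group
 * starts at bit 0x14 * 4 = 80 of the pageblock bitmap.
 */
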
5484 /**
5485  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5486  * @page: The page within the block of interest
5487  * @start_bitidx: The first bit of interest to retrieve
5488  * @end_bitidx: The last bit of interest
5489  * returns pageblock_bits flags
5490  */
5491 unsigned long get_pageblock_flags_group(struct page *page,
5492 					int start_bitidx, int end_bitidx)
5493 {
5494 	struct zone *zone;
5495 	unsigned long *bitmap;
5496 	unsigned long pfn, bitidx;
5497 	unsigned long flags = 0;
5498 	unsigned long value = 1;
5499 
5500 	zone = page_zone(page);
5501 	pfn = page_to_pfn(page);
5502 	bitmap = get_pageblock_bitmap(zone, pfn);
5503 	bitidx = pfn_to_bitidx(zone, pfn);
5504 
5505 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5506 		if (test_bit(bitidx + start_bitidx, bitmap))
5507 			flags |= value;
5508 
5509 	return flags;
5510 }
5511 
5512 /**
5513  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5514  * @page: The page within the block of interest
5515  * @start_bitidx: The first bit of interest
5516  * @end_bitidx: The last bit of interest
5517  * @flags: The flags to set
5518  */
5519 void set_pageblock_flags_group(struct page *page, unsigned long flags,
5520 					int start_bitidx, int end_bitidx)
5521 {
5522 	struct zone *zone;
5523 	unsigned long *bitmap;
5524 	unsigned long pfn, bitidx;
5525 	unsigned long value = 1;
5526 
5527 	zone = page_zone(page);
5528 	pfn = page_to_pfn(page);
5529 	bitmap = get_pageblock_bitmap(zone, pfn);
5530 	bitidx = pfn_to_bitidx(zone, pfn);
5531 	VM_BUG_ON(pfn < zone->zone_start_pfn);
5532 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5533 
5534 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5535 		if (flags & value)
5536 			__set_bit(bitidx + start_bitidx, bitmap);
5537 		else
5538 			__clear_bit(bitidx + start_bitidx, bitmap);
5539 }
5540 
5541 /*
5542  * These are designed as helper functions; please see page_isolation.c too.
5543  * They set/clear a pageblock's migratetype to ISOLATE; the page allocator
5544  * never allocates memory from an ISOLATE block.
5545  */
5546 
5547 static int
5548 __count_immobile_pages(struct zone *zone, struct page *page, int count)
5549 {
5550 	unsigned long pfn, iter, found;
5551 	/*
5552 	 * To avoid noisy data, lru_add_drain_all() should be called first.
5553 	 * A ZONE_MOVABLE zone never contains immobile pages.
5554 	 */
5555 	if (zone_idx(zone) == ZONE_MOVABLE)
5556 		return true;
5557 
5558 	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
5559 		return true;
5560 
5561 	pfn = page_to_pfn(page);
5562 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5563 		unsigned long check = pfn + iter;
5564 
5565 		if (!pfn_valid_within(check))
5566 			continue;
5567 
5568 		page = pfn_to_page(check);
5569 		if (!page_count(page)) {
5570 			if (PageBuddy(page))
5571 				iter += (1 << page_order(page)) - 1;
5572 			continue;
5573 		}
5574 		if (!PageLRU(page))
5575 			found++;
5576 		/*
5577 		 * If there are RECLAIMABLE pages, we would need to check them
5578 		 * as well. But for now memory offline itself doesn't call
5579 		 * shrink_slab(), and this still needs to be fixed.
5580 		 */
5581 		/*
5582 		 * If the page is not RAM, page_count() should be 0. No further
5583 		 * check is needed: this is a _used_ non-movable page.
5584 		 *
5585 		 * The problematic thing here is PG_reserved pages. PG_reserved
5586 		 * is set to both of a memory hole page and a _used_ kernel
5587 		 * page at boot.
5588 		 */
5589 		if (found > count)
5590 			return false;
5591 	}
5592 	return true;
5593 }
5594 
5595 bool is_pageblock_removable_nolock(struct page *page)
5596 {
5597 	struct zone *zone = page_zone(page);
5598 	return __count_immobile_pages(zone, page, 0);
5599 }
5600 
5601 int set_migratetype_isolate(struct page *page)
5602 {
5603 	struct zone *zone;
5604 	unsigned long flags, pfn;
5605 	struct memory_isolate_notify arg;
5606 	int notifier_ret;
5607 	int ret = -EBUSY;
5608 
5609 	zone = page_zone(page);
5610 
5611 	spin_lock_irqsave(&zone->lock, flags);
5612 
5613 	pfn = page_to_pfn(page);
5614 	arg.start_pfn = pfn;
5615 	arg.nr_pages = pageblock_nr_pages;
5616 	arg.pages_found = 0;
5617 
5618 	/*
5619 	 * It may be possible to isolate a pageblock even if the
5620 	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5621 	 * notifier chain is used by balloon drivers to return the
5622 	 * number of pages in a range that are held by the balloon
5623 	 * driver to shrink memory. If all the pages are accounted for
5624 	 * by balloons, are free, or on the LRU, isolation can continue.
5625 	 * Later, for example, when the memory hotplug notifier runs, the
5626 	 * pages reported as "can be isolated" should be isolated (freed)
5627 	 * by the balloon driver through the memory notifier chain.
5628 	 */
5629 	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5630 	notifier_ret = notifier_to_errno(notifier_ret);
5631 	if (notifier_ret)
5632 		goto out;
5633 	/*
5634 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
5635 	 * We just check MOVABLE pages.
5636 	 */
5637 	if (__count_immobile_pages(zone, page, arg.pages_found))
5638 		ret = 0;
5639 
5640 	/*
5641 	 * Immobile means "not-on-LRU" pages. If the immobile count is larger
5642 	 * than the removable-by-driver count reported by the notifier, we fail.
5643 	 */
5644 
5645 out:
5646 	if (!ret) {
5647 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5648 		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5649 	}
5650 
5651 	spin_unlock_irqrestore(&zone->lock, flags);
5652 	if (!ret)
5653 		drain_all_pages();
5654 	return ret;
5655 }
5656 
5657 void unset_migratetype_isolate(struct page *page)
5658 {
5659 	struct zone *zone;
5660 	unsigned long flags;
5661 	zone = page_zone(page);
5662 	spin_lock_irqsave(&zone->lock, flags);
5663 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5664 		goto out;
5665 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5666 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5667 out:
5668 	spin_unlock_irqrestore(&zone->lock, flags);
5669 }
5670 
5671 #ifdef CONFIG_MEMORY_HOTREMOVE
5672 /*
5673  * All pages in the range must be isolated before calling this.
5674  */
5675 void
5676 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5677 {
5678 	struct page *page;
5679 	struct zone *zone;
5680 	int order, i;
5681 	unsigned long pfn;
5682 	unsigned long flags;
5683 	/* find the first valid pfn */
5684 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5685 		if (pfn_valid(pfn))
5686 			break;
5687 	if (pfn == end_pfn)
5688 		return;
5689 	zone = page_zone(pfn_to_page(pfn));
5690 	spin_lock_irqsave(&zone->lock, flags);
5691 	pfn = start_pfn;
5692 	while (pfn < end_pfn) {
5693 		if (!pfn_valid(pfn)) {
5694 			pfn++;
5695 			continue;
5696 		}
5697 		page = pfn_to_page(pfn);
5698 		BUG_ON(page_count(page));
5699 		BUG_ON(!PageBuddy(page));
5700 		order = page_order(page);
5701 #ifdef CONFIG_DEBUG_VM
5702 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5703 		       pfn, 1 << order, end_pfn);
5704 #endif
5705 		list_del(&page->lru);
5706 		rmv_page_order(page);
5707 		zone->free_area[order].nr_free--;
5708 		__mod_zone_page_state(zone, NR_FREE_PAGES,
5709 				      - (1UL << order));
5710 		for (i = 0; i < (1 << order); i++)
5711 			SetPageReserved((page+i));
5712 		pfn += (1 << order);
5713 	}
5714 	spin_unlock_irqrestore(&zone->lock, flags);
5715 }
5716 #endif
5717 
5718 #ifdef CONFIG_MEMORY_FAILURE
5719 bool is_free_buddy_page(struct page *page)
5720 {
5721 	struct zone *zone = page_zone(page);
5722 	unsigned long pfn = page_to_pfn(page);
5723 	unsigned long flags;
5724 	int order;
5725 
5726 	spin_lock_irqsave(&zone->lock, flags);
5727 	for (order = 0; order < MAX_ORDER; order++) {
5728 		struct page *page_head = page - (pfn & ((1 << order) - 1));
5729 
5730 		if (PageBuddy(page_head) && page_order(page_head) >= order)
5731 			break;
5732 	}
5733 	spin_unlock_irqrestore(&zone->lock, flags);
5734 
5735 	return order < MAX_ORDER;
5736 }
5737 #endif
5738 
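/*
 * Worked example of the buddy-head arithmetic in is_free_buddy_page()
 * above (values chosen for illustration): for pfn 0x12345 at order 3,
 * pfn & ((1 << 3) - 1) == 0x5, so page_head is the page five slots
 * earlier, at pfn 0x12340 -- the start of that order-3 buddy block.
 */
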
5739 static struct trace_print_flags pageflag_names[] = {
5740 	{1UL << PG_locked,		"locked"	},
5741 	{1UL << PG_error,		"error"		},
5742 	{1UL << PG_referenced,		"referenced"	},
5743 	{1UL << PG_uptodate,		"uptodate"	},
5744 	{1UL << PG_dirty,		"dirty"		},
5745 	{1UL << PG_lru,			"lru"		},
5746 	{1UL << PG_active,		"active"	},
5747 	{1UL << PG_slab,		"slab"		},
5748 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5749 	{1UL << PG_arch_1,		"arch_1"	},
5750 	{1UL << PG_reserved,		"reserved"	},
5751 	{1UL << PG_private,		"private"	},
5752 	{1UL << PG_private_2,		"private_2"	},
5753 	{1UL << PG_writeback,		"writeback"	},
5754 #ifdef CONFIG_PAGEFLAGS_EXTENDED
5755 	{1UL << PG_head,		"head"		},
5756 	{1UL << PG_tail,		"tail"		},
5757 #else
5758 	{1UL << PG_compound,		"compound"	},
5759 #endif
5760 	{1UL << PG_swapcache,		"swapcache"	},
5761 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5762 	{1UL << PG_reclaim,		"reclaim"	},
5763 	{1UL << PG_swapbacked,		"swapbacked"	},
5764 	{1UL << PG_unevictable,		"unevictable"	},
5765 #ifdef CONFIG_MMU
5766 	{1UL << PG_mlocked,		"mlocked"	},
5767 #endif
5768 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
5769 	{1UL << PG_uncached,		"uncached"	},
5770 #endif
5771 #ifdef CONFIG_MEMORY_FAILURE
5772 	{1UL << PG_hwpoison,		"hwpoison"	},
5773 #endif
5774 	{-1UL,				NULL		},
5775 };
5776 
5777 static void dump_page_flags(unsigned long flags)
5778 {
5779 	const char *delim = "";
5780 	unsigned long mask;
5781 	int i;
5782 
5783 	printk(KERN_ALERT "page flags: %#lx(", flags);
5784 
5785 	/* remove zone id */
5786 	flags &= (1UL << NR_PAGEFLAGS) - 1;
5787 
5788 	for (i = 0; pageflag_names[i].name && flags; i++) {
5789 
5790 		mask = pageflag_names[i].mask;
5791 		if ((flags & mask) != mask)
5792 			continue;
5793 
5794 		flags &= ~mask;
5795 		printk("%s%s", delim, pageflag_names[i].name);
5796 		delim = "|";
5797 	}
5798 
5799 	/* check for left over flags */
5800 	if (flags)
5801 		printk("%s%#lx", delim, flags);
5802 
5803 	printk(")\n");
5804 }
5805 
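/*
 * For instance, a page with PG_locked, PG_dirty and PG_lru set (assuming
 * the usual bit numbering, flags value 0x31) would be reported by
 * dump_page_flags() above as:
 *
 *	page flags: 0x31(locked|dirty|lru)
 */
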
5806 void dump_page(struct page *page)
5807 {
5808 	printk(KERN_ALERT
5809 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5810 		page, atomic_read(&page->_count), page_mapcount(page),
5811 		page->mapping, page->index);
5812 	dump_page_flags(page->flags);
5813 	mem_cgroup_print_bad_page(page);
5814 }
5815