xref: /openbmc/linux/mm/page_alloc.c (revision 4dc7ccf7)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/kernel.h>
26 #include <linux/kmemcheck.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/slab.h>
32 #include <linux/oom.h>
33 #include <linux/notifier.h>
34 #include <linux/topology.h>
35 #include <linux/sysctl.h>
36 #include <linux/cpu.h>
37 #include <linux/cpuset.h>
38 #include <linux/memory_hotplug.h>
39 #include <linux/nodemask.h>
40 #include <linux/vmalloc.h>
41 #include <linux/mempolicy.h>
42 #include <linux/stop_machine.h>
43 #include <linux/sort.h>
44 #include <linux/pfn.h>
45 #include <linux/backing-dev.h>
46 #include <linux/fault-inject.h>
47 #include <linux/page-isolation.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/debugobjects.h>
50 #include <linux/kmemleak.h>
51 #include <linux/memory.h>
52 #include <trace/events/kmem.h>
53 #include <linux/ftrace_event.h>
54 
55 #include <asm/tlbflush.h>
56 #include <asm/div64.h>
57 #include "internal.h"
58 
59 /*
60  * Array of node states.
61  */
62 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
63 	[N_POSSIBLE] = NODE_MASK_ALL,
64 	[N_ONLINE] = { { [0] = 1UL } },
65 #ifndef CONFIG_NUMA
66 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
67 #ifdef CONFIG_HIGHMEM
68 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
69 #endif
70 	[N_CPU] = { { [0] = 1UL } },
71 #endif	/* NUMA */
72 };
73 EXPORT_SYMBOL(node_states);
74 
75 unsigned long totalram_pages __read_mostly;
76 unsigned long totalreserve_pages __read_mostly;
77 int percpu_pagelist_fraction;
78 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
79 
80 #ifdef CONFIG_PM_SLEEP
81 /*
82  * The following functions are used by the suspend/hibernate code to temporarily
83  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
84  * while devices are suspended.  To avoid races with the suspend/hibernate code,
85  * they should always be called with pm_mutex held (gfp_allowed_mask also should
86  * only be modified with pm_mutex held, unless the suspend/hibernate code is
87  * guaranteed not to run in parallel with that modification).
88  */
89 void set_gfp_allowed_mask(gfp_t mask)
90 {
91 	WARN_ON(!mutex_is_locked(&pm_mutex));
92 	gfp_allowed_mask = mask;
93 }
94 
95 gfp_t clear_gfp_allowed_mask(gfp_t mask)
96 {
97 	gfp_t ret = gfp_allowed_mask;
98 
99 	WARN_ON(!mutex_is_locked(&pm_mutex));
100 	gfp_allowed_mask &= ~mask;
101 	return ret;
102 }
103 #endif /* CONFIG_PM_SLEEP */
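/*
 * Illustrative usage sketch (not part of the original file): the
 * suspend/hibernate core is expected to mask out the I/O-related GFP
 * bits before freezing devices and restore them afterwards, roughly:
 *
 *	gfp_t saved_mask;
 *
 *	mutex_lock(&pm_mutex);
 *	saved_mask = clear_gfp_allowed_mask(__GFP_IO | __GFP_FS);
 *	... suspend devices, snapshot memory ...
 *	set_gfp_allowed_mask(saved_mask);
 *	mutex_unlock(&pm_mutex);
 *
 * The real call sites live in kernel/power/; the mask shown here is an
 * assumption for illustration only.
 */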
104 
105 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
106 int pageblock_order __read_mostly;
107 #endif
108 
109 static void __free_pages_ok(struct page *page, unsigned int order);
110 
111 /*
112  * results with 256, 32 in the lowmem_reserve sysctl:
113  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
114  *	1G machine -> (16M dma, 784M normal, 224M high)
115  *	NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
116  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
117  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
118  *
119  * TBD: should special case ZONE_DMA32 machines here - in those we normally
120  * don't need any ZONE_NORMAL reservation
121  */
122 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
123 #ifdef CONFIG_ZONE_DMA
124 	 256,
125 #endif
126 #ifdef CONFIG_ZONE_DMA32
127 	 256,
128 #endif
129 #ifdef CONFIG_HIGHMEM
130 	 32,
131 #endif
132 	 32,
133 };
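/*
 * Illustrative worked example (not part of the original file): with the
 * default ratios above and the 1G split quoted in the comment
 * (16M dma, 784M normal, 224M high), the reserves work out roughly as:
 *
 *	ZONE_DMA protecting against NORMAL allocations:
 *		784M / 256		~= 3M
 *	ZONE_DMA protecting against HIGHMEM allocations:
 *		(784M + 224M) / 256	~= 4M
 *	ZONE_NORMAL protecting against HIGHMEM allocations:
 *		224M / 32		 = 7M
 *
 * i.e. lowmem_reserve[] records how much of a lower zone a higher-zone
 * allocation must leave untouched before the watermark check lets it
 * fall back there.
 */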
134 
135 EXPORT_SYMBOL(totalram_pages);
136 
137 static char * const zone_names[MAX_NR_ZONES] = {
138 #ifdef CONFIG_ZONE_DMA
139 	 "DMA",
140 #endif
141 #ifdef CONFIG_ZONE_DMA32
142 	 "DMA32",
143 #endif
144 	 "Normal",
145 #ifdef CONFIG_HIGHMEM
146 	 "HighMem",
147 #endif
148 	 "Movable",
149 };
150 
151 int min_free_kbytes = 1024;
152 
153 static unsigned long __meminitdata nr_kernel_pages;
154 static unsigned long __meminitdata nr_all_pages;
155 static unsigned long __meminitdata dma_reserve;
156 
157 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
158   /*
159    * MAX_ACTIVE_REGIONS determines the maximum number of distinct
160    * ranges of memory (RAM) that may be registered with add_active_range().
161    * Ranges passed to add_active_range() will be merged if possible
162    * so the number of times add_active_range() can be called is
163    * related to the number of nodes and the number of holes
164    */
165   #ifdef CONFIG_MAX_ACTIVE_REGIONS
166     /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
167     #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
168   #else
169     #if MAX_NUMNODES >= 32
170       /* If there can be many nodes, allow up to 50 holes per node */
171       #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
172     #else
173       /* By default, allow up to 256 distinct regions */
174       #define MAX_ACTIVE_REGIONS 256
175     #endif
176   #endif
177 
178   static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
179   static int __meminitdata nr_nodemap_entries;
180   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
181   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
182   static unsigned long __initdata required_kernelcore;
183   static unsigned long __initdata required_movablecore;
184   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
185 
186   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
187   int movable_zone;
188   EXPORT_SYMBOL(movable_zone);
189 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
190 
191 #if MAX_NUMNODES > 1
192 int nr_node_ids __read_mostly = MAX_NUMNODES;
193 int nr_online_nodes __read_mostly = 1;
194 EXPORT_SYMBOL(nr_node_ids);
195 EXPORT_SYMBOL(nr_online_nodes);
196 #endif
197 
198 int page_group_by_mobility_disabled __read_mostly;
199 
200 static void set_pageblock_migratetype(struct page *page, int migratetype)
201 {
202 
203 	if (unlikely(page_group_by_mobility_disabled))
204 		migratetype = MIGRATE_UNMOVABLE;
205 
206 	set_pageblock_flags_group(page, (unsigned long)migratetype,
207 					PB_migrate, PB_migrate_end);
208 }
209 
210 bool oom_killer_disabled __read_mostly;
211 
212 #ifdef CONFIG_DEBUG_VM
213 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
214 {
215 	int ret = 0;
216 	unsigned seq;
217 	unsigned long pfn = page_to_pfn(page);
218 
219 	do {
220 		seq = zone_span_seqbegin(zone);
221 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
222 			ret = 1;
223 		else if (pfn < zone->zone_start_pfn)
224 			ret = 1;
225 	} while (zone_span_seqretry(zone, seq));
226 
227 	return ret;
228 }
229 
230 static int page_is_consistent(struct zone *zone, struct page *page)
231 {
232 	if (!pfn_valid_within(page_to_pfn(page)))
233 		return 0;
234 	if (zone != page_zone(page))
235 		return 0;
236 
237 	return 1;
238 }
239 /*
240  * Temporary debugging check for pages not lying within a given zone.
241  */
242 static int bad_range(struct zone *zone, struct page *page)
243 {
244 	if (page_outside_zone_boundaries(zone, page))
245 		return 1;
246 	if (!page_is_consistent(zone, page))
247 		return 1;
248 
249 	return 0;
250 }
251 #else
252 static inline int bad_range(struct zone *zone, struct page *page)
253 {
254 	return 0;
255 }
256 #endif
257 
258 static void bad_page(struct page *page)
259 {
260 	static unsigned long resume;
261 	static unsigned long nr_shown;
262 	static unsigned long nr_unshown;
263 
264 	/* Don't complain about poisoned pages */
265 	if (PageHWPoison(page)) {
266 		__ClearPageBuddy(page);
267 		return;
268 	}
269 
270 	/*
271 	 * Allow a burst of 60 reports, then keep quiet for that minute;
272 	 * or allow a steady drip of one report per second.
273 	 */
274 	if (nr_shown == 60) {
275 		if (time_before(jiffies, resume)) {
276 			nr_unshown++;
277 			goto out;
278 		}
279 		if (nr_unshown) {
280 			printk(KERN_ALERT
281 			      "BUG: Bad page state: %lu messages suppressed\n",
282 				nr_unshown);
283 			nr_unshown = 0;
284 		}
285 		nr_shown = 0;
286 	}
287 	if (nr_shown++ == 0)
288 		resume = jiffies + 60 * HZ;
289 
290 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
291 		current->comm, page_to_pfn(page));
292 	dump_page(page);
293 
294 	dump_stack();
295 out:
296 	/* Leave bad fields for debug, except PageBuddy could make trouble */
297 	__ClearPageBuddy(page);
298 	add_taint(TAINT_BAD_PAGE);
299 }
300 
301 /*
302  * Higher-order pages are called "compound pages".  They are structured thusly:
303  *
304  * The first PAGE_SIZE page is called the "head page".
305  *
306  * The remaining PAGE_SIZE pages are called "tail pages".
307  *
308  * All pages have PG_compound set.  All pages have their ->private pointing at
309  * the head page (even the head page has this).
310  *
311  * The first tail page's ->lru.next holds the address of the compound page's
312  * put_page() function.  Its ->lru.prev holds the order of allocation.
313  * This usage means that zero-order pages may not be compound.
314  */
315 
316 static void free_compound_page(struct page *page)
317 {
318 	__free_pages_ok(page, compound_order(page));
319 }
320 
321 void prep_compound_page(struct page *page, unsigned long order)
322 {
323 	int i;
324 	int nr_pages = 1 << order;
325 
326 	set_compound_page_dtor(page, free_compound_page);
327 	set_compound_order(page, order);
328 	__SetPageHead(page);
329 	for (i = 1; i < nr_pages; i++) {
330 		struct page *p = page + i;
331 
332 		__SetPageTail(p);
333 		p->first_page = page;
334 	}
335 }
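/*
 * Illustrative sketch (not part of the original file) of the layout
 * produced by prep_compound_page(page, 2), i.e. a 4-page compound page:
 *
 *	page[0]:    PG_head set, compound_order() == 2,
 *	            destructor == free_compound_page
 *	page[1..3]: PG_tail set, ->first_page == &page[0]
 *
 * destroy_compound_page() below checks exactly this structure and calls
 * bad_page() if any of it has been corrupted.
 */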
336 
337 static int destroy_compound_page(struct page *page, unsigned long order)
338 {
339 	int i;
340 	int nr_pages = 1 << order;
341 	int bad = 0;
342 
343 	if (unlikely(compound_order(page) != order) ||
344 	    unlikely(!PageHead(page))) {
345 		bad_page(page);
346 		bad++;
347 	}
348 
349 	__ClearPageHead(page);
350 
351 	for (i = 1; i < nr_pages; i++) {
352 		struct page *p = page + i;
353 
354 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
355 			bad_page(page);
356 			bad++;
357 		}
358 		__ClearPageTail(p);
359 	}
360 
361 	return bad;
362 }
363 
364 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
365 {
366 	int i;
367 
368 	/*
369 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
370 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
371 	 */
372 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
373 	for (i = 0; i < (1 << order); i++)
374 		clear_highpage(page + i);
375 }
376 
377 static inline void set_page_order(struct page *page, int order)
378 {
379 	set_page_private(page, order);
380 	__SetPageBuddy(page);
381 }
382 
383 static inline void rmv_page_order(struct page *page)
384 {
385 	__ClearPageBuddy(page);
386 	set_page_private(page, 0);
387 }
388 
389 /*
390  * Locate the struct page for both the matching buddy in our
391  * pair (buddy1) and the combined order O+1 page they form (page).
392  *
393  * 1) Any buddy B1 will have an order O twin B2 which satisfies
394  * the following equation:
395  *     B2 = B1 ^ (1 << O)
396  * For example, if the starting buddy (buddy1) is #8, its order-1
397  * buddy (buddy2) is #10:
398  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
399  *
400  * 2) Any buddy B will have an order O+1 parent P which
401  * satisfies the following equation:
402  *     P = B & ~(1 << O)
403  *
404  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
405  */
406 static inline struct page *
407 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
408 {
409 	unsigned long buddy_idx = page_idx ^ (1 << order);
410 
411 	return page + (buddy_idx - page_idx);
412 }
413 
414 static inline unsigned long
415 __find_combined_index(unsigned long page_idx, unsigned int order)
416 {
417 	return (page_idx & ~(1 << order));
418 }
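/*
 * Illustrative worked example (not part of the original file) of the two
 * helpers above, for page_idx 12 at order 2 (block covering pages 12..15):
 *
 *	buddy_idx    = 12 ^ (1 << 2) = 12 ^ 4 = 8	(block at 8..11)
 *	combined_idx = 12 & ~(1 << 2)          = 8	(merged order-3 block
 *							 covering 8..15)
 *
 * The same XOR applied to 8 gives 12 again, so either block of a pair can
 * find the other, and clearing the order bit always yields the start of
 * the merged order+1 block.
 */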
419 
420 /*
421  * This function checks whether a page is free && is the buddy,
422  * i.e. we can coalesce a page and its buddy if
423  * (a) the buddy is not in a hole &&
424  * (b) the buddy is in the buddy system &&
425  * (c) a page and its buddy have the same order &&
426  * (d) a page and its buddy are in the same zone.
427  *
428  * For recording whether a page is in the buddy system, we use PG_buddy.
429  * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
430  *
431  * For recording page's order, we use page_private(page).
432  */
433 static inline int page_is_buddy(struct page *page, struct page *buddy,
434 								int order)
435 {
436 	if (!pfn_valid_within(page_to_pfn(buddy)))
437 		return 0;
438 
439 	if (page_zone_id(page) != page_zone_id(buddy))
440 		return 0;
441 
442 	if (PageBuddy(buddy) && page_order(buddy) == order) {
443 		VM_BUG_ON(page_count(buddy) != 0);
444 		return 1;
445 	}
446 	return 0;
447 }
448 
449 /*
450  * Freeing function for a buddy system allocator.
451  *
452  * The concept of a buddy system is to maintain direct-mapped table
453  * (containing bit values) for memory blocks of various "orders".
454  * The bottom level table contains the map for the smallest allocatable
455  * units of memory (here, pages), and each level above it describes
456  * pairs of units from the levels below, hence, "buddies".
457  * At a high level, all that happens here is marking the table entry
458  * at the bottom level available, and propagating the changes upward
459  * as necessary, plus some accounting needed to play nicely with other
460  * parts of the VM system.
461  * At each level, we keep a list of pages, which are heads of contiguous
462  * free pages of length (1 << order) and marked with PG_buddy. The page's
463  * order is recorded in the page_private(page) field.
464  * So when we are allocating or freeing one, we can derive the state of the
465  * other.  That is, if we allocate a small block, and both were
466  * free, the remainder of the region must be split into blocks.
467  * If a block is freed, and its buddy is also free, then this
468  * triggers coalescing into a block of larger size.
469  *
470  * -- wli
471  */
472 
473 static inline void __free_one_page(struct page *page,
474 		struct zone *zone, unsigned int order,
475 		int migratetype)
476 {
477 	unsigned long page_idx;
478 
479 	if (unlikely(PageCompound(page)))
480 		if (unlikely(destroy_compound_page(page, order)))
481 			return;
482 
483 	VM_BUG_ON(migratetype == -1);
484 
485 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
486 
487 	VM_BUG_ON(page_idx & ((1 << order) - 1));
488 	VM_BUG_ON(bad_range(zone, page));
489 
490 	while (order < MAX_ORDER-1) {
491 		unsigned long combined_idx;
492 		struct page *buddy;
493 
494 		buddy = __page_find_buddy(page, page_idx, order);
495 		if (!page_is_buddy(page, buddy, order))
496 			break;
497 
498 		/* Our buddy is free, merge with it and move up one order. */
499 		list_del(&buddy->lru);
500 		zone->free_area[order].nr_free--;
501 		rmv_page_order(buddy);
502 		combined_idx = __find_combined_index(page_idx, order);
503 		page = page + (combined_idx - page_idx);
504 		page_idx = combined_idx;
505 		order++;
506 	}
507 	set_page_order(page, order);
508 	list_add(&page->lru,
509 		&zone->free_area[order].free_list[migratetype]);
510 	zone->free_area[order].nr_free++;
511 }
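/*
 * Illustrative trace (not part of the original file) of the merge loop
 * above, freeing the order-0 page at index 5 while page 4 is a free
 * order-0 block and pages 6..7 are a free order-1 block:
 *
 *	order 0: buddy of 5 is 4 (free)   -> merge, page_idx = 4, order = 1
 *	order 1: buddy of 4 is 6 (free)   -> merge, page_idx = 4, order = 2
 *	order 2: buddy of 4 is 0 (in use) -> stop
 *
 * The resulting order-2 block at index 4 (pages 4..7) is placed on
 * free_area[2].free_list[migratetype].
 */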
512 
513 /*
514  * free_page_mlock() -- clean up attempts to free an mlocked() page.
515  * Page should not be on lru, so no need to fix that up.
516  * free_pages_check() will verify...
517  */
518 static inline void free_page_mlock(struct page *page)
519 {
520 	__dec_zone_page_state(page, NR_MLOCK);
521 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
522 }
523 
524 static inline int free_pages_check(struct page *page)
525 {
526 	if (unlikely(page_mapcount(page) |
527 		(page->mapping != NULL)  |
528 		(atomic_read(&page->_count) != 0) |
529 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
530 		bad_page(page);
531 		return 1;
532 	}
533 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
534 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
535 	return 0;
536 }
537 
538 /*
539  * Frees a number of pages from the PCP lists
540  * Assumes all pages on list are in same zone, and of same order.
541  * count is the number of pages to free.
542  *
543  * If the zone was previously in an "all pages pinned" state then look to
544  * see if this freeing clears that state.
545  *
546  * And clear the zone's pages_scanned counter, to hold off the "all pages are
547  * pinned" detection logic.
548  */
549 static void free_pcppages_bulk(struct zone *zone, int count,
550 					struct per_cpu_pages *pcp)
551 {
552 	int migratetype = 0;
553 	int batch_free = 0;
554 
555 	spin_lock(&zone->lock);
556 	zone->all_unreclaimable = 0;
557 	zone->pages_scanned = 0;
558 
559 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
560 	while (count) {
561 		struct page *page;
562 		struct list_head *list;
563 
564 		/*
565 		 * Remove pages from lists in a round-robin fashion. A
566 		 * batch_free count is maintained that is incremented when an
567 		 * empty list is encountered.  This is so more pages are freed
568 		 * off fuller lists instead of spinning excessively around empty
569 		 * lists
570 		 */
571 		do {
572 			batch_free++;
573 			if (++migratetype == MIGRATE_PCPTYPES)
574 				migratetype = 0;
575 			list = &pcp->lists[migratetype];
576 		} while (list_empty(list));
577 
578 		do {
579 			page = list_entry(list->prev, struct page, lru);
580 			/* must delete as __free_one_page manipulates the free list */
581 			list_del(&page->lru);
582 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
583 			__free_one_page(page, zone, 0, page_private(page));
584 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
585 		} while (--count && --batch_free && !list_empty(list));
586 	}
587 	spin_unlock(&zone->lock);
588 }
589 
590 static void free_one_page(struct zone *zone, struct page *page, int order,
591 				int migratetype)
592 {
593 	spin_lock(&zone->lock);
594 	zone->all_unreclaimable = 0;
595 	zone->pages_scanned = 0;
596 
597 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
598 	__free_one_page(page, zone, order, migratetype);
599 	spin_unlock(&zone->lock);
600 }
601 
602 static void __free_pages_ok(struct page *page, unsigned int order)
603 {
604 	unsigned long flags;
605 	int i;
606 	int bad = 0;
607 	int wasMlocked = __TestClearPageMlocked(page);
608 
609 	trace_mm_page_free_direct(page, order);
610 	kmemcheck_free_shadow(page, order);
611 
612 	for (i = 0 ; i < (1 << order) ; ++i)
613 		bad += free_pages_check(page + i);
614 	if (bad)
615 		return;
616 
617 	if (!PageHighMem(page)) {
618 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
619 		debug_check_no_obj_freed(page_address(page),
620 					   PAGE_SIZE << order);
621 	}
622 	arch_free_page(page, order);
623 	kernel_map_pages(page, 1 << order, 0);
624 
625 	local_irq_save(flags);
626 	if (unlikely(wasMlocked))
627 		free_page_mlock(page);
628 	__count_vm_events(PGFREE, 1 << order);
629 	free_one_page(page_zone(page), page, order,
630 					get_pageblock_migratetype(page));
631 	local_irq_restore(flags);
632 }
633 
634 /*
635  * permit the bootmem allocator to evade page validation on high-order frees
636  */
637 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
638 {
639 	if (order == 0) {
640 		__ClearPageReserved(page);
641 		set_page_count(page, 0);
642 		set_page_refcounted(page);
643 		__free_page(page);
644 	} else {
645 		int loop;
646 
647 		prefetchw(page);
648 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
649 			struct page *p = &page[loop];
650 
651 			if (loop + 1 < BITS_PER_LONG)
652 				prefetchw(p + 1);
653 			__ClearPageReserved(p);
654 			set_page_count(p, 0);
655 		}
656 
657 		set_page_refcounted(page);
658 		__free_pages(page, order);
659 	}
660 }
661 
662 
663 /*
664  * The order of subdivision here is critical for the IO subsystem.
665  * Please do not alter this order without good reasons and regression
666  * testing. Specifically, as large blocks of memory are subdivided,
667  * the order in which smaller blocks are delivered depends on the order
668  * they're subdivided in this function. This is the primary factor
669  * influencing the order in which pages are delivered to the IO
670  * subsystem according to empirical testing, and this is also justified
671  * by considering the behavior of a buddy system containing a single
672  * large block of memory acted on by a series of small allocations.
673  * This behavior is a critical factor in sglist merging's success.
674  *
675  * -- wli
676  */
677 static inline void expand(struct zone *zone, struct page *page,
678 	int low, int high, struct free_area *area,
679 	int migratetype)
680 {
681 	unsigned long size = 1 << high;
682 
683 	while (high > low) {
684 		area--;
685 		high--;
686 		size >>= 1;
687 		VM_BUG_ON(bad_range(zone, &page[size]));
688 		list_add(&page[size].lru, &area->free_list[migratetype]);
689 		area->nr_free++;
690 		set_page_order(&page[size], high);
691 	}
692 }
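/*
 * Illustrative worked example (not part of the original file): satisfying
 * an order-0 request (low = 0) from an order-3 block (high = 3) covering
 * pages 0..7, expand() returns the unused remainder to the free lists:
 *
 *	high = 2: pages 4..7 -> free_area[2]
 *	high = 1: pages 2..3 -> free_area[1]
 *	high = 0: page  1    -> free_area[0]
 *
 * leaving only page 0 to be handed back to the caller.
 */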
693 
694 /*
695  * This page is about to be returned from the page allocator
696  */
697 static inline int check_new_page(struct page *page)
698 {
699 	if (unlikely(page_mapcount(page) |
700 		(page->mapping != NULL)  |
701 		(atomic_read(&page->_count) != 0)  |
702 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
703 		bad_page(page);
704 		return 1;
705 	}
706 	return 0;
707 }
708 
709 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
710 {
711 	int i;
712 
713 	for (i = 0; i < (1 << order); i++) {
714 		struct page *p = page + i;
715 		if (unlikely(check_new_page(p)))
716 			return 1;
717 	}
718 
719 	set_page_private(page, 0);
720 	set_page_refcounted(page);
721 
722 	arch_alloc_page(page, order);
723 	kernel_map_pages(page, 1 << order, 1);
724 
725 	if (gfp_flags & __GFP_ZERO)
726 		prep_zero_page(page, order, gfp_flags);
727 
728 	if (order && (gfp_flags & __GFP_COMP))
729 		prep_compound_page(page, order);
730 
731 	return 0;
732 }
733 
734 /*
735  * Go through the free lists for the given migratetype and remove
736  * the smallest available page from the freelists
737  */
738 static inline
739 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
740 						int migratetype)
741 {
742 	unsigned int current_order;
743 	struct free_area * area;
744 	struct page *page;
745 
746 	/* Find a page of the appropriate size in the preferred list */
747 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
748 		area = &(zone->free_area[current_order]);
749 		if (list_empty(&area->free_list[migratetype]))
750 			continue;
751 
752 		page = list_entry(area->free_list[migratetype].next,
753 							struct page, lru);
754 		list_del(&page->lru);
755 		rmv_page_order(page);
756 		area->nr_free--;
757 		expand(zone, page, order, current_order, area, migratetype);
758 		return page;
759 	}
760 
761 	return NULL;
762 }
763 
764 
765 /*
766  * This array describes the order in which free lists are fallen back to
767  * when the free lists for the desired migratetype are depleted.
768  */
769 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
770 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
771 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
772 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
773 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
774 };
775 
776 /*
777  * Move the free pages in a range to the free lists of the requested type.
778  * Note that start_page and end_page are not aligned on a pageblock
779  * boundary. If alignment is required, use move_freepages_block()
780  */
781 static int move_freepages(struct zone *zone,
782 			  struct page *start_page, struct page *end_page,
783 			  int migratetype)
784 {
785 	struct page *page;
786 	unsigned long order;
787 	int pages_moved = 0;
788 
789 #ifndef CONFIG_HOLES_IN_ZONE
790 	/*
791 	 * page_zone is not safe to call in this context when
792 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
793 	 * anyway as we check zone boundaries in move_freepages_block().
794 	 * Remove at a later date when no bug reports exist related to
795 	 * grouping pages by mobility
796 	 */
797 	BUG_ON(page_zone(start_page) != page_zone(end_page));
798 #endif
799 
800 	for (page = start_page; page <= end_page;) {
801 		/* Make sure we are not inadvertently changing nodes */
802 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
803 
804 		if (!pfn_valid_within(page_to_pfn(page))) {
805 			page++;
806 			continue;
807 		}
808 
809 		if (!PageBuddy(page)) {
810 			page++;
811 			continue;
812 		}
813 
814 		order = page_order(page);
815 		list_del(&page->lru);
816 		list_add(&page->lru,
817 			&zone->free_area[order].free_list[migratetype]);
818 		page += 1 << order;
819 		pages_moved += 1 << order;
820 	}
821 
822 	return pages_moved;
823 }
824 
825 static int move_freepages_block(struct zone *zone, struct page *page,
826 				int migratetype)
827 {
828 	unsigned long start_pfn, end_pfn;
829 	struct page *start_page, *end_page;
830 
831 	start_pfn = page_to_pfn(page);
832 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
833 	start_page = pfn_to_page(start_pfn);
834 	end_page = start_page + pageblock_nr_pages - 1;
835 	end_pfn = start_pfn + pageblock_nr_pages - 1;
836 
837 	/* Do not cross zone boundaries */
838 	if (start_pfn < zone->zone_start_pfn)
839 		start_page = page;
840 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
841 		return 0;
842 
843 	return move_freepages(zone, start_page, end_page, migratetype);
844 }
845 
846 static void change_pageblock_range(struct page *pageblock_page,
847 					int start_order, int migratetype)
848 {
849 	int nr_pageblocks = 1 << (start_order - pageblock_order);
850 
851 	while (nr_pageblocks--) {
852 		set_pageblock_migratetype(pageblock_page, migratetype);
853 		pageblock_page += pageblock_nr_pages;
854 	}
855 }
856 
857 /* Remove an element from the buddy allocator from the fallback list */
858 static inline struct page *
859 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
860 {
861 	struct free_area * area;
862 	int current_order;
863 	struct page *page;
864 	int migratetype, i;
865 
866 	/* Find the largest possible block of pages in the other list */
867 	for (current_order = MAX_ORDER-1; current_order >= order;
868 						--current_order) {
869 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
870 			migratetype = fallbacks[start_migratetype][i];
871 
872 			/* MIGRATE_RESERVE handled later if necessary */
873 			if (migratetype == MIGRATE_RESERVE)
874 				continue;
875 
876 			area = &(zone->free_area[current_order]);
877 			if (list_empty(&area->free_list[migratetype]))
878 				continue;
879 
880 			page = list_entry(area->free_list[migratetype].next,
881 					struct page, lru);
882 			area->nr_free--;
883 
884 			/*
885 			 * If breaking a large block of pages, move all free
886 			 * pages to the preferred allocation list. If falling
887 			 * back for a reclaimable kernel allocation, be more
888 			 * aggressive about taking ownership of free pages
889 			 */
890 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
891 					start_migratetype == MIGRATE_RECLAIMABLE ||
892 					page_group_by_mobility_disabled) {
893 				unsigned long pages;
894 				pages = move_freepages_block(zone, page,
895 								start_migratetype);
896 
897 				/* Claim the whole block if over half of it is free */
898 				if (pages >= (1 << (pageblock_order-1)) ||
899 						page_group_by_mobility_disabled)
900 					set_pageblock_migratetype(page,
901 								start_migratetype);
902 
903 				migratetype = start_migratetype;
904 			}
905 
906 			/* Remove the page from the freelists */
907 			list_del(&page->lru);
908 			rmv_page_order(page);
909 
910 			/* Take ownership for orders >= pageblock_order */
911 			if (current_order >= pageblock_order)
912 				change_pageblock_range(page, current_order,
913 							start_migratetype);
914 
915 			expand(zone, page, order, current_order, area, migratetype);
916 
917 			trace_mm_page_alloc_extfrag(page, order, current_order,
918 				start_migratetype, migratetype);
919 
920 			return page;
921 		}
922 	}
923 
924 	return NULL;
925 }
926 
927 /*
928  * Do the hard work of removing an element from the buddy allocator.
929  * Call me with the zone->lock already held.
930  */
931 static struct page *__rmqueue(struct zone *zone, unsigned int order,
932 						int migratetype)
933 {
934 	struct page *page;
935 
936 retry_reserve:
937 	page = __rmqueue_smallest(zone, order, migratetype);
938 
939 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
940 		page = __rmqueue_fallback(zone, order, migratetype);
941 
942 		/*
943 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
944 		 * is used because __rmqueue_smallest is an inline function
945 		 * and we want just one call site
946 		 */
947 		if (!page) {
948 			migratetype = MIGRATE_RESERVE;
949 			goto retry_reserve;
950 		}
951 	}
952 
953 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
954 	return page;
955 }
956 
957 /*
958  * Obtain a specified number of elements from the buddy allocator, all under
959  * a single hold of the lock, for efficiency.  Add them to the supplied list.
960  * Returns the number of new pages which were placed at *list.
961  */
962 static int rmqueue_bulk(struct zone *zone, unsigned int order,
963 			unsigned long count, struct list_head *list,
964 			int migratetype, int cold)
965 {
966 	int i;
967 
968 	spin_lock(&zone->lock);
969 	for (i = 0; i < count; ++i) {
970 		struct page *page = __rmqueue(zone, order, migratetype);
971 		if (unlikely(page == NULL))
972 			break;
973 
974 		/*
975 		 * Split buddy pages returned by expand() are received here
976 		 * in physical page order. The page is added to the caller's
977 		 * list and the list head then moves forward. From the caller's
978 		 * perspective, the linked list is ordered by page number under
979 		 * some conditions. This is useful for IO devices that can
980 		 * merge IO requests if the physical pages are ordered
981 		 * properly.
982 		 */
983 		if (likely(cold == 0))
984 			list_add(&page->lru, list);
985 		else
986 			list_add_tail(&page->lru, list);
987 		set_page_private(page, migratetype);
988 		list = &page->lru;
989 	}
990 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
991 	spin_unlock(&zone->lock);
992 	return i;
993 }
994 
995 #ifdef CONFIG_NUMA
996 /*
997  * Called from the vmstat counter updater to drain pagesets of this
998  * currently executing processor on remote nodes after they have
999  * expired.
1000  *
1001  * Note that this function must be called with the thread pinned to
1002  * a single processor.
1003  */
1004 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1005 {
1006 	unsigned long flags;
1007 	int to_drain;
1008 
1009 	local_irq_save(flags);
1010 	if (pcp->count >= pcp->batch)
1011 		to_drain = pcp->batch;
1012 	else
1013 		to_drain = pcp->count;
1014 	free_pcppages_bulk(zone, to_drain, pcp);
1015 	pcp->count -= to_drain;
1016 	local_irq_restore(flags);
1017 }
1018 #endif
1019 
1020 /*
1021  * Drain pages of the indicated processor.
1022  *
1023  * The processor must either be the current processor and the
1024  * thread pinned to the current processor or a processor that
1025  * is not online.
1026  */
1027 static void drain_pages(unsigned int cpu)
1028 {
1029 	unsigned long flags;
1030 	struct zone *zone;
1031 
1032 	for_each_populated_zone(zone) {
1033 		struct per_cpu_pageset *pset;
1034 		struct per_cpu_pages *pcp;
1035 
1036 		local_irq_save(flags);
1037 		pset = per_cpu_ptr(zone->pageset, cpu);
1038 
1039 		pcp = &pset->pcp;
1040 		free_pcppages_bulk(zone, pcp->count, pcp);
1041 		pcp->count = 0;
1042 		local_irq_restore(flags);
1043 	}
1044 }
1045 
1046 /*
1047  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1048  */
1049 void drain_local_pages(void *arg)
1050 {
1051 	drain_pages(smp_processor_id());
1052 }
1053 
1054 /*
1055  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1056  */
1057 void drain_all_pages(void)
1058 {
1059 	on_each_cpu(drain_local_pages, NULL, 1);
1060 }
1061 
1062 #ifdef CONFIG_HIBERNATION
1063 
1064 void mark_free_pages(struct zone *zone)
1065 {
1066 	unsigned long pfn, max_zone_pfn;
1067 	unsigned long flags;
1068 	int order, t;
1069 	struct list_head *curr;
1070 
1071 	if (!zone->spanned_pages)
1072 		return;
1073 
1074 	spin_lock_irqsave(&zone->lock, flags);
1075 
1076 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1077 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1078 		if (pfn_valid(pfn)) {
1079 			struct page *page = pfn_to_page(pfn);
1080 
1081 			if (!swsusp_page_is_forbidden(page))
1082 				swsusp_unset_page_free(page);
1083 		}
1084 
1085 	for_each_migratetype_order(order, t) {
1086 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1087 			unsigned long i;
1088 
1089 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1090 			for (i = 0; i < (1UL << order); i++)
1091 				swsusp_set_page_free(pfn_to_page(pfn + i));
1092 		}
1093 	}
1094 	spin_unlock_irqrestore(&zone->lock, flags);
1095 }
1096 #endif /* CONFIG_PM */
1097 
1098 /*
1099  * Free a 0-order page
1100  * cold == 1 ? free a cold page : free a hot page
1101  */
1102 void free_hot_cold_page(struct page *page, int cold)
1103 {
1104 	struct zone *zone = page_zone(page);
1105 	struct per_cpu_pages *pcp;
1106 	unsigned long flags;
1107 	int migratetype;
1108 	int wasMlocked = __TestClearPageMlocked(page);
1109 
1110 	trace_mm_page_free_direct(page, 0);
1111 	kmemcheck_free_shadow(page, 0);
1112 
1113 	if (PageAnon(page))
1114 		page->mapping = NULL;
1115 	if (free_pages_check(page))
1116 		return;
1117 
1118 	if (!PageHighMem(page)) {
1119 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1120 		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1121 	}
1122 	arch_free_page(page, 0);
1123 	kernel_map_pages(page, 1, 0);
1124 
1125 	migratetype = get_pageblock_migratetype(page);
1126 	set_page_private(page, migratetype);
1127 	local_irq_save(flags);
1128 	if (unlikely(wasMlocked))
1129 		free_page_mlock(page);
1130 	__count_vm_event(PGFREE);
1131 
1132 	/*
1133 	 * We only track unmovable, reclaimable and movable on pcp lists.
1134 	 * Free ISOLATE pages back to the allocator because they are being
1135 	 * offlined but treat RESERVE as movable pages so we can get those
1136 	 * areas back if necessary. Otherwise, we may have to free
1137 	 * excessively into the page allocator
1138 	 */
1139 	if (migratetype >= MIGRATE_PCPTYPES) {
1140 		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1141 			free_one_page(zone, page, 0, migratetype);
1142 			goto out;
1143 		}
1144 		migratetype = MIGRATE_MOVABLE;
1145 	}
1146 
1147 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
1148 	if (cold)
1149 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1150 	else
1151 		list_add(&page->lru, &pcp->lists[migratetype]);
1152 	pcp->count++;
1153 	if (pcp->count >= pcp->high) {
1154 		free_pcppages_bulk(zone, pcp->batch, pcp);
1155 		pcp->count -= pcp->batch;
1156 	}
1157 
1158 out:
1159 	local_irq_restore(flags);
1160 }
1161 
1162 /*
1163  * split_page takes a non-compound higher-order page, and splits it into
1164  * n (1<<order) sub-pages: page[0] .. page[n-1]
1165  * Each sub-page must be freed individually.
1166  *
1167  * Note: this is probably too low level an operation for use in drivers.
1168  * Please consult with lkml before using this in your driver.
1169  */
1170 void split_page(struct page *page, unsigned int order)
1171 {
1172 	int i;
1173 
1174 	VM_BUG_ON(PageCompound(page));
1175 	VM_BUG_ON(!page_count(page));
1176 
1177 #ifdef CONFIG_KMEMCHECK
1178 	/*
1179 	 * Split shadow pages too, because free(page[0]) would
1180 	 * otherwise free the whole shadow.
1181 	 */
1182 	if (kmemcheck_page_is_tracked(page))
1183 		split_page(virt_to_page(page[0].shadow), order);
1184 #endif
1185 
1186 	for (i = 1; i < (1 << order); i++)
1187 		set_page_refcounted(page + i);
1188 }
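/*
 * Illustrative usage sketch (not part of the original file), assuming a
 * caller that wants four physically contiguous pages it can later free
 * one at a time:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * Without the split_page() call, only the head page has a reference
 * count and the block must be freed as a whole with __free_pages(page, 2).
 */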
1189 
1190 /*
1191  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1192  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1193  * or two.
1194  */
1195 static inline
1196 struct page *buffered_rmqueue(struct zone *preferred_zone,
1197 			struct zone *zone, int order, gfp_t gfp_flags,
1198 			int migratetype)
1199 {
1200 	unsigned long flags;
1201 	struct page *page;
1202 	int cold = !!(gfp_flags & __GFP_COLD);
1203 
1204 again:
1205 	if (likely(order == 0)) {
1206 		struct per_cpu_pages *pcp;
1207 		struct list_head *list;
1208 
1209 		local_irq_save(flags);
1210 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
1211 		list = &pcp->lists[migratetype];
1212 		if (list_empty(list)) {
1213 			pcp->count += rmqueue_bulk(zone, 0,
1214 					pcp->batch, list,
1215 					migratetype, cold);
1216 			if (unlikely(list_empty(list)))
1217 				goto failed;
1218 		}
1219 
1220 		if (cold)
1221 			page = list_entry(list->prev, struct page, lru);
1222 		else
1223 			page = list_entry(list->next, struct page, lru);
1224 
1225 		list_del(&page->lru);
1226 		pcp->count--;
1227 	} else {
1228 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1229 			/*
1230 			 * __GFP_NOFAIL is not to be used in new code.
1231 			 *
1232 			 * All __GFP_NOFAIL callers should be fixed so that they
1233 			 * properly detect and handle allocation failures.
1234 			 *
1235 			 * We most definitely don't want callers attempting to
1236 			 * allocate greater than order-1 page units with
1237 			 * __GFP_NOFAIL.
1238 			 */
1239 			WARN_ON_ONCE(order > 1);
1240 		}
1241 		spin_lock_irqsave(&zone->lock, flags);
1242 		page = __rmqueue(zone, order, migratetype);
1243 		spin_unlock(&zone->lock);
1244 		if (!page)
1245 			goto failed;
1246 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1247 	}
1248 
1249 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1250 	zone_statistics(preferred_zone, zone);
1251 	local_irq_restore(flags);
1252 
1253 	VM_BUG_ON(bad_range(zone, page));
1254 	if (prep_new_page(page, order, gfp_flags))
1255 		goto again;
1256 	return page;
1257 
1258 failed:
1259 	local_irq_restore(flags);
1260 	return NULL;
1261 }
1262 
1263 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1264 #define ALLOC_WMARK_MIN		WMARK_MIN
1265 #define ALLOC_WMARK_LOW		WMARK_LOW
1266 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1267 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1268 
1269 /* Mask to get the watermark bits */
1270 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1271 
1272 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1273 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1274 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1275 
1276 #ifdef CONFIG_FAIL_PAGE_ALLOC
1277 
1278 static struct fail_page_alloc_attr {
1279 	struct fault_attr attr;
1280 
1281 	u32 ignore_gfp_highmem;
1282 	u32 ignore_gfp_wait;
1283 	u32 min_order;
1284 
1285 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1286 
1287 	struct dentry *ignore_gfp_highmem_file;
1288 	struct dentry *ignore_gfp_wait_file;
1289 	struct dentry *min_order_file;
1290 
1291 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1292 
1293 } fail_page_alloc = {
1294 	.attr = FAULT_ATTR_INITIALIZER,
1295 	.ignore_gfp_wait = 1,
1296 	.ignore_gfp_highmem = 1,
1297 	.min_order = 1,
1298 };
1299 
1300 static int __init setup_fail_page_alloc(char *str)
1301 {
1302 	return setup_fault_attr(&fail_page_alloc.attr, str);
1303 }
1304 __setup("fail_page_alloc=", setup_fail_page_alloc);
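/*
 * Illustrative usage sketch (not part of the original file): fault
 * injection can be armed from the boot command line, e.g.
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * and, with CONFIG_FAULT_INJECTION_DEBUG_FS, tuned at run time through the
 * debugfs files created below (min-order, ignore-gfp-wait,
 * ignore-gfp-highmem) alongside the generic fault_attr knobs.  See
 * Documentation/fault-injection/ for the authoritative format.
 */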
1305 
1306 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1307 {
1308 	if (order < fail_page_alloc.min_order)
1309 		return 0;
1310 	if (gfp_mask & __GFP_NOFAIL)
1311 		return 0;
1312 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1313 		return 0;
1314 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1315 		return 0;
1316 
1317 	return should_fail(&fail_page_alloc.attr, 1 << order);
1318 }
1319 
1320 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1321 
1322 static int __init fail_page_alloc_debugfs(void)
1323 {
1324 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1325 	struct dentry *dir;
1326 	int err;
1327 
1328 	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1329 				       "fail_page_alloc");
1330 	if (err)
1331 		return err;
1332 	dir = fail_page_alloc.attr.dentries.dir;
1333 
1334 	fail_page_alloc.ignore_gfp_wait_file =
1335 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1336 				      &fail_page_alloc.ignore_gfp_wait);
1337 
1338 	fail_page_alloc.ignore_gfp_highmem_file =
1339 		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1340 				      &fail_page_alloc.ignore_gfp_highmem);
1341 	fail_page_alloc.min_order_file =
1342 		debugfs_create_u32("min-order", mode, dir,
1343 				   &fail_page_alloc.min_order);
1344 
1345 	if (!fail_page_alloc.ignore_gfp_wait_file ||
1346             !fail_page_alloc.ignore_gfp_highmem_file ||
1347             !fail_page_alloc.min_order_file) {
1348 		err = -ENOMEM;
1349 		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1350 		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1351 		debugfs_remove(fail_page_alloc.min_order_file);
1352 		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1353 	}
1354 
1355 	return err;
1356 }
1357 
1358 late_initcall(fail_page_alloc_debugfs);
1359 
1360 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1361 
1362 #else /* CONFIG_FAIL_PAGE_ALLOC */
1363 
1364 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1365 {
1366 	return 0;
1367 }
1368 
1369 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1370 
1371 /*
1372  * Return 1 if free pages are above 'mark'. This takes into account the order
1373  * of the allocation.
1374  */
1375 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1376 		      int classzone_idx, int alloc_flags)
1377 {
1378 	/* free_pages may go negative - that's OK */
1379 	long min = mark;
1380 	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1381 	int o;
1382 
1383 	if (alloc_flags & ALLOC_HIGH)
1384 		min -= min / 2;
1385 	if (alloc_flags & ALLOC_HARDER)
1386 		min -= min / 4;
1387 
1388 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1389 		return 0;
1390 	for (o = 0; o < order; o++) {
1391 		/* At the next order, this order's pages become unavailable */
1392 		free_pages -= z->free_area[o].nr_free << o;
1393 
1394 		/* Require fewer higher order pages to be free */
1395 		min >>= 1;
1396 
1397 		if (free_pages <= min)
1398 			return 0;
1399 	}
1400 	return 1;
1401 }
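/*
 * Illustrative worked example (not part of the original file): an order-2
 * request checked against mark = 1024, with no ALLOC_HIGH/ALLOC_HARDER and
 * a zero lowmem_reserve.  Suppose the zone has 2000 free pages but 1500 of
 * them are free order-0 pages:
 *
 *	initial: free_pages = 2000 - (1 << 2) + 1 = 1997 > 1024, keep going
 *	o = 0:   free_pages -= 1500  ->  497;  min >>= 1  ->  512;  497 <= 512
 *
 * so the check fails: the zone is above the raw watermark but too
 * fragmented to be trusted with an order-2 allocation.
 */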
1402 
1403 #ifdef CONFIG_NUMA
1404 /*
1405  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1406  * skip over zones that are not allowed by the cpuset, or that have
1407  * been recently (in last second) found to be nearly full.  See further
1408  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1409  * that have to skip over a lot of full or unallowed zones.
1410  *
1411  * If the zonelist cache is present in the passed in zonelist, then
1412  * returns a pointer to the allowed node mask (either the current
1413  * task's mems_allowed, or node_states[N_HIGH_MEMORY]).
1414  *
1415  * If the zonelist cache is not available for this zonelist, does
1416  * nothing and returns NULL.
1417  *
1418  * If the fullzones BITMAP in the zonelist cache is stale (more than
1419  * a second since last zap'd) then we zap it out (clear its bits.)
1420  *
1421  * We hold off even calling zlc_setup, until after we've checked the
1422  * first zone in the zonelist, on the theory that most allocations will
1423  * be satisfied from that first zone, so best to examine that zone as
1424  * quickly as we can.
1425  */
1426 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1427 {
1428 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1429 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1430 
1431 	zlc = zonelist->zlcache_ptr;
1432 	if (!zlc)
1433 		return NULL;
1434 
1435 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1436 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1437 		zlc->last_full_zap = jiffies;
1438 	}
1439 
1440 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1441 					&cpuset_current_mems_allowed :
1442 					&node_states[N_HIGH_MEMORY];
1443 	return allowednodes;
1444 }
1445 
1446 /*
1447  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1448  * if it is worth looking at further for free memory:
1449  *  1) Check that the zone isn't thought to be full (doesn't have its
1450  *     bit set in the zonelist_cache fullzones BITMAP).
1451  *  2) Check that the zone's node (obtained from the zonelist_cache
1452  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1453  * Return true (non-zero) if zone is worth looking at further, or
1454  * else return false (zero) if it is not.
1455  *
1456  * This check -ignores- the distinction between various watermarks,
1457  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1458  * found to be full for any variation of these watermarks, it will
1459  * be considered full for up to one second by all requests, unless
1460  * we are so low on memory on all allowed nodes that we are forced
1461  * into the second scan of the zonelist.
1462  *
1463  * In the second scan we ignore this zonelist cache and exactly
1464  * apply the watermarks to all zones, even if it is slower to do so.
1465  * We are low on memory in the second scan, and should leave no stone
1466  * unturned looking for a free page.
1467  */
1468 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1469 						nodemask_t *allowednodes)
1470 {
1471 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1472 	int i;				/* index of *z in zonelist zones */
1473 	int n;				/* node that zone *z is on */
1474 
1475 	zlc = zonelist->zlcache_ptr;
1476 	if (!zlc)
1477 		return 1;
1478 
1479 	i = z - zonelist->_zonerefs;
1480 	n = zlc->z_to_n[i];
1481 
1482 	/* This zone is worth trying if it is allowed but not full */
1483 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1484 }
1485 
1486 /*
1487  * Given 'z' scanning a zonelist, set the corresponding bit in
1488  * zlc->fullzones, so that subsequent attempts to allocate a page
1489  * from that zone don't waste time re-examining it.
1490  */
1491 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1492 {
1493 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1494 	int i;				/* index of *z in zonelist zones */
1495 
1496 	zlc = zonelist->zlcache_ptr;
1497 	if (!zlc)
1498 		return;
1499 
1500 	i = z - zonelist->_zonerefs;
1501 
1502 	set_bit(i, zlc->fullzones);
1503 }
1504 
1505 #else	/* CONFIG_NUMA */
1506 
1507 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1508 {
1509 	return NULL;
1510 }
1511 
1512 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1513 				nodemask_t *allowednodes)
1514 {
1515 	return 1;
1516 }
1517 
1518 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1519 {
1520 }
1521 #endif	/* CONFIG_NUMA */
1522 
1523 /*
1524  * get_page_from_freelist goes through the zonelist trying to allocate
1525  * a page.
1526  */
1527 static struct page *
1528 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1529 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1530 		struct zone *preferred_zone, int migratetype)
1531 {
1532 	struct zoneref *z;
1533 	struct page *page = NULL;
1534 	int classzone_idx;
1535 	struct zone *zone;
1536 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1537 	int zlc_active = 0;		/* set if using zonelist_cache */
1538 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1539 
1540 	classzone_idx = zone_idx(preferred_zone);
1541 zonelist_scan:
1542 	/*
1543 	 * Scan zonelist, looking for a zone with enough free pages.
1544 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1545 	 */
1546 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1547 						high_zoneidx, nodemask) {
1548 		if (NUMA_BUILD && zlc_active &&
1549 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1550 				continue;
1551 		if ((alloc_flags & ALLOC_CPUSET) &&
1552 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1553 				goto try_next_zone;
1554 
1555 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1556 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1557 			unsigned long mark;
1558 			int ret;
1559 
1560 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1561 			if (zone_watermark_ok(zone, order, mark,
1562 				    classzone_idx, alloc_flags))
1563 				goto try_this_zone;
1564 
1565 			if (zone_reclaim_mode == 0)
1566 				goto this_zone_full;
1567 
1568 			ret = zone_reclaim(zone, gfp_mask, order);
1569 			switch (ret) {
1570 			case ZONE_RECLAIM_NOSCAN:
1571 				/* did not scan */
1572 				goto try_next_zone;
1573 			case ZONE_RECLAIM_FULL:
1574 				/* scanned but unreclaimable */
1575 				goto this_zone_full;
1576 			default:
1577 				/* did we reclaim enough? */
1578 				if (!zone_watermark_ok(zone, order, mark,
1579 						classzone_idx, alloc_flags))
1580 					goto this_zone_full;
1581 			}
1582 		}
1583 
1584 try_this_zone:
1585 		page = buffered_rmqueue(preferred_zone, zone, order,
1586 						gfp_mask, migratetype);
1587 		if (page)
1588 			break;
1589 this_zone_full:
1590 		if (NUMA_BUILD)
1591 			zlc_mark_zone_full(zonelist, z);
1592 try_next_zone:
1593 		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1594 			/*
1595 			 * We do zlc_setup after the first zone is tried, but only
1596 			 * if there are multiple nodes to make it worthwhile.
1597 			 */
1598 			allowednodes = zlc_setup(zonelist, alloc_flags);
1599 			zlc_active = 1;
1600 			did_zlc_setup = 1;
1601 		}
1602 	}
1603 
1604 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1605 		/* Disable zlc cache for second zonelist scan */
1606 		zlc_active = 0;
1607 		goto zonelist_scan;
1608 	}
1609 	return page;
1610 }
1611 
1612 static inline int
1613 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1614 				unsigned long pages_reclaimed)
1615 {
1616 	/* Do not loop if specifically requested */
1617 	if (gfp_mask & __GFP_NORETRY)
1618 		return 0;
1619 
1620 	/*
1621 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1622 	 * means __GFP_NOFAIL, but that may not be true in other
1623 	 * implementations.
1624 	 */
1625 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1626 		return 1;
1627 
1628 	/*
1629 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1630 	 * specified, then we retry until we no longer reclaim any pages
1631 	 * (above), or we've reclaimed an order of pages at least as
1632 	 * large as the allocation's order. In both cases, if the
1633 	 * allocation still fails, we stop retrying.
1634 	 */
1635 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1636 		return 1;
1637 
1638 	/*
1639 	 * Don't let big-order allocations loop unless the caller
1640 	 * explicitly requests that.
1641 	 */
1642 	if (gfp_mask & __GFP_NOFAIL)
1643 		return 1;
1644 
1645 	return 0;
1646 }
1647 
1648 static inline struct page *
1649 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1650 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1651 	nodemask_t *nodemask, struct zone *preferred_zone,
1652 	int migratetype)
1653 {
1654 	struct page *page;
1655 
1656 	/* Acquire the OOM killer lock for the zones in zonelist */
1657 	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1658 		schedule_timeout_uninterruptible(1);
1659 		return NULL;
1660 	}
1661 
1662 	/*
1663 	 * Go through the zonelist yet one more time, keeping a very high
1664 	 * watermark here; this is only to catch a parallel oom killing, and
1665 	 * we must fail if we're still under heavy pressure.
1666 	 */
1667 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1668 		order, zonelist, high_zoneidx,
1669 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1670 		preferred_zone, migratetype);
1671 	if (page)
1672 		goto out;
1673 
1674 	if (!(gfp_mask & __GFP_NOFAIL)) {
1675 		/* The OOM killer will not help higher order allocs */
1676 		if (order > PAGE_ALLOC_COSTLY_ORDER)
1677 			goto out;
1678 		/*
1679 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1680 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1681 		 * The caller should handle page allocation failure by itself if
1682 		 * it specifies __GFP_THISNODE.
1683 		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1684 		 */
1685 		if (gfp_mask & __GFP_THISNODE)
1686 			goto out;
1687 	}
1688 	/* Exhausted what can be done so it's blamo time */
1689 	out_of_memory(zonelist, gfp_mask, order, nodemask);
1690 
1691 out:
1692 	clear_zonelist_oom(zonelist, gfp_mask);
1693 	return page;
1694 }
1695 
1696 /* The really slow allocator path where we enter direct reclaim */
1697 static inline struct page *
1698 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1699 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1700 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1701 	int migratetype, unsigned long *did_some_progress)
1702 {
1703 	struct page *page = NULL;
1704 	struct reclaim_state reclaim_state;
1705 	struct task_struct *p = current;
1706 
1707 	cond_resched();
1708 
1709 	/* We now go into synchronous reclaim */
1710 	cpuset_memory_pressure_bump();
1711 	p->flags |= PF_MEMALLOC;
1712 	lockdep_set_current_reclaim_state(gfp_mask);
1713 	reclaim_state.reclaimed_slab = 0;
1714 	p->reclaim_state = &reclaim_state;
1715 
1716 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1717 
1718 	p->reclaim_state = NULL;
1719 	lockdep_clear_current_reclaim_state();
1720 	p->flags &= ~PF_MEMALLOC;
1721 
1722 	cond_resched();
1723 
1724 	if (order != 0)
1725 		drain_all_pages();
1726 
1727 	if (likely(*did_some_progress))
1728 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1729 					zonelist, high_zoneidx,
1730 					alloc_flags, preferred_zone,
1731 					migratetype);
1732 	return page;
1733 }
1734 
1735 /*
1736  * This is called in the allocator slow-path if the allocation request is of
1737  * sufficient urgency to ignore watermarks and take other desperate measures
1738  */
1739 static inline struct page *
1740 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1741 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1742 	nodemask_t *nodemask, struct zone *preferred_zone,
1743 	int migratetype)
1744 {
1745 	struct page *page;
1746 
1747 	do {
1748 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1749 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1750 			preferred_zone, migratetype);
1751 
1752 		if (!page && gfp_mask & __GFP_NOFAIL)
1753 			congestion_wait(BLK_RW_ASYNC, HZ/50);
1754 	} while (!page && (gfp_mask & __GFP_NOFAIL));
1755 
1756 	return page;
1757 }
1758 
1759 static inline
1760 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1761 						enum zone_type high_zoneidx)
1762 {
1763 	struct zoneref *z;
1764 	struct zone *zone;
1765 
1766 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1767 		wakeup_kswapd(zone, order);
1768 }
1769 
1770 static inline int
1771 gfp_to_alloc_flags(gfp_t gfp_mask)
1772 {
1773 	struct task_struct *p = current;
1774 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1775 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1776 
1777 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1778 	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1779 
1780 	/*
1781 	 * The caller may dip into page reserves a bit more if the caller
1782 	 * cannot run direct reclaim, or if the caller has realtime scheduling
1783 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1784 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1785 	 */
1786 	alloc_flags |= (gfp_mask & __GFP_HIGH);
1787 
1788 	if (!wait) {
1789 		alloc_flags |= ALLOC_HARDER;
1790 		/*
1791 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1792 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1793 		 */
1794 		alloc_flags &= ~ALLOC_CPUSET;
1795 	} else if (unlikely(rt_task(p)) && !in_interrupt())
1796 		alloc_flags |= ALLOC_HARDER;
1797 
1798 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1799 		if (!in_interrupt() &&
1800 		    ((p->flags & PF_MEMALLOC) ||
1801 		     unlikely(test_thread_flag(TIF_MEMDIE))))
1802 			alloc_flags |= ALLOC_NO_WATERMARKS;
1803 	}
1804 
1805 	return alloc_flags;
1806 }
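/*
 * Illustrative example (not from the original source): for a GFP_ATOMIC
 * allocation -- which in this kernel is __GFP_HIGH with no __GFP_WAIT --
 * the flags start as ALLOC_WMARK_MIN | ALLOC_CPUSET, gain ALLOC_HIGH from
 * __GFP_HIGH and ALLOC_HARDER because reclaim is not allowed, and lose
 * ALLOC_CPUSET, i.e. the caller may dig deeper into the reserves.
 */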
1807 
1808 static inline struct page *
1809 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1810 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1811 	nodemask_t *nodemask, struct zone *preferred_zone,
1812 	int migratetype)
1813 {
1814 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1815 	struct page *page = NULL;
1816 	int alloc_flags;
1817 	unsigned long pages_reclaimed = 0;
1818 	unsigned long did_some_progress;
1819 	struct task_struct *p = current;
1820 
1821 	/*
1822 	 * In the slowpath, we sanity check order to avoid ever trying to
1823 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1824 	 * be using allocators in order of preference for an area that is
1825 	 * too large.
1826 	 */
1827 	if (order >= MAX_ORDER) {
1828 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1829 		return NULL;
1830 	}
1831 
1832 	/*
1833 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1834 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1835 	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1836 	 * using a larger set of nodes after it has established that the
1837 	 * allowed per node queues are empty and that nodes are
1838 	 * over allocated.
1839 	 */
1840 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1841 		goto nopage;
1842 
1843 restart:
1844 	wake_all_kswapd(order, zonelist, high_zoneidx);
1845 
1846 	/*
1847 	 * OK, we're below the kswapd watermark and have kicked background
1848 	 * reclaim. Now things get more complex, so set up alloc_flags according
1849 	 * to how we want to proceed.
1850 	 */
1851 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1852 
1853 	/* This is the last chance, in general, before the goto nopage. */
1854 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1855 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1856 			preferred_zone, migratetype);
1857 	if (page)
1858 		goto got_pg;
1859 
1860 rebalance:
1861 	/* Allocate without watermarks if the context allows */
1862 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1863 		page = __alloc_pages_high_priority(gfp_mask, order,
1864 				zonelist, high_zoneidx, nodemask,
1865 				preferred_zone, migratetype);
1866 		if (page)
1867 			goto got_pg;
1868 	}
1869 
1870 	/* Atomic allocations - we can't balance anything */
1871 	if (!wait)
1872 		goto nopage;
1873 
1874 	/* Avoid recursion of direct reclaim */
1875 	if (p->flags & PF_MEMALLOC)
1876 		goto nopage;
1877 
1878 	/* Avoid allocations with no watermarks from looping endlessly */
1879 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1880 		goto nopage;
1881 
1882 	/* Try direct reclaim and then allocating */
1883 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1884 					zonelist, high_zoneidx,
1885 					nodemask,
1886 					alloc_flags, preferred_zone,
1887 					migratetype, &did_some_progress);
1888 	if (page)
1889 		goto got_pg;
1890 
1891 	/*
1892 	 * If we failed to make any progress reclaiming, then we are
1893 	 * running out of options and have to consider going OOM
1894 	 */
1895 	if (!did_some_progress) {
1896 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1897 			if (oom_killer_disabled)
1898 				goto nopage;
1899 			page = __alloc_pages_may_oom(gfp_mask, order,
1900 					zonelist, high_zoneidx,
1901 					nodemask, preferred_zone,
1902 					migratetype);
1903 			if (page)
1904 				goto got_pg;
1905 
1906 			/*
1907 			 * The OOM killer does not trigger for high-order
1908 			 * non-__GFP_NOFAIL allocations, so if no progress is being
1909 			 * made, there are no other options and retrying is
1910 			 * unlikely to help.
1911 			 */
1912 			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1913 						!(gfp_mask & __GFP_NOFAIL))
1914 				goto nopage;
1915 
1916 			goto restart;
1917 		}
1918 	}
1919 
1920 	/* Check if we should retry the allocation */
1921 	pages_reclaimed += did_some_progress;
1922 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1923 		/* Wait for some write requests to complete then retry */
1924 		congestion_wait(BLK_RW_ASYNC, HZ/50);
1925 		goto rebalance;
1926 	}
1927 
1928 nopage:
1929 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1930 		printk(KERN_WARNING "%s: page allocation failure."
1931 			" order:%d, mode:0x%x\n",
1932 			p->comm, order, gfp_mask);
1933 		dump_stack();
1934 		show_mem();
1935 	}
1936 	return page;
1937 got_pg:
1938 	if (kmemcheck_enabled)
1939 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1940 	return page;
1941 
1942 }
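/*
 * Illustrative flow (a sketch, not an exhaustive description): a GFP_KERNEL
 * order-0 allocation that missed in the fast path wakes kswapd, retries with
 * the min watermark, falls back to direct reclaim, may invoke the OOM killer
 * if no progress was made and __GFP_FS is set, and otherwise keeps looping
 * via should_alloc_retry() with short congestion waits in between.
 */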
1943 
1944 /*
1945  * This is the 'heart' of the zoned buddy allocator.
1946  */
1947 struct page *
1948 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1949 			struct zonelist *zonelist, nodemask_t *nodemask)
1950 {
1951 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1952 	struct zone *preferred_zone;
1953 	struct page *page;
1954 	int migratetype = allocflags_to_migratetype(gfp_mask);
1955 
1956 	gfp_mask &= gfp_allowed_mask;
1957 
1958 	lockdep_trace_alloc(gfp_mask);
1959 
1960 	might_sleep_if(gfp_mask & __GFP_WAIT);
1961 
1962 	if (should_fail_alloc_page(gfp_mask, order))
1963 		return NULL;
1964 
1965 	/*
1966 	 * Check the zones suitable for the gfp_mask contain at least one
1967 	 * valid zone. It's possible to have an empty zonelist as a result
1968 	 * of GFP_THISNODE and a memoryless node
1969 	 */
1970 	if (unlikely(!zonelist->_zonerefs->zone))
1971 		return NULL;
1972 
1973 	/* The preferred zone is used for statistics later */
1974 	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1975 	if (!preferred_zone)
1976 		return NULL;
1977 
1978 	/* First allocation attempt */
1979 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1980 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1981 			preferred_zone, migratetype);
1982 	if (unlikely(!page))
1983 		page = __alloc_pages_slowpath(gfp_mask, order,
1984 				zonelist, high_zoneidx, nodemask,
1985 				preferred_zone, migratetype);
1986 
1987 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1988 	return page;
1989 }
1990 EXPORT_SYMBOL(__alloc_pages_nodemask);
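/*
 * Hedged usage sketch: most callers do not invoke __alloc_pages_nodemask()
 * directly.  A typical call such as alloc_pages(GFP_KERNEL, 0) reaches this
 * function via alloc_pages_node() or, on NUMA kernels with memory policies,
 * alloc_pages_current(), in both cases ending up here with a zonelist for
 * the chosen node and (typically) a NULL nodemask; see include/linux/gfp.h
 * and mm/mempolicy.c.
 */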
1991 
1992 /*
1993  * Common helper functions.
1994  */
1995 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1996 {
1997 	struct page *page;
1998 
1999 	/*
2000 	 * __get_free_pages() returns a kernel virtual address, which cannot
2001 	 * represent a highmem page (it has no permanent kernel mapping)
2002 	 */
2003 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2004 
2005 	page = alloc_pages(gfp_mask, order);
2006 	if (!page)
2007 		return 0;
2008 	return (unsigned long) page_address(page);
2009 }
2010 EXPORT_SYMBOL(__get_free_pages);
2011 
2012 unsigned long get_zeroed_page(gfp_t gfp_mask)
2013 {
2014 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2015 }
2016 EXPORT_SYMBOL(get_zeroed_page);
2017 
2018 void __pagevec_free(struct pagevec *pvec)
2019 {
2020 	int i = pagevec_count(pvec);
2021 
2022 	while (--i >= 0) {
2023 		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2024 		free_hot_cold_page(pvec->pages[i], pvec->cold);
2025 	}
2026 }
2027 
2028 void __free_pages(struct page *page, unsigned int order)
2029 {
2030 	if (put_page_testzero(page)) {
2031 		if (order == 0)
2032 			free_hot_cold_page(page, 0);
2033 		else
2034 			__free_pages_ok(page, order);
2035 	}
2036 }
2037 
2038 EXPORT_SYMBOL(__free_pages);
2039 
2040 void free_pages(unsigned long addr, unsigned int order)
2041 {
2042 	if (addr != 0) {
2043 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2044 		__free_pages(virt_to_page((void *)addr), order);
2045 	}
2046 }
2047 
2048 EXPORT_SYMBOL(free_pages);
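/*
 * Hedged usage sketch (illustrative only): order 2 yields four contiguous
 * pages, i.e. 16 KiB with 4 KiB pages.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (buf) {
 *		...
 *		free_pages(buf, 2);
 *	}
 */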
2049 
2050 /**
2051  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2052  * @size: the number of bytes to allocate
2053  * @gfp_mask: GFP flags for the allocation
2054  *
2055  * This function is similar to alloc_pages(), except that it allocates the
2056  * minimum number of pages to satisfy the request.  alloc_pages() can only
2057  * allocate memory in power-of-two pages.
2058  *
2059  * This function is also limited by MAX_ORDER.
2060  *
2061  * Memory allocated by this function must be released by free_pages_exact().
2062  */
2063 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2064 {
2065 	unsigned int order = get_order(size);
2066 	unsigned long addr;
2067 
2068 	addr = __get_free_pages(gfp_mask, order);
2069 	if (addr) {
2070 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2071 		unsigned long used = addr + PAGE_ALIGN(size);
2072 
2073 		split_page(virt_to_page((void *)addr), order);
2074 		while (used < alloc_end) {
2075 			free_page(used);
2076 			used += PAGE_SIZE;
2077 		}
2078 	}
2079 
2080 	return (void *)addr;
2081 }
2082 EXPORT_SYMBOL(alloc_pages_exact);
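/*
 * Worked example (an illustration, assuming 4 KiB pages): a request for
 * 5 * PAGE_SIZE bytes rounds up to order 3 (eight pages); split_page()
 * breaks the block into order-0 pages and the three pages beyond
 * PAGE_ALIGN(size) are freed, so exactly five contiguous pages remain.
 * They must later be released with free_pages_exact(ptr, size).
 */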
2083 
2084 /**
2085  * free_pages_exact - release memory allocated via alloc_pages_exact()
2086  * @virt: the value returned by alloc_pages_exact.
2087  * @size: size of allocation, same value as passed to alloc_pages_exact().
2088  *
2089  * Release the memory allocated by a previous call to alloc_pages_exact.
2090  */
2091 void free_pages_exact(void *virt, size_t size)
2092 {
2093 	unsigned long addr = (unsigned long)virt;
2094 	unsigned long end = addr + PAGE_ALIGN(size);
2095 
2096 	while (addr < end) {
2097 		free_page(addr);
2098 		addr += PAGE_SIZE;
2099 	}
2100 }
2101 EXPORT_SYMBOL(free_pages_exact);
2102 
2103 static unsigned int nr_free_zone_pages(int offset)
2104 {
2105 	struct zoneref *z;
2106 	struct zone *zone;
2107 
2108 	/* Just pick one node, since fallback list is circular */
2109 	unsigned int sum = 0;
2110 
2111 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2112 
2113 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2114 		unsigned long size = zone->present_pages;
2115 		unsigned long high = high_wmark_pages(zone);
2116 		if (size > high)
2117 			sum += size - high;
2118 	}
2119 
2120 	return sum;
2121 }
2122 
2123 /*
2124  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2125  */
2126 unsigned int nr_free_buffer_pages(void)
2127 {
2128 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2129 }
2130 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2131 
2132 /*
2133  * Amount of free RAM allocatable within all zones
2134  */
2135 unsigned int nr_free_pagecache_pages(void)
2136 {
2137 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2138 }
2139 
2140 static inline void show_node(struct zone *zone)
2141 {
2142 	if (NUMA_BUILD)
2143 		printk("Node %d ", zone_to_nid(zone));
2144 }
2145 
2146 void si_meminfo(struct sysinfo *val)
2147 {
2148 	val->totalram = totalram_pages;
2149 	val->sharedram = 0;
2150 	val->freeram = global_page_state(NR_FREE_PAGES);
2151 	val->bufferram = nr_blockdev_pages();
2152 	val->totalhigh = totalhigh_pages;
2153 	val->freehigh = nr_free_highpages();
2154 	val->mem_unit = PAGE_SIZE;
2155 }
2156 
2157 EXPORT_SYMBOL(si_meminfo);
2158 
2159 #ifdef CONFIG_NUMA
2160 void si_meminfo_node(struct sysinfo *val, int nid)
2161 {
2162 	pg_data_t *pgdat = NODE_DATA(nid);
2163 
2164 	val->totalram = pgdat->node_present_pages;
2165 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2166 #ifdef CONFIG_HIGHMEM
2167 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2168 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2169 			NR_FREE_PAGES);
2170 #else
2171 	val->totalhigh = 0;
2172 	val->freehigh = 0;
2173 #endif
2174 	val->mem_unit = PAGE_SIZE;
2175 }
2176 #endif
2177 
2178 #define K(x) ((x) << (PAGE_SHIFT-10))
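/*
 * For example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12), K(x) is
 * simply x * 4, so K(256) pages is printed as 1024 kB.
 */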
2179 
2180 /*
2181  * Show free area list (used e.g. by the SysRq show-memory handler)
2182  * We also calculate the percentage fragmentation. We do this by counting the
2183  * memory on each free list with the exception of the first item on the list.
2184  */
2185 void show_free_areas(void)
2186 {
2187 	int cpu;
2188 	struct zone *zone;
2189 
2190 	for_each_populated_zone(zone) {
2191 		show_node(zone);
2192 		printk("%s per-cpu:\n", zone->name);
2193 
2194 		for_each_online_cpu(cpu) {
2195 			struct per_cpu_pageset *pageset;
2196 
2197 			pageset = per_cpu_ptr(zone->pageset, cpu);
2198 
2199 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2200 			       cpu, pageset->pcp.high,
2201 			       pageset->pcp.batch, pageset->pcp.count);
2202 		}
2203 	}
2204 
2205 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2206 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2207 		" unevictable:%lu"
2208 		" dirty:%lu writeback:%lu unstable:%lu\n"
2209 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2210 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2211 		global_page_state(NR_ACTIVE_ANON),
2212 		global_page_state(NR_INACTIVE_ANON),
2213 		global_page_state(NR_ISOLATED_ANON),
2214 		global_page_state(NR_ACTIVE_FILE),
2215 		global_page_state(NR_INACTIVE_FILE),
2216 		global_page_state(NR_ISOLATED_FILE),
2217 		global_page_state(NR_UNEVICTABLE),
2218 		global_page_state(NR_FILE_DIRTY),
2219 		global_page_state(NR_WRITEBACK),
2220 		global_page_state(NR_UNSTABLE_NFS),
2221 		global_page_state(NR_FREE_PAGES),
2222 		global_page_state(NR_SLAB_RECLAIMABLE),
2223 		global_page_state(NR_SLAB_UNRECLAIMABLE),
2224 		global_page_state(NR_FILE_MAPPED),
2225 		global_page_state(NR_SHMEM),
2226 		global_page_state(NR_PAGETABLE),
2227 		global_page_state(NR_BOUNCE));
2228 
2229 	for_each_populated_zone(zone) {
2230 		int i;
2231 
2232 		show_node(zone);
2233 		printk("%s"
2234 			" free:%lukB"
2235 			" min:%lukB"
2236 			" low:%lukB"
2237 			" high:%lukB"
2238 			" active_anon:%lukB"
2239 			" inactive_anon:%lukB"
2240 			" active_file:%lukB"
2241 			" inactive_file:%lukB"
2242 			" unevictable:%lukB"
2243 			" isolated(anon):%lukB"
2244 			" isolated(file):%lukB"
2245 			" present:%lukB"
2246 			" mlocked:%lukB"
2247 			" dirty:%lukB"
2248 			" writeback:%lukB"
2249 			" mapped:%lukB"
2250 			" shmem:%lukB"
2251 			" slab_reclaimable:%lukB"
2252 			" slab_unreclaimable:%lukB"
2253 			" kernel_stack:%lukB"
2254 			" pagetables:%lukB"
2255 			" unstable:%lukB"
2256 			" bounce:%lukB"
2257 			" writeback_tmp:%lukB"
2258 			" pages_scanned:%lu"
2259 			" all_unreclaimable? %s"
2260 			"\n",
2261 			zone->name,
2262 			K(zone_page_state(zone, NR_FREE_PAGES)),
2263 			K(min_wmark_pages(zone)),
2264 			K(low_wmark_pages(zone)),
2265 			K(high_wmark_pages(zone)),
2266 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2267 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2268 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2269 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2270 			K(zone_page_state(zone, NR_UNEVICTABLE)),
2271 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2272 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2273 			K(zone->present_pages),
2274 			K(zone_page_state(zone, NR_MLOCK)),
2275 			K(zone_page_state(zone, NR_FILE_DIRTY)),
2276 			K(zone_page_state(zone, NR_WRITEBACK)),
2277 			K(zone_page_state(zone, NR_FILE_MAPPED)),
2278 			K(zone_page_state(zone, NR_SHMEM)),
2279 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2280 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2281 			zone_page_state(zone, NR_KERNEL_STACK) *
2282 				THREAD_SIZE / 1024,
2283 			K(zone_page_state(zone, NR_PAGETABLE)),
2284 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2285 			K(zone_page_state(zone, NR_BOUNCE)),
2286 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2287 			zone->pages_scanned,
2288 			(zone->all_unreclaimable ? "yes" : "no")
2289 			);
2290 		printk("lowmem_reserve[]:");
2291 		for (i = 0; i < MAX_NR_ZONES; i++)
2292 			printk(" %lu", zone->lowmem_reserve[i]);
2293 		printk("\n");
2294 	}
2295 
2296 	for_each_populated_zone(zone) {
2297 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2298 
2299 		show_node(zone);
2300 		printk("%s: ", zone->name);
2301 
2302 		spin_lock_irqsave(&zone->lock, flags);
2303 		for (order = 0; order < MAX_ORDER; order++) {
2304 			nr[order] = zone->free_area[order].nr_free;
2305 			total += nr[order] << order;
2306 		}
2307 		spin_unlock_irqrestore(&zone->lock, flags);
2308 		for (order = 0; order < MAX_ORDER; order++)
2309 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2310 		printk("= %lukB\n", K(total));
2311 	}
2312 
2313 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2314 
2315 	show_swap_cache_info();
2316 }
2317 
2318 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2319 {
2320 	zoneref->zone = zone;
2321 	zoneref->zone_idx = zone_idx(zone);
2322 }
2323 
2324 /*
2325  * Builds allocation fallback zone lists.
2326  *
2327  * Add all populated zones of a node to the zonelist.
2328  */
2329 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2330 				int nr_zones, enum zone_type zone_type)
2331 {
2332 	struct zone *zone;
2333 
2334 	BUG_ON(zone_type >= MAX_NR_ZONES);
2335 	zone_type++;
2336 
2337 	do {
2338 		zone_type--;
2339 		zone = pgdat->node_zones + zone_type;
2340 		if (populated_zone(zone)) {
2341 			zoneref_set_zone(zone,
2342 				&zonelist->_zonerefs[nr_zones++]);
2343 			check_highest_zone(zone_type);
2344 		}
2345 
2346 	} while (zone_type);
2347 	return nr_zones;
2348 }
2349 
2350 
2351 /*
2352  *  zonelist_order:
2353  *  0 = automatic detection of better ordering.
2354  *  1 = order by ([node] distance, -zonetype)
2355  *  2 = order by (-zonetype, [node] distance)
2356  *
2357  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2358  *  the same zonelist. So only NUMA can configure this param.
2359  */
2360 #define ZONELIST_ORDER_DEFAULT  0
2361 #define ZONELIST_ORDER_NODE     1
2362 #define ZONELIST_ORDER_ZONE     2
2363 
2364 /* zonelist order in the kernel.
2365  * set_zonelist_order() will set this to NODE or ZONE.
2366  */
2367 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2368 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2369 
2370 
2371 #ifdef CONFIG_NUMA
2372 /* The zonelist order requested by the user, via boot option or sysctl */
2373 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2374 /* string for sysctl */
2375 #define NUMA_ZONELIST_ORDER_LEN	16
2376 char numa_zonelist_order[16] = "default";
2377 
2378 /*
2379  * interface for configuring zonelist ordering.
2380  * command line option "numa_zonelist_order"
2381  *	= "[dD]efault"	- default, automatic configuration.
2382  *	= "[nN]ode"	- order by node locality, then by zone within node
2383  *	= "[zZ]one"	- order by zone, then by locality within zone
2384  */
2385 
2386 static int __parse_numa_zonelist_order(char *s)
2387 {
2388 	if (*s == 'd' || *s == 'D') {
2389 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2390 	} else if (*s == 'n' || *s == 'N') {
2391 		user_zonelist_order = ZONELIST_ORDER_NODE;
2392 	} else if (*s == 'z' || *s == 'Z') {
2393 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2394 	} else {
2395 		printk(KERN_WARNING
2396 			"Ignoring invalid numa_zonelist_order value:  "
2397 			"%s\n", s);
2398 		return -EINVAL;
2399 	}
2400 	return 0;
2401 }
2402 
2403 static __init int setup_numa_zonelist_order(char *s)
2404 {
2405 	if (s)
2406 		return __parse_numa_zonelist_order(s);
2407 	return 0;
2408 }
2409 early_param("numa_zonelist_order", setup_numa_zonelist_order);
2410 
2411 /*
2412  * sysctl handler for numa_zonelist_order
2413  */
2414 int numa_zonelist_order_handler(ctl_table *table, int write,
2415 		void __user *buffer, size_t *length,
2416 		loff_t *ppos)
2417 {
2418 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2419 	int ret;
2420 	static DEFINE_MUTEX(zl_order_mutex);
2421 
2422 	mutex_lock(&zl_order_mutex);
2423 	if (write)
2424 		strcpy(saved_string, (char*)table->data);
2425 	ret = proc_dostring(table, write, buffer, length, ppos);
2426 	if (ret)
2427 		goto out;
2428 	if (write) {
2429 		int oldval = user_zonelist_order;
2430 		if (__parse_numa_zonelist_order((char*)table->data)) {
2431 			/*
2432 			 * bogus value.  restore saved string
2433 			 */
2434 			strncpy((char*)table->data, saved_string,
2435 				NUMA_ZONELIST_ORDER_LEN);
2436 			user_zonelist_order = oldval;
2437 		} else if (oldval != user_zonelist_order)
2438 			build_all_zonelists();
2439 	}
2440 out:
2441 	mutex_unlock(&zl_order_mutex);
2442 	return ret;
2443 }
2444 
2445 
2446 #define MAX_NODE_LOAD (nr_online_nodes)
2447 static int node_load[MAX_NUMNODES];
2448 
2449 /**
2450  * find_next_best_node - find the next node that should appear in a given node's fallback list
2451  * @node: node whose fallback list we're appending
2452  * @used_node_mask: nodemask_t of already used nodes
2453  *
2454  * We use a number of factors to determine which is the next node that should
2455  * appear on a given node's fallback list.  The node should not have appeared
2456  * already in @node's fallback list, and it should be the next closest node
2457  * according to the distance array (which contains arbitrary distance values
2458  * from each node to each node in the system), and should also prefer nodes
2459  * with no CPUs, since presumably they'll have very little allocation pressure
2460  * on them otherwise.
2461  * It returns -1 if no node is found.
2462  */
2463 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2464 {
2465 	int n, val;
2466 	int min_val = INT_MAX;
2467 	int best_node = -1;
2468 	const struct cpumask *tmp = cpumask_of_node(0);
2469 
2470 	/* Use the local node if we haven't already */
2471 	if (!node_isset(node, *used_node_mask)) {
2472 		node_set(node, *used_node_mask);
2473 		return node;
2474 	}
2475 
2476 	for_each_node_state(n, N_HIGH_MEMORY) {
2477 
2478 		/* Don't want a node to appear more than once */
2479 		if (node_isset(n, *used_node_mask))
2480 			continue;
2481 
2482 		/* Use the distance array to find the distance */
2483 		val = node_distance(node, n);
2484 
2485 		/* Penalize nodes under us ("prefer the next node") */
2486 		val += (n < node);
2487 
2488 		/* Give preference to headless and unused nodes */
2489 		tmp = cpumask_of_node(n);
2490 		if (!cpumask_empty(tmp))
2491 			val += PENALTY_FOR_NODE_WITH_CPUS;
2492 
2493 		/* Slight preference for less loaded node */
2494 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2495 		val += node_load[n];
2496 
2497 		if (val < min_val) {
2498 			min_val = val;
2499 			best_node = n;
2500 		}
2501 	}
2502 
2503 	if (best_node >= 0)
2504 		node_set(best_node, *used_node_mask);
2505 
2506 	return best_node;
2507 }
2508 
2509 
2510 /*
2511  * Build zonelists ordered by node and zones within node.
2512  * This results in maximum locality--normal zone overflows into local
2513  * DMA zone, if any--but risks exhausting DMA zone.
2514  */
2515 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2516 {
2517 	int j;
2518 	struct zonelist *zonelist;
2519 
2520 	zonelist = &pgdat->node_zonelists[0];
2521 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2522 		;
2523 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2524 							MAX_NR_ZONES - 1);
2525 	zonelist->_zonerefs[j].zone = NULL;
2526 	zonelist->_zonerefs[j].zone_idx = 0;
2527 }
2528 
2529 /*
2530  * Build gfp_thisnode zonelists
2531  */
2532 static void build_thisnode_zonelists(pg_data_t *pgdat)
2533 {
2534 	int j;
2535 	struct zonelist *zonelist;
2536 
2537 	zonelist = &pgdat->node_zonelists[1];
2538 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2539 	zonelist->_zonerefs[j].zone = NULL;
2540 	zonelist->_zonerefs[j].zone_idx = 0;
2541 }
2542 
2543 /*
2544  * Build zonelists ordered by zone and nodes within zones.
2545  * This results in conserving DMA zone[s] until all Normal memory is
2546  * exhausted, but results in overflowing to remote node while memory
2547  * may still exist in local DMA zone.
2548  */
2549 static int node_order[MAX_NUMNODES];
2550 
2551 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2552 {
2553 	int pos, j, node;
2554 	int zone_type;		/* needs to be signed */
2555 	struct zone *z;
2556 	struct zonelist *zonelist;
2557 
2558 	zonelist = &pgdat->node_zonelists[0];
2559 	pos = 0;
2560 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2561 		for (j = 0; j < nr_nodes; j++) {
2562 			node = node_order[j];
2563 			z = &NODE_DATA(node)->node_zones[zone_type];
2564 			if (populated_zone(z)) {
2565 				zoneref_set_zone(z,
2566 					&zonelist->_zonerefs[pos++]);
2567 				check_highest_zone(zone_type);
2568 			}
2569 		}
2570 	}
2571 	zonelist->_zonerefs[pos].zone = NULL;
2572 	zonelist->_zonerefs[pos].zone_idx = 0;
2573 }
2574 
2575 static int default_zonelist_order(void)
2576 {
2577 	int nid, zone_type;
2578 	unsigned long low_kmem_size,total_size;
2579 	struct zone *z;
2580 	int average_size;
2581 	/*
2582 	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2583 	 * If they are really small and used heavily, the system can fall
2584 	 * into OOM very easily.
2585 	 * This function detects the ZONE_DMA/DMA32 size and configures zone order.
2586 	 */
2587 	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2588 	low_kmem_size = 0;
2589 	total_size = 0;
2590 	for_each_online_node(nid) {
2591 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2592 			z = &NODE_DATA(nid)->node_zones[zone_type];
2593 			if (populated_zone(z)) {
2594 				if (zone_type < ZONE_NORMAL)
2595 					low_kmem_size += z->present_pages;
2596 				total_size += z->present_pages;
2597 			}
2598 		}
2599 	}
2600 	if (!low_kmem_size ||  /* there is no DMA area. */
2601 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2602 		return ZONELIST_ORDER_NODE;
2603 	/*
2604 	 * Look into each node's config.
2605 	 * If there is a node whose DMA/DMA32 memory makes up a very large part
2606 	 * of its local memory, ZONELIST_ORDER_NODE may be suitable.
2607 	 */
2608 	average_size = total_size /
2609 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2610 	for_each_online_node(nid) {
2611 		low_kmem_size = 0;
2612 		total_size = 0;
2613 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2614 			z = &NODE_DATA(nid)->node_zones[zone_type];
2615 			if (populated_zone(z)) {
2616 				if (zone_type < ZONE_NORMAL)
2617 					low_kmem_size += z->present_pages;
2618 				total_size += z->present_pages;
2619 			}
2620 		}
2621 		if (low_kmem_size &&
2622 		    total_size > average_size && /* ignore small node */
2623 		    low_kmem_size > total_size * 70/100)
2624 			return ZONELIST_ORDER_NODE;
2625 	}
2626 	return ZONELIST_ORDER_ZONE;
2627 }
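/*
 * Illustrative example (assumed figures, not from the original source):
 * on a hypothetical x86_64 box with 8 GiB of RAM, of which roughly 3 GiB
 * sits in ZONE_DMA/DMA32, the low zones are neither absent nor more than
 * half of memory, and they are below 70% of the node's total, so the
 * function above picks ZONELIST_ORDER_ZONE (conserve low memory).
 */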
2628 
2629 static void set_zonelist_order(void)
2630 {
2631 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2632 		current_zonelist_order = default_zonelist_order();
2633 	else
2634 		current_zonelist_order = user_zonelist_order;
2635 }
2636 
2637 static void build_zonelists(pg_data_t *pgdat)
2638 {
2639 	int j, node, load;
2640 	enum zone_type i;
2641 	nodemask_t used_mask;
2642 	int local_node, prev_node;
2643 	struct zonelist *zonelist;
2644 	int order = current_zonelist_order;
2645 
2646 	/* initialize zonelists */
2647 	for (i = 0; i < MAX_ZONELISTS; i++) {
2648 		zonelist = pgdat->node_zonelists + i;
2649 		zonelist->_zonerefs[0].zone = NULL;
2650 		zonelist->_zonerefs[0].zone_idx = 0;
2651 	}
2652 
2653 	/* NUMA-aware ordering of nodes */
2654 	local_node = pgdat->node_id;
2655 	load = nr_online_nodes;
2656 	prev_node = local_node;
2657 	nodes_clear(used_mask);
2658 
2659 	memset(node_order, 0, sizeof(node_order));
2660 	j = 0;
2661 
2662 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2663 		int distance = node_distance(local_node, node);
2664 
2665 		/*
2666 		 * If another node is sufficiently far away then it is better
2667 		 * to reclaim pages in a zone before going off node.
2668 		 */
2669 		if (distance > RECLAIM_DISTANCE)
2670 			zone_reclaim_mode = 1;
2671 
2672 		/*
2673 		 * We don't want to pressure a particular node.
2674 		 * So adding penalty to the first node in same
2675 		 * distance group to make it round-robin.
2676 		 */
2677 		if (distance != node_distance(local_node, prev_node))
2678 			node_load[node] = load;
2679 
2680 		prev_node = node;
2681 		load--;
2682 		if (order == ZONELIST_ORDER_NODE)
2683 			build_zonelists_in_node_order(pgdat, node);
2684 		else
2685 			node_order[j++] = node;	/* remember order */
2686 	}
2687 
2688 	if (order == ZONELIST_ORDER_ZONE) {
2689 		/* calculate node order -- i.e., DMA last! */
2690 		build_zonelists_in_zone_order(pgdat, j);
2691 	}
2692 
2693 	build_thisnode_zonelists(pgdat);
2694 }
2695 
2696 /* Construct the zonelist performance cache - see mmzone.h for details */
2697 static void build_zonelist_cache(pg_data_t *pgdat)
2698 {
2699 	struct zonelist *zonelist;
2700 	struct zonelist_cache *zlc;
2701 	struct zoneref *z;
2702 
2703 	zonelist = &pgdat->node_zonelists[0];
2704 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2705 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2706 	for (z = zonelist->_zonerefs; z->zone; z++)
2707 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2708 }
2709 
2710 
2711 #else	/* CONFIG_NUMA */
2712 
2713 static void set_zonelist_order(void)
2714 {
2715 	current_zonelist_order = ZONELIST_ORDER_ZONE;
2716 }
2717 
2718 static void build_zonelists(pg_data_t *pgdat)
2719 {
2720 	int node, local_node;
2721 	enum zone_type j;
2722 	struct zonelist *zonelist;
2723 
2724 	local_node = pgdat->node_id;
2725 
2726 	zonelist = &pgdat->node_zonelists[0];
2727 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2728 
2729 	/*
2730 	 * Now we build the zonelist so that it contains the zones
2731 	 * of all the other nodes.
2732 	 * We don't want to pressure a particular node, so when
2733 	 * building the zones for node N, we make sure that the
2734 	 * zones coming right after the local ones are those from
2735 	 * node N+1 (modulo N)
2736 	 */
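	/*
	 * Example (illustrative): with four online nodes and local_node == 2,
	 * the loops below append the remaining nodes in the order 3, 0, 1,
	 * giving an overall fallback order of 2, 3, 0, 1.
	 */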
2737 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2738 		if (!node_online(node))
2739 			continue;
2740 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2741 							MAX_NR_ZONES - 1);
2742 	}
2743 	for (node = 0; node < local_node; node++) {
2744 		if (!node_online(node))
2745 			continue;
2746 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2747 							MAX_NR_ZONES - 1);
2748 	}
2749 
2750 	zonelist->_zonerefs[j].zone = NULL;
2751 	zonelist->_zonerefs[j].zone_idx = 0;
2752 }
2753 
2754 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2755 static void build_zonelist_cache(pg_data_t *pgdat)
2756 {
2757 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2758 }
2759 
2760 #endif	/* CONFIG_NUMA */
2761 
2762 /*
2763  * Boot pageset table. One per cpu which is going to be used for all
2764  * zones and all nodes. The parameters will be set in such a way
2765  * that an item put on a list will immediately be handed over to
2766  * the buddy list. This is safe since pageset manipulation is done
2767  * with interrupts disabled.
2768  *
2769  * The boot_pagesets must be kept even after bootup is complete for
2770  * unused processors and/or zones. They do play a role for bootstrapping
2771  * hotplugged processors.
2772  *
2773  * zoneinfo_show() and maybe other functions do
2774  * not check if the processor is online before following the pageset pointer.
2775  * Other parts of the kernel may not check if the zone is available.
2776  */
2777 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2778 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
2779 
2780 /* The return type is int only because stop_machine() requires it */
2781 static int __build_all_zonelists(void *dummy)
2782 {
2783 	int nid;
2784 	int cpu;
2785 
2786 #ifdef CONFIG_NUMA
2787 	memset(node_load, 0, sizeof(node_load));
2788 #endif
2789 	for_each_online_node(nid) {
2790 		pg_data_t *pgdat = NODE_DATA(nid);
2791 
2792 		build_zonelists(pgdat);
2793 		build_zonelist_cache(pgdat);
2794 	}
2795 
2796 	/*
2797 	 * Initialize the boot_pagesets that are going to be used
2798 	 * for bootstrapping processors. The real pagesets for
2799 	 * each zone will be allocated later when the per cpu
2800 	 * allocator is available.
2801 	 *
2802 	 * boot_pagesets are used also for bootstrapping offline
2803 	 * cpus if the system is already booted because the pagesets
2804 	 * are needed to initialize allocators on a specific cpu too.
2805 	 * F.e. the percpu allocator needs the page allocator which
2806 	 * needs the percpu allocator in order to allocate its pagesets
2807 	 * (a chicken-egg dilemma).
2808 	 */
2809 	for_each_possible_cpu(cpu)
2810 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
2811 
2812 	return 0;
2813 }
2814 
2815 void build_all_zonelists(void)
2816 {
2817 	set_zonelist_order();
2818 
2819 	if (system_state == SYSTEM_BOOTING) {
2820 		__build_all_zonelists(NULL);
2821 		mminit_verify_zonelist();
2822 		cpuset_init_current_mems_allowed();
2823 	} else {
2824 		/* we have to stop all cpus to guarantee there is no user
2825 		   of zonelist */
2826 		stop_machine(__build_all_zonelists, NULL, NULL);
2827 		/* cpuset refresh routine should be here */
2828 	}
2829 	vm_total_pages = nr_free_pagecache_pages();
2830 	/*
2831 	 * Disable grouping by mobility if the number of pages in the
2832 	 * system is too low to allow the mechanism to work. It would be
2833 	 * more accurate, but expensive to check per-zone. This check is
2834 	 * made on memory-hotadd so a system can start with mobility
2835 	 * disabled and enable it later
2836 	 */
2837 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2838 		page_group_by_mobility_disabled = 1;
2839 	else
2840 		page_group_by_mobility_disabled = 0;
2841 
2842 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2843 		"Total pages: %ld\n",
2844 			nr_online_nodes,
2845 			zonelist_order_name[current_zonelist_order],
2846 			page_group_by_mobility_disabled ? "off" : "on",
2847 			vm_total_pages);
2848 #ifdef CONFIG_NUMA
2849 	printk("Policy zone: %s\n", zone_names[policy_zone]);
2850 #endif
2851 }
2852 
2853 /*
2854  * Helper functions to size the waitqueue hash table.
2855  * Essentially these want to choose hash table sizes sufficiently
2856  * large so that collisions trying to wait on pages are rare.
2857  * But in fact, the number of active page waitqueues on typical
2858  * systems is ridiculously low, less than 200, so this is if anything
2859  * conservative, even though it seems large.
2860  *
2861  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2862  * waitqueues, i.e. the size of the waitq table given the number of pages.
2863  */
2864 #define PAGES_PER_WAITQUEUE	256
2865 
2866 #ifndef CONFIG_MEMORY_HOTPLUG
2867 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2868 {
2869 	unsigned long size = 1;
2870 
2871 	pages /= PAGES_PER_WAITQUEUE;
2872 
2873 	while (size < pages)
2874 		size <<= 1;
2875 
2876 	/*
2877 	 * Once we have dozens or even hundreds of threads sleeping
2878 	 * on IO we've got bigger problems than wait queue collision.
2879 	 * Limit the size of the wait table to a reasonable size.
2880 	 */
2881 	size = min(size, 4096UL);
2882 
2883 	return max(size, 4UL);
2884 }
2885 #else
2886 /*
2887  * A zone's size might be changed by hot-add, so it is not possible to determine
2888  * a suitable size for its wait_table.  So we use the maximum size now.
2889  *
2890  * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
2891  *
2892  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2893  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2894  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2895  *
2896  * The maximum number of entries is reached once a zone has (512K + 256) pages
2897  * or more under the traditional sizing above.  That corresponds to:
2898  *
2899  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2900  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2901  *    powerpc (64K page size)             : =  (32G +16M)byte.
2902  */
2903 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2904 {
2905 	return 4096UL;
2906 }
2907 #endif
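/*
 * Worked example for the !CONFIG_MEMORY_HOTPLUG variant above (illustrative):
 * a 1 GiB zone with 4 KiB pages has 262144 pages; 262144 / 256 = 1024, which
 * rounded up to a power of two is 1024 itself, well within the [4, 4096]
 * clamp, so the zone gets 1024 wait queue heads.
 */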
2908 
2909 /*
2910  * This is an integer logarithm so that shifts can be used later
2911  * to extract the more random high bits from the multiplicative
2912  * hash function before the remainder is taken.
2913  */
2914 static inline unsigned long wait_table_bits(unsigned long size)
2915 {
2916 	return ffz(~size);
2917 }
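/*
 * For example (illustrative): a table of 1024 entries gives
 * wait_table_bits() == 10, since ffz(~1024) finds bit 10, the only zero
 * bit in ~1024.
 */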
2918 
2919 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2920 
2921 /*
2922  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2923  * of blocks reserved is based on min_wmark_pages(zone). The memory within
2924  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2925  * higher will lead to a bigger reserve which will get freed as contiguous
2926  * blocks as reclaim kicks in
2927  */
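/*
 * Worked example (illustrative, figures assumed): with pageblock_nr_pages
 * == 512 and min_wmark_pages(zone) == 1500, roundup() gives 1536 pages,
 * i.e. 3 pageblocks, which the min(2, reserve) below caps at 2 reserved
 * MIGRATE_RESERVE blocks.
 */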
2928 static void setup_zone_migrate_reserve(struct zone *zone)
2929 {
2930 	unsigned long start_pfn, pfn, end_pfn;
2931 	struct page *page;
2932 	unsigned long block_migratetype;
2933 	int reserve;
2934 
2935 	/* Get the start pfn, end pfn and the number of blocks to reserve */
2936 	start_pfn = zone->zone_start_pfn;
2937 	end_pfn = start_pfn + zone->spanned_pages;
2938 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2939 							pageblock_order;
2940 
2941 	/*
2942 	 * Reserve blocks are generally in place to help high-order atomic
2943 	 * allocations that are short-lived. A min_free_kbytes value that
2944 	 * would result in more than 2 reserve blocks for atomic allocations
2945 	 * is assumed to be in place to help anti-fragmentation for the
2946 	 * future allocation of hugepages at runtime.
2947 	 */
2948 	reserve = min(2, reserve);
2949 
2950 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2951 		if (!pfn_valid(pfn))
2952 			continue;
2953 		page = pfn_to_page(pfn);
2954 
2955 		/* Watch out for overlapping nodes */
2956 		if (page_to_nid(page) != zone_to_nid(zone))
2957 			continue;
2958 
2959 		/* Blocks with reserved pages will never free, skip them. */
2960 		if (PageReserved(page))
2961 			continue;
2962 
2963 		block_migratetype = get_pageblock_migratetype(page);
2964 
2965 		/* If this block is reserved, account for it */
2966 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2967 			reserve--;
2968 			continue;
2969 		}
2970 
2971 		/* Suitable for reserving if this block is movable */
2972 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2973 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2974 			move_freepages_block(zone, page, MIGRATE_RESERVE);
2975 			reserve--;
2976 			continue;
2977 		}
2978 
2979 		/*
2980 		 * If the reserve is met and this is a previous reserved block,
2981 		 * take it back
2982 		 */
2983 		if (block_migratetype == MIGRATE_RESERVE) {
2984 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2985 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2986 		}
2987 	}
2988 }
2989 
2990 /*
2991  * Initially all pages are reserved - free ones are freed
2992  * up by free_all_bootmem() once the early boot process is
2993  * done. Non-atomic initialization, single-pass.
2994  */
2995 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2996 		unsigned long start_pfn, enum memmap_context context)
2997 {
2998 	struct page *page;
2999 	unsigned long end_pfn = start_pfn + size;
3000 	unsigned long pfn;
3001 	struct zone *z;
3002 
3003 	if (highest_memmap_pfn < end_pfn - 1)
3004 		highest_memmap_pfn = end_pfn - 1;
3005 
3006 	z = &NODE_DATA(nid)->node_zones[zone];
3007 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3008 		/*
3009 		 * There can be holes in boot-time mem_map[]s
3010 		 * handed to this function.  They do not
3011 		 * exist on hotplugged memory.
3012 		 */
3013 		if (context == MEMMAP_EARLY) {
3014 			if (!early_pfn_valid(pfn))
3015 				continue;
3016 			if (!early_pfn_in_nid(pfn, nid))
3017 				continue;
3018 		}
3019 		page = pfn_to_page(pfn);
3020 		set_page_links(page, zone, nid, pfn);
3021 		mminit_verify_page_links(page, zone, nid, pfn);
3022 		init_page_count(page);
3023 		reset_page_mapcount(page);
3024 		SetPageReserved(page);
3025 		/*
3026 		 * Mark the block movable so that blocks are reserved for
3027 		 * movable at startup. This will force kernel allocations
3028 		 * to reserve their blocks rather than leaking throughout
3029 		 * the address space during boot when many long-lived
3030 		 * kernel allocations are made. Later some blocks near
3031 		 * the start are marked MIGRATE_RESERVE by
3032 		 * setup_zone_migrate_reserve()
3033 		 *
3034 		 * bitmap is created for zone's valid pfn range. but memmap
3035 		 * can be created for invalid pages (for alignment)
3036 		 * check here not to call set_pageblock_migratetype() against
3037 		 * pfn out of zone.
3038 		 */
3039 		if ((z->zone_start_pfn <= pfn)
3040 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
3041 		    && !(pfn & (pageblock_nr_pages - 1)))
3042 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3043 
3044 		INIT_LIST_HEAD(&page->lru);
3045 #ifdef WANT_PAGE_VIRTUAL
3046 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
3047 		if (!is_highmem_idx(zone))
3048 			set_page_address(page, __va(pfn << PAGE_SHIFT));
3049 #endif
3050 	}
3051 }
3052 
3053 static void __meminit zone_init_free_lists(struct zone *zone)
3054 {
3055 	int order, t;
3056 	for_each_migratetype_order(order, t) {
3057 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3058 		zone->free_area[order].nr_free = 0;
3059 	}
3060 }
3061 
3062 #ifndef __HAVE_ARCH_MEMMAP_INIT
3063 #define memmap_init(size, nid, zone, start_pfn) \
3064 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3065 #endif
3066 
3067 static int zone_batchsize(struct zone *zone)
3068 {
3069 #ifdef CONFIG_MMU
3070 	int batch;
3071 
3072 	/*
3073 	 * The per-cpu-pages pools are set to around 1000th of the
3074 	 * size of the zone.  But no more than 1/2 of a meg.
3075 	 *
3076 	 * OK, so we don't know how big the cache is.  So guess.
3077 	 */
3078 	batch = zone->present_pages / 1024;
3079 	if (batch * PAGE_SIZE > 512 * 1024)
3080 		batch = (512 * 1024) / PAGE_SIZE;
3081 	batch /= 4;		/* We effectively *= 4 below */
3082 	if (batch < 1)
3083 		batch = 1;
3084 
3085 	/*
3086 	 * Clamp the batch to a 2^n - 1 value. Having a power
3087 	 * of 2 value was found to be more likely to have
3088 	 * suboptimal cache aliasing properties in some cases.
3089 	 *
3090 	 * For example if 2 tasks are alternately allocating
3091 	 * batches of pages, one task can end up with a lot
3092 	 * of pages of one half of the possible page colors
3093 	 * and the other with pages of the other colors.
3094 	 */
3095 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3096 
3097 	return batch;
3098 
3099 #else
3100 	/* The deferral and batching of frees should be suppressed under NOMMU
3101 	 * conditions.
3102 	 *
3103 	 * The problem is that NOMMU needs to be able to allocate large chunks
3104 	 * of contiguous memory as there's no hardware page translation to
3105 	 * assemble apparent contiguous memory from discontiguous pages.
3106 	 *
3107 	 * Queueing large contiguous runs of pages for batching, however,
3108 	 * causes the pages to actually be freed in smaller chunks.  As there
3109 	 * can be a significant delay between the individual batches being
3110 	 * recycled, this leads to the once large chunks of space being
3111 	 * fragmented and becoming unavailable for high-order allocations.
3112 	 */
3113 	return 0;
3114 #endif
3115 }
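/*
 * Worked example (illustrative, assuming CONFIG_MMU and 4 KiB pages): a
 * 1 GiB zone has 262144 present pages, so batch starts at 256, is capped
 * at 128 by the 512 KiB limit, becomes 32 after the division by four, and
 * rounddown_pow_of_two(32 + 16) - 1 yields a final batch of 31.
 * setup_pageset() below then uses pcp->high = 6 * 31 = 186.
 */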
3116 
3117 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3118 {
3119 	struct per_cpu_pages *pcp;
3120 	int migratetype;
3121 
3122 	memset(p, 0, sizeof(*p));
3123 
3124 	pcp = &p->pcp;
3125 	pcp->count = 0;
3126 	pcp->high = 6 * batch;
3127 	pcp->batch = max(1UL, 1 * batch);
3128 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3129 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3130 }
3131 
3132 /*
3133  * setup_pagelist_highmark() sets the high water mark of the hot per-cpu
3134  * page list of pageset p to the value high.
3135  */
3136 
3137 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3138 				unsigned long high)
3139 {
3140 	struct per_cpu_pages *pcp;
3141 
3142 	pcp = &p->pcp;
3143 	pcp->high = high;
3144 	pcp->batch = max(1UL, high/4);
3145 	if ((high/4) > (PAGE_SHIFT * 8))
3146 		pcp->batch = PAGE_SHIFT * 8;
3147 }
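/*
 * Illustrative example (assumed figures): with percpu_pagelist_fraction == 8
 * and a zone of 262144 present pages, setup_per_cpu_pageset() below passes
 * high == 32768; high/4 exceeds PAGE_SHIFT * 8 (96 with 4 KiB pages), so the
 * batch is clamped to 96.
 */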
3148 
3149 /*
3150  * Allocate per cpu pagesets and initialize them.
3151  * Before this call only boot pagesets were available.
3152  * Boot pagesets will no longer be used by this processor
3153  * after setup_per_cpu_pageset().
3154  */
3155 void __init setup_per_cpu_pageset(void)
3156 {
3157 	struct zone *zone;
3158 	int cpu;
3159 
3160 	for_each_populated_zone(zone) {
3161 		zone->pageset = alloc_percpu(struct per_cpu_pageset);
3162 
3163 		for_each_possible_cpu(cpu) {
3164 			struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3165 
3166 			setup_pageset(pcp, zone_batchsize(zone));
3167 
3168 			if (percpu_pagelist_fraction)
3169 				setup_pagelist_highmark(pcp,
3170 					(zone->present_pages /
3171 						percpu_pagelist_fraction));
3172 		}
3173 	}
3174 }
3175 
3176 static noinline __init_refok
3177 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3178 {
3179 	int i;
3180 	struct pglist_data *pgdat = zone->zone_pgdat;
3181 	size_t alloc_size;
3182 
3183 	/*
3184 	 * The per-page waitqueue mechanism uses hashed waitqueues
3185 	 * per zone.
3186 	 */
3187 	zone->wait_table_hash_nr_entries =
3188 		 wait_table_hash_nr_entries(zone_size_pages);
3189 	zone->wait_table_bits =
3190 		wait_table_bits(zone->wait_table_hash_nr_entries);
3191 	alloc_size = zone->wait_table_hash_nr_entries
3192 					* sizeof(wait_queue_head_t);
3193 
3194 	if (!slab_is_available()) {
3195 		zone->wait_table = (wait_queue_head_t *)
3196 			alloc_bootmem_node(pgdat, alloc_size);
3197 	} else {
3198 		/*
3199 		 * This case means that a zone whose size was 0 gets new memory
3200 		 * via memory hot-add.
3201 		 * But it may also be that a whole new node was hot-added.  In
3202 		 * that case vmalloc() cannot yet allocate from the new node's
3203 		 * memory, even though this wait_table should ideally live on
3204 		 * the new node itself.
3205 		 * Making use of the new node's memory here will require further
3206 		 * work.
3207 		 */
3208 		zone->wait_table = vmalloc(alloc_size);
3209 	}
3210 	if (!zone->wait_table)
3211 		return -ENOMEM;
3212 
3213 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3214 		init_waitqueue_head(zone->wait_table + i);
3215 
3216 	return 0;
3217 }
3218 
3219 static int __zone_pcp_update(void *data)
3220 {
3221 	struct zone *zone = data;
3222 	int cpu;
3223 	unsigned long batch = zone_batchsize(zone), flags;
3224 
3225 	for_each_possible_cpu(cpu) {
3226 		struct per_cpu_pageset *pset;
3227 		struct per_cpu_pages *pcp;
3228 
3229 		pset = per_cpu_ptr(zone->pageset, cpu);
3230 		pcp = &pset->pcp;
3231 
3232 		local_irq_save(flags);
3233 		free_pcppages_bulk(zone, pcp->count, pcp);
3234 		setup_pageset(pset, batch);
3235 		local_irq_restore(flags);
3236 	}
3237 	return 0;
3238 }
3239 
3240 void zone_pcp_update(struct zone *zone)
3241 {
3242 	stop_machine(__zone_pcp_update, zone, NULL);
3243 }
3244 
3245 static __meminit void zone_pcp_init(struct zone *zone)
3246 {
3247 	/*
3248 	 * per cpu subsystem is not up at this point. The following code
3249 	 * relies on the ability of the linker to provide the
3250 	 * offset of a (static) per cpu variable into the per cpu area.
3251 	 */
3252 	zone->pageset = &boot_pageset;
3253 
3254 	if (zone->present_pages)
3255 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3256 			zone->name, zone->present_pages,
3257 					 zone_batchsize(zone));
3258 }
3259 
3260 __meminit int init_currently_empty_zone(struct zone *zone,
3261 					unsigned long zone_start_pfn,
3262 					unsigned long size,
3263 					enum memmap_context context)
3264 {
3265 	struct pglist_data *pgdat = zone->zone_pgdat;
3266 	int ret;
3267 	ret = zone_wait_table_init(zone, size);
3268 	if (ret)
3269 		return ret;
3270 	pgdat->nr_zones = zone_idx(zone) + 1;
3271 
3272 	zone->zone_start_pfn = zone_start_pfn;
3273 
3274 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3275 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3276 			pgdat->node_id,
3277 			(unsigned long)zone_idx(zone),
3278 			zone_start_pfn, (zone_start_pfn + size));
3279 
3280 	zone_init_free_lists(zone);
3281 
3282 	return 0;
3283 }
3284 
3285 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3286 /*
3287  * Basic iterator support. Return the first range of PFNs for a node
3288  * Note: nid == MAX_NUMNODES returns first region regardless of node
3289  */
3290 static int __meminit first_active_region_index_in_nid(int nid)
3291 {
3292 	int i;
3293 
3294 	for (i = 0; i < nr_nodemap_entries; i++)
3295 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3296 			return i;
3297 
3298 	return -1;
3299 }
3300 
3301 /*
3302  * Basic iterator support. Return the next active range of PFNs for a node
3303  * Note: nid == MAX_NUMNODES returns next region regardless of node
3304  */
3305 static int __meminit next_active_region_index_in_nid(int index, int nid)
3306 {
3307 	for (index = index + 1; index < nr_nodemap_entries; index++)
3308 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3309 			return index;
3310 
3311 	return -1;
3312 }
3313 
3314 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3315 /*
3316  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3317  * Architectures may implement their own version but if add_active_range()
3318  * was used and there are no special requirements, this is a convenient
3319  * alternative
3320  */
3321 int __meminit __early_pfn_to_nid(unsigned long pfn)
3322 {
3323 	int i;
3324 
3325 	for (i = 0; i < nr_nodemap_entries; i++) {
3326 		unsigned long start_pfn = early_node_map[i].start_pfn;
3327 		unsigned long end_pfn = early_node_map[i].end_pfn;
3328 
3329 		if (start_pfn <= pfn && pfn < end_pfn)
3330 			return early_node_map[i].nid;
3331 	}
3332 	/* This is a memory hole */
3333 	return -1;
3334 }
3335 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3336 
3337 int __meminit early_pfn_to_nid(unsigned long pfn)
3338 {
3339 	int nid;
3340 
3341 	nid = __early_pfn_to_nid(pfn);
3342 	if (nid >= 0)
3343 		return nid;
3344 	/* just returns 0 */
3345 	return 0;
3346 }
3347 
3348 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3349 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3350 {
3351 	int nid;
3352 
3353 	nid = __early_pfn_to_nid(pfn);
3354 	if (nid >= 0 && nid != node)
3355 		return false;
3356 	return true;
3357 }
3358 #endif
3359 
3360 /* Basic iterator support to walk early_node_map[] */
3361 #define for_each_active_range_index_in_nid(i, nid) \
3362 	for (i = first_active_region_index_in_nid(nid); i != -1; \
3363 				i = next_active_region_index_in_nid(i, nid))
3364 
3365 /**
3366  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3367  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3368  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3369  *
3370  * If an architecture guarantees that all ranges registered with
3371  * add_active_ranges() contain no holes and may be freed, this
3372  * function may be used instead of calling free_bootmem() manually.
3373  */
3374 void __init free_bootmem_with_active_regions(int nid,
3375 						unsigned long max_low_pfn)
3376 {
3377 	int i;
3378 
3379 	for_each_active_range_index_in_nid(i, nid) {
3380 		unsigned long size_pages = 0;
3381 		unsigned long end_pfn = early_node_map[i].end_pfn;
3382 
3383 		if (early_node_map[i].start_pfn >= max_low_pfn)
3384 			continue;
3385 
3386 		if (end_pfn > max_low_pfn)
3387 			end_pfn = max_low_pfn;
3388 
3389 		size_pages = end_pfn - early_node_map[i].start_pfn;
3390 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3391 				PFN_PHYS(early_node_map[i].start_pfn),
3392 				size_pages << PAGE_SHIFT);
3393 	}
3394 }
3395 
3396 int __init add_from_early_node_map(struct range *range, int az,
3397 				   int nr_range, int nid)
3398 {
3399 	int i;
3400 	u64 start, end;
3401 
3402 	/* need to go over early_node_map to find a good range for the node */
3403 	for_each_active_range_index_in_nid(i, nid) {
3404 		start = early_node_map[i].start_pfn;
3405 		end = early_node_map[i].end_pfn;
3406 		nr_range = add_range(range, az, nr_range, start, end);
3407 	}
3408 	return nr_range;
3409 }
3410 
3411 #ifdef CONFIG_NO_BOOTMEM
3412 void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3413 					u64 goal, u64 limit)
3414 {
3415 	int i;
3416 	void *ptr;
3417 
3418 	/* need to go over early_node_map to find a good range for the node */
3419 	for_each_active_range_index_in_nid(i, nid) {
3420 		u64 addr;
3421 		u64 ei_start, ei_last;
3422 
3423 		ei_last = early_node_map[i].end_pfn;
3424 		ei_last <<= PAGE_SHIFT;
3425 		ei_start = early_node_map[i].start_pfn;
3426 		ei_start <<= PAGE_SHIFT;
3427 		addr = find_early_area(ei_start, ei_last,
3428 					 goal, limit, size, align);
3429 
3430 		if (addr == -1ULL)
3431 			continue;
3432 
3433 #if 0
3434 		printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
3435 				nid,
3436 				ei_start, ei_last, goal, limit, size,
3437 				align, addr);
3438 #endif
3439 
3440 		ptr = phys_to_virt(addr);
3441 		memset(ptr, 0, size);
3442 		reserve_early_without_check(addr, addr + size, "BOOTMEM");
3443 		return ptr;
3444 	}
3445 
3446 	return NULL;
3447 }
3448 #endif
3449 
3450 
3451 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3452 {
3453 	int i;
3454 	int ret;
3455 
3456 	for_each_active_range_index_in_nid(i, nid) {
3457 		ret = work_fn(early_node_map[i].start_pfn,
3458 			      early_node_map[i].end_pfn, data);
3459 		if (ret)
3460 			break;
3461 	}
3462 }
3463 /**
3464  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3465  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3466  *
3467  * If an architecture guarantees that all ranges registered with
3468  * add_active_ranges() contain no holes and may be freed, this
3469  * function may be used instead of calling memory_present() manually.
3470  */
3471 void __init sparse_memory_present_with_active_regions(int nid)
3472 {
3473 	int i;
3474 
3475 	for_each_active_range_index_in_nid(i, nid)
3476 		memory_present(early_node_map[i].nid,
3477 				early_node_map[i].start_pfn,
3478 				early_node_map[i].end_pfn);
3479 }
3480 
3481 /**
3482  * get_pfn_range_for_nid - Return the start and end page frames for a node
3483  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3484  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3485  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3486  *
3487  * It returns the start and end page frame of a node based on information
3488  * provided by an arch calling add_active_range(). If called for a node
3489  * with no available memory, a warning is printed and the start and end
3490  * PFNs will be 0.
3491  */
3492 void __meminit get_pfn_range_for_nid(unsigned int nid,
3493 			unsigned long *start_pfn, unsigned long *end_pfn)
3494 {
3495 	int i;
3496 	*start_pfn = -1UL;
3497 	*end_pfn = 0;
3498 
3499 	for_each_active_range_index_in_nid(i, nid) {
3500 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3501 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3502 	}
3503 
3504 	if (*start_pfn == -1UL)
3505 		*start_pfn = 0;
3506 }
3507 
3508 /*
3509  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3510  * assumption is made that zones within a node are ordered by monotonically
3511  * increasing memory addresses so that the "highest" populated zone is used
3512  */
3513 static void __init find_usable_zone_for_movable(void)
3514 {
3515 	int zone_index;
3516 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3517 		if (zone_index == ZONE_MOVABLE)
3518 			continue;
3519 
3520 		if (arch_zone_highest_possible_pfn[zone_index] >
3521 				arch_zone_lowest_possible_pfn[zone_index])
3522 			break;
3523 	}
3524 
3525 	VM_BUG_ON(zone_index == -1);
3526 	movable_zone = zone_index;
3527 }
3528 
3529 /*
3530  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3531  * because it is sized independently of the architecture. Unlike the other zones,
3532  * the starting point for ZONE_MOVABLE is not fixed. It may be different
3533  * in each node depending on the size of each node and how evenly kernelcore
3534  * is distributed. This helper function adjusts the zone ranges
3535  * provided by the architecture for a given node by using the end of the
3536  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3537  * zones within a node are in order of monotonically increasing memory addresses
3538  */
3539 static void __meminit adjust_zone_range_for_zone_movable(int nid,
3540 					unsigned long zone_type,
3541 					unsigned long node_start_pfn,
3542 					unsigned long node_end_pfn,
3543 					unsigned long *zone_start_pfn,
3544 					unsigned long *zone_end_pfn)
3545 {
3546 	/* Only adjust if ZONE_MOVABLE is on this node */
3547 	if (zone_movable_pfn[nid]) {
3548 		/* Size ZONE_MOVABLE */
3549 		if (zone_type == ZONE_MOVABLE) {
3550 			*zone_start_pfn = zone_movable_pfn[nid];
3551 			*zone_end_pfn = min(node_end_pfn,
3552 				arch_zone_highest_possible_pfn[movable_zone]);
3553 
3554 		/* Adjust for ZONE_MOVABLE starting within this range */
3555 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3556 				*zone_end_pfn > zone_movable_pfn[nid]) {
3557 			*zone_end_pfn = zone_movable_pfn[nid];
3558 
3559 		/* Check if this whole range is within ZONE_MOVABLE */
3560 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3561 			*zone_start_pfn = *zone_end_pfn;
3562 	}
3563 }
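
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: suppose a node spans PFNs 0x10000-0x80000,
 * zone_movable_pfn[nid] == 0x60000 and the highest usable zone reaches at
 * least 0x80000.  The node's highest regular zone is then clipped to end at
 * 0x60000, ZONE_MOVABLE itself is sized as 0x60000-0x80000, and any zone that
 * lies entirely above 0x60000 collapses to zero pages because its start is
 * pushed up to its end.
 */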
3564 
3565 /*
3566  * Return the number of pages a zone spans in a node, including holes
3567  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3568  */
3569 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3570 					unsigned long zone_type,
3571 					unsigned long *ignored)
3572 {
3573 	unsigned long node_start_pfn, node_end_pfn;
3574 	unsigned long zone_start_pfn, zone_end_pfn;
3575 
3576 	/* Get the start and end of the node and zone */
3577 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3578 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3579 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3580 	adjust_zone_range_for_zone_movable(nid, zone_type,
3581 				node_start_pfn, node_end_pfn,
3582 				&zone_start_pfn, &zone_end_pfn);
3583 
3584 	/* Check that this node has pages within the zone's required range */
3585 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3586 		return 0;
3587 
3588 	/* Move the zone boundaries inside the node if necessary */
3589 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3590 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3591 
3592 	/* Return the spanned pages */
3593 	return zone_end_pfn - zone_start_pfn;
3594 }
3595 
3596 /*
3597  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3598  * then all holes in the requested range will be accounted for.
3599  */
3600 unsigned long __meminit __absent_pages_in_range(int nid,
3601 				unsigned long range_start_pfn,
3602 				unsigned long range_end_pfn)
3603 {
3604 	int i = 0;
3605 	unsigned long prev_end_pfn = 0, hole_pages = 0;
3606 	unsigned long start_pfn;
3607 
3608 	/* Find the end_pfn of the first active range of pfns in the node */
3609 	i = first_active_region_index_in_nid(nid);
3610 	if (i == -1)
3611 		return 0;
3612 
3613 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3614 
3615 	/* Account for ranges before physical memory on this node */
3616 	if (early_node_map[i].start_pfn > range_start_pfn)
3617 		hole_pages = prev_end_pfn - range_start_pfn;
3618 
3619 	/* Find all holes for the zone within the node */
3620 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3621 
3622 		/* No need to continue if prev_end_pfn is outside the zone */
3623 		if (prev_end_pfn >= range_end_pfn)
3624 			break;
3625 
3626 		/* Make sure the end of the zone is not within the hole */
3627 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3628 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3629 
3630 		/* Update the hole size count and move on */
3631 		if (start_pfn > range_start_pfn) {
3632 			BUG_ON(prev_end_pfn > start_pfn);
3633 			hole_pages += start_pfn - prev_end_pfn;
3634 		}
3635 		prev_end_pfn = early_node_map[i].end_pfn;
3636 	}
3637 
3638 	/* Account for ranges past physical memory on this node */
3639 	if (range_end_pfn > prev_end_pfn)
3640 		hole_pages += range_end_pfn -
3641 				max(range_start_pfn, prev_end_pfn);
3642 
3643 	return hole_pages;
3644 }
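
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: if the node's active ranges are [100, 200) and
 * [300, 400) and the requested range is [0, 500), the holes counted are
 * [0, 100), [200, 300) and [400, 500), i.e. 100 + 100 + 100 = 300 pages.
 */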
3645 
3646 /**
3647  * absent_pages_in_range - Return number of page frames in holes within a range
3648  * @start_pfn: The start PFN to start searching for holes
3649  * @end_pfn: The end PFN to stop searching for holes
3650  *
3651  * It returns the number of page frames in memory holes within a range.
3652  */
3653 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3654 							unsigned long end_pfn)
3655 {
3656 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3657 }
3658 
3659 /* Return the number of page frames in holes in a zone on a node */
3660 static unsigned long __meminit zone_absent_pages_in_node(int nid,
3661 					unsigned long zone_type,
3662 					unsigned long *ignored)
3663 {
3664 	unsigned long node_start_pfn, node_end_pfn;
3665 	unsigned long zone_start_pfn, zone_end_pfn;
3666 
3667 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3668 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3669 							node_start_pfn);
3670 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3671 							node_end_pfn);
3672 
3673 	adjust_zone_range_for_zone_movable(nid, zone_type,
3674 			node_start_pfn, node_end_pfn,
3675 			&zone_start_pfn, &zone_end_pfn);
3676 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3677 }
3678 
3679 #else
3680 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3681 					unsigned long zone_type,
3682 					unsigned long *zones_size)
3683 {
3684 	return zones_size[zone_type];
3685 }
3686 
3687 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3688 						unsigned long zone_type,
3689 						unsigned long *zholes_size)
3690 {
3691 	if (!zholes_size)
3692 		return 0;
3693 
3694 	return zholes_size[zone_type];
3695 }
3696 
3697 #endif
3698 
3699 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3700 		unsigned long *zones_size, unsigned long *zholes_size)
3701 {
3702 	unsigned long realtotalpages, totalpages = 0;
3703 	enum zone_type i;
3704 
3705 	for (i = 0; i < MAX_NR_ZONES; i++)
3706 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3707 								zones_size);
3708 	pgdat->node_spanned_pages = totalpages;
3709 
3710 	realtotalpages = totalpages;
3711 	for (i = 0; i < MAX_NR_ZONES; i++)
3712 		realtotalpages -=
3713 			zone_absent_pages_in_node(pgdat->node_id, i,
3714 								zholes_size);
3715 	pgdat->node_present_pages = realtotalpages;
3716 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3717 							realtotalpages);
3718 }
3719 
3720 #ifndef CONFIG_SPARSEMEM
3721 /*
3722  * Calculate the size of the zone->blockflags rounded to an unsigned long
3723  * Start by making sure zonesize is a multiple of pageblock_nr_pages by
3724  * rounding up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
3725  * round what is now in bits up to the nearest long in bits, then return
3726  * it in bytes.
3727  */
3728 static unsigned long __init usemap_size(unsigned long zonesize)
3729 {
3730 	unsigned long usemapsize;
3731 
3732 	usemapsize = roundup(zonesize, pageblock_nr_pages);
3733 	usemapsize = usemapsize >> pageblock_order;
3734 	usemapsize *= NR_PAGEBLOCK_BITS;
3735 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3736 
3737 	return usemapsize / 8;
3738 }
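
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with pageblock_order == 9 (pageblock_nr_pages ==
 * 512), NR_PAGEBLOCK_BITS == 4 and 64-bit longs, a zone of 1,000,000 pages
 * rounds up to 1,000,448 pages == 1954 pageblocks, needing 1954 * 4 == 7816
 * bits, rounded up to 7872 bits (123 longs), i.e. 984 bytes of usemap.
 */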
3739 
3740 static void __init setup_usemap(struct pglist_data *pgdat,
3741 				struct zone *zone, unsigned long zonesize)
3742 {
3743 	unsigned long usemapsize = usemap_size(zonesize);
3744 	zone->pageblock_flags = NULL;
3745 	if (usemapsize)
3746 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3747 }
3748 #else
3749 static inline void setup_usemap(struct pglist_data *pgdat,
3750 				struct zone *zone, unsigned long zonesize) {}
3751 #endif /* CONFIG_SPARSEMEM */
3752 
3753 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3754 
3755 /* Return a sensible default order for the pageblock size. */
3756 static inline int pageblock_default_order(void)
3757 {
3758 	if (HPAGE_SHIFT > PAGE_SHIFT)
3759 		return HUGETLB_PAGE_ORDER;
3760 
3761 	return MAX_ORDER-1;
3762 }
3763 
3764 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3765 static inline void __init set_pageblock_order(unsigned int order)
3766 {
3767 	/* Check that pageblock_nr_pages has not already been setup */
3768 	if (pageblock_order)
3769 		return;
3770 
3771 	/*
3772 	 * Assume the largest contiguous order of interest is a huge page.
3773 	 * This value may be variable depending on boot parameters on IA64
3774 	 */
3775 	pageblock_order = order;
3776 }
3777 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3778 
3779 /*
3780  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3781  * and pageblock_default_order() are unused as pageblock_order is set
3782  * at compile-time. See include/linux/pageblock-flags.h for the values of
3783  * pageblock_order based on the kernel config
3784  */
3785 static inline int pageblock_default_order(unsigned int order)
3786 {
3787 	return MAX_ORDER-1;
3788 }
3789 #define set_pageblock_order(x)	do {} while (0)
3790 
3791 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3792 
3793 /*
3794  * Set up the zone data structures:
3795  *   - mark all pages reserved
3796  *   - mark all memory queues empty
3797  *   - clear the memory bitmaps
3798  */
3799 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3800 		unsigned long *zones_size, unsigned long *zholes_size)
3801 {
3802 	enum zone_type j;
3803 	int nid = pgdat->node_id;
3804 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3805 	int ret;
3806 
3807 	pgdat_resize_init(pgdat);
3808 	pgdat->nr_zones = 0;
3809 	init_waitqueue_head(&pgdat->kswapd_wait);
3810 	pgdat->kswapd_max_order = 0;
3811 	pgdat_page_cgroup_init(pgdat);
3812 
3813 	for (j = 0; j < MAX_NR_ZONES; j++) {
3814 		struct zone *zone = pgdat->node_zones + j;
3815 		unsigned long size, realsize, memmap_pages;
3816 		enum lru_list l;
3817 
3818 		size = zone_spanned_pages_in_node(nid, j, zones_size);
3819 		realsize = size - zone_absent_pages_in_node(nid, j,
3820 								zholes_size);
3821 
3822 		/*
3823 		 * Adjust realsize so that it accounts for how much memory
3824 		 * is used by this zone for memmap. This affects the watermark
3825 		 * and per-cpu initialisations
3826 		 */
3827 		memmap_pages =
3828 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3829 		if (realsize >= memmap_pages) {
3830 			realsize -= memmap_pages;
3831 			if (memmap_pages)
3832 				printk(KERN_DEBUG
3833 				       "  %s zone: %lu pages used for memmap\n",
3834 				       zone_names[j], memmap_pages);
3835 		} else
3836 			printk(KERN_WARNING
3837 				"  %s zone: %lu pages exceeds realsize %lu\n",
3838 				zone_names[j], memmap_pages, realsize);
3839 
3840 		/* Account for reserved pages */
3841 		if (j == 0 && realsize > dma_reserve) {
3842 			realsize -= dma_reserve;
3843 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3844 					zone_names[0], dma_reserve);
3845 		}
3846 
3847 		if (!is_highmem_idx(j))
3848 			nr_kernel_pages += realsize;
3849 		nr_all_pages += realsize;
3850 
3851 		zone->spanned_pages = size;
3852 		zone->present_pages = realsize;
3853 #ifdef CONFIG_NUMA
3854 		zone->node = nid;
3855 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3856 						/ 100;
3857 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3858 #endif
3859 		zone->name = zone_names[j];
3860 		spin_lock_init(&zone->lock);
3861 		spin_lock_init(&zone->lru_lock);
3862 		zone_seqlock_init(zone);
3863 		zone->zone_pgdat = pgdat;
3864 
3865 		zone->prev_priority = DEF_PRIORITY;
3866 
3867 		zone_pcp_init(zone);
3868 		for_each_lru(l) {
3869 			INIT_LIST_HEAD(&zone->lru[l].list);
3870 			zone->reclaim_stat.nr_saved_scan[l] = 0;
3871 		}
3872 		zone->reclaim_stat.recent_rotated[0] = 0;
3873 		zone->reclaim_stat.recent_rotated[1] = 0;
3874 		zone->reclaim_stat.recent_scanned[0] = 0;
3875 		zone->reclaim_stat.recent_scanned[1] = 0;
3876 		zap_zone_vm_stats(zone);
3877 		zone->flags = 0;
3878 		if (!size)
3879 			continue;
3880 
3881 		set_pageblock_order(pageblock_default_order());
3882 		setup_usemap(pgdat, zone, size);
3883 		ret = init_currently_empty_zone(zone, zone_start_pfn,
3884 						size, MEMMAP_EARLY);
3885 		BUG_ON(ret);
3886 		memmap_init(size, nid, j, zone_start_pfn);
3887 		zone_start_pfn += size;
3888 	}
3889 }
3890 
3891 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3892 {
3893 	/* Skip empty nodes */
3894 	if (!pgdat->node_spanned_pages)
3895 		return;
3896 
3897 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3898 	/* ia64 gets its own node_mem_map, before this, without bootmem */
3899 	if (!pgdat->node_mem_map) {
3900 		unsigned long size, start, end;
3901 		struct page *map;
3902 
3903 		/*
3904 		 * The zone's endpoints aren't required to be MAX_ORDER
3905 		 * aligned, but the node_mem_map endpoints must be, in order
3906 		 * for the buddy allocator to function correctly.
3907 		 */
3908 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3909 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3910 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3911 		size =  (end - start) * sizeof(struct page);
3912 		map = alloc_remap(pgdat->node_id, size);
3913 		if (!map)
3914 			map = alloc_bootmem_node(pgdat, size);
3915 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3916 	}
3917 #ifndef CONFIG_NEED_MULTIPLE_NODES
3918 	/*
3919 	 * With no DISCONTIG, the global mem_map is just set as node 0's
3920 	 */
3921 	if (pgdat == NODE_DATA(0)) {
3922 		mem_map = NODE_DATA(0)->node_mem_map;
3923 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3924 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3925 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3926 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3927 	}
3928 #endif
3929 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
3930 }
3931 
3932 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3933 		unsigned long node_start_pfn, unsigned long *zholes_size)
3934 {
3935 	pg_data_t *pgdat = NODE_DATA(nid);
3936 
3937 	pgdat->node_id = nid;
3938 	pgdat->node_start_pfn = node_start_pfn;
3939 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3940 
3941 	alloc_node_mem_map(pgdat);
3942 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3943 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3944 		nid, (unsigned long)pgdat,
3945 		(unsigned long)pgdat->node_mem_map);
3946 #endif
3947 
3948 	free_area_init_core(pgdat, zones_size, zholes_size);
3949 }
3950 
3951 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3952 
3953 #if MAX_NUMNODES > 1
3954 /*
3955  * Figure out the number of possible node ids.
3956  */
3957 static void __init setup_nr_node_ids(void)
3958 {
3959 	unsigned int node;
3960 	unsigned int highest = 0;
3961 
3962 	for_each_node_mask(node, node_possible_map)
3963 		highest = node;
3964 	nr_node_ids = highest + 1;
3965 }
3966 #else
3967 static inline void setup_nr_node_ids(void)
3968 {
3969 }
3970 #endif
3971 
3972 /**
3973  * add_active_range - Register a range of PFNs backed by physical memory
3974  * @nid: The node ID the range resides on
3975  * @start_pfn: The start PFN of the available physical memory
3976  * @end_pfn: The end PFN of the available physical memory
3977  *
3978  * These ranges are stored in an early_node_map[] and later used by
3979  * free_area_init_nodes() to calculate zone sizes and holes. If the
3980  * range spans a memory hole, it is up to the architecture to ensure
3981  * the memory is not freed by the bootmem allocator. If possible
3982  * the range being registered will be merged with existing ranges.
3983  */
3984 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3985 						unsigned long end_pfn)
3986 {
3987 	int i;
3988 
3989 	mminit_dprintk(MMINIT_TRACE, "memory_register",
3990 			"Entering add_active_range(%d, %#lx, %#lx) "
3991 			"%d entries of %d used\n",
3992 			nid, start_pfn, end_pfn,
3993 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3994 
3995 	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3996 
3997 	/* Merge with existing active regions if possible */
3998 	for (i = 0; i < nr_nodemap_entries; i++) {
3999 		if (early_node_map[i].nid != nid)
4000 			continue;
4001 
4002 		/* Skip if an existing region covers this new one */
4003 		if (start_pfn >= early_node_map[i].start_pfn &&
4004 				end_pfn <= early_node_map[i].end_pfn)
4005 			return;
4006 
4007 		/* Merge forward if suitable */
4008 		if (start_pfn <= early_node_map[i].end_pfn &&
4009 				end_pfn > early_node_map[i].end_pfn) {
4010 			early_node_map[i].end_pfn = end_pfn;
4011 			return;
4012 		}
4013 
4014 		/* Merge backward if suitable */
4015 		if (start_pfn < early_node_map[i].start_pfn &&
4016 				end_pfn >= early_node_map[i].start_pfn) {
4017 			early_node_map[i].start_pfn = start_pfn;
4018 			return;
4019 		}
4020 	}
4021 
4022 	/* Check that early_node_map is large enough */
4023 	if (i >= MAX_ACTIVE_REGIONS) {
4024 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4025 							MAX_ACTIVE_REGIONS);
4026 		return;
4027 	}
4028 
4029 	early_node_map[i].nid = nid;
4030 	early_node_map[i].start_pfn = start_pfn;
4031 	early_node_map[i].end_pfn = end_pfn;
4032 	nr_nodemap_entries = i + 1;
4033 }
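
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: registering (nid 0, 0x100, 0x200) and then
 * (nid 0, 0x180, 0x300) does not create a second entry; the second call takes
 * the "merge forward" branch and extends the existing range to 0x100-0x300.
 */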
4034 
4035 /**
4036  * remove_active_range - Shrink an existing registered range of PFNs
4037  * @nid: The node id the range is on that should be shrunk
4038  * @start_pfn: The start PFN of the range to remove
4039  * @end_pfn: The end PFN of the range to remove
4040  *
4041  * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4042  * The map is kept near the end of the physical page range that has already been
4043  * registered. This function allows an arch to shrink an existing registered
4044  * range.
4045  */
4046 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4047 				unsigned long end_pfn)
4048 {
4049 	int i, j;
4050 	int removed = 0;
4051 
4052 	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4053 			  nid, start_pfn, end_pfn);
4054 
4055 	/* Find the old active region end and shrink */
4056 	for_each_active_range_index_in_nid(i, nid) {
4057 		if (early_node_map[i].start_pfn >= start_pfn &&
4058 		    early_node_map[i].end_pfn <= end_pfn) {
4059 			/* clear it */
4060 			early_node_map[i].start_pfn = 0;
4061 			early_node_map[i].end_pfn = 0;
4062 			removed = 1;
4063 			continue;
4064 		}
4065 		if (early_node_map[i].start_pfn < start_pfn &&
4066 		    early_node_map[i].end_pfn > start_pfn) {
4067 			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4068 			early_node_map[i].end_pfn = start_pfn;
4069 			if (temp_end_pfn > end_pfn)
4070 				add_active_range(nid, end_pfn, temp_end_pfn);
4071 			continue;
4072 		}
4073 		if (early_node_map[i].start_pfn >= start_pfn &&
4074 		    early_node_map[i].end_pfn > end_pfn &&
4075 		    early_node_map[i].start_pfn < end_pfn) {
4076 			early_node_map[i].start_pfn = end_pfn;
4077 			continue;
4078 		}
4079 	}
4080 
4081 	if (!removed)
4082 		return;
4083 
4084 	/* remove the blank ones */
4085 	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4086 		if (early_node_map[i].nid != nid)
4087 			continue;
4088 		if (early_node_map[i].end_pfn)
4089 			continue;
4090 		/* we found it, get rid of it */
4091 		for (j = i; j < nr_nodemap_entries - 1; j++)
4092 			memcpy(&early_node_map[j], &early_node_map[j+1],
4093 				sizeof(early_node_map[j]));
4094 		j = nr_nodemap_entries - 1;
4095 		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4096 		nr_nodemap_entries--;
4097 	}
4098 }
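
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with a single registered range [0x100, 0x400) on
 * node 0, remove_active_range(0, 0x200, 0x300) trims the entry to
 * [0x100, 0x200) and re-registers [0x300, 0x400) via add_active_range(),
 * leaving a hole where the removed PFNs used to be.
 */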
4099 
4100 /**
4101  * remove_all_active_ranges - Remove all currently registered regions
4102  *
4103  * During discovery, it may be found that a table like SRAT is invalid
4104  * and an alternative discovery method must be used. This function removes
4105  * all currently registered regions.
4106  */
4107 void __init remove_all_active_ranges(void)
4108 {
4109 	memset(early_node_map, 0, sizeof(early_node_map));
4110 	nr_nodemap_entries = 0;
4111 }
4112 
4113 /* Compare two active node_active_regions */
4114 static int __init cmp_node_active_region(const void *a, const void *b)
4115 {
4116 	struct node_active_region *arange = (struct node_active_region *)a;
4117 	struct node_active_region *brange = (struct node_active_region *)b;
4118 
4119 	/* Done this way to avoid overflows */
4120 	if (arange->start_pfn > brange->start_pfn)
4121 		return 1;
4122 	if (arange->start_pfn < brange->start_pfn)
4123 		return -1;
4124 
4125 	return 0;
4126 }
4127 
4128 /* sort the node_map by start_pfn */
4129 void __init sort_node_map(void)
4130 {
4131 	sort(early_node_map, (size_t)nr_nodemap_entries,
4132 			sizeof(struct node_active_region),
4133 			cmp_node_active_region, NULL);
4134 }
4135 
4136 /* Find the lowest pfn for a node */
4137 static unsigned long __init find_min_pfn_for_node(int nid)
4138 {
4139 	int i;
4140 	unsigned long min_pfn = ULONG_MAX;
4141 
4142 	/* Assuming a sorted map, the first range found has the starting pfn */
4143 	for_each_active_range_index_in_nid(i, nid)
4144 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4145 
4146 	if (min_pfn == ULONG_MAX) {
4147 		printk(KERN_WARNING
4148 			"Could not find start_pfn for node %d\n", nid);
4149 		return 0;
4150 	}
4151 
4152 	return min_pfn;
4153 }
4154 
4155 /**
4156  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4157  *
4158  * It returns the minimum PFN based on information provided via
4159  * add_active_range().
4160  */
4161 unsigned long __init find_min_pfn_with_active_regions(void)
4162 {
4163 	return find_min_pfn_for_node(MAX_NUMNODES);
4164 }
4165 
4166 /*
4167  * early_calculate_totalpages()
4168  * Sum pages in active regions for movable zone.
4169  * Populate N_HIGH_MEMORY for calculating usable_nodes.
4170  */
4171 static unsigned long __init early_calculate_totalpages(void)
4172 {
4173 	int i;
4174 	unsigned long totalpages = 0;
4175 
4176 	for (i = 0; i < nr_nodemap_entries; i++) {
4177 		unsigned long pages = early_node_map[i].end_pfn -
4178 						early_node_map[i].start_pfn;
4179 		totalpages += pages;
4180 		if (pages)
4181 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4182 	}
4183 	return totalpages;
4184 }
4185 
4186 /*
4187  * Find the PFN the Movable zone begins in each node. Kernel memory
4188  * is spread evenly between nodes as long as the nodes have enough
4189  * memory. When they don't, some nodes will have more kernelcore than
4190  * others
4191  */
4192 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4193 {
4194 	int i, nid;
4195 	unsigned long usable_startpfn;
4196 	unsigned long kernelcore_node, kernelcore_remaining;
4197 	/* save the state before borrowing the nodemask */
4198 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4199 	unsigned long totalpages = early_calculate_totalpages();
4200 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4201 
4202 	/*
4203 	 * If movablecore was specified, calculate the corresponding size of
4204 	 * kernelcore so that memory usable for any allocation type is
4205 	 * evenly spread. If both kernelcore
4206 	 * and movablecore are specified, then the value of kernelcore
4207 	 * will be used for required_kernelcore if it's greater than
4208 	 * what movablecore would have allowed.
4209 	 */
4210 	if (required_movablecore) {
4211 		unsigned long corepages;
4212 
4213 		/*
4214 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4215 		 * was requested by the user
4216 		 */
4217 		required_movablecore =
4218 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4219 		corepages = totalpages - required_movablecore;
4220 
4221 		required_kernelcore = max(required_kernelcore, corepages);
4222 	}
4223 
4224 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4225 	if (!required_kernelcore)
4226 		goto out;
4227 
4228 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4229 	find_usable_zone_for_movable();
4230 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4231 
4232 restart:
4233 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4234 	kernelcore_node = required_kernelcore / usable_nodes;
4235 	for_each_node_state(nid, N_HIGH_MEMORY) {
4236 		/*
4237 		 * Recalculate kernelcore_node if the division per node
4238 		 * now exceeds what is necessary to satisfy the requested
4239 		 * amount of memory for the kernel
4240 		 */
4241 		if (required_kernelcore < kernelcore_node)
4242 			kernelcore_node = required_kernelcore / usable_nodes;
4243 
4244 		/*
4245 		 * As the map is walked, we track how much memory is usable
4246 		 * by the kernel using kernelcore_remaining. When it is
4247 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4248 		 */
4249 		kernelcore_remaining = kernelcore_node;
4250 
4251 		/* Go through each range of PFNs within this node */
4252 		for_each_active_range_index_in_nid(i, nid) {
4253 			unsigned long start_pfn, end_pfn;
4254 			unsigned long size_pages;
4255 
4256 			start_pfn = max(early_node_map[i].start_pfn,
4257 						zone_movable_pfn[nid]);
4258 			end_pfn = early_node_map[i].end_pfn;
4259 			if (start_pfn >= end_pfn)
4260 				continue;
4261 
4262 			/* Account for what is only usable for kernelcore */
4263 			if (start_pfn < usable_startpfn) {
4264 				unsigned long kernel_pages;
4265 				kernel_pages = min(end_pfn, usable_startpfn)
4266 								- start_pfn;
4267 
4268 				kernelcore_remaining -= min(kernel_pages,
4269 							kernelcore_remaining);
4270 				required_kernelcore -= min(kernel_pages,
4271 							required_kernelcore);
4272 
4273 				/* Continue if range is now fully accounted */
4274 				if (end_pfn <= usable_startpfn) {
4275 
4276 					/*
4277 					 * Push zone_movable_pfn to the end so
4278 					 * that if we have to rebalance
4279 					 * kernelcore across nodes, we will
4280 					 * not double account here
4281 					 */
4282 					zone_movable_pfn[nid] = end_pfn;
4283 					continue;
4284 				}
4285 				start_pfn = usable_startpfn;
4286 			}
4287 
4288 			/*
4289 			 * The usable PFN range for ZONE_MOVABLE is from
4290 			 * start_pfn->end_pfn. Calculate size_pages as the
4291 			 * number of pages used as kernelcore
4292 			 */
4293 			size_pages = end_pfn - start_pfn;
4294 			if (size_pages > kernelcore_remaining)
4295 				size_pages = kernelcore_remaining;
4296 			zone_movable_pfn[nid] = start_pfn + size_pages;
4297 
4298 			/*
4299 			 * Some of the kernelcore has been accounted for; update
4300 			 * the counts and break if the kernelcore for this node
4301 			 * has been satisfied
4302 			 */
4303 			required_kernelcore -= min(required_kernelcore,
4304 								size_pages);
4305 			kernelcore_remaining -= size_pages;
4306 			if (!kernelcore_remaining)
4307 				break;
4308 		}
4309 	}
4310 
4311 	/*
4312 	 * If there is still required_kernelcore, we do another pass with one
4313 	 * less node in the count. This will push zone_movable_pfn[nid] further
4314 	 * along on the nodes that still have memory until kernelcore is
4315 	 * satisfied
4316 	 */
4317 	usable_nodes--;
4318 	if (usable_nodes && required_kernelcore > usable_nodes)
4319 		goto restart;
4320 
4321 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4322 	for (nid = 0; nid < MAX_NUMNODES; nid++)
4323 		zone_movable_pfn[nid] =
4324 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4325 
4326 out:
4327 	/* restore the node_state */
4328 	node_states[N_HIGH_MEMORY] = saved_node_state;
4329 }
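
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with two nodes of 1,000,000 pages each, all of
 * it in the highest usable zone, and kernelcore equivalent to 1,200,000
 * pages, each node is asked for kernelcore_node == 600,000 pages.
 * zone_movable_pfn[] therefore ends up 600,000 pages into each node (rounded
 * up to MAX_ORDER_NR_PAGES), and the remaining ~400,000 pages per node become
 * ZONE_MOVABLE.
 */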
4330 
4331 /* Any regular memory on that node ? */
4332 static void check_for_regular_memory(pg_data_t *pgdat)
4333 {
4334 #ifdef CONFIG_HIGHMEM
4335 	enum zone_type zone_type;
4336 
4337 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4338 		struct zone *zone = &pgdat->node_zones[zone_type];
4339 		if (zone->present_pages)
4340 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4341 	}
4342 #endif
4343 }
4344 
4345 /**
4346  * free_area_init_nodes - Initialise all pg_data_t and zone data
4347  * @max_zone_pfn: an array of max PFNs for each zone
4348  *
4349  * This will call free_area_init_node() for each active node in the system.
4350  * Using the page ranges provided by add_active_range(), the size of each
4351  * zone in each node and their holes is calculated. If the maximum PFN
4352  * between two adjacent zones matches, it is assumed that the zone is empty.
4353  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4354  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4355  * starts where the previous one ended. For example, ZONE_DMA32 starts
4356  * at arch_max_dma_pfn.
4357  */
4358 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4359 {
4360 	unsigned long nid;
4361 	int i;
4362 
4363 	/* Sort early_node_map as initialisation assumes it is sorted */
4364 	sort_node_map();
4365 
4366 	/* Record where the zone boundaries are */
4367 	memset(arch_zone_lowest_possible_pfn, 0,
4368 				sizeof(arch_zone_lowest_possible_pfn));
4369 	memset(arch_zone_highest_possible_pfn, 0,
4370 				sizeof(arch_zone_highest_possible_pfn));
4371 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4372 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4373 	for (i = 1; i < MAX_NR_ZONES; i++) {
4374 		if (i == ZONE_MOVABLE)
4375 			continue;
4376 		arch_zone_lowest_possible_pfn[i] =
4377 			arch_zone_highest_possible_pfn[i-1];
4378 		arch_zone_highest_possible_pfn[i] =
4379 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4380 	}
4381 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4382 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4383 
4384 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4385 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4386 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4387 
4388 	/* Print out the zone ranges */
4389 	printk("Zone PFN ranges:\n");
4390 	for (i = 0; i < MAX_NR_ZONES; i++) {
4391 		if (i == ZONE_MOVABLE)
4392 			continue;
4393 		printk("  %-8s ", zone_names[i]);
4394 		if (arch_zone_lowest_possible_pfn[i] ==
4395 				arch_zone_highest_possible_pfn[i])
4396 			printk("empty\n");
4397 		else
4398 			printk("%0#10lx -> %0#10lx\n",
4399 				arch_zone_lowest_possible_pfn[i],
4400 				arch_zone_highest_possible_pfn[i]);
4401 	}
4402 
4403 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4404 	printk("Movable zone start PFN for each node\n");
4405 	for (i = 0; i < MAX_NUMNODES; i++) {
4406 		if (zone_movable_pfn[i])
4407 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4408 	}
4409 
4410 	/* Print out the early_node_map[] */
4411 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4412 	for (i = 0; i < nr_nodemap_entries; i++)
4413 		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4414 						early_node_map[i].start_pfn,
4415 						early_node_map[i].end_pfn);
4416 
4417 	/* Initialise every node */
4418 	mminit_verify_pageflags_layout();
4419 	setup_nr_node_ids();
4420 	for_each_online_node(nid) {
4421 		pg_data_t *pgdat = NODE_DATA(nid);
4422 		free_area_init_node(nid, NULL,
4423 				find_min_pfn_for_node(nid), NULL);
4424 
4425 		/* Any memory on that node */
4426 		if (pgdat->node_present_pages)
4427 			node_set_state(nid, N_HIGH_MEMORY);
4428 		check_for_regular_memory(pgdat);
4429 	}
4430 }
4431 
4432 static int __init cmdline_parse_core(char *p, unsigned long *core)
4433 {
4434 	unsigned long long coremem;
4435 	if (!p)
4436 		return -EINVAL;
4437 
4438 	coremem = memparse(p, &p);
4439 	*core = coremem >> PAGE_SHIFT;
4440 
4441 	/* Paranoid check that UL is enough for the coremem value */
4442 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4443 
4444 	return 0;
4445 }
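
/*
 * Illustrative worked example (assuming 4 KiB pages, values not taken from a
 * real configuration): booting with "kernelcore=512M" makes memparse() return
 * 0x20000000, so the corresponding *core value becomes
 * 0x20000000 >> 12 == 131072 pages.
 */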
4446 
4447 /*
4448  * kernelcore=size sets the amount of memory for use for allocations that
4449  * cannot be reclaimed or migrated.
4450  */
4451 static int __init cmdline_parse_kernelcore(char *p)
4452 {
4453 	return cmdline_parse_core(p, &required_kernelcore);
4454 }
4455 
4456 /*
4457  * movablecore=size sets the amount of memory for use for allocations that
4458  * can be reclaimed or migrated.
4459  */
4460 static int __init cmdline_parse_movablecore(char *p)
4461 {
4462 	return cmdline_parse_core(p, &required_movablecore);
4463 }
4464 
4465 early_param("kernelcore", cmdline_parse_kernelcore);
4466 early_param("movablecore", cmdline_parse_movablecore);
4467 
4468 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4469 
4470 /**
4471  * set_dma_reserve - set the specified number of pages reserved in the first zone
4472  * @new_dma_reserve: The number of pages to mark reserved
4473  *
4474  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4475  * In the DMA zone, a significant percentage may be consumed by kernel image
4476  * and other unfreeable allocations which can skew the watermarks badly. This
4477  * function may optionally be used to account for unfreeable pages in the
4478  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4479  * smaller per-cpu batchsize.
4480  */
4481 void __init set_dma_reserve(unsigned long new_dma_reserve)
4482 {
4483 	dma_reserve = new_dma_reserve;
4484 }
4485 
4486 #ifndef CONFIG_NEED_MULTIPLE_NODES
4487 struct pglist_data __refdata contig_page_data = {
4488 #ifndef CONFIG_NO_BOOTMEM
4489  .bdata = &bootmem_node_data[0]
4490 #endif
4491  };
4492 EXPORT_SYMBOL(contig_page_data);
4493 #endif
4494 
4495 void __init free_area_init(unsigned long *zones_size)
4496 {
4497 	free_area_init_node(0, zones_size,
4498 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4499 }
4500 
4501 static int page_alloc_cpu_notify(struct notifier_block *self,
4502 				 unsigned long action, void *hcpu)
4503 {
4504 	int cpu = (unsigned long)hcpu;
4505 
4506 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4507 		drain_pages(cpu);
4508 
4509 		/*
4510 		 * Spill the event counters of the dead processor
4511 		 * into the current processors event counters.
4512 		 * This artificially elevates the count of the current
4513 		 * processor.
4514 		 */
4515 		vm_events_fold_cpu(cpu);
4516 
4517 		/*
4518 		 * Zero the differential counters of the dead processor
4519 		 * so that the vm statistics are consistent.
4520 		 *
4521 		 * This is only okay since the processor is dead and cannot
4522 		 * race with what we are doing.
4523 		 */
4524 		refresh_cpu_vm_stats(cpu);
4525 	}
4526 	return NOTIFY_OK;
4527 }
4528 
4529 void __init page_alloc_init(void)
4530 {
4531 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4532 }
4533 
4534 /*
4535  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4536  *	or min_free_kbytes changes.
4537  */
4538 static void calculate_totalreserve_pages(void)
4539 {
4540 	struct pglist_data *pgdat;
4541 	unsigned long reserve_pages = 0;
4542 	enum zone_type i, j;
4543 
4544 	for_each_online_pgdat(pgdat) {
4545 		for (i = 0; i < MAX_NR_ZONES; i++) {
4546 			struct zone *zone = pgdat->node_zones + i;
4547 			unsigned long max = 0;
4548 
4549 			/* Find valid and maximum lowmem_reserve in the zone */
4550 			for (j = i; j < MAX_NR_ZONES; j++) {
4551 				if (zone->lowmem_reserve[j] > max)
4552 					max = zone->lowmem_reserve[j];
4553 			}
4554 
4555 			/* we treat the high watermark as reserved pages. */
4556 			max += high_wmark_pages(zone);
4557 
4558 			if (max > zone->present_pages)
4559 				max = zone->present_pages;
4560 			reserve_pages += max;
4561 		}
4562 	}
4563 	totalreserve_pages = reserve_pages;
4564 }
4565 
4566 /*
4567  * setup_per_zone_lowmem_reserve - called whenever
4568  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4569  *	has a correct pages reserved value, so an adequate number of
4570  *	pages are left in the zone after a successful __alloc_pages().
4571  */
4572 static void setup_per_zone_lowmem_reserve(void)
4573 {
4574 	struct pglist_data *pgdat;
4575 	enum zone_type j, idx;
4576 
4577 	for_each_online_pgdat(pgdat) {
4578 		for (j = 0; j < MAX_NR_ZONES; j++) {
4579 			struct zone *zone = pgdat->node_zones + j;
4580 			unsigned long present_pages = zone->present_pages;
4581 
4582 			zone->lowmem_reserve[j] = 0;
4583 
4584 			idx = j;
4585 			while (idx) {
4586 				struct zone *lower_zone;
4587 
4588 				idx--;
4589 
4590 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4591 					sysctl_lowmem_reserve_ratio[idx] = 1;
4592 
4593 				lower_zone = pgdat->node_zones + idx;
4594 				lower_zone->lowmem_reserve[j] = present_pages /
4595 					sysctl_lowmem_reserve_ratio[idx];
4596 				present_pages += lower_zone->present_pages;
4597 			}
4598 		}
4599 	}
4600 
4601 	/* update totalreserve_pages */
4602 	calculate_totalreserve_pages();
4603 }
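
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with ZONE_NORMAL at 126,976 present pages and
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256, the DMA zone ends up with
 * lowmem_reserve[ZONE_NORMAL] == 126976 / 256 == 496 pages held back against
 * allocations that could also have been satisfied from ZONE_NORMAL.
 */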
4604 
4605 /**
4606  * setup_per_zone_wmarks - called when min_free_kbytes changes
4607  * or when memory is hot-{added|removed}
4608  *
4609  * Ensures that the watermark[min,low,high] values for each zone are set
4610  * correctly with respect to min_free_kbytes.
4611  */
4612 void setup_per_zone_wmarks(void)
4613 {
4614 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4615 	unsigned long lowmem_pages = 0;
4616 	struct zone *zone;
4617 	unsigned long flags;
4618 
4619 	/* Calculate total number of !ZONE_HIGHMEM pages */
4620 	for_each_zone(zone) {
4621 		if (!is_highmem(zone))
4622 			lowmem_pages += zone->present_pages;
4623 	}
4624 
4625 	for_each_zone(zone) {
4626 		u64 tmp;
4627 
4628 		spin_lock_irqsave(&zone->lock, flags);
4629 		tmp = (u64)pages_min * zone->present_pages;
4630 		do_div(tmp, lowmem_pages);
4631 		if (is_highmem(zone)) {
4632 			/*
4633 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4634 			 * need highmem pages, so cap pages_min to a small
4635 			 * value here.
4636 			 *
4637 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4638 			 * deltas control async page reclaim, and so should
4639 			 * not be capped for highmem.
4640 			 */
4641 			int min_pages;
4642 
4643 			min_pages = zone->present_pages / 1024;
4644 			if (min_pages < SWAP_CLUSTER_MAX)
4645 				min_pages = SWAP_CLUSTER_MAX;
4646 			if (min_pages > 128)
4647 				min_pages = 128;
4648 			zone->watermark[WMARK_MIN] = min_pages;
4649 		} else {
4650 			/*
4651 			 * If it's a lowmem zone, reserve a number of pages
4652 			 * proportionate to the zone's size.
4653 			 */
4654 			zone->watermark[WMARK_MIN] = tmp;
4655 		}
4656 
4657 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4658 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4659 		setup_zone_migrate_reserve(zone);
4660 		spin_unlock_irqrestore(&zone->lock, flags);
4661 	}
4662 
4663 	/* update totalreserve_pages */
4664 	calculate_totalreserve_pages();
4665 }
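
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with min_free_kbytes == 4096 and 4 KiB pages,
 * pages_min == 1024.  A lowmem zone holding half of all lowmem gets
 * tmp == 512, so WMARK_MIN == 512, WMARK_LOW == 512 + 128 == 640 and
 * WMARK_HIGH == 512 + 256 == 768 pages.
 */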
4666 
4667 /*
4668  * The inactive anon list should be small enough that the VM never has to
4669  * do too much work, but large enough that each inactive page has a chance
4670  * to be referenced again before it is swapped out.
4671  *
4672  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4673  * INACTIVE_ANON pages on this zone's LRU, maintained by the
4674  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4675  * the anonymous pages are kept on the inactive list.
4676  *
4677  * total     target    max
4678  * memory    ratio     inactive anon
4679  * -------------------------------------
4680  *   10MB       1         5MB
4681  *  100MB       1        50MB
4682  *    1GB       3       250MB
4683  *   10GB      10       0.9GB
4684  *  100GB      31         3GB
4685  *    1TB     101        10GB
4686  *   10TB     320        32GB
4687  */
4688 void calculate_zone_inactive_ratio(struct zone *zone)
4689 {
4690 	unsigned int gb, ratio;
4691 
4692 	/* Zone size in gigabytes */
4693 	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4694 	if (gb)
4695 		ratio = int_sqrt(10 * gb);
4696 	else
4697 		ratio = 1;
4698 
4699 	zone->inactive_ratio = ratio;
4700 }
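
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: a 4 GiB zone has gb == 4, so inactive_ratio ==
 * int_sqrt(40) == 6, i.e. roughly one in seven anonymous pages is kept on the
 * inactive list.
 */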
4701 
4702 static void __init setup_per_zone_inactive_ratio(void)
4703 {
4704 	struct zone *zone;
4705 
4706 	for_each_zone(zone)
4707 		calculate_zone_inactive_ratio(zone);
4708 }
4709 
4710 /*
4711  * Initialise min_free_kbytes.
4712  *
4713  * For small machines we want it small (128k min).  For large machines
4714  * we want it large (64MB max).  But it is not linear, because network
4715  * bandwidth does not increase linearly with machine size.  We use
4716  *
4717  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4718  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4719  *
4720  * which yields
4721  *
4722  * 16MB:	512k
4723  * 32MB:	724k
4724  * 64MB:	1024k
4725  * 128MB:	1448k
4726  * 256MB:	2048k
4727  * 512MB:	2896k
4728  * 1024MB:	4096k
4729  * 2048MB:	5792k
4730  * 4096MB:	8192k
4731  * 8192MB:	11584k
4732  * 16384MB:	16384k
4733  */
4734 static int __init init_per_zone_wmark_min(void)
4735 {
4736 	unsigned long lowmem_kbytes;
4737 
4738 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4739 
4740 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4741 	if (min_free_kbytes < 128)
4742 		min_free_kbytes = 128;
4743 	if (min_free_kbytes > 65536)
4744 		min_free_kbytes = 65536;
4745 	setup_per_zone_wmarks();
4746 	setup_per_zone_lowmem_reserve();
4747 	setup_per_zone_inactive_ratio();
4748 	return 0;
4749 }
4750 module_init(init_per_zone_wmark_min)
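
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: a machine with roughly 3 GiB of lowmem has
 * lowmem_kbytes == 3,145,728, so min_free_kbytes == int_sqrt(3145728 * 16)
 * == int_sqrt(50,331,648) == 7094 kB, between the 2048MB and 4096MB rows of
 * the table above.
 */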
4751 
4752 /*
4753  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4754  *	that we can call two helper functions whenever min_free_kbytes
4755  *	changes.
4756  */
4757 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4758 	void __user *buffer, size_t *length, loff_t *ppos)
4759 {
4760 	proc_dointvec(table, write, buffer, length, ppos);
4761 	if (write)
4762 		setup_per_zone_wmarks();
4763 	return 0;
4764 }
4765 
4766 #ifdef CONFIG_NUMA
4767 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4768 	void __user *buffer, size_t *length, loff_t *ppos)
4769 {
4770 	struct zone *zone;
4771 	int rc;
4772 
4773 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4774 	if (rc)
4775 		return rc;
4776 
4777 	for_each_zone(zone)
4778 		zone->min_unmapped_pages = (zone->present_pages *
4779 				sysctl_min_unmapped_ratio) / 100;
4780 	return 0;
4781 }
4782 
4783 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4784 	void __user *buffer, size_t *length, loff_t *ppos)
4785 {
4786 	struct zone *zone;
4787 	int rc;
4788 
4789 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4790 	if (rc)
4791 		return rc;
4792 
4793 	for_each_zone(zone)
4794 		zone->min_slab_pages = (zone->present_pages *
4795 				sysctl_min_slab_ratio) / 100;
4796 	return 0;
4797 }
4798 #endif
4799 
4800 /*
4801  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4802  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4803  *	whenever sysctl_lowmem_reserve_ratio changes.
4804  *
4805  * The reserve ratio obviously has absolutely no relation with the
4806  * minimum watermarks. The lowmem reserve ratio only makes sense
4807  * as a function of the boot-time zone sizes.
4808  */
4809 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4810 	void __user *buffer, size_t *length, loff_t *ppos)
4811 {
4812 	proc_dointvec_minmax(table, write, buffer, length, ppos);
4813 	setup_per_zone_lowmem_reserve();
4814 	return 0;
4815 }
4816 
4817 /*
4818  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4819  * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
4820  * pagelist can have before it gets flushed back to the buddy allocator.
4821  */
4822 
4823 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4824 	void __user *buffer, size_t *length, loff_t *ppos)
4825 {
4826 	struct zone *zone;
4827 	unsigned int cpu;
4828 	int ret;
4829 
4830 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4831 	if (!write || (ret == -EINVAL))
4832 		return ret;
4833 	for_each_populated_zone(zone) {
4834 		for_each_possible_cpu(cpu) {
4835 			unsigned long  high;
4836 			high = zone->present_pages / percpu_pagelist_fraction;
4837 			setup_pagelist_highmark(
4838 				per_cpu_ptr(zone->pageset, cpu), high);
4839 		}
4840 	}
4841 	return 0;
4842 }
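
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: writing 8 to percpu_pagelist_fraction on a
 * system with a 262,144-page zone sets that zone's per-cpu pcp->high mark to
 * 262144 / 8 == 32768 pages for every possible CPU.
 */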
4843 
4844 int hashdist = HASHDIST_DEFAULT;
4845 
4846 #ifdef CONFIG_NUMA
4847 static int __init set_hashdist(char *str)
4848 {
4849 	if (!str)
4850 		return 0;
4851 	hashdist = simple_strtoul(str, &str, 0);
4852 	return 1;
4853 }
4854 __setup("hashdist=", set_hashdist);
4855 #endif
4856 
4857 /*
4858  * allocate a large system hash table from bootmem
4859  * - it is assumed that the hash table must contain an exact power-of-2
4860  *   quantity of entries
4861  * - limit is the number of hash buckets, not the total allocation size
4862  */
4863 void *__init alloc_large_system_hash(const char *tablename,
4864 				     unsigned long bucketsize,
4865 				     unsigned long numentries,
4866 				     int scale,
4867 				     int flags,
4868 				     unsigned int *_hash_shift,
4869 				     unsigned int *_hash_mask,
4870 				     unsigned long limit)
4871 {
4872 	unsigned long long max = limit;
4873 	unsigned long log2qty, size;
4874 	void *table = NULL;
4875 
4876 	/* allow the kernel cmdline to have a say */
4877 	if (!numentries) {
4878 		/* round applicable memory size up to nearest megabyte */
4879 		numentries = nr_kernel_pages;
4880 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4881 		numentries >>= 20 - PAGE_SHIFT;
4882 		numentries <<= 20 - PAGE_SHIFT;
4883 
4884 		/* limit to 1 bucket per 2^scale bytes of low memory */
4885 		if (scale > PAGE_SHIFT)
4886 			numentries >>= (scale - PAGE_SHIFT);
4887 		else
4888 			numentries <<= (PAGE_SHIFT - scale);
4889 
4890 		/* Make sure we've got at least a 0-order allocation.. */
4891 		if (unlikely(flags & HASH_SMALL)) {
4892 			/* Makes no sense without HASH_EARLY */
4893 			WARN_ON(!(flags & HASH_EARLY));
4894 			if (!(numentries >> *_hash_shift)) {
4895 				numentries = 1UL << *_hash_shift;
4896 				BUG_ON(!numentries);
4897 			}
4898 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4899 			numentries = PAGE_SIZE / bucketsize;
4900 	}
4901 	numentries = roundup_pow_of_two(numentries);
4902 
4903 	/* limit allocation size to 1/16 total memory by default */
4904 	if (max == 0) {
4905 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4906 		do_div(max, bucketsize);
4907 	}
4908 
4909 	if (numentries > max)
4910 		numentries = max;
4911 
4912 	log2qty = ilog2(numentries);
4913 
4914 	do {
4915 		size = bucketsize << log2qty;
4916 		if (flags & HASH_EARLY)
4917 			table = alloc_bootmem_nopanic(size);
4918 		else if (hashdist)
4919 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4920 		else {
4921 			/*
4922 			 * If bucketsize is not a power-of-two, we may free
4923 			 * some pages at the end of the hash table, which
4924 			 * alloc_pages_exact() does automatically
4925 			 */
4926 			if (get_order(size) < MAX_ORDER) {
4927 				table = alloc_pages_exact(size, GFP_ATOMIC);
4928 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4929 			}
4930 		}
4931 	} while (!table && size > PAGE_SIZE && --log2qty);
4932 
4933 	if (!table)
4934 		panic("Failed to allocate %s hash table\n", tablename);
4935 
4936 	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4937 	       tablename,
4938 	       (1U << log2qty),
4939 	       ilog2(size) - PAGE_SHIFT,
4940 	       size);
4941 
4942 	if (_hash_shift)
4943 		*_hash_shift = log2qty;
4944 	if (_hash_mask)
4945 		*_hash_mask = (1 << log2qty) - 1;
4946 
4947 	return table;
4948 }
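
/*
 * Illustrative worked example, with assumed values rather than anything taken
 * from a real configuration: with nr_kernel_pages == 262,144 (1 GiB of
 * 4 KiB pages), scale == 17, bucketsize == 16 and no explicit numentries or
 * limit, the memory size rounds to 1 GiB, numentries becomes
 * 262144 >> 5 == 8192, which is already a power of two, so log2qty == 13 and
 * the table occupies 16 << 13 == 128 KiB.
 */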
4949 
4950 /* Return a pointer to the bitmap storing bits affecting a block of pages */
4951 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4952 							unsigned long pfn)
4953 {
4954 #ifdef CONFIG_SPARSEMEM
4955 	return __pfn_to_section(pfn)->pageblock_flags;
4956 #else
4957 	return zone->pageblock_flags;
4958 #endif /* CONFIG_SPARSEMEM */
4959 }
4960 
4961 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4962 {
4963 #ifdef CONFIG_SPARSEMEM
4964 	pfn &= (PAGES_PER_SECTION-1);
4965 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4966 #else
4967 	pfn = pfn - zone->zone_start_pfn;
4968 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4969 #endif /* CONFIG_SPARSEMEM */
4970 }
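
/*
 * Illustrative worked example for the non-SPARSEMEM case, with assumed values
 * rather than anything taken from a real configuration: with
 * pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4, a page 5000 pfns past
 * zone_start_pfn lies in pageblock 5000 >> 9 == 9, so its flags start at bit
 * index 9 * 4 == 36 of zone->pageblock_flags.
 */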
4971 
4972 /**
4973  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4974  * @page: The page within the block of interest
4975  * @start_bitidx: The first bit of interest to retrieve
4976  * @end_bitidx: The last bit of interest
4977  * returns pageblock_bits flags
4978  */
4979 unsigned long get_pageblock_flags_group(struct page *page,
4980 					int start_bitidx, int end_bitidx)
4981 {
4982 	struct zone *zone;
4983 	unsigned long *bitmap;
4984 	unsigned long pfn, bitidx;
4985 	unsigned long flags = 0;
4986 	unsigned long value = 1;
4987 
4988 	zone = page_zone(page);
4989 	pfn = page_to_pfn(page);
4990 	bitmap = get_pageblock_bitmap(zone, pfn);
4991 	bitidx = pfn_to_bitidx(zone, pfn);
4992 
4993 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4994 		if (test_bit(bitidx + start_bitidx, bitmap))
4995 			flags |= value;
4996 
4997 	return flags;
4998 }
4999 
5000 /**
5001  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5002  * @page: The page within the block of interest
5003  * @start_bitidx: The first bit of interest
5004  * @end_bitidx: The last bit of interest
5005  * @flags: The flags to set
5006  */
5007 void set_pageblock_flags_group(struct page *page, unsigned long flags,
5008 					int start_bitidx, int end_bitidx)
5009 {
5010 	struct zone *zone;
5011 	unsigned long *bitmap;
5012 	unsigned long pfn, bitidx;
5013 	unsigned long value = 1;
5014 
5015 	zone = page_zone(page);
5016 	pfn = page_to_pfn(page);
5017 	bitmap = get_pageblock_bitmap(zone, pfn);
5018 	bitidx = pfn_to_bitidx(zone, pfn);
5019 	VM_BUG_ON(pfn < zone->zone_start_pfn);
5020 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5021 
5022 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5023 		if (flags & value)
5024 			__set_bit(bitidx + start_bitidx, bitmap);
5025 		else
5026 			__clear_bit(bitidx + start_bitidx, bitmap);
5027 }
5028 
5029 /*
5030  * This is designed as a sub-function; please see page_isolation.c as well.
5031  * It sets/clears a page block's type to ISOLATE.
5032  * The page allocator never allocates memory from an ISOLATE block.
5033  */
5034 
5035 int set_migratetype_isolate(struct page *page)
5036 {
5037 	struct zone *zone;
5038 	struct page *curr_page;
5039 	unsigned long flags, pfn, iter;
5040 	unsigned long immobile = 0;
5041 	struct memory_isolate_notify arg;
5042 	int notifier_ret;
5043 	int ret = -EBUSY;
5044 	int zone_idx;
5045 
5046 	zone = page_zone(page);
5047 	zone_idx = zone_idx(zone);
5048 
5049 	spin_lock_irqsave(&zone->lock, flags);
5050 	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
5051 	    zone_idx == ZONE_MOVABLE) {
5052 		ret = 0;
5053 		goto out;
5054 	}
5055 
5056 	pfn = page_to_pfn(page);
5057 	arg.start_pfn = pfn;
5058 	arg.nr_pages = pageblock_nr_pages;
5059 	arg.pages_found = 0;
5060 
5061 	/*
5062 	 * It may be possible to isolate a pageblock even if the
5063 	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5064 	 * notifier chain is used by balloon drivers to return the
5065 	 * number of pages in a range that are held by the balloon
5066 	 * driver to shrink memory. If all the pages are accounted for
5067 	 * by balloons, are free, or on the LRU, isolation can continue.
5068 	 * Later, for example, when memory hotplug notifier runs, these
5069 	 * pages reported as "can be isolated" should be isolated (freed)
5070 	 * by the balloon driver through the memory notifier chain.
5071 	 */
5072 	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5073 	notifier_ret = notifier_to_errno(notifier_ret);
5074 	if (notifier_ret || !arg.pages_found)
5075 		goto out;
5076 
5077 	for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
5078 		if (!pfn_valid_within(iter))
5079 			continue;
5080 
5081 		curr_page = pfn_to_page(iter);
5082 		if (!page_count(curr_page) || PageLRU(curr_page))
5083 			continue;
5084 
5085 		immobile++;
5086 	}
5087 
5088 	if (arg.pages_found == immobile)
5089 		ret = 0;
5090 
5091 out:
5092 	if (!ret) {
5093 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5094 		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5095 	}
5096 
5097 	spin_unlock_irqrestore(&zone->lock, flags);
5098 	if (!ret)
5099 		drain_all_pages();
5100 	return ret;
5101 }
5102 
5103 void unset_migratetype_isolate(struct page *page)
5104 {
5105 	struct zone *zone;
5106 	unsigned long flags;
5107 	zone = page_zone(page);
5108 	spin_lock_irqsave(&zone->lock, flags);
5109 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5110 		goto out;
5111 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5112 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5113 out:
5114 	spin_unlock_irqrestore(&zone->lock, flags);
5115 }
5116 
5117 #ifdef CONFIG_MEMORY_HOTREMOVE
5118 /*
5119  * All pages in the range must be isolated before calling this.
5120  */
5121 void
5122 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5123 {
5124 	struct page *page;
5125 	struct zone *zone;
5126 	int order, i;
5127 	unsigned long pfn;
5128 	unsigned long flags;
5129 	/* find the first valid pfn */
5130 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5131 		if (pfn_valid(pfn))
5132 			break;
5133 	if (pfn == end_pfn)
5134 		return;
5135 	zone = page_zone(pfn_to_page(pfn));
5136 	spin_lock_irqsave(&zone->lock, flags);
5137 	pfn = start_pfn;
5138 	while (pfn < end_pfn) {
5139 		if (!pfn_valid(pfn)) {
5140 			pfn++;
5141 			continue;
5142 		}
5143 		page = pfn_to_page(pfn);
5144 		BUG_ON(page_count(page));
5145 		BUG_ON(!PageBuddy(page));
5146 		order = page_order(page);
5147 #ifdef CONFIG_DEBUG_VM
5148 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5149 		       pfn, 1 << order, end_pfn);
5150 #endif
5151 		list_del(&page->lru);
5152 		rmv_page_order(page);
5153 		zone->free_area[order].nr_free--;
5154 		__mod_zone_page_state(zone, NR_FREE_PAGES,
5155 				      - (1UL << order));
5156 		for (i = 0; i < (1 << order); i++)
5157 			SetPageReserved((page+i));
5158 		pfn += (1 << order);
5159 	}
5160 	spin_unlock_irqrestore(&zone->lock, flags);
5161 }
5162 #endif
5163 
5164 #ifdef CONFIG_MEMORY_FAILURE
5165 bool is_free_buddy_page(struct page *page)
5166 {
5167 	struct zone *zone = page_zone(page);
5168 	unsigned long pfn = page_to_pfn(page);
5169 	unsigned long flags;
5170 	int order;
5171 
5172 	spin_lock_irqsave(&zone->lock, flags);
5173 	for (order = 0; order < MAX_ORDER; order++) {
5174 		struct page *page_head = page - (pfn & ((1 << order) - 1));
5175 
5176 		if (PageBuddy(page_head) && page_order(page_head) >= order)
5177 			break;
5178 	}
5179 	spin_unlock_irqrestore(&zone->lock, flags);
5180 
5181 	return order < MAX_ORDER;
5182 }
5183 #endif
5184 
5185 static struct trace_print_flags pageflag_names[] = {
5186 	{1UL << PG_locked,		"locked"	},
5187 	{1UL << PG_error,		"error"		},
5188 	{1UL << PG_referenced,		"referenced"	},
5189 	{1UL << PG_uptodate,		"uptodate"	},
5190 	{1UL << PG_dirty,		"dirty"		},
5191 	{1UL << PG_lru,			"lru"		},
5192 	{1UL << PG_active,		"active"	},
5193 	{1UL << PG_slab,		"slab"		},
5194 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
5195 	{1UL << PG_arch_1,		"arch_1"	},
5196 	{1UL << PG_reserved,		"reserved"	},
5197 	{1UL << PG_private,		"private"	},
5198 	{1UL << PG_private_2,		"private_2"	},
5199 	{1UL << PG_writeback,		"writeback"	},
5200 #ifdef CONFIG_PAGEFLAGS_EXTENDED
5201 	{1UL << PG_head,		"head"		},
5202 	{1UL << PG_tail,		"tail"		},
5203 #else
5204 	{1UL << PG_compound,		"compound"	},
5205 #endif
5206 	{1UL << PG_swapcache,		"swapcache"	},
5207 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
5208 	{1UL << PG_reclaim,		"reclaim"	},
5209 	{1UL << PG_buddy,		"buddy"		},
5210 	{1UL << PG_swapbacked,		"swapbacked"	},
5211 	{1UL << PG_unevictable,		"unevictable"	},
5212 #ifdef CONFIG_MMU
5213 	{1UL << PG_mlocked,		"mlocked"	},
5214 #endif
5215 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
5216 	{1UL << PG_uncached,		"uncached"	},
5217 #endif
5218 #ifdef CONFIG_MEMORY_FAILURE
5219 	{1UL << PG_hwpoison,		"hwpoison"	},
5220 #endif
5221 	{-1UL,				NULL		},
5222 };
5223 
5224 static void dump_page_flags(unsigned long flags)
5225 {
5226 	const char *delim = "";
5227 	unsigned long mask;
5228 	int i;
5229 
5230 	printk(KERN_ALERT "page flags: %#lx(", flags);
5231 
5232 	/* remove zone id */
5233 	flags &= (1UL << NR_PAGEFLAGS) - 1;
5234 
5235 	for (i = 0; pageflag_names[i].name && flags; i++) {
5236 
5237 		mask = pageflag_names[i].mask;
5238 		if ((flags & mask) != mask)
5239 			continue;
5240 
5241 		flags &= ~mask;
5242 		printk("%s%s", delim, pageflag_names[i].name);
5243 		delim = "|";
5244 	}
5245 
5246 	/* check for left over flags */
5247 	if (flags)
5248 		printk("%s%#lx", delim, flags);
5249 
5250 	printk(")\n");
5251 }
5252 
5253 void dump_page(struct page *page)
5254 {
5255 	printk(KERN_ALERT
5256 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5257 		page, page_count(page), page_mapcount(page),
5258 		page->mapping, page->index);
5259 	dump_page_flags(page->flags);
5260 }
5261