xref: /openbmc/linux/mm/page_alloc.c (revision a1e58bbd)
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	void *pc = page_get_page_cgroup(page);

	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	if (pc) {
		printk(KERN_EMERG "cgroup:%p\n", pc);
		page_reset_bad_cgroup(page);
	}
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n");
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
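
/*
 * Worked example (editorial illustration, not part of the original
 * source): after prep_compound_page(page, 2) on a four-page block,
 *
 *	page[0]		head page, PG_head set, dtor and order stashed
 *			via set_compound_page_dtor()/set_compound_order()
 *	page[1..3]	tail pages, PG_tail set, ->first_page = page
 *
 * so put_page() on any sub-page can recover both the head page and the
 * allocation order.
 */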

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely(compound_order(page) != order))
		bad_page(page);

	if (unlikely(!PageHead(page)))
		bad_page(page);
	__ClearPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) |
				(p->first_page != page)))
			bad_page(page);
		__ClearPageTail(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
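
/*
 * Worked example (editorial illustration): freeing page 8 at order 1,
 * __page_find_buddy() gives 8 ^ (1 << 1) = 10, and the combined order-2
 * block starts at __find_combined_index(8, 1) = 8 & ~(1 << 1) = 8.
 * Starting from page 10 instead finds buddy 10 ^ 2 = 8 and the same
 * combined index, 10 & ~2 = 8.
 */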

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_buddy. The page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
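
/*
 * Illustrative walk-through (not from the original source): freeing the
 * order-0 page at pfn 12 while 13 is free and 14-15 already form a free
 * order-1 block merges upward twice:
 *
 *	order 0: buddy of 12 is 12 ^ 1 = 13, free -> combine at 12, order 1
 *	order 1: buddy of 12 is 12 ^ 2 = 14, free -> combine at 12, order 2
 *	order 2: buddy of 12 is 12 ^ 4 = 8; if 8 is not free, stop
 *
 * and the order-2 block at 12 goes onto free_area[2] via set_page_order().
 */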

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it, as __free_one_page manipulates the free lists */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0; i < (1 << order); ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
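
/*
 * Worked example (editorial illustration): to satisfy an order-0 request
 * from an order-3 block of eight pages, __rmqueue_smallest() calls
 * expand(zone, page, 0, 3, &zone->free_area[3], migratetype), which
 * returns the unused halves to successively smaller free lists:
 *
 *	page[4..7] -> free_area[2]
 *	page[2..3] -> free_area[1]
 *	page[1]    -> free_area[0]
 *
 * leaving page[0] as the order-0 page handed back to the caller.
 */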

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which the free lists are fallen back
 * on when the free lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
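
/*
 * Example (editorial illustration): when the MIGRATE_UNMOVABLE free lists
 * are empty, __rmqueue_fallback() tries MIGRATE_RECLAIMABLE, then
 * MIGRATE_MOVABLE, and finally falls back to MIGRATE_RESERVE via
 * __rmqueue_smallest() rather than failing the allocation outright.
 */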

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			struct page *start_page, struct page *end_page,
			int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 0, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	set_page_private(page, get_pageblock_migratetype(page));
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
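
/*
 * Usage sketch (editorial illustration, not part of the original source):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		... use the four order-0 pages independently ...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * After split_page() each sub-page carries its own reference and must be
 * freed individually, as the comment above requires.
 */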

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;
	int migratetype = allocflags_to_migratetype(gfp_flags);

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp;
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			if (unlikely(!pcp->count))
				goto failed;
		}

		/* Find a page of the appropriate migrate type */
		if (cold) {
			list_for_each_entry_reverse(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		} else {
			list_for_each_entry(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		}

		/* Allocate more to the pcp list if necessary */
		if (unlikely(&page->lru == &pcp->list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			page = list_entry(pcp->list.next, struct page, lru);
		}

		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
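
/*
 * Example (editorial illustration): a GFP_ATOMIC caller (!__GFP_WAIT with
 * __GFP_HIGH) ends up with ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH in
 * __alloc_pages(), so it may dip further below the min watermark than a
 * GFP_KERNEL caller, which gets ALLOC_WMARK_MIN | ALLOC_CPUSET instead.
 */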

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
	    !fail_page_alloc.ignore_gfp_highmem_file ||
	    !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
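
/*
 * Worked example (editorial illustration): for an order-2 request with
 * mark = 128, no ALLOC_HIGH/ALLOC_HARDER and a zero lowmem_reserve, a
 * zone with 200 free pages starts from free_pages = 200 - 4 + 1 = 197:
 *
 *	o = 0: say 80 pages sit in order-0 blocks; 197 - 80 = 117 > 64
 *	o = 1: say 40 order-1 blocks (80 pages);   117 - 80 = 37  > 32
 *
 * so the check passes; with 45 order-1 blocks instead, 117 - 90 = 27
 * would not exceed min = 32 and zone_watermark_ok() would return 0.
 */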

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones BITMAP).
 *  2) Check that the zones node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->zones;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->zones;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
{
}
#endif	/* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z;
	struct page *page = NULL;
	int classzone_idx = zone_idx(zonelist->zones[0]);
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
	enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */

zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	z = zonelist->zones;

	do {
		/*
		 * In NUMA, this could be a policy zonelist which contains
		 * zones that may not be allowed by the current gfp_mask.
		 * Check the zone is allowed by the current flags
		 */
		if (unlikely(alloc_should_filter_zonelist(zonelist))) {
			if (highest_zoneidx == -1)
				highest_zoneidx = gfp_zone(gfp_mask);
			if (zone_idx(*z) > highest_zoneidx)
				continue;
		}

		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		zone = *z;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags)) {
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					goto this_zone_full;
			}
		}

		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup) {
			/* we do zlc_setup after the first zone is tried */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	} while (*(++z) != NULL);

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/*
		 * Happens if we have an empty zonelist as a result of
		 * GFP_THISNODE being used on a memoryless node
		 */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

	for (z = zonelist->zones; *z; z++)
		wakeup_kswapd(*z, order);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

rebalance:
	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (order != 0)
		drain_all_pages();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		if (!try_set_zone_oom(zonelist)) {
			schedule_timeout_uninterruptible(1);
			goto restart;
		}

		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page) {
			clear_zonelist_oom(zonelist);
			goto got_pg;
		}

		/* The OOM killer will not help higher order allocs so fail */
		if (order > PAGE_ALLOC_COSTLY_ORDER) {
			clear_zonelist_oom(zonelist);
			goto nopage;
		}

		out_of_memory(zonelist, gfp_mask, order);
		clear_zonelist_oom(zonelist);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
						(gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);
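
/*
 * Usage sketch (editorial illustration, not part of the original source):
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *
 *	if (addr) {
 *		... use the two pages at addr ...
 *		free_pages(addr, 1);
 *	}
 *
 * These helpers deal in kernel virtual addresses, which is why the
 * VM_BUG_ON() above rejects __GFP_HIGHMEM.
 */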

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))
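/* e.g. with 4K pages (PAGE_SHIFT == 12), K(x) converts pages to kB: K(256) == 1024 */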

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	int cpu;
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE),
		global_page_state(NR_INACTIVE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE) +
			global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_zone(zone) {
		int i;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone_page_state(zone, NR_ACTIVE)),
			K(zone_page_state(zone, NR_INACTIVE)),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zonelist->zones[nr_zones++] = zone;
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}


/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1925  *  the same zonelist. So only NUMA can configure this param.
1926  */
1927 #define ZONELIST_ORDER_DEFAULT  0
1928 #define ZONELIST_ORDER_NODE     1
1929 #define ZONELIST_ORDER_ZONE     2
1930 
1931 /* zonelist order in the kernel.
1932  * set_zonelist_order() will set this to NODE or ZONE.
1933  */
1934 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1935 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1936 
1937 
1938 #ifdef CONFIG_NUMA
1939 /* The value the user specified; may be changed by early_param or sysctl */
1940 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1941 /* string for sysctl */
1942 #define NUMA_ZONELIST_ORDER_LEN	16
1943 char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";
1944 
1945 /*
1946  * Interface for configuring zonelist ordering.
1947  * Command line option "numa_zonelist_order"
1948  *	= "[dD]efault"	- default, automatic configuration.
1949  *	= "[nN]ode"	- order by node locality, then by zone within node
1950  *	= "[zZ]one"	- order by zone, then by locality within zone
1951  */
1952 
1953 static int __parse_numa_zonelist_order(char *s)
1954 {
1955 	if (*s == 'd' || *s == 'D') {
1956 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1957 	} else if (*s == 'n' || *s == 'N') {
1958 		user_zonelist_order = ZONELIST_ORDER_NODE;
1959 	} else if (*s == 'z' || *s == 'Z') {
1960 		user_zonelist_order = ZONELIST_ORDER_ZONE;
1961 	} else {
1962 		printk(KERN_WARNING
1963 			"Ignoring invalid numa_zonelist_order value:  "
1964 			"%s\n", s);
1965 		return -EINVAL;
1966 	}
1967 	return 0;
1968 }
1969 
1970 static __init int setup_numa_zonelist_order(char *s)
1971 {
1972 	if (s)
1973 		return __parse_numa_zonelist_order(s);
1974 	return 0;
1975 }
1976 early_param("numa_zonelist_order", setup_numa_zonelist_order);
1977 
1978 /*
1979  * sysctl handler for numa_zonelist_order
1980  */
1981 int numa_zonelist_order_handler(ctl_table *table, int write,
1982 		struct file *file, void __user *buffer, size_t *length,
1983 		loff_t *ppos)
1984 {
1985 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
1986 	int ret;
1987 
1988 	if (write)
1989 		strncpy(saved_string, (char*)table->data,
1990 			NUMA_ZONELIST_ORDER_LEN);
1991 	ret = proc_dostring(table, write, file, buffer, length, ppos);
1992 	if (ret)
1993 		return ret;
1994 	if (write) {
1995 		int oldval = user_zonelist_order;
1996 		if (__parse_numa_zonelist_order((char*)table->data)) {
1997 			/*
1998 			 * bogus value.  restore saved string
1999 			 */
2000 			strncpy((char*)table->data, saved_string,
2001 				NUMA_ZONELIST_ORDER_LEN);
2002 			user_zonelist_order = oldval;
2003 		} else if (oldval != user_zonelist_order)
2004 			build_all_zonelists();
2005 	}
2006 	return 0;
2007 }
2008 
2009 
2010 #define MAX_NODE_LOAD (num_online_nodes())
2011 static int node_load[MAX_NUMNODES];
2012 
2013 /**
2014  * find_next_best_node - find the next node that should appear in a given node's fallback list
2015  * @node: node whose fallback list we're appending
2016  * @used_node_mask: nodemask_t of already used nodes
2017  *
2018  * We use a number of factors to determine which is the next node that should
2019  * appear on a given node's fallback list.  The node should not have appeared
2020  * already in @node's fallback list, and it should be the next closest node
2021  * according to the distance array (which contains arbitrary distance values
2022  * from each node to each node in the system).  We also prefer nodes with
2023  * no CPUs, since presumably they will have very little allocation pressure
2024  * on them otherwise.
2025  * It returns -1 if no node is found.
2026  */
2027 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2028 {
2029 	int n, val;
2030 	int min_val = INT_MAX;
2031 	int best_node = -1;
2032 
2033 	/* Use the local node if we haven't already */
2034 	if (!node_isset(node, *used_node_mask)) {
2035 		node_set(node, *used_node_mask);
2036 		return node;
2037 	}
2038 
2039 	for_each_node_state(n, N_HIGH_MEMORY) {
2040 		cpumask_t tmp;
2041 
2042 		/* Don't want a node to appear more than once */
2043 		if (node_isset(n, *used_node_mask))
2044 			continue;
2045 
2046 		/* Use the distance array to find the distance */
2047 		val = node_distance(node, n);
2048 
2049 		/* Penalize nodes under us ("prefer the next node") */
2050 		val += (n < node);
2051 
2052 		/* Give preference to headless and unused nodes */
2053 		tmp = node_to_cpumask(n);
2054 		if (!cpus_empty(tmp))
2055 			val += PENALTY_FOR_NODE_WITH_CPUS;
2056 
2057 		/* Slight preference for less loaded node */
2058 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2059 		val += node_load[n];
2060 
2061 		if (val < min_val) {
2062 			min_val = val;
2063 			best_node = n;
2064 		}
2065 	}
2066 
2067 	if (best_node >= 0)
2068 		node_set(best_node, *used_node_mask);
2069 
2070 	return best_node;
2071 }
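
/*
 * Worked example (illustrative numbers, assuming PENALTY_FOR_NODE_WITH_CPUS
 * is 1 and equal node_load): from node 0, candidate node 1 at distance 20
 * with CPUs scores 20 + 0 + 1 = 21 before scaling, while headless node 2 at
 * the same distance scores 20, so the headless node is picked first.
 */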
2072 
2073 
2074 /*
2075  * Build zonelists ordered by node and zones within node.
2076  * This results in maximum locality--normal zone overflows into local
2077  * DMA zone, if any--but risks exhausting DMA zone.
2078  */
2079 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2080 {
2081 	enum zone_type i;
2082 	int j;
2083 	struct zonelist *zonelist;
2084 
2085 	for (i = 0; i < MAX_NR_ZONES; i++) {
2086 		zonelist = pgdat->node_zonelists + i;
2087 		for (j = 0; zonelist->zones[j] != NULL; j++)
2088 			;
2089  		j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2090 		zonelist->zones[j] = NULL;
2091 	}
2092 }
2093 
2094 /*
2095  * Build gfp_thisnode zonelists
2096  */
2097 static void build_thisnode_zonelists(pg_data_t *pgdat)
2098 {
2099 	enum zone_type i;
2100 	int j;
2101 	struct zonelist *zonelist;
2102 
2103 	for (i = 0; i < MAX_NR_ZONES; i++) {
2104 		zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
2105 		j = build_zonelists_node(pgdat, zonelist, 0, i);
2106 		zonelist->zones[j] = NULL;
2107 	}
2108 }
2109 
2110 /*
2111  * Build zonelists ordered by zone and nodes within zones.
2112  * This results in conserving DMA zone[s] until all Normal memory is
2113  * exhausted, but results in overflowing to remote node while memory
2114  * may still exist in local DMA zone.
2115  */
2116 static int node_order[MAX_NUMNODES];
2117 
2118 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2119 {
2120 	enum zone_type i;
2121 	int pos, j, node;
2122 	int zone_type;		/* needs to be signed */
2123 	struct zone *z;
2124 	struct zonelist *zonelist;
2125 
2126 	for (i = 0; i < MAX_NR_ZONES; i++) {
2127 		zonelist = pgdat->node_zonelists + i;
2128 		pos = 0;
2129 		for (zone_type = i; zone_type >= 0; zone_type--) {
2130 			for (j = 0; j < nr_nodes; j++) {
2131 				node = node_order[j];
2132 				z = &NODE_DATA(node)->node_zones[zone_type];
2133 				if (populated_zone(z)) {
2134 					zonelist->zones[pos++] = z;
2135 					check_highest_zone(zone_type);
2136 				}
2137 			}
2138 		}
2139 		zonelist->zones[pos] = NULL;
2140 	}
2141 }
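
/*
 * Example: on a two-node machine where each node has populated DMA and
 * Normal zones, node 0's GFP_KERNEL fallback list is
 *   node order: N0/Normal, N0/DMA, N1/Normal, N1/DMA
 *   zone order: N0/Normal, N1/Normal, N0/DMA, N1/DMA
 * i.e. zone order only falls back to any DMA zone once all Normal zones
 * are exhausted.
 */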
2142 
2143 static int default_zonelist_order(void)
2144 {
2145 	int nid, zone_type;
2146 	unsigned long low_kmem_size, total_size;
2147 	struct zone *z;
2148 	int average_size;
2149 	/*
2150 	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2151 	 * If they are really small and used heavily, the system can fall
2152 	 * into OOM very easily.
2153 	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
2154 	 */
2155 	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone) */
2156 	low_kmem_size = 0;
2157 	total_size = 0;
2158 	for_each_online_node(nid) {
2159 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2160 			z = &NODE_DATA(nid)->node_zones[zone_type];
2161 			if (populated_zone(z)) {
2162 				if (zone_type < ZONE_NORMAL)
2163 					low_kmem_size += z->present_pages;
2164 				total_size += z->present_pages;
2165 			}
2166 		}
2167 	}
2168 	if (!low_kmem_size ||  /* there are no DMA area. */
2169 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2170 		return ZONELIST_ORDER_NODE;
2171 	/*
2172 	 * Look into each node's config.
2173 	 * If there is a node whose DMA/DMA32 memory covers a large part of
2174 	 * its local memory, NODE order may be suitable.
2175 	 */
2176 	average_size = total_size /
2177 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2178 	for_each_online_node(nid) {
2179 		low_kmem_size = 0;
2180 		total_size = 0;
2181 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2182 			z = &NODE_DATA(nid)->node_zones[zone_type];
2183 			if (populated_zone(z)) {
2184 				if (zone_type < ZONE_NORMAL)
2185 					low_kmem_size += z->present_pages;
2186 				total_size += z->present_pages;
2187 			}
2188 		}
2189 		if (low_kmem_size &&
2190 		    total_size > average_size && /* ignore small node */
2191 		    low_kmem_size > total_size * 70/100)
2192 			return ZONELIST_ORDER_NODE;
2193 	}
2194 	return ZONELIST_ORDER_ZONE;
2195 }
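
/*
 * Worked example (illustrative numbers): a machine with 16 MiB of DMA
 * memory out of 4 GiB total has a nonzero low_kmem_size well under half of
 * total_size, so the first check falls through; if no larger-than-average
 * node has DMA/DMA32 exceeding 70% of its memory either, ZONE order is
 * chosen to protect the small DMA area.
 */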
2196 
2197 static void set_zonelist_order(void)
2198 {
2199 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2200 		current_zonelist_order = default_zonelist_order();
2201 	else
2202 		current_zonelist_order = user_zonelist_order;
2203 }
2204 
2205 static void build_zonelists(pg_data_t *pgdat)
2206 {
2207 	int j, node, load;
2208 	enum zone_type i;
2209 	nodemask_t used_mask;
2210 	int local_node, prev_node;
2211 	struct zonelist *zonelist;
2212 	int order = current_zonelist_order;
2213 
2214 	/* initialize zonelists */
2215 	for (i = 0; i < MAX_ZONELISTS; i++) {
2216 		zonelist = pgdat->node_zonelists + i;
2217 		zonelist->zones[0] = NULL;
2218 	}
2219 
2220 	/* NUMA-aware ordering of nodes */
2221 	local_node = pgdat->node_id;
2222 	load = num_online_nodes();
2223 	prev_node = local_node;
2224 	nodes_clear(used_mask);
2225 
2226 	memset(node_load, 0, sizeof(node_load));
2227 	memset(node_order, 0, sizeof(node_order));
2228 	j = 0;
2229 
2230 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2231 		int distance = node_distance(local_node, node);
2232 
2233 		/*
2234 		 * If another node is sufficiently far away then it is better
2235 		 * to reclaim pages in a zone before going off node.
2236 		 */
2237 		if (distance > RECLAIM_DISTANCE)
2238 			zone_reclaim_mode = 1;
2239 
2240 		/*
2241 		 * We don't want to pressure a particular node.
2242 		 * So add a penalty to the first node in the same
2243 		 * distance group to make the selection round-robin.
2244 		 */
2245 		if (distance != node_distance(local_node, prev_node))
2246 			node_load[node] = load;
2247 
2248 		prev_node = node;
2249 		load--;
2250 		if (order == ZONELIST_ORDER_NODE)
2251 			build_zonelists_in_node_order(pgdat, node);
2252 		else
2253 			node_order[j++] = node;	/* remember order */
2254 	}
2255 
2256 	if (order == ZONELIST_ORDER_ZONE) {
2257 		/* calculate node order -- i.e., DMA last! */
2258 		build_zonelists_in_zone_order(pgdat, j);
2259 	}
2260 
2261 	build_thisnode_zonelists(pgdat);
2262 }
2263 
2264 /* Construct the zonelist performance cache - see further mmzone.h */
2265 static void build_zonelist_cache(pg_data_t *pgdat)
2266 {
2267 	int i;
2268 
2269 	for (i = 0; i < MAX_NR_ZONES; i++) {
2270 		struct zonelist *zonelist;
2271 		struct zonelist_cache *zlc;
2272 		struct zone **z;
2273 
2274 		zonelist = pgdat->node_zonelists + i;
2275 		zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2276 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2277 		for (z = zonelist->zones; *z; z++)
2278 			zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
2279 	}
2280 }
2281 
2282 
2283 #else	/* CONFIG_NUMA */
2284 
2285 static void set_zonelist_order(void)
2286 {
2287 	current_zonelist_order = ZONELIST_ORDER_ZONE;
2288 }
2289 
2290 static void build_zonelists(pg_data_t *pgdat)
2291 {
2292 	int node, local_node;
2293 	enum zone_type i,j;
2294 
2295 	local_node = pgdat->node_id;
2296 	for (i = 0; i < MAX_NR_ZONES; i++) {
2297 		struct zonelist *zonelist;
2298 
2299 		zonelist = pgdat->node_zonelists + i;
2300 
2301  		j = build_zonelists_node(pgdat, zonelist, 0, i);
2302  		/*
2303  		 * Now we build the zonelist so that it contains the zones
2304  		 * of all the other nodes.
2305  		 * We don't want to pressure a particular node, so when
2306  		 * building the zones for node N, we make sure that the
2307  		 * zones coming right after the local ones are those from
2308  		 * node N+1 (modulo N)
2309  		 */
2310 		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2311 			if (!node_online(node))
2312 				continue;
2313 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2314 		}
2315 		for (node = 0; node < local_node; node++) {
2316 			if (!node_online(node))
2317 				continue;
2318 			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2319 		}
2320 
2321 		zonelist->zones[j] = NULL;
2322 	}
2323 }
2324 
2325 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2326 static void build_zonelist_cache(pg_data_t *pgdat)
2327 {
2328 	int i;
2329 
2330 	for (i = 0; i < MAX_NR_ZONES; i++)
2331 		pgdat->node_zonelists[i].zlcache_ptr = NULL;
2332 }
2333 
2334 #endif	/* CONFIG_NUMA */
2335 
2336 /* Returns int just to match the callback type expected by stop_machine_run() */
2337 static int __build_all_zonelists(void *dummy)
2338 {
2339 	int nid;
2340 
2341 	for_each_online_node(nid) {
2342 		pg_data_t *pgdat = NODE_DATA(nid);
2343 
2344 		build_zonelists(pgdat);
2345 		build_zonelist_cache(pgdat);
2346 	}
2347 	return 0;
2348 }
2349 
2350 void build_all_zonelists(void)
2351 {
2352 	set_zonelist_order();
2353 
2354 	if (system_state == SYSTEM_BOOTING) {
2355 		__build_all_zonelists(NULL);
2356 		cpuset_init_current_mems_allowed();
2357 	} else {
2358 		/* We have to stop all CPUs to guarantee there is no user
2359 		   of the zonelists */
2360 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2361 		/* cpuset refresh routine should be here */
2362 	}
2363 	vm_total_pages = nr_free_pagecache_pages();
2364 	/*
2365 	 * Disable grouping by mobility if the number of pages in the
2366 	 * system is too low to allow the mechanism to work. It would be
2367 	 * more accurate, but expensive to check per-zone. This check is
2368 	 * made on memory-hotadd so a system can start with mobility
2369 	 * disabled and enable it later
2370 	 */
2371 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2372 		page_group_by_mobility_disabled = 1;
2373 	else
2374 		page_group_by_mobility_disabled = 0;
2375 
2376 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2377 		"Total pages: %ld\n",
2378 			num_online_nodes(),
2379 			zonelist_order_name[current_zonelist_order],
2380 			page_group_by_mobility_disabled ? "off" : "on",
2381 			vm_total_pages);
2382 #ifdef CONFIG_NUMA
2383 	printk("Policy zone: %s\n", zone_names[policy_zone]);
2384 #endif
2385 }
2386 
2387 /*
2388  * Helper functions to size the waitqueue hash table.
2389  * Essentially these want to choose hash table sizes sufficiently
2390  * large so that collisions trying to wait on pages are rare.
2391  * But in fact, the number of active page waitqueues on typical
2392  * systems is ridiculously low, less than 200, so this sizing is still
2393  * conservative, even though it seems large.
2394  *
2395  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2396  * waitqueues, i.e. the size of the waitq table given the number of pages.
2397  */
2398 #define PAGES_PER_WAITQUEUE	256
2399 
2400 #ifndef CONFIG_MEMORY_HOTPLUG
2401 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2402 {
2403 	unsigned long size = 1;
2404 
2405 	pages /= PAGES_PER_WAITQUEUE;
2406 
2407 	while (size < pages)
2408 		size <<= 1;
2409 
2410 	/*
2411 	 * Once we have dozens or even hundreds of threads sleeping
2412 	 * on IO we've got bigger problems than wait queue collision.
2413 	 * Limit the size of the wait table to a reasonable size.
2414 	 */
2415 	size = min(size, 4096UL);
2416 
2417 	return max(size, 4UL);
2418 }
2419 #else
2420 /*
2421  * A zone's size might be changed by hot-add, so it is not possible to determine
2422  * a suitable size for its wait_table.  So we use the maximum size now.
2423  *
2424  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2425  *
2426  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2427  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2428  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2429  *
2430  * With the traditional sizing (see above), the maximum number of entries
2431  * is reached when a zone's memory is (512K + 256) pages or more. It equals:
2432  *
2433  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2434  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2435  *    powerpc (64K page size)             : =  (32G +16M)byte.
2436  */
2437 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2438 {
2439 	return 4096UL;
2440 }
2441 #endif
2442 
2443 /*
2444  * This is an integer logarithm so that shifts can be used later
2445  * to extract the more random high bits from the multiplicative
2446  * hash function before the remainder is taken.
2447  */
2448 static inline unsigned long wait_table_bits(unsigned long size)
2449 {
2450 	return ffz(~size);
2451 }
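
/*
 * Worked example (illustrative, assuming 4 KiB pages): a 1 GiB zone has
 * 262144 pages, so 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of
 * two, giving 1024 hash entries; wait_table_bits(1024) is then 10, the
 * base-2 log used by the multiplicative hash.
 */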
2452 
2453 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2454 
2455 /*
2456  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2457  * of blocks reserved is based on zone->pages_min. The memory within the
2458  * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2459  * higher will lead to a bigger reserve which will get freed as contiguous
2460  * blocks as reclaim kicks in.
2461  */
2462 static void setup_zone_migrate_reserve(struct zone *zone)
2463 {
2464 	unsigned long start_pfn, pfn, end_pfn;
2465 	struct page *page;
2466 	unsigned long reserve, block_migratetype;
2467 
2468 	/* Get the start pfn, end pfn and the number of blocks to reserve */
2469 	start_pfn = zone->zone_start_pfn;
2470 	end_pfn = start_pfn + zone->spanned_pages;
2471 	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2472 							pageblock_order;
2473 
2474 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2475 		if (!pfn_valid(pfn))
2476 			continue;
2477 		page = pfn_to_page(pfn);
2478 
2479 		/* Blocks with reserved pages will never be freed, skip them. */
2480 		if (PageReserved(page))
2481 			continue;
2482 
2483 		block_migratetype = get_pageblock_migratetype(page);
2484 
2485 		/* If this block is reserved, account for it */
2486 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2487 			reserve--;
2488 			continue;
2489 		}
2490 
2491 		/* Suitable for reserving if this block is movable */
2492 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2493 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2494 			move_freepages_block(zone, page, MIGRATE_RESERVE);
2495 			reserve--;
2496 			continue;
2497 		}
2498 
2499 		/*
2500 		 * If the reserve is met and this is a previous reserved block,
2501 		 * take it back
2502 		 */
2503 		if (block_migratetype == MIGRATE_RESERVE) {
2504 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2505 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2506 		}
2507 	}
2508 }
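
/*
 * Worked example (illustrative, assuming pageblock_order = 10, i.e.
 * pageblock_nr_pages = 1024): with zone->pages_min = 1441, the reserve is
 * roundup(1441, 1024) >> 10 = 2 pageblocks, carved from the first suitable
 * MIGRATE_MOVABLE blocks in the zone.
 */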
2509 
2510 /*
2511  * Initially all pages are reserved - free ones are freed
2512  * up by free_all_bootmem() once the early boot process is
2513  * done. Non-atomic initialization, single-pass.
2514  */
2515 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2516 		unsigned long start_pfn, enum memmap_context context)
2517 {
2518 	struct page *page;
2519 	unsigned long end_pfn = start_pfn + size;
2520 	unsigned long pfn;
2521 
2522 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2523 		/*
2524 		 * There can be holes in boot-time mem_map[]s
2525 		 * handed to this function.  They do not
2526 		 * exist on hotplugged memory.
2527 		 */
2528 		if (context == MEMMAP_EARLY) {
2529 			if (!early_pfn_valid(pfn))
2530 				continue;
2531 			if (!early_pfn_in_nid(pfn, nid))
2532 				continue;
2533 		}
2534 		page = pfn_to_page(pfn);
2535 		set_page_links(page, zone, nid, pfn);
2536 		init_page_count(page);
2537 		reset_page_mapcount(page);
2538 		SetPageReserved(page);
2539 
2540 		/*
2541 		 * Mark the block movable so that blocks are reserved for
2542 		 * movable at startup. This will force kernel allocations
2543 		 * to reserve their blocks rather than leaking throughout
2544 		 * the address space during boot when many long-lived
2545 		 * kernel allocations are made. Later some blocks near
2546 		 * the start are marked MIGRATE_RESERVE by
2547 		 * setup_zone_migrate_reserve()
2548 		 */
2549 		if ((pfn & (pageblock_nr_pages-1)))
2550 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2551 
2552 		INIT_LIST_HEAD(&page->lru);
2553 #ifdef WANT_PAGE_VIRTUAL
2554 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2555 		if (!is_highmem_idx(zone))
2556 			set_page_address(page, __va(pfn << PAGE_SHIFT));
2557 #endif
2558 	}
2559 }
2560 
2561 static void __meminit zone_init_free_lists(struct zone *zone)
2562 {
2563 	int order, t;
2564 	for_each_migratetype_order(order, t) {
2565 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2566 		zone->free_area[order].nr_free = 0;
2567 	}
2568 }
2569 
2570 #ifndef __HAVE_ARCH_MEMMAP_INIT
2571 #define memmap_init(size, nid, zone, start_pfn) \
2572 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2573 #endif
2574 
2575 static int zone_batchsize(struct zone *zone)
2576 {
2577 	int batch;
2578 
2579 	/*
2580 	 * The per-cpu-pages pools are set to around 1000th of the
2581 	 * size of the zone.  But no more than 1/2 of a meg.
2582 	 *
2583 	 * OK, so we don't know how big the cache is.  So guess.
2584 	 */
2585 	batch = zone->present_pages / 1024;
2586 	if (batch * PAGE_SIZE > 512 * 1024)
2587 		batch = (512 * 1024) / PAGE_SIZE;
2588 	batch /= 4;		/* We effectively *= 4 below */
2589 	if (batch < 1)
2590 		batch = 1;
2591 
2592 	/*
2593 	 * Clamp the batch to a 2^n - 1 value. Having a power
2594 	 * of 2 value was found to be more likely to have
2595 	 * suboptimal cache aliasing properties in some cases.
2596 	 *
2597 	 * For example if 2 tasks are alternately allocating
2598 	 * batches of pages, one task can end up with a lot
2599 	 * of pages of one half of the possible page colors
2600 	 * and the other with pages of the other colors.
2601 	 */
2602 	batch = (1 << (fls(batch + batch/2)-1)) - 1;
2603 
2604 	return batch;
2605 }
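
/*
 * Worked example (illustrative, assuming 4 KiB pages): a 1 GiB zone has
 * present_pages = 262144, so batch = 256; 256 * 4096 exceeds 512 KiB, so
 * batch is clamped to 128, then divided by 4 to 32; the final rounding
 * gives (1 << (fls(48) - 1)) - 1 = 31.
 */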
2606 
2607 inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2608 {
2609 	struct per_cpu_pages *pcp;
2610 
2611 	memset(p, 0, sizeof(*p));
2612 
2613 	pcp = &p->pcp;
2614 	pcp->count = 0;
2615 	pcp->high = 6 * batch;
2616 	pcp->batch = max(1UL, 1 * batch);
2617 	INIT_LIST_HEAD(&pcp->list);
2618 }
2619 
2620 /*
2621  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2622  * to the value high for the pageset p.
2623  */
2624 
2625 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2626 				unsigned long high)
2627 {
2628 	struct per_cpu_pages *pcp;
2629 
2630 	pcp = &p->pcp;
2631 	pcp->high = high;
2632 	pcp->batch = max(1UL, high/4);
2633 	if ((high/4) > (PAGE_SHIFT * 8))
2634 		pcp->batch = PAGE_SHIFT * 8;
2635 }
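
/*
 * Worked example (illustrative, assuming PAGE_SHIFT = 12): for a zone with
 * 262144 present pages and percpu_pagelist_fraction = 8, high = 32768 and
 * high/4 = 8192, which exceeds PAGE_SHIFT * 8 = 96, so the batch is capped
 * at 96 pages.
 */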
2636 
2637 
2638 #ifdef CONFIG_NUMA
2639 /*
2640  * Boot pageset table. One per cpu which is going to be used for all
2641  * zones and all nodes. The parameters will be set in such a way
2642  * that an item put on a list will immediately be handed over to
2643  * the buddy list. This is safe since pageset manipulation is done
2644  * with interrupts disabled.
2645  *
2646  * Some NUMA counter updates may also be caught by the boot pagesets.
2647  *
2648  * The boot_pagesets must be kept even after bootup is complete for
2649  * unused processors and/or zones. They do play a role for bootstrapping
2650  * hotplugged processors.
2651  *
2652  * zoneinfo_show() and maybe other functions do
2653  * not check if the processor is online before following the pageset pointer.
2654  * Other parts of the kernel may not check if the zone is available.
2655  */
2656 static struct per_cpu_pageset boot_pageset[NR_CPUS];
2657 
2658 /*
2659  * Dynamically allocate memory for the
2660  * per cpu pageset array in struct zone.
2661  */
2662 static int __cpuinit process_zones(int cpu)
2663 {
2664 	struct zone *zone, *dzone;
2665 	int node = cpu_to_node(cpu);
2666 
2667 	node_set_state(node, N_CPU);	/* this node has a cpu */
2668 
2669 	for_each_zone(zone) {
2670 
2671 		if (!populated_zone(zone))
2672 			continue;
2673 
2674 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2675 					 GFP_KERNEL, node);
2676 		if (!zone_pcp(zone, cpu))
2677 			goto bad;
2678 
2679 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2680 
2681 		if (percpu_pagelist_fraction)
2682 			setup_pagelist_highmark(zone_pcp(zone, cpu),
2683 			 	(zone->present_pages / percpu_pagelist_fraction));
2684 	}
2685 
2686 	return 0;
2687 bad:
2688 	for_each_zone(dzone) {
2689 		if (!populated_zone(dzone))
2690 			continue;
2691 		if (dzone == zone)
2692 			break;
2693 		kfree(zone_pcp(dzone, cpu));
2694 		zone_pcp(dzone, cpu) = NULL;
2695 	}
2696 	return -ENOMEM;
2697 }
2698 
2699 static inline void free_zone_pagesets(int cpu)
2700 {
2701 	struct zone *zone;
2702 
2703 	for_each_zone(zone) {
2704 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2705 
2706 		/* Free per_cpu_pageset if it is slab allocated */
2707 		if (pset != &boot_pageset[cpu])
2708 			kfree(pset);
2709 		zone_pcp(zone, cpu) = NULL;
2710 	}
2711 }
2712 
2713 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2714 		unsigned long action,
2715 		void *hcpu)
2716 {
2717 	int cpu = (long)hcpu;
2718 	int ret = NOTIFY_OK;
2719 
2720 	switch (action) {
2721 	case CPU_UP_PREPARE:
2722 	case CPU_UP_PREPARE_FROZEN:
2723 		if (process_zones(cpu))
2724 			ret = NOTIFY_BAD;
2725 		break;
2726 	case CPU_UP_CANCELED:
2727 	case CPU_UP_CANCELED_FROZEN:
2728 	case CPU_DEAD:
2729 	case CPU_DEAD_FROZEN:
2730 		free_zone_pagesets(cpu);
2731 		break;
2732 	default:
2733 		break;
2734 	}
2735 	return ret;
2736 }
2737 
2738 static struct notifier_block __cpuinitdata pageset_notifier =
2739 	{ &pageset_cpuup_callback, NULL, 0 };
2740 
2741 void __init setup_per_cpu_pageset(void)
2742 {
2743 	int err;
2744 
2745 	/* Initialize per_cpu_pageset for cpu 0.
2746 	 * A cpuup callback will do this for every cpu
2747 	 * as it comes online
2748 	 */
2749 	err = process_zones(smp_processor_id());
2750 	BUG_ON(err);
2751 	register_cpu_notifier(&pageset_notifier);
2752 }
2753 
2754 #endif
2755 
2756 static noinline __init_refok
2757 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2758 {
2759 	int i;
2760 	struct pglist_data *pgdat = zone->zone_pgdat;
2761 	size_t alloc_size;
2762 
2763 	/*
2764 	 * The per-page waitqueue mechanism uses hashed waitqueues
2765 	 * per zone.
2766 	 */
2767 	zone->wait_table_hash_nr_entries =
2768 		 wait_table_hash_nr_entries(zone_size_pages);
2769 	zone->wait_table_bits =
2770 		wait_table_bits(zone->wait_table_hash_nr_entries);
2771 	alloc_size = zone->wait_table_hash_nr_entries
2772 					* sizeof(wait_queue_head_t);
2773 
2774  	if (system_state == SYSTEM_BOOTING) {
2775 		zone->wait_table = (wait_queue_head_t *)
2776 			alloc_bootmem_node(pgdat, alloc_size);
2777 	} else {
2778 		/*
2779 		 * This case means that a zone whose size was 0 gets new memory
2780 		 * via memory hot-add.
2781 		 * But it may also be that a new node was hot-added.  In that
2782 		 * case vmalloc() will not be able to use this new node's
2783 		 * memory - this wait_table must be initialized to use this new
2784 		 * node itself as well.
2785 		 * Using this new node's memory will require further
2786 		 * consideration.
2787 		 */
2788 		zone->wait_table = vmalloc(alloc_size);
2789 	}
2790 	if (!zone->wait_table)
2791 		return -ENOMEM;
2792 
2793 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2794 		init_waitqueue_head(zone->wait_table + i);
2795 
2796 	return 0;
2797 }
2798 
2799 static __meminit void zone_pcp_init(struct zone *zone)
2800 {
2801 	int cpu;
2802 	unsigned long batch = zone_batchsize(zone);
2803 
2804 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
2805 #ifdef CONFIG_NUMA
2806 		/* Early boot. Slab allocator not functional yet */
2807 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
2808 		setup_pageset(&boot_pageset[cpu],0);
2809 #else
2810 		setup_pageset(zone_pcp(zone,cpu), batch);
2811 #endif
2812 	}
2813 	if (zone->present_pages)
2814 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2815 			zone->name, zone->present_pages, batch);
2816 }
2817 
2818 __meminit int init_currently_empty_zone(struct zone *zone,
2819 					unsigned long zone_start_pfn,
2820 					unsigned long size,
2821 					enum memmap_context context)
2822 {
2823 	struct pglist_data *pgdat = zone->zone_pgdat;
2824 	int ret;
2825 	ret = zone_wait_table_init(zone, size);
2826 	if (ret)
2827 		return ret;
2828 	pgdat->nr_zones = zone_idx(zone) + 1;
2829 
2830 	zone->zone_start_pfn = zone_start_pfn;
2831 
2832 	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2833 
2834 	zone_init_free_lists(zone);
2835 
2836 	return 0;
2837 }
2838 
2839 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2840 /*
2841  * Basic iterator support. Return the first range of PFNs for a node
2842  * Note: nid == MAX_NUMNODES returns first region regardless of node
2843  */
2844 static int __meminit first_active_region_index_in_nid(int nid)
2845 {
2846 	int i;
2847 
2848 	for (i = 0; i < nr_nodemap_entries; i++)
2849 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2850 			return i;
2851 
2852 	return -1;
2853 }
2854 
2855 /*
2856  * Basic iterator support. Return the next active range of PFNs for a node
2857  * Note: nid == MAX_NUMNODES returns next region regardless of node
2858  */
2859 static int __meminit next_active_region_index_in_nid(int index, int nid)
2860 {
2861 	for (index = index + 1; index < nr_nodemap_entries; index++)
2862 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2863 			return index;
2864 
2865 	return -1;
2866 }
2867 
2868 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2869 /*
2870  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2871  * Architectures may implement their own version but if add_active_range()
2872  * was used and there are no special requirements, this is a convenient
2873  * alternative
2874  */
2875 int __meminit early_pfn_to_nid(unsigned long pfn)
2876 {
2877 	int i;
2878 
2879 	for (i = 0; i < nr_nodemap_entries; i++) {
2880 		unsigned long start_pfn = early_node_map[i].start_pfn;
2881 		unsigned long end_pfn = early_node_map[i].end_pfn;
2882 
2883 		if (start_pfn <= pfn && pfn < end_pfn)
2884 			return early_node_map[i].nid;
2885 	}
2886 
2887 	return 0;
2888 }
2889 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2890 
2891 /* Basic iterator support to walk early_node_map[] */
2892 #define for_each_active_range_index_in_nid(i, nid) \
2893 	for (i = first_active_region_index_in_nid(nid); i != -1; \
2894 				i = next_active_region_index_in_nid(i, nid))
2895 
2896 /**
2897  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2898  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2899  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2900  *
2901  * If an architecture guarantees that all ranges registered with
2902  * add_active_ranges() contain no holes and may be freed, this
2903  * function may be used instead of calling free_bootmem() manually.
2904  */
2905 void __init free_bootmem_with_active_regions(int nid,
2906 						unsigned long max_low_pfn)
2907 {
2908 	int i;
2909 
2910 	for_each_active_range_index_in_nid(i, nid) {
2911 		unsigned long size_pages = 0;
2912 		unsigned long end_pfn = early_node_map[i].end_pfn;
2913 
2914 		if (early_node_map[i].start_pfn >= max_low_pfn)
2915 			continue;
2916 
2917 		if (end_pfn > max_low_pfn)
2918 			end_pfn = max_low_pfn;
2919 
2920 		size_pages = end_pfn - early_node_map[i].start_pfn;
2921 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2922 				PFN_PHYS(early_node_map[i].start_pfn),
2923 				size_pages << PAGE_SHIFT);
2924 	}
2925 }
2926 
2927 /**
2928  * sparse_memory_present_with_active_regions - Call memory_present for each active range
2929  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
2930  *
2931  * If an architecture guarantees that all ranges registered with
2932  * add_active_ranges() contain no holes and may be freed, this
2933  * function may be used instead of calling memory_present() manually.
2934  */
2935 void __init sparse_memory_present_with_active_regions(int nid)
2936 {
2937 	int i;
2938 
2939 	for_each_active_range_index_in_nid(i, nid)
2940 		memory_present(early_node_map[i].nid,
2941 				early_node_map[i].start_pfn,
2942 				early_node_map[i].end_pfn);
2943 }
2944 
2945 /**
2946  * push_node_boundaries - Push node boundaries to at least the requested boundary
2947  * @nid: The nid of the node to push the boundary for
2948  * @start_pfn: The start pfn of the node
2949  * @end_pfn: The end pfn of the node
2950  *
2951  * In reserve-based hot-add, a mem_map is allocated that is unused until hot-add
2952  * time. Specifically, on x86_64, SRAT will report ranges that can potentially
2953  * be hotplugged even though no physical memory exists. This function allows
2954  * an arch to push out the node boundaries so mem_map is allocated that can
2955  * be used later.
2956  */
2957 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2958 void __init push_node_boundaries(unsigned int nid,
2959 		unsigned long start_pfn, unsigned long end_pfn)
2960 {
2961 	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2962 			nid, start_pfn, end_pfn);
2963 
2964 	/* Initialise the boundary for this node if necessary */
2965 	if (node_boundary_end_pfn[nid] == 0)
2966 		node_boundary_start_pfn[nid] = -1UL;
2967 
2968 	/* Update the boundaries */
2969 	if (node_boundary_start_pfn[nid] > start_pfn)
2970 		node_boundary_start_pfn[nid] = start_pfn;
2971 	if (node_boundary_end_pfn[nid] < end_pfn)
2972 		node_boundary_end_pfn[nid] = end_pfn;
2973 }
2974 
2975 /* If necessary, push the node boundary out for reserve hotadd */
2976 static void __meminit account_node_boundary(unsigned int nid,
2977 		unsigned long *start_pfn, unsigned long *end_pfn)
2978 {
2979 	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2980 			nid, *start_pfn, *end_pfn);
2981 
2982 	/* Return if boundary information has not been provided */
2983 	if (node_boundary_end_pfn[nid] == 0)
2984 		return;
2985 
2986 	/* Check the boundaries and update if necessary */
2987 	if (node_boundary_start_pfn[nid] < *start_pfn)
2988 		*start_pfn = node_boundary_start_pfn[nid];
2989 	if (node_boundary_end_pfn[nid] > *end_pfn)
2990 		*end_pfn = node_boundary_end_pfn[nid];
2991 }
2992 #else
2993 void __init push_node_boundaries(unsigned int nid,
2994 		unsigned long start_pfn, unsigned long end_pfn) {}
2995 
2996 static void __meminit account_node_boundary(unsigned int nid,
2997 		unsigned long *start_pfn, unsigned long *end_pfn) {}
2998 #endif
2999 
3000 
3001 /**
3002  * get_pfn_range_for_nid - Return the start and end page frames for a node
3003  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3004  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3005  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3006  *
3007  * It returns the start and end page frame of a node based on information
3008  * provided by an arch calling add_active_range(). If called for a node
3009  * with no available memory, a warning is printed and the start and end
3010  * PFNs will be 0.
3011  */
3012 void __meminit get_pfn_range_for_nid(unsigned int nid,
3013 			unsigned long *start_pfn, unsigned long *end_pfn)
3014 {
3015 	int i;
3016 	*start_pfn = -1UL;
3017 	*end_pfn = 0;
3018 
3019 	for_each_active_range_index_in_nid(i, nid) {
3020 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3021 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3022 	}
3023 
3024 	if (*start_pfn == -1UL)
3025 		*start_pfn = 0;
3026 
3027 	/* Push the node boundaries out if requested */
3028 	account_node_boundary(nid, start_pfn, end_pfn);
3029 }
3030 
3031 /*
3032  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3033  * assumption is made that zones within a node are ordered by monotonically
3034  * increasing memory addresses so that the "highest" populated zone is used.
3035  */
3036 void __init find_usable_zone_for_movable(void)
3037 {
3038 	int zone_index;
3039 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3040 		if (zone_index == ZONE_MOVABLE)
3041 			continue;
3042 
3043 		if (arch_zone_highest_possible_pfn[zone_index] >
3044 				arch_zone_lowest_possible_pfn[zone_index])
3045 			break;
3046 	}
3047 
3048 	VM_BUG_ON(zone_index == -1);
3049 	movable_zone = zone_index;
3050 }
3051 
3052 /*
3053  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3054  * because it is sized independently of the architecture. Unlike the other zones,
3055  * the starting point for ZONE_MOVABLE is not fixed. It may be different
3056  * in each node depending on the size of each node and how evenly kernelcore
3057  * is distributed. This helper function adjusts the zone ranges
3058  * provided by the architecture for a given node by using the end of the
3059  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3060  * zones within a node are ordered by monotonically increasing memory addresses.
3061  */
3062 void __meminit adjust_zone_range_for_zone_movable(int nid,
3063 					unsigned long zone_type,
3064 					unsigned long node_start_pfn,
3065 					unsigned long node_end_pfn,
3066 					unsigned long *zone_start_pfn,
3067 					unsigned long *zone_end_pfn)
3068 {
3069 	/* Only adjust if ZONE_MOVABLE is on this node */
3070 	if (zone_movable_pfn[nid]) {
3071 		/* Size ZONE_MOVABLE */
3072 		if (zone_type == ZONE_MOVABLE) {
3073 			*zone_start_pfn = zone_movable_pfn[nid];
3074 			*zone_end_pfn = min(node_end_pfn,
3075 				arch_zone_highest_possible_pfn[movable_zone]);
3076 
3077 		/* Adjust for ZONE_MOVABLE starting within this range */
3078 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3079 				*zone_end_pfn > zone_movable_pfn[nid]) {
3080 			*zone_end_pfn = zone_movable_pfn[nid];
3081 
3082 		/* Check if this whole range is within ZONE_MOVABLE */
3083 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3084 			*zone_start_pfn = *zone_end_pfn;
3085 	}
3086 }
3087 
3088 /*
3089  * Return the number of pages a zone spans in a node, including holes
3090  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3091  */
3092 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3093 					unsigned long zone_type,
3094 					unsigned long *ignored)
3095 {
3096 	unsigned long node_start_pfn, node_end_pfn;
3097 	unsigned long zone_start_pfn, zone_end_pfn;
3098 
3099 	/* Get the start and end of the node and zone */
3100 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3101 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3102 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3103 	adjust_zone_range_for_zone_movable(nid, zone_type,
3104 				node_start_pfn, node_end_pfn,
3105 				&zone_start_pfn, &zone_end_pfn);
3106 
3107 	/* Check that this node has pages within the zone's required range */
3108 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3109 		return 0;
3110 
3111 	/* Move the zone boundaries inside the node if necessary */
3112 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3113 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3114 
3115 	/* Return the spanned pages */
3116 	return zone_end_pfn - zone_start_pfn;
3117 }
3118 
3119 /*
3120  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3121  * then all holes in the requested range will be accounted for.
3122  */
3123 unsigned long __meminit __absent_pages_in_range(int nid,
3124 				unsigned long range_start_pfn,
3125 				unsigned long range_end_pfn)
3126 {
3127 	int i = 0;
3128 	unsigned long prev_end_pfn = 0, hole_pages = 0;
3129 	unsigned long start_pfn;
3130 
3131 	/* Find the end_pfn of the first active range of pfns in the node */
3132 	i = first_active_region_index_in_nid(nid);
3133 	if (i == -1)
3134 		return 0;
3135 
3136 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3137 
3138 	/* Account for ranges before physical memory on this node */
3139 	if (early_node_map[i].start_pfn > range_start_pfn)
3140 		hole_pages = prev_end_pfn - range_start_pfn;
3141 
3142 	/* Find all holes for the zone within the node */
3143 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3144 
3145 		/* No need to continue if prev_end_pfn is outside the zone */
3146 		if (prev_end_pfn >= range_end_pfn)
3147 			break;
3148 
3149 		/* Make sure the end of the zone is not within the hole */
3150 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3151 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3152 
3153 		/* Update the hole size count and move on */
3154 		if (start_pfn > range_start_pfn) {
3155 			BUG_ON(prev_end_pfn > start_pfn);
3156 			hole_pages += start_pfn - prev_end_pfn;
3157 		}
3158 		prev_end_pfn = early_node_map[i].end_pfn;
3159 	}
3160 
3161 	/* Account for ranges past physical memory on this node */
3162 	if (range_end_pfn > prev_end_pfn)
3163 		hole_pages += range_end_pfn -
3164 				max(range_start_pfn, prev_end_pfn);
3165 
3166 	return hole_pages;
3167 }
3168 
3169 /**
3170  * absent_pages_in_range - Return number of page frames in holes within a range
3171  * @start_pfn: The start PFN to start searching for holes
3172  * @end_pfn: The end PFN to stop searching for holes
3173  *
3174  * It returns the number of page frames in memory holes within a range.
3175  */
3176 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3177 							unsigned long end_pfn)
3178 {
3179 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3180 }
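
/*
 * Worked example (illustrative numbers): with active ranges [0, 7) and
 * [16, 32) registered, absent_pages_in_range(0, 32) accounts only the
 * hole [7, 16) and returns 9 pages.
 */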
3181 
3182 /* Return the number of page frames in holes in a zone on a node */
3183 static unsigned long __meminit zone_absent_pages_in_node(int nid,
3184 					unsigned long zone_type,
3185 					unsigned long *ignored)
3186 {
3187 	unsigned long node_start_pfn, node_end_pfn;
3188 	unsigned long zone_start_pfn, zone_end_pfn;
3189 
3190 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3191 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3192 							node_start_pfn);
3193 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3194 							node_end_pfn);
3195 
3196 	adjust_zone_range_for_zone_movable(nid, zone_type,
3197 			node_start_pfn, node_end_pfn,
3198 			&zone_start_pfn, &zone_end_pfn);
3199 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3200 }
3201 
3202 #else
3203 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3204 					unsigned long zone_type,
3205 					unsigned long *zones_size)
3206 {
3207 	return zones_size[zone_type];
3208 }
3209 
3210 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3211 						unsigned long zone_type,
3212 						unsigned long *zholes_size)
3213 {
3214 	if (!zholes_size)
3215 		return 0;
3216 
3217 	return zholes_size[zone_type];
3218 }
3219 
3220 #endif
3221 
3222 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3223 		unsigned long *zones_size, unsigned long *zholes_size)
3224 {
3225 	unsigned long realtotalpages, totalpages = 0;
3226 	enum zone_type i;
3227 
3228 	for (i = 0; i < MAX_NR_ZONES; i++)
3229 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3230 								zones_size);
3231 	pgdat->node_spanned_pages = totalpages;
3232 
3233 	realtotalpages = totalpages;
3234 	for (i = 0; i < MAX_NR_ZONES; i++)
3235 		realtotalpages -=
3236 			zone_absent_pages_in_node(pgdat->node_id, i,
3237 								zholes_size);
3238 	pgdat->node_present_pages = realtotalpages;
3239 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3240 							realtotalpages);
3241 }
3242 
3243 #ifndef CONFIG_SPARSEMEM
3244 /*
3245  * Calculate the size of the zone->pageblock_flags bitmap, rounded up to an
3246  * unsigned long. Start by making sure zonesize is a multiple of
3247  * pageblock_nr_pages by rounding up, then use NR_PAGEBLOCK_BITS worth of bits
3248  * per pageblock, round what is now in bits up to the nearest long in bits,
3249  * and finally return it in bytes.
3250  */
3251 static unsigned long __init usemap_size(unsigned long zonesize)
3252 {
3253 	unsigned long usemapsize;
3254 
3255 	usemapsize = roundup(zonesize, pageblock_nr_pages);
3256 	usemapsize = usemapsize >> pageblock_order;
3257 	usemapsize *= NR_PAGEBLOCK_BITS;
3258 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3259 
3260 	return usemapsize / 8;
3261 }
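
/*
 * Worked example (illustrative, assuming pageblock_order = 10,
 * NR_PAGEBLOCK_BITS = 4 and 64-bit longs): a 262144-page zone spans 256
 * pageblocks, needing 256 * 4 = 1024 bits; 1024 is already a multiple of
 * 64 bits, so usemap_size() returns 1024 / 8 = 128 bytes.
 */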
3262 
3263 static void __init setup_usemap(struct pglist_data *pgdat,
3264 				struct zone *zone, unsigned long zonesize)
3265 {
3266 	unsigned long usemapsize = usemap_size(zonesize);
3267 	zone->pageblock_flags = NULL;
3268 	if (usemapsize) {
3269 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3270 		memset(zone->pageblock_flags, 0, usemapsize);
3271 	}
3272 }
3273 #else
3274 static inline void setup_usemap(struct pglist_data *pgdat,
3275 				struct zone *zone, unsigned long zonesize) {}
3276 #endif /* CONFIG_SPARSEMEM */
3277 
3278 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3279 
3280 /* Return a sensible default order for the pageblock size. */
3281 static inline int pageblock_default_order(void)
3282 {
3283 	if (HPAGE_SHIFT > PAGE_SHIFT)
3284 		return HUGETLB_PAGE_ORDER;
3285 
3286 	return MAX_ORDER-1;
3287 }
3288 
3289 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3290 static inline void __init set_pageblock_order(unsigned int order)
3291 {
3292 	/* Check that pageblock_nr_pages has not already been setup */
3293 	if (pageblock_order)
3294 		return;
3295 
3296 	/*
3297 	 * Assume the largest contiguous order of interest is a huge page.
3298 	 * This value may be variable depending on boot parameters on IA64
3299 	 */
3300 	pageblock_order = order;
3301 }
3302 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3303 
3304 /*
3305  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3306  * and pageblock_default_order() are unused as pageblock_order is set
3307  * at compile-time. See include/linux/pageblock-flags.h for the values of
3308  * pageblock_order based on the kernel config
3309  */
3310 static inline int pageblock_default_order(unsigned int order)
3311 {
3312 	return MAX_ORDER-1;
3313 }
3314 #define set_pageblock_order(x)	do {} while (0)
3315 
3316 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3317 
3318 /*
3319  * Set up the zone data structures:
3320  *   - mark all pages reserved
3321  *   - mark all memory queues empty
3322  *   - clear the memory bitmaps
3323  */
3324 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3325 		unsigned long *zones_size, unsigned long *zholes_size)
3326 {
3327 	enum zone_type j;
3328 	int nid = pgdat->node_id;
3329 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3330 	int ret;
3331 
3332 	pgdat_resize_init(pgdat);
3333 	pgdat->nr_zones = 0;
3334 	init_waitqueue_head(&pgdat->kswapd_wait);
3335 	pgdat->kswapd_max_order = 0;
3336 
3337 	for (j = 0; j < MAX_NR_ZONES; j++) {
3338 		struct zone *zone = pgdat->node_zones + j;
3339 		unsigned long size, realsize, memmap_pages;
3340 
3341 		size = zone_spanned_pages_in_node(nid, j, zones_size);
3342 		realsize = size - zone_absent_pages_in_node(nid, j,
3343 								zholes_size);
3344 
3345 		/*
3346 		 * Adjust realsize so that it accounts for how much memory
3347 		 * is used by this zone for memmap. This affects the watermark
3348 		 * and per-cpu initialisations
3349 		 */
3350 		memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3351 		if (realsize >= memmap_pages) {
3352 			realsize -= memmap_pages;
3353 			printk(KERN_DEBUG
3354 				"  %s zone: %lu pages used for memmap\n",
3355 				zone_names[j], memmap_pages);
3356 		} else
3357 			printk(KERN_WARNING
3358 				"  %s zone: %lu pages exceeds realsize %lu\n",
3359 				zone_names[j], memmap_pages, realsize);
3360 
3361 		/* Account for reserved pages */
3362 		if (j == 0 && realsize > dma_reserve) {
3363 			realsize -= dma_reserve;
3364 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3365 					zone_names[0], dma_reserve);
3366 		}
3367 
3368 		if (!is_highmem_idx(j))
3369 			nr_kernel_pages += realsize;
3370 		nr_all_pages += realsize;
3371 
3372 		zone->spanned_pages = size;
3373 		zone->present_pages = realsize;
3374 #ifdef CONFIG_NUMA
3375 		zone->node = nid;
3376 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3377 						/ 100;
3378 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3379 #endif
3380 		zone->name = zone_names[j];
3381 		spin_lock_init(&zone->lock);
3382 		spin_lock_init(&zone->lru_lock);
3383 		zone_seqlock_init(zone);
3384 		zone->zone_pgdat = pgdat;
3385 
3386 		zone->prev_priority = DEF_PRIORITY;
3387 
3388 		zone_pcp_init(zone);
3389 		INIT_LIST_HEAD(&zone->active_list);
3390 		INIT_LIST_HEAD(&zone->inactive_list);
3391 		zone->nr_scan_active = 0;
3392 		zone->nr_scan_inactive = 0;
3393 		zap_zone_vm_stats(zone);
3394 		zone->flags = 0;
3395 		if (!size)
3396 			continue;
3397 
3398 		set_pageblock_order(pageblock_default_order());
3399 		setup_usemap(pgdat, zone, size);
3400 		ret = init_currently_empty_zone(zone, zone_start_pfn,
3401 						size, MEMMAP_EARLY);
3402 		BUG_ON(ret);
3403 		zone_start_pfn += size;
3404 	}
3405 }
3406 
3407 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3408 {
3409 	/* Skip empty nodes */
3410 	if (!pgdat->node_spanned_pages)
3411 		return;
3412 
3413 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3414 	/* ia64 gets its own node_mem_map, before this, without bootmem */
3415 	if (!pgdat->node_mem_map) {
3416 		unsigned long size, start, end;
3417 		struct page *map;
3418 
3419 		/*
3420 		 * The zone's endpoints aren't required to be MAX_ORDER
3421 		 * aligned but the node_mem_map endpoints must be in order
3422 		 * for the buddy allocator to function correctly.
3423 		 */
3424 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3425 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3426 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3427 		size =  (end - start) * sizeof(struct page);
3428 		map = alloc_remap(pgdat->node_id, size);
3429 		if (!map)
3430 			map = alloc_bootmem_node(pgdat, size);
3431 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3432 	}
3433 #ifndef CONFIG_NEED_MULTIPLE_NODES
3434 	/*
3435 	 * With no DISCONTIG, the global mem_map is just set as node 0's
3436 	 */
3437 	if (pgdat == NODE_DATA(0)) {
3438 		mem_map = NODE_DATA(0)->node_mem_map;
3439 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3440 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3441 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3442 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3443 	}
3444 #endif
3445 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
3446 }
3447 
3448 void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
3449 		unsigned long *zones_size, unsigned long node_start_pfn,
3450 		unsigned long *zholes_size)
3451 {
3452 	pgdat->node_id = nid;
3453 	pgdat->node_start_pfn = node_start_pfn;
3454 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3455 
3456 	alloc_node_mem_map(pgdat);
3457 
3458 	free_area_init_core(pgdat, zones_size, zholes_size);
3459 }
3460 
3461 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3462 
3463 #if MAX_NUMNODES > 1
3464 /*
3465  * Figure out the number of possible node ids.
3466  */
3467 static void __init setup_nr_node_ids(void)
3468 {
3469 	unsigned int node;
3470 	unsigned int highest = 0;
3471 
3472 	for_each_node_mask(node, node_possible_map)
3473 		highest = node;
3474 	nr_node_ids = highest + 1;
3475 }
3476 #else
3477 static inline void setup_nr_node_ids(void)
3478 {
3479 }
3480 #endif
3481 
3482 /**
3483  * add_active_range - Register a range of PFNs backed by physical memory
3484  * @nid: The node ID the range resides on
3485  * @start_pfn: The start PFN of the available physical memory
3486  * @end_pfn: The end PFN of the available physical memory
3487  *
3488  * These ranges are stored in an early_node_map[] and later used by
3489  * free_area_init_nodes() to calculate zone sizes and holes. If the
3490  * range spans a memory hole, it is up to the architecture to ensure
3491  * the memory is not freed by the bootmem allocator. If possible
3492  * the range being registered will be merged with existing ranges.
3493  */
3494 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3495 						unsigned long end_pfn)
3496 {
3497 	int i;
3498 
3499 	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3500 			  "%d entries of %d used\n",
3501 			  nid, start_pfn, end_pfn,
3502 			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3503 
3504 	/* Merge with existing active regions if possible */
3505 	for (i = 0; i < nr_nodemap_entries; i++) {
3506 		if (early_node_map[i].nid != nid)
3507 			continue;
3508 
3509 		/* Skip if an existing region covers this new one */
3510 		if (start_pfn >= early_node_map[i].start_pfn &&
3511 				end_pfn <= early_node_map[i].end_pfn)
3512 			return;
3513 
3514 		/* Merge forward if suitable */
3515 		if (start_pfn <= early_node_map[i].end_pfn &&
3516 				end_pfn > early_node_map[i].end_pfn) {
3517 			early_node_map[i].end_pfn = end_pfn;
3518 			return;
3519 		}
3520 
3521 		/* Merge backward if suitable */
3522 		if (start_pfn < early_node_map[i].end_pfn &&
3523 				end_pfn >= early_node_map[i].start_pfn) {
3524 			early_node_map[i].start_pfn = start_pfn;
3525 			return;
3526 		}
3527 	}
3528 
3529 	/* Check that early_node_map is large enough */
3530 	if (i >= MAX_ACTIVE_REGIONS) {
3531 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3532 							MAX_ACTIVE_REGIONS);
3533 		return;
3534 	}
3535 
3536 	early_node_map[i].nid = nid;
3537 	early_node_map[i].start_pfn = start_pfn;
3538 	early_node_map[i].end_pfn = end_pfn;
3539 	nr_nodemap_entries = i + 1;
3540 }
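
/*
 * Usage sketch (illustrative values): an arch registers its RAM ranges
 * during early boot, e.g.
 *
 *	add_active_range(0, 0, 0x10000);
 *	add_active_range(0, 0x10000, 0x20000);
 *
 * The second call merges forward with the first (its start_pfn equals the
 * existing end_pfn), leaving a single 0..0x20000 entry for node 0.
 */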
3541 
3542 /**
3543  * shrink_active_range - Shrink an existing registered range of PFNs
3544  * @nid: The node id the range is on that should be shrunk
3545  * @old_end_pfn: The old end PFN of the range
3546  * @new_end_pfn: The new end PFN of the range
3547  *
3548  * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3549  * The map is kept at the end of the physical page range that has already been
3550  * registered with add_active_range(). This function allows an arch to shrink
3551  * an existing registered range.
3552  */
3553 void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3554 						unsigned long new_end_pfn)
3555 {
3556 	int i;
3557 
3558 	/* Find the old active region end and shrink */
3559 	for_each_active_range_index_in_nid(i, nid)
3560 		if (early_node_map[i].end_pfn == old_end_pfn) {
3561 			early_node_map[i].end_pfn = new_end_pfn;
3562 			break;
3563 		}
3564 }
3565 
3566 /**
3567  * remove_all_active_ranges - Remove all currently registered regions
3568  *
3569  * During discovery, it may be found that a table like SRAT is invalid
3570  * and an alternative discovery method must be used. This function removes
3571  * all currently registered regions.
3572  */
3573 void __init remove_all_active_ranges(void)
3574 {
3575 	memset(early_node_map, 0, sizeof(early_node_map));
3576 	nr_nodemap_entries = 0;
3577 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3578 	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3579 	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3580 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
3581 }
3582 
3583 /* Compare two node_active_regions */
3584 static int __init cmp_node_active_region(const void *a, const void *b)
3585 {
3586 	struct node_active_region *arange = (struct node_active_region *)a;
3587 	struct node_active_region *brange = (struct node_active_region *)b;
3588 
3589 	/* Done this way to avoid overflows */
3590 	if (arange->start_pfn > brange->start_pfn)
3591 		return 1;
3592 	if (arange->start_pfn < brange->start_pfn)
3593 		return -1;
3594 
3595 	return 0;
3596 }
3597 
3598 /* sort the node_map by start_pfn */
3599 static void __init sort_node_map(void)
3600 {
3601 	sort(early_node_map, (size_t)nr_nodemap_entries,
3602 			sizeof(struct node_active_region),
3603 			cmp_node_active_region, NULL);
3604 }
3605 
3606 /* Find the lowest pfn for a node */
3607 unsigned long __init find_min_pfn_for_node(unsigned long nid)
3608 {
3609 	int i;
3610 	unsigned long min_pfn = ULONG_MAX;
3611 
3612 	/* Assuming a sorted map, the first range found has the starting pfn */
3613 	for_each_active_range_index_in_nid(i, nid)
3614 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3615 
3616 	if (min_pfn == ULONG_MAX) {
3617 		printk(KERN_WARNING
3618 			"Could not find start_pfn for node %lu\n", nid);
3619 		return 0;
3620 	}
3621 
3622 	return min_pfn;
3623 }
3624 
3625 /**
3626  * find_min_pfn_with_active_regions - Find the minimum PFN registered
3627  *
3628  * It returns the minimum PFN based on information provided via
3629  * add_active_range().
3630  */
3631 unsigned long __init find_min_pfn_with_active_regions(void)
3632 {
3633 	return find_min_pfn_for_node(MAX_NUMNODES);
3634 }
3635 
3636 /**
3637  * find_max_pfn_with_active_regions - Find the maximum PFN registered
3638  *
3639  * It returns the maximum PFN based on information provided via
3640  * add_active_range().
3641  */
3642 unsigned long __init find_max_pfn_with_active_regions(void)
3643 {
3644 	int i;
3645 	unsigned long max_pfn = 0;
3646 
3647 	for (i = 0; i < nr_nodemap_entries; i++)
3648 		max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3649 
3650 	return max_pfn;
3651 }
3652 
3653 /*
3654  * early_calculate_totalpages()
3655  * Sum pages in active regions for movable zone.
3656  * Populate N_HIGH_MEMORY for calculating usable_nodes.
3657  */
3658 static unsigned long __init early_calculate_totalpages(void)
3659 {
3660 	int i;
3661 	unsigned long totalpages = 0;
3662 
3663 	for (i = 0; i < nr_nodemap_entries; i++) {
3664 		unsigned long pages = early_node_map[i].end_pfn -
3665 						early_node_map[i].start_pfn;
3666 		totalpages += pages;
3667 		if (pages)
3668 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3669 	}
3670 	return totalpages;
3671 }
3672 
3673 /*
3674  * Find the PFN the Movable zone begins in each node. Kernel memory
3675  * is spread evenly between nodes as long as the nodes have enough
3676  * memory. When they don't, some nodes will have more kernelcore than
3677  * others.
3678  */
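/*
 * Worked example (illustrative): with kernelcore=2G and two nodes that
 * have memory, each node is first asked for roughly 1G of kernelcore; if
 * one node is too small to supply its share, the restart pass below
 * spreads the shortfall across the remaining node(s).
 */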
3679 void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3680 {
3681 	int i, nid;
3682 	unsigned long usable_startpfn;
3683 	unsigned long kernelcore_node, kernelcore_remaining;
3684 	unsigned long totalpages = early_calculate_totalpages();
3685 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3686 
3687 	/*
3688 	 * If movablecore was specified, calculate what size of
3689 	 * kernelcore that corresponds so that memory usable for
3690 	 * any allocation type is evenly spread. If both kernelcore
3691 	 * and movablecore are specified, then the value of kernelcore
3692 	 * will be used for required_kernelcore if it's greater than
3693 	 * what movablecore would have allowed.
3694 	 */
3695 	if (required_movablecore) {
3696 		unsigned long corepages;
3697 
3698 		/*
3699 		 * Round up so that ZONE_MOVABLE is at least as large as what
3700 		 * was requested by the user
3701 		 */
3702 		required_movablecore =
3703 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3704 		corepages = totalpages - required_movablecore;
3705 
3706 		required_kernelcore = max(required_kernelcore, corepages);
3707 	}
3708 
3709 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
3710 	if (!required_kernelcore)
3711 		return;
3712 
3713 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3714 	find_usable_zone_for_movable();
3715 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3716 
3717 restart:
3718 	/* Spread kernelcore memory as evenly as possible throughout nodes */
3719 	kernelcore_node = required_kernelcore / usable_nodes;
3720 	for_each_node_state(nid, N_HIGH_MEMORY) {
3721 		/*
3722 		 * Recalculate kernelcore_node if the division per node
3723 		 * now exceeds what is necessary to satisfy the requested
3724 		 * amount of memory for the kernel
3725 		 */
3726 		if (required_kernelcore < kernelcore_node)
3727 			kernelcore_node = required_kernelcore / usable_nodes;
3728 
3729 		/*
3730 		 * As the map is walked, we track how much memory is usable
3731 		 * by the kernel using kernelcore_remaining. When it is
3732 		 * 0, the rest of the node is usable by ZONE_MOVABLE
3733 		 */
3734 		kernelcore_remaining = kernelcore_node;
3735 
3736 		/* Go through each range of PFNs within this node */
3737 		for_each_active_range_index_in_nid(i, nid) {
3738 			unsigned long start_pfn, end_pfn;
3739 			unsigned long size_pages;
3740 
3741 			start_pfn = max(early_node_map[i].start_pfn,
3742 						zone_movable_pfn[nid]);
3743 			end_pfn = early_node_map[i].end_pfn;
3744 			if (start_pfn >= end_pfn)
3745 				continue;
3746 
3747 			/* Account for what is only usable for kernelcore */
3748 			if (start_pfn < usable_startpfn) {
3749 				unsigned long kernel_pages;
3750 				kernel_pages = min(end_pfn, usable_startpfn)
3751 								- start_pfn;
3752 
3753 				kernelcore_remaining -= min(kernel_pages,
3754 							kernelcore_remaining);
3755 				required_kernelcore -= min(kernel_pages,
3756 							required_kernelcore);
3757 
3758 				/* Continue if range is now fully accounted */
3759 				if (end_pfn <= usable_startpfn) {
3760 
3761 					/*
3762 					 * Push zone_movable_pfn to the end so
3763 					 * that if we have to rebalance
3764 					 * kernelcore across nodes, we will
3765 					 * not double account here
3766 					 */
3767 					zone_movable_pfn[nid] = end_pfn;
3768 					continue;
3769 				}
3770 				start_pfn = usable_startpfn;
3771 			}
3772 
3773 			/*
3774 			 * The usable PFN range for ZONE_MOVABLE is from
3775 			 * start_pfn->end_pfn. Calculate size_pages as the
3776 			 * number of pages used as kernelcore
3777 			 */
3778 			size_pages = end_pfn - start_pfn;
3779 			if (size_pages > kernelcore_remaining)
3780 				size_pages = kernelcore_remaining;
3781 			zone_movable_pfn[nid] = start_pfn + size_pages;
3782 
3783 			/*
3784 			 * Some kernelcore has been met, update counts and
3785 			 * break if the kernelcore for this node has been
3786 			 * satisified
3787 			 * satisfied
3788 			required_kernelcore -= min(required_kernelcore,
3789 								size_pages);
3790 			kernelcore_remaining -= size_pages;
3791 			if (!kernelcore_remaining)
3792 				break;
3793 		}
3794 	}
3795 
3796 	/*
3797 	 * If there is still required_kernelcore, we do another pass with one
3798 	 * less node in the count. This will push zone_movable_pfn[nid] further
3799 	 * along on the nodes that still have memory until kernelcore is
3800 	 * satisified
3801 	 * satisfied
3802 	usable_nodes--;
3803 	if (usable_nodes && required_kernelcore > usable_nodes)
3804 		goto restart;
3805 
3806 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3807 	for (nid = 0; nid < MAX_NUMNODES; nid++)
3808 		zone_movable_pfn[nid] =
3809 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3810 }
3811 
3812 /* Any regular memory on that node? */
3813 static void check_for_regular_memory(pg_data_t *pgdat)
3814 {
3815 #ifdef CONFIG_HIGHMEM
3816 	enum zone_type zone_type;
3817 
3818 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3819 		struct zone *zone = &pgdat->node_zones[zone_type];
3820 		if (zone->present_pages)
3821 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3822 	}
3823 #endif
3824 }
3825 
3826 /**
3827  * free_area_init_nodes - Initialise all pg_data_t and zone data
3828  * @max_zone_pfn: an array of max PFNs for each zone
3829  *
3830  * This will call free_area_init_node() for each active node in the system.
3831  * Using the page ranges provided by add_active_range(), the size of each
3832  * zone in each node and their holes is calculated. If the maximum PFN
3833  * between two adjacent zones match, it is assumed that the zone is empty.
3834  * between two adjacent zones matches, it is assumed that the zone is empty.
3835  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3836  * starts where the previous one ended. For example, ZONE_DMA32 starts
3837  * at arch_max_dma_pfn.
3838  */
3839 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3840 {
3841 	unsigned long nid;
3842 	enum zone_type i;
3843 
3844 	/* Sort early_node_map as initialisation assumes it is sorted */
3845 	sort_node_map();
3846 
3847 	/* Record where the zone boundaries are */
3848 	memset(arch_zone_lowest_possible_pfn, 0,
3849 				sizeof(arch_zone_lowest_possible_pfn));
3850 	memset(arch_zone_highest_possible_pfn, 0,
3851 				sizeof(arch_zone_highest_possible_pfn));
3852 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3853 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3854 	for (i = 1; i < MAX_NR_ZONES; i++) {
3855 		if (i == ZONE_MOVABLE)
3856 			continue;
3857 		arch_zone_lowest_possible_pfn[i] =
3858 			arch_zone_highest_possible_pfn[i-1];
3859 		arch_zone_highest_possible_pfn[i] =
3860 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3861 	}
3862 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3863 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3864 
3865 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
3866 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3867 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
3868 
3869 	/* Print out the zone ranges */
3870 	printk("Zone PFN ranges:\n");
3871 	for (i = 0; i < MAX_NR_ZONES; i++) {
3872 		if (i == ZONE_MOVABLE)
3873 			continue;
3874 		printk("  %-8s %8lu -> %8lu\n",
3875 				zone_names[i],
3876 				arch_zone_lowest_possible_pfn[i],
3877 				arch_zone_highest_possible_pfn[i]);
3878 	}
3879 
3880 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
3881 	printk("Movable zone start PFN for each node\n");
3882 	for (i = 0; i < MAX_NUMNODES; i++) {
3883 		if (zone_movable_pfn[i])
3884 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
3885 	}
3886 
3887 	/* Print out the early_node_map[] */
3888 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3889 	for (i = 0; i < nr_nodemap_entries; i++)
3890 		printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3891 						early_node_map[i].start_pfn,
3892 						early_node_map[i].end_pfn);
3893 
3894 	/* Initialise every node */
3895 	setup_nr_node_ids();
3896 	for_each_online_node(nid) {
3897 		pg_data_t *pgdat = NODE_DATA(nid);
3898 		free_area_init_node(nid, pgdat, NULL,
3899 				find_min_pfn_for_node(nid), NULL);
3900 
3901 		/* Any memory on that node? */
3902 		if (pgdat->node_present_pages)
3903 			node_set_state(nid, N_HIGH_MEMORY);
3904 		check_for_regular_memory(pgdat);
3905 	}
3906 }
3907 
3908 static int __init cmdline_parse_core(char *p, unsigned long *core)
3909 {
3910 	unsigned long long coremem;
3911 	if (!p)
3912 		return -EINVAL;
3913 
3914 	coremem = memparse(p, &p);
3915 	*core = coremem >> PAGE_SHIFT;
3916 
3917 	/* Paranoid check that UL is enough for the coremem value */
3918 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3919 
3920 	return 0;
3921 }
3922 
3923 /*
3924  * kernelcore=size sets the amount of memory for use for allocations that
3925  * cannot be reclaimed or migrated.
3926  */
3927 static int __init cmdline_parse_kernelcore(char *p)
3928 {
3929 	return cmdline_parse_core(p, &required_kernelcore);
3930 }
3931 
3932 /*
3933  * movablecore=size sets the amount of memory for use for allocations that
3934  * can be reclaimed or migrated.
3935  */
3936 static int __init cmdline_parse_movablecore(char *p)
3937 {
3938 	return cmdline_parse_core(p, &required_movablecore);
3939 }
3940 
3941 early_param("kernelcore", cmdline_parse_kernelcore);
3942 early_param("movablecore", cmdline_parse_movablecore);
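
/*
 * Example (illustrative): booting with "kernelcore=512M" makes memparse()
 * return 512M in bytes, so required_kernelcore becomes 131072 pages with
 * 4K pages; the start of ZONE_MOVABLE on each node is then rounded up to
 * MAX_ORDER_NR_PAGES.
 */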
3943 
3944 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3945 
3946 /**
3947  * set_dma_reserve - set the specified number of pages reserved in the first zone
3948  * @new_dma_reserve: The number of pages to mark reserved
3949  *
3950  * The per-cpu batchsize and zone watermarks are determined by present_pages.
3951  * In the DMA zone, a significant percentage may be consumed by kernel image
3952  * and other unfreeable allocations which can skew the watermarks badly. This
3953  * function may optionally be used to account for unfreeable pages in the
3954  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3955  * smaller per-cpu batchsize.
3956  */
3957 void __init set_dma_reserve(unsigned long new_dma_reserve)
3958 {
3959 	dma_reserve = new_dma_reserve;
3960 }
3961 
3962 #ifndef CONFIG_NEED_MULTIPLE_NODES
3963 static bootmem_data_t contig_bootmem_data;
3964 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3965 
3966 EXPORT_SYMBOL(contig_page_data);
3967 #endif
3968 
3969 void __init free_area_init(unsigned long *zones_size)
3970 {
3971 	free_area_init_node(0, NODE_DATA(0), zones_size,
3972 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3973 }
3974 
3975 static int page_alloc_cpu_notify(struct notifier_block *self,
3976 				 unsigned long action, void *hcpu)
3977 {
3978 	int cpu = (unsigned long)hcpu;
3979 
3980 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
3981 		drain_pages(cpu);
3982 
3983 		/*
3984 		 * Spill the event counters of the dead processor
3985 		 * into the current processor's event counters.
3986 		 * This artificially elevates the count of the current
3987 		 * processor.
3988 		 */
3989 		vm_events_fold_cpu(cpu);
3990 
3991 		/*
3992 		 * Zero the differential counters of the dead processor
3993 		 * so that the vm statistics are consistent.
3994 		 *
3995 		 * This is only okay since the processor is dead and cannot
3996 		 * race with what we are doing.
3997 		 */
3998 		refresh_cpu_vm_stats(cpu);
3999 	}
4000 	return NOTIFY_OK;
4001 }
4002 
4003 void __init page_alloc_init(void)
4004 {
4005 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4006 }
4007 
4008 /*
4009  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4010  *	or min_free_kbytes changes.
4011  */
4012 static void calculate_totalreserve_pages(void)
4013 {
4014 	struct pglist_data *pgdat;
4015 	unsigned long reserve_pages = 0;
4016 	enum zone_type i, j;
4017 
4018 	for_each_online_pgdat(pgdat) {
4019 		for (i = 0; i < MAX_NR_ZONES; i++) {
4020 			struct zone *zone = pgdat->node_zones + i;
4021 			unsigned long max = 0;
4022 
4023 			/* Find valid and maximum lowmem_reserve in the zone */
4024 			for (j = i; j < MAX_NR_ZONES; j++) {
4025 				if (zone->lowmem_reserve[j] > max)
4026 					max = zone->lowmem_reserve[j];
4027 			}
4028 
4029 			/* we treat pages_high as reserved pages. */
4030 			max += zone->pages_high;
4031 
4032 			if (max > zone->present_pages)
4033 				max = zone->present_pages;
4034 			reserve_pages += max;
4035 		}
4036 	}
4037 	totalreserve_pages = reserve_pages;
4038 }
4039 
4040 /*
4041  * setup_per_zone_lowmem_reserve - called whenever
4042  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4043  *	has a correct pages reserved value, so an adequate number of
4044  *	pages are left in the zone after a successful __alloc_pages().
4045  */
4046 static void setup_per_zone_lowmem_reserve(void)
4047 {
4048 	struct pglist_data *pgdat;
4049 	enum zone_type j, idx;
4050 
4051 	for_each_online_pgdat(pgdat) {
4052 		for (j = 0; j < MAX_NR_ZONES; j++) {
4053 			struct zone *zone = pgdat->node_zones + j;
4054 			unsigned long present_pages = zone->present_pages;
4055 
4056 			zone->lowmem_reserve[j] = 0;
4057 
4058 			idx = j;
4059 			while (idx) {
4060 				struct zone *lower_zone;
4061 
4062 				idx--;
4063 
4064 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4065 					sysctl_lowmem_reserve_ratio[idx] = 1;
4066 
4067 				lower_zone = pgdat->node_zones + idx;
4068 				lower_zone->lowmem_reserve[j] = present_pages /
4069 					sysctl_lowmem_reserve_ratio[idx];
4070 				present_pages += lower_zone->present_pages;
4071 			}
4072 		}
4073 	}
4074 
4075 	/* update totalreserve_pages */
4076 	calculate_totalreserve_pages();
4077 }
4078 
4079 /**
4080  * setup_per_zone_pages_min - called when min_free_kbytes changes.
4081  *
4082  * Ensures that the pages_{min,low,high} values for each zone are set correctly
4083  * with respect to min_free_kbytes.
4084  */
4085 void setup_per_zone_pages_min(void)
4086 {
4087 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4088 	unsigned long lowmem_pages = 0;
4089 	struct zone *zone;
4090 	unsigned long flags;
4091 
4092 	/* Calculate total number of !ZONE_HIGHMEM pages */
4093 	for_each_zone(zone) {
4094 		if (!is_highmem(zone))
4095 			lowmem_pages += zone->present_pages;
4096 	}
4097 
4098 	for_each_zone(zone) {
4099 		u64 tmp;
4100 
4101 		spin_lock_irqsave(&zone->lru_lock, flags);
4102 		tmp = (u64)pages_min * zone->present_pages;
4103 		do_div(tmp, lowmem_pages);
4104 		if (is_highmem(zone)) {
4105 			/*
4106 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4107 			 * need highmem pages, so cap pages_min to a small
4108 			 * value here.
4109 			 *
4110 			 * The (pages_high-pages_low) and (pages_low-pages_min)
4111 			 * deltas control async page reclaim, and so should
4112 			 * not be capped for highmem.
4113 			 */
4114 			int min_pages;
4115 
4116 			min_pages = zone->present_pages / 1024;
4117 			if (min_pages < SWAP_CLUSTER_MAX)
4118 				min_pages = SWAP_CLUSTER_MAX;
4119 			if (min_pages > 128)
4120 				min_pages = 128;
4121 			zone->pages_min = min_pages;
4122 		} else {
4123 			/*
4124 			 * If it's a lowmem zone, reserve a number of pages
4125 			 * proportionate to the zone's size.
4126 			 */
4127 			zone->pages_min = tmp;
4128 		}
4129 
4130 		zone->pages_low   = zone->pages_min + (tmp >> 2);
4131 		zone->pages_high  = zone->pages_min + (tmp >> 1);
4132 		setup_zone_migrate_reserve(zone);
4133 		spin_unlock_irqrestore(&zone->lru_lock, flags);
4134 	}
4135 
4136 	/* update totalreserve_pages */
4137 	calculate_totalreserve_pages();
4138 }
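
/*
 * Worked example (illustrative): with 4K pages and min_free_kbytes = 4096,
 * pages_min totals 4096 >> 2 = 1024 pages, spread over the lowmem zones in
 * proportion to their size; each lowmem zone then gets pages_low = 1.25x
 * and pages_high = 1.5x its pages_min share.
 */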
4139 
4140 /*
4141  * Initialise min_free_kbytes.
4142  *
4143  * For small machines we want it small (128k min).  For large machines
4144  * we want it large (64MB max).  But it is not linear, because network
4145  * bandwidth does not increase linearly with machine size.  We use
4146  *
4147  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4148  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4149  *
4150  * which yields
4151  *
4152  * 16MB:	512k
4153  * 32MB:	724k
4154  * 64MB:	1024k
4155  * 128MB:	1448k
4156  * 256MB:	2048k
4157  * 512MB:	2896k
4158  * 1024MB:	4096k
4159  * 2048MB:	5792k
4160  * 4096MB:	8192k
4161  * 8192MB:	11584k
4162  * 16384MB:	16384k
4163  */
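/*
 * Sanity check of the table above: for 1024MB of lowmem, lowmem_kbytes =
 * 1048576 and int_sqrt(1048576 * 16) = int_sqrt(16777216) = 4096k.
 */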
4164 static int __init init_per_zone_pages_min(void)
4165 {
4166 	unsigned long lowmem_kbytes;
4167 
4168 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4169 
4170 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4171 	if (min_free_kbytes < 128)
4172 		min_free_kbytes = 128;
4173 	if (min_free_kbytes > 65536)
4174 		min_free_kbytes = 65536;
4175 	setup_per_zone_pages_min();
4176 	setup_per_zone_lowmem_reserve();
4177 	return 0;
4178 }
4179 module_init(init_per_zone_pages_min)
4180 
4181 /*
4182  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4183  *	that we can call two helper functions whenever min_free_kbytes
4184  *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
4185  *	changes.
4186 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4187 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4188 {
4189 	proc_dointvec(table, write, file, buffer, length, ppos);
4190 	if (write)
4191 		setup_per_zone_pages_min();
4192 	return 0;
4193 }
4194 
4195 #ifdef CONFIG_NUMA
4196 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4197 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4198 {
4199 	struct zone *zone;
4200 	int rc;
4201 
4202 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4203 	if (rc)
4204 		return rc;
4205 
4206 	for_each_zone(zone)
4207 		zone->min_unmapped_pages = (zone->present_pages *
4208 				sysctl_min_unmapped_ratio) / 100;
4209 	return 0;
4210 }
4211 
4212 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4213 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4214 {
4215 	struct zone *zone;
4216 	int rc;
4217 
4218 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4219 	if (rc)
4220 		return rc;
4221 
4222 	for_each_zone(zone)
4223 		zone->min_slab_pages = (zone->present_pages *
4224 				sysctl_min_slab_ratio) / 100;
4225 	return 0;
4226 }
4227 #endif
4228 
4229 /*
4230  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4231  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4232  *	whenever sysctl_lowmem_reserve_ratio changes.
4233  *
4234  * The reserve ratio has no relation to the pages_min watermarks. The
4235  * lowmem reserve ratio only makes sense as a function of the boot-time
4236  * zone sizes.
4237  */
4238 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4239 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4240 {
4241 	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4242 	setup_per_zone_lowmem_reserve();
4243 	return 0;
4244 }
4245 
4246 /*
4247  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4248  * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
4249  * pagelist can hold before it is flushed back to the buddy allocator.
4250  */
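/*
 * Example (illustrative): writing 8 to the sysctl sets pcp->high to
 * zone->present_pages / 8 for every online cpu in every zone.
 */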
4251 
4252 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4253 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4254 {
4255 	struct zone *zone;
4256 	unsigned int cpu;
4257 	int ret;
4258 
4259 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4260 	if (!write || (ret == -EINVAL))
4261 		return ret;
4262 	for_each_zone(zone) {
4263 		for_each_online_cpu(cpu) {
4264 			unsigned long  high;
4265 			high = zone->present_pages / percpu_pagelist_fraction;
4266 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4267 		}
4268 	}
4269 	return 0;
4270 }
4271 
4272 int hashdist = HASHDIST_DEFAULT;
4273 
4274 #ifdef CONFIG_NUMA
4275 static int __init set_hashdist(char *str)
4276 {
4277 	if (!str)
4278 		return 0;
4279 	hashdist = simple_strtoul(str, &str, 0);
4280 	return 1;
4281 }
4282 __setup("hashdist=", set_hashdist);
4283 #endif
4284 
4285 /*
4286  * allocate a large system hash table from bootmem
4287  * - it is assumed that the hash table must contain an exact power-of-2
4288  *   quantity of entries
4289  * - limit is the number of hash buckets, not the total allocation size
4290  */
4291 void *__init alloc_large_system_hash(const char *tablename,
4292 				     unsigned long bucketsize,
4293 				     unsigned long numentries,
4294 				     int scale,
4295 				     int flags,
4296 				     unsigned int *_hash_shift,
4297 				     unsigned int *_hash_mask,
4298 				     unsigned long limit)
4299 {
4300 	unsigned long long max = limit;
4301 	unsigned long log2qty, size;
4302 	void *table = NULL;
4303 
4304 	/* allow the kernel cmdline to have a say */
4305 	if (!numentries) {
4306 		/* round applicable memory size up to nearest megabyte */
4307 		numentries = nr_kernel_pages;
4308 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4309 		numentries >>= 20 - PAGE_SHIFT;
4310 		numentries <<= 20 - PAGE_SHIFT;
4311 
4312 		/* limit to 1 bucket per 2^scale bytes of low memory */
4313 		if (scale > PAGE_SHIFT)
4314 			numentries >>= (scale - PAGE_SHIFT);
4315 		else
4316 			numentries <<= (PAGE_SHIFT - scale);
4317 
4318 		/* Make sure we've got at least a 0-order allocation... */
4319 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4320 			numentries = PAGE_SIZE / bucketsize;
4321 	}
4322 	numentries = roundup_pow_of_two(numentries);
4323 
4324 	/* limit allocation size to 1/16 total memory by default */
4325 	if (max == 0) {
4326 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4327 		do_div(max, bucketsize);
4328 	}
4329 
4330 	if (numentries > max)
4331 		numentries = max;
4332 
4333 	log2qty = ilog2(numentries);
4334 
4335 	do {
4336 		size = bucketsize << log2qty;
4337 		if (flags & HASH_EARLY)
4338 			table = alloc_bootmem(size);
4339 		else if (hashdist)
4340 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4341 		else {
4342 			unsigned long order;
4343 			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
4344 				;
4345 			table = (void *)__get_free_pages(GFP_ATOMIC, order);
4346 			/*
4347 			 * If bucketsize is not a power-of-two, we may free
4348 			 * some pages at the end of hash table.
4349 			 */
4350 			if (table) {
4351 				unsigned long alloc_end = (unsigned long)table +
4352 						(PAGE_SIZE << order);
4353 				unsigned long used = (unsigned long)table +
4354 						PAGE_ALIGN(size);
4355 				split_page(virt_to_page(table), order);
4356 				while (used < alloc_end) {
4357 					free_page(used);
4358 					used += PAGE_SIZE;
4359 				}
4360 			}
4361 		}
4362 	} while (!table && size > PAGE_SIZE && --log2qty);
4363 
4364 	if (!table)
4365 		panic("Failed to allocate %s hash table\n", tablename);
4366 
4367 	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4368 	       tablename,
4369 	       (1U << log2qty),
4370 	       ilog2(size) - PAGE_SHIFT,
4371 	       size);
4372 
4373 	if (_hash_shift)
4374 		*_hash_shift = log2qty;
4375 	if (_hash_mask)
4376 		*_hash_mask = (1 << log2qty) - 1;
4377 
4378 	return table;
4379 }
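
/*
 * Typical caller (sketch; cf. fs/dcache.c of this era):
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries, 13, HASH_EARLY,
 *					&d_hash_shift, &d_hash_mask, 0);
 *
 * scale=13 limits the table to roughly one bucket per 8K of low memory.
 */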
4380 
4381 #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
4382 struct page *pfn_to_page(unsigned long pfn)
4383 {
4384 	return __pfn_to_page(pfn);
4385 }
4386 unsigned long page_to_pfn(struct page *page)
4387 {
4388 	return __page_to_pfn(page);
4389 }
4390 EXPORT_SYMBOL(pfn_to_page);
4391 EXPORT_SYMBOL(page_to_pfn);
4392 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
4393 
4394 /* Return a pointer to the bitmap storing bits affecting a block of pages */
4395 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4396 							unsigned long pfn)
4397 {
4398 #ifdef CONFIG_SPARSEMEM
4399 	return __pfn_to_section(pfn)->pageblock_flags;
4400 #else
4401 	return zone->pageblock_flags;
4402 #endif /* CONFIG_SPARSEMEM */
4403 }
4404 
4405 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4406 {
4407 #ifdef CONFIG_SPARSEMEM
4408 	pfn &= (PAGES_PER_SECTION-1);
4409 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4410 #else
4411 	pfn = pfn - zone->zone_start_pfn;
4412 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4413 #endif /* CONFIG_SPARSEMEM */
4414 }
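
/*
 * Worked example (illustrative, assuming pageblock_order == 10 and
 * NR_PAGEBLOCK_BITS == 4): pfn 0x2800 in a zone starting at pfn 0 lies in
 * pageblock 10, so its flags begin at bit index 40 of the bitmap.
 */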
4415 
4416 /**
4417  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4418  * @page: The page within the block of interest
4419  * @start_bitidx: The first bit of interest to retrieve
4420  * @end_bitidx: The last bit of interest
4421  * returns pageblock_bits flags
4422  */
4423 unsigned long get_pageblock_flags_group(struct page *page,
4424 					int start_bitidx, int end_bitidx)
4425 {
4426 	struct zone *zone;
4427 	unsigned long *bitmap;
4428 	unsigned long pfn, bitidx;
4429 	unsigned long flags = 0;
4430 	unsigned long value = 1;
4431 
4432 	zone = page_zone(page);
4433 	pfn = page_to_pfn(page);
4434 	bitmap = get_pageblock_bitmap(zone, pfn);
4435 	bitidx = pfn_to_bitidx(zone, pfn);
4436 
4437 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4438 		if (test_bit(bitidx + start_bitidx, bitmap))
4439 			flags |= value;
4440 
4441 	return flags;
4442 }
4443 
4444 /**
4445  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4446  * @page: The page within the block of interest
4447  * @start_bitidx: The first bit of interest
4448  * @end_bitidx: The last bit of interest
4449  * @flags: The flags to set
4450  */
4451 void set_pageblock_flags_group(struct page *page, unsigned long flags,
4452 					int start_bitidx, int end_bitidx)
4453 {
4454 	struct zone *zone;
4455 	unsigned long *bitmap;
4456 	unsigned long pfn, bitidx;
4457 	unsigned long value = 1;
4458 
4459 	zone = page_zone(page);
4460 	pfn = page_to_pfn(page);
4461 	bitmap = get_pageblock_bitmap(zone, pfn);
4462 	bitidx = pfn_to_bitidx(zone, pfn);
4463 
4464 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4465 		if (flags & value)
4466 			__set_bit(bitidx + start_bitidx, bitmap);
4467 		else
4468 			__clear_bit(bitidx + start_bitidx, bitmap);
4469 }
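
/*
 * Typical use (sketch): set_pageblock_migratetype(), defined earlier in
 * this file, stores a migrate type with
 *
 *	set_pageblock_flags_group(page, (unsigned long)migratetype,
 *					PB_migrate, PB_migrate_end);
 */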
4470 
4471 /*
4472  * This is designed as a helper function; please see page_isolation.c too.
4473  * It sets/clears a pageblock's migrate type to be ISOLATE.
4474  * The page allocator never allocates memory from an ISOLATE pageblock.
4475  */
4476 
4477 int set_migratetype_isolate(struct page *page)
4478 {
4479 	struct zone *zone;
4480 	unsigned long flags;
4481 	int ret = -EBUSY;
4482 
4483 	zone = page_zone(page);
4484 	spin_lock_irqsave(&zone->lock, flags);
4485 	/*
4486 	 * In the future, more migrate types will be able to be isolation targets.
4487 	 */
4488 	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4489 		goto out;
4490 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4491 	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4492 	ret = 0;
4493 out:
4494 	spin_unlock_irqrestore(&zone->lock, flags);
4495 	if (!ret)
4496 		drain_all_pages();
4497 	return ret;
4498 }
4499 
4500 void unset_migratetype_isolate(struct page *page)
4501 {
4502 	struct zone *zone;
4503 	unsigned long flags;
4504 	zone = page_zone(page);
4505 	spin_lock_irqsave(&zone->lock, flags);
4506 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4507 		goto out;
4508 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4509 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4510 out:
4511 	spin_unlock_irqrestore(&zone->lock, flags);
4512 }
4513 
4514 #ifdef CONFIG_MEMORY_HOTREMOVE
4515 /*
4516  * All pages in the range must be isolated before calling this.
4517  */
4518 void
4519 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4520 {
4521 	struct page *page;
4522 	struct zone *zone;
4523 	int order, i;
4524 	unsigned long pfn;
4525 	unsigned long flags;
4526 	/* find the first valid pfn */
4527 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4528 		if (pfn_valid(pfn))
4529 			break;
4530 	if (pfn == end_pfn)
4531 		return;
4532 	zone = page_zone(pfn_to_page(pfn));
4533 	spin_lock_irqsave(&zone->lock, flags);
4534 	pfn = start_pfn;
4535 	while (pfn < end_pfn) {
4536 		if (!pfn_valid(pfn)) {
4537 			pfn++;
4538 			continue;
4539 		}
4540 		page = pfn_to_page(pfn);
4541 		BUG_ON(page_count(page));
4542 		BUG_ON(!PageBuddy(page));
4543 		order = page_order(page);
4544 #ifdef CONFIG_DEBUG_VM
4545 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4546 		       pfn, 1 << order, end_pfn);
4547 #endif
4548 		list_del(&page->lru);
4549 		rmv_page_order(page);
4550 		zone->free_area[order].nr_free--;
4551 		__mod_zone_page_state(zone, NR_FREE_PAGES,
4552 				      - (1UL << order));
4553 		for (i = 0; i < (1 << order); i++)
4554 			SetPageReserved((page+i));
4555 		pfn += (1 << order);
4556 	}
4557 	spin_unlock_irqrestore(&zone->lock, flags);
4558 }
4559 #endif
4560