xref: /openbmc/linux/mm/page_alloc.c (revision fd589a8f)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/kernel.h>
26 #include <linux/kmemcheck.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/slab.h>
32 #include <linux/oom.h>
33 #include <linux/notifier.h>
34 #include <linux/topology.h>
35 #include <linux/sysctl.h>
36 #include <linux/cpu.h>
37 #include <linux/cpuset.h>
38 #include <linux/memory_hotplug.h>
39 #include <linux/nodemask.h>
40 #include <linux/vmalloc.h>
41 #include <linux/mempolicy.h>
42 #include <linux/stop_machine.h>
43 #include <linux/sort.h>
44 #include <linux/pfn.h>
45 #include <linux/backing-dev.h>
46 #include <linux/fault-inject.h>
47 #include <linux/page-isolation.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/debugobjects.h>
50 #include <linux/kmemleak.h>
51 
52 #include <asm/tlbflush.h>
53 #include <asm/div64.h>
54 #include "internal.h"
55 
56 /*
57  * Array of node states.
58  */
59 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
60 	[N_POSSIBLE] = NODE_MASK_ALL,
61 	[N_ONLINE] = { { [0] = 1UL } },
62 #ifndef CONFIG_NUMA
63 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
64 #ifdef CONFIG_HIGHMEM
65 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
66 #endif
67 	[N_CPU] = { { [0] = 1UL } },
68 #endif	/* NUMA */
69 };
70 EXPORT_SYMBOL(node_states);
71 
72 unsigned long totalram_pages __read_mostly;
73 unsigned long totalreserve_pages __read_mostly;
74 unsigned long highest_memmap_pfn __read_mostly;
75 int percpu_pagelist_fraction;
76 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
77 
78 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
79 int pageblock_order __read_mostly;
80 #endif
81 
82 static void __free_pages_ok(struct page *page, unsigned int order);
83 
84 /*
85  * results with 256, 32 in the lowmem_reserve sysctl:
86  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
87  *	1G machine -> (16M dma, 784M normal, 224M high)
88  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
89  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
90  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
91  *
92  * TBD: should special case ZONE_DMA32 machines here - in those we normally
93  * don't need any ZONE_NORMAL reservation
94  */
95 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
96 #ifdef CONFIG_ZONE_DMA
97 	 256,
98 #endif
99 #ifdef CONFIG_ZONE_DMA32
100 	 256,
101 #endif
102 #ifdef CONFIG_HIGHMEM
103 	 32,
104 #endif
105 	 32,
106 };
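
/*
 * Illustrative sketch only (not part of this file's build): the arithmetic
 * the comment above implies.  For an allocation allowed to use a higher
 * zone, roughly (pages of the higher zones) / (the lower zone's ratio)
 * pages are held back in the lower zone.  example_lowmem_reserve() is a
 * hypothetical helper name used purely for this example.
 */
static inline unsigned long example_lowmem_reserve(unsigned long higher_zone_pages,
						   int ratio)
{
	/* e.g. 784M worth of ZONE_NORMAL with ratio 256 -> ~3M reserved in ZONE_DMA */
	return ratio ? higher_zone_pages / ratio : 0;
}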
107 
108 EXPORT_SYMBOL(totalram_pages);
109 
110 static char * const zone_names[MAX_NR_ZONES] = {
111 #ifdef CONFIG_ZONE_DMA
112 	 "DMA",
113 #endif
114 #ifdef CONFIG_ZONE_DMA32
115 	 "DMA32",
116 #endif
117 	 "Normal",
118 #ifdef CONFIG_HIGHMEM
119 	 "HighMem",
120 #endif
121 	 "Movable",
122 };
123 
124 int min_free_kbytes = 1024;
125 
126 unsigned long __meminitdata nr_kernel_pages;
127 unsigned long __meminitdata nr_all_pages;
128 static unsigned long __meminitdata dma_reserve;
129 
130 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
131   /*
132    * MAX_ACTIVE_REGIONS determines the maximum number of distinct
133    * ranges of memory (RAM) that may be registered with add_active_range().
134    * Ranges passed to add_active_range() will be merged if possible
135    * so the number of times add_active_range() can be called is
136    * related to the number of nodes and the number of holes
137    */
138   #ifdef CONFIG_MAX_ACTIVE_REGIONS
139     /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
140     #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
141   #else
142     #if MAX_NUMNODES >= 32
143       /* If there can be many nodes, allow up to 50 holes per node */
144       #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
145     #else
146       /* By default, allow up to 256 distinct regions */
147       #define MAX_ACTIVE_REGIONS 256
148     #endif
149   #endif
150 
151   static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
152   static int __meminitdata nr_nodemap_entries;
153   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
154   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
155   static unsigned long __initdata required_kernelcore;
156   static unsigned long __initdata required_movablecore;
157   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
158 
159   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
160   int movable_zone;
161   EXPORT_SYMBOL(movable_zone);
162 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
163 
164 #if MAX_NUMNODES > 1
165 int nr_node_ids __read_mostly = MAX_NUMNODES;
166 int nr_online_nodes __read_mostly = 1;
167 EXPORT_SYMBOL(nr_node_ids);
168 EXPORT_SYMBOL(nr_online_nodes);
169 #endif
170 
171 int page_group_by_mobility_disabled __read_mostly;
172 
173 static void set_pageblock_migratetype(struct page *page, int migratetype)
174 {
175 
176 	if (unlikely(page_group_by_mobility_disabled))
177 		migratetype = MIGRATE_UNMOVABLE;
178 
179 	set_pageblock_flags_group(page, (unsigned long)migratetype,
180 					PB_migrate, PB_migrate_end);
181 }
182 
183 bool oom_killer_disabled __read_mostly;
184 
185 #ifdef CONFIG_DEBUG_VM
186 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
187 {
188 	int ret = 0;
189 	unsigned seq;
190 	unsigned long pfn = page_to_pfn(page);
191 
192 	do {
193 		seq = zone_span_seqbegin(zone);
194 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
195 			ret = 1;
196 		else if (pfn < zone->zone_start_pfn)
197 			ret = 1;
198 	} while (zone_span_seqretry(zone, seq));
199 
200 	return ret;
201 }
202 
203 static int page_is_consistent(struct zone *zone, struct page *page)
204 {
205 	if (!pfn_valid_within(page_to_pfn(page)))
206 		return 0;
207 	if (zone != page_zone(page))
208 		return 0;
209 
210 	return 1;
211 }
212 /*
213  * Temporary debugging check for pages not lying within a given zone.
214  */
215 static int bad_range(struct zone *zone, struct page *page)
216 {
217 	if (page_outside_zone_boundaries(zone, page))
218 		return 1;
219 	if (!page_is_consistent(zone, page))
220 		return 1;
221 
222 	return 0;
223 }
224 #else
225 static inline int bad_range(struct zone *zone, struct page *page)
226 {
227 	return 0;
228 }
229 #endif
230 
231 static void bad_page(struct page *page)
232 {
233 	static unsigned long resume;
234 	static unsigned long nr_shown;
235 	static unsigned long nr_unshown;
236 
237 	/*
238 	 * Allow a burst of 60 reports, then keep quiet for that minute;
239 	 * or allow a steady drip of one report per second.
240 	 */
241 	if (nr_shown == 60) {
242 		if (time_before(jiffies, resume)) {
243 			nr_unshown++;
244 			goto out;
245 		}
246 		if (nr_unshown) {
247 			printk(KERN_ALERT
248 			      "BUG: Bad page state: %lu messages suppressed\n",
249 				nr_unshown);
250 			nr_unshown = 0;
251 		}
252 		nr_shown = 0;
253 	}
254 	if (nr_shown++ == 0)
255 		resume = jiffies + 60 * HZ;
256 
257 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
258 		current->comm, page_to_pfn(page));
259 	printk(KERN_ALERT
260 		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
261 		page, (void *)page->flags, page_count(page),
262 		page_mapcount(page), page->mapping, page->index);
263 
264 	dump_stack();
265 out:
266 	/* Leave bad fields for debug, except PageBuddy could make trouble */
267 	__ClearPageBuddy(page);
268 	add_taint(TAINT_BAD_PAGE);
269 }
270 
271 /*
272  * Higher-order pages are called "compound pages".  They are structured thusly:
273  *
274  * The first PAGE_SIZE page is called the "head page".
275  *
276  * The remaining PAGE_SIZE pages are called "tail pages".
277  *
278  * All pages have PG_compound set.  All pages have their ->private pointing at
279  * the head page (even the head page has this).
280  *
281  * The first tail page's ->lru.next holds the address of the compound page's
282  * put_page() function.  Its ->lru.prev holds the order of allocation.
283  * This usage means that zero-order pages may not be compound.
284  */
285 
286 static void free_compound_page(struct page *page)
287 {
288 	__free_pages_ok(page, compound_order(page));
289 }
290 
291 void prep_compound_page(struct page *page, unsigned long order)
292 {
293 	int i;
294 	int nr_pages = 1 << order;
295 
296 	set_compound_page_dtor(page, free_compound_page);
297 	set_compound_order(page, order);
298 	__SetPageHead(page);
299 	for (i = 1; i < nr_pages; i++) {
300 		struct page *p = page + i;
301 
302 		__SetPageTail(p);
303 		p->first_page = page;
304 	}
305 }
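
/*
 * Illustrative sketch only (hypothetical helper, not used by this file):
 * how the links set up by prep_compound_page() above can be read back.
 * Every tail page points at the head page via ->first_page, while the head
 * page records the order and destructor.
 */
static inline int example_is_tail_of(struct page *p, struct page *head)
{
	return PageTail(p) && p->first_page == head;
}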
306 
307 static int destroy_compound_page(struct page *page, unsigned long order)
308 {
309 	int i;
310 	int nr_pages = 1 << order;
311 	int bad = 0;
312 
313 	if (unlikely(compound_order(page) != order) ||
314 	    unlikely(!PageHead(page))) {
315 		bad_page(page);
316 		bad++;
317 	}
318 
319 	__ClearPageHead(page);
320 
321 	for (i = 1; i < nr_pages; i++) {
322 		struct page *p = page + i;
323 
324 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
325 			bad_page(page);
326 			bad++;
327 		}
328 		__ClearPageTail(p);
329 	}
330 
331 	return bad;
332 }
333 
334 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
335 {
336 	int i;
337 
338 	/*
339 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
340 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
341 	 */
342 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
343 	for (i = 0; i < (1 << order); i++)
344 		clear_highpage(page + i);
345 }
346 
347 static inline void set_page_order(struct page *page, int order)
348 {
349 	set_page_private(page, order);
350 	__SetPageBuddy(page);
351 }
352 
353 static inline void rmv_page_order(struct page *page)
354 {
355 	__ClearPageBuddy(page);
356 	set_page_private(page, 0);
357 }
358 
359 /*
360  * Locate the struct page for both the matching buddy in our
361  * pair (buddy1) and the combined order-(O+1) page they form (page).
362  *
363  * 1) Any buddy B1 will have an order O twin B2 which satisfies
364  * the following equation:
365  *     B2 = B1 ^ (1 << O)
366  * For example, if the starting buddy (B1) is #8, its order-1
367  * buddy (B2) is #10:
368  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
369  *
370  * 2) Any buddy B will have an order O+1 parent P which
371  * satisfies the following equation:
372  *     P = B & ~(1 << O)
373  *
374  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
375  */
376 static inline struct page *
377 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
378 {
379 	unsigned long buddy_idx = page_idx ^ (1 << order);
380 
381 	return page + (buddy_idx - page_idx);
382 }
383 
384 static inline unsigned long
385 __find_combined_index(unsigned long page_idx, unsigned int order)
386 {
387 	return (page_idx & ~(1 << order));
388 }
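
/*
 * Illustrative sketch only: the two formulas above worked through for a
 * concrete case.  At order 1, index 8 and index 10 are buddies
 * (8 ^ (1 << 1) == 10) and merge into the order-2 block starting at
 * index 8 (8 & ~(1 << 1) == 8).  The helper name is hypothetical.
 */
static inline unsigned long example_merged_index(unsigned long page_idx,
						 unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1UL << order);	/* B2 = B1 ^ (1 << O) */

	/* identical to page_idx & ~(1 << order), since only bit 'order' differs */
	return page_idx & buddy_idx;
}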
389 
390 /*
391  * This function checks whether a page is free && is the buddy.
392  * We can coalesce a page and its buddy if
393  * (a) the buddy is not in a hole &&
394  * (b) the buddy is in the buddy system &&
395  * (c) a page and its buddy have the same order &&
396  * (d) a page and its buddy are in the same zone.
397  *
398  * For recording whether a page is in the buddy system, we use PG_buddy.
399  * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
400  *
401  * For recording page's order, we use page_private(page).
402  */
403 static inline int page_is_buddy(struct page *page, struct page *buddy,
404 								int order)
405 {
406 	if (!pfn_valid_within(page_to_pfn(buddy)))
407 		return 0;
408 
409 	if (page_zone_id(page) != page_zone_id(buddy))
410 		return 0;
411 
412 	if (PageBuddy(buddy) && page_order(buddy) == order) {
413 		VM_BUG_ON(page_count(buddy) != 0);
414 		return 1;
415 	}
416 	return 0;
417 }
418 
419 /*
420  * Freeing function for a buddy system allocator.
421  *
422  * The concept of a buddy system is to maintain direct-mapped table
423  * (containing bit values) for memory blocks of various "orders".
424  * The bottom level table contains the map for the smallest allocatable
425  * units of memory (here, pages), and each level above it describes
426  * pairs of units from the levels below, hence, "buddies".
427  * At a high level, all that happens here is marking the table entry
428  * at the bottom level available, and propagating the changes upward
429  * as necessary, plus some accounting needed to play nicely with other
430  * parts of the VM system.
431  * At each level, we keep a list of pages, each the head of a contiguous
432  * run of (1 << order) free pages marked with PG_buddy. The page's
433  * order is recorded in the page_private(page) field.
434  * So when we are allocating or freeing one, we can derive the state of the
435  * other.  That is, if we allocate a small block, and both were
436  * free, the remainder of the region must be split into blocks.
437  * If a block is freed, and its buddy is also free, then this
438  * triggers coalescing into a block of larger size.
439  *
440  * -- wli
441  */
442 
443 static inline void __free_one_page(struct page *page,
444 		struct zone *zone, unsigned int order,
445 		int migratetype)
446 {
447 	unsigned long page_idx;
448 
449 	if (unlikely(PageCompound(page)))
450 		if (unlikely(destroy_compound_page(page, order)))
451 			return;
452 
453 	VM_BUG_ON(migratetype == -1);
454 
455 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
456 
457 	VM_BUG_ON(page_idx & ((1 << order) - 1));
458 	VM_BUG_ON(bad_range(zone, page));
459 
460 	while (order < MAX_ORDER-1) {
461 		unsigned long combined_idx;
462 		struct page *buddy;
463 
464 		buddy = __page_find_buddy(page, page_idx, order);
465 		if (!page_is_buddy(page, buddy, order))
466 			break;
467 
468 		/* Our buddy is free, merge with it and move up one order. */
469 		list_del(&buddy->lru);
470 		zone->free_area[order].nr_free--;
471 		rmv_page_order(buddy);
472 		combined_idx = __find_combined_index(page_idx, order);
473 		page = page + (combined_idx - page_idx);
474 		page_idx = combined_idx;
475 		order++;
476 	}
477 	set_page_order(page, order);
478 	list_add(&page->lru,
479 		&zone->free_area[order].free_list[migratetype]);
480 	zone->free_area[order].nr_free++;
481 }
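
/*
 * Illustrative sketch only: the merge loop of __free_one_page() above in
 * plain index arithmetic.  'buddy_free' is a hypothetical predicate standing
 * in for page_is_buddy(); the real code walks struct pages, not bare indices.
 */
static inline unsigned long example_merge_indices(unsigned long page_idx,
		unsigned int *order,
		int (*buddy_free)(unsigned long idx, unsigned int order))
{
	while (*order < MAX_ORDER - 1 &&
	       buddy_free(page_idx ^ (1UL << *order), *order)) {
		page_idx &= ~(1UL << *order);	/* step up to the combined block */
		(*order)++;
	}
	return page_idx;			/* start of the final free block */
}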
482 
483 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
484 /*
485  * free_page_mlock() -- clean up attempts to free an mlocked() page.
486  * Page should not be on lru, so no need to fix that up.
487  * free_pages_check() will verify...
488  */
489 static inline void free_page_mlock(struct page *page)
490 {
491 	__dec_zone_page_state(page, NR_MLOCK);
492 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
493 }
494 #else
495 static void free_page_mlock(struct page *page) { }
496 #endif
497 
498 static inline int free_pages_check(struct page *page)
499 {
500 	if (unlikely(page_mapcount(page) |
501 		(page->mapping != NULL)  |
502 		(atomic_read(&page->_count) != 0) |
503 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
504 		bad_page(page);
505 		return 1;
506 	}
507 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
508 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
509 	return 0;
510 }
511 
512 /*
513  * Frees a list of pages.
514  * Assumes all pages on list are in same zone, and of same order.
515  * count is the number of pages to free.
516  *
517  * If the zone was previously in an "all pages pinned" state then look to
518  * see if this freeing clears that state.
519  *
520  * And clear the zone's pages_scanned counter, to hold off the "all pages are
521  * pinned" detection logic.
522  */
523 static void free_pages_bulk(struct zone *zone, int count,
524 					struct list_head *list, int order)
525 {
526 	spin_lock(&zone->lock);
527 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
528 	zone->pages_scanned = 0;
529 
530 	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
531 	while (count--) {
532 		struct page *page;
533 
534 		VM_BUG_ON(list_empty(list));
535 		page = list_entry(list->prev, struct page, lru);
536 		/* have to delete it as __free_one_page list manipulates */
537 		list_del(&page->lru);
538 		__free_one_page(page, zone, order, page_private(page));
539 	}
540 	spin_unlock(&zone->lock);
541 }
542 
543 static void free_one_page(struct zone *zone, struct page *page, int order,
544 				int migratetype)
545 {
546 	spin_lock(&zone->lock);
547 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
548 	zone->pages_scanned = 0;
549 
550 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
551 	__free_one_page(page, zone, order, migratetype);
552 	spin_unlock(&zone->lock);
553 }
554 
555 static void __free_pages_ok(struct page *page, unsigned int order)
556 {
557 	unsigned long flags;
558 	int i;
559 	int bad = 0;
560 	int wasMlocked = TestClearPageMlocked(page);
561 
562 	kmemcheck_free_shadow(page, order);
563 
564 	for (i = 0 ; i < (1 << order) ; ++i)
565 		bad += free_pages_check(page + i);
566 	if (bad)
567 		return;
568 
569 	if (!PageHighMem(page)) {
570 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
571 		debug_check_no_obj_freed(page_address(page),
572 					   PAGE_SIZE << order);
573 	}
574 	arch_free_page(page, order);
575 	kernel_map_pages(page, 1 << order, 0);
576 
577 	local_irq_save(flags);
578 	if (unlikely(wasMlocked))
579 		free_page_mlock(page);
580 	__count_vm_events(PGFREE, 1 << order);
581 	free_one_page(page_zone(page), page, order,
582 					get_pageblock_migratetype(page));
583 	local_irq_restore(flags);
584 }
585 
586 /*
587  * permit the bootmem allocator to evade page validation on high-order frees
588  */
589 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
590 {
591 	if (order == 0) {
592 		__ClearPageReserved(page);
593 		set_page_count(page, 0);
594 		set_page_refcounted(page);
595 		__free_page(page);
596 	} else {
597 		int loop;
598 
599 		prefetchw(page);
600 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
601 			struct page *p = &page[loop];
602 
603 			if (loop + 1 < BITS_PER_LONG)
604 				prefetchw(p + 1);
605 			__ClearPageReserved(p);
606 			set_page_count(p, 0);
607 		}
608 
609 		set_page_refcounted(page);
610 		__free_pages(page, order);
611 	}
612 }
613 
614 
615 /*
616  * The order of subdivision here is critical for the IO subsystem.
617  * Please do not alter this order without good reasons and regression
618  * testing. Specifically, as large blocks of memory are subdivided,
619  * the order in which smaller blocks are delivered depends on the order
620  * they're subdivided in this function. This is the primary factor
621  * influencing the order in which pages are delivered to the IO
622  * subsystem according to empirical testing, and this is also justified
623  * by considering the behavior of a buddy system containing a single
624  * large block of memory acted on by a series of small allocations.
625  * This behavior is a critical factor in sglist merging's success.
626  *
627  * -- wli
628  */
629 static inline void expand(struct zone *zone, struct page *page,
630 	int low, int high, struct free_area *area,
631 	int migratetype)
632 {
633 	unsigned long size = 1 << high;
634 
635 	while (high > low) {
636 		area--;
637 		high--;
638 		size >>= 1;
639 		VM_BUG_ON(bad_range(zone, &page[size]));
640 		list_add(&page[size].lru, &area->free_list[migratetype]);
641 		area->nr_free++;
642 		set_page_order(&page[size], high);
643 	}
644 }
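
/*
 * Illustrative sketch only: what expand() above hands back to the free lists
 * when a small page is carved out of a larger block.  Splitting an order-3
 * block down to order 0 returns free blocks of 4, 2 and 1 pages at orders
 * 2, 1 and 0; only page[0] goes to the caller.  The helper is hypothetical.
 */
static inline unsigned int example_pages_returned_by_expand(unsigned int low,
							    unsigned int high)
{
	unsigned int returned = 0;

	while (high > low) {
		high--;
		returned += 1U << high;	/* one free block per intermediate order */
	}
	return returned;		/* low=0, high=3 -> 4 + 2 + 1 = 7 pages */
}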
645 
646 /*
647  * This page is about to be returned from the page allocator
648  */
649 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
650 {
651 	if (unlikely(page_mapcount(page) |
652 		(page->mapping != NULL)  |
653 		(atomic_read(&page->_count) != 0)  |
654 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
655 		bad_page(page);
656 		return 1;
657 	}
658 
659 	set_page_private(page, 0);
660 	set_page_refcounted(page);
661 
662 	arch_alloc_page(page, order);
663 	kernel_map_pages(page, 1 << order, 1);
664 
665 	if (gfp_flags & __GFP_ZERO)
666 		prep_zero_page(page, order, gfp_flags);
667 
668 	if (order && (gfp_flags & __GFP_COMP))
669 		prep_compound_page(page, order);
670 
671 	return 0;
672 }
673 
674 /*
675  * Go through the free lists for the given migratetype and remove
676  * the smallest available page from the freelists
677  */
678 static inline
679 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
680 						int migratetype)
681 {
682 	unsigned int current_order;
683 	struct free_area * area;
684 	struct page *page;
685 
686 	/* Find a page of the appropriate size in the preferred list */
687 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
688 		area = &(zone->free_area[current_order]);
689 		if (list_empty(&area->free_list[migratetype]))
690 			continue;
691 
692 		page = list_entry(area->free_list[migratetype].next,
693 							struct page, lru);
694 		list_del(&page->lru);
695 		rmv_page_order(page);
696 		area->nr_free--;
697 		expand(zone, page, order, current_order, area, migratetype);
698 		return page;
699 	}
700 
701 	return NULL;
702 }
703 
704 
705 /*
706  * This array describes the order in which free lists are fallen back on
707  * when the free lists for the desired migrate type are depleted
708  */
709 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
710 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
711 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
712 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
713 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
714 };
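
/*
 * Illustrative sketch only: how the table above is consulted.  An
 * allocation whose own free lists are empty tries the listed migrate types
 * left to right (see __rmqueue_fallback() below); e.g. MIGRATE_UNMOVABLE
 * falls back to MIGRATE_RECLAIMABLE first.  The helper name is hypothetical.
 */
static inline int example_first_fallback(int start_migratetype)
{
	return fallbacks[start_migratetype][0];
}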
715 
716 /*
717  * Move the free pages in a range to the free lists of the requested type.
718  * Note that start_page and end_page are not aligned on a pageblock
719  * boundary. If alignment is required, use move_freepages_block()
720  */
721 static int move_freepages(struct zone *zone,
722 			  struct page *start_page, struct page *end_page,
723 			  int migratetype)
724 {
725 	struct page *page;
726 	unsigned long order;
727 	int pages_moved = 0;
728 
729 #ifndef CONFIG_HOLES_IN_ZONE
730 	/*
731 	 * page_zone is not safe to call in this context when
732 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
733 	 * anyway as we check zone boundaries in move_freepages_block().
734 	 * Remove at a later date when no bug reports exist related to
735 	 * grouping pages by mobility
736 	 */
737 	BUG_ON(page_zone(start_page) != page_zone(end_page));
738 #endif
739 
740 	for (page = start_page; page <= end_page;) {
741 		/* Make sure we are not inadvertently changing nodes */
742 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
743 
744 		if (!pfn_valid_within(page_to_pfn(page))) {
745 			page++;
746 			continue;
747 		}
748 
749 		if (!PageBuddy(page)) {
750 			page++;
751 			continue;
752 		}
753 
754 		order = page_order(page);
755 		list_del(&page->lru);
756 		list_add(&page->lru,
757 			&zone->free_area[order].free_list[migratetype]);
758 		page += 1 << order;
759 		pages_moved += 1 << order;
760 	}
761 
762 	return pages_moved;
763 }
764 
765 static int move_freepages_block(struct zone *zone, struct page *page,
766 				int migratetype)
767 {
768 	unsigned long start_pfn, end_pfn;
769 	struct page *start_page, *end_page;
770 
771 	start_pfn = page_to_pfn(page);
772 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
773 	start_page = pfn_to_page(start_pfn);
774 	end_page = start_page + pageblock_nr_pages - 1;
775 	end_pfn = start_pfn + pageblock_nr_pages - 1;
776 
777 	/* Do not cross zone boundaries */
778 	if (start_pfn < zone->zone_start_pfn)
779 		start_page = page;
780 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
781 		return 0;
782 
783 	return move_freepages(zone, start_page, end_page, migratetype);
784 }
785 
786 /* Remove an element from the buddy allocator from the fallback list */
787 static inline struct page *
788 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
789 {
790 	struct free_area * area;
791 	int current_order;
792 	struct page *page;
793 	int migratetype, i;
794 
795 	/* Find the largest possible block of pages in the other list */
796 	for (current_order = MAX_ORDER-1; current_order >= order;
797 						--current_order) {
798 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
799 			migratetype = fallbacks[start_migratetype][i];
800 
801 			/* MIGRATE_RESERVE handled later if necessary */
802 			if (migratetype == MIGRATE_RESERVE)
803 				continue;
804 
805 			area = &(zone->free_area[current_order]);
806 			if (list_empty(&area->free_list[migratetype]))
807 				continue;
808 
809 			page = list_entry(area->free_list[migratetype].next,
810 					struct page, lru);
811 			area->nr_free--;
812 
813 			/*
814 			 * If breaking a large block of pages, move all free
815 			 * pages to the preferred allocation list. If falling
816 			 * back for a reclaimable kernel allocation, be more
817 			 * aggressive about taking ownership of free pages
818 			 */
819 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
820 					start_migratetype == MIGRATE_RECLAIMABLE ||
821 					page_group_by_mobility_disabled) {
822 				unsigned long pages;
823 				pages = move_freepages_block(zone, page,
824 								start_migratetype);
825 
826 				/* Claim the whole block if over half of it is free */
827 				if (pages >= (1 << (pageblock_order-1)) ||
828 						page_group_by_mobility_disabled)
829 					set_pageblock_migratetype(page,
830 								start_migratetype);
831 
832 				migratetype = start_migratetype;
833 			}
834 
835 			/* Remove the page from the freelists */
836 			list_del(&page->lru);
837 			rmv_page_order(page);
838 
839 			if (current_order == pageblock_order)
840 				set_pageblock_migratetype(page,
841 							start_migratetype);
842 
843 			expand(zone, page, order, current_order, area, migratetype);
844 			return page;
845 		}
846 	}
847 
848 	return NULL;
849 }
850 
851 /*
852  * Do the hard work of removing an element from the buddy allocator.
853  * Call me with the zone->lock already held.
854  */
855 static struct page *__rmqueue(struct zone *zone, unsigned int order,
856 						int migratetype)
857 {
858 	struct page *page;
859 
860 retry_reserve:
861 	page = __rmqueue_smallest(zone, order, migratetype);
862 
863 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
864 		page = __rmqueue_fallback(zone, order, migratetype);
865 
866 		/*
867 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
868 		 * is used because __rmqueue_smallest is an inline function
869 		 * and we want just one call site
870 		 */
871 		if (!page) {
872 			migratetype = MIGRATE_RESERVE;
873 			goto retry_reserve;
874 		}
875 	}
876 
877 	return page;
878 }
879 
880 /*
881  * Obtain a specified number of elements from the buddy allocator, all under
882  * a single hold of the lock, for efficiency.  Add them to the supplied list.
883  * Returns the number of new pages which were placed at *list.
884  */
885 static int rmqueue_bulk(struct zone *zone, unsigned int order,
886 			unsigned long count, struct list_head *list,
887 			int migratetype, int cold)
888 {
889 	int i;
890 
891 	spin_lock(&zone->lock);
892 	for (i = 0; i < count; ++i) {
893 		struct page *page = __rmqueue(zone, order, migratetype);
894 		if (unlikely(page == NULL))
895 			break;
896 
897 		/*
898 		 * Split buddy pages returned by expand() are received here
899 		 * in physical page order. The page is added to the caller's
900 		 * list and the list head then moves forward. From the caller's
901 		 * perspective, the linked list is ordered by page number in
902 		 * some conditions. This is useful for IO devices that can
903 		 * merge IO requests if the physical pages are ordered
904 		 * properly.
905 		 */
906 		if (likely(cold == 0))
907 			list_add(&page->lru, list);
908 		else
909 			list_add_tail(&page->lru, list);
910 		set_page_private(page, migratetype);
911 		list = &page->lru;
912 	}
913 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
914 	spin_unlock(&zone->lock);
915 	return i;
916 }
917 
918 #ifdef CONFIG_NUMA
919 /*
920  * Called from the vmstat counter updater to drain the currently
921  * executing processor's pagesets for remote nodes after they have
922  * expired.
923  *
924  * Note that this function must be called with the thread pinned to
925  * a single processor.
926  */
927 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
928 {
929 	unsigned long flags;
930 	int to_drain;
931 
932 	local_irq_save(flags);
933 	if (pcp->count >= pcp->batch)
934 		to_drain = pcp->batch;
935 	else
936 		to_drain = pcp->count;
937 	free_pages_bulk(zone, to_drain, &pcp->list, 0);
938 	pcp->count -= to_drain;
939 	local_irq_restore(flags);
940 }
941 #endif
942 
943 /*
944  * Drain pages of the indicated processor.
945  *
946  * The processor must either be the current processor, with the
947  * calling thread pinned to it, or a processor that is not
948  * online.
949  */
950 static void drain_pages(unsigned int cpu)
951 {
952 	unsigned long flags;
953 	struct zone *zone;
954 
955 	for_each_populated_zone(zone) {
956 		struct per_cpu_pageset *pset;
957 		struct per_cpu_pages *pcp;
958 
959 		pset = zone_pcp(zone, cpu);
960 
961 		pcp = &pset->pcp;
962 		local_irq_save(flags);
963 		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
964 		pcp->count = 0;
965 		local_irq_restore(flags);
966 	}
967 }
968 
969 /*
970  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
971  */
972 void drain_local_pages(void *arg)
973 {
974 	drain_pages(smp_processor_id());
975 }
976 
977 /*
978  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
979  */
980 void drain_all_pages(void)
981 {
982 	on_each_cpu(drain_local_pages, NULL, 1);
983 }
984 
985 #ifdef CONFIG_HIBERNATION
986 
987 void mark_free_pages(struct zone *zone)
988 {
989 	unsigned long pfn, max_zone_pfn;
990 	unsigned long flags;
991 	int order, t;
992 	struct list_head *curr;
993 
994 	if (!zone->spanned_pages)
995 		return;
996 
997 	spin_lock_irqsave(&zone->lock, flags);
998 
999 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1000 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1001 		if (pfn_valid(pfn)) {
1002 			struct page *page = pfn_to_page(pfn);
1003 
1004 			if (!swsusp_page_is_forbidden(page))
1005 				swsusp_unset_page_free(page);
1006 		}
1007 
1008 	for_each_migratetype_order(order, t) {
1009 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1010 			unsigned long i;
1011 
1012 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1013 			for (i = 0; i < (1UL << order); i++)
1014 				swsusp_set_page_free(pfn_to_page(pfn + i));
1015 		}
1016 	}
1017 	spin_unlock_irqrestore(&zone->lock, flags);
1018 }
1019 #endif /* CONFIG_HIBERNATION */
1020 
1021 /*
1022  * Free a 0-order page
1023  */
1024 static void free_hot_cold_page(struct page *page, int cold)
1025 {
1026 	struct zone *zone = page_zone(page);
1027 	struct per_cpu_pages *pcp;
1028 	unsigned long flags;
1029 	int wasMlocked = TestClearPageMlocked(page);
1030 
1031 	kmemcheck_free_shadow(page, 0);
1032 
1033 	if (PageAnon(page))
1034 		page->mapping = NULL;
1035 	if (free_pages_check(page))
1036 		return;
1037 
1038 	if (!PageHighMem(page)) {
1039 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1040 		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1041 	}
1042 	arch_free_page(page, 0);
1043 	kernel_map_pages(page, 1, 0);
1044 
1045 	pcp = &zone_pcp(zone, get_cpu())->pcp;
1046 	set_page_private(page, get_pageblock_migratetype(page));
1047 	local_irq_save(flags);
1048 	if (unlikely(wasMlocked))
1049 		free_page_mlock(page);
1050 	__count_vm_event(PGFREE);
1051 
1052 	if (cold)
1053 		list_add_tail(&page->lru, &pcp->list);
1054 	else
1055 		list_add(&page->lru, &pcp->list);
1056 	pcp->count++;
1057 	if (pcp->count >= pcp->high) {
1058 		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1059 		pcp->count -= pcp->batch;
1060 	}
1061 	local_irq_restore(flags);
1062 	put_cpu();
1063 }
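
/*
 * Illustrative sketch only: the pcp trimming above in isolation.  Once the
 * per-cpu list reaches pcp->high entries, a batch is returned to the buddy
 * lists, so the list length hovers between high - batch and high.  The
 * helper name and bare int parameters are hypothetical.
 */
static inline int example_pcp_count_after_free(int count, int high, int batch)
{
	count++;			/* the page just placed on the pcp list */
	if (count >= high)
		count -= batch;		/* free_pages_bulk() returns one batch */
	return count;
}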
1064 
1065 void free_hot_page(struct page *page)
1066 {
1067 	free_hot_cold_page(page, 0);
1068 }
1069 
1070 void free_cold_page(struct page *page)
1071 {
1072 	free_hot_cold_page(page, 1);
1073 }
1074 
1075 /*
1076  * split_page takes a non-compound higher-order page, and splits it into
1077  * n (1<<order) sub-pages: page[0..n-1]
1078  * Each sub-page must be freed individually.
1079  *
1080  * Note: this is probably too low level an operation for use in drivers.
1081  * Please consult with lkml before using this in your driver.
1082  */
1083 void split_page(struct page *page, unsigned int order)
1084 {
1085 	int i;
1086 
1087 	VM_BUG_ON(PageCompound(page));
1088 	VM_BUG_ON(!page_count(page));
1089 
1090 #ifdef CONFIG_KMEMCHECK
1091 	/*
1092 	 * Split shadow pages too, because free(page[0]) would
1093 	 * otherwise free the whole shadow.
1094 	 */
1095 	if (kmemcheck_page_is_tracked(page))
1096 		split_page(virt_to_page(page[0].shadow), order);
1097 #endif
1098 
1099 	for (i = 1; i < (1 << order); i++)
1100 		set_page_refcounted(page + i);
1101 }
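
/*
 * Illustrative sketch only (hypothetical caller): after split_page() above,
 * each constituent page carries its own reference and must be freed on its
 * own.  Here the caller keeps page[0] and releases the rest.
 */
static inline void example_keep_first_page_only(struct page *page, unsigned int order)
{
	int i;

	split_page(page, order);
	for (i = 1; i < (1 << order); i++)
		__free_page(page + i);	/* each sub-page freed individually */
}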
1102 
1103 /*
1104  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1105  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1106  * or two.
1107  */
1108 static inline
1109 struct page *buffered_rmqueue(struct zone *preferred_zone,
1110 			struct zone *zone, int order, gfp_t gfp_flags,
1111 			int migratetype)
1112 {
1113 	unsigned long flags;
1114 	struct page *page;
1115 	int cold = !!(gfp_flags & __GFP_COLD);
1116 	int cpu;
1117 
1118 again:
1119 	cpu  = get_cpu();
1120 	if (likely(order == 0)) {
1121 		struct per_cpu_pages *pcp;
1122 
1123 		pcp = &zone_pcp(zone, cpu)->pcp;
1124 		local_irq_save(flags);
1125 		if (!pcp->count) {
1126 			pcp->count = rmqueue_bulk(zone, 0,
1127 					pcp->batch, &pcp->list,
1128 					migratetype, cold);
1129 			if (unlikely(!pcp->count))
1130 				goto failed;
1131 		}
1132 
1133 		/* Find a page of the appropriate migrate type */
1134 		if (cold) {
1135 			list_for_each_entry_reverse(page, &pcp->list, lru)
1136 				if (page_private(page) == migratetype)
1137 					break;
1138 		} else {
1139 			list_for_each_entry(page, &pcp->list, lru)
1140 				if (page_private(page) == migratetype)
1141 					break;
1142 		}
1143 
1144 		/* Allocate more to the pcp list if necessary */
1145 		if (unlikely(&page->lru == &pcp->list)) {
1146 			pcp->count += rmqueue_bulk(zone, 0,
1147 					pcp->batch, &pcp->list,
1148 					migratetype, cold);
1149 			page = list_entry(pcp->list.next, struct page, lru);
1150 		}
1151 
1152 		list_del(&page->lru);
1153 		pcp->count--;
1154 	} else {
1155 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1156 			/*
1157 			 * __GFP_NOFAIL is not to be used in new code.
1158 			 *
1159 			 * All __GFP_NOFAIL callers should be fixed so that they
1160 			 * properly detect and handle allocation failures.
1161 			 *
1162 			 * We most definitely don't want callers attempting to
1163 			 * allocate greater than order-1 page units with
1164 			 * __GFP_NOFAIL.
1165 			 */
1166 			WARN_ON_ONCE(order > 1);
1167 		}
1168 		spin_lock_irqsave(&zone->lock, flags);
1169 		page = __rmqueue(zone, order, migratetype);
1170 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1171 		spin_unlock(&zone->lock);
1172 		if (!page)
1173 			goto failed;
1174 	}
1175 
1176 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1177 	zone_statistics(preferred_zone, zone);
1178 	local_irq_restore(flags);
1179 	put_cpu();
1180 
1181 	VM_BUG_ON(bad_range(zone, page));
1182 	if (prep_new_page(page, order, gfp_flags))
1183 		goto again;
1184 	return page;
1185 
1186 failed:
1187 	local_irq_restore(flags);
1188 	put_cpu();
1189 	return NULL;
1190 }
1191 
1192 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1193 #define ALLOC_WMARK_MIN		WMARK_MIN
1194 #define ALLOC_WMARK_LOW		WMARK_LOW
1195 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1196 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1197 
1198 /* Mask to get the watermark bits */
1199 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1200 
1201 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1202 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1203 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1204 
1205 #ifdef CONFIG_FAIL_PAGE_ALLOC
1206 
1207 static struct fail_page_alloc_attr {
1208 	struct fault_attr attr;
1209 
1210 	u32 ignore_gfp_highmem;
1211 	u32 ignore_gfp_wait;
1212 	u32 min_order;
1213 
1214 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1215 
1216 	struct dentry *ignore_gfp_highmem_file;
1217 	struct dentry *ignore_gfp_wait_file;
1218 	struct dentry *min_order_file;
1219 
1220 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1221 
1222 } fail_page_alloc = {
1223 	.attr = FAULT_ATTR_INITIALIZER,
1224 	.ignore_gfp_wait = 1,
1225 	.ignore_gfp_highmem = 1,
1226 	.min_order = 1,
1227 };
1228 
1229 static int __init setup_fail_page_alloc(char *str)
1230 {
1231 	return setup_fault_attr(&fail_page_alloc.attr, str);
1232 }
1233 __setup("fail_page_alloc=", setup_fail_page_alloc);
1234 
1235 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1236 {
1237 	if (order < fail_page_alloc.min_order)
1238 		return 0;
1239 	if (gfp_mask & __GFP_NOFAIL)
1240 		return 0;
1241 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1242 		return 0;
1243 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1244 		return 0;
1245 
1246 	return should_fail(&fail_page_alloc.attr, 1 << order);
1247 }
1248 
1249 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1250 
1251 static int __init fail_page_alloc_debugfs(void)
1252 {
1253 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1254 	struct dentry *dir;
1255 	int err;
1256 
1257 	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1258 				       "fail_page_alloc");
1259 	if (err)
1260 		return err;
1261 	dir = fail_page_alloc.attr.dentries.dir;
1262 
1263 	fail_page_alloc.ignore_gfp_wait_file =
1264 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1265 				      &fail_page_alloc.ignore_gfp_wait);
1266 
1267 	fail_page_alloc.ignore_gfp_highmem_file =
1268 		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1269 				      &fail_page_alloc.ignore_gfp_highmem);
1270 	fail_page_alloc.min_order_file =
1271 		debugfs_create_u32("min-order", mode, dir,
1272 				   &fail_page_alloc.min_order);
1273 
1274 	if (!fail_page_alloc.ignore_gfp_wait_file ||
1275             !fail_page_alloc.ignore_gfp_highmem_file ||
1276             !fail_page_alloc.min_order_file) {
1277 		err = -ENOMEM;
1278 		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1279 		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1280 		debugfs_remove(fail_page_alloc.min_order_file);
1281 		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1282 	}
1283 
1284 	return err;
1285 }
1286 
1287 late_initcall(fail_page_alloc_debugfs);
1288 
1289 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1290 
1291 #else /* CONFIG_FAIL_PAGE_ALLOC */
1292 
1293 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1294 {
1295 	return 0;
1296 }
1297 
1298 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1299 
1300 /*
1301  * Return 1 if free pages are above 'mark'. This takes into account the order
1302  * of the allocation.
1303  */
1304 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1305 		      int classzone_idx, int alloc_flags)
1306 {
1307 	/* free_pages my go negative - that's OK */
1308 	/* free_pages may go negative - that's OK */
1309 	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1310 	int o;
1311 
1312 	if (alloc_flags & ALLOC_HIGH)
1313 		min -= min / 2;
1314 	if (alloc_flags & ALLOC_HARDER)
1315 		min -= min / 4;
1316 
1317 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1318 		return 0;
1319 	for (o = 0; o < order; o++) {
1320 		/* At the next order, this order's pages become unavailable */
1321 		free_pages -= z->free_area[o].nr_free << o;
1322 
1323 		/* Require fewer higher order pages to be free */
1324 		min >>= 1;
1325 
1326 		if (free_pages <= min)
1327 			return 0;
1328 	}
1329 	return 1;
1330 }
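
/*
 * Illustrative sketch only: the per-order loop above with the zone state
 * passed in as a plain array and the lowmem_reserve term omitted.  With
 * mark = 1024 and an order-2 request, the zone must have more than 1024
 * free pages overall, then more than 512 outside order-0 blocks, then more
 * than 256 outside order-0/1 blocks.  The helper name is hypothetical.
 */
static inline int example_watermark_ok(long free_pages, long min,
				       const unsigned long *nr_free_by_order,
				       int order)
{
	int o;

	if (free_pages <= min)
		return 0;
	for (o = 0; o < order; o++) {
		/* blocks of this order are too small to satisfy the request */
		free_pages -= nr_free_by_order[o] << o;
		/* but fewer of the larger blocks need to be free */
		min >>= 1;
		if (free_pages <= min)
			return 0;
	}
	return 1;
}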
1331 
1332 #ifdef CONFIG_NUMA
1333 /*
1334  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1335  * skip over zones that are not allowed by the cpuset, or that have
1336  * been recently (in last second) found to be nearly full.  See further
1337  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1338  * that have to skip over a lot of full or unallowed zones.
1339  *
1340  * If the zonelist cache is present in the passed in zonelist, then
1341  * returns a pointer to the allowed node mask (either the current
1342  * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1343  *
1344  * If the zonelist cache is not available for this zonelist, does
1345  * nothing and returns NULL.
1346  *
1347  * If the fullzones BITMAP in the zonelist cache is stale (more than
1348  * a second since last zap'd) then we zap it out (clear its bits.)
1349  *
1350  * We hold off even calling zlc_setup, until after we've checked the
1351  * first zone in the zonelist, on the theory that most allocations will
1352  * be satisfied from that first zone, so best to examine that zone as
1353  * quickly as we can.
1354  */
1355 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1356 {
1357 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1358 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1359 
1360 	zlc = zonelist->zlcache_ptr;
1361 	if (!zlc)
1362 		return NULL;
1363 
1364 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1365 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1366 		zlc->last_full_zap = jiffies;
1367 	}
1368 
1369 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1370 					&cpuset_current_mems_allowed :
1371 					&node_states[N_HIGH_MEMORY];
1372 	return allowednodes;
1373 }
1374 
1375 /*
1376  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1377  * if it is worth looking at further for free memory:
1378  *  1) Check that the zone isn't thought to be full (doesn't have its
1379  *     bit set in the zonelist_cache fullzones BITMAP).
1380  *  2) Check that the zones node (obtained from the zonelist_cache
1381  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1382  * Return true (non-zero) if zone is worth looking at further, or
1383  * else return false (zero) if it is not.
1384  *
1385  * This check -ignores- the distinction between various watermarks,
1386  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1387  * found to be full for any variation of these watermarks, it will
1388  * be considered full for up to one second by all requests, unless
1389  * we are so low on memory on all allowed nodes that we are forced
1390  * into the second scan of the zonelist.
1391  *
1392  * In the second scan we ignore this zonelist cache and exactly
1393  * apply the watermarks to all zones, even if it is slower to do so.
1394  * We are low on memory in the second scan, and should leave no stone
1395  * unturned looking for a free page.
1396  */
1397 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1398 						nodemask_t *allowednodes)
1399 {
1400 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1401 	int i;				/* index of *z in zonelist zones */
1402 	int n;				/* node that zone *z is on */
1403 
1404 	zlc = zonelist->zlcache_ptr;
1405 	if (!zlc)
1406 		return 1;
1407 
1408 	i = z - zonelist->_zonerefs;
1409 	n = zlc->z_to_n[i];
1410 
1411 	/* This zone is worth trying if it is allowed but not full */
1412 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1413 }
1414 
1415 /*
1416  * Given 'z' scanning a zonelist, set the corresponding bit in
1417  * zlc->fullzones, so that subsequent attempts to allocate a page
1418  * from that zone don't waste time re-examining it.
1419  */
1420 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1421 {
1422 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1423 	int i;				/* index of *z in zonelist zones */
1424 
1425 	zlc = zonelist->zlcache_ptr;
1426 	if (!zlc)
1427 		return;
1428 
1429 	i = z - zonelist->_zonerefs;
1430 
1431 	set_bit(i, zlc->fullzones);
1432 }
1433 
1434 #else	/* CONFIG_NUMA */
1435 
1436 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1437 {
1438 	return NULL;
1439 }
1440 
1441 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1442 				nodemask_t *allowednodes)
1443 {
1444 	return 1;
1445 }
1446 
1447 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1448 {
1449 }
1450 #endif	/* CONFIG_NUMA */
1451 
1452 /*
1453  * get_page_from_freelist goes through the zonelist trying to allocate
1454  * a page.
1455  */
1456 static struct page *
1457 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1458 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1459 		struct zone *preferred_zone, int migratetype)
1460 {
1461 	struct zoneref *z;
1462 	struct page *page = NULL;
1463 	int classzone_idx;
1464 	struct zone *zone;
1465 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1466 	int zlc_active = 0;		/* set if using zonelist_cache */
1467 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1468 
1469 	classzone_idx = zone_idx(preferred_zone);
1470 zonelist_scan:
1471 	/*
1472 	 * Scan zonelist, looking for a zone with enough free.
1473 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1474 	 */
1475 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1476 						high_zoneidx, nodemask) {
1477 		if (NUMA_BUILD && zlc_active &&
1478 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1479 				continue;
1480 		if ((alloc_flags & ALLOC_CPUSET) &&
1481 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1482 				goto try_next_zone;
1483 
1484 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1485 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1486 			unsigned long mark;
1487 			int ret;
1488 
1489 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1490 			if (zone_watermark_ok(zone, order, mark,
1491 				    classzone_idx, alloc_flags))
1492 				goto try_this_zone;
1493 
1494 			if (zone_reclaim_mode == 0)
1495 				goto this_zone_full;
1496 
1497 			ret = zone_reclaim(zone, gfp_mask, order);
1498 			switch (ret) {
1499 			case ZONE_RECLAIM_NOSCAN:
1500 				/* did not scan */
1501 				goto try_next_zone;
1502 			case ZONE_RECLAIM_FULL:
1503 				/* scanned but unreclaimable */
1504 				goto this_zone_full;
1505 			default:
1506 				/* did we reclaim enough */
1507 				if (!zone_watermark_ok(zone, order, mark,
1508 						classzone_idx, alloc_flags))
1509 					goto this_zone_full;
1510 			}
1511 		}
1512 
1513 try_this_zone:
1514 		page = buffered_rmqueue(preferred_zone, zone, order,
1515 						gfp_mask, migratetype);
1516 		if (page)
1517 			break;
1518 this_zone_full:
1519 		if (NUMA_BUILD)
1520 			zlc_mark_zone_full(zonelist, z);
1521 try_next_zone:
1522 		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1523 			/*
1524 			 * we do zlc_setup after the first zone is tried but only
1525 			 * we do zlc_setup after the first zone is tried, but only
1526 			 * if there are multiple nodes to make it worthwhile
1527 			allowednodes = zlc_setup(zonelist, alloc_flags);
1528 			zlc_active = 1;
1529 			did_zlc_setup = 1;
1530 		}
1531 	}
1532 
1533 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1534 		/* Disable zlc cache for second zonelist scan */
1535 		zlc_active = 0;
1536 		goto zonelist_scan;
1537 	}
1538 	return page;
1539 }
1540 
1541 static inline int
1542 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1543 				unsigned long pages_reclaimed)
1544 {
1545 	/* Do not loop if specifically requested */
1546 	if (gfp_mask & __GFP_NORETRY)
1547 		return 0;
1548 
1549 	/*
1550 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1551 	 * means __GFP_NOFAIL, but that may not be true in other
1552 	 * implementations.
1553 	 */
1554 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1555 		return 1;
1556 
1557 	/*
1558 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1559 	 * specified, then we retry until we no longer reclaim any pages
1560 	 * (above), or we've reclaimed an order of pages at least as
1561 	 * large as the allocation's order. In both cases, if the
1562 	 * allocation still fails, we stop retrying.
1563 	 */
1564 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1565 		return 1;
1566 
1567 	/*
1568 	 * Don't let big-order allocations loop unless the caller
1569 	 * explicitly requests that.
1570 	 */
1571 	if (gfp_mask & __GFP_NOFAIL)
1572 		return 1;
1573 
1574 	return 0;
1575 }
1576 
1577 static inline struct page *
1578 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1579 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1580 	nodemask_t *nodemask, struct zone *preferred_zone,
1581 	int migratetype)
1582 {
1583 	struct page *page;
1584 
1585 	/* Acquire the OOM killer lock for the zones in zonelist */
1586 	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1587 		schedule_timeout_uninterruptible(1);
1588 		return NULL;
1589 	}
1590 
1591 	/*
1592 	 * Go through the zonelist yet one more time, keep very high watermark
1593 	 * here, this is only to catch a parallel oom killing, we must fail if
1594 	 * we're still under heavy pressure.
1595 	 */
1596 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1597 		order, zonelist, high_zoneidx,
1598 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1599 		preferred_zone, migratetype);
1600 	if (page)
1601 		goto out;
1602 
1603 	/* The OOM killer will not help higher order allocs */
1604 	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1605 		goto out;
1606 
1607 	/* Exhausted what can be done so it's blamo time */
1608 	out_of_memory(zonelist, gfp_mask, order);
1609 
1610 out:
1611 	clear_zonelist_oom(zonelist, gfp_mask);
1612 	return page;
1613 }
1614 
1615 /* The really slow allocator path where we enter direct reclaim */
1616 static inline struct page *
1617 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1618 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1619 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1620 	int migratetype, unsigned long *did_some_progress)
1621 {
1622 	struct page *page = NULL;
1623 	struct reclaim_state reclaim_state;
1624 	struct task_struct *p = current;
1625 
1626 	cond_resched();
1627 
1628 	/* We now go into synchronous reclaim */
1629 	cpuset_memory_pressure_bump();
1630 
1631 	/*
1632 	 * The task's cpuset might have expanded its set of allowable nodes
1633 	 */
1634 	p->flags |= PF_MEMALLOC;
1635 	lockdep_set_current_reclaim_state(gfp_mask);
1636 	reclaim_state.reclaimed_slab = 0;
1637 	p->reclaim_state = &reclaim_state;
1638 
1639 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1640 
1641 	p->reclaim_state = NULL;
1642 	lockdep_clear_current_reclaim_state();
1643 	p->flags &= ~PF_MEMALLOC;
1644 
1645 	cond_resched();
1646 
1647 	if (order != 0)
1648 		drain_all_pages();
1649 
1650 	if (likely(*did_some_progress))
1651 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1652 					zonelist, high_zoneidx,
1653 					alloc_flags, preferred_zone,
1654 					migratetype);
1655 	return page;
1656 }
1657 
1658 /*
1659  * This is called in the allocator slow-path if the allocation request is of
1660  * sufficient urgency to ignore watermarks and take other desperate measures
1661  */
1662 static inline struct page *
1663 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1664 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1665 	nodemask_t *nodemask, struct zone *preferred_zone,
1666 	int migratetype)
1667 {
1668 	struct page *page;
1669 
1670 	do {
1671 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1672 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1673 			preferred_zone, migratetype);
1674 
1675 		if (!page && gfp_mask & __GFP_NOFAIL)
1676 			congestion_wait(BLK_RW_ASYNC, HZ/50);
1677 	} while (!page && (gfp_mask & __GFP_NOFAIL));
1678 
1679 	return page;
1680 }
1681 
1682 static inline
1683 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1684 						enum zone_type high_zoneidx)
1685 {
1686 	struct zoneref *z;
1687 	struct zone *zone;
1688 
1689 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1690 		wakeup_kswapd(zone, order);
1691 }
1692 
1693 static inline int
1694 gfp_to_alloc_flags(gfp_t gfp_mask)
1695 {
1696 	struct task_struct *p = current;
1697 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1698 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1699 
1700 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1701 	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1702 
1703 	/*
1704 	 * The caller may dip into page reserves a bit more if the caller
1705 	 * cannot run direct reclaim, or if the caller has realtime scheduling
1706 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1707 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1708 	 */
1709 	alloc_flags |= (gfp_mask & __GFP_HIGH);
1710 
1711 	if (!wait) {
1712 		alloc_flags |= ALLOC_HARDER;
1713 		/*
1714 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1715 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1716 		 */
1717 		alloc_flags &= ~ALLOC_CPUSET;
1718 	} else if (unlikely(rt_task(p)))
1719 		alloc_flags |= ALLOC_HARDER;
1720 
1721 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1722 		if (!in_interrupt() &&
1723 		    ((p->flags & PF_MEMALLOC) ||
1724 		     unlikely(test_thread_flag(TIF_MEMDIE))))
1725 			alloc_flags |= ALLOC_NO_WATERMARKS;
1726 	}
1727 
1728 	return alloc_flags;
1729 }
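
/*
 * Illustrative sketch only: what gfp_to_alloc_flags() above yields for two
 * common masks, assuming a non-realtime task outside PF_MEMALLOC/TIF_MEMDIE
 * contexts.  GFP_KERNEL keeps the defaults; GFP_ATOMIC (__GFP_HIGH set, no
 * __GFP_WAIT) digs deeper into reserves and ignores cpusets.
 */
static inline int example_alloc_flags(int atomic)
{
	if (atomic)	/* GFP_ATOMIC */
		return ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER;
	return ALLOC_WMARK_MIN | ALLOC_CPUSET;	/* GFP_KERNEL */
}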
1730 
1731 static inline struct page *
1732 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1733 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1734 	nodemask_t *nodemask, struct zone *preferred_zone,
1735 	int migratetype)
1736 {
1737 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1738 	struct page *page = NULL;
1739 	int alloc_flags;
1740 	unsigned long pages_reclaimed = 0;
1741 	unsigned long did_some_progress;
1742 	struct task_struct *p = current;
1743 
1744 	/*
1745 	 * In the slowpath, we sanity check order to avoid ever trying to
1746 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1747 	 * be using allocators in order of preference for an area that is
1748 	 * too large.
1749 	 */
1750 	if (order >= MAX_ORDER) {
1751 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1752 		return NULL;
1753 	}
1754 
1755 	/*
1756 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1757 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1758 	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1759 	 * using a larger set of nodes after it has established that the
1760 	 * allowed per node queues are empty and that nodes are
1761 	 * over allocated.
1762 	 */
1763 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1764 		goto nopage;
1765 
1766 	wake_all_kswapd(order, zonelist, high_zoneidx);
1767 
1768 	/*
1769 	 * OK, we're below the kswapd watermark and have kicked background
1770 	 * reclaim. Now things get more complex, so set up alloc_flags according
1771 	 * to how we want to proceed.
1772 	 */
1773 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1774 
1775 restart:
1776 	/* This is the last chance, in general, before the goto nopage. */
1777 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1778 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1779 			preferred_zone, migratetype);
1780 	if (page)
1781 		goto got_pg;
1782 
1783 rebalance:
1784 	/* Allocate without watermarks if the context allows */
1785 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1786 		page = __alloc_pages_high_priority(gfp_mask, order,
1787 				zonelist, high_zoneidx, nodemask,
1788 				preferred_zone, migratetype);
1789 		if (page)
1790 			goto got_pg;
1791 	}
1792 
1793 	/* Atomic allocations - we can't balance anything */
1794 	if (!wait)
1795 		goto nopage;
1796 
1797 	/* Avoid recursion of direct reclaim */
1798 	if (p->flags & PF_MEMALLOC)
1799 		goto nopage;
1800 
1801 	/* Avoid allocations with no watermarks from looping endlessly */
1802 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1803 		goto nopage;
1804 
1805 	/* Try direct reclaim and then allocating */
1806 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1807 					zonelist, high_zoneidx,
1808 					nodemask,
1809 					alloc_flags, preferred_zone,
1810 					migratetype, &did_some_progress);
1811 	if (page)
1812 		goto got_pg;
1813 
1814 	/*
1815 	 * If we failed to make any progress reclaiming, then we are
1816 	 * running out of options and have to consider going OOM
1817 	 */
1818 	if (!did_some_progress) {
1819 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1820 			if (oom_killer_disabled)
1821 				goto nopage;
1822 			page = __alloc_pages_may_oom(gfp_mask, order,
1823 					zonelist, high_zoneidx,
1824 					nodemask, preferred_zone,
1825 					migratetype);
1826 			if (page)
1827 				goto got_pg;
1828 
1829 			/*
1830 			 * The OOM killer does not trigger for high-order,
1831 			 * non-__GFP_NOFAIL allocations, so if no progress is being
1832 			 * made, there are no other options and retrying is
1833 			 * unlikely to help.
1834 			 */
1835 			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1836 						!(gfp_mask & __GFP_NOFAIL))
1837 				goto nopage;
1838 
1839 			goto restart;
1840 		}
1841 	}
1842 
1843 	/* Check if we should retry the allocation */
1844 	pages_reclaimed += did_some_progress;
1845 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1846 		/* Wait for some write requests to complete then retry */
1847 		congestion_wait(BLK_RW_ASYNC, HZ/50);
1848 		goto rebalance;
1849 	}
1850 
1851 nopage:
1852 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1853 		printk(KERN_WARNING "%s: page allocation failure."
1854 			" order:%d, mode:0x%x\n",
1855 			p->comm, order, gfp_mask);
1856 		dump_stack();
1857 		show_mem();
1858 	}
1859 	return page;
1860 got_pg:
1861 	if (kmemcheck_enabled)
1862 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1863 	return page;
1864 
1865 }
1866 
1867 /*
1868  * This is the 'heart' of the zoned buddy allocator.
1869  */
1870 struct page *
1871 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1872 			struct zonelist *zonelist, nodemask_t *nodemask)
1873 {
1874 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1875 	struct zone *preferred_zone;
1876 	struct page *page;
1877 	int migratetype = allocflags_to_migratetype(gfp_mask);
1878 
1879 	gfp_mask &= gfp_allowed_mask;
1880 
1881 	lockdep_trace_alloc(gfp_mask);
1882 
1883 	might_sleep_if(gfp_mask & __GFP_WAIT);
1884 
1885 	if (should_fail_alloc_page(gfp_mask, order))
1886 		return NULL;
1887 
1888 	/*
1889 	 * Check the zones suitable for the gfp_mask contain at least one
1890 	 * valid zone. It's possible to have an empty zonelist as a result
1891 	 * of GFP_THISNODE and a memoryless node
1892 	 */
1893 	if (unlikely(!zonelist->_zonerefs->zone))
1894 		return NULL;
1895 
1896 	/* The preferred zone is used for statistics later */
1897 	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1898 	if (!preferred_zone)
1899 		return NULL;
1900 
1901 	/* First allocation attempt */
1902 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1903 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1904 			preferred_zone, migratetype);
1905 	if (unlikely(!page))
1906 		page = __alloc_pages_slowpath(gfp_mask, order,
1907 				zonelist, high_zoneidx, nodemask,
1908 				preferred_zone, migratetype);
1909 
1910 	return page;
1911 }
1912 EXPORT_SYMBOL(__alloc_pages_nodemask);
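
/*
 * For example, in the !NUMA case the wrappers in include/linux/gfp.h make
 * a plain alloc_pages(GFP_KERNEL, 0) arrive here roughly as
 *
 *	__alloc_pages_nodemask(GFP_KERNEL, 0,
 *			node_zonelist(numa_node_id(), GFP_KERNEL), NULL);
 *
 * i.e. with the current node's default zonelist and no explicit nodemask.
 */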
1913 
1914 /*
1915  * Common helper functions.
1916  */
1917 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1918 {
1919 	struct page * page;
1920 	page = alloc_pages(gfp_mask, order);
1921 	if (!page)
1922 		return 0;
1923 	return (unsigned long) page_address(page);
1924 }
1925 
1926 EXPORT_SYMBOL(__get_free_pages);
1927 
1928 unsigned long get_zeroed_page(gfp_t gfp_mask)
1929 {
1930 	struct page * page;
1931 
1932 	/*
1933 	 * get_zeroed_page() returns an address in the kernel's direct mapping,
1934 	 * which cannot represent a highmem page
1935 	 */
1936 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1937 
1938 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1939 	if (page)
1940 		return (unsigned long) page_address(page);
1941 	return 0;
1942 }
1943 
1944 EXPORT_SYMBOL(get_zeroed_page);
1945 
1946 void __pagevec_free(struct pagevec *pvec)
1947 {
1948 	int i = pagevec_count(pvec);
1949 
1950 	while (--i >= 0)
1951 		free_hot_cold_page(pvec->pages[i], pvec->cold);
1952 }
1953 
1954 void __free_pages(struct page *page, unsigned int order)
1955 {
1956 	if (put_page_testzero(page)) {
1957 		if (order == 0)
1958 			free_hot_page(page);
1959 		else
1960 			__free_pages_ok(page, order);
1961 	}
1962 }
1963 
1964 EXPORT_SYMBOL(__free_pages);
1965 
1966 void free_pages(unsigned long addr, unsigned int order)
1967 {
1968 	if (addr != 0) {
1969 		VM_BUG_ON(!virt_addr_valid((void *)addr));
1970 		__free_pages(virt_to_page((void *)addr), order);
1971 	}
1972 }
1973 
1974 EXPORT_SYMBOL(free_pages);
1975 
1976 /**
1977  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1978  * @size: the number of bytes to allocate
1979  * @gfp_mask: GFP flags for the allocation
1980  *
1981  * This function is similar to alloc_pages(), except that it allocates the
1982  * minimum number of pages to satisfy the request.  alloc_pages() can only
1983  * allocate memory in power-of-two pages.
1984  *
1985  * This function is also limited by MAX_ORDER.
1986  *
1987  * Memory allocated by this function must be released by free_pages_exact().
1988  */
1989 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1990 {
1991 	unsigned int order = get_order(size);
1992 	unsigned long addr;
1993 
1994 	addr = __get_free_pages(gfp_mask, order);
1995 	if (addr) {
1996 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
1997 		unsigned long used = addr + PAGE_ALIGN(size);
1998 
1999 		split_page(virt_to_page((void *)addr), order);
2000 		while (used < alloc_end) {
2001 			free_page(used);
2002 			used += PAGE_SIZE;
2003 		}
2004 	}
2005 
2006 	return (void *)addr;
2007 }
2008 EXPORT_SYMBOL(alloc_pages_exact);
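
/*
 * For example, alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL) takes an
 * order-3 block (8 pages), split_page()s it and returns the trailing 3
 * pages straight to the allocator, handing back exactly 5 physically
 * contiguous pages:
 *
 *	buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 5 * PAGE_SIZE);
 *	}
 */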
2009 
2010 /**
2011  * free_pages_exact - release memory allocated via alloc_pages_exact()
2012  * @virt: the value returned by alloc_pages_exact.
2013  * @size: size of allocation, same value as passed to alloc_pages_exact().
2014  *
2015  * Release the memory allocated by a previous call to alloc_pages_exact.
2016  */
2017 void free_pages_exact(void *virt, size_t size)
2018 {
2019 	unsigned long addr = (unsigned long)virt;
2020 	unsigned long end = addr + PAGE_ALIGN(size);
2021 
2022 	while (addr < end) {
2023 		free_page(addr);
2024 		addr += PAGE_SIZE;
2025 	}
2026 }
2027 EXPORT_SYMBOL(free_pages_exact);
2028 
2029 static unsigned int nr_free_zone_pages(int offset)
2030 {
2031 	struct zoneref *z;
2032 	struct zone *zone;
2033 
2034 	/* Just pick one node, since fallback list is circular */
2035 	unsigned int sum = 0;
2036 
2037 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2038 
2039 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2040 		unsigned long size = zone->present_pages;
2041 		unsigned long high = high_wmark_pages(zone);
2042 		if (size > high)
2043 			sum += size - high;
2044 	}
2045 
2046 	return sum;
2047 }
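
/*
 * For example, on a node with a 16MB DMA zone and a 1GB Normal zone
 * (4K pages), nr_free_zone_pages(gfp_zone(GFP_USER)) is roughly
 * (4096 + 262144) pages minus both zones' high watermarks, i.e. the
 * number of pages that could be allocated without pushing either zone
 * below its high watermark.
 */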
2048 
2049 /*
2050  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2051  */
2052 unsigned int nr_free_buffer_pages(void)
2053 {
2054 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2055 }
2056 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2057 
2058 /*
2059  * Amount of free RAM allocatable within all zones
2060  */
2061 unsigned int nr_free_pagecache_pages(void)
2062 {
2063 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2064 }
2065 
2066 static inline void show_node(struct zone *zone)
2067 {
2068 	if (NUMA_BUILD)
2069 		printk("Node %d ", zone_to_nid(zone));
2070 }
2071 
2072 void si_meminfo(struct sysinfo *val)
2073 {
2074 	val->totalram = totalram_pages;
2075 	val->sharedram = 0;
2076 	val->freeram = global_page_state(NR_FREE_PAGES);
2077 	val->bufferram = nr_blockdev_pages();
2078 	val->totalhigh = totalhigh_pages;
2079 	val->freehigh = nr_free_highpages();
2080 	val->mem_unit = PAGE_SIZE;
2081 }
2082 
2083 EXPORT_SYMBOL(si_meminfo);
2084 
2085 #ifdef CONFIG_NUMA
2086 void si_meminfo_node(struct sysinfo *val, int nid)
2087 {
2088 	pg_data_t *pgdat = NODE_DATA(nid);
2089 
2090 	val->totalram = pgdat->node_present_pages;
2091 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2092 #ifdef CONFIG_HIGHMEM
2093 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2094 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2095 			NR_FREE_PAGES);
2096 #else
2097 	val->totalhigh = 0;
2098 	val->freehigh = 0;
2099 #endif
2100 	val->mem_unit = PAGE_SIZE;
2101 }
2102 #endif
2103 
2104 #define K(x) ((x) << (PAGE_SHIFT-10))
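/* K(x) converts a page count to kilobytes, e.g. with 4K pages K(25) == 100 */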
2105 
2106 /*
2107  * Show free area list (used e.g. by the SysRq show-memory handler)
2108  * We also calculate the percentage fragmentation. We do this by counting the
2109  * memory on each free list with the exception of the first item on the list.
2110  */
2111 void show_free_areas(void)
2112 {
2113 	int cpu;
2114 	struct zone *zone;
2115 
2116 	for_each_populated_zone(zone) {
2117 		show_node(zone);
2118 		printk("%s per-cpu:\n", zone->name);
2119 
2120 		for_each_online_cpu(cpu) {
2121 			struct per_cpu_pageset *pageset;
2122 
2123 			pageset = zone_pcp(zone, cpu);
2124 
2125 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2126 			       cpu, pageset->pcp.high,
2127 			       pageset->pcp.batch, pageset->pcp.count);
2128 		}
2129 	}
2130 
2131 	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
2132 		" inactive_file:%lu"
2133 		" unevictable:%lu"
2134 		" dirty:%lu writeback:%lu unstable:%lu\n"
2135 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
2136 		global_page_state(NR_ACTIVE_ANON),
2137 		global_page_state(NR_ACTIVE_FILE),
2138 		global_page_state(NR_INACTIVE_ANON),
2139 		global_page_state(NR_INACTIVE_FILE),
2140 		global_page_state(NR_UNEVICTABLE),
2141 		global_page_state(NR_FILE_DIRTY),
2142 		global_page_state(NR_WRITEBACK),
2143 		global_page_state(NR_UNSTABLE_NFS),
2144 		global_page_state(NR_FREE_PAGES),
2145 		global_page_state(NR_SLAB_RECLAIMABLE) +
2146 			global_page_state(NR_SLAB_UNRECLAIMABLE),
2147 		global_page_state(NR_FILE_MAPPED),
2148 		global_page_state(NR_PAGETABLE),
2149 		global_page_state(NR_BOUNCE));
2150 
2151 	for_each_populated_zone(zone) {
2152 		int i;
2153 
2154 		show_node(zone);
2155 		printk("%s"
2156 			" free:%lukB"
2157 			" min:%lukB"
2158 			" low:%lukB"
2159 			" high:%lukB"
2160 			" active_anon:%lukB"
2161 			" inactive_anon:%lukB"
2162 			" active_file:%lukB"
2163 			" inactive_file:%lukB"
2164 			" unevictable:%lukB"
2165 			" present:%lukB"
2166 			" pages_scanned:%lu"
2167 			" all_unreclaimable? %s"
2168 			"\n",
2169 			zone->name,
2170 			K(zone_page_state(zone, NR_FREE_PAGES)),
2171 			K(min_wmark_pages(zone)),
2172 			K(low_wmark_pages(zone)),
2173 			K(high_wmark_pages(zone)),
2174 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2175 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2176 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2177 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2178 			K(zone_page_state(zone, NR_UNEVICTABLE)),
2179 			K(zone->present_pages),
2180 			zone->pages_scanned,
2181 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2182 			);
2183 		printk("lowmem_reserve[]:");
2184 		for (i = 0; i < MAX_NR_ZONES; i++)
2185 			printk(" %lu", zone->lowmem_reserve[i]);
2186 		printk("\n");
2187 	}
2188 
2189 	for_each_populated_zone(zone) {
2190  		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2191 
2192 		show_node(zone);
2193 		printk("%s: ", zone->name);
2194 
2195 		spin_lock_irqsave(&zone->lock, flags);
2196 		for (order = 0; order < MAX_ORDER; order++) {
2197 			nr[order] = zone->free_area[order].nr_free;
2198 			total += nr[order] << order;
2199 		}
2200 		spin_unlock_irqrestore(&zone->lock, flags);
2201 		for (order = 0; order < MAX_ORDER; order++)
2202 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2203 		printk("= %lukB\n", K(total));
2204 	}
2205 
2206 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2207 
2208 	show_swap_cache_info();
2209 }
2210 
2211 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2212 {
2213 	zoneref->zone = zone;
2214 	zoneref->zone_idx = zone_idx(zone);
2215 }
2216 
2217 /*
2218  * Builds allocation fallback zone lists.
2219  *
2220  * Add all populated zones of a node to the zonelist.
2221  */
2222 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2223 				int nr_zones, enum zone_type zone_type)
2224 {
2225 	struct zone *zone;
2226 
2227 	BUG_ON(zone_type >= MAX_NR_ZONES);
2228 	zone_type++;
2229 
2230 	do {
2231 		zone_type--;
2232 		zone = pgdat->node_zones + zone_type;
2233 		if (populated_zone(zone)) {
2234 			zoneref_set_zone(zone,
2235 				&zonelist->_zonerefs[nr_zones++]);
2236 			check_highest_zone(zone_type);
2237 		}
2238 
2239 	} while (zone_type);
2240 	return nr_zones;
2241 }
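
/*
 * For example, a node with populated DMA, Normal and HighMem zones gets
 * its zonerefs added highest-first: HighMem, then Normal, then DMA, so
 * fallback naturally proceeds towards the more precious low zones.
 */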
2242 
2243 
2244 /*
2245  *  zonelist_order:
2246  *  0 = automatic detection of better ordering.
2247  *  1 = order by ([node] distance, -zonetype)
2248  *  2 = order by (-zonetype, [node] distance)
2249  *
2250  *  On non-NUMA configurations, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE
2251  *  create the same zonelist, so this parameter is only configurable on NUMA.
2252  */
2253 #define ZONELIST_ORDER_DEFAULT  0
2254 #define ZONELIST_ORDER_NODE     1
2255 #define ZONELIST_ORDER_ZONE     2
2256 
2257 /* zonelist order in the kernel.
2258  * set_zonelist_order() will set this to NODE or ZONE.
2259  */
2260 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2261 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2262 
2263 
2264 #ifdef CONFIG_NUMA
2265 /* The order the user specified; may later be changed via the sysctl */
2266 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2267 /* string for sysctl */
2268 #define NUMA_ZONELIST_ORDER_LEN	16
2269 char numa_zonelist_order[16] = "default";
2270 
2271 /*
2272  * Interface for configuring zonelist ordering.
2273  * command line option "numa_zonelist_order"
2274  *	= "[dD]efault"	- default, automatic configuration.
2275  *	= "[nN]ode"	- order by node locality, then by zone within node
2276  *	= "[zZ]one"	- order by zone, then by locality within zone
2277  */
2278 
2279 static int __parse_numa_zonelist_order(char *s)
2280 {
2281 	if (*s == 'd' || *s == 'D') {
2282 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2283 	} else if (*s == 'n' || *s == 'N') {
2284 		user_zonelist_order = ZONELIST_ORDER_NODE;
2285 	} else if (*s == 'z' || *s == 'Z') {
2286 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2287 	} else {
2288 		printk(KERN_WARNING
2289 			"Ignoring invalid numa_zonelist_order value:  "
2290 			"%s\n", s);
2291 		return -EINVAL;
2292 	}
2293 	return 0;
2294 }
2295 
2296 static __init int setup_numa_zonelist_order(char *s)
2297 {
2298 	if (s)
2299 		return __parse_numa_zonelist_order(s);
2300 	return 0;
2301 }
2302 early_param("numa_zonelist_order", setup_numa_zonelist_order);
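
/*
 * For example, booting with "numa_zonelist_order=z" (or later writing
 * "zone" to the numa_zonelist_order sysctl under /proc/sys/vm/) selects
 * ZONELIST_ORDER_ZONE, while "numa_zonelist_order=n" / "node" selects
 * ZONELIST_ORDER_NODE.
 */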
2303 
2304 /*
2305  * sysctl handler for numa_zonelist_order
2306  */
2307 int numa_zonelist_order_handler(ctl_table *table, int write,
2308 		struct file *file, void __user *buffer, size_t *length,
2309 		loff_t *ppos)
2310 {
2311 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2312 	int ret;
2313 
2314 	if (write)
2315 		strncpy(saved_string, (char*)table->data,
2316 			NUMA_ZONELIST_ORDER_LEN);
2317 	ret = proc_dostring(table, write, file, buffer, length, ppos);
2318 	if (ret)
2319 		return ret;
2320 	if (write) {
2321 		int oldval = user_zonelist_order;
2322 		if (__parse_numa_zonelist_order((char*)table->data)) {
2323 			/*
2324 			 * bogus value.  restore saved string
2325 			 */
2326 			strncpy((char*)table->data, saved_string,
2327 				NUMA_ZONELIST_ORDER_LEN);
2328 			user_zonelist_order = oldval;
2329 		} else if (oldval != user_zonelist_order)
2330 			build_all_zonelists();
2331 	}
2332 	return 0;
2333 }
2334 
2335 
2336 #define MAX_NODE_LOAD (nr_online_nodes)
2337 static int node_load[MAX_NUMNODES];
2338 
2339 /**
2340  * find_next_best_node - find the next node that should appear in a given node's fallback list
2341  * @node: node whose fallback list we're appending
2342  * @used_node_mask: nodemask_t of already used nodes
2343  *
2344  * We use a number of factors to determine which is the next node that should
2345  * appear on a given node's fallback list.  The node should not have appeared
2346  * already in @node's fallback list, and it should be the next closest node
2347  * according to the distance array (which contains arbitrary distance values
2348  * from each node to each node in the system), and should also prefer nodes
2349  * with no CPUs, since presumably they'll have very little allocation pressure
2350  * on them otherwise.
2351  * It returns -1 if no node is found.
2352  */
2353 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2354 {
2355 	int n, val;
2356 	int min_val = INT_MAX;
2357 	int best_node = -1;
2358 	const struct cpumask *tmp = cpumask_of_node(0);
2359 
2360 	/* Use the local node if we haven't already */
2361 	if (!node_isset(node, *used_node_mask)) {
2362 		node_set(node, *used_node_mask);
2363 		return node;
2364 	}
2365 
2366 	for_each_node_state(n, N_HIGH_MEMORY) {
2367 
2368 		/* Don't want a node to appear more than once */
2369 		if (node_isset(n, *used_node_mask))
2370 			continue;
2371 
2372 		/* Use the distance array to find the distance */
2373 		val = node_distance(node, n);
2374 
2375 		/* Penalize nodes under us ("prefer the next node") */
2376 		val += (n < node);
2377 
2378 		/* Give preference to headless and unused nodes */
2379 		tmp = cpumask_of_node(n);
2380 		if (!cpumask_empty(tmp))
2381 			val += PENALTY_FOR_NODE_WITH_CPUS;
2382 
2383 		/* Slight preference for less loaded node */
2384 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2385 		val += node_load[n];
2386 
2387 		if (val < min_val) {
2388 			min_val = val;
2389 			best_node = n;
2390 		}
2391 	}
2392 
2393 	if (best_node >= 0)
2394 		node_set(best_node, *used_node_mask);
2395 
2396 	return best_node;
2397 }
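
/*
 * Note that the distance/penalty part of the score is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is added, so node_load[]
 * only breaks ties between otherwise equal candidates: e.g. with the
 * default PENALTY_FOR_NODE_WITH_CPUS of 1, any candidate at distance 20
 * beats any candidate at distance 40 regardless of node_load[].
 */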
2398 
2399 
2400 /*
2401  * Build zonelists ordered by node and zones within node.
2402  * This results in maximum locality--normal zone overflows into local
2403  * DMA zone, if any--but risks exhausting DMA zone.
2404  */
2405 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2406 {
2407 	int j;
2408 	struct zonelist *zonelist;
2409 
2410 	zonelist = &pgdat->node_zonelists[0];
2411 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2412 		;
2413 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2414 							MAX_NR_ZONES - 1);
2415 	zonelist->_zonerefs[j].zone = NULL;
2416 	zonelist->_zonerefs[j].zone_idx = 0;
2417 }
2418 
2419 /*
2420  * Build gfp_thisnode zonelists
2421  */
2422 static void build_thisnode_zonelists(pg_data_t *pgdat)
2423 {
2424 	int j;
2425 	struct zonelist *zonelist;
2426 
2427 	zonelist = &pgdat->node_zonelists[1];
2428 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2429 	zonelist->_zonerefs[j].zone = NULL;
2430 	zonelist->_zonerefs[j].zone_idx = 0;
2431 }
2432 
2433 /*
2434  * Build zonelists ordered by zone and nodes within zones.
2435  * This results in conserving DMA zone[s] until all Normal memory is
2436  * exhausted, but results in overflowing to remote node while memory
2437  * may still exist in local DMA zone.
2438  */
2439 static int node_order[MAX_NUMNODES];
2440 
2441 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2442 {
2443 	int pos, j, node;
2444 	int zone_type;		/* needs to be signed */
2445 	struct zone *z;
2446 	struct zonelist *zonelist;
2447 
2448 	zonelist = &pgdat->node_zonelists[0];
2449 	pos = 0;
2450 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2451 		for (j = 0; j < nr_nodes; j++) {
2452 			node = node_order[j];
2453 			z = &NODE_DATA(node)->node_zones[zone_type];
2454 			if (populated_zone(z)) {
2455 				zoneref_set_zone(z,
2456 					&zonelist->_zonerefs[pos++]);
2457 				check_highest_zone(zone_type);
2458 			}
2459 		}
2460 	}
2461 	zonelist->_zonerefs[pos].zone = NULL;
2462 	zonelist->_zonerefs[pos].zone_idx = 0;
2463 }
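
/*
 * For example, with node_order = { 0, 1 } and HighMem, Normal and DMA
 * populated on both nodes, the resulting zonelist is
 *
 *	Node0-HighMem, Node1-HighMem, Node0-Normal, Node1-Normal,
 *	Node0-DMA, Node1-DMA
 *
 * i.e. every node's copy of a zone type is exhausted before falling back
 * to the next lower zone type anywhere.
 */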
2464 
2465 static int default_zonelist_order(void)
2466 {
2467 	int nid, zone_type;
2468 	unsigned long low_kmem_size, total_size;
2469 	struct zone *z;
2470 	int average_size;
2471 	/*
2472 	 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2473 	 * If they are really small and used heavily, the system can fall
2474 	 * into OOM very easily.
2475 	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2476 	 */
2477 	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2478 	low_kmem_size = 0;
2479 	total_size = 0;
2480 	for_each_online_node(nid) {
2481 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2482 			z = &NODE_DATA(nid)->node_zones[zone_type];
2483 			if (populated_zone(z)) {
2484 				if (zone_type < ZONE_NORMAL)
2485 					low_kmem_size += z->present_pages;
2486 				total_size += z->present_pages;
2487 			}
2488 		}
2489 	}
2490 	if (!low_kmem_size ||  /* there is no DMA area. */
2491 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2492 		return ZONELIST_ORDER_NODE;
2493 	/*
2494 	 * Look into each node's config.
2495 	 * If there is a node where DMA/DMA32 memory makes up a large share
2496 	 * of its local memory, NODE_ORDER may be suitable.
2497 	 */
2498 	average_size = total_size /
2499 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2500 	for_each_online_node(nid) {
2501 		low_kmem_size = 0;
2502 		total_size = 0;
2503 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2504 			z = &NODE_DATA(nid)->node_zones[zone_type];
2505 			if (populated_zone(z)) {
2506 				if (zone_type < ZONE_NORMAL)
2507 					low_kmem_size += z->present_pages;
2508 				total_size += z->present_pages;
2509 			}
2510 		}
2511 		if (low_kmem_size &&
2512 		    total_size > average_size && /* ignore small node */
2513 		    low_kmem_size > total_size * 70/100)
2514 			return ZONELIST_ORDER_NODE;
2515 	}
2516 	return ZONELIST_ORDER_ZONE;
2517 }
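
/*
 * In short: node ordering is chosen when DMA/DMA32 memory either does not
 * exist at all, makes up more than half of all memory, or dominates (>70%)
 * some larger-than-average node; otherwise zone ordering is used so the
 * small lowmem zones are conserved.
 */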
2518 
2519 static void set_zonelist_order(void)
2520 {
2521 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2522 		current_zonelist_order = default_zonelist_order();
2523 	else
2524 		current_zonelist_order = user_zonelist_order;
2525 }
2526 
2527 static void build_zonelists(pg_data_t *pgdat)
2528 {
2529 	int j, node, load;
2530 	enum zone_type i;
2531 	nodemask_t used_mask;
2532 	int local_node, prev_node;
2533 	struct zonelist *zonelist;
2534 	int order = current_zonelist_order;
2535 
2536 	/* initialize zonelists */
2537 	for (i = 0; i < MAX_ZONELISTS; i++) {
2538 		zonelist = pgdat->node_zonelists + i;
2539 		zonelist->_zonerefs[0].zone = NULL;
2540 		zonelist->_zonerefs[0].zone_idx = 0;
2541 	}
2542 
2543 	/* NUMA-aware ordering of nodes */
2544 	local_node = pgdat->node_id;
2545 	load = nr_online_nodes;
2546 	prev_node = local_node;
2547 	nodes_clear(used_mask);
2548 
2549 	memset(node_order, 0, sizeof(node_order));
2550 	j = 0;
2551 
2552 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2553 		int distance = node_distance(local_node, node);
2554 
2555 		/*
2556 		 * If another node is sufficiently far away then it is better
2557 		 * to reclaim pages in a zone before going off node.
2558 		 */
2559 		if (distance > RECLAIM_DISTANCE)
2560 			zone_reclaim_mode = 1;
2561 
2562 		/*
2563 		 * We don't want to pressure a particular node.
2564 		 * So adding penalty to the first node in same
2565 		 * distance group to make it round-robin.
2566 		 */
2567 		if (distance != node_distance(local_node, prev_node))
2568 			node_load[node] = load;
2569 
2570 		prev_node = node;
2571 		load--;
2572 		if (order == ZONELIST_ORDER_NODE)
2573 			build_zonelists_in_node_order(pgdat, node);
2574 		else
2575 			node_order[j++] = node;	/* remember order */
2576 	}
2577 
2578 	if (order == ZONELIST_ORDER_ZONE) {
2579 		/* calculate node order -- i.e., DMA last! */
2580 		build_zonelists_in_zone_order(pgdat, j);
2581 	}
2582 
2583 	build_thisnode_zonelists(pgdat);
2584 }
2585 
2586 /* Construct the zonelist performance cache - see further mmzone.h */
2587 static void build_zonelist_cache(pg_data_t *pgdat)
2588 {
2589 	struct zonelist *zonelist;
2590 	struct zonelist_cache *zlc;
2591 	struct zoneref *z;
2592 
2593 	zonelist = &pgdat->node_zonelists[0];
2594 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2595 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2596 	for (z = zonelist->_zonerefs; z->zone; z++)
2597 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2598 }
2599 
2600 
2601 #else	/* CONFIG_NUMA */
2602 
2603 static void set_zonelist_order(void)
2604 {
2605 	current_zonelist_order = ZONELIST_ORDER_ZONE;
2606 }
2607 
2608 static void build_zonelists(pg_data_t *pgdat)
2609 {
2610 	int node, local_node;
2611 	enum zone_type j;
2612 	struct zonelist *zonelist;
2613 
2614 	local_node = pgdat->node_id;
2615 
2616 	zonelist = &pgdat->node_zonelists[0];
2617 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2618 
2619 	/*
2620 	 * Now we build the zonelist so that it contains the zones
2621 	 * of all the other nodes.
2622 	 * We don't want to pressure a particular node, so when
2623 	 * building the zones for node N, we make sure that the
2624 	 * zones coming right after the local ones are those from
2625 	 * node N+1 (modulo N)
2626 	 */
2627 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2628 		if (!node_online(node))
2629 			continue;
2630 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2631 							MAX_NR_ZONES - 1);
2632 	}
2633 	for (node = 0; node < local_node; node++) {
2634 		if (!node_online(node))
2635 			continue;
2636 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2637 							MAX_NR_ZONES - 1);
2638 	}
2639 
2640 	zonelist->_zonerefs[j].zone = NULL;
2641 	zonelist->_zonerefs[j].zone_idx = 0;
2642 }
2643 
2644 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2645 static void build_zonelist_cache(pg_data_t *pgdat)
2646 {
2647 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2648 }
2649 
2650 #endif	/* CONFIG_NUMA */
2651 
2652 /* The return type is int only to satisfy stop_machine() */
2653 static int __build_all_zonelists(void *dummy)
2654 {
2655 	int nid;
2656 
2657 #ifdef CONFIG_NUMA
2658 	memset(node_load, 0, sizeof(node_load));
2659 #endif
2660 	for_each_online_node(nid) {
2661 		pg_data_t *pgdat = NODE_DATA(nid);
2662 
2663 		build_zonelists(pgdat);
2664 		build_zonelist_cache(pgdat);
2665 	}
2666 	return 0;
2667 }
2668 
2669 void build_all_zonelists(void)
2670 {
2671 	set_zonelist_order();
2672 
2673 	if (system_state == SYSTEM_BOOTING) {
2674 		__build_all_zonelists(NULL);
2675 		mminit_verify_zonelist();
2676 		cpuset_init_current_mems_allowed();
2677 	} else {
2678 		/* we have to stop all cpus to guarantee there is no user
2679 		   of zonelist */
2680 		stop_machine(__build_all_zonelists, NULL, NULL);
2681 		/* cpuset refresh routine should be here */
2682 	}
2683 	vm_total_pages = nr_free_pagecache_pages();
2684 	/*
2685 	 * Disable grouping by mobility if the number of pages in the
2686 	 * system is too low to allow the mechanism to work. It would be
2687 	 * more accurate, but expensive to check per-zone. This check is
2688 	 * made on memory-hotadd so a system can start with mobility
2689 	 * disabled and enable it later
2690 	 */
2691 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2692 		page_group_by_mobility_disabled = 1;
2693 	else
2694 		page_group_by_mobility_disabled = 0;
2695 
2696 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2697 		"Total pages: %ld\n",
2698 			nr_online_nodes,
2699 			zonelist_order_name[current_zonelist_order],
2700 			page_group_by_mobility_disabled ? "off" : "on",
2701 			vm_total_pages);
2702 #ifdef CONFIG_NUMA
2703 	printk("Policy zone: %s\n", zone_names[policy_zone]);
2704 #endif
2705 }
2706 
2707 /*
2708  * Helper functions to size the waitqueue hash table.
2709  * Essentially these want to choose hash table sizes sufficiently
2710  * large so that collisions trying to wait on pages are rare.
2711  * But in fact, the number of active page waitqueues on typical
2712  * systems is ridiculously low, less than 200. So this is even
2713  * conservative, even though it seems large.
2714  *
2715  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2716  * waitqueues, i.e. the size of the waitq table given the number of pages.
2717  */
2718 #define PAGES_PER_WAITQUEUE	256
2719 
2720 #ifndef CONFIG_MEMORY_HOTPLUG
2721 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2722 {
2723 	unsigned long size = 1;
2724 
2725 	pages /= PAGES_PER_WAITQUEUE;
2726 
2727 	while (size < pages)
2728 		size <<= 1;
2729 
2730 	/*
2731 	 * Once we have dozens or even hundreds of threads sleeping
2732 	 * on IO we've got bigger problems than wait queue collision.
2733 	 * Limit the size of the wait table to a reasonable size.
2734 	 */
2735 	size = min(size, 4096UL);
2736 
2737 	return max(size, 4UL);
2738 }
2739 #else
2740 /*
2741  * A zone's size might be changed by hot-add, so it is not possible to determine
2742  * a suitable size for its wait_table.  So we use the maximum size now.
2743  *
2744  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2745  *
2746  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2747  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2748  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2749  *
2750  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2751  * or more by the traditional way. (See above).  It equals:
2752  *
2753  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2754  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2755  *    powerpc (64K page size)             : =  (32G +16M)byte.
2756  */
2757 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2758 {
2759 	return 4096UL;
2760 }
2761 #endif
2762 
2763 /*
2764  * This is an integer logarithm so that shifts can be used later
2765  * to extract the more random high bits from the multiplicative
2766  * hash function before the remainder is taken.
2767  */
2768 static inline unsigned long wait_table_bits(unsigned long size)
2769 {
2770 	return ffz(~size);
2771 }
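
/*
 * For example, with the !CONFIG_MEMORY_HOTPLUG variant above, a 1GB zone
 * of 4K pages (262144 pages) gets 262144 / PAGES_PER_WAITQUEUE = 1024
 * hash entries, and wait_table_bits(1024) == 10.
 */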
2772 
2773 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2774 
2775 /*
2776  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2777  * of blocks reserved is based on min_wmark_pages(zone). The memory within
2778  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2779  * higher will lead to a bigger reserve which will get freed as contiguous
2780  * blocks as reclaim kicks in
2781  */
2782 static void setup_zone_migrate_reserve(struct zone *zone)
2783 {
2784 	unsigned long start_pfn, pfn, end_pfn;
2785 	struct page *page;
2786 	unsigned long reserve, block_migratetype;
2787 
2788 	/* Get the start pfn, end pfn and the number of blocks to reserve */
2789 	start_pfn = zone->zone_start_pfn;
2790 	end_pfn = start_pfn + zone->spanned_pages;
2791 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2792 							pageblock_order;
2793 
2794 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2795 		if (!pfn_valid(pfn))
2796 			continue;
2797 		page = pfn_to_page(pfn);
2798 
2799 		/* Watch out for overlapping nodes */
2800 		if (page_to_nid(page) != zone_to_nid(zone))
2801 			continue;
2802 
2803 		/* Blocks with reserved pages will never be freed, skip them. */
2804 		if (PageReserved(page))
2805 			continue;
2806 
2807 		block_migratetype = get_pageblock_migratetype(page);
2808 
2809 		/* If this block is reserved, account for it */
2810 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2811 			reserve--;
2812 			continue;
2813 		}
2814 
2815 		/* Suitable for reserving if this block is movable */
2816 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2817 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2818 			move_freepages_block(zone, page, MIGRATE_RESERVE);
2819 			reserve--;
2820 			continue;
2821 		}
2822 
2823 		/*
2824 		 * If the reserve is met and this is a previous reserved block,
2825 		 * take it back
2826 		 */
2827 		if (block_migratetype == MIGRATE_RESERVE) {
2828 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2829 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2830 		}
2831 	}
2832 }
2833 
2834 /*
2835  * Initially all pages are reserved - free ones are freed
2836  * up by free_all_bootmem() once the early boot process is
2837  * done. Non-atomic initialization, single-pass.
2838  */
2839 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2840 		unsigned long start_pfn, enum memmap_context context)
2841 {
2842 	struct page *page;
2843 	unsigned long end_pfn = start_pfn + size;
2844 	unsigned long pfn;
2845 	struct zone *z;
2846 
2847 	if (highest_memmap_pfn < end_pfn - 1)
2848 		highest_memmap_pfn = end_pfn - 1;
2849 
2850 	z = &NODE_DATA(nid)->node_zones[zone];
2851 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2852 		/*
2853 		 * There can be holes in boot-time mem_map[]s
2854 		 * handed to this function.  They do not
2855 		 * exist on hotplugged memory.
2856 		 */
2857 		if (context == MEMMAP_EARLY) {
2858 			if (!early_pfn_valid(pfn))
2859 				continue;
2860 			if (!early_pfn_in_nid(pfn, nid))
2861 				continue;
2862 		}
2863 		page = pfn_to_page(pfn);
2864 		set_page_links(page, zone, nid, pfn);
2865 		mminit_verify_page_links(page, zone, nid, pfn);
2866 		init_page_count(page);
2867 		reset_page_mapcount(page);
2868 		SetPageReserved(page);
2869 		/*
2870 		 * Mark the block movable so that blocks are reserved for
2871 		 * movable at startup. This will force kernel allocations
2872 		 * to reserve their blocks rather than leaking throughout
2873 		 * the address space during boot when many long-lived
2874 		 * kernel allocations are made. Later some blocks near
2875 		 * the start are marked MIGRATE_RESERVE by
2876 		 * setup_zone_migrate_reserve()
2877 		 *
2878 		 * The pageblock bitmap is created for the zone's valid pfn range,
2879 		 * but the memmap can be created for invalid pages (for alignment).
2880 		 * Check here so that set_pageblock_migratetype() is not called for
2881 		 * a pfn outside the zone.
2882 		 */
2883 		if ((z->zone_start_pfn <= pfn)
2884 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2885 		    && !(pfn & (pageblock_nr_pages - 1)))
2886 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2887 
2888 		INIT_LIST_HEAD(&page->lru);
2889 #ifdef WANT_PAGE_VIRTUAL
2890 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2891 		if (!is_highmem_idx(zone))
2892 			set_page_address(page, __va(pfn << PAGE_SHIFT));
2893 #endif
2894 	}
2895 }
2896 
2897 static void __meminit zone_init_free_lists(struct zone *zone)
2898 {
2899 	int order, t;
2900 	for_each_migratetype_order(order, t) {
2901 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2902 		zone->free_area[order].nr_free = 0;
2903 	}
2904 }
2905 
2906 #ifndef __HAVE_ARCH_MEMMAP_INIT
2907 #define memmap_init(size, nid, zone, start_pfn) \
2908 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2909 #endif
2910 
2911 static int zone_batchsize(struct zone *zone)
2912 {
2913 #ifdef CONFIG_MMU
2914 	int batch;
2915 
2916 	/*
2917 	 * The per-cpu-pages pools are set to around 1/1000th of the
2918 	 * size of the zone, but to no more than half a megabyte.
2919 	 *
2920 	 * OK, so we don't know how big the cache is.  So guess.
2921 	 */
2922 	batch = zone->present_pages / 1024;
2923 	if (batch * PAGE_SIZE > 512 * 1024)
2924 		batch = (512 * 1024) / PAGE_SIZE;
2925 	batch /= 4;		/* We effectively *= 4 below */
2926 	if (batch < 1)
2927 		batch = 1;
2928 
2929 	/*
2930 	 * Clamp the batch to a 2^n - 1 value. Having a power
2931 	 * of 2 value was found to be more likely to have
2932 	 * suboptimal cache aliasing properties in some cases.
2933 	 *
2934 	 * For example if 2 tasks are alternately allocating
2935 	 * batches of pages, one task can end up with a lot
2936 	 * of pages of one half of the possible page colors
2937 	 * and the other with pages of the other colors.
2938 	 */
2939 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
2940 
2941 	return batch;
2942 
2943 #else
2944 	/* The deferral and batching of frees should be suppressed under NOMMU
2945 	 * conditions.
2946 	 *
2947 	 * The problem is that NOMMU needs to be able to allocate large chunks
2948 	 * of contiguous memory as there's no hardware page translation to
2949 	 * assemble apparent contiguous memory from discontiguous pages.
2950 	 *
2951 	 * Queueing large contiguous runs of pages for batching, however,
2952 	 * causes the pages to actually be freed in smaller chunks.  As there
2953 	 * can be a significant delay between the individual batches being
2954 	 * recycled, this leads to the once large chunks of space being
2955 	 * fragmented and becoming unavailable for high-order allocations.
2956 	 */
2957 	return 0;
2958 #endif
2959 }
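
/*
 * For example (CONFIG_MMU, 4K pages), a 1GB zone with 262144 present
 * pages: 262144 / 1024 = 256, capped to 512K / 4K = 128, divided by 4
 * gives 32, and rounddown_pow_of_two(32 + 16) - 1 = 31.  setup_pageset()
 * below then uses pcp->batch = 31 and pcp->high = 6 * 31 = 186.
 */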
2960 
2961 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2962 {
2963 	struct per_cpu_pages *pcp;
2964 
2965 	memset(p, 0, sizeof(*p));
2966 
2967 	pcp = &p->pcp;
2968 	pcp->count = 0;
2969 	pcp->high = 6 * batch;
2970 	pcp->batch = max(1UL, 1 * batch);
2971 	INIT_LIST_HEAD(&pcp->list);
2972 }
2973 
2974 /*
2975  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2976  * to the value high for the pageset p.
2977  */
2978 
2979 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2980 				unsigned long high)
2981 {
2982 	struct per_cpu_pages *pcp;
2983 
2984 	pcp = &p->pcp;
2985 	pcp->high = high;
2986 	pcp->batch = max(1UL, high/4);
2987 	if ((high/4) > (PAGE_SHIFT * 8))
2988 		pcp->batch = PAGE_SHIFT * 8;
2989 }
2990 
2991 
2992 #ifdef CONFIG_NUMA
2993 /*
2994  * Boot pageset table. One per cpu which is going to be used for all
2995  * zones and all nodes. The parameters will be set in such a way
2996  * that an item put on a list will immediately be handed over to
2997  * the buddy list. This is safe since pageset manipulation is done
2998  * with interrupts disabled.
2999  *
3000  * Some NUMA counter updates may also be caught by the boot pagesets.
3001  *
3002  * The boot_pagesets must be kept even after bootup is complete for
3003  * unused processors and/or zones. They do play a role for bootstrapping
3004  * hotplugged processors.
3005  *
3006  * zoneinfo_show() and maybe other functions do
3007  * not check if the processor is online before following the pageset pointer.
3008  * Other parts of the kernel may not check if the zone is available.
3009  */
3010 static struct per_cpu_pageset boot_pageset[NR_CPUS];
3011 
3012 /*
3013  * Dynamically allocate memory for the
3014  * per cpu pageset array in struct zone.
3015  */
3016 static int __cpuinit process_zones(int cpu)
3017 {
3018 	struct zone *zone, *dzone;
3019 	int node = cpu_to_node(cpu);
3020 
3021 	node_set_state(node, N_CPU);	/* this node has a cpu */
3022 
3023 	for_each_populated_zone(zone) {
3024 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3025 					 GFP_KERNEL, node);
3026 		if (!zone_pcp(zone, cpu))
3027 			goto bad;
3028 
3029 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3030 
3031 		if (percpu_pagelist_fraction)
3032 			setup_pagelist_highmark(zone_pcp(zone, cpu),
3033 			 	(zone->present_pages / percpu_pagelist_fraction));
3034 	}
3035 
3036 	return 0;
3037 bad:
3038 	for_each_zone(dzone) {
3039 		if (!populated_zone(dzone))
3040 			continue;
3041 		if (dzone == zone)
3042 			break;
3043 		kfree(zone_pcp(dzone, cpu));
3044 		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3045 	}
3046 	return -ENOMEM;
3047 }
3048 
3049 static inline void free_zone_pagesets(int cpu)
3050 {
3051 	struct zone *zone;
3052 
3053 	for_each_zone(zone) {
3054 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3055 
3056 		/* Free per_cpu_pageset if it is slab allocated */
3057 		if (pset != &boot_pageset[cpu])
3058 			kfree(pset);
3059 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3060 	}
3061 }
3062 
3063 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3064 		unsigned long action,
3065 		void *hcpu)
3066 {
3067 	int cpu = (long)hcpu;
3068 	int ret = NOTIFY_OK;
3069 
3070 	switch (action) {
3071 	case CPU_UP_PREPARE:
3072 	case CPU_UP_PREPARE_FROZEN:
3073 		if (process_zones(cpu))
3074 			ret = NOTIFY_BAD;
3075 		break;
3076 	case CPU_UP_CANCELED:
3077 	case CPU_UP_CANCELED_FROZEN:
3078 	case CPU_DEAD:
3079 	case CPU_DEAD_FROZEN:
3080 		free_zone_pagesets(cpu);
3081 		break;
3082 	default:
3083 		break;
3084 	}
3085 	return ret;
3086 }
3087 
3088 static struct notifier_block __cpuinitdata pageset_notifier =
3089 	{ &pageset_cpuup_callback, NULL, 0 };
3090 
3091 void __init setup_per_cpu_pageset(void)
3092 {
3093 	int err;
3094 
3095 	/* Initialize per_cpu_pageset for cpu 0.
3096 	 * A cpuup callback will do this for every cpu
3097 	 * as it comes online
3098 	 */
3099 	err = process_zones(smp_processor_id());
3100 	BUG_ON(err);
3101 	register_cpu_notifier(&pageset_notifier);
3102 }
3103 
3104 #endif
3105 
3106 static noinline __init_refok
3107 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3108 {
3109 	int i;
3110 	struct pglist_data *pgdat = zone->zone_pgdat;
3111 	size_t alloc_size;
3112 
3113 	/*
3114 	 * The per-page waitqueue mechanism uses hashed waitqueues
3115 	 * per zone.
3116 	 */
3117 	zone->wait_table_hash_nr_entries =
3118 		 wait_table_hash_nr_entries(zone_size_pages);
3119 	zone->wait_table_bits =
3120 		wait_table_bits(zone->wait_table_hash_nr_entries);
3121 	alloc_size = zone->wait_table_hash_nr_entries
3122 					* sizeof(wait_queue_head_t);
3123 
3124 	if (!slab_is_available()) {
3125 		zone->wait_table = (wait_queue_head_t *)
3126 			alloc_bootmem_node(pgdat, alloc_size);
3127 	} else {
3128 		/*
3129 		 * This case means that a zone whose size was 0 gets new memory
3130 		 * via memory hot-add.
3131 		 * But it may be the case that a new node was hot-added.  In
3132 		 * this case vmalloc() will not be able to use this new node's
3133 		 * memory - this wait_table must be initialized to use this new
3134 		 * node itself as well.
3135 		 * To use this new node's memory, further consideration will be
3136 		 * necessary.
3137 		 */
3138 		zone->wait_table = vmalloc(alloc_size);
3139 	}
3140 	if (!zone->wait_table)
3141 		return -ENOMEM;
3142 
3143 	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3144 		init_waitqueue_head(zone->wait_table + i);
3145 
3146 	return 0;
3147 }
3148 
3149 static __meminit void zone_pcp_init(struct zone *zone)
3150 {
3151 	int cpu;
3152 	unsigned long batch = zone_batchsize(zone);
3153 
3154 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3155 #ifdef CONFIG_NUMA
3156 		/* Early boot. Slab allocator not functional yet */
3157 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3158 		setup_pageset(&boot_pageset[cpu], 0);
3159 #else
3160 		setup_pageset(zone_pcp(zone, cpu), batch);
3161 #endif
3162 	}
3163 	if (zone->present_pages)
3164 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3165 			zone->name, zone->present_pages, batch);
3166 }
3167 
3168 __meminit int init_currently_empty_zone(struct zone *zone,
3169 					unsigned long zone_start_pfn,
3170 					unsigned long size,
3171 					enum memmap_context context)
3172 {
3173 	struct pglist_data *pgdat = zone->zone_pgdat;
3174 	int ret;
3175 	ret = zone_wait_table_init(zone, size);
3176 	if (ret)
3177 		return ret;
3178 	pgdat->nr_zones = zone_idx(zone) + 1;
3179 
3180 	zone->zone_start_pfn = zone_start_pfn;
3181 
3182 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3183 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3184 			pgdat->node_id,
3185 			(unsigned long)zone_idx(zone),
3186 			zone_start_pfn, (zone_start_pfn + size));
3187 
3188 	zone_init_free_lists(zone);
3189 
3190 	return 0;
3191 }
3192 
3193 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3194 /*
3195  * Basic iterator support. Return the first range of PFNs for a node
3196  * Note: nid == MAX_NUMNODES returns first region regardless of node
3197  */
3198 static int __meminit first_active_region_index_in_nid(int nid)
3199 {
3200 	int i;
3201 
3202 	for (i = 0; i < nr_nodemap_entries; i++)
3203 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3204 			return i;
3205 
3206 	return -1;
3207 }
3208 
3209 /*
3210  * Basic iterator support. Return the next active range of PFNs for a node
3211  * Note: nid == MAX_NUMNODES returns next region regardless of node
3212  */
3213 static int __meminit next_active_region_index_in_nid(int index, int nid)
3214 {
3215 	for (index = index + 1; index < nr_nodemap_entries; index++)
3216 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3217 			return index;
3218 
3219 	return -1;
3220 }
3221 
3222 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3223 /*
3224  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3225  * Architectures may implement their own version but if add_active_range()
3226  * was used and there are no special requirements, this is a convenient
3227  * alternative
3228  */
3229 int __meminit __early_pfn_to_nid(unsigned long pfn)
3230 {
3231 	int i;
3232 
3233 	for (i = 0; i < nr_nodemap_entries; i++) {
3234 		unsigned long start_pfn = early_node_map[i].start_pfn;
3235 		unsigned long end_pfn = early_node_map[i].end_pfn;
3236 
3237 		if (start_pfn <= pfn && pfn < end_pfn)
3238 			return early_node_map[i].nid;
3239 	}
3240 	/* This is a memory hole */
3241 	return -1;
3242 }
3243 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3244 
3245 int __meminit early_pfn_to_nid(unsigned long pfn)
3246 {
3247 	int nid;
3248 
3249 	nid = __early_pfn_to_nid(pfn);
3250 	if (nid >= 0)
3251 		return nid;
3252 	/* just returns 0 */
3253 	return 0;
3254 }
3255 
3256 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3257 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3258 {
3259 	int nid;
3260 
3261 	nid = __early_pfn_to_nid(pfn);
3262 	if (nid >= 0 && nid != node)
3263 		return false;
3264 	return true;
3265 }
3266 #endif
3267 
3268 /* Basic iterator support to walk early_node_map[] */
3269 #define for_each_active_range_index_in_nid(i, nid) \
3270 	for (i = first_active_region_index_in_nid(nid); i != -1; \
3271 				i = next_active_region_index_in_nid(i, nid))
3272 
3273 /**
3274  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3275  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3276  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3277  *
3278  * If an architecture guarantees that all ranges registered with
3279  * add_active_ranges() contain no holes and may be freed, this
3280  * function may be used instead of calling free_bootmem() manually.
3281  */
3282 void __init free_bootmem_with_active_regions(int nid,
3283 						unsigned long max_low_pfn)
3284 {
3285 	int i;
3286 
3287 	for_each_active_range_index_in_nid(i, nid) {
3288 		unsigned long size_pages = 0;
3289 		unsigned long end_pfn = early_node_map[i].end_pfn;
3290 
3291 		if (early_node_map[i].start_pfn >= max_low_pfn)
3292 			continue;
3293 
3294 		if (end_pfn > max_low_pfn)
3295 			end_pfn = max_low_pfn;
3296 
3297 		size_pages = end_pfn - early_node_map[i].start_pfn;
3298 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3299 				PFN_PHYS(early_node_map[i].start_pfn),
3300 				size_pages << PAGE_SHIFT);
3301 	}
3302 }
3303 
3304 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3305 {
3306 	int i;
3307 	int ret;
3308 
3309 	for_each_active_range_index_in_nid(i, nid) {
3310 		ret = work_fn(early_node_map[i].start_pfn,
3311 			      early_node_map[i].end_pfn, data);
3312 		if (ret)
3313 			break;
3314 	}
3315 }
3316 /**
3317  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3318  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3319  *
3320  * If an architecture guarantees that all ranges registered with
3321  * add_active_ranges() contain no holes and may be freed, this
3322  * function may be used instead of calling memory_present() manually.
3323  */
3324 void __init sparse_memory_present_with_active_regions(int nid)
3325 {
3326 	int i;
3327 
3328 	for_each_active_range_index_in_nid(i, nid)
3329 		memory_present(early_node_map[i].nid,
3330 				early_node_map[i].start_pfn,
3331 				early_node_map[i].end_pfn);
3332 }
3333 
3334 /**
3335  * get_pfn_range_for_nid - Return the start and end page frames for a node
3336  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3337  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3338  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3339  *
3340  * It returns the start and end page frame of a node based on information
3341  * provided by an arch calling add_active_range(). If called for a node
3342  * with no available memory, a warning is printed and the start and end
3343  * PFNs will be 0.
3344  */
3345 void __meminit get_pfn_range_for_nid(unsigned int nid,
3346 			unsigned long *start_pfn, unsigned long *end_pfn)
3347 {
3348 	int i;
3349 	*start_pfn = -1UL;
3350 	*end_pfn = 0;
3351 
3352 	for_each_active_range_index_in_nid(i, nid) {
3353 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3354 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3355 	}
3356 
3357 	if (*start_pfn == -1UL)
3358 		*start_pfn = 0;
3359 }
3360 
3361 /*
3362  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3363  * assumption is made that zones within a node are ordered by monotonically
3364  * increasing memory addresses so that the "highest" populated zone is used
3365  */
3366 static void __init find_usable_zone_for_movable(void)
3367 {
3368 	int zone_index;
3369 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3370 		if (zone_index == ZONE_MOVABLE)
3371 			continue;
3372 
3373 		if (arch_zone_highest_possible_pfn[zone_index] >
3374 				arch_zone_lowest_possible_pfn[zone_index])
3375 			break;
3376 	}
3377 
3378 	VM_BUG_ON(zone_index == -1);
3379 	movable_zone = zone_index;
3380 }
3381 
3382 /*
3383  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3384  * because it is sized independently of the architecture. Unlike the other zones,
3385  * the starting point for ZONE_MOVABLE is not fixed. It may be different
3386  * in each node depending on the size of each node and how evenly kernelcore
3387  * is distributed. This helper function adjusts the zone ranges
3388  * provided by the architecture for a given node by using the end of the
3389  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3390  * zones within a node are ordered by monotonically increasing memory addresses.
3391  */
3392 static void __meminit adjust_zone_range_for_zone_movable(int nid,
3393 					unsigned long zone_type,
3394 					unsigned long node_start_pfn,
3395 					unsigned long node_end_pfn,
3396 					unsigned long *zone_start_pfn,
3397 					unsigned long *zone_end_pfn)
3398 {
3399 	/* Only adjust if ZONE_MOVABLE is on this node */
3400 	if (zone_movable_pfn[nid]) {
3401 		/* Size ZONE_MOVABLE */
3402 		if (zone_type == ZONE_MOVABLE) {
3403 			*zone_start_pfn = zone_movable_pfn[nid];
3404 			*zone_end_pfn = min(node_end_pfn,
3405 				arch_zone_highest_possible_pfn[movable_zone]);
3406 
3407 		/* Adjust for ZONE_MOVABLE starting within this range */
3408 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3409 				*zone_end_pfn > zone_movable_pfn[nid]) {
3410 			*zone_end_pfn = zone_movable_pfn[nid];
3411 
3412 		/* Check if this whole range is within ZONE_MOVABLE */
3413 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3414 			*zone_start_pfn = *zone_end_pfn;
3415 	}
3416 }
3417 
3418 /*
3419  * Return the number of pages a zone spans in a node, including holes
3420  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3421  */
3422 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3423 					unsigned long zone_type,
3424 					unsigned long *ignored)
3425 {
3426 	unsigned long node_start_pfn, node_end_pfn;
3427 	unsigned long zone_start_pfn, zone_end_pfn;
3428 
3429 	/* Get the start and end of the node and zone */
3430 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3431 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3432 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3433 	adjust_zone_range_for_zone_movable(nid, zone_type,
3434 				node_start_pfn, node_end_pfn,
3435 				&zone_start_pfn, &zone_end_pfn);
3436 
3437 	/* Check that this node has pages within the zone's required range */
3438 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3439 		return 0;
3440 
3441 	/* Move the zone boundaries inside the node if necessary */
3442 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3443 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3444 
3445 	/* Return the spanned pages */
3446 	return zone_end_pfn - zone_start_pfn;
3447 }
3448 
3449 /*
3450  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3451  * then all holes in the requested range will be accounted for.
3452  */
3453 static unsigned long __meminit __absent_pages_in_range(int nid,
3454 				unsigned long range_start_pfn,
3455 				unsigned long range_end_pfn)
3456 {
3457 	int i = 0;
3458 	unsigned long prev_end_pfn = 0, hole_pages = 0;
3459 	unsigned long start_pfn;
3460 
3461 	/* Find the end_pfn of the first active range of pfns in the node */
3462 	i = first_active_region_index_in_nid(nid);
3463 	if (i == -1)
3464 		return 0;
3465 
3466 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3467 
3468 	/* Account for ranges before physical memory on this node */
3469 	if (early_node_map[i].start_pfn > range_start_pfn)
3470 		hole_pages = prev_end_pfn - range_start_pfn;
3471 
3472 	/* Find all holes for the zone within the node */
3473 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3474 
3475 		/* No need to continue if prev_end_pfn is outside the zone */
3476 		if (prev_end_pfn >= range_end_pfn)
3477 			break;
3478 
3479 		/* Make sure the end of the zone is not within the hole */
3480 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3481 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3482 
3483 		/* Update the hole size count and move on */
3484 		if (start_pfn > range_start_pfn) {
3485 			BUG_ON(prev_end_pfn > start_pfn);
3486 			hole_pages += start_pfn - prev_end_pfn;
3487 		}
3488 		prev_end_pfn = early_node_map[i].end_pfn;
3489 	}
3490 
3491 	/* Account for ranges past physical memory on this node */
3492 	if (range_end_pfn > prev_end_pfn)
3493 		hole_pages += range_end_pfn -
3494 				max(range_start_pfn, prev_end_pfn);
3495 
3496 	return hole_pages;
3497 }
3498 
3499 /**
3500  * absent_pages_in_range - Return number of page frames in holes within a range
3501  * @start_pfn: The start PFN to start searching for holes
3502  * @end_pfn: The end PFN to stop searching for holes
3503  *
3504  * It returns the number of page frames in memory holes within a range.
3505  */
3506 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3507 							unsigned long end_pfn)
3508 {
3509 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3510 }
3511 
3512 /* Return the number of page frames in holes in a zone on a node */
3513 static unsigned long __meminit zone_absent_pages_in_node(int nid,
3514 					unsigned long zone_type,
3515 					unsigned long *ignored)
3516 {
3517 	unsigned long node_start_pfn, node_end_pfn;
3518 	unsigned long zone_start_pfn, zone_end_pfn;
3519 
3520 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3521 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3522 							node_start_pfn);
3523 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3524 							node_end_pfn);
3525 
3526 	adjust_zone_range_for_zone_movable(nid, zone_type,
3527 			node_start_pfn, node_end_pfn,
3528 			&zone_start_pfn, &zone_end_pfn);
3529 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3530 }
3531 
3532 #else
3533 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3534 					unsigned long zone_type,
3535 					unsigned long *zones_size)
3536 {
3537 	return zones_size[zone_type];
3538 }
3539 
3540 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3541 						unsigned long zone_type,
3542 						unsigned long *zholes_size)
3543 {
3544 	if (!zholes_size)
3545 		return 0;
3546 
3547 	return zholes_size[zone_type];
3548 }
3549 
3550 #endif
3551 
3552 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3553 		unsigned long *zones_size, unsigned long *zholes_size)
3554 {
3555 	unsigned long realtotalpages, totalpages = 0;
3556 	enum zone_type i;
3557 
3558 	for (i = 0; i < MAX_NR_ZONES; i++)
3559 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3560 								zones_size);
3561 	pgdat->node_spanned_pages = totalpages;
3562 
3563 	realtotalpages = totalpages;
3564 	for (i = 0; i < MAX_NR_ZONES; i++)
3565 		realtotalpages -=
3566 			zone_absent_pages_in_node(pgdat->node_id, i,
3567 								zholes_size);
3568 	pgdat->node_present_pages = realtotalpages;
3569 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3570 							realtotalpages);
3571 }
3572 
3573 #ifndef CONFIG_SPARSEMEM
3574 /*
3575  * Calculate the size of the zone->blockflags rounded to an unsigned long
3576  * Start by making sure zonesize is a multiple of pageblock_order by rounding
3577  * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3578  * round what is now in bits to nearest long in bits, then return it in
3579  * bytes.
3580  */
3581 static unsigned long __init usemap_size(unsigned long zonesize)
3582 {
3583 	unsigned long usemapsize;
3584 
3585 	usemapsize = roundup(zonesize, pageblock_nr_pages);
3586 	usemapsize = usemapsize >> pageblock_order;
3587 	usemapsize *= NR_PAGEBLOCK_BITS;
3588 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3589 
3590 	return usemapsize / 8;
3591 }
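
/*
 * Worked example (assuming 4KiB pages, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4, i.e. 2MiB pageblocks): a 1GiB zone spans 262144
 * pages, already a multiple of the 512-page pageblock.  That gives
 * 512 pageblocks * 4 bits = 2048 bits, already a multiple of 64 bits,
 * so the usemap occupies 2048 / 8 = 256 bytes.
 */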
3592 
3593 static void __init setup_usemap(struct pglist_data *pgdat,
3594 				struct zone *zone, unsigned long zonesize)
3595 {
3596 	unsigned long usemapsize = usemap_size(zonesize);
3597 	zone->pageblock_flags = NULL;
3598 	if (usemapsize)
3599 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3600 }
3601 #else
3602 static inline void setup_usemap(struct pglist_data *pgdat,
3603 				struct zone *zone, unsigned long zonesize) {}
3604 #endif /* CONFIG_SPARSEMEM */
3605 
3606 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3607 
3608 /* Return a sensible default order for the pageblock size. */
3609 static inline int pageblock_default_order(void)
3610 {
3611 	if (HPAGE_SHIFT > PAGE_SHIFT)
3612 		return HUGETLB_PAGE_ORDER;
3613 
3614 	return MAX_ORDER-1;
3615 }
3616 
3617 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3618 static inline void __init set_pageblock_order(unsigned int order)
3619 {
3620 	/* Check that pageblock_nr_pages has not already been setup */
3621 	if (pageblock_order)
3622 		return;
3623 
3624 	/*
3625 	 * Assume the largest contiguous order of interest is a huge page.
3626 	 * This value may be variable depending on boot parameters on IA64
3627 	 */
3628 	pageblock_order = order;
3629 }
3630 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3631 
3632 /*
3633  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3634  * and pageblock_default_order() are unused as pageblock_order is set
3635  * at compile-time. See include/linux/pageblock-flags.h for the values of
3636  * pageblock_order based on the kernel config
3637  */
3638 static inline int pageblock_default_order(unsigned int order)
3639 {
3640 	return MAX_ORDER-1;
3641 }
3642 #define set_pageblock_order(x)	do {} while (0)
3643 
3644 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3645 
3646 /*
3647  * Set up the zone data structures:
3648  *   - mark all pages reserved
3649  *   - mark all memory queues empty
3650  *   - clear the memory bitmaps
3651  */
3652 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3653 		unsigned long *zones_size, unsigned long *zholes_size)
3654 {
3655 	enum zone_type j;
3656 	int nid = pgdat->node_id;
3657 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3658 	int ret;
3659 
3660 	pgdat_resize_init(pgdat);
3661 	pgdat->nr_zones = 0;
3662 	init_waitqueue_head(&pgdat->kswapd_wait);
3663 	pgdat->kswapd_max_order = 0;
3664 	pgdat_page_cgroup_init(pgdat);
3665 
3666 	for (j = 0; j < MAX_NR_ZONES; j++) {
3667 		struct zone *zone = pgdat->node_zones + j;
3668 		unsigned long size, realsize, memmap_pages;
3669 		enum lru_list l;
3670 
3671 		size = zone_spanned_pages_in_node(nid, j, zones_size);
3672 		realsize = size - zone_absent_pages_in_node(nid, j,
3673 								zholes_size);
3674 
3675 		/*
3676 		 * Adjust realsize so that it accounts for how much memory
3677 		 * is used by this zone for memmap. This affects the watermark
3678 		 * and per-cpu initialisations
3679 		 */
3680 		memmap_pages =
3681 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3682 		if (realsize >= memmap_pages) {
3683 			realsize -= memmap_pages;
3684 			if (memmap_pages)
3685 				printk(KERN_DEBUG
3686 				       "  %s zone: %lu pages used for memmap\n",
3687 				       zone_names[j], memmap_pages);
3688 		} else
3689 			printk(KERN_WARNING
3690 				"  %s zone: %lu pages exceeds realsize %lu\n",
3691 				zone_names[j], memmap_pages, realsize);
3692 
3693 		/* Account for reserved pages */
3694 		if (j == 0 && realsize > dma_reserve) {
3695 			realsize -= dma_reserve;
3696 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3697 					zone_names[0], dma_reserve);
3698 		}
3699 
3700 		if (!is_highmem_idx(j))
3701 			nr_kernel_pages += realsize;
3702 		nr_all_pages += realsize;
3703 
3704 		zone->spanned_pages = size;
3705 		zone->present_pages = realsize;
3706 #ifdef CONFIG_NUMA
3707 		zone->node = nid;
3708 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3709 						/ 100;
3710 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3711 #endif
3712 		zone->name = zone_names[j];
3713 		spin_lock_init(&zone->lock);
3714 		spin_lock_init(&zone->lru_lock);
3715 		zone_seqlock_init(zone);
3716 		zone->zone_pgdat = pgdat;
3717 
3718 		zone->prev_priority = DEF_PRIORITY;
3719 
3720 		zone_pcp_init(zone);
3721 		for_each_lru(l) {
3722 			INIT_LIST_HEAD(&zone->lru[l].list);
3723 			zone->lru[l].nr_saved_scan = 0;
3724 		}
3725 		zone->reclaim_stat.recent_rotated[0] = 0;
3726 		zone->reclaim_stat.recent_rotated[1] = 0;
3727 		zone->reclaim_stat.recent_scanned[0] = 0;
3728 		zone->reclaim_stat.recent_scanned[1] = 0;
3729 		zap_zone_vm_stats(zone);
3730 		zone->flags = 0;
3731 		if (!size)
3732 			continue;
3733 
3734 		set_pageblock_order(pageblock_default_order());
3735 		setup_usemap(pgdat, zone, size);
3736 		ret = init_currently_empty_zone(zone, zone_start_pfn,
3737 						size, MEMMAP_EARLY);
3738 		BUG_ON(ret);
3739 		memmap_init(size, nid, j, zone_start_pfn);
3740 		zone_start_pfn += size;
3741 	}
3742 }
3743 
3744 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3745 {
3746 	/* Skip empty nodes */
3747 	if (!pgdat->node_spanned_pages)
3748 		return;
3749 
3750 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3751 	/* ia64 gets its own node_mem_map, before this, without bootmem */
3752 	if (!pgdat->node_mem_map) {
3753 		unsigned long size, start, end;
3754 		struct page *map;
3755 
3756 		/*
3757 		 * The zone's endpoints aren't required to be MAX_ORDER
3758 		 * aligned but the node_mem_map endpoints must be in order
3759 		 * for the buddy allocator to function correctly.
3760 		 */
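		/*
		 * Illustrative example (hypothetical values, with
		 * MAX_ORDER_NR_PAGES == 2048): a node starting at pfn 0x1234
		 * with 0x5000 spanned pages gives start = 0x1000 and
		 * end = ALIGN(0x6234, 0x800) = 0x6800, so the map covers
		 * 0x5800 struct pages and node_mem_map points 0x234 entries
		 * into it.
		 */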
3761 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3762 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3763 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3764 		size =  (end - start) * sizeof(struct page);
3765 		map = alloc_remap(pgdat->node_id, size);
3766 		if (!map)
3767 			map = alloc_bootmem_node(pgdat, size);
3768 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3769 	}
3770 #ifndef CONFIG_NEED_MULTIPLE_NODES
3771 	/*
3772 	 * With no DISCONTIG, the global mem_map is just set as node 0's
3773 	 */
3774 	if (pgdat == NODE_DATA(0)) {
3775 		mem_map = NODE_DATA(0)->node_mem_map;
3776 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3777 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3778 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3779 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3780 	}
3781 #endif
3782 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
3783 }
3784 
3785 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3786 		unsigned long node_start_pfn, unsigned long *zholes_size)
3787 {
3788 	pg_data_t *pgdat = NODE_DATA(nid);
3789 
3790 	pgdat->node_id = nid;
3791 	pgdat->node_start_pfn = node_start_pfn;
3792 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3793 
3794 	alloc_node_mem_map(pgdat);
3795 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3796 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3797 		nid, (unsigned long)pgdat,
3798 		(unsigned long)pgdat->node_mem_map);
3799 #endif
3800 
3801 	free_area_init_core(pgdat, zones_size, zholes_size);
3802 }
3803 
3804 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3805 
3806 #if MAX_NUMNODES > 1
3807 /*
3808  * Figure out the number of possible node ids.
3809  */
3810 static void __init setup_nr_node_ids(void)
3811 {
3812 	unsigned int node;
3813 	unsigned int highest = 0;
3814 
3815 	for_each_node_mask(node, node_possible_map)
3816 		highest = node;
3817 	nr_node_ids = highest + 1;
3818 }
3819 #else
3820 static inline void setup_nr_node_ids(void)
3821 {
3822 }
3823 #endif
3824 
3825 /**
3826  * add_active_range - Register a range of PFNs backed by physical memory
3827  * @nid: The node ID the range resides on
3828  * @start_pfn: The start PFN of the available physical memory
3829  * @end_pfn: The end PFN of the available physical memory
3830  *
3831  * These ranges are stored in an early_node_map[] and later used by
3832  * free_area_init_nodes() to calculate zone sizes and holes. If the
3833  * range spans a memory hole, it is up to the architecture to ensure
3834  * the memory is not freed by the bootmem allocator. If possible
3835  * the range being registered will be merged with existing ranges.
3836  */
3837 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3838 						unsigned long end_pfn)
3839 {
3840 	int i;
3841 
3842 	mminit_dprintk(MMINIT_TRACE, "memory_register",
3843 			"Entering add_active_range(%d, %#lx, %#lx) "
3844 			"%d entries of %d used\n",
3845 			nid, start_pfn, end_pfn,
3846 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3847 
3848 	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3849 
3850 	/* Merge with existing active regions if possible */
3851 	for (i = 0; i < nr_nodemap_entries; i++) {
3852 		if (early_node_map[i].nid != nid)
3853 			continue;
3854 
3855 		/* Skip if an existing region covers this new one */
3856 		if (start_pfn >= early_node_map[i].start_pfn &&
3857 				end_pfn <= early_node_map[i].end_pfn)
3858 			return;
3859 
3860 		/* Merge forward if suitable */
3861 		if (start_pfn <= early_node_map[i].end_pfn &&
3862 				end_pfn > early_node_map[i].end_pfn) {
3863 			early_node_map[i].end_pfn = end_pfn;
3864 			return;
3865 		}
3866 
3867 		/* Merge backward if suitable */
3868 		if (start_pfn < early_node_map[i].end_pfn &&
3869 				end_pfn >= early_node_map[i].start_pfn) {
3870 			early_node_map[i].start_pfn = start_pfn;
3871 			return;
3872 		}
3873 	}
3874 
3875 	/* Check that early_node_map is large enough */
3876 	if (i >= MAX_ACTIVE_REGIONS) {
3877 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3878 							MAX_ACTIVE_REGIONS);
3879 		return;
3880 	}
3881 
3882 	early_node_map[i].nid = nid;
3883 	early_node_map[i].start_pfn = start_pfn;
3884 	early_node_map[i].end_pfn = end_pfn;
3885 	nr_nodemap_entries = i + 1;
3886 }
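
/*
 * Example usage (the PFN values are purely illustrative): an architecture
 * that discovers two memory banks on node 0 during early boot might
 * register them as
 *
 *	add_active_range(0, 0x0000, 0x00a0);
 *	add_active_range(0, 0x0100, 0x8000);
 *
 * and later pass the per-zone maximum PFNs to free_area_init_nodes().
 */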
3887 
3888 /**
3889  * remove_active_range - Shrink an existing registered range of PFNs
3890  * @nid: The node id the range is on that should be shrunk
3891  * @start_pfn: The start PFN of the range to remove
3892  * @end_pfn: The end PFN of the range to remove
3893  *
3894  * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3895  * The map is kept near the end of the physical page range that has already been
3896  * registered. This function allows an arch to shrink an existing registered
3897  * range.
3898  */
3899 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3900 				unsigned long end_pfn)
3901 {
3902 	int i, j;
3903 	int removed = 0;
3904 
3905 	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3906 			  nid, start_pfn, end_pfn);
3907 
3908 	/* Find the old active region end and shrink */
3909 	for_each_active_range_index_in_nid(i, nid) {
3910 		if (early_node_map[i].start_pfn >= start_pfn &&
3911 		    early_node_map[i].end_pfn <= end_pfn) {
3912 			/* clear it */
3913 			early_node_map[i].start_pfn = 0;
3914 			early_node_map[i].end_pfn = 0;
3915 			removed = 1;
3916 			continue;
3917 		}
3918 		if (early_node_map[i].start_pfn < start_pfn &&
3919 		    early_node_map[i].end_pfn > start_pfn) {
3920 			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3921 			early_node_map[i].end_pfn = start_pfn;
3922 			if (temp_end_pfn > end_pfn)
3923 				add_active_range(nid, end_pfn, temp_end_pfn);
3924 			continue;
3925 		}
3926 		if (early_node_map[i].start_pfn >= start_pfn &&
3927 		    early_node_map[i].end_pfn > end_pfn &&
3928 		    early_node_map[i].start_pfn < end_pfn) {
3929 			early_node_map[i].start_pfn = end_pfn;
3930 			continue;
3931 		}
3932 	}
3933 
3934 	if (!removed)
3935 		return;
3936 
3937 	/* remove the blank ones */
3938 	for (i = nr_nodemap_entries - 1; i > 0; i--) {
3939 		if (early_node_map[i].nid != nid)
3940 			continue;
3941 		if (early_node_map[i].end_pfn)
3942 			continue;
3943 		/* we found it, get rid of it */
3944 		for (j = i; j < nr_nodemap_entries - 1; j++)
3945 			memcpy(&early_node_map[j], &early_node_map[j+1],
3946 				sizeof(early_node_map[j]));
3947 		j = nr_nodemap_entries - 1;
3948 		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3949 		nr_nodemap_entries--;
3950 	}
3951 }
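
/*
 * Illustrative example (hypothetical PFNs): with a registered region
 * [0x1000, 0x5000) on node 0, remove_active_range(0, 0x2000, 0x3000)
 * shrinks the region to [0x1000, 0x2000) and re-registers the remainder
 * [0x3000, 0x5000) via add_active_range().  Removing [0x1000, 0x5000)
 * instead clears the entry and compacts early_node_map[].
 */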
3952 
3953 /**
3954  * remove_all_active_ranges - Remove all currently registered regions
3955  *
3956  * During discovery, it may be found that a table like SRAT is invalid
3957  * and an alternative discovery method must be used. This function removes
3958  * all currently registered regions.
3959  */
3960 void __init remove_all_active_ranges(void)
3961 {
3962 	memset(early_node_map, 0, sizeof(early_node_map));
3963 	nr_nodemap_entries = 0;
3964 }
3965 
3966 /* Compare two active node_active_regions */
3967 static int __init cmp_node_active_region(const void *a, const void *b)
3968 {
3969 	struct node_active_region *arange = (struct node_active_region *)a;
3970 	struct node_active_region *brange = (struct node_active_region *)b;
3971 
3972 	/* Done this way to avoid overflows */
3973 	if (arange->start_pfn > brange->start_pfn)
3974 		return 1;
3975 	if (arange->start_pfn < brange->start_pfn)
3976 		return -1;
3977 
3978 	return 0;
3979 }
3980 
3981 /* sort the node_map by start_pfn */
3982 static void __init sort_node_map(void)
3983 {
3984 	sort(early_node_map, (size_t)nr_nodemap_entries,
3985 			sizeof(struct node_active_region),
3986 			cmp_node_active_region, NULL);
3987 }
3988 
3989 /* Find the lowest pfn for a node */
3990 static unsigned long __init find_min_pfn_for_node(int nid)
3991 {
3992 	int i;
3993 	unsigned long min_pfn = ULONG_MAX;
3994 
3995 	/* Assuming a sorted map, the first range found has the starting pfn */
3996 	for_each_active_range_index_in_nid(i, nid)
3997 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3998 
3999 	if (min_pfn == ULONG_MAX) {
4000 		printk(KERN_WARNING
4001 			"Could not find start_pfn for node %d\n", nid);
4002 		return 0;
4003 	}
4004 
4005 	return min_pfn;
4006 }
4007 
4008 /**
4009  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4010  *
4011  * It returns the minimum PFN based on information provided via
4012  * add_active_range().
4013  */
4014 unsigned long __init find_min_pfn_with_active_regions(void)
4015 {
4016 	return find_min_pfn_for_node(MAX_NUMNODES);
4017 }
4018 
4019 /*
4020  * early_calculate_totalpages()
4021  * Sum pages in active regions for movable zone.
4022  * Populate N_HIGH_MEMORY for calculating usable_nodes.
4023  */
4024 static unsigned long __init early_calculate_totalpages(void)
4025 {
4026 	int i;
4027 	unsigned long totalpages = 0;
4028 
4029 	for (i = 0; i < nr_nodemap_entries; i++) {
4030 		unsigned long pages = early_node_map[i].end_pfn -
4031 						early_node_map[i].start_pfn;
4032 		totalpages += pages;
4033 		if (pages)
4034 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4035 	}
4036 	return totalpages;
4037 }
4038 
4039 /*
4040  * Find the PFN the Movable zone begins in each node. Kernel memory
4041  * is spread evenly between nodes as long as the nodes have enough
4042  * memory. When they don't, some nodes will have more kernelcore than
4043  * others
4044  */
4045 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4046 {
4047 	int i, nid;
4048 	unsigned long usable_startpfn;
4049 	unsigned long kernelcore_node, kernelcore_remaining;
4050 	/* save the state before borrow the nodemask */
4051 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4052 	unsigned long totalpages = early_calculate_totalpages();
4053 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4054 
4055 	/*
4056 	 * If movablecore was specified, calculate the corresponding size of
4057 	 * kernelcore so that memory usable for any allocation type is
4058 	 * spread evenly. If both kernelcore
4059 	 * and movablecore are specified, then the value of kernelcore
4060 	 * will be used for required_kernelcore if it's greater than
4061 	 * what movablecore would have allowed.
4062 	 */
4063 	if (required_movablecore) {
4064 		unsigned long corepages;
4065 
4066 		/*
4067 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4068 		 * was requested by the user
4069 		 */
4070 		required_movablecore =
4071 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4072 		corepages = totalpages - required_movablecore;
4073 
4074 		required_kernelcore = max(required_kernelcore, corepages);
4075 	}
4076 
4077 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4078 	if (!required_kernelcore)
4079 		goto out;
4080 
4081 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4082 	find_usable_zone_for_movable();
4083 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4084 
4085 restart:
4086 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4087 	kernelcore_node = required_kernelcore / usable_nodes;
4088 	for_each_node_state(nid, N_HIGH_MEMORY) {
4089 		/*
4090 		 * Recalculate kernelcore_node if the division per node
4091 		 * now exceeds what is necessary to satisfy the requested
4092 		 * amount of memory for the kernel
4093 		 */
4094 		if (required_kernelcore < kernelcore_node)
4095 			kernelcore_node = required_kernelcore / usable_nodes;
4096 
4097 		/*
4098 		 * As the map is walked, we track how much memory is usable
4099 		 * by the kernel using kernelcore_remaining. When it is
4100 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4101 		 */
4102 		kernelcore_remaining = kernelcore_node;
4103 
4104 		/* Go through each range of PFNs within this node */
4105 		for_each_active_range_index_in_nid(i, nid) {
4106 			unsigned long start_pfn, end_pfn;
4107 			unsigned long size_pages;
4108 
4109 			start_pfn = max(early_node_map[i].start_pfn,
4110 						zone_movable_pfn[nid]);
4111 			end_pfn = early_node_map[i].end_pfn;
4112 			if (start_pfn >= end_pfn)
4113 				continue;
4114 
4115 			/* Account for what is only usable for kernelcore */
4116 			if (start_pfn < usable_startpfn) {
4117 				unsigned long kernel_pages;
4118 				kernel_pages = min(end_pfn, usable_startpfn)
4119 								- start_pfn;
4120 
4121 				kernelcore_remaining -= min(kernel_pages,
4122 							kernelcore_remaining);
4123 				required_kernelcore -= min(kernel_pages,
4124 							required_kernelcore);
4125 
4126 				/* Continue if range is now fully accounted */
4127 				if (end_pfn <= usable_startpfn) {
4128 
4129 					/*
4130 					 * Push zone_movable_pfn to the end so
4131 					 * that if we have to rebalance
4132 					 * kernelcore across nodes, we will
4133 					 * not double account here
4134 					 */
4135 					zone_movable_pfn[nid] = end_pfn;
4136 					continue;
4137 				}
4138 				start_pfn = usable_startpfn;
4139 			}
4140 
4141 			/*
4142 			 * The usable PFN range for ZONE_MOVABLE is from
4143 			 * start_pfn->end_pfn. Calculate size_pages as the
4144 			 * number of pages used as kernelcore
4145 			 */
4146 			size_pages = end_pfn - start_pfn;
4147 			if (size_pages > kernelcore_remaining)
4148 				size_pages = kernelcore_remaining;
4149 			zone_movable_pfn[nid] = start_pfn + size_pages;
4150 
4151 			/*
4152 			 * Some kernelcore has been met, update counts and
4153 			 * break if the kernelcore for this node has been
4154 			 * satisfied
4155 			 */
4156 			required_kernelcore -= min(required_kernelcore,
4157 								size_pages);
4158 			kernelcore_remaining -= size_pages;
4159 			if (!kernelcore_remaining)
4160 				break;
4161 		}
4162 	}
4163 
4164 	/*
4165 	 * If there is still required_kernelcore, we do another pass with one
4166 	 * less node in the count. This will push zone_movable_pfn[nid] further
4167 	 * along on the nodes that still have memory until kernelcore is
4168 	 * satisified
4169 	 */
4170 	usable_nodes--;
4171 	if (usable_nodes && required_kernelcore > usable_nodes)
4172 		goto restart;
4173 
4174 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4175 	for (nid = 0; nid < MAX_NUMNODES; nid++)
4176 		zone_movable_pfn[nid] =
4177 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4178 
4179 out:
4180 	/* restore the node_state */
4181 	node_states[N_HIGH_MEMORY] = saved_node_state;
4182 }
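
/*
 * Illustrative example (hypothetical sizes): with kernelcore=2G spread
 * across two nodes of 2G each, each node is initially asked to hold 1G of
 * kernel pages, so zone_movable_pfn[nid] ends up 1G into each node and
 * the remaining 1G per node becomes ZONE_MOVABLE.  If one node is too
 * small to hold its share, the restart pass spreads the shortfall over
 * the nodes that still have memory.
 */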
4183 
4184 /* Any regular memory on that node ? */
4185 static void check_for_regular_memory(pg_data_t *pgdat)
4186 {
4187 #ifdef CONFIG_HIGHMEM
4188 	enum zone_type zone_type;
4189 
4190 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4191 		struct zone *zone = &pgdat->node_zones[zone_type];
4192 		if (zone->present_pages)
4193 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4194 	}
4195 #endif
4196 }
4197 
4198 /**
4199  * free_area_init_nodes - Initialise all pg_data_t and zone data
4200  * @max_zone_pfn: an array of max PFNs for each zone
4201  *
4202  * This will call free_area_init_node() for each active node in the system.
4203  * Using the page ranges provided by add_active_range(), the size of each
4204  * zone in each node and their holes is calculated. If the maximum PFN
4205  * between two adjacent zones match, it is assumed that the zone is empty.
4206  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4207  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4208  * starts where the previous one ended. For example, ZONE_DMA32 starts
4209  * at arch_max_dma_pfn.
4210  */
4211 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4212 {
4213 	unsigned long nid;
4214 	int i;
4215 
4216 	/* Sort early_node_map as initialisation assumes it is sorted */
4217 	sort_node_map();
4218 
4219 	/* Record where the zone boundaries are */
4220 	memset(arch_zone_lowest_possible_pfn, 0,
4221 				sizeof(arch_zone_lowest_possible_pfn));
4222 	memset(arch_zone_highest_possible_pfn, 0,
4223 				sizeof(arch_zone_highest_possible_pfn));
4224 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4225 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4226 	for (i = 1; i < MAX_NR_ZONES; i++) {
4227 		if (i == ZONE_MOVABLE)
4228 			continue;
4229 		arch_zone_lowest_possible_pfn[i] =
4230 			arch_zone_highest_possible_pfn[i-1];
4231 		arch_zone_highest_possible_pfn[i] =
4232 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4233 	}
4234 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4235 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4236 
4237 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4238 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4239 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4240 
4241 	/* Print out the zone ranges */
4242 	printk("Zone PFN ranges:\n");
4243 	for (i = 0; i < MAX_NR_ZONES; i++) {
4244 		if (i == ZONE_MOVABLE)
4245 			continue;
4246 		printk("  %-8s %0#10lx -> %0#10lx\n",
4247 				zone_names[i],
4248 				arch_zone_lowest_possible_pfn[i],
4249 				arch_zone_highest_possible_pfn[i]);
4250 	}
4251 
4252 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4253 	printk("Movable zone start PFN for each node\n");
4254 	for (i = 0; i < MAX_NUMNODES; i++) {
4255 		if (zone_movable_pfn[i])
4256 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4257 	}
4258 
4259 	/* Print out the early_node_map[] */
4260 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4261 	for (i = 0; i < nr_nodemap_entries; i++)
4262 		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4263 						early_node_map[i].start_pfn,
4264 						early_node_map[i].end_pfn);
4265 
4266 	/* Initialise every node */
4267 	mminit_verify_pageflags_layout();
4268 	setup_nr_node_ids();
4269 	for_each_online_node(nid) {
4270 		pg_data_t *pgdat = NODE_DATA(nid);
4271 		free_area_init_node(nid, NULL,
4272 				find_min_pfn_for_node(nid), NULL);
4273 
4274 		/* Any memory on that node */
4275 		if (pgdat->node_present_pages)
4276 			node_set_state(nid, N_HIGH_MEMORY);
4277 		check_for_regular_memory(pgdat);
4278 	}
4279 }
4280 
4281 static int __init cmdline_parse_core(char *p, unsigned long *core)
4282 {
4283 	unsigned long long coremem;
4284 	if (!p)
4285 		return -EINVAL;
4286 
4287 	coremem = memparse(p, &p);
4288 	*core = coremem >> PAGE_SHIFT;
4289 
4290 	/* Paranoid check that UL is enough for the coremem value */
4291 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4292 
4293 	return 0;
4294 }
4295 
4296 /*
4297  * kernelcore=size sets the amount of memory for use for allocations that
4298  * cannot be reclaimed or migrated.
4299  */
4300 static int __init cmdline_parse_kernelcore(char *p)
4301 {
4302 	return cmdline_parse_core(p, &required_kernelcore);
4303 }
4304 
4305 /*
4306  * movablecore=size sets the amount of memory for use for allocations that
4307  * can be reclaimed or migrated.
4308  */
4309 static int __init cmdline_parse_movablecore(char *p)
4310 {
4311 	return cmdline_parse_core(p, &required_movablecore);
4312 }
4313 
4314 early_param("kernelcore", cmdline_parse_kernelcore);
4315 early_param("movablecore", cmdline_parse_movablecore);
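
/*
 * Example command lines (illustrative): "kernelcore=512M" keeps roughly
 * 512M usable for unmovable allocations and places the remainder in
 * ZONE_MOVABLE, while "movablecore=1G" requests at least 1G of movable
 * memory and derives the kernelcore figure from the total instead.
 */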
4316 
4317 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4318 
4319 /**
4320  * set_dma_reserve - set the specified number of pages reserved in the first zone
4321  * @new_dma_reserve: The number of pages to mark reserved
4322  *
4323  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4324  * In the DMA zone, a significant percentage may be consumed by kernel image
4325  * and other unfreeable allocations which can skew the watermarks badly. This
4326  * function may optionally be used to account for unfreeable pages in the
4327  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4328  * smaller per-cpu batchsize.
4329  */
4330 void __init set_dma_reserve(unsigned long new_dma_reserve)
4331 {
4332 	dma_reserve = new_dma_reserve;
4333 }
4334 
4335 #ifndef CONFIG_NEED_MULTIPLE_NODES
4336 struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4337 EXPORT_SYMBOL(contig_page_data);
4338 #endif
4339 
4340 void __init free_area_init(unsigned long *zones_size)
4341 {
4342 	free_area_init_node(0, zones_size,
4343 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4344 }
4345 
4346 static int page_alloc_cpu_notify(struct notifier_block *self,
4347 				 unsigned long action, void *hcpu)
4348 {
4349 	int cpu = (unsigned long)hcpu;
4350 
4351 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4352 		drain_pages(cpu);
4353 
4354 		/*
4355 		 * Spill the event counters of the dead processor
4356 		 * into the current processor's event counters.
4357 		 * This artificially elevates the count of the current
4358 		 * processor.
4359 		 */
4360 		vm_events_fold_cpu(cpu);
4361 
4362 		/*
4363 		 * Zero the differential counters of the dead processor
4364 		 * so that the vm statistics are consistent.
4365 		 *
4366 		 * This is only okay since the processor is dead and cannot
4367 		 * race with what we are doing.
4368 		 */
4369 		refresh_cpu_vm_stats(cpu);
4370 	}
4371 	return NOTIFY_OK;
4372 }
4373 
4374 void __init page_alloc_init(void)
4375 {
4376 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4377 }
4378 
4379 /*
4380  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4381  *	or min_free_kbytes changes.
4382  */
4383 static void calculate_totalreserve_pages(void)
4384 {
4385 	struct pglist_data *pgdat;
4386 	unsigned long reserve_pages = 0;
4387 	enum zone_type i, j;
4388 
4389 	for_each_online_pgdat(pgdat) {
4390 		for (i = 0; i < MAX_NR_ZONES; i++) {
4391 			struct zone *zone = pgdat->node_zones + i;
4392 			unsigned long max = 0;
4393 
4394 			/* Find valid and maximum lowmem_reserve in the zone */
4395 			for (j = i; j < MAX_NR_ZONES; j++) {
4396 				if (zone->lowmem_reserve[j] > max)
4397 					max = zone->lowmem_reserve[j];
4398 			}
4399 
4400 			/* we treat the high watermark as reserved pages. */
4401 			max += high_wmark_pages(zone);
4402 
4403 			if (max > zone->present_pages)
4404 				max = zone->present_pages;
4405 			reserve_pages += max;
4406 		}
4407 	}
4408 	totalreserve_pages = reserve_pages;
4409 }
4410 
4411 /*
4412  * setup_per_zone_lowmem_reserve - called whenever
4413  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4414  *	has a correct pages reserved value, so an adequate number of
4415  *	pages are left in the zone after a successful __alloc_pages().
4416  */
4417 static void setup_per_zone_lowmem_reserve(void)
4418 {
4419 	struct pglist_data *pgdat;
4420 	enum zone_type j, idx;
4421 
4422 	for_each_online_pgdat(pgdat) {
4423 		for (j = 0; j < MAX_NR_ZONES; j++) {
4424 			struct zone *zone = pgdat->node_zones + j;
4425 			unsigned long present_pages = zone->present_pages;
4426 
4427 			zone->lowmem_reserve[j] = 0;
4428 
4429 			idx = j;
4430 			while (idx) {
4431 				struct zone *lower_zone;
4432 
4433 				idx--;
4434 
4435 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4436 					sysctl_lowmem_reserve_ratio[idx] = 1;
4437 
4438 				lower_zone = pgdat->node_zones + idx;
4439 				lower_zone->lowmem_reserve[j] = present_pages /
4440 					sysctl_lowmem_reserve_ratio[idx];
4441 				present_pages += lower_zone->present_pages;
4442 			}
4443 		}
4444 	}
4445 
4446 	/* update totalreserve_pages */
4447 	calculate_totalreserve_pages();
4448 }
4449 
4450 /**
4451  * setup_per_zone_wmarks - called when min_free_kbytes changes
4452  * or when memory is hot-{added|removed}
4453  *
4454  * Ensures that the watermark[min,low,high] values for each zone are set
4455  * correctly with respect to min_free_kbytes.
4456  */
4457 void setup_per_zone_wmarks(void)
4458 {
4459 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4460 	unsigned long lowmem_pages = 0;
4461 	struct zone *zone;
4462 	unsigned long flags;
4463 
4464 	/* Calculate total number of !ZONE_HIGHMEM pages */
4465 	for_each_zone(zone) {
4466 		if (!is_highmem(zone))
4467 			lowmem_pages += zone->present_pages;
4468 	}
4469 
4470 	for_each_zone(zone) {
4471 		u64 tmp;
4472 
4473 		spin_lock_irqsave(&zone->lock, flags);
4474 		tmp = (u64)pages_min * zone->present_pages;
4475 		do_div(tmp, lowmem_pages);
4476 		if (is_highmem(zone)) {
4477 			/*
4478 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4479 			 * need highmem pages, so cap pages_min to a small
4480 			 * value here.
4481 			 *
4482 			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
4483 			 * deltas control async page reclaim, and so should
4484 			 * not be capped for highmem.
4485 			 */
4486 			int min_pages;
4487 
4488 			min_pages = zone->present_pages / 1024;
4489 			if (min_pages < SWAP_CLUSTER_MAX)
4490 				min_pages = SWAP_CLUSTER_MAX;
4491 			if (min_pages > 128)
4492 				min_pages = 128;
4493 			zone->watermark[WMARK_MIN] = min_pages;
4494 		} else {
4495 			/*
4496 			 * If it's a lowmem zone, reserve a number of pages
4497 			 * proportionate to the zone's size.
4498 			 */
4499 			zone->watermark[WMARK_MIN] = tmp;
4500 		}
4501 
4502 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4503 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4504 		setup_zone_migrate_reserve(zone);
4505 		spin_unlock_irqrestore(&zone->lock, flags);
4506 	}
4507 
4508 	/* update totalreserve_pages */
4509 	calculate_totalreserve_pages();
4510 }
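
/*
 * Worked example (assuming 4KiB pages): with min_free_kbytes = 4096,
 * pages_min is 1024.  A lowmem zone holding half of all lowmem gets
 * tmp = 512, so its watermarks become WMARK_MIN = 512, WMARK_LOW =
 * 512 + 128 = 640 and WMARK_HIGH = 512 + 256 = 768 pages.  A highmem
 * zone instead has WMARK_MIN clamped between SWAP_CLUSTER_MAX and 128
 * pages, with WMARK_LOW/WMARK_HIGH still derived from tmp.
 */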
4511 
4512 /**
4513  * The inactive anon list should be small enough that the VM never has to
4514  * do too much work, but large enough that each inactive page has a chance
4515  * to be referenced again before it is swapped out.
4516  *
4517  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4518  * INACTIVE_ANON pages on this zone's LRU, maintained by the
4519  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4520  * the anonymous pages are kept on the inactive list.
4521  *
4522  * total     target    max
4523  * memory    ratio     inactive anon
4524  * -------------------------------------
4525  *   10MB       1         5MB
4526  *  100MB       1        50MB
4527  *    1GB       3       250MB
4528  *   10GB      10       0.9GB
4529  *  100GB      31         3GB
4530  *    1TB     101        10GB
4531  *   10TB     320        32GB
4532  */
4533 void calculate_zone_inactive_ratio(struct zone *zone)
4534 {
4535 	unsigned int gb, ratio;
4536 
4537 	/* Zone size in gigabytes */
4538 	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4539 	if (gb)
4540 		ratio = int_sqrt(10 * gb);
4541 	else
4542 		ratio = 1;
4543 
4544 	zone->inactive_ratio = ratio;
4545 }
4546 
4547 static void __init setup_per_zone_inactive_ratio(void)
4548 {
4549 	struct zone *zone;
4550 
4551 	for_each_zone(zone)
4552 		calculate_zone_inactive_ratio(zone);
4553 }
4554 
4555 /*
4556  * Initialise min_free_kbytes.
4557  *
4558  * For small machines we want it small (128k min).  For large machines
4559  * we want it large (64MB max).  But it is not linear, because network
4560  * bandwidth does not increase linearly with machine size.  We use
4561  *
4562  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4563  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4564  *
4565  * which yields
4566  *
4567  * 16MB:	512k
4568  * 32MB:	724k
4569  * 64MB:	1024k
4570  * 128MB:	1448k
4571  * 256MB:	2048k
4572  * 512MB:	2896k
4573  * 1024MB:	4096k
4574  * 2048MB:	5792k
4575  * 4096MB:	8192k
4576  * 8192MB:	11584k
4577  * 16384MB:	16384k
4578  */
4579 static int __init init_per_zone_wmark_min(void)
4580 {
4581 	unsigned long lowmem_kbytes;
4582 
4583 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4584 
4585 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4586 	if (min_free_kbytes < 128)
4587 		min_free_kbytes = 128;
4588 	if (min_free_kbytes > 65536)
4589 		min_free_kbytes = 65536;
4590 	setup_per_zone_wmarks();
4591 	setup_per_zone_lowmem_reserve();
4592 	setup_per_zone_inactive_ratio();
4593 	return 0;
4594 }
4595 module_init(init_per_zone_wmark_min)
4596 
4597 /*
4598  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4599  *	that we can call two helper functions whenever min_free_kbytes
4600  *	changes.
4601  */
4602 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4603 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4604 {
4605 	proc_dointvec(table, write, file, buffer, length, ppos);
4606 	if (write)
4607 		setup_per_zone_wmarks();
4608 	return 0;
4609 }
4610 
4611 #ifdef CONFIG_NUMA
4612 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4613 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4614 {
4615 	struct zone *zone;
4616 	int rc;
4617 
4618 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4619 	if (rc)
4620 		return rc;
4621 
4622 	for_each_zone(zone)
4623 		zone->min_unmapped_pages = (zone->present_pages *
4624 				sysctl_min_unmapped_ratio) / 100;
4625 	return 0;
4626 }
4627 
4628 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4629 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4630 {
4631 	struct zone *zone;
4632 	int rc;
4633 
4634 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4635 	if (rc)
4636 		return rc;
4637 
4638 	for_each_zone(zone)
4639 		zone->min_slab_pages = (zone->present_pages *
4640 				sysctl_min_slab_ratio) / 100;
4641 	return 0;
4642 }
4643 #endif
4644 
4645 /*
4646  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4647  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4648  *	whenever sysctl_lowmem_reserve_ratio changes.
4649  *
4650  * The reserve ratio obviously has absolutely no relation with the
4651  * minimum watermarks. The lowmem reserve ratio can only make sense
4652  * if in function of the boot time zone sizes.
4653  */
4654 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4655 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4656 {
4657 	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4658 	setup_per_zone_lowmem_reserve();
4659 	return 0;
4660 }
4661 
4662 /*
4663  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4664  * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
4665  * can have before it gets flushed back to the buddy allocator.
4666  */
4667 
4668 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4669 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4670 {
4671 	struct zone *zone;
4672 	unsigned int cpu;
4673 	int ret;
4674 
4675 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4676 	if (!write || (ret == -EINVAL))
4677 		return ret;
4678 	for_each_populated_zone(zone) {
4679 		for_each_online_cpu(cpu) {
4680 			unsigned long  high;
4681 			high = zone->present_pages / percpu_pagelist_fraction;
4682 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4683 		}
4684 	}
4685 	return 0;
4686 }
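
/*
 * Illustrative example: writing 8 to this sysctl makes a zone of 262144
 * pages (1GiB with 4KiB pages) use pcp->high = 32768 pages for every
 * online CPU's hot pagelist in that zone.
 */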
4687 
4688 int hashdist = HASHDIST_DEFAULT;
4689 
4690 #ifdef CONFIG_NUMA
4691 static int __init set_hashdist(char *str)
4692 {
4693 	if (!str)
4694 		return 0;
4695 	hashdist = simple_strtoul(str, &str, 0);
4696 	return 1;
4697 }
4698 __setup("hashdist=", set_hashdist);
4699 #endif
4700 
4701 /*
4702  * allocate a large system hash table from bootmem
4703  * - it is assumed that the hash table must contain an exact power-of-2
4704  *   quantity of entries
4705  * - limit is the number of hash buckets, not the total allocation size
4706  */
4707 void *__init alloc_large_system_hash(const char *tablename,
4708 				     unsigned long bucketsize,
4709 				     unsigned long numentries,
4710 				     int scale,
4711 				     int flags,
4712 				     unsigned int *_hash_shift,
4713 				     unsigned int *_hash_mask,
4714 				     unsigned long limit)
4715 {
4716 	unsigned long long max = limit;
4717 	unsigned long log2qty, size;
4718 	void *table = NULL;
4719 
4720 	/* allow the kernel cmdline to have a say */
4721 	if (!numentries) {
4722 		/* round applicable memory size up to nearest megabyte */
4723 		numentries = nr_kernel_pages;
4724 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4725 		numentries >>= 20 - PAGE_SHIFT;
4726 		numentries <<= 20 - PAGE_SHIFT;
4727 
4728 		/* limit to 1 bucket per 2^scale bytes of low memory */
4729 		if (scale > PAGE_SHIFT)
4730 			numentries >>= (scale - PAGE_SHIFT);
4731 		else
4732 			numentries <<= (PAGE_SHIFT - scale);
4733 
4734 		/* Make sure we've got at least a 0-order allocation.. */
4735 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4736 			numentries = PAGE_SIZE / bucketsize;
4737 	}
4738 	numentries = roundup_pow_of_two(numentries);
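	/*
	 * Worked example (illustrative, assuming 4KiB pages and no value on
	 * the command line): with nr_kernel_pages of 262144 (1GiB), scale ==
	 * 14 and an 8-byte bucket, the megabyte rounding leaves 262144, the
	 * scale shift gives 262144 >> 2 = 65536 buckets, already a power of
	 * two, so log2qty becomes 16 and the table needs 8 << 16 = 512KiB.
	 */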
4739 
4740 	/* limit allocation size to 1/16 total memory by default */
4741 	if (max == 0) {
4742 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4743 		do_div(max, bucketsize);
4744 	}
4745 
4746 	if (numentries > max)
4747 		numentries = max;
4748 
4749 	log2qty = ilog2(numentries);
4750 
4751 	do {
4752 		size = bucketsize << log2qty;
4753 		if (flags & HASH_EARLY)
4754 			table = alloc_bootmem_nopanic(size);
4755 		else if (hashdist)
4756 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4757 		else {
4758 			/*
4759 			 * If bucketsize is not a power of two, we may free
4760 			 * some pages at the end of the hash table, which
4761 			 * alloc_pages_exact() does automatically
4762 			 */
4763 			if (get_order(size) < MAX_ORDER) {
4764 				table = alloc_pages_exact(size, GFP_ATOMIC);
4765 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4766 			}
4767 		}
4768 	} while (!table && size > PAGE_SIZE && --log2qty);
4769 
4770 	if (!table)
4771 		panic("Failed to allocate %s hash table\n", tablename);
4772 
4773 	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4774 	       tablename,
4775 	       (1U << log2qty),
4776 	       ilog2(size) - PAGE_SHIFT,
4777 	       size);
4778 
4779 	if (_hash_shift)
4780 		*_hash_shift = log2qty;
4781 	if (_hash_mask)
4782 		*_hash_mask = (1 << log2qty) - 1;
4783 
4784 	return table;
4785 }
4786 
4787 /* Return a pointer to the bitmap storing bits affecting a block of pages */
4788 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4789 							unsigned long pfn)
4790 {
4791 #ifdef CONFIG_SPARSEMEM
4792 	return __pfn_to_section(pfn)->pageblock_flags;
4793 #else
4794 	return zone->pageblock_flags;
4795 #endif /* CONFIG_SPARSEMEM */
4796 }
4797 
4798 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4799 {
4800 #ifdef CONFIG_SPARSEMEM
4801 	pfn &= (PAGES_PER_SECTION-1);
4802 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4803 #else
4804 	pfn = pfn - zone->zone_start_pfn;
4805 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4806 #endif /* CONFIG_SPARSEMEM */
4807 }
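
/*
 * Worked example (assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS ==
 * 4): on FLATMEM with zone_start_pfn == 0, pfn 0x12345 falls in pageblock
 * 0x91, so its flags start at bit index 0x91 * 4 = 580 of the zone's
 * pageblock_flags bitmap.  On SPARSEMEM the pfn is first reduced modulo
 * PAGES_PER_SECTION and indexed into that section's bitmap.
 */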
4808 
4809 /**
4810  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4811  * @page: The page within the block of interest
4812  * @start_bitidx: The first bit of interest to retrieve
4813  * @end_bitidx: The last bit of interest
4814  * returns pageblock_bits flags
4815  */
4816 unsigned long get_pageblock_flags_group(struct page *page,
4817 					int start_bitidx, int end_bitidx)
4818 {
4819 	struct zone *zone;
4820 	unsigned long *bitmap;
4821 	unsigned long pfn, bitidx;
4822 	unsigned long flags = 0;
4823 	unsigned long value = 1;
4824 
4825 	zone = page_zone(page);
4826 	pfn = page_to_pfn(page);
4827 	bitmap = get_pageblock_bitmap(zone, pfn);
4828 	bitidx = pfn_to_bitidx(zone, pfn);
4829 
4830 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4831 		if (test_bit(bitidx + start_bitidx, bitmap))
4832 			flags |= value;
4833 
4834 	return flags;
4835 }
4836 
4837 /**
4838  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4839  * @page: The page within the block of interest
4840  * @start_bitidx: The first bit of interest
4841  * @end_bitidx: The last bit of interest
4842  * @flags: The flags to set
4843  */
4844 void set_pageblock_flags_group(struct page *page, unsigned long flags,
4845 					int start_bitidx, int end_bitidx)
4846 {
4847 	struct zone *zone;
4848 	unsigned long *bitmap;
4849 	unsigned long pfn, bitidx;
4850 	unsigned long value = 1;
4851 
4852 	zone = page_zone(page);
4853 	pfn = page_to_pfn(page);
4854 	bitmap = get_pageblock_bitmap(zone, pfn);
4855 	bitidx = pfn_to_bitidx(zone, pfn);
4856 	VM_BUG_ON(pfn < zone->zone_start_pfn);
4857 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4858 
4859 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4860 		if (flags & value)
4861 			__set_bit(bitidx + start_bitidx, bitmap);
4862 		else
4863 			__clear_bit(bitidx + start_bitidx, bitmap);
4864 }
4865 
4866 /*
4867  * This is designed as a helper function; please see page_isolation.c too.
4868  * It sets/clears a page block's migratetype to/from ISOLATE.
4869  * The page allocator never allocates memory from an ISOLATE block.
4870  */
4871 
4872 int set_migratetype_isolate(struct page *page)
4873 {
4874 	struct zone *zone;
4875 	unsigned long flags;
4876 	int ret = -EBUSY;
4877 
4878 	zone = page_zone(page);
4879 	spin_lock_irqsave(&zone->lock, flags);
4880 	/*
4881 	 * In the future, more migrate types will be able to be isolation targets.
4882 	 */
4883 	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4884 		goto out;
4885 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4886 	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4887 	ret = 0;
4888 out:
4889 	spin_unlock_irqrestore(&zone->lock, flags);
4890 	if (!ret)
4891 		drain_all_pages();
4892 	return ret;
4893 }
4894 
4895 void unset_migratetype_isolate(struct page *page)
4896 {
4897 	struct zone *zone;
4898 	unsigned long flags;
4899 	zone = page_zone(page);
4900 	spin_lock_irqsave(&zone->lock, flags);
4901 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4902 		goto out;
4903 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4904 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4905 out:
4906 	spin_unlock_irqrestore(&zone->lock, flags);
4907 }
4908 
4909 #ifdef CONFIG_MEMORY_HOTREMOVE
4910 /*
4911  * All pages in the range must be isolated before calling this.
4912  */
4913 void
4914 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4915 {
4916 	struct page *page;
4917 	struct zone *zone;
4918 	int order, i;
4919 	unsigned long pfn;
4920 	unsigned long flags;
4921 	/* find the first valid pfn */
4922 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4923 		if (pfn_valid(pfn))
4924 			break;
4925 	if (pfn == end_pfn)
4926 		return;
4927 	zone = page_zone(pfn_to_page(pfn));
4928 	spin_lock_irqsave(&zone->lock, flags);
4929 	pfn = start_pfn;
4930 	while (pfn < end_pfn) {
4931 		if (!pfn_valid(pfn)) {
4932 			pfn++;
4933 			continue;
4934 		}
4935 		page = pfn_to_page(pfn);
4936 		BUG_ON(page_count(page));
4937 		BUG_ON(!PageBuddy(page));
4938 		order = page_order(page);
4939 #ifdef CONFIG_DEBUG_VM
4940 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4941 		       pfn, 1 << order, end_pfn);
4942 #endif
4943 		list_del(&page->lru);
4944 		rmv_page_order(page);
4945 		zone->free_area[order].nr_free--;
4946 		__mod_zone_page_state(zone, NR_FREE_PAGES,
4947 				      - (1UL << order));
4948 		for (i = 0; i < (1 << order); i++)
4949 			SetPageReserved((page+i));
4950 		pfn += (1 << order);
4951 	}
4952 	spin_unlock_irqrestore(&zone->lock, flags);
4953 }
4954 #endif
4955