xref: /openbmc/linux/mm/page_alloc.c (revision 78c99ba1)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/suspend.h>
28 #include <linux/pagevec.h>
29 #include <linux/blkdev.h>
30 #include <linux/slab.h>
31 #include <linux/oom.h>
32 #include <linux/notifier.h>
33 #include <linux/topology.h>
34 #include <linux/sysctl.h>
35 #include <linux/cpu.h>
36 #include <linux/cpuset.h>
37 #include <linux/memory_hotplug.h>
38 #include <linux/nodemask.h>
39 #include <linux/vmalloc.h>
40 #include <linux/mempolicy.h>
41 #include <linux/stop_machine.h>
42 #include <linux/sort.h>
43 #include <linux/pfn.h>
44 #include <linux/backing-dev.h>
45 #include <linux/fault-inject.h>
46 #include <linux/page-isolation.h>
47 #include <linux/page_cgroup.h>
48 #include <linux/debugobjects.h>
49 #include <linux/kmemleak.h>
50 
51 #include <asm/tlbflush.h>
52 #include <asm/div64.h>
53 #include "internal.h"
54 
55 /*
56  * Array of node states.
57  */
58 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
59 	[N_POSSIBLE] = NODE_MASK_ALL,
60 	[N_ONLINE] = { { [0] = 1UL } },
61 #ifndef CONFIG_NUMA
62 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
63 #ifdef CONFIG_HIGHMEM
64 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
65 #endif
66 	[N_CPU] = { { [0] = 1UL } },
67 #endif	/* NUMA */
68 };
69 EXPORT_SYMBOL(node_states);
70 
71 unsigned long totalram_pages __read_mostly;
72 unsigned long totalreserve_pages __read_mostly;
73 unsigned long highest_memmap_pfn __read_mostly;
74 int percpu_pagelist_fraction;
75 
76 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
77 int pageblock_order __read_mostly;
78 #endif
79 
80 static void __free_pages_ok(struct page *page, unsigned int order);
81 
82 /*
83  * results with 256, 32 in the lowmem_reserve sysctl:
84  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
85  *	1G machine -> (16M dma, 784M normal, 224M high)
86  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
87  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
88  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
89  *
90  * TBD: should special case ZONE_DMA32 machines here - in those we normally
91  * don't need any ZONE_NORMAL reservation
92  */
93 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
94 #ifdef CONFIG_ZONE_DMA
95 	 256,
96 #endif
97 #ifdef CONFIG_ZONE_DMA32
98 	 256,
99 #endif
100 #ifdef CONFIG_HIGHMEM
101 	 32,
102 #endif
103 	 32,
104 };
105 
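/*
 * Illustrative sketch (not part of the original file): how one entry of
 * sysctl_lowmem_reserve_ratio is applied.  The real calculation lives in
 * setup_per_zone_lowmem_reserve(); the hypothetical helper below only
 * renders the arithmetic for a single lower zone protecting itself from
 * allocations that could have been satisfied by the zones above it.
 */
#if 0	/* example only */
static unsigned long example_lowmem_reserve(unsigned long pages_above,
					    int ratio)
{
	/*
	 * With 784M of ZONE_NORMAL above a 16M ZONE_DMA and a ratio of
	 * 256, NORMAL allocations leave roughly 784M/256 ~= 3M of
	 * ZONE_DMA pages reserved.
	 */
	return ratio ? pages_above / ratio : 0;
}
#endif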
106 EXPORT_SYMBOL(totalram_pages);
107 
108 static char * const zone_names[MAX_NR_ZONES] = {
109 #ifdef CONFIG_ZONE_DMA
110 	 "DMA",
111 #endif
112 #ifdef CONFIG_ZONE_DMA32
113 	 "DMA32",
114 #endif
115 	 "Normal",
116 #ifdef CONFIG_HIGHMEM
117 	 "HighMem",
118 #endif
119 	 "Movable",
120 };
121 
122 int min_free_kbytes = 1024;
123 
124 unsigned long __meminitdata nr_kernel_pages;
125 unsigned long __meminitdata nr_all_pages;
126 static unsigned long __meminitdata dma_reserve;
127 
128 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
129   /*
130    * MAX_ACTIVE_REGIONS determines the maximum number of distinct
131    * ranges of memory (RAM) that may be registered with add_active_range().
132    * Ranges passed to add_active_range() will be merged if possible
133    * so the number of times add_active_range() can be called is
134    * related to the number of nodes and the number of holes
135    */
136   #ifdef CONFIG_MAX_ACTIVE_REGIONS
137     /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
138     #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
139   #else
140     #if MAX_NUMNODES >= 32
141       /* If there can be many nodes, allow up to 50 holes per node */
142       #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
143     #else
144       /* By default, allow up to 256 distinct regions */
145       #define MAX_ACTIVE_REGIONS 256
146     #endif
147   #endif
148 
149   static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
150   static int __meminitdata nr_nodemap_entries;
151   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
152   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
153   static unsigned long __initdata required_kernelcore;
154   static unsigned long __initdata required_movablecore;
155   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
156 
157   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
158   int movable_zone;
159   EXPORT_SYMBOL(movable_zone);
160 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
161 
162 #if MAX_NUMNODES > 1
163 int nr_node_ids __read_mostly = MAX_NUMNODES;
164 EXPORT_SYMBOL(nr_node_ids);
165 #endif
166 
167 int page_group_by_mobility_disabled __read_mostly;
168 
169 static void set_pageblock_migratetype(struct page *page, int migratetype)
170 {
171 	set_pageblock_flags_group(page, (unsigned long)migratetype,
172 					PB_migrate, PB_migrate_end);
173 }
174 
175 #ifdef CONFIG_DEBUG_VM
176 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
177 {
178 	int ret = 0;
179 	unsigned seq;
180 	unsigned long pfn = page_to_pfn(page);
181 
182 	do {
183 		seq = zone_span_seqbegin(zone);
184 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
185 			ret = 1;
186 		else if (pfn < zone->zone_start_pfn)
187 			ret = 1;
188 	} while (zone_span_seqretry(zone, seq));
189 
190 	return ret;
191 }
192 
193 static int page_is_consistent(struct zone *zone, struct page *page)
194 {
195 	if (!pfn_valid_within(page_to_pfn(page)))
196 		return 0;
197 	if (zone != page_zone(page))
198 		return 0;
199 
200 	return 1;
201 }
202 /*
203  * Temporary debugging check for pages not lying within a given zone.
204  */
205 static int bad_range(struct zone *zone, struct page *page)
206 {
207 	if (page_outside_zone_boundaries(zone, page))
208 		return 1;
209 	if (!page_is_consistent(zone, page))
210 		return 1;
211 
212 	return 0;
213 }
214 #else
215 static inline int bad_range(struct zone *zone, struct page *page)
216 {
217 	return 0;
218 }
219 #endif
220 
221 static void bad_page(struct page *page)
222 {
223 	static unsigned long resume;
224 	static unsigned long nr_shown;
225 	static unsigned long nr_unshown;
226 
227 	/*
228 	 * Allow a burst of 60 reports, then keep quiet for that minute;
229 	 * or allow a steady drip of one report per second.
230 	 */
231 	if (nr_shown == 60) {
232 		if (time_before(jiffies, resume)) {
233 			nr_unshown++;
234 			goto out;
235 		}
236 		if (nr_unshown) {
237 			printk(KERN_ALERT
238 			      "BUG: Bad page state: %lu messages suppressed\n",
239 				nr_unshown);
240 			nr_unshown = 0;
241 		}
242 		nr_shown = 0;
243 	}
244 	if (nr_shown++ == 0)
245 		resume = jiffies + 60 * HZ;
246 
247 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
248 		current->comm, page_to_pfn(page));
249 	printk(KERN_ALERT
250 		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
251 		page, (void *)page->flags, page_count(page),
252 		page_mapcount(page), page->mapping, page->index);
253 
254 	dump_stack();
255 out:
256 	/* Leave bad fields for debug, except PageBuddy could make trouble */
257 	__ClearPageBuddy(page);
258 	add_taint(TAINT_BAD_PAGE);
259 }
260 
261 /*
262  * Higher-order pages are called "compound pages".  They are structured thusly:
263  *
264  * The first PAGE_SIZE page is called the "head page".
265  *
266  * The remaining PAGE_SIZE pages are called "tail pages".
267  *
268  * All pages have PG_compound set.  All tail pages have their ->first_page
269  * pointing at the head page.
270  *
271  * The first tail page's ->lru.next holds the address of the compound page's
272  * put_page() function.  Its ->lru.prev holds the order of allocation.
273  * This usage means that zero-order pages may not be compound.
274  */
275 
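/*
 * Illustrative sketch (not part of the original file): given any sub-page
 * of a compound page built by prep_compound_page() below, the head page
 * can be recovered from ->first_page and the order from the head.  This
 * hypothetical helper mirrors what compound_head()/compound_order() do.
 */
#if 0	/* example only */
static struct page *example_compound_head(struct page *page)
{
	/* Tail pages carry a pointer back to the head in ->first_page. */
	if (PageTail(page))
		return page->first_page;
	return page;
}
#endif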
276 static void free_compound_page(struct page *page)
277 {
278 	__free_pages_ok(page, compound_order(page));
279 }
280 
281 void prep_compound_page(struct page *page, unsigned long order)
282 {
283 	int i;
284 	int nr_pages = 1 << order;
285 
286 	set_compound_page_dtor(page, free_compound_page);
287 	set_compound_order(page, order);
288 	__SetPageHead(page);
289 	for (i = 1; i < nr_pages; i++) {
290 		struct page *p = page + i;
291 
292 		__SetPageTail(p);
293 		p->first_page = page;
294 	}
295 }
296 
297 #ifdef CONFIG_HUGETLBFS
298 void prep_compound_gigantic_page(struct page *page, unsigned long order)
299 {
300 	int i;
301 	int nr_pages = 1 << order;
302 	struct page *p = page + 1;
303 
304 	set_compound_page_dtor(page, free_compound_page);
305 	set_compound_order(page, order);
306 	__SetPageHead(page);
307 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
308 		__SetPageTail(p);
309 		p->first_page = page;
310 	}
311 }
312 #endif
313 
314 static int destroy_compound_page(struct page *page, unsigned long order)
315 {
316 	int i;
317 	int nr_pages = 1 << order;
318 	int bad = 0;
319 
320 	if (unlikely(compound_order(page) != order) ||
321 	    unlikely(!PageHead(page))) {
322 		bad_page(page);
323 		bad++;
324 	}
325 
326 	__ClearPageHead(page);
327 
328 	for (i = 1; i < nr_pages; i++) {
329 		struct page *p = page + i;
330 
331 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
332 			bad_page(page);
333 			bad++;
334 		}
335 		__ClearPageTail(p);
336 	}
337 
338 	return bad;
339 }
340 
341 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
342 {
343 	int i;
344 
345 	/*
346 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
347 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
348 	 */
349 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
350 	for (i = 0; i < (1 << order); i++)
351 		clear_highpage(page + i);
352 }
353 
354 static inline void set_page_order(struct page *page, int order)
355 {
356 	set_page_private(page, order);
357 	__SetPageBuddy(page);
358 }
359 
360 static inline void rmv_page_order(struct page *page)
361 {
362 	__ClearPageBuddy(page);
363 	set_page_private(page, 0);
364 }
365 
366 /*
367  * Locate the struct page for both the matching buddy in our
368  * pair (buddy1) and the combined order-(O+1) page they form (page).
369  *
370  * 1) Any buddy B1 will have an order O twin B2 which satisfies
371  * the following equation:
372  *     B2 = B1 ^ (1 << O)
373  * For example, if the starting buddy (B1) is #8 its order
374  * 1 buddy (B2) is #10:
375  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
376  *
377  * 2) Any buddy B will have an order O+1 parent P which
378  * satisfies the following equation:
379  *     P = B & ~(1 << O)
380  *
381  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
382  */
383 static inline struct page *
384 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
385 {
386 	unsigned long buddy_idx = page_idx ^ (1 << order);
387 
388 	return page + (buddy_idx - page_idx);
389 }
390 
391 static inline unsigned long
392 __find_combined_index(unsigned long page_idx, unsigned int order)
393 {
394 	return (page_idx & ~(1 << order));
395 }
396 
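/*
 * Worked example (not part of the original file) for the two helpers
 * above, with page_idx = 8 and order = 1:
 *
 *	buddy_idx    = 8 ^ (1 << 1)  = 10
 *	combined_idx = 8 & ~(1 << 1) = 8
 *
 * i.e. pages 8-9 and 10-11 are order-1 buddies, and merging them yields
 * the order-2 block starting at index 8.
 */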
397 /*
398  * This function checks whether a page is free && is the buddy
399  * we can coalesce with: a page and its buddy can be coalesced if
400  * (a) the buddy is not in a hole &&
401  * (b) the buddy is in the buddy system &&
402  * (c) a page and its buddy have the same order &&
403  * (d) a page and its buddy are in the same zone.
404  *
405  * For recording whether a page is in the buddy system, we use PG_buddy.
406  * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
407  *
408  * For recording page's order, we use page_private(page).
409  */
410 static inline int page_is_buddy(struct page *page, struct page *buddy,
411 								int order)
412 {
413 	if (!pfn_valid_within(page_to_pfn(buddy)))
414 		return 0;
415 
416 	if (page_zone_id(page) != page_zone_id(buddy))
417 		return 0;
418 
419 	if (PageBuddy(buddy) && page_order(buddy) == order) {
420 		BUG_ON(page_count(buddy) != 0);
421 		return 1;
422 	}
423 	return 0;
424 }
425 
426 /*
427  * Freeing function for a buddy system allocator.
428  *
429  * The concept of a buddy system is to maintain a direct-mapped table
430  * (containing bit values) for memory blocks of various "orders".
431  * The bottom level table contains the map for the smallest allocatable
432  * units of memory (here, pages), and each level above it describes
433  * pairs of units from the levels below, hence, "buddies".
434  * At a high level, all that happens here is marking the table entry
435  * at the bottom level available, and propagating the changes upward
436  * as necessary, plus some accounting needed to play nicely with other
437  * parts of the VM system.
438  * At each level, we keep a list of pages, which are heads of contiguous
439  * runs of free pages of length (1 << order) and marked with PG_buddy. A
440  * page's order is recorded in the page_private(page) field.
441  * So when we are allocating or freeing one, we can derive the state of the
442  * other.  That is, if we allocate a small block, and both were
443  * free, the remainder of the region must be split into blocks.
444  * If a block is freed, and its buddy is also free, then this
445  * triggers coalescing into a block of larger size.
446  *
447  * -- wli
448  */
449 
450 static inline void __free_one_page(struct page *page,
451 		struct zone *zone, unsigned int order)
452 {
453 	unsigned long page_idx;
454 	int order_size = 1 << order;
455 	int migratetype = get_pageblock_migratetype(page);
456 
457 	if (unlikely(PageCompound(page)))
458 		if (unlikely(destroy_compound_page(page, order)))
459 			return;
460 
461 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
462 
463 	VM_BUG_ON(page_idx & (order_size - 1));
464 	VM_BUG_ON(bad_range(zone, page));
465 
466 	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
467 	while (order < MAX_ORDER-1) {
468 		unsigned long combined_idx;
469 		struct page *buddy;
470 
471 		buddy = __page_find_buddy(page, page_idx, order);
472 		if (!page_is_buddy(page, buddy, order))
473 			break;
474 
475 		/* Our buddy is free, merge with it and move up one order. */
476 		list_del(&buddy->lru);
477 		zone->free_area[order].nr_free--;
478 		rmv_page_order(buddy);
479 		combined_idx = __find_combined_index(page_idx, order);
480 		page = page + (combined_idx - page_idx);
481 		page_idx = combined_idx;
482 		order++;
483 	}
484 	set_page_order(page, order);
485 	list_add(&page->lru,
486 		&zone->free_area[order].free_list[migratetype]);
487 	zone->free_area[order].nr_free++;
488 }
489 
490 static inline int free_pages_check(struct page *page)
491 {
492 	free_page_mlock(page);
493 	if (unlikely(page_mapcount(page) |
494 		(page->mapping != NULL)  |
495 		(page_count(page) != 0)  |
496 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
497 		bad_page(page);
498 		return 1;
499 	}
500 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
501 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
502 	return 0;
503 }
504 
505 /*
506  * Frees a list of pages.
507  * Assumes all pages on list are in same zone, and of same order.
508  * count is the number of pages to free.
509  *
510  * If the zone was previously in an "all pages pinned" state then look to
511  * see if this freeing clears that state.
512  *
513  * And clear the zone's pages_scanned counter, to hold off the "all pages are
514  * pinned" detection logic.
515  */
516 static void free_pages_bulk(struct zone *zone, int count,
517 					struct list_head *list, int order)
518 {
519 	spin_lock(&zone->lock);
520 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
521 	zone->pages_scanned = 0;
522 	while (count--) {
523 		struct page *page;
524 
525 		VM_BUG_ON(list_empty(list));
526 		page = list_entry(list->prev, struct page, lru);
527 		/* have to delete it as __free_one_page list manipulates */
528 		list_del(&page->lru);
529 		__free_one_page(page, zone, order);
530 	}
531 	spin_unlock(&zone->lock);
532 }
533 
534 static void free_one_page(struct zone *zone, struct page *page, int order)
535 {
536 	spin_lock(&zone->lock);
537 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
538 	zone->pages_scanned = 0;
539 	__free_one_page(page, zone, order);
540 	spin_unlock(&zone->lock);
541 }
542 
543 static void __free_pages_ok(struct page *page, unsigned int order)
544 {
545 	unsigned long flags;
546 	int i;
547 	int bad = 0;
548 
549 	for (i = 0 ; i < (1 << order) ; ++i)
550 		bad += free_pages_check(page + i);
551 	if (bad)
552 		return;
553 
554 	if (!PageHighMem(page)) {
555 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
556 		debug_check_no_obj_freed(page_address(page),
557 					   PAGE_SIZE << order);
558 	}
559 	arch_free_page(page, order);
560 	kernel_map_pages(page, 1 << order, 0);
561 
562 	local_irq_save(flags);
563 	__count_vm_events(PGFREE, 1 << order);
564 	free_one_page(page_zone(page), page, order);
565 	local_irq_restore(flags);
566 }
567 
568 /*
569  * permit the bootmem allocator to evade page validation on high-order frees
570  */
571 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
572 {
573 	if (order == 0) {
574 		__ClearPageReserved(page);
575 		set_page_count(page, 0);
576 		set_page_refcounted(page);
577 		__free_page(page);
578 	} else {
579 		int loop;
580 
581 		prefetchw(page);
582 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
583 			struct page *p = &page[loop];
584 
585 			if (loop + 1 < BITS_PER_LONG)
586 				prefetchw(p + 1);
587 			__ClearPageReserved(p);
588 			set_page_count(p, 0);
589 		}
590 
591 		set_page_refcounted(page);
592 		__free_pages(page, order);
593 	}
594 }
595 
596 
597 /*
598  * The order of subdivision here is critical for the IO subsystem.
599  * Please do not alter this order without good reasons and regression
600  * testing. Specifically, as large blocks of memory are subdivided,
601  * the order in which smaller blocks are delivered depends on the order
602  * they're subdivided in this function. This is the primary factor
603  * influencing the order in which pages are delivered to the IO
604  * subsystem according to empirical testing, and this is also justified
605  * by considering the behavior of a buddy system containing a single
606  * large block of memory acted on by a series of small allocations.
607  * This behavior is a critical factor in sglist merging's success.
608  *
609  * -- wli
610  */
611 static inline void expand(struct zone *zone, struct page *page,
612 	int low, int high, struct free_area *area,
613 	int migratetype)
614 {
615 	unsigned long size = 1 << high;
616 
617 	while (high > low) {
618 		area--;
619 		high--;
620 		size >>= 1;
621 		VM_BUG_ON(bad_range(zone, &page[size]));
622 		list_add(&page[size].lru, &area->free_list[migratetype]);
623 		area->nr_free++;
624 		set_page_order(&page[size], high);
625 	}
626 }
627 
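/*
 * Illustrative walk-through (not part of the original file) of expand()
 * above: satisfying an order-0 request (low = 0) from an order-3 block
 * (high = 3) splits off and frees the unused halves on the way down:
 *
 *	high = 2: pages [4..7] go to the order-2 free list
 *	high = 1: pages [2..3] go to the order-1 free list
 *	high = 0: page  [1]    goes to the order-0 free list
 *
 * leaving page [0] to be returned to the caller.
 */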
628 /*
629  * This page is about to be returned from the page allocator
630  */
631 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
632 {
633 	if (unlikely(page_mapcount(page) |
634 		(page->mapping != NULL)  |
635 		(page_count(page) != 0)  |
636 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
637 		bad_page(page);
638 		return 1;
639 	}
640 
641 	set_page_private(page, 0);
642 	set_page_refcounted(page);
643 
644 	arch_alloc_page(page, order);
645 	kernel_map_pages(page, 1 << order, 1);
646 
647 	if (gfp_flags & __GFP_ZERO)
648 		prep_zero_page(page, order, gfp_flags);
649 
650 	if (order && (gfp_flags & __GFP_COMP))
651 		prep_compound_page(page, order);
652 
653 	return 0;
654 }
655 
656 /*
657  * Go through the free lists for the given migratetype and remove
658  * the smallest available page from the freelists
659  */
660 static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
661 						int migratetype)
662 {
663 	unsigned int current_order;
664 	struct free_area * area;
665 	struct page *page;
666 
667 	/* Find a page of the appropriate size in the preferred list */
668 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
669 		area = &(zone->free_area[current_order]);
670 		if (list_empty(&area->free_list[migratetype]))
671 			continue;
672 
673 		page = list_entry(area->free_list[migratetype].next,
674 							struct page, lru);
675 		list_del(&page->lru);
676 		rmv_page_order(page);
677 		area->nr_free--;
678 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
679 		expand(zone, page, order, current_order, area, migratetype);
680 		return page;
681 	}
682 
683 	return NULL;
684 }
685 
686 
687 /*
688  * This array describes the order lists are fallen back to when
689  * the free lists for the desirable migrate type are depleted
690  */
691 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
692 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
693 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
694 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
695 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
696 };
697 
698 /*
699  * Move the free pages in a range to the free lists of the requested type.
700  * Note that start_page and end_page are not aligned on a pageblock
701  * boundary. If alignment is required, use move_freepages_block()
702  */
703 static int move_freepages(struct zone *zone,
704 			  struct page *start_page, struct page *end_page,
705 			  int migratetype)
706 {
707 	struct page *page;
708 	unsigned long order;
709 	int pages_moved = 0;
710 
711 #ifndef CONFIG_HOLES_IN_ZONE
712 	/*
713 	 * page_zone is not safe to call in this context when
714 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
715 	 * anyway as we check zone boundaries in move_freepages_block().
716 	 * Remove at a later date when no bug reports exist related to
717 	 * grouping pages by mobility
718 	 */
719 	BUG_ON(page_zone(start_page) != page_zone(end_page));
720 #endif
721 
722 	for (page = start_page; page <= end_page;) {
723 		/* Make sure we are not inadvertently changing nodes */
724 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
725 
726 		if (!pfn_valid_within(page_to_pfn(page))) {
727 			page++;
728 			continue;
729 		}
730 
731 		if (!PageBuddy(page)) {
732 			page++;
733 			continue;
734 		}
735 
736 		order = page_order(page);
737 		list_del(&page->lru);
738 		list_add(&page->lru,
739 			&zone->free_area[order].free_list[migratetype]);
740 		page += 1 << order;
741 		pages_moved += 1 << order;
742 	}
743 
744 	return pages_moved;
745 }
746 
747 static int move_freepages_block(struct zone *zone, struct page *page,
748 				int migratetype)
749 {
750 	unsigned long start_pfn, end_pfn;
751 	struct page *start_page, *end_page;
752 
753 	start_pfn = page_to_pfn(page);
754 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
755 	start_page = pfn_to_page(start_pfn);
756 	end_page = start_page + pageblock_nr_pages - 1;
757 	end_pfn = start_pfn + pageblock_nr_pages - 1;
758 
759 	/* Do not cross zone boundaries */
760 	if (start_pfn < zone->zone_start_pfn)
761 		start_page = page;
762 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
763 		return 0;
764 
765 	return move_freepages(zone, start_page, end_page, migratetype);
766 }
767 
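/*
 * Worked example (not part of the original file) of the alignment done by
 * move_freepages_block() above: assuming pageblock_nr_pages = 1024, a page
 * at pfn 5000 yields start_pfn = 4096 and end_pfn = 5119, so the whole
 * surrounding pageblock is moved as a unit.
 */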
768 /* Remove an element from the buddy allocator from the fallback list */
769 static struct page *__rmqueue_fallback(struct zone *zone, int order,
770 						int start_migratetype)
771 {
772 	struct free_area * area;
773 	int current_order;
774 	struct page *page;
775 	int migratetype, i;
776 
777 	/* Find the largest possible block of pages in the other list */
778 	for (current_order = MAX_ORDER-1; current_order >= order;
779 						--current_order) {
780 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
781 			migratetype = fallbacks[start_migratetype][i];
782 
783 			/* MIGRATE_RESERVE handled later if necessary */
784 			if (migratetype == MIGRATE_RESERVE)
785 				continue;
786 
787 			area = &(zone->free_area[current_order]);
788 			if (list_empty(&area->free_list[migratetype]))
789 				continue;
790 
791 			page = list_entry(area->free_list[migratetype].next,
792 					struct page, lru);
793 			area->nr_free--;
794 
795 			/*
796 			 * If breaking a large block of pages, move all free
797 			 * pages to the preferred allocation list. If falling
798 			 * back for a reclaimable kernel allocation, be more
799 			 * aggressive about taking ownership of free pages
800 			 */
801 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
802 					start_migratetype == MIGRATE_RECLAIMABLE) {
803 				unsigned long pages;
804 				pages = move_freepages_block(zone, page,
805 								start_migratetype);
806 
807 				/* Claim the whole block if over half of it is free */
808 				if (pages >= (1 << (pageblock_order-1)))
809 					set_pageblock_migratetype(page,
810 								start_migratetype);
811 
812 				migratetype = start_migratetype;
813 			}
814 
815 			/* Remove the page from the freelists */
816 			list_del(&page->lru);
817 			rmv_page_order(page);
818 			__mod_zone_page_state(zone, NR_FREE_PAGES,
819 							-(1UL << order));
820 
821 			if (current_order == pageblock_order)
822 				set_pageblock_migratetype(page,
823 							start_migratetype);
824 
825 			expand(zone, page, order, current_order, area, migratetype);
826 			return page;
827 		}
828 	}
829 
830 	/* Use MIGRATE_RESERVE rather than fail an allocation */
831 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
832 }
833 
834 /*
835  * Do the hard work of removing an element from the buddy allocator.
836  * Call me with the zone->lock already held.
837  */
838 static struct page *__rmqueue(struct zone *zone, unsigned int order,
839 						int migratetype)
840 {
841 	struct page *page;
842 
843 	page = __rmqueue_smallest(zone, order, migratetype);
844 
845 	if (unlikely(!page))
846 		page = __rmqueue_fallback(zone, order, migratetype);
847 
848 	return page;
849 }
850 
851 /*
852  * Obtain a specified number of elements from the buddy allocator, all under
853  * a single hold of the lock, for efficiency.  Add them to the supplied list.
854  * Returns the number of new pages which were placed at *list.
855  */
856 static int rmqueue_bulk(struct zone *zone, unsigned int order,
857 			unsigned long count, struct list_head *list,
858 			int migratetype)
859 {
860 	int i;
861 
862 	spin_lock(&zone->lock);
863 	for (i = 0; i < count; ++i) {
864 		struct page *page = __rmqueue(zone, order, migratetype);
865 		if (unlikely(page == NULL))
866 			break;
867 
868 		/*
869 		 * Split buddy pages returned by expand() are received here
870 		 * in physical page order. The page is added to the caller's
871 		 * list and the list head then moves forward. From the caller's
872 		 * perspective, the linked list is ordered by page number under
873 		 * some conditions. This is useful for IO devices that can
874 		 * merge IO requests if the physical pages are ordered
875 		 * properly.
876 		 */
877 		list_add(&page->lru, list);
878 		set_page_private(page, migratetype);
879 		list = &page->lru;
880 	}
881 	spin_unlock(&zone->lock);
882 	return i;
883 }
884 
885 #ifdef CONFIG_NUMA
886 /*
887  * Called from the vmstat counter updater to drain pagesets of the
888  * currently executing processor on remote nodes after they have
889  * expired.
890  *
891  * Note that this function must be called with the thread pinned to
892  * a single processor.
893  */
894 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
895 {
896 	unsigned long flags;
897 	int to_drain;
898 
899 	local_irq_save(flags);
900 	if (pcp->count >= pcp->batch)
901 		to_drain = pcp->batch;
902 	else
903 		to_drain = pcp->count;
904 	free_pages_bulk(zone, to_drain, &pcp->list, 0);
905 	pcp->count -= to_drain;
906 	local_irq_restore(flags);
907 }
908 #endif
909 
910 /*
911  * Drain pages of the indicated processor.
912  *
913  * The processor must either be the current processor and the
914  * thread pinned to the current processor or a processor that
915  * is not online.
916  */
917 static void drain_pages(unsigned int cpu)
918 {
919 	unsigned long flags;
920 	struct zone *zone;
921 
922 	for_each_populated_zone(zone) {
923 		struct per_cpu_pageset *pset;
924 		struct per_cpu_pages *pcp;
925 
926 		pset = zone_pcp(zone, cpu);
927 
928 		pcp = &pset->pcp;
929 		local_irq_save(flags);
930 		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
931 		pcp->count = 0;
932 		local_irq_restore(flags);
933 	}
934 }
935 
936 /*
937  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
938  */
939 void drain_local_pages(void *arg)
940 {
941 	drain_pages(smp_processor_id());
942 }
943 
944 /*
945  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
946  */
947 void drain_all_pages(void)
948 {
949 	on_each_cpu(drain_local_pages, NULL, 1);
950 }
951 
952 #ifdef CONFIG_HIBERNATION
953 
954 void mark_free_pages(struct zone *zone)
955 {
956 	unsigned long pfn, max_zone_pfn;
957 	unsigned long flags;
958 	int order, t;
959 	struct list_head *curr;
960 
961 	if (!zone->spanned_pages)
962 		return;
963 
964 	spin_lock_irqsave(&zone->lock, flags);
965 
966 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
967 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
968 		if (pfn_valid(pfn)) {
969 			struct page *page = pfn_to_page(pfn);
970 
971 			if (!swsusp_page_is_forbidden(page))
972 				swsusp_unset_page_free(page);
973 		}
974 
975 	for_each_migratetype_order(order, t) {
976 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
977 			unsigned long i;
978 
979 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
980 			for (i = 0; i < (1UL << order); i++)
981 				swsusp_set_page_free(pfn_to_page(pfn + i));
982 		}
983 	}
984 	spin_unlock_irqrestore(&zone->lock, flags);
985 }
986 #endif /* CONFIG_HIBERNATION */
987 
988 /*
989  * Free a 0-order page
990  */
991 static void free_hot_cold_page(struct page *page, int cold)
992 {
993 	struct zone *zone = page_zone(page);
994 	struct per_cpu_pages *pcp;
995 	unsigned long flags;
996 
997 	if (PageAnon(page))
998 		page->mapping = NULL;
999 	if (free_pages_check(page))
1000 		return;
1001 
1002 	if (!PageHighMem(page)) {
1003 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1004 		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1005 	}
1006 	arch_free_page(page, 0);
1007 	kernel_map_pages(page, 1, 0);
1008 
1009 	pcp = &zone_pcp(zone, get_cpu())->pcp;
1010 	local_irq_save(flags);
1011 	__count_vm_event(PGFREE);
1012 	if (cold)
1013 		list_add_tail(&page->lru, &pcp->list);
1014 	else
1015 		list_add(&page->lru, &pcp->list);
1016 	set_page_private(page, get_pageblock_migratetype(page));
1017 	pcp->count++;
1018 	if (pcp->count >= pcp->high) {
1019 		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1020 		pcp->count -= pcp->batch;
1021 	}
1022 	local_irq_restore(flags);
1023 	put_cpu();
1024 }
1025 
1026 void free_hot_page(struct page *page)
1027 {
1028 	free_hot_cold_page(page, 0);
1029 }
1030 
1031 void free_cold_page(struct page *page)
1032 {
1033 	free_hot_cold_page(page, 1);
1034 }
1035 
1036 /*
1037  * split_page takes a non-compound higher-order page, and splits it into
1038  * n (1<<order) sub-pages: page[0..n-1]
1039  * Each sub-page must be freed individually.
1040  *
1041  * Note: this is probably too low level an operation for use in drivers.
1042  * Please consult with lkml before using this in your driver.
1043  */
1044 void split_page(struct page *page, unsigned int order)
1045 {
1046 	int i;
1047 
1048 	VM_BUG_ON(PageCompound(page));
1049 	VM_BUG_ON(!page_count(page));
1050 	for (i = 1; i < (1 << order); i++)
1051 		set_page_refcounted(page + i);
1052 }
1053 
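/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that wants individually-freeable pages from one contiguous allocation.
 */
#if 0	/* example only */
static void example_split_page_usage(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 pages */
	int i;

	if (!page)
		return;
	split_page(page, 2);
	/* Each sub-page now holds its own reference and is freed alone. */
	for (i = 0; i < 4; i++)
		__free_page(page + i);
}
#endif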
1054 /*
1055  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1056  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1057  * or two.
1058  */
1059 static struct page *buffered_rmqueue(struct zone *preferred_zone,
1060 			struct zone *zone, int order, gfp_t gfp_flags)
1061 {
1062 	unsigned long flags;
1063 	struct page *page;
1064 	int cold = !!(gfp_flags & __GFP_COLD);
1065 	int cpu;
1066 	int migratetype = allocflags_to_migratetype(gfp_flags);
1067 
1068 again:
1069 	cpu  = get_cpu();
1070 	if (likely(order == 0)) {
1071 		struct per_cpu_pages *pcp;
1072 
1073 		pcp = &zone_pcp(zone, cpu)->pcp;
1074 		local_irq_save(flags);
1075 		if (!pcp->count) {
1076 			pcp->count = rmqueue_bulk(zone, 0,
1077 					pcp->batch, &pcp->list, migratetype);
1078 			if (unlikely(!pcp->count))
1079 				goto failed;
1080 		}
1081 
1082 		/* Find a page of the appropriate migrate type */
1083 		if (cold) {
1084 			list_for_each_entry_reverse(page, &pcp->list, lru)
1085 				if (page_private(page) == migratetype)
1086 					break;
1087 		} else {
1088 			list_for_each_entry(page, &pcp->list, lru)
1089 				if (page_private(page) == migratetype)
1090 					break;
1091 		}
1092 
1093 		/* Allocate more to the pcp list if necessary */
1094 		if (unlikely(&page->lru == &pcp->list)) {
1095 			pcp->count += rmqueue_bulk(zone, 0,
1096 					pcp->batch, &pcp->list, migratetype);
1097 			page = list_entry(pcp->list.next, struct page, lru);
1098 		}
1099 
1100 		list_del(&page->lru);
1101 		pcp->count--;
1102 	} else {
1103 		spin_lock_irqsave(&zone->lock, flags);
1104 		page = __rmqueue(zone, order, migratetype);
1105 		spin_unlock(&zone->lock);
1106 		if (!page)
1107 			goto failed;
1108 	}
1109 
1110 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1111 	zone_statistics(preferred_zone, zone);
1112 	local_irq_restore(flags);
1113 	put_cpu();
1114 
1115 	VM_BUG_ON(bad_range(zone, page));
1116 	if (prep_new_page(page, order, gfp_flags))
1117 		goto again;
1118 	return page;
1119 
1120 failed:
1121 	local_irq_restore(flags);
1122 	put_cpu();
1123 	return NULL;
1124 }
1125 
1126 #define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
1127 #define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
1128 #define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
1129 #define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
1130 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1131 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1132 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1133 
1134 #ifdef CONFIG_FAIL_PAGE_ALLOC
1135 
1136 static struct fail_page_alloc_attr {
1137 	struct fault_attr attr;
1138 
1139 	u32 ignore_gfp_highmem;
1140 	u32 ignore_gfp_wait;
1141 	u32 min_order;
1142 
1143 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1144 
1145 	struct dentry *ignore_gfp_highmem_file;
1146 	struct dentry *ignore_gfp_wait_file;
1147 	struct dentry *min_order_file;
1148 
1149 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1150 
1151 } fail_page_alloc = {
1152 	.attr = FAULT_ATTR_INITIALIZER,
1153 	.ignore_gfp_wait = 1,
1154 	.ignore_gfp_highmem = 1,
1155 	.min_order = 1,
1156 };
1157 
1158 static int __init setup_fail_page_alloc(char *str)
1159 {
1160 	return setup_fault_attr(&fail_page_alloc.attr, str);
1161 }
1162 __setup("fail_page_alloc=", setup_fail_page_alloc);
1163 
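/*
 * Usage note (not part of the original file): setup_fault_attr() parses
 * four comma-separated values, so booting with e.g.
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * is read as interval=1, probability=10, space=0, times=-1 (fail roughly
 * 10% of eligible allocations, with no limit on the number of failures).
 * Since min_order defaults to 1 above, order-0 allocations are exempt
 * until min-order is lowered via the debugfs knob below.
 */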
1164 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1165 {
1166 	if (order < fail_page_alloc.min_order)
1167 		return 0;
1168 	if (gfp_mask & __GFP_NOFAIL)
1169 		return 0;
1170 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1171 		return 0;
1172 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1173 		return 0;
1174 
1175 	return should_fail(&fail_page_alloc.attr, 1 << order);
1176 }
1177 
1178 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1179 
1180 static int __init fail_page_alloc_debugfs(void)
1181 {
1182 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1183 	struct dentry *dir;
1184 	int err;
1185 
1186 	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1187 				       "fail_page_alloc");
1188 	if (err)
1189 		return err;
1190 	dir = fail_page_alloc.attr.dentries.dir;
1191 
1192 	fail_page_alloc.ignore_gfp_wait_file =
1193 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1194 				      &fail_page_alloc.ignore_gfp_wait);
1195 
1196 	fail_page_alloc.ignore_gfp_highmem_file =
1197 		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1198 				      &fail_page_alloc.ignore_gfp_highmem);
1199 	fail_page_alloc.min_order_file =
1200 		debugfs_create_u32("min-order", mode, dir,
1201 				   &fail_page_alloc.min_order);
1202 
1203 	if (!fail_page_alloc.ignore_gfp_wait_file ||
1204 	    !fail_page_alloc.ignore_gfp_highmem_file ||
1205 	    !fail_page_alloc.min_order_file) {
1206 		err = -ENOMEM;
1207 		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1208 		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1209 		debugfs_remove(fail_page_alloc.min_order_file);
1210 		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1211 	}
1212 
1213 	return err;
1214 }
1215 
1216 late_initcall(fail_page_alloc_debugfs);
1217 
1218 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1219 
1220 #else /* CONFIG_FAIL_PAGE_ALLOC */
1221 
1222 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1223 {
1224 	return 0;
1225 }
1226 
1227 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1228 
1229 /*
1230  * Return 1 if free pages are above 'mark'. This takes into account the order
1231  * of the allocation.
1232  */
1233 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1234 		      int classzone_idx, int alloc_flags)
1235 {
1236 	/* free_pages may go negative - that's OK */
1237 	long min = mark;
1238 	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1239 	int o;
1240 
1241 	if (alloc_flags & ALLOC_HIGH)
1242 		min -= min / 2;
1243 	if (alloc_flags & ALLOC_HARDER)
1244 		min -= min / 4;
1245 
1246 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1247 		return 0;
1248 	for (o = 0; o < order; o++) {
1249 		/* At the next order, this order's pages become unavailable */
1250 		free_pages -= z->free_area[o].nr_free << o;
1251 
1252 		/* Require fewer higher order pages to be free */
1253 		min >>= 1;
1254 
1255 		if (free_pages <= min)
1256 			return 0;
1257 	}
1258 	return 1;
1259 }
1260 
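/*
 * Worked example (not part of the original file) of the check above for
 * an order-2 request with mark = 1024 pages, no ALLOC_HIGH/ALLOC_HARDER
 * adjustment and a zero lowmem_reserve: first the full 1024-page mark
 * must be met, then
 *
 *	o = 0: subtract the order-0 free pages, require free > 512
 *	o = 1: also subtract order-1 free pages, require free > 256
 *
 * so plenty of total free memory can still fail the test if it is all
 * fragmented into order-0 and order-1 blocks.
 */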
1261 #ifdef CONFIG_NUMA
1262 /*
1263  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1264  * skip over zones that are not allowed by the cpuset, or that have
1265  * been recently (in last second) found to be nearly full.  See further
1266  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1267  * that have to skip over a lot of full or unallowed zones.
1268  *
1269  * If the zonelist cache is present in the passed in zonelist, then
1270  * returns a pointer to the allowed node mask (either the current
1271  * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1272  *
1273  * If the zonelist cache is not available for this zonelist, does
1274  * nothing and returns NULL.
1275  *
1276  * If the fullzones BITMAP in the zonelist cache is stale (more than
1277  * a second since last zap'd) then we zap it out (clear its bits.)
1278  *
1279  * We hold off even calling zlc_setup, until after we've checked the
1280  * first zone in the zonelist, on the theory that most allocations will
1281  * be satisfied from that first zone, so best to examine that zone as
1282  * quickly as we can.
1283  */
1284 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1285 {
1286 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1287 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1288 
1289 	zlc = zonelist->zlcache_ptr;
1290 	if (!zlc)
1291 		return NULL;
1292 
1293 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1294 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1295 		zlc->last_full_zap = jiffies;
1296 	}
1297 
1298 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1299 					&cpuset_current_mems_allowed :
1300 					&node_states[N_HIGH_MEMORY];
1301 	return allowednodes;
1302 }
1303 
1304 /*
1305  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1306  * if it is worth looking at further for free memory:
1307  *  1) Check that the zone isn't thought to be full (doesn't have its
1308  *     bit set in the zonelist_cache fullzones BITMAP).
1309  *  2) Check that the zone's node (obtained from the zonelist_cache
1310  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1311  * Return true (non-zero) if zone is worth looking at further, or
1312  * else return false (zero) if it is not.
1313  *
1314  * This check -ignores- the distinction between various watermarks,
1315  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1316  * found to be full for any variation of these watermarks, it will
1317  * be considered full for up to one second by all requests, unless
1318  * we are so low on memory on all allowed nodes that we are forced
1319  * into the second scan of the zonelist.
1320  *
1321  * In the second scan we ignore this zonelist cache and exactly
1322  * apply the watermarks to all zones, even if it is slower to do so.
1323  * We are low on memory in the second scan, and should leave no stone
1324  * unturned looking for a free page.
1325  */
1326 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1327 						nodemask_t *allowednodes)
1328 {
1329 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1330 	int i;				/* index of *z in zonelist zones */
1331 	int n;				/* node that zone *z is on */
1332 
1333 	zlc = zonelist->zlcache_ptr;
1334 	if (!zlc)
1335 		return 1;
1336 
1337 	i = z - zonelist->_zonerefs;
1338 	n = zlc->z_to_n[i];
1339 
1340 	/* This zone is worth trying if it is allowed but not full */
1341 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1342 }
1343 
1344 /*
1345  * Given 'z' scanning a zonelist, set the corresponding bit in
1346  * zlc->fullzones, so that subsequent attempts to allocate a page
1347  * from that zone don't waste time re-examining it.
1348  */
1349 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1350 {
1351 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1352 	int i;				/* index of *z in zonelist zones */
1353 
1354 	zlc = zonelist->zlcache_ptr;
1355 	if (!zlc)
1356 		return;
1357 
1358 	i = z - zonelist->_zonerefs;
1359 
1360 	set_bit(i, zlc->fullzones);
1361 }
1362 
1363 #else	/* CONFIG_NUMA */
1364 
1365 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1366 {
1367 	return NULL;
1368 }
1369 
1370 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1371 				nodemask_t *allowednodes)
1372 {
1373 	return 1;
1374 }
1375 
1376 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1377 {
1378 }
1379 #endif	/* CONFIG_NUMA */
1380 
1381 /*
1382  * get_page_from_freelist goes through the zonelist trying to allocate
1383  * a page.
1384  */
1385 static struct page *
1386 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1387 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
1388 {
1389 	struct zoneref *z;
1390 	struct page *page = NULL;
1391 	int classzone_idx;
1392 	struct zone *zone, *preferred_zone;
1393 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1394 	int zlc_active = 0;		/* set if using zonelist_cache */
1395 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1396 
1397 	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1398 							&preferred_zone);
1399 	if (!preferred_zone)
1400 		return NULL;
1401 
1402 	classzone_idx = zone_idx(preferred_zone);
1403 
1404 zonelist_scan:
1405 	/*
1406 	 * Scan zonelist, looking for a zone with enough free.
1407 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1408 	 */
1409 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1410 						high_zoneidx, nodemask) {
1411 		if (NUMA_BUILD && zlc_active &&
1412 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1413 				continue;
1414 		if ((alloc_flags & ALLOC_CPUSET) &&
1415 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1416 				goto try_next_zone;
1417 
1418 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1419 			unsigned long mark;
1420 			if (alloc_flags & ALLOC_WMARK_MIN)
1421 				mark = zone->pages_min;
1422 			else if (alloc_flags & ALLOC_WMARK_LOW)
1423 				mark = zone->pages_low;
1424 			else
1425 				mark = zone->pages_high;
1426 			if (!zone_watermark_ok(zone, order, mark,
1427 				    classzone_idx, alloc_flags)) {
1428 				if (!zone_reclaim_mode ||
1429 				    !zone_reclaim(zone, gfp_mask, order))
1430 					goto this_zone_full;
1431 			}
1432 		}
1433 
1434 		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
1435 		if (page)
1436 			break;
1437 this_zone_full:
1438 		if (NUMA_BUILD)
1439 			zlc_mark_zone_full(zonelist, z);
1440 try_next_zone:
1441 		if (NUMA_BUILD && !did_zlc_setup) {
1442 			/* we do zlc_setup after the first zone is tried */
1443 			allowednodes = zlc_setup(zonelist, alloc_flags);
1444 			zlc_active = 1;
1445 			did_zlc_setup = 1;
1446 		}
1447 	}
1448 
1449 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1450 		/* Disable zlc cache for second zonelist scan */
1451 		zlc_active = 0;
1452 		goto zonelist_scan;
1453 	}
1454 	return page;
1455 }
1456 
1457 /*
1458  * This is the 'heart' of the zoned buddy allocator.
1459  */
1460 struct page *
1461 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1462 			struct zonelist *zonelist, nodemask_t *nodemask)
1463 {
1464 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1465 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1466 	struct zoneref *z;
1467 	struct zone *zone;
1468 	struct page *page;
1469 	struct reclaim_state reclaim_state;
1470 	struct task_struct *p = current;
1471 	int do_retry;
1472 	int alloc_flags;
1473 	unsigned long did_some_progress;
1474 	unsigned long pages_reclaimed = 0;
1475 
1476 	lockdep_trace_alloc(gfp_mask);
1477 
1478 	might_sleep_if(wait);
1479 
1480 	if (should_fail_alloc_page(gfp_mask, order))
1481 		return NULL;
1482 
1483 restart:
1484 	z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
1485 
1486 	if (unlikely(!z->zone)) {
1487 		/*
1488 		 * Happens if we have an empty zonelist as a result of
1489 		 * GFP_THISNODE being used on a memoryless node
1490 		 */
1491 		return NULL;
1492 	}
1493 
1494 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1495 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1496 	if (page)
1497 		goto got_pg;
1498 
1499 	/*
1500 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1501 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1502 	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1503 	 * using a larger set of nodes after it has established that the
1504 	 * allowed per node queues are empty and that nodes are
1505 	 * over allocated.
1506 	 */
1507 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1508 		goto nopage;
1509 
1510 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1511 		wakeup_kswapd(zone, order);
1512 
1513 	/*
1514 	 * OK, we're below the kswapd watermark and have kicked background
1515 	 * reclaim. Now things get more complex, so set up alloc_flags according
1516 	 * to how we want to proceed.
1517 	 *
1518 	 * The caller may dip into page reserves a bit more if the caller
1519 	 * cannot run direct reclaim, or if the caller has realtime scheduling
1520 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1521 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1522 	 */
1523 	alloc_flags = ALLOC_WMARK_MIN;
1524 	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1525 		alloc_flags |= ALLOC_HARDER;
1526 	if (gfp_mask & __GFP_HIGH)
1527 		alloc_flags |= ALLOC_HIGH;
1528 	if (wait)
1529 		alloc_flags |= ALLOC_CPUSET;
1530 
1531 	/*
1532 	 * Go through the zonelist again. Let __GFP_HIGH and allocations
1533 	 * coming from realtime tasks go deeper into reserves.
1534 	 *
1535 	 * This is the last chance, in general, before the goto nopage.
1536 	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1537 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1538 	 */
1539 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1540 						high_zoneidx, alloc_flags);
1541 	if (page)
1542 		goto got_pg;
1543 
1544 	/* This allocation should allow future memory freeing. */
1545 
1546 rebalance:
1547 	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1548 			&& !in_interrupt()) {
1549 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1550 nofail_alloc:
1551 			/* go through the zonelist yet again, ignoring mins */
1552 			page = get_page_from_freelist(gfp_mask, nodemask, order,
1553 				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1554 			if (page)
1555 				goto got_pg;
1556 			if (gfp_mask & __GFP_NOFAIL) {
1557 				congestion_wait(WRITE, HZ/50);
1558 				goto nofail_alloc;
1559 			}
1560 		}
1561 		goto nopage;
1562 	}
1563 
1564 	/* Atomic allocations - we can't balance anything */
1565 	if (!wait)
1566 		goto nopage;
1567 
1568 	cond_resched();
1569 
1570 	/* We now go into synchronous reclaim */
1571 	cpuset_memory_pressure_bump();
1572 	/*
1573 	 * The task's cpuset might have expanded its set of allowable nodes
1574 	 */
1575 	cpuset_update_task_memory_state();
1576 	p->flags |= PF_MEMALLOC;
1577 
1578 	lockdep_set_current_reclaim_state(gfp_mask);
1579 	reclaim_state.reclaimed_slab = 0;
1580 	p->reclaim_state = &reclaim_state;
1581 
1582 	did_some_progress = try_to_free_pages(zonelist, order,
1583 						gfp_mask, nodemask);
1584 
1585 	p->reclaim_state = NULL;
1586 	lockdep_clear_current_reclaim_state();
1587 	p->flags &= ~PF_MEMALLOC;
1588 
1589 	cond_resched();
1590 
1591 	if (order != 0)
1592 		drain_all_pages();
1593 
1594 	if (likely(did_some_progress)) {
1595 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1596 					zonelist, high_zoneidx, alloc_flags);
1597 		if (page)
1598 			goto got_pg;
1599 	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1600 		if (!try_set_zone_oom(zonelist, gfp_mask)) {
1601 			schedule_timeout_uninterruptible(1);
1602 			goto restart;
1603 		}
1604 
1605 		/*
1606 		 * Go through the zonelist yet one more time, keep
1607 		 * very high watermark here, this is only to catch
1608 		 * a parallel oom killing, we must fail if we're still
1609 		 * under heavy pressure.
1610 		 */
1611 		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1612 			order, zonelist, high_zoneidx,
1613 			ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1614 		if (page) {
1615 			clear_zonelist_oom(zonelist, gfp_mask);
1616 			goto got_pg;
1617 		}
1618 
1619 		/* The OOM killer will not help higher order allocs so fail */
1620 		if (order > PAGE_ALLOC_COSTLY_ORDER) {
1621 			clear_zonelist_oom(zonelist, gfp_mask);
1622 			goto nopage;
1623 		}
1624 
1625 		out_of_memory(zonelist, gfp_mask, order);
1626 		clear_zonelist_oom(zonelist, gfp_mask);
1627 		goto restart;
1628 	}
1629 
1630 	/*
1631 	 * Don't let big-order allocations loop unless the caller explicitly
1632 	 * requests that.  Wait for some write requests to complete then retry.
1633 	 *
1634 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1635 	 * means __GFP_NOFAIL, but that may not be true in other
1636 	 * implementations.
1637 	 *
1638 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1639 	 * specified, then we retry until we no longer reclaim any pages
1640 	 * (above), or we've reclaimed an order of pages at least as
1641 	 * large as the allocation's order. In both cases, if the
1642 	 * allocation still fails, we stop retrying.
1643 	 */
1644 	pages_reclaimed += did_some_progress;
1645 	do_retry = 0;
1646 	if (!(gfp_mask & __GFP_NORETRY)) {
1647 		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1648 			do_retry = 1;
1649 		} else {
1650 			if (gfp_mask & __GFP_REPEAT &&
1651 				pages_reclaimed < (1 << order))
1652 					do_retry = 1;
1653 		}
1654 		if (gfp_mask & __GFP_NOFAIL)
1655 			do_retry = 1;
1656 	}
1657 	if (do_retry) {
1658 		congestion_wait(WRITE, HZ/50);
1659 		goto rebalance;
1660 	}
1661 
1662 nopage:
1663 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1664 		printk(KERN_WARNING "%s: page allocation failure."
1665 			" order:%d, mode:0x%x\n",
1666 			p->comm, order, gfp_mask);
1667 		dump_stack();
1668 		show_mem();
1669 	}
1670 got_pg:
1671 	return page;
1672 }
1673 EXPORT_SYMBOL(__alloc_pages_internal);
1674 
1675 /*
1676  * Common helper functions.
1677  */
1678 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1679 {
1680 	struct page * page;
1681 	page = alloc_pages(gfp_mask, order);
1682 	if (!page)
1683 		return 0;
1684 	return (unsigned long) page_address(page);
1685 }
1686 
1687 EXPORT_SYMBOL(__get_free_pages);
1688 
1689 unsigned long get_zeroed_page(gfp_t gfp_mask)
1690 {
1691 	struct page * page;
1692 
1693 	/*
1694 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
1695 	 * a highmem page
1696 	 */
1697 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1698 
1699 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1700 	if (page)
1701 		return (unsigned long) page_address(page);
1702 	return 0;
1703 }
1704 
1705 EXPORT_SYMBOL(get_zeroed_page);
1706 
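/*
 * Illustrative usage sketch (not part of the original file): the common
 * pattern of pairing these helpers with free_page()/free_pages().
 */
#if 0	/* example only */
static void example_get_free_pages_usage(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */
	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);

	if (addr)
		free_pages(addr, 1);
	if (zeroed)
		free_page(zeroed);
}
#endif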
1707 void __pagevec_free(struct pagevec *pvec)
1708 {
1709 	int i = pagevec_count(pvec);
1710 
1711 	while (--i >= 0)
1712 		free_hot_cold_page(pvec->pages[i], pvec->cold);
1713 }
1714 
1715 void __free_pages(struct page *page, unsigned int order)
1716 {
1717 	if (put_page_testzero(page)) {
1718 		if (order == 0)
1719 			free_hot_page(page);
1720 		else
1721 			__free_pages_ok(page, order);
1722 	}
1723 }
1724 
1725 EXPORT_SYMBOL(__free_pages);
1726 
1727 void free_pages(unsigned long addr, unsigned int order)
1728 {
1729 	if (addr != 0) {
1730 		VM_BUG_ON(!virt_addr_valid((void *)addr));
1731 		__free_pages(virt_to_page((void *)addr), order);
1732 	}
1733 }
1734 
1735 EXPORT_SYMBOL(free_pages);
1736 
1737 /**
1738  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
1739  * @size: the number of bytes to allocate
1740  * @gfp_mask: GFP flags for the allocation
1741  *
1742  * This function is similar to alloc_pages(), except that it allocates the
1743  * minimum number of pages to satisfy the request.  alloc_pages() can only
1744  * allocate memory in power-of-two pages.
1745  *
1746  * This function is also limited by MAX_ORDER.
1747  *
1748  * Memory allocated by this function must be released by free_pages_exact().
1749  */
1750 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1751 {
1752 	unsigned int order = get_order(size);
1753 	unsigned long addr;
1754 
1755 	addr = __get_free_pages(gfp_mask, order);
1756 	if (addr) {
1757 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
1758 		unsigned long used = addr + PAGE_ALIGN(size);
1759 
1760 		split_page(virt_to_page(addr), order);
1761 		while (used < alloc_end) {
1762 			free_page(used);
1763 			used += PAGE_SIZE;
1764 		}
1765 	}
1766 
1767 	return (void *)addr;
1768 }
1769 EXPORT_SYMBOL(alloc_pages_exact);
1770 
1771 /**
1772  * free_pages_exact - release memory allocated via alloc_pages_exact()
1773  * @virt: the value returned by alloc_pages_exact.
1774  * @size: size of allocation, same value as passed to alloc_pages_exact().
1775  *
1776  * Release the memory allocated by a previous call to alloc_pages_exact.
1777  */
1778 void free_pages_exact(void *virt, size_t size)
1779 {
1780 	unsigned long addr = (unsigned long)virt;
1781 	unsigned long end = addr + PAGE_ALIGN(size);
1782 
1783 	while (addr < end) {
1784 		free_page(addr);
1785 		addr += PAGE_SIZE;
1786 	}
1787 }
1788 EXPORT_SYMBOL(free_pages_exact);
1789 
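/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * 5-page buffer without rounding up to the order-3 (8 page) allocation
 * that alloc_pages() would otherwise require.
 */
#if 0	/* example only */
static void example_alloc_pages_exact_usage(void)
{
	size_t size = 5 * PAGE_SIZE;
	void *buf = alloc_pages_exact(size, GFP_KERNEL);

	if (!buf)
		return;
	/* ... use the physically contiguous buffer ... */
	free_pages_exact(buf, size);
}
#endif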
1790 static unsigned int nr_free_zone_pages(int offset)
1791 {
1792 	struct zoneref *z;
1793 	struct zone *zone;
1794 
1795 	/* Just pick one node, since fallback list is circular */
1796 	unsigned int sum = 0;
1797 
1798 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1799 
1800 	for_each_zone_zonelist(zone, z, zonelist, offset) {
1801 		unsigned long size = zone->present_pages;
1802 		unsigned long high = zone->pages_high;
1803 		if (size > high)
1804 			sum += size - high;
1805 	}
1806 
1807 	return sum;
1808 }
1809 
1810 /*
1811  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1812  */
1813 unsigned int nr_free_buffer_pages(void)
1814 {
1815 	return nr_free_zone_pages(gfp_zone(GFP_USER));
1816 }
1817 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1818 
1819 /*
1820  * Amount of free RAM allocatable within all zones
1821  */
1822 unsigned int nr_free_pagecache_pages(void)
1823 {
1824 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1825 }
1826 
1827 static inline void show_node(struct zone *zone)
1828 {
1829 	if (NUMA_BUILD)
1830 		printk("Node %d ", zone_to_nid(zone));
1831 }
1832 
1833 void si_meminfo(struct sysinfo *val)
1834 {
1835 	val->totalram = totalram_pages;
1836 	val->sharedram = 0;
1837 	val->freeram = global_page_state(NR_FREE_PAGES);
1838 	val->bufferram = nr_blockdev_pages();
1839 	val->totalhigh = totalhigh_pages;
1840 	val->freehigh = nr_free_highpages();
1841 	val->mem_unit = PAGE_SIZE;
1842 }
1843 
1844 EXPORT_SYMBOL(si_meminfo);
1845 
1846 #ifdef CONFIG_NUMA
1847 void si_meminfo_node(struct sysinfo *val, int nid)
1848 {
1849 	pg_data_t *pgdat = NODE_DATA(nid);
1850 
1851 	val->totalram = pgdat->node_present_pages;
1852 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
1853 #ifdef CONFIG_HIGHMEM
1854 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1855 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1856 			NR_FREE_PAGES);
1857 #else
1858 	val->totalhigh = 0;
1859 	val->freehigh = 0;
1860 #endif
1861 	val->mem_unit = PAGE_SIZE;
1862 }
1863 #endif
1864 
1865 #define K(x) ((x) << (PAGE_SHIFT-10))
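/* For example, with 4KiB pages (PAGE_SHIFT == 12), K(25) == 100 KiB. */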
1866 
1867 /*
1868  * Show the free area list (used by show_mem(), e.g. for SysRq-m and OOM
1869  * reports): per-cpu pageset state, global VM counters, per-zone statistics
1870  * and the per-order free block counts, from which fragmentation can be judged.
1871  */
1872 void show_free_areas(void)
1873 {
1874 	int cpu;
1875 	struct zone *zone;
1876 
1877 	for_each_populated_zone(zone) {
1878 		show_node(zone);
1879 		printk("%s per-cpu:\n", zone->name);
1880 
1881 		for_each_online_cpu(cpu) {
1882 			struct per_cpu_pageset *pageset;
1883 
1884 			pageset = zone_pcp(zone, cpu);
1885 
1886 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1887 			       cpu, pageset->pcp.high,
1888 			       pageset->pcp.batch, pageset->pcp.count);
1889 		}
1890 	}
1891 
1892 	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
1893 		" inactive_file:%lu"
1894 //TODO:  check/adjust line lengths
1895 #ifdef CONFIG_UNEVICTABLE_LRU
1896 		" unevictable:%lu"
1897 #endif
1898 		" dirty:%lu writeback:%lu unstable:%lu\n"
1899 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1900 		global_page_state(NR_ACTIVE_ANON),
1901 		global_page_state(NR_ACTIVE_FILE),
1902 		global_page_state(NR_INACTIVE_ANON),
1903 		global_page_state(NR_INACTIVE_FILE),
1904 #ifdef CONFIG_UNEVICTABLE_LRU
1905 		global_page_state(NR_UNEVICTABLE),
1906 #endif
1907 		global_page_state(NR_FILE_DIRTY),
1908 		global_page_state(NR_WRITEBACK),
1909 		global_page_state(NR_UNSTABLE_NFS),
1910 		global_page_state(NR_FREE_PAGES),
1911 		global_page_state(NR_SLAB_RECLAIMABLE) +
1912 			global_page_state(NR_SLAB_UNRECLAIMABLE),
1913 		global_page_state(NR_FILE_MAPPED),
1914 		global_page_state(NR_PAGETABLE),
1915 		global_page_state(NR_BOUNCE));
1916 
1917 	for_each_populated_zone(zone) {
1918 		int i;
1919 
1920 		show_node(zone);
1921 		printk("%s"
1922 			" free:%lukB"
1923 			" min:%lukB"
1924 			" low:%lukB"
1925 			" high:%lukB"
1926 			" active_anon:%lukB"
1927 			" inactive_anon:%lukB"
1928 			" active_file:%lukB"
1929 			" inactive_file:%lukB"
1930 #ifdef CONFIG_UNEVICTABLE_LRU
1931 			" unevictable:%lukB"
1932 #endif
1933 			" present:%lukB"
1934 			" pages_scanned:%lu"
1935 			" all_unreclaimable? %s"
1936 			"\n",
1937 			zone->name,
1938 			K(zone_page_state(zone, NR_FREE_PAGES)),
1939 			K(zone->pages_min),
1940 			K(zone->pages_low),
1941 			K(zone->pages_high),
1942 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
1943 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
1944 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
1945 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
1946 #ifdef CONFIG_UNEVICTABLE_LRU
1947 			K(zone_page_state(zone, NR_UNEVICTABLE)),
1948 #endif
1949 			K(zone->present_pages),
1950 			zone->pages_scanned,
1951 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
1952 			);
1953 		printk("lowmem_reserve[]:");
1954 		for (i = 0; i < MAX_NR_ZONES; i++)
1955 			printk(" %lu", zone->lowmem_reserve[i]);
1956 		printk("\n");
1957 	}
1958 
1959 	for_each_populated_zone(zone) {
1960  		unsigned long nr[MAX_ORDER], flags, order, total = 0;
1961 
1962 		show_node(zone);
1963 		printk("%s: ", zone->name);
1964 
1965 		spin_lock_irqsave(&zone->lock, flags);
1966 		for (order = 0; order < MAX_ORDER; order++) {
1967 			nr[order] = zone->free_area[order].nr_free;
1968 			total += nr[order] << order;
1969 		}
1970 		spin_unlock_irqrestore(&zone->lock, flags);
1971 		for (order = 0; order < MAX_ORDER; order++)
1972 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
1973 		printk("= %lukB\n", K(total));
1974 	}
1975 
1976 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1977 
1978 	show_swap_cache_info();
1979 }
1980 
1981 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1982 {
1983 	zoneref->zone = zone;
1984 	zoneref->zone_idx = zone_idx(zone);
1985 }
1986 
1987 /*
1988  * Builds allocation fallback zone lists.
1989  *
1990  * Add all populated zones of a node to the zonelist.
1991  */
1992 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1993 				int nr_zones, enum zone_type zone_type)
1994 {
1995 	struct zone *zone;
1996 
1997 	BUG_ON(zone_type >= MAX_NR_ZONES);
1998 	zone_type++;
1999 
2000 	do {
2001 		zone_type--;
2002 		zone = pgdat->node_zones + zone_type;
2003 		if (populated_zone(zone)) {
2004 			zoneref_set_zone(zone,
2005 				&zonelist->_zonerefs[nr_zones++]);
2006 			check_highest_zone(zone_type);
2007 		}
2008 
2009 	} while (zone_type);
2010 	return nr_zones;
2011 }
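/*
 * For example (illustrative): called with zone_type == MAX_NR_ZONES - 1
 * on a node whose DMA and Normal zones are both populated, the loop
 * above walks downwards and appends Normal first, then DMA, so the
 * fallback runs from the most general zone to the most constrained one.
 */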
2012 
2013 
2014 /*
2015  *  zonelist_order:
2016  *  0 = automatic detection of better ordering.
2017  *  1 = order by ([node] distance, -zonetype)
2018  *  2 = order by (-zonetype, [node] distance)
2019  *
2020  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2021  *  the same zonelist, so this parameter is only configurable on NUMA systems.
2022  */
2023 #define ZONELIST_ORDER_DEFAULT  0
2024 #define ZONELIST_ORDER_NODE     1
2025 #define ZONELIST_ORDER_ZONE     2
2026 
2027 /* zonelist order in the kernel.
2028  * set_zonelist_order() will set this to NODE or ZONE.
2029  */
2030 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2031 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2032 
2033 
2034 #ifdef CONFIG_NUMA
2035 /* The ordering the user asked for, set via the command line or sysctl */
2036 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2037 /* string for sysctl */
2038 #define NUMA_ZONELIST_ORDER_LEN	16
2039 char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";
2040 
2041 /*
2042  * Interface for configuring zonelist ordering.
2043  * Command line option "numa_zonelist_order"
2044  *	= "[dD]efault"	- default, automatic configuration.
2045  *	= "[nN]ode"	- order by node locality, then by zone within node
2046  *	= "[zZ]one"	- order by zone, then by node locality within zone
2047  */
2048 
2049 static int __parse_numa_zonelist_order(char *s)
2050 {
2051 	if (*s == 'd' || *s == 'D') {
2052 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2053 	} else if (*s == 'n' || *s == 'N') {
2054 		user_zonelist_order = ZONELIST_ORDER_NODE;
2055 	} else if (*s == 'z' || *s == 'Z') {
2056 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2057 	} else {
2058 		printk(KERN_WARNING
2059 			"Ignoring invalid numa_zonelist_order value:  "
2060 			"%s\n", s);
2061 		return -EINVAL;
2062 	}
2063 	return 0;
2064 }
2065 
2066 static __init int setup_numa_zonelist_order(char *s)
2067 {
2068 	if (s)
2069 		return __parse_numa_zonelist_order(s);
2070 	return 0;
2071 }
2072 early_param("numa_zonelist_order", setup_numa_zonelist_order);
2073 
2074 /*
2075  * sysctl handler for numa_zonelist_order
2076  */
2077 int numa_zonelist_order_handler(ctl_table *table, int write,
2078 		struct file *file, void __user *buffer, size_t *length,
2079 		loff_t *ppos)
2080 {
2081 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2082 	int ret;
2083 
2084 	if (write)
2085 		strncpy(saved_string, (char*)table->data,
2086 			NUMA_ZONELIST_ORDER_LEN);
2087 	ret = proc_dostring(table, write, file, buffer, length, ppos);
2088 	if (ret)
2089 		return ret;
2090 	if (write) {
2091 		int oldval = user_zonelist_order;
2092 		if (__parse_numa_zonelist_order((char*)table->data)) {
2093 			/*
2094 			 * bogus value.  restore saved string
2095 			 */
2096 			strncpy((char*)table->data, saved_string,
2097 				NUMA_ZONELIST_ORDER_LEN);
2098 			user_zonelist_order = oldval;
2099 		} else if (oldval != user_zonelist_order)
2100 			build_all_zonelists();
2101 	}
2102 	return 0;
2103 }
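/*
 * Illustrative usage: the ordering can be chosen at boot with
 * "numa_zonelist_order=zone" on the kernel command line, or at runtime
 * (assuming the usual sysctl mount point) with:
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 *
 * A successful runtime write that changes the value rebuilds the
 * zonelists immediately via build_all_zonelists(), as the handler
 * above shows.
 */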
2104 
2105 
2106 #define MAX_NODE_LOAD (num_online_nodes())
2107 static int node_load[MAX_NUMNODES];
2108 
2109 /**
2110  * find_next_best_node - find the next node that should appear in a given node's fallback list
2111  * @node: node whose fallback list we're appending
2112  * @used_node_mask: nodemask_t of already used nodes
2113  *
2114  * We use a number of factors to determine which is the next node that should
2115  * appear on a given node's fallback list.  The node should not have appeared
2116  * already in @node's fallback list, and it should be the next closest node
2117  * according to the distance array (which contains arbitrary distance values
2118  * from each node to each node in the system), and should also prefer nodes
2119  * with no CPUs, since presumably they'll have very little allocation pressure
2120  * on them otherwise.
2121  * It returns -1 if no node is found.
2122  */
2123 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2124 {
2125 	int n, val;
2126 	int min_val = INT_MAX;
2127 	int best_node = -1;
2128 	const struct cpumask *tmp = cpumask_of_node(0);
2129 
2130 	/* Use the local node if we haven't already */
2131 	if (!node_isset(node, *used_node_mask)) {
2132 		node_set(node, *used_node_mask);
2133 		return node;
2134 	}
2135 
2136 	for_each_node_state(n, N_HIGH_MEMORY) {
2137 
2138 		/* Don't want a node to appear more than once */
2139 		if (node_isset(n, *used_node_mask))
2140 			continue;
2141 
2142 		/* Use the distance array to find the distance */
2143 		val = node_distance(node, n);
2144 
2145 		/* Penalize nodes under us ("prefer the next node") */
2146 		val += (n < node);
2147 
2148 		/* Give preference to headless and unused nodes */
2149 		tmp = cpumask_of_node(n);
2150 		if (!cpumask_empty(tmp))
2151 			val += PENALTY_FOR_NODE_WITH_CPUS;
2152 
2153 		/* Slight preference for less loaded node */
2154 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2155 		val += node_load[n];
2156 
2157 		if (val < min_val) {
2158 			min_val = val;
2159 			best_node = n;
2160 		}
2161 	}
2162 
2163 	if (best_node >= 0)
2164 		node_set(best_node, *used_node_mask);
2165 
2166 	return best_node;
2167 }
2168 
2169 
2170 /*
2171  * Build zonelists ordered by node and zones within node.
2172  * This results in maximum locality--normal zone overflows into local
2173  * DMA zone, if any--but risks exhausting DMA zone.
2174  */
2175 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2176 {
2177 	int j;
2178 	struct zonelist *zonelist;
2179 
2180 	zonelist = &pgdat->node_zonelists[0];
2181 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2182 		;
2183 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2184 							MAX_NR_ZONES - 1);
2185 	zonelist->_zonerefs[j].zone = NULL;
2186 	zonelist->_zonerefs[j].zone_idx = 0;
2187 }
2188 
2189 /*
2190  * Build gfp_thisnode zonelists
2191  */
2192 static void build_thisnode_zonelists(pg_data_t *pgdat)
2193 {
2194 	int j;
2195 	struct zonelist *zonelist;
2196 
2197 	zonelist = &pgdat->node_zonelists[1];
2198 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2199 	zonelist->_zonerefs[j].zone = NULL;
2200 	zonelist->_zonerefs[j].zone_idx = 0;
2201 }
2202 
2203 /*
2204  * Build zonelists ordered by zone and nodes within zones.
2205  * This results in conserving DMA zone[s] until all Normal memory is
2206  * exhausted, but results in overflowing to remote node while memory
2207  * may still exist in local DMA zone.
2208  */
2209 static int node_order[MAX_NUMNODES];
2210 
2211 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2212 {
2213 	int pos, j, node;
2214 	int zone_type;		/* needs to be signed */
2215 	struct zone *z;
2216 	struct zonelist *zonelist;
2217 
2218 	zonelist = &pgdat->node_zonelists[0];
2219 	pos = 0;
2220 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2221 		for (j = 0; j < nr_nodes; j++) {
2222 			node = node_order[j];
2223 			z = &NODE_DATA(node)->node_zones[zone_type];
2224 			if (populated_zone(z)) {
2225 				zoneref_set_zone(z,
2226 					&zonelist->_zonerefs[pos++]);
2227 				check_highest_zone(zone_type);
2228 			}
2229 		}
2230 	}
2231 	zonelist->_zonerefs[pos].zone = NULL;
2232 	zonelist->_zonerefs[pos].zone_idx = 0;
2233 }
2234 
2235 static int default_zonelist_order(void)
2236 {
2237 	int nid, zone_type;
2238 	unsigned long low_kmem_size,total_size;
2239 	unsigned long low_kmem_size, total_size;
2240 	int average_size;
2241 	/*
2242 	 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2243 	 * If they are really small and used heavily, the system can fall
2244 	 * into OOM very easily.
2245 	 * This function detects ZONE_DMA/DMA32 size and configures zonelist order.
2246 	 */
2247 	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2248 	low_kmem_size = 0;
2249 	total_size = 0;
2250 	for_each_online_node(nid) {
2251 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2252 			z = &NODE_DATA(nid)->node_zones[zone_type];
2253 			if (populated_zone(z)) {
2254 				if (zone_type < ZONE_NORMAL)
2255 					low_kmem_size += z->present_pages;
2256 				total_size += z->present_pages;
2257 			}
2258 		}
2259 	}
2260 	if (!low_kmem_size ||  /* there are no DMA area. */
2261 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2262 		return ZONELIST_ORDER_NODE;
2263 	/*
2264 	 * Look into each node's configuration.
2265 	 * If there is a node whose DMA/DMA32 memory makes up a large share
2266 	 * of its local memory, node order may be suitable.
2267 	 */
2268 	average_size = total_size /
2269 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2270 	for_each_online_node(nid) {
2271 		low_kmem_size = 0;
2272 		total_size = 0;
2273 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2274 			z = &NODE_DATA(nid)->node_zones[zone_type];
2275 			if (populated_zone(z)) {
2276 				if (zone_type < ZONE_NORMAL)
2277 					low_kmem_size += z->present_pages;
2278 				total_size += z->present_pages;
2279 			}
2280 		}
2281 		if (low_kmem_size &&
2282 		    total_size > average_size && /* ignore small node */
2283 		    low_kmem_size > total_size * 70/100)
2284 			return ZONELIST_ORDER_NODE;
2285 	}
2286 	return ZONELIST_ORDER_ZONE;
2287 }
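/*
 * Worked example (illustrative): if there is no DMA/DMA32 memory at all,
 * or it makes up more than half of all memory, the first test above picks
 * node order.  Otherwise a single larger-than-average node whose memory
 * is more than 70% DMA/DMA32 is enough to pick node order; failing that,
 * zone order is used so the small low zones are consumed last.
 */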
2288 
2289 static void set_zonelist_order(void)
2290 {
2291 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2292 		current_zonelist_order = default_zonelist_order();
2293 	else
2294 		current_zonelist_order = user_zonelist_order;
2295 }
2296 
2297 static void build_zonelists(pg_data_t *pgdat)
2298 {
2299 	int j, node, load;
2300 	enum zone_type i;
2301 	nodemask_t used_mask;
2302 	int local_node, prev_node;
2303 	struct zonelist *zonelist;
2304 	int order = current_zonelist_order;
2305 
2306 	/* initialize zonelists */
2307 	for (i = 0; i < MAX_ZONELISTS; i++) {
2308 		zonelist = pgdat->node_zonelists + i;
2309 		zonelist->_zonerefs[0].zone = NULL;
2310 		zonelist->_zonerefs[0].zone_idx = 0;
2311 	}
2312 
2313 	/* NUMA-aware ordering of nodes */
2314 	local_node = pgdat->node_id;
2315 	load = num_online_nodes();
2316 	prev_node = local_node;
2317 	nodes_clear(used_mask);
2318 
2319 	memset(node_load, 0, sizeof(node_load));
2320 	memset(node_order, 0, sizeof(node_order));
2321 	j = 0;
2322 
2323 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2324 		int distance = node_distance(local_node, node);
2325 
2326 		/*
2327 		 * If another node is sufficiently far away then it is better
2328 		 * to reclaim pages in a zone before going off node.
2329 		 */
2330 		if (distance > RECLAIM_DISTANCE)
2331 			zone_reclaim_mode = 1;
2332 
2333 		/*
2334 		 * We don't want to pressure a particular node,
2335 		 * so add a penalty to the first node in the same
2336 		 * distance group to make the ordering round-robin.
2337 		 */
2338 		if (distance != node_distance(local_node, prev_node))
2339 			node_load[node] = load;
2340 
2341 		prev_node = node;
2342 		load--;
2343 		if (order == ZONELIST_ORDER_NODE)
2344 			build_zonelists_in_node_order(pgdat, node);
2345 		else
2346 			node_order[j++] = node;	/* remember order */
2347 	}
2348 
2349 	if (order == ZONELIST_ORDER_ZONE) {
2350 		/* calculate node order -- i.e., DMA last! */
2351 		build_zonelists_in_zone_order(pgdat, j);
2352 	}
2353 
2354 	build_thisnode_zonelists(pgdat);
2355 }
2356 
2357 /* Construct the zonelist performance cache - see further mmzone.h */
2358 static void build_zonelist_cache(pg_data_t *pgdat)
2359 {
2360 	struct zonelist *zonelist;
2361 	struct zonelist_cache *zlc;
2362 	struct zoneref *z;
2363 
2364 	zonelist = &pgdat->node_zonelists[0];
2365 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2366 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2367 	for (z = zonelist->_zonerefs; z->zone; z++)
2368 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2369 }
2370 
2371 
2372 #else	/* CONFIG_NUMA */
2373 
2374 static void set_zonelist_order(void)
2375 {
2376 	current_zonelist_order = ZONELIST_ORDER_ZONE;
2377 }
2378 
2379 static void build_zonelists(pg_data_t *pgdat)
2380 {
2381 	int node, local_node;
2382 	enum zone_type j;
2383 	struct zonelist *zonelist;
2384 
2385 	local_node = pgdat->node_id;
2386 
2387 	zonelist = &pgdat->node_zonelists[0];
2388 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2389 
2390 	/*
2391 	 * Now we build the zonelist so that it contains the zones
2392 	 * of all the other nodes.
2393 	 * We don't want to pressure a particular node, so when
2394 	 * building the zones for node N, we make sure that the
2395 	 * zones coming right after the local ones are those from
2396 	 * node N+1 (modulo N)
2397 	 */
2398 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2399 		if (!node_online(node))
2400 			continue;
2401 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2402 							MAX_NR_ZONES - 1);
2403 	}
2404 	for (node = 0; node < local_node; node++) {
2405 		if (!node_online(node))
2406 			continue;
2407 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2408 							MAX_NR_ZONES - 1);
2409 	}
2410 
2411 	zonelist->_zonerefs[j].zone = NULL;
2412 	zonelist->_zonerefs[j].zone_idx = 0;
2413 }
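/*
 * For example (illustrative): with MAX_NUMNODES == 4 and local_node == 2,
 * the two loops above append the zones of node 3, then node 0, then
 * node 1 after the local zones, giving the rotated fallback order
 * 2, 3, 0, 1.
 */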
2414 
2415 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2416 static void build_zonelist_cache(pg_data_t *pgdat)
2417 {
2418 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2419 }
2420 
2421 #endif	/* CONFIG_NUMA */
2422 
2423 /* The int return value exists only to match the stop_machine() callback type */
2424 static int __build_all_zonelists(void *dummy)
2425 {
2426 	int nid;
2427 
2428 	for_each_online_node(nid) {
2429 		pg_data_t *pgdat = NODE_DATA(nid);
2430 
2431 		build_zonelists(pgdat);
2432 		build_zonelist_cache(pgdat);
2433 	}
2434 	return 0;
2435 }
2436 
2437 void build_all_zonelists(void)
2438 {
2439 	set_zonelist_order();
2440 
2441 	if (system_state == SYSTEM_BOOTING) {
2442 		__build_all_zonelists(NULL);
2443 		mminit_verify_zonelist();
2444 		cpuset_init_current_mems_allowed();
2445 	} else {
2446 		/* we have to stop all cpus to guarantee there are no
2447 		   remaining users of the zonelists */
2448 		stop_machine(__build_all_zonelists, NULL, NULL);
2449 		/* cpuset refresh routine should be here */
2450 	}
2451 	vm_total_pages = nr_free_pagecache_pages();
2452 	/*
2453 	 * Disable grouping by mobility if the number of pages in the
2454 	 * system is too low to allow the mechanism to work. It would be
2455 	 * more accurate, but expensive to check per-zone. This check is
2456 	 * made on memory-hotadd so a system can start with mobility
2457 	 * disabled and enable it later
2458 	 */
2459 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2460 		page_group_by_mobility_disabled = 1;
2461 	else
2462 		page_group_by_mobility_disabled = 0;
2463 
2464 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2465 		"Total pages: %ld\n",
2466 			num_online_nodes(),
2467 			zonelist_order_name[current_zonelist_order],
2468 			page_group_by_mobility_disabled ? "off" : "on",
2469 			vm_total_pages);
2470 #ifdef CONFIG_NUMA
2471 	printk("Policy zone: %s\n", zone_names[policy_zone]);
2472 #endif
2473 }
2474 
2475 /*
2476  * Helper functions to size the waitqueue hash table.
2477  * Essentially these want to choose hash table sizes sufficiently
2478  * large so that collisions trying to wait on pages are rare.
2479  * But in fact, the number of active page waitqueues on typical
2480  * systems is ridiculously low, less than 200. So this is even
2481  * conservative, even though it seems large.
2482  *
2483  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2484  * waitqueues, i.e. the size of the waitq table given the number of pages.
2485  */
2486 #define PAGES_PER_WAITQUEUE	256
2487 
2488 #ifndef CONFIG_MEMORY_HOTPLUG
2489 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2490 {
2491 	unsigned long size = 1;
2492 
2493 	pages /= PAGES_PER_WAITQUEUE;
2494 
2495 	while (size < pages)
2496 		size <<= 1;
2497 
2498 	/*
2499 	 * Once we have dozens or even hundreds of threads sleeping
2500 	 * on IO we've got bigger problems than wait queue collision.
2501 	 * Limit the size of the wait table to a reasonable size.
2502 	 */
2503 	size = min(size, 4096UL);
2504 
2505 	return max(size, 4UL);
2506 }
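/*
 * Worked example (illustrative, 4KiB pages): a 1GiB zone has 262144
 * pages, and 262144 / PAGES_PER_WAITQUEUE == 1024, already a power of
 * two, so the table gets 1024 entries.  A 16GiB zone would want 16384
 * entries but is clamped to the 4096-entry ceiling.
 */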
2507 #else
2508 /*
2509  * A zone's size might be changed by hot-add, so it is not possible to determine
2510  * a suitable size for its wait_table.  So we use the maximum size now.
2511  *
2512  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2513  *
2514  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2515  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2516  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2517  *
2518  * By the formula above, the maximum number of entries is reached once a
2519  * zone's memory is (512K + 256) pages or more.  That corresponds to:
2520  *
2521  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2522  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2523  *    powerpc (64K page size)             : =  (32G +16M)byte.
2524  */
2525 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2526 {
2527 	return 4096UL;
2528 }
2529 #endif
2530 
2531 /*
2532  * This is an integer logarithm so that shifts can be used later
2533  * to extract the more random high bits from the multiplicative
2534  * hash function before the remainder is taken.
2535  */
2536 static inline unsigned long wait_table_bits(unsigned long size)
2537 {
2538 	return ffz(~size);
2539 }
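/* For example, for a 1024-entry table, wait_table_bits() == ffz(~1024) == 10. */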
2540 
2541 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2542 
2543 /*
2544  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2545  * of blocks reserved is based on zone->pages_min. The memory within the
2546  * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2547  * higher will lead to a bigger reserve which will get freed as contiguous
2548  * blocks as reclaim kicks in.
2549  */
2550 static void setup_zone_migrate_reserve(struct zone *zone)
2551 {
2552 	unsigned long start_pfn, pfn, end_pfn;
2553 	struct page *page;
2554 	unsigned long reserve, block_migratetype;
2555 
2556 	/* Get the start pfn, end pfn and the number of blocks to reserve */
2557 	start_pfn = zone->zone_start_pfn;
2558 	end_pfn = start_pfn + zone->spanned_pages;
2559 	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2560 							pageblock_order;
2561 
2562 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2563 		if (!pfn_valid(pfn))
2564 			continue;
2565 		page = pfn_to_page(pfn);
2566 
2567 		/* Watch out for overlapping nodes */
2568 		if (page_to_nid(page) != zone_to_nid(zone))
2569 			continue;
2570 
2571 		/* Blocks with reserved pages will never be freed, skip them. */
2572 		if (PageReserved(page))
2573 			continue;
2574 
2575 		block_migratetype = get_pageblock_migratetype(page);
2576 
2577 		/* If this block is reserved, account for it */
2578 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2579 			reserve--;
2580 			continue;
2581 		}
2582 
2583 		/* Suitable for reserving if this block is movable */
2584 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2585 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2586 			move_freepages_block(zone, page, MIGRATE_RESERVE);
2587 			reserve--;
2588 			continue;
2589 		}
2590 
2591 		/*
2592 		 * If the reserve is met and this is a previous reserved block,
2593 		 * take it back
2594 		 */
2595 		if (block_migratetype == MIGRATE_RESERVE) {
2596 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2597 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2598 		}
2599 	}
2600 }
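/*
 * Worked example (illustrative, assuming pageblock_order == 9 so that
 * pageblock_nr_pages == 512): a zone with pages_min == 1536 yields
 * roundup(1536, 512) >> 9 == 3 pageblocks marked MIGRATE_RESERVE.
 */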
2601 
2602 /*
2603  * Initially all pages are reserved - free ones are freed
2604  * up by free_all_bootmem() once the early boot process is
2605  * done. Non-atomic initialization, single-pass.
2606  */
2607 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2608 		unsigned long start_pfn, enum memmap_context context)
2609 {
2610 	struct page *page;
2611 	unsigned long end_pfn = start_pfn + size;
2612 	unsigned long pfn;
2613 	struct zone *z;
2614 
2615 	if (highest_memmap_pfn < end_pfn - 1)
2616 		highest_memmap_pfn = end_pfn - 1;
2617 
2618 	z = &NODE_DATA(nid)->node_zones[zone];
2619 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2620 		/*
2621 		 * There can be holes in boot-time mem_map[]s
2622 		 * handed to this function.  They do not
2623 		 * exist on hotplugged memory.
2624 		 */
2625 		if (context == MEMMAP_EARLY) {
2626 			if (!early_pfn_valid(pfn))
2627 				continue;
2628 			if (!early_pfn_in_nid(pfn, nid))
2629 				continue;
2630 		}
2631 		page = pfn_to_page(pfn);
2632 		set_page_links(page, zone, nid, pfn);
2633 		mminit_verify_page_links(page, zone, nid, pfn);
2634 		init_page_count(page);
2635 		reset_page_mapcount(page);
2636 		SetPageReserved(page);
2637 		/*
2638 		 * Mark the block movable so that blocks are reserved for
2639 		 * movable at startup. This will force kernel allocations
2640 		 * to reserve their blocks rather than leaking throughout
2641 		 * the address space during boot when many long-lived
2642 		 * kernel allocations are made. Later some blocks near
2643 		 * the start are marked MIGRATE_RESERVE by
2644 		 * setup_zone_migrate_reserve()
2645 		 *
2646 		 * The bitmap is created for the zone's valid pfn range,
2647 		 * but the memmap can cover invalid pages (for alignment).
2648 		 * Check here so that set_pageblock_migratetype() is not
2649 		 * called with a pfn outside the zone.
2650 		 */
2651 		if ((z->zone_start_pfn <= pfn)
2652 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2653 		    && !(pfn & (pageblock_nr_pages - 1)))
2654 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2655 
2656 		INIT_LIST_HEAD(&page->lru);
2657 #ifdef WANT_PAGE_VIRTUAL
2658 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2659 		if (!is_highmem_idx(zone))
2660 			set_page_address(page, __va(pfn << PAGE_SHIFT));
2661 #endif
2662 	}
2663 }
2664 
2665 static void __meminit zone_init_free_lists(struct zone *zone)
2666 {
2667 	int order, t;
2668 	for_each_migratetype_order(order, t) {
2669 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2670 		zone->free_area[order].nr_free = 0;
2671 	}
2672 }
2673 
2674 #ifndef __HAVE_ARCH_MEMMAP_INIT
2675 #define memmap_init(size, nid, zone, start_pfn) \
2676 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2677 #endif
2678 
2679 static int zone_batchsize(struct zone *zone)
2680 {
2681 #ifdef CONFIG_MMU
2682 	int batch;
2683 
2684 	/*
2685 	 * The per-cpu-pages pools are set to around 1/1024th of the
2686 	 * size of the zone.  But no more than 1/2 of a meg.
2687 	 *
2688 	 * OK, so we don't know how big the cache is.  So guess.
2689 	 */
2690 	batch = zone->present_pages / 1024;
2691 	if (batch * PAGE_SIZE > 512 * 1024)
2692 		batch = (512 * 1024) / PAGE_SIZE;
2693 	batch /= 4;		/* We effectively *= 4 below */
2694 	if (batch < 1)
2695 		batch = 1;
2696 
2697 	/*
2698 	 * Clamp the batch to a 2^n - 1 value. Having a power
2699 	 * of 2 value was found to be more likely to have
2700 	 * suboptimal cache aliasing properties in some cases.
2701 	 *
2702 	 * For example if 2 tasks are alternately allocating
2703 	 * batches of pages, one task can end up with a lot
2704 	 * of pages of one half of the possible page colors
2705 	 * and the other with pages of the other colors.
2706 	 */
2707 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
2708 
2709 	return batch;
2710 
2711 #else
2712 	/* The deferral and batching of frees should be suppressed under NOMMU
2713 	 * conditions.
2714 	 *
2715 	 * The problem is that NOMMU needs to be able to allocate large chunks
2716 	 * of contiguous memory as there's no hardware page translation to
2717 	 * assemble apparent contiguous memory from discontiguous pages.
2718 	 *
2719 	 * Queueing large contiguous runs of pages for batching, however,
2720 	 * causes the pages to actually be freed in smaller chunks.  As there
2721 	 * can be a significant delay between the individual batches being
2722 	 * recycled, this leads to the once large chunks of space being
2723 	 * fragmented and becoming unavailable for high-order allocations.
2724 	 */
2725 	return 0;
2726 #endif
2727 }
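/*
 * Worked example (illustrative, 4KiB pages): a 1GiB zone has 262144
 * present pages, so batch starts at 256; 256 pages exceed half a
 * megabyte, so it is clamped to 128, quartered to 32, and finally
 * rounded to rounddown_pow_of_two(32 + 16) - 1 == 31.
 */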
2728 
2729 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2730 {
2731 	struct per_cpu_pages *pcp;
2732 
2733 	memset(p, 0, sizeof(*p));
2734 
2735 	pcp = &p->pcp;
2736 	pcp->count = 0;
2737 	pcp->high = 6 * batch;
2738 	pcp->batch = max(1UL, 1 * batch);
2739 	INIT_LIST_HEAD(&pcp->list);
2740 }
2741 
2742 /*
2743  * setup_pagelist_highmark() sets the high water mark of the hot
2744  * per-cpu pagelist of pageset p to the value high.
2745  */
2746 
2747 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2748 				unsigned long high)
2749 {
2750 	struct per_cpu_pages *pcp;
2751 
2752 	pcp = &p->pcp;
2753 	pcp->high = high;
2754 	pcp->batch = max(1UL, high/4);
2755 	if ((high/4) > (PAGE_SHIFT * 8))
2756 		pcp->batch = PAGE_SHIFT * 8;
2757 }
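/*
 * Worked example (illustrative, PAGE_SHIFT == 12): with
 * percpu_pagelist_fraction == 8 on a 262144-page zone, high becomes
 * 32768 pages per cpu; high/4 == 8192 exceeds PAGE_SHIFT * 8 == 96,
 * so the batch is capped at 96.
 */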
2758 
2759 
2760 #ifdef CONFIG_NUMA
2761 /*
2762  * Boot pageset table. One per cpu which is going to be used for all
2763  * zones and all nodes. The parameters will be set in such a way
2764  * that an item put on a list will immediately be handed over to
2765  * the buddy list. This is safe since pageset manipulation is done
2766  * with interrupts disabled.
2767  *
2768  * Some NUMA counter updates may also be caught by the boot pagesets.
2769  *
2770  * The boot_pagesets must be kept even after bootup is complete for
2771  * unused processors and/or zones. They do play a role for bootstrapping
2772  * hotplugged processors.
2773  *
2774  * zoneinfo_show() and maybe other functions do
2775  * not check if the processor is online before following the pageset pointer.
2776  * Other parts of the kernel may not check if the zone is available.
2777  */
2778 static struct per_cpu_pageset boot_pageset[NR_CPUS];
2779 
2780 /*
2781  * Dynamically allocate memory for the
2782  * per cpu pageset array in struct zone.
2783  */
2784 static int __cpuinit process_zones(int cpu)
2785 {
2786 	struct zone *zone, *dzone;
2787 	int node = cpu_to_node(cpu);
2788 
2789 	node_set_state(node, N_CPU);	/* this node has a cpu */
2790 
2791 	for_each_populated_zone(zone) {
2792 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2793 					 GFP_KERNEL, node);
2794 		if (!zone_pcp(zone, cpu))
2795 			goto bad;
2796 
2797 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2798 
2799 		if (percpu_pagelist_fraction)
2800 			setup_pagelist_highmark(zone_pcp(zone, cpu),
2801 			 	(zone->present_pages / percpu_pagelist_fraction));
2802 	}
2803 
2804 	return 0;
2805 bad:
2806 	for_each_zone(dzone) {
2807 		if (!populated_zone(dzone))
2808 			continue;
2809 		if (dzone == zone)
2810 			break;
2811 		kfree(zone_pcp(dzone, cpu));
2812 		zone_pcp(dzone, cpu) = NULL;
2813 	}
2814 	return -ENOMEM;
2815 }
2816 
2817 static inline void free_zone_pagesets(int cpu)
2818 {
2819 	struct zone *zone;
2820 
2821 	for_each_zone(zone) {
2822 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2823 
2824 		/* Free per_cpu_pageset if it is slab allocated */
2825 		if (pset != &boot_pageset[cpu])
2826 			kfree(pset);
2827 		zone_pcp(zone, cpu) = NULL;
2828 	}
2829 }
2830 
2831 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2832 		unsigned long action,
2833 		void *hcpu)
2834 {
2835 	int cpu = (long)hcpu;
2836 	int ret = NOTIFY_OK;
2837 
2838 	switch (action) {
2839 	case CPU_UP_PREPARE:
2840 	case CPU_UP_PREPARE_FROZEN:
2841 		if (process_zones(cpu))
2842 			ret = NOTIFY_BAD;
2843 		break;
2844 	case CPU_UP_CANCELED:
2845 	case CPU_UP_CANCELED_FROZEN:
2846 	case CPU_DEAD:
2847 	case CPU_DEAD_FROZEN:
2848 		free_zone_pagesets(cpu);
2849 		break;
2850 	default:
2851 		break;
2852 	}
2853 	return ret;
2854 }
2855 
2856 static struct notifier_block __cpuinitdata pageset_notifier =
2857 	{ &pageset_cpuup_callback, NULL, 0 };
2858 
2859 void __init setup_per_cpu_pageset(void)
2860 {
2861 	int err;
2862 
2863 	/* Initialize per_cpu_pageset for cpu 0.
2864 	 * A cpuup callback will do this for every cpu
2865 	 * as it comes online
2866 	 */
2867 	err = process_zones(smp_processor_id());
2868 	BUG_ON(err);
2869 	register_cpu_notifier(&pageset_notifier);
2870 }
2871 
2872 #endif
2873 
2874 static noinline __init_refok
2875 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2876 {
2877 	int i;
2878 	struct pglist_data *pgdat = zone->zone_pgdat;
2879 	size_t alloc_size;
2880 
2881 	/*
2882 	 * The per-page waitqueue mechanism uses hashed waitqueues
2883 	 * per zone.
2884 	 */
2885 	zone->wait_table_hash_nr_entries =
2886 		 wait_table_hash_nr_entries(zone_size_pages);
2887 	zone->wait_table_bits =
2888 		wait_table_bits(zone->wait_table_hash_nr_entries);
2889 	alloc_size = zone->wait_table_hash_nr_entries
2890 					* sizeof(wait_queue_head_t);
2891 
2892 	if (!slab_is_available()) {
2893 		zone->wait_table = (wait_queue_head_t *)
2894 			alloc_bootmem_node(pgdat, alloc_size);
2895 	} else {
2896 		/*
2897 		 * This case means that a zone whose size was 0 gets new memory
2898 		 * via memory hot-add.
2899 		 * But it may be the case that a new node was hot-added.  In
2900 		 * this case vmalloc() will not be able to use this new node's
2901 		 * memory - this wait_table must be initialized to use this new
2902 		 * node itself as well.
2903 		 * To use this new node's memory, further consideration will be
2904 		 * necessary.
2905 		 */
2906 		zone->wait_table = vmalloc(alloc_size);
2907 	}
2908 	if (!zone->wait_table)
2909 		return -ENOMEM;
2910 
2911 	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2912 		init_waitqueue_head(zone->wait_table + i);
2913 
2914 	return 0;
2915 }
2916 
2917 static __meminit void zone_pcp_init(struct zone *zone)
2918 {
2919 	int cpu;
2920 	unsigned long batch = zone_batchsize(zone);
2921 
2922 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
2923 #ifdef CONFIG_NUMA
2924 		/* Early boot. Slab allocator not functional yet */
2925 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
2926 		setup_pageset(&boot_pageset[cpu], 0);
2927 #else
2928 		setup_pageset(zone_pcp(zone, cpu), batch);
2929 #endif
2930 	}
2931 	if (zone->present_pages)
2932 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2933 			zone->name, zone->present_pages, batch);
2934 }
2935 
2936 __meminit int init_currently_empty_zone(struct zone *zone,
2937 					unsigned long zone_start_pfn,
2938 					unsigned long size,
2939 					enum memmap_context context)
2940 {
2941 	struct pglist_data *pgdat = zone->zone_pgdat;
2942 	int ret;
2943 	ret = zone_wait_table_init(zone, size);
2944 	if (ret)
2945 		return ret;
2946 	pgdat->nr_zones = zone_idx(zone) + 1;
2947 
2948 	zone->zone_start_pfn = zone_start_pfn;
2949 
2950 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
2951 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
2952 			pgdat->node_id,
2953 			(unsigned long)zone_idx(zone),
2954 			zone_start_pfn, (zone_start_pfn + size));
2955 
2956 	zone_init_free_lists(zone);
2957 
2958 	return 0;
2959 }
2960 
2961 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2962 /*
2963  * Basic iterator support. Return the first range of PFNs for a node
2964  * Note: nid == MAX_NUMNODES returns first region regardless of node
2965  */
2966 static int __meminit first_active_region_index_in_nid(int nid)
2967 {
2968 	int i;
2969 
2970 	for (i = 0; i < nr_nodemap_entries; i++)
2971 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2972 			return i;
2973 
2974 	return -1;
2975 }
2976 
2977 /*
2978  * Basic iterator support. Return the next active range of PFNs for a node
2979  * Note: nid == MAX_NUMNODES returns next region regardless of node
2980  */
2981 static int __meminit next_active_region_index_in_nid(int index, int nid)
2982 {
2983 	for (index = index + 1; index < nr_nodemap_entries; index++)
2984 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2985 			return index;
2986 
2987 	return -1;
2988 }
2989 
2990 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2991 /*
2992  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2993  * Architectures may implement their own version but if add_active_range()
2994  * was used and there are no special requirements, this is a convenient
2995  * alternative
2996  */
2997 int __meminit __early_pfn_to_nid(unsigned long pfn)
2998 {
2999 	int i;
3000 
3001 	for (i = 0; i < nr_nodemap_entries; i++) {
3002 		unsigned long start_pfn = early_node_map[i].start_pfn;
3003 		unsigned long end_pfn = early_node_map[i].end_pfn;
3004 
3005 		if (start_pfn <= pfn && pfn < end_pfn)
3006 			return early_node_map[i].nid;
3007 	}
3008 	/* This is a memory hole */
3009 	return -1;
3010 }
3011 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3012 
3013 int __meminit early_pfn_to_nid(unsigned long pfn)
3014 {
3015 	int nid;
3016 
3017 	nid = __early_pfn_to_nid(pfn);
3018 	if (nid >= 0)
3019 		return nid;
3020 	/* just returns 0 */
3021 	return 0;
3022 }
3023 
3024 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3025 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3026 {
3027 	int nid;
3028 
3029 	nid = __early_pfn_to_nid(pfn);
3030 	if (nid >= 0 && nid != node)
3031 		return false;
3032 	return true;
3033 }
3034 #endif
3035 
3036 /* Basic iterator support to walk early_node_map[] */
3037 #define for_each_active_range_index_in_nid(i, nid) \
3038 	for (i = first_active_region_index_in_nid(nid); i != -1; \
3039 				i = next_active_region_index_in_nid(i, nid))
3040 
3041 /**
3042  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3043  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3044  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3045  *
3046  * If an architecture guarantees that all ranges registered with
3047  * add_active_ranges() contain no holes and may be freed, this
3048  * function may be used instead of calling free_bootmem() manually.
3049  */
3050 void __init free_bootmem_with_active_regions(int nid,
3051 						unsigned long max_low_pfn)
3052 {
3053 	int i;
3054 
3055 	for_each_active_range_index_in_nid(i, nid) {
3056 		unsigned long size_pages = 0;
3057 		unsigned long end_pfn = early_node_map[i].end_pfn;
3058 
3059 		if (early_node_map[i].start_pfn >= max_low_pfn)
3060 			continue;
3061 
3062 		if (end_pfn > max_low_pfn)
3063 			end_pfn = max_low_pfn;
3064 
3065 		size_pages = end_pfn - early_node_map[i].start_pfn;
3066 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3067 				PFN_PHYS(early_node_map[i].start_pfn),
3068 				size_pages << PAGE_SHIFT);
3069 	}
3070 }
3071 
3072 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3073 {
3074 	int i;
3075 	int ret;
3076 
3077 	for_each_active_range_index_in_nid(i, nid) {
3078 		ret = work_fn(early_node_map[i].start_pfn,
3079 			      early_node_map[i].end_pfn, data);
3080 		if (ret)
3081 			break;
3082 	}
3083 }
3084 /**
3085  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3086  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3087  *
3088  * If an architecture guarantees that all ranges registered with
3089  * add_active_ranges() contain no holes and may be freed, this
3090  * function may be used instead of calling memory_present() manually.
3091  */
3092 void __init sparse_memory_present_with_active_regions(int nid)
3093 {
3094 	int i;
3095 
3096 	for_each_active_range_index_in_nid(i, nid)
3097 		memory_present(early_node_map[i].nid,
3098 				early_node_map[i].start_pfn,
3099 				early_node_map[i].end_pfn);
3100 }
3101 
3102 /**
3103  * get_pfn_range_for_nid - Return the start and end page frames for a node
3104  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3105  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3106  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3107  *
3108  * It returns the start and end page frame of a node based on information
3109  * provided by an arch calling add_active_range(). If called for a node
3110  * with no available memory, a warning is printed and the start and end
3111  * PFNs will be 0.
3112  */
3113 void __meminit get_pfn_range_for_nid(unsigned int nid,
3114 			unsigned long *start_pfn, unsigned long *end_pfn)
3115 {
3116 	int i;
3117 	*start_pfn = -1UL;
3118 	*end_pfn = 0;
3119 
3120 	for_each_active_range_index_in_nid(i, nid) {
3121 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3122 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3123 	}
3124 
3125 	if (*start_pfn == -1UL)
3126 		*start_pfn = 0;
3127 }
3128 
3129 /*
3130  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3131  * assumption is made that zones within a node are ordered by monotonically
3132  * increasing memory addresses, so that the "highest" populated zone is used.
3133  */
3134 static void __init find_usable_zone_for_movable(void)
3135 {
3136 	int zone_index;
3137 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3138 		if (zone_index == ZONE_MOVABLE)
3139 			continue;
3140 
3141 		if (arch_zone_highest_possible_pfn[zone_index] >
3142 				arch_zone_lowest_possible_pfn[zone_index])
3143 			break;
3144 	}
3145 
3146 	VM_BUG_ON(zone_index == -1);
3147 	movable_zone = zone_index;
3148 }
3149 
3150 /*
3151  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3152  * because it is sized independently of the architecture. Unlike other zones,
3153  * the starting point for ZONE_MOVABLE is not fixed. It may be different
3154  * in each node depending on the size of each node and how evenly kernelcore
3155  * is distributed. This helper function adjusts the zone ranges
3156  * provided by the architecture for a given node by using the end of the
3157  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3158  * zones within a node are ordered by monotonically increasing memory addresses.
3159  */
3160 static void __meminit adjust_zone_range_for_zone_movable(int nid,
3161 					unsigned long zone_type,
3162 					unsigned long node_start_pfn,
3163 					unsigned long node_end_pfn,
3164 					unsigned long *zone_start_pfn,
3165 					unsigned long *zone_end_pfn)
3166 {
3167 	/* Only adjust if ZONE_MOVABLE is on this node */
3168 	if (zone_movable_pfn[nid]) {
3169 		/* Size ZONE_MOVABLE */
3170 		if (zone_type == ZONE_MOVABLE) {
3171 			*zone_start_pfn = zone_movable_pfn[nid];
3172 			*zone_end_pfn = min(node_end_pfn,
3173 				arch_zone_highest_possible_pfn[movable_zone]);
3174 
3175 		/* Adjust for ZONE_MOVABLE starting within this range */
3176 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3177 				*zone_end_pfn > zone_movable_pfn[nid]) {
3178 			*zone_end_pfn = zone_movable_pfn[nid];
3179 
3180 		/* Check if this whole range is within ZONE_MOVABLE */
3181 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3182 			*zone_start_pfn = *zone_end_pfn;
3183 	}
3184 }
3185 
3186 /*
3187  * Return the number of pages a zone spans in a node, including holes
3188  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3189  */
3190 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3191 					unsigned long zone_type,
3192 					unsigned long *ignored)
3193 {
3194 	unsigned long node_start_pfn, node_end_pfn;
3195 	unsigned long zone_start_pfn, zone_end_pfn;
3196 
3197 	/* Get the start and end of the node and zone */
3198 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3199 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3200 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3201 	adjust_zone_range_for_zone_movable(nid, zone_type,
3202 				node_start_pfn, node_end_pfn,
3203 				&zone_start_pfn, &zone_end_pfn);
3204 
3205 	/* Check that this node has pages within the zone's required range */
3206 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3207 		return 0;
3208 
3209 	/* Move the zone boundaries inside the node if necessary */
3210 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3211 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3212 
3213 	/* Return the spanned pages */
3214 	return zone_end_pfn - zone_start_pfn;
3215 }
3216 
3217 /*
3218  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3219  * then all holes in the requested range will be accounted for.
3220  */
3221 static unsigned long __meminit __absent_pages_in_range(int nid,
3222 				unsigned long range_start_pfn,
3223 				unsigned long range_end_pfn)
3224 {
3225 	int i = 0;
3226 	unsigned long prev_end_pfn = 0, hole_pages = 0;
3227 	unsigned long start_pfn;
3228 
3229 	/* Find the end_pfn of the first active range of pfns in the node */
3230 	i = first_active_region_index_in_nid(nid);
3231 	if (i == -1)
3232 		return 0;
3233 
3234 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3235 
3236 	/* Account for ranges before physical memory on this node */
3237 	if (early_node_map[i].start_pfn > range_start_pfn)
3238 		hole_pages = prev_end_pfn - range_start_pfn;
3239 
3240 	/* Find all holes for the zone within the node */
3241 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3242 
3243 		/* No need to continue if prev_end_pfn is outside the zone */
3244 		if (prev_end_pfn >= range_end_pfn)
3245 			break;
3246 
3247 		/* Make sure the end of the zone is not within the hole */
3248 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3249 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3250 
3251 		/* Update the hole size count and move on */
3252 		if (start_pfn > range_start_pfn) {
3253 			BUG_ON(prev_end_pfn > start_pfn);
3254 			hole_pages += start_pfn - prev_end_pfn;
3255 		}
3256 		prev_end_pfn = early_node_map[i].end_pfn;
3257 	}
3258 
3259 	/* Account for ranges past physical memory on this node */
3260 	if (range_end_pfn > prev_end_pfn)
3261 		hole_pages += range_end_pfn -
3262 				max(range_start_pfn, prev_end_pfn);
3263 
3264 	return hole_pages;
3265 }
3266 
3267 /**
3268  * absent_pages_in_range - Return number of page frames in holes within a range
3269  * @start_pfn: The start PFN to start searching for holes
3270  * @end_pfn: The end PFN to stop searching for holes
3271  *
3272  * It returns the number of page frames in memory holes within a range.
3273  */
3274 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3275 							unsigned long end_pfn)
3276 {
3277 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3278 }
3279 
3280 /* Return the number of page frames in holes in a zone on a node */
3281 static unsigned long __meminit zone_absent_pages_in_node(int nid,
3282 					unsigned long zone_type,
3283 					unsigned long *ignored)
3284 {
3285 	unsigned long node_start_pfn, node_end_pfn;
3286 	unsigned long zone_start_pfn, zone_end_pfn;
3287 
3288 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3289 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3290 							node_start_pfn);
3291 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3292 							node_end_pfn);
3293 
3294 	adjust_zone_range_for_zone_movable(nid, zone_type,
3295 			node_start_pfn, node_end_pfn,
3296 			&zone_start_pfn, &zone_end_pfn);
3297 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3298 }
3299 
3300 #else
3301 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3302 					unsigned long zone_type,
3303 					unsigned long *zones_size)
3304 {
3305 	return zones_size[zone_type];
3306 }
3307 
3308 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3309 						unsigned long zone_type,
3310 						unsigned long *zholes_size)
3311 {
3312 	if (!zholes_size)
3313 		return 0;
3314 
3315 	return zholes_size[zone_type];
3316 }
3317 
3318 #endif
3319 
3320 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3321 		unsigned long *zones_size, unsigned long *zholes_size)
3322 {
3323 	unsigned long realtotalpages, totalpages = 0;
3324 	enum zone_type i;
3325 
3326 	for (i = 0; i < MAX_NR_ZONES; i++)
3327 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3328 								zones_size);
3329 	pgdat->node_spanned_pages = totalpages;
3330 
3331 	realtotalpages = totalpages;
3332 	for (i = 0; i < MAX_NR_ZONES; i++)
3333 		realtotalpages -=
3334 			zone_absent_pages_in_node(pgdat->node_id, i,
3335 								zholes_size);
3336 	pgdat->node_present_pages = realtotalpages;
3337 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3338 							realtotalpages);
3339 }
3340 
3341 #ifndef CONFIG_SPARSEMEM
3342 /*
3343  * Calculate the size of the zone->blockflags rounded to an unsigned long
3344  * Start by making sure zonesize is a multiple of pageblock_nr_pages by
3345  * rounding up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
3346  * finally round what is now in bits up to the nearest long in bits, then
3347  * return it in bytes.
3348  */
3349 static unsigned long __init usemap_size(unsigned long zonesize)
3350 {
3351 	unsigned long usemapsize;
3352 
3353 	usemapsize = roundup(zonesize, pageblock_nr_pages);
3354 	usemapsize = usemapsize >> pageblock_order;
3355 	usemapsize *= NR_PAGEBLOCK_BITS;
3356 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3357 
3358 	return usemapsize / 8;
3359 }
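/*
 * Worked example (illustrative, assuming pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 3 and 64-bit longs): a 1048576-page zone spans
 * 2048 pageblocks needing 6144 bits, already a multiple of 64, so
 * 6144 / 8 == 768 bytes are returned.
 */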
3360 
3361 static void __init setup_usemap(struct pglist_data *pgdat,
3362 				struct zone *zone, unsigned long zonesize)
3363 {
3364 	unsigned long usemapsize = usemap_size(zonesize);
3365 	zone->pageblock_flags = NULL;
3366 	if (usemapsize)
3367 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3368 }
3369 #else
3370 static inline void setup_usemap(struct pglist_data *pgdat,
3371 				struct zone *zone, unsigned long zonesize) {}
3372 #endif /* CONFIG_SPARSEMEM */
3373 
3374 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3375 
3376 /* Return a sensible default order for the pageblock size. */
3377 static inline int pageblock_default_order(void)
3378 {
3379 	if (HPAGE_SHIFT > PAGE_SHIFT)
3380 		return HUGETLB_PAGE_ORDER;
3381 
3382 	return MAX_ORDER-1;
3383 }
3384 
3385 /* Initialise pageblock_order, and with it pageblock_nr_pages */
3386 static inline void __init set_pageblock_order(unsigned int order)
3387 {
3388 	/* Check that pageblock_nr_pages has not already been setup */
3389 	if (pageblock_order)
3390 		return;
3391 
3392 	/*
3393 	 * Assume the largest contiguous order of interest is a huge page.
3394 	 * This value may vary depending on boot parameters on IA64.
3395 	 */
3396 	pageblock_order = order;
3397 }
3398 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3399 
3400 /*
3401  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3402  * and pageblock_default_order() are unused as pageblock_order is set
3403  * at compile-time. See include/linux/pageblock-flags.h for the values of
3404  * pageblock_order based on the kernel configuration.
3405  */
3406 static inline int pageblock_default_order(unsigned int order)
3407 {
3408 	return MAX_ORDER-1;
3409 }
3410 #define set_pageblock_order(x)	do {} while (0)
3411 
3412 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3413 
3414 /*
3415  * Set up the zone data structures:
3416  *   - mark all pages reserved
3417  *   - mark all memory queues empty
3418  *   - clear the memory bitmaps
3419  */
3420 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3421 		unsigned long *zones_size, unsigned long *zholes_size)
3422 {
3423 	enum zone_type j;
3424 	int nid = pgdat->node_id;
3425 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3426 	int ret;
3427 
3428 	pgdat_resize_init(pgdat);
3429 	pgdat->nr_zones = 0;
3430 	init_waitqueue_head(&pgdat->kswapd_wait);
3431 	pgdat->kswapd_max_order = 0;
3432 	pgdat_page_cgroup_init(pgdat);
3433 
3434 	for (j = 0; j < MAX_NR_ZONES; j++) {
3435 		struct zone *zone = pgdat->node_zones + j;
3436 		unsigned long size, realsize, memmap_pages;
3437 		enum lru_list l;
3438 
3439 		size = zone_spanned_pages_in_node(nid, j, zones_size);
3440 		realsize = size - zone_absent_pages_in_node(nid, j,
3441 								zholes_size);
3442 
3443 		/*
3444 		 * Adjust realsize so that it accounts for how much memory
3445 		 * is used by this zone for memmap. This affects the watermark
3446 		 * and per-cpu initialisations
3447 		 */
3448 		memmap_pages =
3449 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3450 		if (realsize >= memmap_pages) {
3451 			realsize -= memmap_pages;
3452 			if (memmap_pages)
3453 				printk(KERN_DEBUG
3454 				       "  %s zone: %lu pages used for memmap\n",
3455 				       zone_names[j], memmap_pages);
3456 		} else
3457 			printk(KERN_WARNING
3458 				"  %s zone: %lu pages exceeds realsize %lu\n",
3459 				zone_names[j], memmap_pages, realsize);
3460 
3461 		/* Account for reserved pages */
3462 		if (j == 0 && realsize > dma_reserve) {
3463 			realsize -= dma_reserve;
3464 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3465 					zone_names[0], dma_reserve);
3466 		}
3467 
3468 		if (!is_highmem_idx(j))
3469 			nr_kernel_pages += realsize;
3470 		nr_all_pages += realsize;
3471 
3472 		zone->spanned_pages = size;
3473 		zone->present_pages = realsize;
3474 #ifdef CONFIG_NUMA
3475 		zone->node = nid;
3476 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3477 						/ 100;
3478 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3479 #endif
3480 		zone->name = zone_names[j];
3481 		spin_lock_init(&zone->lock);
3482 		spin_lock_init(&zone->lru_lock);
3483 		zone_seqlock_init(zone);
3484 		zone->zone_pgdat = pgdat;
3485 
3486 		zone->prev_priority = DEF_PRIORITY;
3487 
3488 		zone_pcp_init(zone);
3489 		for_each_lru(l) {
3490 			INIT_LIST_HEAD(&zone->lru[l].list);
3491 			zone->lru[l].nr_scan = 0;
3492 		}
3493 		zone->reclaim_stat.recent_rotated[0] = 0;
3494 		zone->reclaim_stat.recent_rotated[1] = 0;
3495 		zone->reclaim_stat.recent_scanned[0] = 0;
3496 		zone->reclaim_stat.recent_scanned[1] = 0;
3497 		zap_zone_vm_stats(zone);
3498 		zone->flags = 0;
3499 		if (!size)
3500 			continue;
3501 
3502 		set_pageblock_order(pageblock_default_order());
3503 		setup_usemap(pgdat, zone, size);
3504 		ret = init_currently_empty_zone(zone, zone_start_pfn,
3505 						size, MEMMAP_EARLY);
3506 		BUG_ON(ret);
3507 		memmap_init(size, nid, j, zone_start_pfn);
3508 		zone_start_pfn += size;
3509 	}
3510 }
3511 
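/*
 * Editor's illustrative sketch (not part of the original source): the
 * memmap accounting in free_area_init_core() above subtracts the pages
 * consumed by the struct page array from realsize. The standalone
 * userspace model below reproduces that arithmetic; the 4K page size
 * and 56-byte struct page are assumptions for illustration only. It is
 * guarded by #if 0 so it has no effect on the build.
 */
#if 0
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 262144;		/* a 1GB zone in 4K pages */
	unsigned long sizeof_struct_page = 56;	/* assumed for this sketch */
	unsigned long memmap_pages;

	/* same formula as free_area_init_core() */
	memmap_pages = PAGE_ALIGN(size * sizeof_struct_page) >> PAGE_SHIFT;
	printf("%lu-page zone uses %lu pages for memmap\n",
	       size, memmap_pages);		/* prints 262144, 3584 */
	return 0;
}
#endif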
3512 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3513 {
3514 	/* Skip empty nodes */
3515 	if (!pgdat->node_spanned_pages)
3516 		return;
3517 
3518 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3519 	/* ia64 gets its own node_mem_map, before this, without bootmem */
3520 	if (!pgdat->node_mem_map) {
3521 		unsigned long size, start, end;
3522 		struct page *map;
3523 
3524 		/*
3525 		 * The zone's endpoints aren't required to be MAX_ORDER
3526 		 * aligned but the node_mem_map endpoints must be in order
3527 		 * for the buddy allocator to function correctly.
3528 		 */
3529 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3530 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3531 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3532 		size =  (end - start) * sizeof(struct page);
3533 		map = alloc_remap(pgdat->node_id, size);
3534 		if (!map)
3535 			map = alloc_bootmem_node(pgdat, size);
3536 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3537 	}
3538 #ifndef CONFIG_NEED_MULTIPLE_NODES
3539 	/*
3540 	 * With no DISCONTIG, the global mem_map is just set as node 0's
3541 	 */
3542 	if (pgdat == NODE_DATA(0)) {
3543 		mem_map = NODE_DATA(0)->node_mem_map;
3544 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3545 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3546 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3547 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3548 	}
3549 #endif
3550 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
3551 }
3552 
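/*
 * Editor's illustrative sketch (not part of the original source): the
 * MAX_ORDER rounding performed by alloc_node_mem_map() above, modelled
 * in userspace. MAX_ORDER 11 (so MAX_ORDER_NR_PAGES == 1024) and the
 * PFN values are assumptions for illustration; guarded by #if 0.
 */
#if 0
#include <stdio.h>

#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long node_start_pfn = 0x1102;	/* deliberately unaligned */
	unsigned long node_spanned_pages = 0x8000;
	unsigned long start, end;

	/* round start down and end up to MAX_ORDER_NR_PAGES */
	start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	end = ALIGN(node_start_pfn + node_spanned_pages, MAX_ORDER_NR_PAGES);
	printf("map covers pfns [%#lx, %#lx)\n", start, end);
	return 0;
}
#endif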
3553 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3554 		unsigned long node_start_pfn, unsigned long *zholes_size)
3555 {
3556 	pg_data_t *pgdat = NODE_DATA(nid);
3557 
3558 	pgdat->node_id = nid;
3559 	pgdat->node_start_pfn = node_start_pfn;
3560 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3561 
3562 	alloc_node_mem_map(pgdat);
3563 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3564 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3565 		nid, (unsigned long)pgdat,
3566 		(unsigned long)pgdat->node_mem_map);
3567 #endif
3568 
3569 	free_area_init_core(pgdat, zones_size, zholes_size);
3570 }
3571 
3572 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3573 
3574 #if MAX_NUMNODES > 1
3575 /*
3576  * Figure out the number of possible node ids.
3577  */
3578 static void __init setup_nr_node_ids(void)
3579 {
3580 	unsigned int node;
3581 	unsigned int highest = 0;
3582 
3583 	for_each_node_mask(node, node_possible_map)
3584 		highest = node;
3585 	nr_node_ids = highest + 1;
3586 }
3587 #else
3588 static inline void setup_nr_node_ids(void)
3589 {
3590 }
3591 #endif
3592 
3593 /**
3594  * add_active_range - Register a range of PFNs backed by physical memory
3595  * @nid: The node ID the range resides on
3596  * @start_pfn: The start PFN of the available physical memory
3597  * @end_pfn: The end PFN of the available physical memory
3598  *
3599  * These ranges are stored in early_node_map[] and later used by
3600  * free_area_init_nodes() to calculate zone sizes and holes. If the
3601  * range spans a memory hole, it is up to the architecture to ensure
3602  * the memory is not freed by the bootmem allocator. If possible
3603  * the range being registered will be merged with existing ranges.
3604  */
3605 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3606 						unsigned long end_pfn)
3607 {
3608 	int i;
3609 
3610 	mminit_dprintk(MMINIT_TRACE, "memory_register",
3611 			"Entering add_active_range(%d, %#lx, %#lx) "
3612 			"%d entries of %d used\n",
3613 			nid, start_pfn, end_pfn,
3614 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3615 
3616 	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3617 
3618 	/* Merge with existing active regions if possible */
3619 	for (i = 0; i < nr_nodemap_entries; i++) {
3620 		if (early_node_map[i].nid != nid)
3621 			continue;
3622 
3623 		/* Skip if an existing region covers this new one */
3624 		if (start_pfn >= early_node_map[i].start_pfn &&
3625 				end_pfn <= early_node_map[i].end_pfn)
3626 			return;
3627 
3628 		/* Merge forward if suitable */
3629 		if (start_pfn <= early_node_map[i].end_pfn &&
3630 				end_pfn > early_node_map[i].end_pfn) {
3631 			early_node_map[i].end_pfn = end_pfn;
3632 			return;
3633 		}
3634 
3635 		/* Merge backward if suitable */
3636 		if (start_pfn < early_node_map[i].end_pfn &&
3637 				end_pfn >= early_node_map[i].start_pfn) {
3638 			early_node_map[i].start_pfn = start_pfn;
3639 			return;
3640 		}
3641 	}
3642 
3643 	/* Check that early_node_map is large enough */
3644 	if (i >= MAX_ACTIVE_REGIONS) {
3645 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
3646 							MAX_ACTIVE_REGIONS);
3647 		return;
3648 	}
3649 
3650 	early_node_map[i].nid = nid;
3651 	early_node_map[i].start_pfn = start_pfn;
3652 	early_node_map[i].end_pfn = end_pfn;
3653 	nr_nodemap_entries = i + 1;
3654 }
3655 
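/*
 * Editor's illustrative sketch (not part of the original source): a
 * single-node userspace model of the merge rules used by
 * add_active_range() above. The region array size and the PFN values
 * are assumptions for illustration; guarded by #if 0 so it does not
 * affect the build.
 */
#if 0
#include <stdio.h>

#define MAX_REGIONS 8

static struct {
	unsigned long start_pfn;
	unsigned long end_pfn;
} map[MAX_REGIONS];
static int nr_entries;

static void add_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		/* skip if an existing region covers this new one */
		if (start_pfn >= map[i].start_pfn && end_pfn <= map[i].end_pfn)
			return;
		/* merge forward if suitable */
		if (start_pfn <= map[i].end_pfn && end_pfn > map[i].end_pfn) {
			map[i].end_pfn = end_pfn;
			return;
		}
		/* merge backward if suitable */
		if (start_pfn < map[i].end_pfn && end_pfn >= map[i].start_pfn) {
			map[i].start_pfn = start_pfn;
			return;
		}
	}
	if (i >= MAX_REGIONS)
		return;	/* full: the kernel truncates with a warning */
	map[i].start_pfn = start_pfn;
	map[i].end_pfn = end_pfn;
	nr_entries = i + 1;
}

int main(void)
{
	int i;

	add_range(0x000, 0x100);
	add_range(0x100, 0x200);	/* merges forward: [0x000, 0x200) */
	add_range(0x300, 0x400);	/* disjoint: new entry */
	for (i = 0; i < nr_entries; i++)
		printf("[%#lx, %#lx)\n", map[i].start_pfn, map[i].end_pfn);
	return 0;
}
#endif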
3656 /**
3657  * remove_active_range - Shrink an existing registered range of PFNs
3658  * @nid: The node id of the range that should be shrunk
3659  * @start_pfn: The new start PFN of the range
3660  * @end_pfn: The new end PFN of the range
3661  *
3662  * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3663  * The map is kept near the end of the physical page range that has already
3664  * been registered. This function allows an arch to shrink an existing
3665  * registered range.
3666  */
3667 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3668 				unsigned long end_pfn)
3669 {
3670 	int i, j;
3671 	int removed = 0;
3672 
3673 	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3674 			  nid, start_pfn, end_pfn);
3675 
3676 	/* Find the old active region end and shrink */
3677 	for_each_active_range_index_in_nid(i, nid) {
3678 		if (early_node_map[i].start_pfn >= start_pfn &&
3679 		    early_node_map[i].end_pfn <= end_pfn) {
3680 			/* clear it */
3681 			early_node_map[i].start_pfn = 0;
3682 			early_node_map[i].end_pfn = 0;
3683 			removed = 1;
3684 			continue;
3685 		}
3686 		if (early_node_map[i].start_pfn < start_pfn &&
3687 		    early_node_map[i].end_pfn > start_pfn) {
3688 			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3689 			early_node_map[i].end_pfn = start_pfn;
3690 			if (temp_end_pfn > end_pfn)
3691 				add_active_range(nid, end_pfn, temp_end_pfn);
3692 			continue;
3693 		}
3694 		if (early_node_map[i].start_pfn >= start_pfn &&
3695 		    early_node_map[i].end_pfn > end_pfn &&
3696 		    early_node_map[i].start_pfn < end_pfn) {
3697 			early_node_map[i].start_pfn = end_pfn;
3698 			continue;
3699 		}
3700 	}
3701 
3702 	if (!removed)
3703 		return;
3704 
3705 	/* remove the blank ones */
3706 	for (i = nr_nodemap_entries - 1; i > 0; i--) {
3707 		if (early_node_map[i].nid != nid)
3708 			continue;
3709 		if (early_node_map[i].end_pfn)
3710 			continue;
3711 		/* we found it, get rid of it */
3712 		for (j = i; j < nr_nodemap_entries - 1; j++)
3713 			memcpy(&early_node_map[j], &early_node_map[j+1],
3714 				sizeof(early_node_map[j]));
3715 		j = nr_nodemap_entries - 1;
3716 		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3717 		nr_nodemap_entries--;
3718 	}
3719 }
3720 
3721 /**
3722  * remove_all_active_ranges - Remove all currently registered regions
3723  *
3724  * During discovery, it may be found that a table like SRAT is invalid
3725  * and an alternative discovery method must be used. This function removes
3726  * all currently registered regions.
3727  */
3728 void __init remove_all_active_ranges(void)
3729 {
3730 	memset(early_node_map, 0, sizeof(early_node_map));
3731 	nr_nodemap_entries = 0;
3732 }
3733 
3734 /* Compare two node_active_regions */
3735 static int __init cmp_node_active_region(const void *a, const void *b)
3736 {
3737 	struct node_active_region *arange = (struct node_active_region *)a;
3738 	struct node_active_region *brange = (struct node_active_region *)b;
3739 
3740 	/* Done this way to avoid overflows */
3741 	if (arange->start_pfn > brange->start_pfn)
3742 		return 1;
3743 	if (arange->start_pfn < brange->start_pfn)
3744 		return -1;
3745 
3746 	return 0;
3747 }
3748 
3749 /* sort the node_map by start_pfn */
3750 static void __init sort_node_map(void)
3751 {
3752 	sort(early_node_map, (size_t)nr_nodemap_entries,
3753 			sizeof(struct node_active_region),
3754 			cmp_node_active_region, NULL);
3755 }
3756 
3757 /* Find the lowest pfn for a node */
3758 static unsigned long __init find_min_pfn_for_node(int nid)
3759 {
3760 	int i;
3761 	unsigned long min_pfn = ULONG_MAX;
3762 
3763 	/* Assuming a sorted map, the first range found has the starting pfn */
3764 	for_each_active_range_index_in_nid(i, nid)
3765 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3766 
3767 	if (min_pfn == ULONG_MAX) {
3768 		printk(KERN_WARNING
3769 			"Could not find start_pfn for node %d\n", nid);
3770 		return 0;
3771 	}
3772 
3773 	return min_pfn;
3774 }
3775 
3776 /**
3777  * find_min_pfn_with_active_regions - Find the minimum PFN registered
3778  *
3779  * It returns the minimum PFN based on information provided via
3780  * add_active_range().
3781  */
3782 unsigned long __init find_min_pfn_with_active_regions(void)
3783 {
3784 	return find_min_pfn_for_node(MAX_NUMNODES);
3785 }
3786 
3787 /*
3788  * early_calculate_totalpages()
3789  * Sum pages in active regions for movable zone.
3790  * Populate N_HIGH_MEMORY for calculating usable_nodes.
3791  */
3792 static unsigned long __init early_calculate_totalpages(void)
3793 {
3794 	int i;
3795 	unsigned long totalpages = 0;
3796 
3797 	for (i = 0; i < nr_nodemap_entries; i++) {
3798 		unsigned long pages = early_node_map[i].end_pfn -
3799 						early_node_map[i].start_pfn;
3800 		totalpages += pages;
3801 		if (pages)
3802 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3803 	}
3804 	return totalpages;
3805 }
3806 
3807 /*
3808  * Find the PFN the Movable zone begins in each node. Kernel memory
3809  * is spread evenly between nodes as long as the nodes have enough
3810  * memory. When they don't, some nodes will have more kernelcore than
3811  * others
3812  */
3813 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3814 {
3815 	int i, nid;
3816 	unsigned long usable_startpfn;
3817 	unsigned long kernelcore_node, kernelcore_remaining;
3818 	unsigned long totalpages = early_calculate_totalpages();
3819 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3820 
3821 	/*
3822 	 * If movablecore was specified, calculate what size of
3823 	 * kernelcore that corresponds so that memory usable for
3824 	 * any allocation type is evenly spread. If both kernelcore
3825 	 * and movablecore are specified, then the value of kernelcore
3826 	 * will be used for required_kernelcore if it's greater than
3827 	 * what movablecore would have allowed.
3828 	 */
3829 	if (required_movablecore) {
3830 		unsigned long corepages;
3831 
3832 		/*
3833 		 * Round-up so that ZONE_MOVABLE is at least as large as what
3834 		 * was requested by the user
3835 		 */
3836 		required_movablecore =
3837 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3838 		corepages = totalpages - required_movablecore;
3839 
3840 		required_kernelcore = max(required_kernelcore, corepages);
3841 	}
3842 
3843 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
3844 	if (!required_kernelcore)
3845 		return;
3846 
3847 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3848 	find_usable_zone_for_movable();
3849 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3850 
3851 restart:
3852 	/* Spread kernelcore memory as evenly as possible throughout nodes */
3853 	kernelcore_node = required_kernelcore / usable_nodes;
3854 	for_each_node_state(nid, N_HIGH_MEMORY) {
3855 		/*
3856 		 * Recalculate kernelcore_node if the division per node
3857 		 * now exceeds what is necessary to satisfy the requested
3858 		 * amount of memory for the kernel
3859 		 */
3860 		if (required_kernelcore < kernelcore_node)
3861 			kernelcore_node = required_kernelcore / usable_nodes;
3862 
3863 		/*
3864 		 * As the map is walked, we track how much memory is usable
3865 		 * by the kernel using kernelcore_remaining. When it is
3866 		 * 0, the rest of the node is usable by ZONE_MOVABLE
3867 		 */
3868 		kernelcore_remaining = kernelcore_node;
3869 
3870 		/* Go through each range of PFNs within this node */
3871 		for_each_active_range_index_in_nid(i, nid) {
3872 			unsigned long start_pfn, end_pfn;
3873 			unsigned long size_pages;
3874 
3875 			start_pfn = max(early_node_map[i].start_pfn,
3876 						zone_movable_pfn[nid]);
3877 			end_pfn = early_node_map[i].end_pfn;
3878 			if (start_pfn >= end_pfn)
3879 				continue;
3880 
3881 			/* Account for what is only usable for kernelcore */
3882 			if (start_pfn < usable_startpfn) {
3883 				unsigned long kernel_pages;
3884 				kernel_pages = min(end_pfn, usable_startpfn)
3885 								- start_pfn;
3886 
3887 				kernelcore_remaining -= min(kernel_pages,
3888 							kernelcore_remaining);
3889 				required_kernelcore -= min(kernel_pages,
3890 							required_kernelcore);
3891 
3892 				/* Continue if range is now fully accounted */
3893 				if (end_pfn <= usable_startpfn) {
3894 
3895 					/*
3896 					 * Push zone_movable_pfn to the end so
3897 					 * that if we have to rebalance
3898 					 * kernelcore across nodes, we will
3899 					 * not double account here
3900 					 */
3901 					zone_movable_pfn[nid] = end_pfn;
3902 					continue;
3903 				}
3904 				start_pfn = usable_startpfn;
3905 			}
3906 
3907 			/*
3908 			 * The usable PFN range for ZONE_MOVABLE is from
3909 			 * start_pfn->end_pfn. Calculate size_pages as the
3910 			 * number of pages used as kernelcore
3911 			 */
3912 			size_pages = end_pfn - start_pfn;
3913 			if (size_pages > kernelcore_remaining)
3914 				size_pages = kernelcore_remaining;
3915 			zone_movable_pfn[nid] = start_pfn + size_pages;
3916 
3917 			/*
3918 			 * Some kernelcore has been accounted for; update
3919 			 * the counts and break if the kernelcore for this
3920 			 * node has been satisfied
3921 			 */
3922 			required_kernelcore -= min(required_kernelcore,
3923 								size_pages);
3924 			kernelcore_remaining -= size_pages;
3925 			if (!kernelcore_remaining)
3926 				break;
3927 		}
3928 	}
3929 
3930 	/*
3931 	 * If there is still required_kernelcore, we do another pass with one
3932 	 * fewer node in the count. This will push zone_movable_pfn[nid] further
3933 	 * along on the nodes that still have memory until kernelcore is
3934 	 * satisfied
3935 	 */
3936 	usable_nodes--;
3937 	if (usable_nodes && required_kernelcore > usable_nodes)
3938 		goto restart;
3939 
3940 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3941 	for (nid = 0; nid < MAX_NUMNODES; nid++)
3942 		zone_movable_pfn[nid] =
3943 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3944 }
3945 
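/*
 * Editor's illustrative sketch (not part of the original source): a
 * simplified userspace model of the even kernelcore spread with the
 * restart pass used by find_zone_movable_pfns_for_nodes() above. It
 * ignores the per-range usable_startpfn split and ZONE_MOVABLE PFN
 * tracking; the toy node sizes are assumptions. Guarded by #if 0.
 */
#if 0
#include <stdio.h>

#define NR_NODES 3

int main(void)
{
	/* assumed usable pages per node, for illustration only */
	unsigned long node_pages[NR_NODES] = { 1000, 4000, 4000 };
	unsigned long required_kernelcore = 6000;
	int usable_nodes = NR_NODES;
	unsigned long kernelcore_node;
	int nid;

restart:
	/* spread kernelcore as evenly as possible across the nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for (nid = 0; nid < NR_NODES; nid++) {
		unsigned long got = node_pages[nid] < kernelcore_node ?
				    node_pages[nid] : kernelcore_node;

		node_pages[nid] -= got;
		required_kernelcore -= got;
		if (got)
			printf("node %d takes %lu kernelcore pages\n",
			       nid, got);
	}
	/* small nodes could not take their share: retry with one fewer */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > (unsigned long)usable_nodes)
		goto restart;
	return 0;
}
#endif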
3946 /* Any regular memory on that node? */
3947 static void check_for_regular_memory(pg_data_t *pgdat)
3948 {
3949 #ifdef CONFIG_HIGHMEM
3950 	enum zone_type zone_type;
3951 
3952 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3953 		struct zone *zone = &pgdat->node_zones[zone_type];
3954 		if (zone->present_pages)
3955 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3956 	}
3957 #endif
3958 }
3959 
3960 /**
3961  * free_area_init_nodes - Initialise all pg_data_t and zone data
3962  * @max_zone_pfn: an array of max PFNs for each zone
3963  *
3964  * This will call free_area_init_node() for each active node in the system.
3965  * Using the page ranges provided by add_active_range(), the sizes of the
3966  * zones and their holes are calculated for each node. If the maximum PFNs
3967  * of two adjacent zones match, the higher zone is assumed to be empty.
3968  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3969  * that ZONE_DMA32 has no pages. It is also assumed that a zone
3970  * starts where the previous one ended. For example, ZONE_DMA32 starts
3971  * at arch_max_dma_pfn.
3972  */
3973 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3974 {
3975 	unsigned long nid;
3976 	int i;
3977 
3978 	/* Sort early_node_map as initialisation assumes it is sorted */
3979 	sort_node_map();
3980 
3981 	/* Record where the zone boundaries are */
3982 	memset(arch_zone_lowest_possible_pfn, 0,
3983 				sizeof(arch_zone_lowest_possible_pfn));
3984 	memset(arch_zone_highest_possible_pfn, 0,
3985 				sizeof(arch_zone_highest_possible_pfn));
3986 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3987 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3988 	for (i = 1; i < MAX_NR_ZONES; i++) {
3989 		if (i == ZONE_MOVABLE)
3990 			continue;
3991 		arch_zone_lowest_possible_pfn[i] =
3992 			arch_zone_highest_possible_pfn[i-1];
3993 		arch_zone_highest_possible_pfn[i] =
3994 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3995 	}
3996 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3997 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3998 
3999 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4000 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4001 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4002 
4003 	/* Print out the zone ranges */
4004 	printk("Zone PFN ranges:\n");
4005 	for (i = 0; i < MAX_NR_ZONES; i++) {
4006 		if (i == ZONE_MOVABLE)
4007 			continue;
4008 		printk("  %-8s %0#10lx -> %0#10lx\n",
4009 				zone_names[i],
4010 				arch_zone_lowest_possible_pfn[i],
4011 				arch_zone_highest_possible_pfn[i]);
4012 	}
4013 
4014 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4015 	printk("Movable zone start PFN for each node\n");
4016 	for (i = 0; i < MAX_NUMNODES; i++) {
4017 		if (zone_movable_pfn[i])
4018 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4019 	}
4020 
4021 	/* Print out the early_node_map[] */
4022 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4023 	for (i = 0; i < nr_nodemap_entries; i++)
4024 		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4025 						early_node_map[i].start_pfn,
4026 						early_node_map[i].end_pfn);
4027 
4028 	/* Initialise every node */
4029 	mminit_verify_pageflags_layout();
4030 	setup_nr_node_ids();
4031 	for_each_online_node(nid) {
4032 		pg_data_t *pgdat = NODE_DATA(nid);
4033 		free_area_init_node(nid, NULL,
4034 				find_min_pfn_for_node(nid), NULL);
4035 
4036 		/* Any memory on that node */
4037 		if (pgdat->node_present_pages)
4038 			node_set_state(nid, N_HIGH_MEMORY);
4039 		check_for_regular_memory(pgdat);
4040 	}
4041 }
4042 
4043 static int __init cmdline_parse_core(char *p, unsigned long *core)
4044 {
4045 	unsigned long long coremem;
4046 	if (!p)
4047 		return -EINVAL;
4048 
4049 	coremem = memparse(p, &p);
4050 	*core = coremem >> PAGE_SHIFT;
4051 
4052 	/* Paranoid check that UL is enough for the coremem value */
4053 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4054 
4055 	return 0;
4056 }
4057 
4058 /*
4059  * kernelcore=size sets the amount of memory for use for allocations that
4060  * cannot be reclaimed or migrated.
4061  */
4062 static int __init cmdline_parse_kernelcore(char *p)
4063 {
4064 	return cmdline_parse_core(p, &required_kernelcore);
4065 }
4066 
4067 /*
4068  * movablecore=size sets the amount of memory for use for allocations that
4069  * can be reclaimed or migrated.
4070  */
4071 static int __init cmdline_parse_movablecore(char *p)
4072 {
4073 	return cmdline_parse_core(p, &required_movablecore);
4074 }
4075 
4076 early_param("kernelcore", cmdline_parse_kernelcore);
4077 early_param("movablecore", cmdline_parse_movablecore);
4078 
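/*
 * Editor's illustrative sketch (not part of the original source): how
 * cmdline_parse_core() above turns a kernelcore=/movablecore= string
 * into a page count. memparse() here is a userspace stand-in for the
 * kernel helper, and PAGE_SHIFT 12 is assumed; guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse(const char *p, char **retptr)
{
	unsigned long long v = strtoull(p, retptr, 0);

	switch (**retptr) {
	case 'g': case 'G':
		v <<= 10;
		/* fall through */
	case 'm': case 'M':
		v <<= 10;
		/* fall through */
	case 'k': case 'K':
		v <<= 10;
		(*retptr)++;
	}
	return v;
}

int main(void)
{
	char *end;
	unsigned long long coremem = memparse("512M", &end);

	/* kernelcore=512M -> 131072 pages with 4K pages */
	printf("512M = %llu bytes = %llu pages\n", coremem, coremem >> 12);
	return 0;
}
#endif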
4079 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4080 
4081 /**
4082  * set_dma_reserve - set the specified number of pages reserved in the first zone
4083  * @new_dma_reserve: The number of pages to mark reserved
4084  *
4085  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4086  * In the DMA zone, a significant percentage may be consumed by kernel image
4087  * and other unfreeable allocations which can skew the watermarks badly. This
4088  * function may optionally be used to account for unfreeable pages in the
4089  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4090  * smaller per-cpu batchsize.
4091  */
4092 void __init set_dma_reserve(unsigned long new_dma_reserve)
4093 {
4094 	dma_reserve = new_dma_reserve;
4095 }
4096 
4097 #ifndef CONFIG_NEED_MULTIPLE_NODES
4098 struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4099 EXPORT_SYMBOL(contig_page_data);
4100 #endif
4101 
4102 void __init free_area_init(unsigned long *zones_size)
4103 {
4104 	free_area_init_node(0, zones_size,
4105 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4106 }
4107 
4108 static int page_alloc_cpu_notify(struct notifier_block *self,
4109 				 unsigned long action, void *hcpu)
4110 {
4111 	int cpu = (unsigned long)hcpu;
4112 
4113 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4114 		drain_pages(cpu);
4115 
4116 		/*
4117 		 * Spill the event counters of the dead processor
4118 		 * into the current processor's event counters.
4119 		 * This artificially elevates the count of the current
4120 		 * processor.
4121 		 */
4122 		vm_events_fold_cpu(cpu);
4123 
4124 		/*
4125 		 * Zero the differential counters of the dead processor
4126 		 * so that the vm statistics are consistent.
4127 		 *
4128 		 * This is only okay since the processor is dead and cannot
4129 		 * race with what we are doing.
4130 		 */
4131 		refresh_cpu_vm_stats(cpu);
4132 	}
4133 	return NOTIFY_OK;
4134 }
4135 
4136 void __init page_alloc_init(void)
4137 {
4138 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4139 }
4140 
4141 /*
4142  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4143  *	or min_free_kbytes changes.
4144  */
4145 static void calculate_totalreserve_pages(void)
4146 {
4147 	struct pglist_data *pgdat;
4148 	unsigned long reserve_pages = 0;
4149 	enum zone_type i, j;
4150 
4151 	for_each_online_pgdat(pgdat) {
4152 		for (i = 0; i < MAX_NR_ZONES; i++) {
4153 			struct zone *zone = pgdat->node_zones + i;
4154 			unsigned long max = 0;
4155 
4156 			/* Find valid and maximum lowmem_reserve in the zone */
4157 			for (j = i; j < MAX_NR_ZONES; j++) {
4158 				if (zone->lowmem_reserve[j] > max)
4159 					max = zone->lowmem_reserve[j];
4160 			}
4161 
4162 			/* we treat pages_high as reserved pages. */
4163 			max += zone->pages_high;
4164 
4165 			if (max > zone->present_pages)
4166 				max = zone->present_pages;
4167 			reserve_pages += max;
4168 		}
4169 	}
4170 	totalreserve_pages = reserve_pages;
4171 }
4172 
4173 /*
4174  * setup_per_zone_lowmem_reserve - called whenever
4175  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4176  *	has a correct pages reserved value, so an adequate number of
4177  *	pages are left in the zone after a successful __alloc_pages().
4178  */
4179 static void setup_per_zone_lowmem_reserve(void)
4180 {
4181 	struct pglist_data *pgdat;
4182 	enum zone_type j, idx;
4183 
4184 	for_each_online_pgdat(pgdat) {
4185 		for (j = 0; j < MAX_NR_ZONES; j++) {
4186 			struct zone *zone = pgdat->node_zones + j;
4187 			unsigned long present_pages = zone->present_pages;
4188 
4189 			zone->lowmem_reserve[j] = 0;
4190 
4191 			idx = j;
4192 			while (idx) {
4193 				struct zone *lower_zone;
4194 
4195 				idx--;
4196 
4197 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4198 					sysctl_lowmem_reserve_ratio[idx] = 1;
4199 
4200 				lower_zone = pgdat->node_zones + idx;
4201 				lower_zone->lowmem_reserve[j] = present_pages /
4202 					sysctl_lowmem_reserve_ratio[idx];
4203 				present_pages += lower_zone->present_pages;
4204 			}
4205 		}
4206 	}
4207 
4208 	/* update totalreserve_pages */
4209 	calculate_totalreserve_pages();
4210 }
4211 
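/*
 * Editor's illustrative sketch (not part of the original source): the
 * lowmem_reserve calculation in setup_per_zone_lowmem_reserve() above,
 * modelled in userspace for three zones. The zone sizes and the
 * 256/32/32 ratios are assumed example values; guarded by #if 0.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* assumed pages per zone, lowest to highest (e.g. DMA/Normal/HighMem) */
	unsigned long present[3] = { 4096, 225280, 32768 };
	int ratio[3] = { 256, 32, 32 };
	unsigned long reserve[3][3] = { { 0 } };
	int upper, lower;

	for (upper = 0; upper < 3; upper++) {
		unsigned long present_pages = present[upper];
		int idx = upper;

		/* walk down: each lower zone holds back pages against upper */
		while (idx) {
			idx--;
			reserve[idx][upper] = present_pages / ratio[idx];
			present_pages += present[idx];
		}
	}
	for (lower = 0; lower < 3; lower++)
		for (upper = 0; upper < 3; upper++)
			if (reserve[lower][upper])
				printf("zone %d reserves %lu pages against zone-%d allocations\n",
				       lower, reserve[lower][upper], upper);
	return 0;
}
#endif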
4212 /**
4213  * setup_per_zone_pages_min - called when min_free_kbytes changes.
4214  *
4215  * Ensures that the pages_{min,low,high} values for each zone are set correctly
4216  * with respect to min_free_kbytes.
4217  */
4218 void setup_per_zone_pages_min(void)
4219 {
4220 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4221 	unsigned long lowmem_pages = 0;
4222 	struct zone *zone;
4223 	unsigned long flags;
4224 
4225 	/* Calculate total number of !ZONE_HIGHMEM pages */
4226 	for_each_zone(zone) {
4227 		if (!is_highmem(zone))
4228 			lowmem_pages += zone->present_pages;
4229 	}
4230 
4231 	for_each_zone(zone) {
4232 		u64 tmp;
4233 
4234 		spin_lock_irqsave(&zone->lock, flags);
4235 		tmp = (u64)pages_min * zone->present_pages;
4236 		do_div(tmp, lowmem_pages);
4237 		if (is_highmem(zone)) {
4238 			/*
4239 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4240 			 * need highmem pages, so cap pages_min to a small
4241 			 * value here.
4242 			 *
4243 			 * The (pages_high-pages_low) and (pages_low-pages_min)
4244 			 * deltas control async page reclaim, and so should
4245 			 * not be capped for highmem.
4246 			 */
4247 			int min_pages;
4248 
4249 			min_pages = zone->present_pages / 1024;
4250 			if (min_pages < SWAP_CLUSTER_MAX)
4251 				min_pages = SWAP_CLUSTER_MAX;
4252 			if (min_pages > 128)
4253 				min_pages = 128;
4254 			zone->pages_min = min_pages;
4255 		} else {
4256 			/*
4257 			 * If it's a lowmem zone, reserve a number of pages
4258 			 * proportionate to the zone's size.
4259 			 */
4260 			zone->pages_min = tmp;
4261 		}
4262 
4263 		zone->pages_low   = zone->pages_min + (tmp >> 2);
4264 		zone->pages_high  = zone->pages_min + (tmp >> 1);
4265 		setup_zone_migrate_reserve(zone);
4266 		spin_unlock_irqrestore(&zone->lock, flags);
4267 	}
4268 
4269 	/* update totalreserve_pages */
4270 	calculate_totalreserve_pages();
4271 }
4272 
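/*
 * Editor's illustrative sketch (not part of the original source): the
 * lowmem branch of setup_per_zone_pages_min() above. The 4GB of
 * lowmem, the 1GB zone, the 4K page size and the min_free_kbytes
 * value are all assumptions for illustration; guarded by #if 0.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int page_shift = 12;			/* assumed 4K pages */
	unsigned long min_free_kbytes = 8192;
	unsigned long lowmem_pages = 1048576;	/* 4GB of lowmem */
	unsigned long zone_present = 262144;	/* a 1GB zone */
	unsigned long pages_min = min_free_kbytes >> (page_shift - 10);
	unsigned long long tmp;

	/* the zone gets a share of pages_min proportional to its size */
	tmp = (unsigned long long)pages_min * zone_present / lowmem_pages;
	printf("pages_min=%llu pages_low=%llu pages_high=%llu\n",
	       tmp, tmp + (tmp >> 2), tmp + (tmp >> 1));
	/* prints 512, 640, 768 */
	return 0;
}
#endif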
4273 /**
4274  * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4275  *
4276  * The inactive anon list should be small enough that the VM never has to
4277  * do too much work, but large enough that each inactive page has a chance
4278  * to be referenced again before it is swapped out.
4279  *
4280  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4281  * INACTIVE_ANON pages on this zone's LRU, maintained by the
4282  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4283  * the anonymous pages are kept on the inactive list.
4284  *
4285  * total     target    max
4286  * memory    ratio     inactive anon
4287  * -------------------------------------
4288  *   10MB       1         5MB
4289  *  100MB       1        50MB
4290  *    1GB       3       250MB
4291  *   10GB      10       0.9GB
4292  *  100GB      31         3GB
4293  *    1TB     101        10GB
4294  *   10TB     320        32GB
4295  */
4296 static void setup_per_zone_inactive_ratio(void)
4297 {
4298 	struct zone *zone;
4299 
4300 	for_each_zone(zone) {
4301 		unsigned int gb, ratio;
4302 
4303 		/* Zone size in gigabytes */
4304 		gb = zone->present_pages >> (30 - PAGE_SHIFT);
4305 		ratio = int_sqrt(10 * gb);
4306 		if (!ratio)
4307 			ratio = 1;
4308 
4309 		zone->inactive_ratio = ratio;
4310 	}
4311 }
4312 
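/*
 * Editor's illustrative sketch (not part of the original source): the
 * inactive_ratio = int_sqrt(10 * gb) formula above, reproducing the
 * table in the comment. A naive integer square root stands in for the
 * kernel's int_sqrt(), and 4K pages are assumed; guarded by #if 0.
 */
#if 0
#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	int page_shift = 12;	/* assumed 4K pages */
	unsigned long present_pages[] = { 262144UL, 2621440UL, 26214400UL };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long gb = present_pages[i] >> (30 - page_shift);
		unsigned long ratio = int_sqrt(10 * gb);

		if (!ratio)
			ratio = 1;
		/* 1GB -> 3, 10GB -> 10, 100GB -> 31, matching the table */
		printf("%lu pages (%luGB): inactive_ratio %lu\n",
		       present_pages[i], gb, ratio);
	}
	return 0;
}
#endif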
4313 /*
4314  * Initialise min_free_kbytes.
4315  *
4316  * For small machines we want it small (128k min).  For large machines
4317  * we want it large (64MB max).  But it is not linear, because network
4318  * bandwidth does not increase linearly with machine size.  We use
4319  *
4320  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4321  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4322  *
4323  * which yields
4324  *
4325  * 16MB:	512k
4326  * 32MB:	724k
4327  * 64MB:	1024k
4328  * 128MB:	1448k
4329  * 256MB:	2048k
4330  * 512MB:	2896k
4331  * 1024MB:	4096k
4332  * 2048MB:	5792k
4333  * 4096MB:	8192k
4334  * 8192MB:	11584k
4335  * 16384MB:	16384k
4336  */
4337 static int __init init_per_zone_pages_min(void)
4338 {
4339 	unsigned long lowmem_kbytes;
4340 
4341 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4342 
4343 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4344 	if (min_free_kbytes < 128)
4345 		min_free_kbytes = 128;
4346 	if (min_free_kbytes > 65536)
4347 		min_free_kbytes = 65536;
4348 	setup_per_zone_pages_min();
4349 	setup_per_zone_lowmem_reserve();
4350 	setup_per_zone_inactive_ratio();
4351 	return 0;
4352 }
4353 module_init(init_per_zone_pages_min)
4354 
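/*
 * Editor's illustrative sketch (not part of the original source): the
 * min_free_kbytes = sqrt(lowmem_kbytes * 16) formula above, with the
 * same 128k/64MB clamps, reproducing the table in the comment. The
 * naive int_sqrt() is a stand-in for the kernel's; guarded by #if 0.
 */
#if 0
#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	unsigned long lowmem_kbytes[] = { 16384, 1048576, 16777216 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long min_free = int_sqrt(lowmem_kbytes[i] * 16);

		if (min_free < 128)
			min_free = 128;
		if (min_free > 65536)
			min_free = 65536;
		/* 16MB -> 512k, 1024MB -> 4096k, 16384MB -> 16384k */
		printf("%luMB lowmem: min_free_kbytes = %lu\n",
		       lowmem_kbytes[i] >> 10, min_free);
	}
	return 0;
}
#endif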
4355 /*
4356  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4357  *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
4358  *	changes.
4359  */
4360 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4361 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4362 {
4363 	proc_dointvec(table, write, file, buffer, length, ppos);
4364 	if (write)
4365 		setup_per_zone_pages_min();
4366 	return 0;
4367 }
4368 
4369 #ifdef CONFIG_NUMA
4370 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4371 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4372 {
4373 	struct zone *zone;
4374 	int rc;
4375 
4376 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4377 	if (rc)
4378 		return rc;
4379 
4380 	for_each_zone(zone)
4381 		zone->min_unmapped_pages = (zone->present_pages *
4382 				sysctl_min_unmapped_ratio) / 100;
4383 	return 0;
4384 }
4385 
4386 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4387 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4388 {
4389 	struct zone *zone;
4390 	int rc;
4391 
4392 	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4393 	if (rc)
4394 		return rc;
4395 
4396 	for_each_zone(zone)
4397 		zone->min_slab_pages = (zone->present_pages *
4398 				sysctl_min_slab_ratio) / 100;
4399 	return 0;
4400 }
4401 #endif
4402 
4403 /*
4404  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4405  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4406  *	whenever sysctl_lowmem_reserve_ratio changes.
4407  *
4408  * The reserve ratio has no relation to the pages_min watermarks.
4409  * The lowmem reserve ratio is only meaningful as a function of the
4410  * boot-time zone sizes.
4411  */
4412 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4413 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4414 {
4415 	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4416 	setup_per_zone_lowmem_reserve();
4417 	return 0;
4418 }
4419 
4420 /*
4421  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4422  * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
4423  * pagelist can have before it gets flushed back to the buddy allocator.
4424  */
4425 
4426 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4427 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4428 {
4429 	struct zone *zone;
4430 	unsigned int cpu;
4431 	int ret;
4432 
4433 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4434 	if (!write || (ret == -EINVAL))
4435 		return ret;
4436 	for_each_zone(zone) {
4437 		for_each_online_cpu(cpu) {
4438 			unsigned long  high;
4439 			high = zone->present_pages / percpu_pagelist_fraction;
4440 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4441 		}
4442 	}
4443 	return 0;
4444 }
4445 
4446 int hashdist = HASHDIST_DEFAULT;
4447 
4448 #ifdef CONFIG_NUMA
4449 static int __init set_hashdist(char *str)
4450 {
4451 	if (!str)
4452 		return 0;
4453 	hashdist = simple_strtoul(str, &str, 0);
4454 	return 1;
4455 }
4456 __setup("hashdist=", set_hashdist);
4457 #endif
4458 
4459 /*
4460  * allocate a large system hash table from bootmem
4461  * - it is assumed that the hash table must contain an exact power-of-2
4462  *   quantity of entries
4463  * - limit is the number of hash buckets, not the total allocation size
4464  */
4465 void *__init alloc_large_system_hash(const char *tablename,
4466 				     unsigned long bucketsize,
4467 				     unsigned long numentries,
4468 				     int scale,
4469 				     int flags,
4470 				     unsigned int *_hash_shift,
4471 				     unsigned int *_hash_mask,
4472 				     unsigned long limit)
4473 {
4474 	unsigned long long max = limit;
4475 	unsigned long log2qty, size;
4476 	void *table = NULL;
4477 
4478 	/* allow the kernel cmdline to have a say */
4479 	if (!numentries) {
4480 		/* round applicable memory size up to nearest megabyte */
4481 		numentries = nr_kernel_pages;
4482 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4483 		numentries >>= 20 - PAGE_SHIFT;
4484 		numentries <<= 20 - PAGE_SHIFT;
4485 
4486 		/* limit to 1 bucket per 2^scale bytes of low memory */
4487 		if (scale > PAGE_SHIFT)
4488 			numentries >>= (scale - PAGE_SHIFT);
4489 		else
4490 			numentries <<= (PAGE_SHIFT - scale);
4491 
4492 		/* Make sure we've got at least a 0-order allocation.. */
4493 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4494 			numentries = PAGE_SIZE / bucketsize;
4495 	}
4496 	numentries = roundup_pow_of_two(numentries);
4497 
4498 	/* limit allocation size to 1/16 total memory by default */
4499 	if (max == 0) {
4500 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4501 		do_div(max, bucketsize);
4502 	}
4503 
4504 	if (numentries > max)
4505 		numentries = max;
4506 
4507 	log2qty = ilog2(numentries);
4508 
4509 	do {
4510 		size = bucketsize << log2qty;
4511 		if (flags & HASH_EARLY)
4512 			table = alloc_bootmem_nopanic(size);
4513 		else if (hashdist)
4514 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4515 		else {
4516 			unsigned long order = get_order(size);
4517 			table = (void*) __get_free_pages(GFP_ATOMIC, order);
4518 			/*
4519 			 * If bucketsize is not a power-of-two, we may free
4520 			 * some pages at the end of the hash table.
4521 			 */
4522 			if (table) {
4523 				unsigned long alloc_end = (unsigned long)table +
4524 						(PAGE_SIZE << order);
4525 				unsigned long used = (unsigned long)table +
4526 						PAGE_ALIGN(size);
4527 				split_page(virt_to_page(table), order);
4528 				while (used < alloc_end) {
4529 					free_page(used);
4530 					used += PAGE_SIZE;
4531 				}
4532 			}
4533 		}
4534 	} while (!table && size > PAGE_SIZE && --log2qty);
4535 
4536 	if (!table)
4537 		panic("Failed to allocate %s hash table\n", tablename);
4538 
4539 	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4540 	       tablename,
4541 	       (1U << log2qty),
4542 	       ilog2(size) - PAGE_SHIFT,
4543 	       size);
4544 
4545 	if (_hash_shift)
4546 		*_hash_shift = log2qty;
4547 	if (_hash_mask)
4548 		*_hash_mask = (1 << log2qty) - 1;
4549 
4550 	/*
4551 	 * If hashdist is set, the table allocation is done with __vmalloc()
4552 	 * which invokes the kmemleak_alloc() callback. This function may also
4553 	 * be called before the slab and kmemleak are initialised, in which
4554 	 * case kmemleak simply buffers the request to be executed later
4555 	 * (the GFP_ATOMIC flag is ignored in this case).
4556 	 */
4557 	if (!hashdist)
4558 		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4559 
4560 	return table;
4561 }
4562 
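/*
 * Editor's illustrative sketch (not part of the original source): the
 * sizing arithmetic of alloc_large_system_hash() above, modelled in
 * userspace. The 1GB of kernel pages, the 8-byte bucket size and the
 * scale value are assumptions for illustration, and scale is assumed
 * to be larger than the page shift; guarded by #if 0.
 */
#if 0
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned long n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned long numentries = 262144;	/* assumed nr_kernel_pages */
	unsigned long bucketsize = 8;		/* assumed bucket size */
	int scale = 14, page_shift = 12;
	unsigned int shift;

	/* round up to whole megabytes of pages, then apply the scale */
	numentries += (1UL << (20 - page_shift)) - 1;
	numentries >>= 20 - page_shift;
	numentries <<= 20 - page_shift;
	numentries >>= scale - page_shift;	/* 1 bucket per 2^scale bytes */

	numentries = roundup_pow_of_two(numentries);
	shift = ilog2(numentries);
	printf("%lu entries, shift %u, mask %#lx, %lu bytes\n",
	       numentries, shift, numentries - 1, bucketsize << shift);
	return 0;
}
#endif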
4563 /* Return a pointer to the bitmap storing bits affecting a block of pages */
4564 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4565 							unsigned long pfn)
4566 {
4567 #ifdef CONFIG_SPARSEMEM
4568 	return __pfn_to_section(pfn)->pageblock_flags;
4569 #else
4570 	return zone->pageblock_flags;
4571 #endif /* CONFIG_SPARSEMEM */
4572 }
4573 
4574 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4575 {
4576 #ifdef CONFIG_SPARSEMEM
4577 	pfn &= (PAGES_PER_SECTION-1);
4578 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4579 #else
4580 	pfn = pfn - zone->zone_start_pfn;
4581 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4582 #endif /* CONFIG_SPARSEMEM */
4583 }
4584 
4585 /**
4586  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4587  * @page: The page within the block of interest
4588  * @start_bitidx: The first bit of interest to retrieve
4589  * @end_bitidx: The last bit of interest
4590  * returns pageblock_bits flags
4591  */
4592 unsigned long get_pageblock_flags_group(struct page *page,
4593 					int start_bitidx, int end_bitidx)
4594 {
4595 	struct zone *zone;
4596 	unsigned long *bitmap;
4597 	unsigned long pfn, bitidx;
4598 	unsigned long flags = 0;
4599 	unsigned long value = 1;
4600 
4601 	zone = page_zone(page);
4602 	pfn = page_to_pfn(page);
4603 	bitmap = get_pageblock_bitmap(zone, pfn);
4604 	bitidx = pfn_to_bitidx(zone, pfn);
4605 
4606 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4607 		if (test_bit(bitidx + start_bitidx, bitmap))
4608 			flags |= value;
4609 
4610 	return flags;
4611 }
4612 
4613 /**
4614  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4615  * @page: The page within the block of interest
4616  * @start_bitidx: The first bit of interest
4617  * @end_bitidx: The last bit of interest
4618  * @flags: The flags to set
4619  */
4620 void set_pageblock_flags_group(struct page *page, unsigned long flags,
4621 					int start_bitidx, int end_bitidx)
4622 {
4623 	struct zone *zone;
4624 	unsigned long *bitmap;
4625 	unsigned long pfn, bitidx;
4626 	unsigned long value = 1;
4627 
4628 	zone = page_zone(page);
4629 	pfn = page_to_pfn(page);
4630 	bitmap = get_pageblock_bitmap(zone, pfn);
4631 	bitidx = pfn_to_bitidx(zone, pfn);
4632 	VM_BUG_ON(pfn < zone->zone_start_pfn);
4633 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4634 
4635 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4636 		if (flags & value)
4637 			__set_bit(bitidx + start_bitidx, bitmap);
4638 		else
4639 			__clear_bit(bitidx + start_bitidx, bitmap);
4640 }
4641 
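/*
 * Editor's illustrative sketch (not part of the original source): a
 * flat-memory userspace model of pfn_to_bitidx() and the flag-group
 * get/set loops above. Pageblock order 10, the 4-bit group width and
 * the PFN are assumptions for illustration; guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PAGEBLOCK_ORDER		10	/* assumed: pageblock = 1024 pages */
#define NR_PAGEBLOCK_BITS	4

static uint64_t bitmap[64];

static unsigned long pfn_to_bitidx(unsigned long pfn)
{
	return (pfn >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

static void set_flags(unsigned long pfn, unsigned long flags,
		      int start, int end)
{
	unsigned long bitidx = pfn_to_bitidx(pfn), value = 1;

	for (; start <= end; start++, value <<= 1)
		if (flags & value)
			bitmap[(bitidx + start) / 64] |=
				(uint64_t)1 << ((bitidx + start) % 64);
		else
			bitmap[(bitidx + start) / 64] &=
				~((uint64_t)1 << ((bitidx + start) % 64));
}

static unsigned long get_flags(unsigned long pfn, int start, int end)
{
	unsigned long bitidx = pfn_to_bitidx(pfn), flags = 0, value = 1;

	for (; start <= end; start++, value <<= 1)
		if (bitmap[(bitidx + start) / 64] &
		    ((uint64_t)1 << ((bitidx + start) % 64)))
			flags |= value;
	return flags;
}

int main(void)
{
	/* set the 3-bit migratetype-style group of pfn 5000's block */
	set_flags(5000, 0x2, 0, 2);
	printf("block flags: %#lx\n", get_flags(5000, 0, 2));	/* 0x2 */
	return 0;
}
#endif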
4642 /*
4643  * This is designed as sub function...plz see page_isolation.c also.
4644  * set/clear page block's type to be ISOLATE.
4645  * page allocater never alloc memory from ISOLATE block.
4646  */
4647 
4648 int set_migratetype_isolate(struct page *page)
4649 {
4650 	struct zone *zone;
4651 	unsigned long flags;
4652 	int ret = -EBUSY;
4653 
4654 	zone = page_zone(page);
4655 	spin_lock_irqsave(&zone->lock, flags);
4656 	/*
4657 	 * In the future, more migrate types may become isolation targets.
4658 	 */
4659 	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4660 		goto out;
4661 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4662 	move_freepages_block(zone, page, MIGRATE_ISOLATE);
4663 	ret = 0;
4664 out:
4665 	spin_unlock_irqrestore(&zone->lock, flags);
4666 	if (!ret)
4667 		drain_all_pages();
4668 	return ret;
4669 }
4670 
4671 void unset_migratetype_isolate(struct page *page)
4672 {
4673 	struct zone *zone;
4674 	unsigned long flags;
4675 	zone = page_zone(page);
4676 	spin_lock_irqsave(&zone->lock, flags);
4677 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4678 		goto out;
4679 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4680 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
4681 out:
4682 	spin_unlock_irqrestore(&zone->lock, flags);
4683 }
4684 
4685 #ifdef CONFIG_MEMORY_HOTREMOVE
4686 /*
4687  * All pages in the range must be isolated before calling this.
4688  */
4689 void
4690 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4691 {
4692 	struct page *page;
4693 	struct zone *zone;
4694 	int order, i;
4695 	unsigned long pfn;
4696 	unsigned long flags;
4697 	/* find the first valid pfn */
4698 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
4699 		if (pfn_valid(pfn))
4700 			break;
4701 	if (pfn == end_pfn)
4702 		return;
4703 	zone = page_zone(pfn_to_page(pfn));
4704 	spin_lock_irqsave(&zone->lock, flags);
4705 	pfn = start_pfn;
4706 	while (pfn < end_pfn) {
4707 		if (!pfn_valid(pfn)) {
4708 			pfn++;
4709 			continue;
4710 		}
4711 		page = pfn_to_page(pfn);
4712 		BUG_ON(page_count(page));
4713 		BUG_ON(!PageBuddy(page));
4714 		order = page_order(page);
4715 #ifdef CONFIG_DEBUG_VM
4716 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
4717 		       pfn, 1 << order, end_pfn);
4718 #endif
4719 		list_del(&page->lru);
4720 		rmv_page_order(page);
4721 		zone->free_area[order].nr_free--;
4722 		__mod_zone_page_state(zone, NR_FREE_PAGES,
4723 				      - (1UL << order));
4724 		for (i = 0; i < (1 << order); i++)
4725 			SetPageReserved((page+i));
4726 		pfn += (1 << order);
4727 	}
4728 	spin_unlock_irqrestore(&zone->lock, flags);
4729 }
4730 #endif
4731