xref: /openbmc/linux/mm/page_alloc.c (revision a09d2831)
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/kernel.h>
26 #include <linux/kmemcheck.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/slab.h>
32 #include <linux/oom.h>
33 #include <linux/notifier.h>
34 #include <linux/topology.h>
35 #include <linux/sysctl.h>
36 #include <linux/cpu.h>
37 #include <linux/cpuset.h>
38 #include <linux/memory_hotplug.h>
39 #include <linux/nodemask.h>
40 #include <linux/vmalloc.h>
41 #include <linux/mempolicy.h>
42 #include <linux/stop_machine.h>
43 #include <linux/sort.h>
44 #include <linux/pfn.h>
45 #include <linux/backing-dev.h>
46 #include <linux/fault-inject.h>
47 #include <linux/page-isolation.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/debugobjects.h>
50 #include <linux/kmemleak.h>
51 #include <linux/memory.h>
52 #include <trace/events/kmem.h>
53 
54 #include <asm/tlbflush.h>
55 #include <asm/div64.h>
56 #include "internal.h"
57 
58 /*
59  * Array of node states.
60  */
61 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
62 	[N_POSSIBLE] = NODE_MASK_ALL,
63 	[N_ONLINE] = { { [0] = 1UL } },
64 #ifndef CONFIG_NUMA
65 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
66 #ifdef CONFIG_HIGHMEM
67 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
68 #endif
69 	[N_CPU] = { { [0] = 1UL } },
70 #endif	/* NUMA */
71 };
72 EXPORT_SYMBOL(node_states);
73 
74 unsigned long totalram_pages __read_mostly;
75 unsigned long totalreserve_pages __read_mostly;
76 int percpu_pagelist_fraction;
77 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
78 
79 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
80 int pageblock_order __read_mostly;
81 #endif
82 
83 static void __free_pages_ok(struct page *page, unsigned int order);
84 
85 /*
86  * results with 256, 32 in the lowmem_reserve sysctl:
87  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
88  *	1G machine -> (16M dma, 784M normal, 224M high)
89  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
90  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
91  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
92  *
93  * TBD: should special case ZONE_DMA32 machines here - in those we normally
94  * don't need any ZONE_NORMAL reservation
95  */
96 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
97 #ifdef CONFIG_ZONE_DMA
98 	 256,
99 #endif
100 #ifdef CONFIG_ZONE_DMA32
101 	 256,
102 #endif
103 #ifdef CONFIG_HIGHMEM
104 	 32,
105 #endif
106 	 32,
107 };
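
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * how a ratio above translates into a reserve.  With a ratio of 256, a
 * NORMAL allocation leaves roughly 784M/256 ~= 3M of ZONE_DMA untouched
 * on the 1G machine described in the comment.
 */
static inline unsigned long example_lowmem_reserve(unsigned long higher_zone_pages,
						   int ratio)
{
	return ratio ? higher_zone_pages / ratio : 0;	/* ratio 0: no reserve */
}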
108 
109 EXPORT_SYMBOL(totalram_pages);
110 
111 static char * const zone_names[MAX_NR_ZONES] = {
112 #ifdef CONFIG_ZONE_DMA
113 	 "DMA",
114 #endif
115 #ifdef CONFIG_ZONE_DMA32
116 	 "DMA32",
117 #endif
118 	 "Normal",
119 #ifdef CONFIG_HIGHMEM
120 	 "HighMem",
121 #endif
122 	 "Movable",
123 };
124 
125 int min_free_kbytes = 1024;
126 
127 static unsigned long __meminitdata nr_kernel_pages;
128 static unsigned long __meminitdata nr_all_pages;
129 static unsigned long __meminitdata dma_reserve;
130 
131 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
132   /*
133    * MAX_ACTIVE_REGIONS determines the maximum number of distinct
134    * ranges of memory (RAM) that may be registered with add_active_range().
135    * Ranges passed to add_active_range() will be merged if possible
136    * so the number of times add_active_range() can be called is
137    * related to the number of nodes and the number of holes
138    */
139   #ifdef CONFIG_MAX_ACTIVE_REGIONS
140     /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
141     #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
142   #else
143     #if MAX_NUMNODES >= 32
144       /* If there can be many nodes, allow up to 50 holes per node */
145       #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
146     #else
147       /* By default, allow up to 256 distinct regions */
148       #define MAX_ACTIVE_REGIONS 256
149     #endif
150   #endif
151 
152   static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
153   static int __meminitdata nr_nodemap_entries;
154   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
155   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
156   static unsigned long __initdata required_kernelcore;
157   static unsigned long __initdata required_movablecore;
158   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
159 
160   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
161   int movable_zone;
162   EXPORT_SYMBOL(movable_zone);
163 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
164 
165 #if MAX_NUMNODES > 1
166 int nr_node_ids __read_mostly = MAX_NUMNODES;
167 int nr_online_nodes __read_mostly = 1;
168 EXPORT_SYMBOL(nr_node_ids);
169 EXPORT_SYMBOL(nr_online_nodes);
170 #endif
171 
172 int page_group_by_mobility_disabled __read_mostly;
173 
174 static void set_pageblock_migratetype(struct page *page, int migratetype)
175 {
176 
177 	if (unlikely(page_group_by_mobility_disabled))
178 		migratetype = MIGRATE_UNMOVABLE;
179 
180 	set_pageblock_flags_group(page, (unsigned long)migratetype,
181 					PB_migrate, PB_migrate_end);
182 }
183 
184 bool oom_killer_disabled __read_mostly;
185 
186 #ifdef CONFIG_DEBUG_VM
187 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
188 {
189 	int ret = 0;
190 	unsigned seq;
191 	unsigned long pfn = page_to_pfn(page);
192 
193 	do {
194 		seq = zone_span_seqbegin(zone);
195 		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
196 			ret = 1;
197 		else if (pfn < zone->zone_start_pfn)
198 			ret = 1;
199 	} while (zone_span_seqretry(zone, seq));
200 
201 	return ret;
202 }
203 
204 static int page_is_consistent(struct zone *zone, struct page *page)
205 {
206 	if (!pfn_valid_within(page_to_pfn(page)))
207 		return 0;
208 	if (zone != page_zone(page))
209 		return 0;
210 
211 	return 1;
212 }
213 /*
214  * Temporary debugging check for pages not lying within a given zone.
215  */
216 static int bad_range(struct zone *zone, struct page *page)
217 {
218 	if (page_outside_zone_boundaries(zone, page))
219 		return 1;
220 	if (!page_is_consistent(zone, page))
221 		return 1;
222 
223 	return 0;
224 }
225 #else
226 static inline int bad_range(struct zone *zone, struct page *page)
227 {
228 	return 0;
229 }
230 #endif
231 
232 static void bad_page(struct page *page)
233 {
234 	static unsigned long resume;
235 	static unsigned long nr_shown;
236 	static unsigned long nr_unshown;
237 
238 	/* Don't complain about poisoned pages */
239 	if (PageHWPoison(page)) {
240 		__ClearPageBuddy(page);
241 		return;
242 	}
243 
244 	/*
245 	 * Allow a burst of 60 reports, then keep quiet for that minute;
246 	 * or allow a steady drip of one report per second.
247 	 */
248 	if (nr_shown == 60) {
249 		if (time_before(jiffies, resume)) {
250 			nr_unshown++;
251 			goto out;
252 		}
253 		if (nr_unshown) {
254 			printk(KERN_ALERT
255 			      "BUG: Bad page state: %lu messages suppressed\n",
256 				nr_unshown);
257 			nr_unshown = 0;
258 		}
259 		nr_shown = 0;
260 	}
261 	if (nr_shown++ == 0)
262 		resume = jiffies + 60 * HZ;
263 
264 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
265 		current->comm, page_to_pfn(page));
266 	printk(KERN_ALERT
267 		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
268 		page, (void *)page->flags, page_count(page),
269 		page_mapcount(page), page->mapping, page->index);
270 
271 	dump_stack();
272 out:
273 	/* Leave bad fields for debug, except PageBuddy could make trouble */
274 	__ClearPageBuddy(page);
275 	add_taint(TAINT_BAD_PAGE);
276 }
277 
278 /*
279  * Higher-order pages are called "compound pages".  They are structured thusly:
280  *
281  * The first PAGE_SIZE page is called the "head page".
282  *
283  * The remaining PAGE_SIZE pages are called "tail pages".
284  *
285  * All pages have PG_compound set.  All tail pages have their ->first_page
286  * pointing at the head page.
287  *
288  * The first tail page's ->lru.next holds the address of the compound page's
289  * put_page() function.  Its ->lru.prev holds the order of allocation.
290  * This usage means that zero-order pages may not be compound.
291  */
292 
293 static void free_compound_page(struct page *page)
294 {
295 	__free_pages_ok(page, compound_order(page));
296 }
297 
298 void prep_compound_page(struct page *page, unsigned long order)
299 {
300 	int i;
301 	int nr_pages = 1 << order;
302 
303 	set_compound_page_dtor(page, free_compound_page);
304 	set_compound_order(page, order);
305 	__SetPageHead(page);
306 	for (i = 1; i < nr_pages; i++) {
307 		struct page *p = page + i;
308 
309 		__SetPageTail(p);
310 		p->first_page = page;
311 	}
312 }
313 
314 static int destroy_compound_page(struct page *page, unsigned long order)
315 {
316 	int i;
317 	int nr_pages = 1 << order;
318 	int bad = 0;
319 
320 	if (unlikely(compound_order(page) != order) ||
321 	    unlikely(!PageHead(page))) {
322 		bad_page(page);
323 		bad++;
324 	}
325 
326 	__ClearPageHead(page);
327 
328 	for (i = 1; i < nr_pages; i++) {
329 		struct page *p = page + i;
330 
331 		if (unlikely(!PageTail(p) || (p->first_page != page))) {
332 			bad_page(page);
333 			bad++;
334 		}
335 		__ClearPageTail(p);
336 	}
337 
338 	return bad;
339 }
340 
341 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
342 {
343 	int i;
344 
345 	/*
346 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
347 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
348 	 */
349 	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
350 	for (i = 0; i < (1 << order); i++)
351 		clear_highpage(page + i);
352 }
353 
354 static inline void set_page_order(struct page *page, int order)
355 {
356 	set_page_private(page, order);
357 	__SetPageBuddy(page);
358 }
359 
360 static inline void rmv_page_order(struct page *page)
361 {
362 	__ClearPageBuddy(page);
363 	set_page_private(page, 0);
364 }
365 
366 /*
367  * Locate the struct page for both the matching buddy in our
368  * pair (buddy1) and the combined O(n+1) page they form (page).
369  *
370  * 1) Any buddy B1 will have an order O twin B2 which satisfies
371  * the following equation:
372  *     B2 = B1 ^ (1 << O)
373  * For example, if the starting buddy (B1) is #8, its order-1
374  * buddy (B2) is #10:
375  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
376  *
377  * 2) Any buddy B will have an order O+1 parent P which
378  * satisfies the following equation:
379  *     P = B & ~(1 << O)
380  *
381  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
382  */
383 static inline struct page *
384 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
385 {
386 	unsigned long buddy_idx = page_idx ^ (1 << order);
387 
388 	return page + (buddy_idx - page_idx);
389 }
390 
391 static inline unsigned long
392 __find_combined_index(unsigned long page_idx, unsigned int order)
393 {
394 	return (page_idx & ~(1 << order));
395 }
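
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * the buddy arithmetic above with concrete numbers.  For page index 8 at
 * order 1, the buddy index is 8 ^ (1 << 1) = 10 and the combined order-2
 * index is 8 & ~(1 << 1) = 8.
 */
static inline void example_buddy_arithmetic(void)
{
	unsigned long page_idx = 8;
	unsigned int order = 1;
	unsigned long buddy_idx = page_idx ^ (1UL << order);		/* 10 */
	unsigned long combined_idx = page_idx & ~(1UL << order);	/* 8 */

	(void)buddy_idx;
	(void)combined_idx;
}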
396 
397 /*
398  * This function checks whether a page is free && is the buddy.
399  * We can coalesce a page and its buddy if
400  * (a) the buddy is not in a hole &&
401  * (b) the buddy is in the buddy system &&
402  * (c) a page and its buddy have the same order &&
403  * (d) a page and its buddy are in the same zone.
404  *
405  * For recording whether a page is in the buddy system, we use PG_buddy.
406  * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
407  *
408  * For recording page's order, we use page_private(page).
409  */
410 static inline int page_is_buddy(struct page *page, struct page *buddy,
411 								int order)
412 {
413 	if (!pfn_valid_within(page_to_pfn(buddy)))
414 		return 0;
415 
416 	if (page_zone_id(page) != page_zone_id(buddy))
417 		return 0;
418 
419 	if (PageBuddy(buddy) && page_order(buddy) == order) {
420 		VM_BUG_ON(page_count(buddy) != 0);
421 		return 1;
422 	}
423 	return 0;
424 }
425 
426 /*
427  * Freeing function for a buddy system allocator.
428  *
429  * The concept of a buddy system is to maintain a direct-mapped table
430  * (containing bit values) for memory blocks of various "orders".
431  * The bottom level table contains the map for the smallest allocatable
432  * units of memory (here, pages), and each level above it describes
433  * pairs of units from the levels below, hence, "buddies".
434  * At a high level, all that happens here is marking the table entry
435  * at the bottom level available, and propagating the changes upward
436  * as necessary, plus some accounting needed to play nicely with other
437  * parts of the VM system.
438  * At each level, we keep a list of pages, which are heads of contiguous
439  * free pages of length (1 << order) and marked with PG_buddy. A page's
440  * order is recorded in the page_private(page) field.
441  * So when we are allocating or freeing one, we can derive the state of the
442  * other.  That is, if we allocate a small block, and both were
443  * free, the remainder of the region must be split into blocks.
444  * If a block is freed, and its buddy is also free, then this
445  * triggers coalescing into a block of larger size.
446  *
447  * -- wli
448  */
449 
450 static inline void __free_one_page(struct page *page,
451 		struct zone *zone, unsigned int order,
452 		int migratetype)
453 {
454 	unsigned long page_idx;
455 
456 	if (unlikely(PageCompound(page)))
457 		if (unlikely(destroy_compound_page(page, order)))
458 			return;
459 
460 	VM_BUG_ON(migratetype == -1);
461 
462 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
463 
464 	VM_BUG_ON(page_idx & ((1 << order) - 1));
465 	VM_BUG_ON(bad_range(zone, page));
466 
467 	while (order < MAX_ORDER-1) {
468 		unsigned long combined_idx;
469 		struct page *buddy;
470 
471 		buddy = __page_find_buddy(page, page_idx, order);
472 		if (!page_is_buddy(page, buddy, order))
473 			break;
474 
475 		/* Our buddy is free, merge with it and move up one order. */
476 		list_del(&buddy->lru);
477 		zone->free_area[order].nr_free--;
478 		rmv_page_order(buddy);
479 		combined_idx = __find_combined_index(page_idx, order);
480 		page = page + (combined_idx - page_idx);
481 		page_idx = combined_idx;
482 		order++;
483 	}
484 	set_page_order(page, order);
485 	list_add(&page->lru,
486 		&zone->free_area[order].free_list[migratetype]);
487 	zone->free_area[order].nr_free++;
488 }
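
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * the index progression __free_one_page() follows while merging.  Freeing
 * the order-0 page at index 10 with both buddies free gives combined
 * indices 10 (order 1) and then 8 (order 2).
 */
static inline unsigned long example_merge_walk(unsigned long page_idx,
					       unsigned int merges)
{
	unsigned int order;

	for (order = 0; order < merges; order++)
		page_idx &= ~(1UL << order);	/* __find_combined_index() */
	return page_idx;	/* example_merge_walk(10, 2) == 8 */
}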
489 
490 /*
491  * free_page_mlock() -- clean up attempts to free an mlocked() page.
492  * Page should not be on lru, so no need to fix that up.
493  * free_pages_check() will verify...
494  */
495 static inline void free_page_mlock(struct page *page)
496 {
497 	__dec_zone_page_state(page, NR_MLOCK);
498 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
499 }
500 
501 static inline int free_pages_check(struct page *page)
502 {
503 	if (unlikely(page_mapcount(page) |
504 		(page->mapping != NULL)  |
505 		(atomic_read(&page->_count) != 0) |
506 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
507 		bad_page(page);
508 		return 1;
509 	}
510 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
511 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
512 	return 0;
513 }
514 
515 /*
516  * Frees a number of pages from the PCP lists
517  * Assumes all pages on the list are in the same zone, and of the same order.
518  * count is the number of pages to free.
519  *
520  * If the zone was previously in an "all pages pinned" state then look to
521  * see if this freeing clears that state.
522  *
523  * And clear the zone's pages_scanned counter, to hold off the "all pages are
524  * pinned" detection logic.
525  */
526 static void free_pcppages_bulk(struct zone *zone, int count,
527 					struct per_cpu_pages *pcp)
528 {
529 	int migratetype = 0;
530 	int batch_free = 0;
531 
532 	spin_lock(&zone->lock);
533 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
534 	zone->pages_scanned = 0;
535 
536 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
537 	while (count) {
538 		struct page *page;
539 		struct list_head *list;
540 
541 		/*
542 		 * Remove pages from lists in a round-robin fashion. A
543 		 * batch_free count is maintained and incremented each time an
544 		 * empty list is encountered.  This is so more pages are freed
545 		 * off fuller lists instead of spinning excessively around empty
546 		 * lists.
547 		 */
548 		do {
549 			batch_free++;
550 			if (++migratetype == MIGRATE_PCPTYPES)
551 				migratetype = 0;
552 			list = &pcp->lists[migratetype];
553 		} while (list_empty(list));
554 
555 		do {
556 			page = list_entry(list->prev, struct page, lru);
557 			/* must delete as __free_one_page() manipulates the free lists */
558 			list_del(&page->lru);
559 			__free_one_page(page, zone, 0, migratetype);
560 			trace_mm_page_pcpu_drain(page, 0, migratetype);
561 		} while (--count && --batch_free && !list_empty(list));
562 	}
563 	spin_unlock(&zone->lock);
564 }
565 
566 static void free_one_page(struct zone *zone, struct page *page, int order,
567 				int migratetype)
568 {
569 	spin_lock(&zone->lock);
570 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
571 	zone->pages_scanned = 0;
572 
573 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
574 	__free_one_page(page, zone, order, migratetype);
575 	spin_unlock(&zone->lock);
576 }
577 
578 static void __free_pages_ok(struct page *page, unsigned int order)
579 {
580 	unsigned long flags;
581 	int i;
582 	int bad = 0;
583 	int wasMlocked = __TestClearPageMlocked(page);
584 
585 	kmemcheck_free_shadow(page, order);
586 
587 	for (i = 0 ; i < (1 << order) ; ++i)
588 		bad += free_pages_check(page + i);
589 	if (bad)
590 		return;
591 
592 	if (!PageHighMem(page)) {
593 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
594 		debug_check_no_obj_freed(page_address(page),
595 					   PAGE_SIZE << order);
596 	}
597 	arch_free_page(page, order);
598 	kernel_map_pages(page, 1 << order, 0);
599 
600 	local_irq_save(flags);
601 	if (unlikely(wasMlocked))
602 		free_page_mlock(page);
603 	__count_vm_events(PGFREE, 1 << order);
604 	free_one_page(page_zone(page), page, order,
605 					get_pageblock_migratetype(page));
606 	local_irq_restore(flags);
607 }
608 
609 /*
610  * permit the bootmem allocator to evade page validation on high-order frees
611  */
612 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
613 {
614 	if (order == 0) {
615 		__ClearPageReserved(page);
616 		set_page_count(page, 0);
617 		set_page_refcounted(page);
618 		__free_page(page);
619 	} else {
620 		int loop;
621 
622 		prefetchw(page);
623 		for (loop = 0; loop < BITS_PER_LONG; loop++) {
624 			struct page *p = &page[loop];
625 
626 			if (loop + 1 < BITS_PER_LONG)
627 				prefetchw(p + 1);
628 			__ClearPageReserved(p);
629 			set_page_count(p, 0);
630 		}
631 
632 		set_page_refcounted(page);
633 		__free_pages(page, order);
634 	}
635 }
636 
637 
638 /*
639  * The order of subdivision here is critical for the IO subsystem.
640  * Please do not alter this order without good reasons and regression
641  * testing. Specifically, as large blocks of memory are subdivided,
642  * the order in which smaller blocks are delivered depends on the order
643  * they're subdivided in this function. This is the primary factor
644  * influencing the order in which pages are delivered to the IO
645  * subsystem according to empirical testing, and this is also justified
646  * by considering the behavior of a buddy system containing a single
647  * large block of memory acted on by a series of small allocations.
648  * This behavior is a critical factor in sglist merging's success.
649  *
650  * -- wli
651  */
652 static inline void expand(struct zone *zone, struct page *page,
653 	int low, int high, struct free_area *area,
654 	int migratetype)
655 {
656 	unsigned long size = 1 << high;
657 
658 	while (high > low) {
659 		area--;
660 		high--;
661 		size >>= 1;
662 		VM_BUG_ON(bad_range(zone, &page[size]));
663 		list_add(&page[size].lru, &area->free_list[migratetype]);
664 		area->nr_free++;
665 		set_page_order(&page[size], high);
666 	}
667 }
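
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * expand() splitting an order-3 block to satisfy an order-0 request.  The
 * tail halves returned to the free lists sit at page offsets 4 (order 2),
 * 2 (order 1) and 1 (order 0), leaving page[0] for the caller.
 */
static inline void example_expand_split(void)
{
	int low = 0, high = 3;			/* order-0 request from an order-3 block */
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* freed tail half at offset `size`: 4, then 2, then 1 */
	}
}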
668 
669 /*
670  * This page is about to be returned from the page allocator
671  */
672 static inline int check_new_page(struct page *page)
673 {
674 	if (unlikely(page_mapcount(page) |
675 		(page->mapping != NULL)  |
676 		(atomic_read(&page->_count) != 0)  |
677 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
678 		bad_page(page);
679 		return 1;
680 	}
681 	return 0;
682 }
683 
684 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
685 {
686 	int i;
687 
688 	for (i = 0; i < (1 << order); i++) {
689 		struct page *p = page + i;
690 		if (unlikely(check_new_page(p)))
691 			return 1;
692 	}
693 
694 	set_page_private(page, 0);
695 	set_page_refcounted(page);
696 
697 	arch_alloc_page(page, order);
698 	kernel_map_pages(page, 1 << order, 1);
699 
700 	if (gfp_flags & __GFP_ZERO)
701 		prep_zero_page(page, order, gfp_flags);
702 
703 	if (order && (gfp_flags & __GFP_COMP))
704 		prep_compound_page(page, order);
705 
706 	return 0;
707 }
708 
709 /*
710  * Go through the free lists for the given migratetype and remove
711  * the smallest available page from the freelists
712  */
713 static inline
714 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
715 						int migratetype)
716 {
717 	unsigned int current_order;
718 	struct free_area * area;
719 	struct page *page;
720 
721 	/* Find a page of the appropriate size in the preferred list */
722 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
723 		area = &(zone->free_area[current_order]);
724 		if (list_empty(&area->free_list[migratetype]))
725 			continue;
726 
727 		page = list_entry(area->free_list[migratetype].next,
728 							struct page, lru);
729 		list_del(&page->lru);
730 		rmv_page_order(page);
731 		area->nr_free--;
732 		expand(zone, page, order, current_order, area, migratetype);
733 		return page;
734 	}
735 
736 	return NULL;
737 }
738 
739 
740 /*
741  * This array describes the order lists are fallen back to when
742  * the free lists for the desirable migrate type are depleted
743  */
744 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
745 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
746 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
747 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
748 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
749 };
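
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * how __rmqueue_fallback() below walks a row of the table.  A
 * MIGRATE_UNMOVABLE request whose own lists are empty tries
 * MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE; MIGRATE_RESERVE is
 * handled separately.
 */
static inline int example_first_fallback(int start_migratetype)
{
	return fallbacks[start_migratetype][0];
}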
750 
751 /*
752  * Move the free pages in a range to the free lists of the requested type.
753  * Note that start_page and end_page are not aligned on a pageblock
754  * boundary. If alignment is required, use move_freepages_block()
755  */
756 static int move_freepages(struct zone *zone,
757 			  struct page *start_page, struct page *end_page,
758 			  int migratetype)
759 {
760 	struct page *page;
761 	unsigned long order;
762 	int pages_moved = 0;
763 
764 #ifndef CONFIG_HOLES_IN_ZONE
765 	/*
766 	 * page_zone is not safe to call in this context when
767 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
768 	 * anyway as we check zone boundaries in move_freepages_block().
769 	 * Remove at a later date when no bug reports exist related to
770 	 * grouping pages by mobility
771 	 */
772 	BUG_ON(page_zone(start_page) != page_zone(end_page));
773 #endif
774 
775 	for (page = start_page; page <= end_page;) {
776 		/* Make sure we are not inadvertently changing nodes */
777 		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
778 
779 		if (!pfn_valid_within(page_to_pfn(page))) {
780 			page++;
781 			continue;
782 		}
783 
784 		if (!PageBuddy(page)) {
785 			page++;
786 			continue;
787 		}
788 
789 		order = page_order(page);
790 		list_del(&page->lru);
791 		list_add(&page->lru,
792 			&zone->free_area[order].free_list[migratetype]);
793 		page += 1 << order;
794 		pages_moved += 1 << order;
795 	}
796 
797 	return pages_moved;
798 }
799 
800 static int move_freepages_block(struct zone *zone, struct page *page,
801 				int migratetype)
802 {
803 	unsigned long start_pfn, end_pfn;
804 	struct page *start_page, *end_page;
805 
806 	start_pfn = page_to_pfn(page);
807 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
808 	start_page = pfn_to_page(start_pfn);
809 	end_page = start_page + pageblock_nr_pages - 1;
810 	end_pfn = start_pfn + pageblock_nr_pages - 1;
811 
812 	/* Do not cross zone boundaries */
813 	if (start_pfn < zone->zone_start_pfn)
814 		start_page = page;
815 	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
816 		return 0;
817 
818 	return move_freepages(zone, start_page, end_page, migratetype);
819 }
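
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * the pageblock rounding used above.  With pageblock_nr_pages == 512,
 * pfn 1000 rounds down to start_pfn 512 and the block ends at pfn 1023.
 * nr_pages is assumed to be a power of two, as pageblock_nr_pages is.
 */
static inline unsigned long example_pageblock_start(unsigned long pfn,
						    unsigned long nr_pages)
{
	return pfn & ~(nr_pages - 1);
}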
820 
821 static void change_pageblock_range(struct page *pageblock_page,
822 					int start_order, int migratetype)
823 {
824 	int nr_pageblocks = 1 << (start_order - pageblock_order);
825 
826 	while (nr_pageblocks--) {
827 		set_pageblock_migratetype(pageblock_page, migratetype);
828 		pageblock_page += pageblock_nr_pages;
829 	}
830 }
831 
832 /* Remove an element from the buddy allocator from the fallback list */
833 static inline struct page *
834 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
835 {
836 	struct free_area * area;
837 	int current_order;
838 	struct page *page;
839 	int migratetype, i;
840 
841 	/* Find the largest possible block of pages in the other list */
842 	for (current_order = MAX_ORDER-1; current_order >= order;
843 						--current_order) {
844 		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
845 			migratetype = fallbacks[start_migratetype][i];
846 
847 			/* MIGRATE_RESERVE handled later if necessary */
848 			if (migratetype == MIGRATE_RESERVE)
849 				continue;
850 
851 			area = &(zone->free_area[current_order]);
852 			if (list_empty(&area->free_list[migratetype]))
853 				continue;
854 
855 			page = list_entry(area->free_list[migratetype].next,
856 					struct page, lru);
857 			area->nr_free--;
858 
859 			/*
860 			 * If breaking a large block of pages, move all free
861 			 * pages to the preferred allocation list. If falling
862 			 * back for a reclaimable kernel allocation, be more
863 			 * aggressive about taking ownership of free pages
864 			 */
865 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
866 					start_migratetype == MIGRATE_RECLAIMABLE ||
867 					page_group_by_mobility_disabled) {
868 				unsigned long pages;
869 				pages = move_freepages_block(zone, page,
870 								start_migratetype);
871 
872 				/* Claim the whole block if over half of it is free */
873 				if (pages >= (1 << (pageblock_order-1)) ||
874 						page_group_by_mobility_disabled)
875 					set_pageblock_migratetype(page,
876 								start_migratetype);
877 
878 				migratetype = start_migratetype;
879 			}
880 
881 			/* Remove the page from the freelists */
882 			list_del(&page->lru);
883 			rmv_page_order(page);
884 
885 			/* Take ownership for orders >= pageblock_order */
886 			if (current_order >= pageblock_order)
887 				change_pageblock_range(page, current_order,
888 							start_migratetype);
889 
890 			expand(zone, page, order, current_order, area, migratetype);
891 
892 			trace_mm_page_alloc_extfrag(page, order, current_order,
893 				start_migratetype, migratetype);
894 
895 			return page;
896 		}
897 	}
898 
899 	return NULL;
900 }
901 
902 /*
903  * Do the hard work of removing an element from the buddy allocator.
904  * Call me with the zone->lock already held.
905  */
906 static struct page *__rmqueue(struct zone *zone, unsigned int order,
907 						int migratetype)
908 {
909 	struct page *page;
910 
911 retry_reserve:
912 	page = __rmqueue_smallest(zone, order, migratetype);
913 
914 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
915 		page = __rmqueue_fallback(zone, order, migratetype);
916 
917 		/*
918 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
919 		 * is used because __rmqueue_smallest is an inline function
920 		 * and we want just one call site
921 		 */
922 		if (!page) {
923 			migratetype = MIGRATE_RESERVE;
924 			goto retry_reserve;
925 		}
926 	}
927 
928 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
929 	return page;
930 }
931 
932 /*
933  * Obtain a specified number of elements from the buddy allocator, all under
934  * a single hold of the lock, for efficiency.  Add them to the supplied list.
935  * Returns the number of new pages which were placed at *list.
936  */
937 static int rmqueue_bulk(struct zone *zone, unsigned int order,
938 			unsigned long count, struct list_head *list,
939 			int migratetype, int cold)
940 {
941 	int i;
942 
943 	spin_lock(&zone->lock);
944 	for (i = 0; i < count; ++i) {
945 		struct page *page = __rmqueue(zone, order, migratetype);
946 		if (unlikely(page == NULL))
947 			break;
948 
949 		/*
950 		 * Split buddy pages returned by expand() are received here
951 		 * in physical page order. The page is added to the caller's
952 		 * list and the list head then moves forward. From the caller's
953 		 * perspective, the linked list is ordered by page number under
954 		 * some conditions. This is useful for IO devices that can
955 		 * merge IO requests if the physical pages are ordered
956 		 * properly.
957 		 */
958 		if (likely(cold == 0))
959 			list_add(&page->lru, list);
960 		else
961 			list_add_tail(&page->lru, list);
962 		set_page_private(page, migratetype);
963 		list = &page->lru;
964 	}
965 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
966 	spin_unlock(&zone->lock);
967 	return i;
968 }
969 
970 #ifdef CONFIG_NUMA
971 /*
972  * Called from the vmstat counter updater to drain pagesets of this
973  * currently executing processor on remote nodes after they have
974  * expired.
975  *
976  * Note that this function must be called with the thread pinned to
977  * a single processor.
978  */
979 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
980 {
981 	unsigned long flags;
982 	int to_drain;
983 
984 	local_irq_save(flags);
985 	if (pcp->count >= pcp->batch)
986 		to_drain = pcp->batch;
987 	else
988 		to_drain = pcp->count;
989 	free_pcppages_bulk(zone, to_drain, pcp);
990 	pcp->count -= to_drain;
991 	local_irq_restore(flags);
992 }
993 #endif
994 
995 /*
996  * Drain pages of the indicated processor.
997  *
998  * The processor must either be the current processor, with the
999  * thread pinned to it, or a processor that
1000  * is not online.
1001  */
1002 static void drain_pages(unsigned int cpu)
1003 {
1004 	unsigned long flags;
1005 	struct zone *zone;
1006 
1007 	for_each_populated_zone(zone) {
1008 		struct per_cpu_pageset *pset;
1009 		struct per_cpu_pages *pcp;
1010 
1011 		pset = zone_pcp(zone, cpu);
1012 
1013 		pcp = &pset->pcp;
1014 		local_irq_save(flags);
1015 		free_pcppages_bulk(zone, pcp->count, pcp);
1016 		pcp->count = 0;
1017 		local_irq_restore(flags);
1018 	}
1019 }
1020 
1021 /*
1022  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1023  */
1024 void drain_local_pages(void *arg)
1025 {
1026 	drain_pages(smp_processor_id());
1027 }
1028 
1029 /*
1030  * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1031  */
1032 void drain_all_pages(void)
1033 {
1034 	on_each_cpu(drain_local_pages, NULL, 1);
1035 }
1036 
1037 #ifdef CONFIG_HIBERNATION
1038 
1039 void mark_free_pages(struct zone *zone)
1040 {
1041 	unsigned long pfn, max_zone_pfn;
1042 	unsigned long flags;
1043 	int order, t;
1044 	struct list_head *curr;
1045 
1046 	if (!zone->spanned_pages)
1047 		return;
1048 
1049 	spin_lock_irqsave(&zone->lock, flags);
1050 
1051 	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1052 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1053 		if (pfn_valid(pfn)) {
1054 			struct page *page = pfn_to_page(pfn);
1055 
1056 			if (!swsusp_page_is_forbidden(page))
1057 				swsusp_unset_page_free(page);
1058 		}
1059 
1060 	for_each_migratetype_order(order, t) {
1061 		list_for_each(curr, &zone->free_area[order].free_list[t]) {
1062 			unsigned long i;
1063 
1064 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
1065 			for (i = 0; i < (1UL << order); i++)
1066 				swsusp_set_page_free(pfn_to_page(pfn + i));
1067 		}
1068 	}
1069 	spin_unlock_irqrestore(&zone->lock, flags);
1070 }
1071 #endif /* CONFIG_HIBERNATION */
1072 
1073 /*
1074  * Free a 0-order page
1075  */
1076 static void free_hot_cold_page(struct page *page, int cold)
1077 {
1078 	struct zone *zone = page_zone(page);
1079 	struct per_cpu_pages *pcp;
1080 	unsigned long flags;
1081 	int migratetype;
1082 	int wasMlocked = __TestClearPageMlocked(page);
1083 
1084 	kmemcheck_free_shadow(page, 0);
1085 
1086 	if (PageAnon(page))
1087 		page->mapping = NULL;
1088 	if (free_pages_check(page))
1089 		return;
1090 
1091 	if (!PageHighMem(page)) {
1092 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1093 		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1094 	}
1095 	arch_free_page(page, 0);
1096 	kernel_map_pages(page, 1, 0);
1097 
1098 	pcp = &zone_pcp(zone, get_cpu())->pcp;
1099 	migratetype = get_pageblock_migratetype(page);
1100 	set_page_private(page, migratetype);
1101 	local_irq_save(flags);
1102 	if (unlikely(wasMlocked))
1103 		free_page_mlock(page);
1104 	__count_vm_event(PGFREE);
1105 
1106 	/*
1107 	 * We only track unmovable, reclaimable and movable on pcp lists.
1108 	 * Free ISOLATE pages back to the allocator because they are being
1109 	 * offlined but treat RESERVE as movable pages so we can get those
1110 	 * areas back if necessary. Otherwise, we may have to free
1111 	 * excessively into the page allocator
1112 	 */
1113 	if (migratetype >= MIGRATE_PCPTYPES) {
1114 		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1115 			free_one_page(zone, page, 0, migratetype);
1116 			goto out;
1117 		}
1118 		migratetype = MIGRATE_MOVABLE;
1119 	}
1120 
1121 	if (cold)
1122 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
1123 	else
1124 		list_add(&page->lru, &pcp->lists[migratetype]);
1125 	pcp->count++;
1126 	if (pcp->count >= pcp->high) {
1127 		free_pcppages_bulk(zone, pcp->batch, pcp);
1128 		pcp->count -= pcp->batch;
1129 	}
1130 
1131 out:
1132 	local_irq_restore(flags);
1133 	put_cpu();
1134 }
1135 
1136 void free_hot_page(struct page *page)
1137 {
1138 	trace_mm_page_free_direct(page, 0);
1139 	free_hot_cold_page(page, 0);
1140 }
1141 
1142 /*
1143  * split_page takes a non-compound higher-order page, and splits it into
1144  * n (1<<order) sub-pages: page[0] .. page[n-1].
1145  * Each sub-page must be freed individually.
1146  *
1147  * Note: this is probably too low level an operation for use in drivers.
1148  * Please consult with lkml before using this in your driver.
1149  */
1150 void split_page(struct page *page, unsigned int order)
1151 {
1152 	int i;
1153 
1154 	VM_BUG_ON(PageCompound(page));
1155 	VM_BUG_ON(!page_count(page));
1156 
1157 #ifdef CONFIG_KMEMCHECK
1158 	/*
1159 	 * Split shadow pages too, because free(page[0]) would
1160 	 * otherwise free the whole shadow.
1161 	 */
1162 	if (kmemcheck_page_is_tracked(page))
1163 		split_page(virt_to_page(page[0].shadow), order);
1164 #endif
1165 
1166 	for (i = 1; i < (1 << order); i++)
1167 		set_page_refcounted(page + i);
1168 }
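
/*
 * Illustrative sketch (hypothetical caller, for illustration only): a
 * typical split_page() usage pattern.  A non-compound order-2 allocation
 * is split into four order-0 pages, each of which must later be freed
 * on its own.
 */
static inline void example_split_page_use(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */
	int i;

	if (!page)
		return;

	split_page(page, 2);
	for (i = 0; i < 4; i++)
		__free_page(page + i);	/* each sub-page freed individually */
}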
1169 
1170 /*
1171  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1172  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1173  * or two.
1174  */
1175 static inline
1176 struct page *buffered_rmqueue(struct zone *preferred_zone,
1177 			struct zone *zone, int order, gfp_t gfp_flags,
1178 			int migratetype)
1179 {
1180 	unsigned long flags;
1181 	struct page *page;
1182 	int cold = !!(gfp_flags & __GFP_COLD);
1183 	int cpu;
1184 
1185 again:
1186 	cpu  = get_cpu();
1187 	if (likely(order == 0)) {
1188 		struct per_cpu_pages *pcp;
1189 		struct list_head *list;
1190 
1191 		pcp = &zone_pcp(zone, cpu)->pcp;
1192 		list = &pcp->lists[migratetype];
1193 		local_irq_save(flags);
1194 		if (list_empty(list)) {
1195 			pcp->count += rmqueue_bulk(zone, 0,
1196 					pcp->batch, list,
1197 					migratetype, cold);
1198 			if (unlikely(list_empty(list)))
1199 				goto failed;
1200 		}
1201 
1202 		if (cold)
1203 			page = list_entry(list->prev, struct page, lru);
1204 		else
1205 			page = list_entry(list->next, struct page, lru);
1206 
1207 		list_del(&page->lru);
1208 		pcp->count--;
1209 	} else {
1210 		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1211 			/*
1212 			 * __GFP_NOFAIL is not to be used in new code.
1213 			 *
1214 			 * All __GFP_NOFAIL callers should be fixed so that they
1215 			 * properly detect and handle allocation failures.
1216 			 *
1217 			 * We most definitely don't want callers attempting to
1218 			 * allocate greater than order-1 page units with
1219 			 * __GFP_NOFAIL.
1220 			 */
1221 			WARN_ON_ONCE(order > 1);
1222 		}
1223 		spin_lock_irqsave(&zone->lock, flags);
1224 		page = __rmqueue(zone, order, migratetype);
1225 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1226 		spin_unlock(&zone->lock);
1227 		if (!page)
1228 			goto failed;
1229 	}
1230 
1231 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
1232 	zone_statistics(preferred_zone, zone);
1233 	local_irq_restore(flags);
1234 	put_cpu();
1235 
1236 	VM_BUG_ON(bad_range(zone, page));
1237 	if (prep_new_page(page, order, gfp_flags))
1238 		goto again;
1239 	return page;
1240 
1241 failed:
1242 	local_irq_restore(flags);
1243 	put_cpu();
1244 	return NULL;
1245 }
1246 
1247 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1248 #define ALLOC_WMARK_MIN		WMARK_MIN
1249 #define ALLOC_WMARK_LOW		WMARK_LOW
1250 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1251 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1252 
1253 /* Mask to get the watermark bits */
1254 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1255 
1256 #define ALLOC_HARDER		0x10 /* try to alloc harder */
1257 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
1258 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
1259 
1260 #ifdef CONFIG_FAIL_PAGE_ALLOC
1261 
1262 static struct fail_page_alloc_attr {
1263 	struct fault_attr attr;
1264 
1265 	u32 ignore_gfp_highmem;
1266 	u32 ignore_gfp_wait;
1267 	u32 min_order;
1268 
1269 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1270 
1271 	struct dentry *ignore_gfp_highmem_file;
1272 	struct dentry *ignore_gfp_wait_file;
1273 	struct dentry *min_order_file;
1274 
1275 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1276 
1277 } fail_page_alloc = {
1278 	.attr = FAULT_ATTR_INITIALIZER,
1279 	.ignore_gfp_wait = 1,
1280 	.ignore_gfp_highmem = 1,
1281 	.min_order = 1,
1282 };
1283 
1284 static int __init setup_fail_page_alloc(char *str)
1285 {
1286 	return setup_fault_attr(&fail_page_alloc.attr, str);
1287 }
1288 __setup("fail_page_alloc=", setup_fail_page_alloc);
1289 
1290 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1291 {
1292 	if (order < fail_page_alloc.min_order)
1293 		return 0;
1294 	if (gfp_mask & __GFP_NOFAIL)
1295 		return 0;
1296 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1297 		return 0;
1298 	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1299 		return 0;
1300 
1301 	return should_fail(&fail_page_alloc.attr, 1 << order);
1302 }
1303 
1304 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1305 
1306 static int __init fail_page_alloc_debugfs(void)
1307 {
1308 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1309 	struct dentry *dir;
1310 	int err;
1311 
1312 	err = init_fault_attr_dentries(&fail_page_alloc.attr,
1313 				       "fail_page_alloc");
1314 	if (err)
1315 		return err;
1316 	dir = fail_page_alloc.attr.dentries.dir;
1317 
1318 	fail_page_alloc.ignore_gfp_wait_file =
1319 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
1320 				      &fail_page_alloc.ignore_gfp_wait);
1321 
1322 	fail_page_alloc.ignore_gfp_highmem_file =
1323 		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1324 				      &fail_page_alloc.ignore_gfp_highmem);
1325 	fail_page_alloc.min_order_file =
1326 		debugfs_create_u32("min-order", mode, dir,
1327 				   &fail_page_alloc.min_order);
1328 
1329 	if (!fail_page_alloc.ignore_gfp_wait_file ||
1330             !fail_page_alloc.ignore_gfp_highmem_file ||
1331             !fail_page_alloc.min_order_file) {
1332 		err = -ENOMEM;
1333 		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1334 		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1335 		debugfs_remove(fail_page_alloc.min_order_file);
1336 		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1337 	}
1338 
1339 	return err;
1340 }
1341 
1342 late_initcall(fail_page_alloc_debugfs);
1343 
1344 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1345 
1346 #else /* CONFIG_FAIL_PAGE_ALLOC */
1347 
1348 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1349 {
1350 	return 0;
1351 }
1352 
1353 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1354 
1355 /*
1356  * Return 1 if free pages are above 'mark'. This takes into account the order
1357  * of the allocation.
1358  */
1359 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1360 		      int classzone_idx, int alloc_flags)
1361 {
1362 	/* free_pages may go negative - that's OK */
1363 	long min = mark;
1364 	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1365 	int o;
1366 
1367 	if (alloc_flags & ALLOC_HIGH)
1368 		min -= min / 2;
1369 	if (alloc_flags & ALLOC_HARDER)
1370 		min -= min / 4;
1371 
1372 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1373 		return 0;
1374 	for (o = 0; o < order; o++) {
1375 		/* At the next order, this order's pages become unavailable */
1376 		free_pages -= z->free_area[o].nr_free << o;
1377 
1378 		/* Require fewer higher order pages to be free */
1379 		min >>= 1;
1380 
1381 		if (free_pages <= min)
1382 			return 0;
1383 	}
1384 	return 1;
1385 }
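
/*
 * Illustrative sketch (hypothetical helper, not used by the allocator):
 * the order loop above with concrete numbers.  An order-2 request against
 * mark 128 in a zone with 200 free pages, 100 of them in order-0 blocks
 * and 50 in order-1 blocks, fails: once the order-0 and order-1 pages are
 * discounted, nothing of order >= 2 remains against the halved mark of 32.
 */
static inline int example_watermark_walk(void)
{
	long free_pages = 200 - (1 << 2) + 1;	/* as computed for order 2 */
	long min = 128;
	long nr_free[2] = { 100, 50 };		/* order-0, order-1 block counts */
	int o;

	for (o = 0; o < 2; o++) {
		free_pages -= nr_free[o] << o;
		min >>= 1;
		if (free_pages <= min)
			return 0;		/* fails at o == 1: -3 <= 32 */
	}
	return 1;
}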
1386 
1387 #ifdef CONFIG_NUMA
1388 /*
1389  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1390  * skip over zones that are not allowed by the cpuset, or that have
1391  * been recently (in last second) found to be nearly full.  See further
1392  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1393  * that have to skip over a lot of full or unallowed zones.
1394  *
1395  * If the zonelist cache is present in the passed in zonelist, then
1396  * returns a pointer to the allowed node mask (either the current
1397  * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1398  *
1399  * If the zonelist cache is not available for this zonelist, does
1400  * nothing and returns NULL.
1401  *
1402  * If the fullzones BITMAP in the zonelist cache is stale (more than
1403  * a second since last zap'd) then we zap it out (clear its bits.)
1404  *
1405  * We hold off even calling zlc_setup, until after we've checked the
1406  * first zone in the zonelist, on the theory that most allocations will
1407  * be satisfied from that first zone, so best to examine that zone as
1408  * quickly as we can.
1409  */
1410 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1411 {
1412 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1413 	nodemask_t *allowednodes;	/* zonelist_cache approximation */
1414 
1415 	zlc = zonelist->zlcache_ptr;
1416 	if (!zlc)
1417 		return NULL;
1418 
1419 	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1420 		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1421 		zlc->last_full_zap = jiffies;
1422 	}
1423 
1424 	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1425 					&cpuset_current_mems_allowed :
1426 					&node_states[N_HIGH_MEMORY];
1427 	return allowednodes;
1428 }
1429 
1430 /*
1431  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1432  * if it is worth looking at further for free memory:
1433  *  1) Check that the zone isn't thought to be full (doesn't have its
1434  *     bit set in the zonelist_cache fullzones BITMAP).
1435  *  2) Check that the zones node (obtained from the zonelist_cache
1436  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1437  * Return true (non-zero) if zone is worth looking at further, or
1438  * else return false (zero) if it is not.
1439  *
1440  * This check -ignores- the distinction between various watermarks,
1441  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1442  * found to be full for any variation of these watermarks, it will
1443  * be considered full for up to one second by all requests, unless
1444  * we are so low on memory on all allowed nodes that we are forced
1445  * into the second scan of the zonelist.
1446  *
1447  * In the second scan we ignore this zonelist cache and exactly
1448  * apply the watermarks to all zones, even if it is slower to do so.
1449  * We are low on memory in the second scan, and should leave no stone
1450  * unturned looking for a free page.
1451  */
1452 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1453 						nodemask_t *allowednodes)
1454 {
1455 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1456 	int i;				/* index of *z in zonelist zones */
1457 	int n;				/* node that zone *z is on */
1458 
1459 	zlc = zonelist->zlcache_ptr;
1460 	if (!zlc)
1461 		return 1;
1462 
1463 	i = z - zonelist->_zonerefs;
1464 	n = zlc->z_to_n[i];
1465 
1466 	/* This zone is worth trying if it is allowed but not full */
1467 	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1468 }
1469 
1470 /*
1471  * Given 'z' scanning a zonelist, set the corresponding bit in
1472  * zlc->fullzones, so that subsequent attempts to allocate a page
1473  * from that zone don't waste time re-examining it.
1474  */
1475 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1476 {
1477 	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
1478 	int i;				/* index of *z in zonelist zones */
1479 
1480 	zlc = zonelist->zlcache_ptr;
1481 	if (!zlc)
1482 		return;
1483 
1484 	i = z - zonelist->_zonerefs;
1485 
1486 	set_bit(i, zlc->fullzones);
1487 }
1488 
1489 #else	/* CONFIG_NUMA */
1490 
1491 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1492 {
1493 	return NULL;
1494 }
1495 
1496 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1497 				nodemask_t *allowednodes)
1498 {
1499 	return 1;
1500 }
1501 
1502 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1503 {
1504 }
1505 #endif	/* CONFIG_NUMA */
1506 
1507 /*
1508  * get_page_from_freelist goes through the zonelist trying to allocate
1509  * a page.
1510  */
1511 static struct page *
1512 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1513 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1514 		struct zone *preferred_zone, int migratetype)
1515 {
1516 	struct zoneref *z;
1517 	struct page *page = NULL;
1518 	int classzone_idx;
1519 	struct zone *zone;
1520 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1521 	int zlc_active = 0;		/* set if using zonelist_cache */
1522 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
1523 
1524 	classzone_idx = zone_idx(preferred_zone);
1525 zonelist_scan:
1526 	/*
1527 	 * Scan zonelist, looking for a zone with enough free.
1528 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1529 	 */
1530 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
1531 						high_zoneidx, nodemask) {
1532 		if (NUMA_BUILD && zlc_active &&
1533 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
1534 				continue;
1535 		if ((alloc_flags & ALLOC_CPUSET) &&
1536 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
1537 				goto try_next_zone;
1538 
1539 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1540 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1541 			unsigned long mark;
1542 			int ret;
1543 
1544 			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1545 			if (zone_watermark_ok(zone, order, mark,
1546 				    classzone_idx, alloc_flags))
1547 				goto try_this_zone;
1548 
1549 			if (zone_reclaim_mode == 0)
1550 				goto this_zone_full;
1551 
1552 			ret = zone_reclaim(zone, gfp_mask, order);
1553 			switch (ret) {
1554 			case ZONE_RECLAIM_NOSCAN:
1555 				/* did not scan */
1556 				goto try_next_zone;
1557 			case ZONE_RECLAIM_FULL:
1558 				/* scanned but unreclaimable */
1559 				goto this_zone_full;
1560 			default:
1561 				/* did we reclaim enough? */
1562 				if (!zone_watermark_ok(zone, order, mark,
1563 						classzone_idx, alloc_flags))
1564 					goto this_zone_full;
1565 			}
1566 		}
1567 
1568 try_this_zone:
1569 		page = buffered_rmqueue(preferred_zone, zone, order,
1570 						gfp_mask, migratetype);
1571 		if (page)
1572 			break;
1573 this_zone_full:
1574 		if (NUMA_BUILD)
1575 			zlc_mark_zone_full(zonelist, z);
1576 try_next_zone:
1577 		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1578 			/*
1579 			 * we do zlc_setup after the first zone is tried but only
1580 			 * if there are multiple nodes to make it worthwhile
1581 			 */
1582 			allowednodes = zlc_setup(zonelist, alloc_flags);
1583 			zlc_active = 1;
1584 			did_zlc_setup = 1;
1585 		}
1586 	}
1587 
1588 	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1589 		/* Disable zlc cache for second zonelist scan */
1590 		zlc_active = 0;
1591 		goto zonelist_scan;
1592 	}
1593 	return page;
1594 }
1595 
1596 static inline int
1597 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1598 				unsigned long pages_reclaimed)
1599 {
1600 	/* Do not loop if specifically requested */
1601 	if (gfp_mask & __GFP_NORETRY)
1602 		return 0;
1603 
1604 	/*
1605 	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1606 	 * means __GFP_NOFAIL, but that may not be true in other
1607 	 * implementations.
1608 	 */
1609 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
1610 		return 1;
1611 
1612 	/*
1613 	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1614 	 * specified, then we retry until we no longer reclaim any pages
1615 	 * (above), or we've reclaimed an order of pages at least as
1616 	 * large as the allocation's order. In both cases, if the
1617 	 * allocation still fails, we stop retrying.
1618 	 */
1619 	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1620 		return 1;
1621 
1622 	/*
1623 	 * Don't let big-order allocations loop unless the caller
1624 	 * explicitly requests that.
1625 	 */
1626 	if (gfp_mask & __GFP_NOFAIL)
1627 		return 1;
1628 
1629 	return 0;
1630 }
1631 
1632 static inline struct page *
1633 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1634 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1635 	nodemask_t *nodemask, struct zone *preferred_zone,
1636 	int migratetype)
1637 {
1638 	struct page *page;
1639 
1640 	/* Acquire the OOM killer lock for the zones in zonelist */
1641 	if (!try_set_zone_oom(zonelist, gfp_mask)) {
1642 		schedule_timeout_uninterruptible(1);
1643 		return NULL;
1644 	}
1645 
1646 	/*
1647 	 * Go through the zonelist yet one more time, keep very high watermark
1648 	 * here, this is only to catch a parallel oom killing, we must fail if
1649 	 * we're still under heavy pressure.
1650 	 */
1651 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1652 		order, zonelist, high_zoneidx,
1653 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1654 		preferred_zone, migratetype);
1655 	if (page)
1656 		goto out;
1657 
1658 	if (!(gfp_mask & __GFP_NOFAIL)) {
1659 		/* The OOM killer will not help higher order allocs */
1660 		if (order > PAGE_ALLOC_COSTLY_ORDER)
1661 			goto out;
1662 		/*
1663 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1664 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1665 		 * The caller should handle page allocation failure by itself if
1666 		 * it specifies __GFP_THISNODE.
1667 		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1668 		 */
1669 		if (gfp_mask & __GFP_THISNODE)
1670 			goto out;
1671 	}
1672 	/* Exhausted what can be done so it's blamo time */
1673 	out_of_memory(zonelist, gfp_mask, order, nodemask);
1674 
1675 out:
1676 	clear_zonelist_oom(zonelist, gfp_mask);
1677 	return page;
1678 }
1679 
1680 /* The really slow allocator path where we enter direct reclaim */
1681 static inline struct page *
1682 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1683 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1684 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1685 	int migratetype, unsigned long *did_some_progress)
1686 {
1687 	struct page *page = NULL;
1688 	struct reclaim_state reclaim_state;
1689 	struct task_struct *p = current;
1690 
1691 	cond_resched();
1692 
1693 	/* We now go into synchronous reclaim */
1694 	cpuset_memory_pressure_bump();
1695 	p->flags |= PF_MEMALLOC;
1696 	lockdep_set_current_reclaim_state(gfp_mask);
1697 	reclaim_state.reclaimed_slab = 0;
1698 	p->reclaim_state = &reclaim_state;
1699 
1700 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1701 
1702 	p->reclaim_state = NULL;
1703 	lockdep_clear_current_reclaim_state();
1704 	p->flags &= ~PF_MEMALLOC;
1705 
1706 	cond_resched();
1707 
1708 	if (order != 0)
1709 		drain_all_pages();
1710 
1711 	if (likely(*did_some_progress))
1712 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1713 					zonelist, high_zoneidx,
1714 					alloc_flags, preferred_zone,
1715 					migratetype);
1716 	return page;
1717 }
1718 
1719 /*
1720  * This is called in the allocator slow-path if the allocation request is of
1721  * sufficient urgency to ignore watermarks and take other desperate measures
1722  */
1723 static inline struct page *
1724 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1725 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1726 	nodemask_t *nodemask, struct zone *preferred_zone,
1727 	int migratetype)
1728 {
1729 	struct page *page;
1730 
1731 	do {
1732 		page = get_page_from_freelist(gfp_mask, nodemask, order,
1733 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1734 			preferred_zone, migratetype);
1735 
1736 		if (!page && gfp_mask & __GFP_NOFAIL)
1737 			congestion_wait(BLK_RW_ASYNC, HZ/50);
1738 	} while (!page && (gfp_mask & __GFP_NOFAIL));
1739 
1740 	return page;
1741 }
1742 
1743 static inline
1744 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1745 						enum zone_type high_zoneidx)
1746 {
1747 	struct zoneref *z;
1748 	struct zone *zone;
1749 
1750 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1751 		wakeup_kswapd(zone, order);
1752 }
1753 
1754 static inline int
1755 gfp_to_alloc_flags(gfp_t gfp_mask)
1756 {
1757 	struct task_struct *p = current;
1758 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1759 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1760 
1761 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1762 	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1763 
1764 	/*
1765 	 * The caller may dip into page reserves a bit more if it cannot
1766 	 * run direct reclaim, or if it has realtime scheduling
1767 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1768 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1769 	 */
1770 	alloc_flags |= (gfp_mask & __GFP_HIGH);
1771 
1772 	if (!wait) {
1773 		alloc_flags |= ALLOC_HARDER;
1774 		/*
1775 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1776 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1777 		 */
1778 		alloc_flags &= ~ALLOC_CPUSET;
1779 	} else if (unlikely(rt_task(p)) && !in_interrupt())
1780 		alloc_flags |= ALLOC_HARDER;
1781 
1782 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1783 		if (!in_interrupt() &&
1784 		    ((p->flags & PF_MEMALLOC) ||
1785 		     unlikely(test_thread_flag(TIF_MEMDIE))))
1786 			alloc_flags |= ALLOC_NO_WATERMARKS;
1787 	}
1788 
1789 	return alloc_flags;
1790 }
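
/*
 * For example: GFP_ATOMIC is __GFP_HIGH without __GFP_WAIT, so
 * gfp_to_alloc_flags(GFP_ATOMIC) evaluates to
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET cleared,
 * i.e. the request may dip further into the reserves and ignores cpuset
 * restrictions rather than fail.
 */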
1791 
1792 static inline struct page *
1793 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1794 	struct zonelist *zonelist, enum zone_type high_zoneidx,
1795 	nodemask_t *nodemask, struct zone *preferred_zone,
1796 	int migratetype)
1797 {
1798 	const gfp_t wait = gfp_mask & __GFP_WAIT;
1799 	struct page *page = NULL;
1800 	int alloc_flags;
1801 	unsigned long pages_reclaimed = 0;
1802 	unsigned long did_some_progress;
1803 	struct task_struct *p = current;
1804 
1805 	/*
1806 	 * In the slowpath, we sanity check order to avoid ever trying to
1807 	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1808 	 * be using allocators in order of preference for an area that is
1809 	 * too large.
1810 	 */
1811 	if (order >= MAX_ORDER) {
1812 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1813 		return NULL;
1814 	}
1815 
1816 	/*
1817 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1818 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1819 	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1820 	 * using a larger set of nodes after it has established that the
1821 	 * allowed per-node queues are empty and that nodes are
1822 	 * overallocated.
1823 	 */
1824 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1825 		goto nopage;
1826 
1827 restart:
1828 	wake_all_kswapd(order, zonelist, high_zoneidx);
1829 
1830 	/*
1831 	 * OK, we're below the kswapd watermark and have kicked background
1832 	 * reclaim. Now things get more complex, so set up alloc_flags according
1833 	 * to how we want to proceed.
1834 	 */
1835 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
1836 
1837 	/* This is the last chance, in general, before the goto nopage. */
1838 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1839 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1840 			preferred_zone, migratetype);
1841 	if (page)
1842 		goto got_pg;
1843 
1844 rebalance:
1845 	/* Allocate without watermarks if the context allows */
1846 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
1847 		page = __alloc_pages_high_priority(gfp_mask, order,
1848 				zonelist, high_zoneidx, nodemask,
1849 				preferred_zone, migratetype);
1850 		if (page)
1851 			goto got_pg;
1852 	}
1853 
1854 	/* Atomic allocations - we can't balance anything */
1855 	if (!wait)
1856 		goto nopage;
1857 
1858 	/* Avoid recursion of direct reclaim */
1859 	if (p->flags & PF_MEMALLOC)
1860 		goto nopage;
1861 
1862 	/* Avoid allocations with no watermarks from looping endlessly */
1863 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1864 		goto nopage;
1865 
1866 	/* Try direct reclaim and then allocating */
1867 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
1868 					zonelist, high_zoneidx,
1869 					nodemask,
1870 					alloc_flags, preferred_zone,
1871 					migratetype, &did_some_progress);
1872 	if (page)
1873 		goto got_pg;
1874 
1875 	/*
1876 	 * If we failed to make any progress reclaiming, then we are
1877 	 * running out of options and have to consider going OOM
1878 	 */
1879 	if (!did_some_progress) {
1880 		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1881 			if (oom_killer_disabled)
1882 				goto nopage;
1883 			page = __alloc_pages_may_oom(gfp_mask, order,
1884 					zonelist, high_zoneidx,
1885 					nodemask, preferred_zone,
1886 					migratetype);
1887 			if (page)
1888 				goto got_pg;
1889 
1890 			/*
1891 			 * The OOM killer does not trigger for high-order
1892 			 * allocations without __GFP_NOFAIL, so if no progress is being
1893 			 * made, there are no other options and retrying is
1894 			 * unlikely to help.
1895 			 */
1896 			if (order > PAGE_ALLOC_COSTLY_ORDER &&
1897 						!(gfp_mask & __GFP_NOFAIL))
1898 				goto nopage;
1899 
1900 			goto restart;
1901 		}
1902 	}
1903 
1904 	/* Check if we should retry the allocation */
1905 	pages_reclaimed += did_some_progress;
1906 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1907 		/* Wait for some write requests to complete then retry */
1908 		congestion_wait(BLK_RW_ASYNC, HZ/50);
1909 		goto rebalance;
1910 	}
1911 
1912 nopage:
1913 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1914 		printk(KERN_WARNING "%s: page allocation failure."
1915 			" order:%d, mode:0x%x\n",
1916 			p->comm, order, gfp_mask);
1917 		dump_stack();
1918 		show_mem();
1919 	}
1920 	return page;
1921 got_pg:
1922 	if (kmemcheck_enabled)
1923 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1924 	return page;
1925 
1926 }
1927 
1928 /*
1929  * This is the 'heart' of the zoned buddy allocator.
1930  */
1931 struct page *
1932 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1933 			struct zonelist *zonelist, nodemask_t *nodemask)
1934 {
1935 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1936 	struct zone *preferred_zone;
1937 	struct page *page;
1938 	int migratetype = allocflags_to_migratetype(gfp_mask);
1939 
1940 	gfp_mask &= gfp_allowed_mask;
1941 
1942 	lockdep_trace_alloc(gfp_mask);
1943 
1944 	might_sleep_if(gfp_mask & __GFP_WAIT);
1945 
1946 	if (should_fail_alloc_page(gfp_mask, order))
1947 		return NULL;
1948 
1949 	/*
1950 	 * Check the zones suitable for the gfp_mask contain at least one
1951 	 * valid zone. It's possible to have an empty zonelist as a result
1952 	 * of GFP_THISNODE and a memoryless node
1953 	 */
1954 	if (unlikely(!zonelist->_zonerefs->zone))
1955 		return NULL;
1956 
1957 	/* The preferred zone is used for statistics later */
1958 	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1959 	if (!preferred_zone)
1960 		return NULL;
1961 
1962 	/* First allocation attempt */
1963 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1964 			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1965 			preferred_zone, migratetype);
1966 	if (unlikely(!page))
1967 		page = __alloc_pages_slowpath(gfp_mask, order,
1968 				zonelist, high_zoneidx, nodemask,
1969 				preferred_zone, migratetype);
1970 
1971 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1972 	return page;
1973 }
1974 EXPORT_SYMBOL(__alloc_pages_nodemask);
1975 
1976 /*
1977  * Common helper functions.
1978  */
1979 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1980 {
1981 	struct page *page;
1982 
1983 	/*
1984 	 * __get_free_pages() returns a virtual address, which cannot
1985 	 * represent a highmem page.
1986 	 */
1987 	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1988 
1989 	page = alloc_pages(gfp_mask, order);
1990 	if (!page)
1991 		return 0;
1992 	return (unsigned long) page_address(page);
1993 }
1994 EXPORT_SYMBOL(__get_free_pages);
1995 
1996 unsigned long get_zeroed_page(gfp_t gfp_mask)
1997 {
1998 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1999 }
2000 EXPORT_SYMBOL(get_zeroed_page);
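
/*
 * Typical (hypothetical) usage of the helpers above - an order-2 request
 * returns the kernel virtual address of 4 contiguous pages:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 2);
 */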
2001 
2002 void __pagevec_free(struct pagevec *pvec)
2003 {
2004 	int i = pagevec_count(pvec);
2005 
2006 	while (--i >= 0) {
2007 		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2008 		free_hot_cold_page(pvec->pages[i], pvec->cold);
2009 	}
2010 }
2011 
2012 void __free_pages(struct page *page, unsigned int order)
2013 {
2014 	if (put_page_testzero(page)) {
2015 		trace_mm_page_free_direct(page, order);
2016 		if (order == 0)
2017 			free_hot_page(page);
2018 		else
2019 			__free_pages_ok(page, order);
2020 	}
2021 }
2022 
2023 EXPORT_SYMBOL(__free_pages);
2024 
2025 void free_pages(unsigned long addr, unsigned int order)
2026 {
2027 	if (addr != 0) {
2028 		VM_BUG_ON(!virt_addr_valid((void *)addr));
2029 		__free_pages(virt_to_page((void *)addr), order);
2030 	}
2031 }
2032 
2033 EXPORT_SYMBOL(free_pages);
2034 
2035 /**
2036  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2037  * @size: the number of bytes to allocate
2038  * @gfp_mask: GFP flags for the allocation
2039  *
2040  * This function is similar to alloc_pages(), except that it allocates the
2041  * minimum number of pages to satisfy the request.  alloc_pages() can only
2042  * allocate memory in power-of-two pages.
2043  *
2044  * This function is also limited by MAX_ORDER.
2045  *
2046  * Memory allocated by this function must be released by free_pages_exact().
2047  */
2048 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2049 {
2050 	unsigned int order = get_order(size);
2051 	unsigned long addr;
2052 
2053 	addr = __get_free_pages(gfp_mask, order);
2054 	if (addr) {
2055 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
2056 		unsigned long used = addr + PAGE_ALIGN(size);
2057 
2058 		split_page(virt_to_page((void *)addr), order);
2059 		while (used < alloc_end) {
2060 			free_page(used);
2061 			used += PAGE_SIZE;
2062 		}
2063 	}
2064 
2065 	return (void *)addr;
2066 }
2067 EXPORT_SYMBOL(alloc_pages_exact);
2068 
2069 /**
2070  * free_pages_exact - release memory allocated via alloc_pages_exact()
2071  * @virt: the value returned by alloc_pages_exact.
2072  * @size: size of allocation, same value as passed to alloc_pages_exact().
2073  *
2074  * Release the memory allocated by a previous call to alloc_pages_exact.
2075  */
2076 void free_pages_exact(void *virt, size_t size)
2077 {
2078 	unsigned long addr = (unsigned long)virt;
2079 	unsigned long end = addr + PAGE_ALIGN(size);
2080 
2081 	while (addr < end) {
2082 		free_page(addr);
2083 		addr += PAGE_SIZE;
2084 	}
2085 }
2086 EXPORT_SYMBOL(free_pages_exact);
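
/*
 * Sketch of a (hypothetical) caller: a request for 20KB makes get_order()
 * pick order 3 (32KB with 4KB pages); alloc_pages_exact() splits that block
 * and immediately frees the three unused tail pages.
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */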
2087 
2088 static unsigned int nr_free_zone_pages(int offset)
2089 {
2090 	struct zoneref *z;
2091 	struct zone *zone;
2092 
2093 	/* Just pick one node, since fallback list is circular */
2094 	unsigned int sum = 0;
2095 
2096 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2097 
2098 	for_each_zone_zonelist(zone, z, zonelist, offset) {
2099 		unsigned long size = zone->present_pages;
2100 		unsigned long high = high_wmark_pages(zone);
2101 		if (size > high)
2102 			sum += size - high;
2103 	}
2104 
2105 	return sum;
2106 }
2107 
2108 /*
2109  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2110  */
2111 unsigned int nr_free_buffer_pages(void)
2112 {
2113 	return nr_free_zone_pages(gfp_zone(GFP_USER));
2114 }
2115 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2116 
2117 /*
2118  * Amount of free RAM allocatable within all zones
2119  */
2120 unsigned int nr_free_pagecache_pages(void)
2121 {
2122 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2123 }
2124 
2125 static inline void show_node(struct zone *zone)
2126 {
2127 	if (NUMA_BUILD)
2128 		printk("Node %d ", zone_to_nid(zone));
2129 }
2130 
2131 void si_meminfo(struct sysinfo *val)
2132 {
2133 	val->totalram = totalram_pages;
2134 	val->sharedram = 0;
2135 	val->freeram = global_page_state(NR_FREE_PAGES);
2136 	val->bufferram = nr_blockdev_pages();
2137 	val->totalhigh = totalhigh_pages;
2138 	val->freehigh = nr_free_highpages();
2139 	val->mem_unit = PAGE_SIZE;
2140 }
2141 
2142 EXPORT_SYMBOL(si_meminfo);
2143 
2144 #ifdef CONFIG_NUMA
2145 void si_meminfo_node(struct sysinfo *val, int nid)
2146 {
2147 	pg_data_t *pgdat = NODE_DATA(nid);
2148 
2149 	val->totalram = pgdat->node_present_pages;
2150 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
2151 #ifdef CONFIG_HIGHMEM
2152 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2153 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2154 			NR_FREE_PAGES);
2155 #else
2156 	val->totalhigh = 0;
2157 	val->freehigh = 0;
2158 #endif
2159 	val->mem_unit = PAGE_SIZE;
2160 }
2161 #endif
2162 
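/* Helper for the printouts below: convert a page count into kilobytes. */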
2163 #define K(x) ((x) << (PAGE_SHIFT-10))
2164 
2165 /*
2166  * Show free area list (used inside shift_scroll-lock stuff)
2167  * We also calculate the percentage fragmentation. We do this by counting the
2168  * memory on each free list with the exception of the first item on the list.
2169  */
2170 void show_free_areas(void)
2171 {
2172 	int cpu;
2173 	struct zone *zone;
2174 
2175 	for_each_populated_zone(zone) {
2176 		show_node(zone);
2177 		printk("%s per-cpu:\n", zone->name);
2178 
2179 		for_each_online_cpu(cpu) {
2180 			struct per_cpu_pageset *pageset;
2181 
2182 			pageset = zone_pcp(zone, cpu);
2183 
2184 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2185 			       cpu, pageset->pcp.high,
2186 			       pageset->pcp.batch, pageset->pcp.count);
2187 		}
2188 	}
2189 
2190 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2191 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2192 		" unevictable:%lu"
2193 		" dirty:%lu writeback:%lu unstable:%lu\n"
2194 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2195 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2196 		global_page_state(NR_ACTIVE_ANON),
2197 		global_page_state(NR_INACTIVE_ANON),
2198 		global_page_state(NR_ISOLATED_ANON),
2199 		global_page_state(NR_ACTIVE_FILE),
2200 		global_page_state(NR_INACTIVE_FILE),
2201 		global_page_state(NR_ISOLATED_FILE),
2202 		global_page_state(NR_UNEVICTABLE),
2203 		global_page_state(NR_FILE_DIRTY),
2204 		global_page_state(NR_WRITEBACK),
2205 		global_page_state(NR_UNSTABLE_NFS),
2206 		global_page_state(NR_FREE_PAGES),
2207 		global_page_state(NR_SLAB_RECLAIMABLE),
2208 		global_page_state(NR_SLAB_UNRECLAIMABLE),
2209 		global_page_state(NR_FILE_MAPPED),
2210 		global_page_state(NR_SHMEM),
2211 		global_page_state(NR_PAGETABLE),
2212 		global_page_state(NR_BOUNCE));
2213 
2214 	for_each_populated_zone(zone) {
2215 		int i;
2216 
2217 		show_node(zone);
2218 		printk("%s"
2219 			" free:%lukB"
2220 			" min:%lukB"
2221 			" low:%lukB"
2222 			" high:%lukB"
2223 			" active_anon:%lukB"
2224 			" inactive_anon:%lukB"
2225 			" active_file:%lukB"
2226 			" inactive_file:%lukB"
2227 			" unevictable:%lukB"
2228 			" isolated(anon):%lukB"
2229 			" isolated(file):%lukB"
2230 			" present:%lukB"
2231 			" mlocked:%lukB"
2232 			" dirty:%lukB"
2233 			" writeback:%lukB"
2234 			" mapped:%lukB"
2235 			" shmem:%lukB"
2236 			" slab_reclaimable:%lukB"
2237 			" slab_unreclaimable:%lukB"
2238 			" kernel_stack:%lukB"
2239 			" pagetables:%lukB"
2240 			" unstable:%lukB"
2241 			" bounce:%lukB"
2242 			" writeback_tmp:%lukB"
2243 			" pages_scanned:%lu"
2244 			" all_unreclaimable? %s"
2245 			"\n",
2246 			zone->name,
2247 			K(zone_page_state(zone, NR_FREE_PAGES)),
2248 			K(min_wmark_pages(zone)),
2249 			K(low_wmark_pages(zone)),
2250 			K(high_wmark_pages(zone)),
2251 			K(zone_page_state(zone, NR_ACTIVE_ANON)),
2252 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
2253 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
2254 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
2255 			K(zone_page_state(zone, NR_UNEVICTABLE)),
2256 			K(zone_page_state(zone, NR_ISOLATED_ANON)),
2257 			K(zone_page_state(zone, NR_ISOLATED_FILE)),
2258 			K(zone->present_pages),
2259 			K(zone_page_state(zone, NR_MLOCK)),
2260 			K(zone_page_state(zone, NR_FILE_DIRTY)),
2261 			K(zone_page_state(zone, NR_WRITEBACK)),
2262 			K(zone_page_state(zone, NR_FILE_MAPPED)),
2263 			K(zone_page_state(zone, NR_SHMEM)),
2264 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2265 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2266 			zone_page_state(zone, NR_KERNEL_STACK) *
2267 				THREAD_SIZE / 1024,
2268 			K(zone_page_state(zone, NR_PAGETABLE)),
2269 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2270 			K(zone_page_state(zone, NR_BOUNCE)),
2271 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2272 			zone->pages_scanned,
2273 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
2274 			);
2275 		printk("lowmem_reserve[]:");
2276 		for (i = 0; i < MAX_NR_ZONES; i++)
2277 			printk(" %lu", zone->lowmem_reserve[i]);
2278 		printk("\n");
2279 	}
2280 
2281 	for_each_populated_zone(zone) {
2282 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
2283 
2284 		show_node(zone);
2285 		printk("%s: ", zone->name);
2286 
2287 		spin_lock_irqsave(&zone->lock, flags);
2288 		for (order = 0; order < MAX_ORDER; order++) {
2289 			nr[order] = zone->free_area[order].nr_free;
2290 			total += nr[order] << order;
2291 		}
2292 		spin_unlock_irqrestore(&zone->lock, flags);
2293 		for (order = 0; order < MAX_ORDER; order++)
2294 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
2295 		printk("= %lukB\n", K(total));
2296 	}
2297 
2298 	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2299 
2300 	show_swap_cache_info();
2301 }
2302 
2303 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2304 {
2305 	zoneref->zone = zone;
2306 	zoneref->zone_idx = zone_idx(zone);
2307 }
2308 
2309 /*
2310  * Builds allocation fallback zone lists.
2311  *
2312  * Add all populated zones of a node to the zonelist.
2313  */
2314 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2315 				int nr_zones, enum zone_type zone_type)
2316 {
2317 	struct zone *zone;
2318 
2319 	BUG_ON(zone_type >= MAX_NR_ZONES);
2320 	zone_type++;
2321 
2322 	do {
2323 		zone_type--;
2324 		zone = pgdat->node_zones + zone_type;
2325 		if (populated_zone(zone)) {
2326 			zoneref_set_zone(zone,
2327 				&zonelist->_zonerefs[nr_zones++]);
2328 			check_highest_zone(zone_type);
2329 		}
2330 
2331 	} while (zone_type);
2332 	return nr_zones;
2333 }
2334 
2335 
2336 /*
2337  *  zonelist_order:
2338  *  0 = automatic detection of better ordering.
2339  *  1 = order by ([node] distance, -zonetype)
2340  *  2 = order by (-zonetype, [node] distance)
2341  *
2342  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2343  *  the same zonelist. So only NUMA can configure this param.
2344  */
2345 #define ZONELIST_ORDER_DEFAULT  0
2346 #define ZONELIST_ORDER_NODE     1
2347 #define ZONELIST_ORDER_ZONE     2
2348 
2349 /* zonelist order in the kernel.
2350  * set_zonelist_order() will set this to NODE or ZONE.
2351  */
2352 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2353 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2354 
2355 
2356 #ifdef CONFIG_NUMA
2357 /* The ordering the user specified, changed via boot param or sysctl */
2358 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2359 /* string for sysctl */
2360 #define NUMA_ZONELIST_ORDER_LEN	16
2361 char numa_zonelist_order[16] = "default";
2362 
2363 /*
2364  * Interface for configuring zonelist ordering.
2365  * Command line option "numa_zonelist_order":
2366  *	"[dD]efault"	- default, automatic configuration
2367  *	"[nN]ode"	- order by node locality, then by zone within node
2368  *	"[zZ]one"	- order by zone, then by locality within zone
2369  */
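
/*
 * For example, booting with "numa_zonelist_order=zone", or writing "zone"
 * to /proc/sys/vm/numa_zonelist_order at runtime, selects ZONELIST_ORDER_ZONE.
 */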
2370 
2371 static int __parse_numa_zonelist_order(char *s)
2372 {
2373 	if (*s == 'd' || *s == 'D') {
2374 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2375 	} else if (*s == 'n' || *s == 'N') {
2376 		user_zonelist_order = ZONELIST_ORDER_NODE;
2377 	} else if (*s == 'z' || *s == 'Z') {
2378 		user_zonelist_order = ZONELIST_ORDER_ZONE;
2379 	} else {
2380 		printk(KERN_WARNING
2381 			"Ignoring invalid numa_zonelist_order value:  "
2382 			"%s\n", s);
2383 		return -EINVAL;
2384 	}
2385 	return 0;
2386 }
2387 
2388 static __init int setup_numa_zonelist_order(char *s)
2389 {
2390 	if (s)
2391 		return __parse_numa_zonelist_order(s);
2392 	return 0;
2393 }
2394 early_param("numa_zonelist_order", setup_numa_zonelist_order);
2395 
2396 /*
2397  * sysctl handler for numa_zonelist_order
2398  */
2399 int numa_zonelist_order_handler(ctl_table *table, int write,
2400 		void __user *buffer, size_t *length,
2401 		loff_t *ppos)
2402 {
2403 	char saved_string[NUMA_ZONELIST_ORDER_LEN];
2404 	int ret;
2405 	static DEFINE_MUTEX(zl_order_mutex);
2406 
2407 	mutex_lock(&zl_order_mutex);
2408 	if (write)
2409 		strcpy(saved_string, (char*)table->data);
2410 	ret = proc_dostring(table, write, buffer, length, ppos);
2411 	if (ret)
2412 		goto out;
2413 	if (write) {
2414 		int oldval = user_zonelist_order;
2415 		if (__parse_numa_zonelist_order((char*)table->data)) {
2416 			/*
2417 			 * bogus value.  restore saved string
2418 			 */
2419 			strncpy((char*)table->data, saved_string,
2420 				NUMA_ZONELIST_ORDER_LEN);
2421 			user_zonelist_order = oldval;
2422 		} else if (oldval != user_zonelist_order)
2423 			build_all_zonelists();
2424 	}
2425 out:
2426 	mutex_unlock(&zl_order_mutex);
2427 	return ret;
2428 }
2429 
2430 
2431 #define MAX_NODE_LOAD (nr_online_nodes)
2432 static int node_load[MAX_NUMNODES];
2433 
2434 /**
2435  * find_next_best_node - find the next node that should appear in a given node's fallback list
2436  * @node: node whose fallback list we're appending
2437  * @used_node_mask: nodemask_t of already used nodes
2438  *
2439  * We use a number of factors to determine which is the next node that should
2440  * appear on a given node's fallback list.  The node should not have appeared
2441  * already in @node's fallback list, and it should be the next closest node
2442  * according to the distance array (which contains arbitrary distance values
2443  * from each node to each node in the system), and should also prefer nodes
2444  * with no CPUs, since presumably they'll have very little allocation pressure
2445  * on them otherwise.
2446  * It returns -1 if no node is found.
2447  */
2448 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2449 {
2450 	int n, val;
2451 	int min_val = INT_MAX;
2452 	int best_node = -1;
2453 	const struct cpumask *tmp = cpumask_of_node(0);
2454 
2455 	/* Use the local node if we haven't already */
2456 	if (!node_isset(node, *used_node_mask)) {
2457 		node_set(node, *used_node_mask);
2458 		return node;
2459 	}
2460 
2461 	for_each_node_state(n, N_HIGH_MEMORY) {
2462 
2463 		/* Don't want a node to appear more than once */
2464 		if (node_isset(n, *used_node_mask))
2465 			continue;
2466 
2467 		/* Use the distance array to find the distance */
2468 		val = node_distance(node, n);
2469 
2470 		/* Penalize nodes under us ("prefer the next node") */
2471 		val += (n < node);
2472 
2473 		/* Give preference to headless and unused nodes */
2474 		tmp = cpumask_of_node(n);
2475 		if (!cpumask_empty(tmp))
2476 			val += PENALTY_FOR_NODE_WITH_CPUS;
2477 
2478 		/* Slight preference for less loaded node */
2479 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2480 		val += node_load[n];
2481 
2482 		if (val < min_val) {
2483 			min_val = val;
2484 			best_node = n;
2485 		}
2486 	}
2487 
2488 	if (best_node >= 0)
2489 		node_set(best_node, *used_node_mask);
2490 
2491 	return best_node;
2492 }
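
/*
 * Note on the weighting above: val is scaled by MAX_NODE_LOAD * MAX_NUMNODES
 * before node_load[] is added, so distance (plus the CPU penalty) always
 * dominates and the load term only breaks ties between otherwise equally
 * placed nodes.
 */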
2493 
2494 
2495 /*
2496  * Build zonelists ordered by node and zones within node.
2497  * This results in maximum locality--normal zone overflows into local
2498  * DMA zone, if any--but risks exhausting DMA zone.
2499  */
2500 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2501 {
2502 	int j;
2503 	struct zonelist *zonelist;
2504 
2505 	zonelist = &pgdat->node_zonelists[0];
2506 	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2507 		;
2508 	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2509 							MAX_NR_ZONES - 1);
2510 	zonelist->_zonerefs[j].zone = NULL;
2511 	zonelist->_zonerefs[j].zone_idx = 0;
2512 }
2513 
2514 /*
2515  * Build gfp_thisnode zonelists
2516  */
2517 static void build_thisnode_zonelists(pg_data_t *pgdat)
2518 {
2519 	int j;
2520 	struct zonelist *zonelist;
2521 
2522 	zonelist = &pgdat->node_zonelists[1];
2523 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2524 	zonelist->_zonerefs[j].zone = NULL;
2525 	zonelist->_zonerefs[j].zone_idx = 0;
2526 }
2527 
2528 /*
2529  * Build zonelists ordered by zone and nodes within zones.
2530  * This results in conserving DMA zone[s] until all Normal memory is
2531  * This conserves the DMA zone[s] until all Normal memory is
2532  * exhausted, but results in overflowing to a remote node while
2533  * memory may still exist in the local DMA zone.
2534 static int node_order[MAX_NUMNODES];
2535 
2536 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2537 {
2538 	int pos, j, node;
2539 	int zone_type;		/* needs to be signed */
2540 	struct zone *z;
2541 	struct zonelist *zonelist;
2542 
2543 	zonelist = &pgdat->node_zonelists[0];
2544 	pos = 0;
2545 	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2546 		for (j = 0; j < nr_nodes; j++) {
2547 			node = node_order[j];
2548 			z = &NODE_DATA(node)->node_zones[zone_type];
2549 			if (populated_zone(z)) {
2550 				zoneref_set_zone(z,
2551 					&zonelist->_zonerefs[pos++]);
2552 				check_highest_zone(zone_type);
2553 			}
2554 		}
2555 	}
2556 	zonelist->_zonerefs[pos].zone = NULL;
2557 	zonelist->_zonerefs[pos].zone_idx = 0;
2558 }
2559 
2560 static int default_zonelist_order(void)
2561 {
2562 	int nid, zone_type;
2563 	unsigned long low_kmem_size,total_size;
2564 	struct zone *z;
2565 	int average_size;
2566 	/*
2567 	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2568 	 * If they are really small and used heavily, the system can fall
2569 	 * into OOM very easily.
2570 	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2571 	 */
2572 	/* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
2573 	low_kmem_size = 0;
2574 	total_size = 0;
2575 	for_each_online_node(nid) {
2576 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2577 			z = &NODE_DATA(nid)->node_zones[zone_type];
2578 			if (populated_zone(z)) {
2579 				if (zone_type < ZONE_NORMAL)
2580 					low_kmem_size += z->present_pages;
2581 				total_size += z->present_pages;
2582 			}
2583 		}
2584 	}
2585 	if (!low_kmem_size ||  /* there is no DMA area. */
2586 	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2587 		return ZONELIST_ORDER_NODE;
2588 	/*
2589 	 * Look into each node's config.
2590 	 * If there is a node whose DMA/DMA32 memory covers a large part of
2591 	 * its local memory, NODE order may be suitable.
2592 	 */
2593 	average_size = total_size /
2594 				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2595 	for_each_online_node(nid) {
2596 		low_kmem_size = 0;
2597 		total_size = 0;
2598 		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2599 			z = &NODE_DATA(nid)->node_zones[zone_type];
2600 			if (populated_zone(z)) {
2601 				if (zone_type < ZONE_NORMAL)
2602 					low_kmem_size += z->present_pages;
2603 				total_size += z->present_pages;
2604 			}
2605 		}
2606 		if (low_kmem_size &&
2607 		    total_size > average_size && /* ignore small node */
2608 		    low_kmem_size > total_size * 70/100)
2609 			return ZONELIST_ORDER_NODE;
2610 	}
2611 	return ZONELIST_ORDER_ZONE;
2612 }
2613 
2614 static void set_zonelist_order(void)
2615 {
2616 	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2617 		current_zonelist_order = default_zonelist_order();
2618 	else
2619 		current_zonelist_order = user_zonelist_order;
2620 }
2621 
2622 static void build_zonelists(pg_data_t *pgdat)
2623 {
2624 	int j, node, load;
2625 	enum zone_type i;
2626 	nodemask_t used_mask;
2627 	int local_node, prev_node;
2628 	struct zonelist *zonelist;
2629 	int order = current_zonelist_order;
2630 
2631 	/* initialize zonelists */
2632 	for (i = 0; i < MAX_ZONELISTS; i++) {
2633 		zonelist = pgdat->node_zonelists + i;
2634 		zonelist->_zonerefs[0].zone = NULL;
2635 		zonelist->_zonerefs[0].zone_idx = 0;
2636 	}
2637 
2638 	/* NUMA-aware ordering of nodes */
2639 	local_node = pgdat->node_id;
2640 	load = nr_online_nodes;
2641 	prev_node = local_node;
2642 	nodes_clear(used_mask);
2643 
2644 	memset(node_order, 0, sizeof(node_order));
2645 	j = 0;
2646 
2647 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2648 		int distance = node_distance(local_node, node);
2649 
2650 		/*
2651 		 * If another node is sufficiently far away then it is better
2652 		 * to reclaim pages in a zone before going off node.
2653 		 */
2654 		if (distance > RECLAIM_DISTANCE)
2655 			zone_reclaim_mode = 1;
2656 
2657 		/*
2658 		 * We don't want to pressure a particular node.
2659 		 * So add a penalty to the first node in the same
2660 		 * distance group to make the selection round-robin.
2661 		 */
2662 		if (distance != node_distance(local_node, prev_node))
2663 			node_load[node] = load;
2664 
2665 		prev_node = node;
2666 		load--;
2667 		if (order == ZONELIST_ORDER_NODE)
2668 			build_zonelists_in_node_order(pgdat, node);
2669 		else
2670 			node_order[j++] = node;	/* remember order */
2671 	}
2672 
2673 	if (order == ZONELIST_ORDER_ZONE) {
2674 		/* calculate node order -- i.e., DMA last! */
2675 		build_zonelists_in_zone_order(pgdat, j);
2676 	}
2677 
2678 	build_thisnode_zonelists(pgdat);
2679 }
2680 
2681 /* Construct the zonelist performance cache - see further mmzone.h */
2682 static void build_zonelist_cache(pg_data_t *pgdat)
2683 {
2684 	struct zonelist *zonelist;
2685 	struct zonelist_cache *zlc;
2686 	struct zoneref *z;
2687 
2688 	zonelist = &pgdat->node_zonelists[0];
2689 	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2690 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2691 	for (z = zonelist->_zonerefs; z->zone; z++)
2692 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2693 }
2694 
2695 
2696 #else	/* CONFIG_NUMA */
2697 
2698 static void set_zonelist_order(void)
2699 {
2700 	current_zonelist_order = ZONELIST_ORDER_ZONE;
2701 }
2702 
2703 static void build_zonelists(pg_data_t *pgdat)
2704 {
2705 	int node, local_node;
2706 	enum zone_type j;
2707 	struct zonelist *zonelist;
2708 
2709 	local_node = pgdat->node_id;
2710 
2711 	zonelist = &pgdat->node_zonelists[0];
2712 	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2713 
2714 	/*
2715 	 * Now we build the zonelist so that it contains the zones
2716 	 * of all the other nodes.
2717 	 * We don't want to pressure a particular node, so when
2718 	 * building the zones for node N, we make sure that the
2719 	 * zones coming right after the local ones are those from
2720 	 * node N+1 (modulo N)
2721 	 */
2722 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2723 		if (!node_online(node))
2724 			continue;
2725 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2726 							MAX_NR_ZONES - 1);
2727 	}
2728 	for (node = 0; node < local_node; node++) {
2729 		if (!node_online(node))
2730 			continue;
2731 		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2732 							MAX_NR_ZONES - 1);
2733 	}
2734 
2735 	zonelist->_zonerefs[j].zone = NULL;
2736 	zonelist->_zonerefs[j].zone_idx = 0;
2737 }
2738 
2739 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2740 static void build_zonelist_cache(pg_data_t *pgdat)
2741 {
2742 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
2743 }
2744 
2745 #endif	/* CONFIG_NUMA */
2746 
2747 /* Returns an int only to match the stop_machine() callback signature */
2748 static int __build_all_zonelists(void *dummy)
2749 {
2750 	int nid;
2751 
2752 #ifdef CONFIG_NUMA
2753 	memset(node_load, 0, sizeof(node_load));
2754 #endif
2755 	for_each_online_node(nid) {
2756 		pg_data_t *pgdat = NODE_DATA(nid);
2757 
2758 		build_zonelists(pgdat);
2759 		build_zonelist_cache(pgdat);
2760 	}
2761 	return 0;
2762 }
2763 
2764 void build_all_zonelists(void)
2765 {
2766 	set_zonelist_order();
2767 
2768 	if (system_state == SYSTEM_BOOTING) {
2769 		__build_all_zonelists(NULL);
2770 		mminit_verify_zonelist();
2771 		cpuset_init_current_mems_allowed();
2772 	} else {
2773 		/* we have to stop all cpus to guarantee there is no user
2774 		   of zonelist */
2775 		stop_machine(__build_all_zonelists, NULL, NULL);
2776 		/* cpuset refresh routine should be here */
2777 	}
2778 	vm_total_pages = nr_free_pagecache_pages();
2779 	/*
2780 	 * Disable grouping by mobility if the number of pages in the
2781 	 * system is too low to allow the mechanism to work. It would be
2782 	 * more accurate, but expensive to check per-zone. This check is
2783 	 * made on memory-hotadd so a system can start with mobility
2784 	 * disabled and enable it later
2785 	 */
2786 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2787 		page_group_by_mobility_disabled = 1;
2788 	else
2789 		page_group_by_mobility_disabled = 0;
2790 
2791 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
2792 		"Total pages: %ld\n",
2793 			nr_online_nodes,
2794 			zonelist_order_name[current_zonelist_order],
2795 			page_group_by_mobility_disabled ? "off" : "on",
2796 			vm_total_pages);
2797 #ifdef CONFIG_NUMA
2798 	printk("Policy zone: %s\n", zone_names[policy_zone]);
2799 #endif
2800 }
2801 
2802 /*
2803  * Helper functions to size the waitqueue hash table.
2804  * Essentially these want to choose hash table sizes sufficiently
2805  * large so that collisions trying to wait on pages are rare.
2806  * But in fact, the number of active page waitqueues on typical
2807  * systems is ridiculously low, less than 200. So this is even
2808  * conservative, even though it seems large.
2809  *
2810  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2811  * waitqueues, i.e. the size of the waitq table given the number of pages.
2812  */
2813 #define PAGES_PER_WAITQUEUE	256
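
/*
 * Worked example (assuming 4KB pages): a 1GB zone spans 262144 pages;
 * 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of two, so
 * wait_table_hash_nr_entries() below returns a 1024-entry table (clamped
 * to the range [4, 4096] in the !MEMORY_HOTPLUG case).
 */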
2814 
2815 #ifndef CONFIG_MEMORY_HOTPLUG
2816 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2817 {
2818 	unsigned long size = 1;
2819 
2820 	pages /= PAGES_PER_WAITQUEUE;
2821 
2822 	while (size < pages)
2823 		size <<= 1;
2824 
2825 	/*
2826 	 * Once we have dozens or even hundreds of threads sleeping
2827 	 * on IO we've got bigger problems than wait queue collision.
2828 	 * Limit the size of the wait table to a reasonable size.
2829 	 */
2830 	size = min(size, 4096UL);
2831 
2832 	return max(size, 4UL);
2833 }
2834 #else
2835 /*
2836  * A zone's size might be changed by hot-add, so it is not possible to determine
2837  * a suitable size for its wait_table.  So we use the maximum size now.
2838  *
2839  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2840  *
2841  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2842  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2843  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2844  *
2845  * The maximum number of entries is reached once a zone's memory is
2846  * (512K + 256) pages or more, going by the calculation above. It equals:
2847  *
2848  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2849  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2850  *    powerpc (64K page size)             : =  (32G +16M)byte.
2851  */
2852 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2853 {
2854 	return 4096UL;
2855 }
2856 #endif
2857 
2858 /*
2859  * This is an integer logarithm so that shifts can be used later
2860  * to extract the more random high bits from the multiplicative
2861  * hash function before the remainder is taken.
2862  */
2863 static inline unsigned long wait_table_bits(unsigned long size)
2864 {
2865 	return ffz(~size);
2866 }
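/* For example, wait_table_bits(1024) == 10, i.e. log2 of the table size. */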
2867 
2868 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2869 
2870 /*
2871  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2872  * of blocks reserved is based on min_wmark_pages(zone). The memory within
2873  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2874  * higher will lead to a bigger reserve which will get freed as contiguous
2875  * blocks as reclaim kicks in
2876  */
2877 static void setup_zone_migrate_reserve(struct zone *zone)
2878 {
2879 	unsigned long start_pfn, pfn, end_pfn;
2880 	struct page *page;
2881 	unsigned long block_migratetype;
2882 	int reserve;
2883 
2884 	/* Get the start pfn, end pfn and the number of blocks to reserve */
2885 	start_pfn = zone->zone_start_pfn;
2886 	end_pfn = start_pfn + zone->spanned_pages;
2887 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2888 							pageblock_order;
2889 
2890 	/*
2891 	 * Reserve blocks are generally in place to help high-order atomic
2892 	 * allocations that are short-lived. A min_free_kbytes value that
2893 	 * would result in more than 2 reserve blocks for atomic allocations
2894 	 * is assumed to be in place to help anti-fragmentation for the
2895 	 * future allocation of hugepages at runtime.
2896 	 */
2897 	reserve = min(2, reserve);
2898 
2899 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2900 		if (!pfn_valid(pfn))
2901 			continue;
2902 		page = pfn_to_page(pfn);
2903 
2904 		/* Watch out for overlapping nodes */
2905 		if (page_to_nid(page) != zone_to_nid(zone))
2906 			continue;
2907 
2908 		/* Blocks with reserved pages will never free, skip them. */
2909 		if (PageReserved(page))
2910 			continue;
2911 
2912 		block_migratetype = get_pageblock_migratetype(page);
2913 
2914 		/* If this block is reserved, account for it */
2915 		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2916 			reserve--;
2917 			continue;
2918 		}
2919 
2920 		/* Suitable for reserving if this block is movable */
2921 		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2922 			set_pageblock_migratetype(page, MIGRATE_RESERVE);
2923 			move_freepages_block(zone, page, MIGRATE_RESERVE);
2924 			reserve--;
2925 			continue;
2926 		}
2927 
2928 		/*
2929 		 * If the reserve is met and this is a previous reserved block,
2930 		 * take it back
2931 		 */
2932 		if (block_migratetype == MIGRATE_RESERVE) {
2933 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2934 			move_freepages_block(zone, page, MIGRATE_MOVABLE);
2935 		}
2936 	}
2937 }
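
/*
 * Illustrative sizing (assuming 4KB pages and 2MB pageblocks of 512 pages):
 * a zone with min_wmark_pages() == 1500 rounds up to 1536 pages, i.e. 3
 * pageblocks, which the min(2, reserve) above clamps to 2 MIGRATE_RESERVE
 * blocks.
 */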
2938 
2939 /*
2940  * Initially all pages are reserved - free ones are freed
2941  * up by free_all_bootmem() once the early boot process is
2942  * done. Non-atomic initialization, single-pass.
2943  */
2944 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2945 		unsigned long start_pfn, enum memmap_context context)
2946 {
2947 	struct page *page;
2948 	unsigned long end_pfn = start_pfn + size;
2949 	unsigned long pfn;
2950 	struct zone *z;
2951 
2952 	if (highest_memmap_pfn < end_pfn - 1)
2953 		highest_memmap_pfn = end_pfn - 1;
2954 
2955 	z = &NODE_DATA(nid)->node_zones[zone];
2956 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2957 		/*
2958 		 * There can be holes in boot-time mem_map[]s
2959 		 * handed to this function.  They do not
2960 		 * exist on hotplugged memory.
2961 		 */
2962 		if (context == MEMMAP_EARLY) {
2963 			if (!early_pfn_valid(pfn))
2964 				continue;
2965 			if (!early_pfn_in_nid(pfn, nid))
2966 				continue;
2967 		}
2968 		page = pfn_to_page(pfn);
2969 		set_page_links(page, zone, nid, pfn);
2970 		mminit_verify_page_links(page, zone, nid, pfn);
2971 		init_page_count(page);
2972 		reset_page_mapcount(page);
2973 		SetPageReserved(page);
2974 		/*
2975 		 * Mark the block movable so that blocks are reserved for
2976 		 * movable at startup. This will force kernel allocations
2977 		 * to reserve their blocks rather than leaking throughout
2978 		 * the address space during boot when many long-lived
2979 		 * kernel allocations are made. Later some blocks near
2980 		 * the start are marked MIGRATE_RESERVE by
2981 		 * setup_zone_migrate_reserve()
2982 		 *
2983 		 * bitmap is created for zone's valid pfn range. but memmap
2984 		 * can be created for invalid pages (for alignment)
2985 		 * check here not to call set_pageblock_migratetype() against
2986 		 * pfn out of zone.
2987 		 */
2988 		if ((z->zone_start_pfn <= pfn)
2989 		    && (pfn < z->zone_start_pfn + z->spanned_pages)
2990 		    && !(pfn & (pageblock_nr_pages - 1)))
2991 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2992 
2993 		INIT_LIST_HEAD(&page->lru);
2994 #ifdef WANT_PAGE_VIRTUAL
2995 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2996 		if (!is_highmem_idx(zone))
2997 			set_page_address(page, __va(pfn << PAGE_SHIFT));
2998 #endif
2999 	}
3000 }
3001 
3002 static void __meminit zone_init_free_lists(struct zone *zone)
3003 {
3004 	int order, t;
3005 	for_each_migratetype_order(order, t) {
3006 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3007 		zone->free_area[order].nr_free = 0;
3008 	}
3009 }
3010 
3011 #ifndef __HAVE_ARCH_MEMMAP_INIT
3012 #define memmap_init(size, nid, zone, start_pfn) \
3013 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3014 #endif
3015 
3016 static int zone_batchsize(struct zone *zone)
3017 {
3018 #ifdef CONFIG_MMU
3019 	int batch;
3020 
3021 	/*
3022 	 * The per-cpu-pages pools are set to around 1000th of the
3023 	 * size of the zone.  But no more than 1/2 of a meg.
3024 	 *
3025 	 * OK, so we don't know how big the cache is.  So guess.
3026 	 */
3027 	batch = zone->present_pages / 1024;
3028 	if (batch * PAGE_SIZE > 512 * 1024)
3029 		batch = (512 * 1024) / PAGE_SIZE;
3030 	batch /= 4;		/* We effectively *= 4 below */
3031 	if (batch < 1)
3032 		batch = 1;
3033 
3034 	/*
3035 	 * Clamp the batch to a 2^n - 1 value. Having a power
3036 	 * of 2 value was found to be more likely to have
3037 	 * suboptimal cache aliasing properties in some cases.
3038 	 *
3039 	 * For example if 2 tasks are alternately allocating
3040 	 * batches of pages, one task can end up with a lot
3041 	 * of pages of one half of the possible page colors
3042 	 * and the other with pages of the other colors.
3043 	 */
3044 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
3045 
3046 	return batch;
3047 
3048 #else
3049 	/* The deferral and batching of frees should be suppressed under NOMMU
3050 	 * conditions.
3051 	 *
3052 	 * The problem is that NOMMU needs to be able to allocate large chunks
3053 	 * of contiguous memory as there's no hardware page translation to
3054 	 * assemble apparent contiguous memory from discontiguous pages.
3055 	 *
3056 	 * Queueing large contiguous runs of pages for batching, however,
3057 	 * causes the pages to actually be freed in smaller chunks.  As there
3058 	 * can be a significant delay between the individual batches being
3059 	 * recycled, this leads to the once large chunks of space being
3060 	 * fragmented and becoming unavailable for high-order allocations.
3061 	 */
3062 	return 0;
3063 #endif
3064 }
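
/*
 * Worked example (CONFIG_MMU, 4KB pages): a 1GB zone has 262144 present
 * pages, so batch starts at 256, is capped to 128 (512KB worth of pages),
 * becomes 32 after the division by 4, and the final
 * rounddown_pow_of_two(32 + 32/2) - 1 step yields a batch of 31.
 */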
3065 
3066 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3067 {
3068 	struct per_cpu_pages *pcp;
3069 	int migratetype;
3070 
3071 	memset(p, 0, sizeof(*p));
3072 
3073 	pcp = &p->pcp;
3074 	pcp->count = 0;
3075 	pcp->high = 6 * batch;
3076 	pcp->batch = max(1UL, 1 * batch);
3077 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3078 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
3079 }
3080 
3081 /*
3082  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3083  * to the value high for the pageset p.
3084  */
3085 
3086 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3087 				unsigned long high)
3088 {
3089 	struct per_cpu_pages *pcp;
3090 
3091 	pcp = &p->pcp;
3092 	pcp->high = high;
3093 	pcp->batch = max(1UL, high/4);
3094 	if ((high/4) > (PAGE_SHIFT * 8))
3095 		pcp->batch = PAGE_SHIFT * 8;
3096 }
3097 
3098 
3099 #ifdef CONFIG_NUMA
3100 /*
3101  * Boot pageset table. One per cpu which is going to be used for all
3102  * zones and all nodes. The parameters will be set in such a way
3103  * that an item put on a list will immediately be handed over to
3104  * the buddy list. This is safe since pageset manipulation is done
3105  * with interrupts disabled.
3106  *
3107  * Some NUMA counter updates may also be caught by the boot pagesets.
3108  *
3109  * The boot_pagesets must be kept even after bootup is complete for
3110  * unused processors and/or zones. They do play a role for bootstrapping
3111  * hotplugged processors.
3112  *
3113  * zoneinfo_show() and maybe other functions do
3114  * not check if the processor is online before following the pageset pointer.
3115  * Other parts of the kernel may not check if the zone is available.
3116  */
3117 static struct per_cpu_pageset boot_pageset[NR_CPUS];
3118 
3119 /*
3120  * Dynamically allocate memory for the
3121  * per cpu pageset array in struct zone.
3122  */
3123 static int __cpuinit process_zones(int cpu)
3124 {
3125 	struct zone *zone, *dzone;
3126 	int node = cpu_to_node(cpu);
3127 
3128 	node_set_state(node, N_CPU);	/* this node has a cpu */
3129 
3130 	for_each_populated_zone(zone) {
3131 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3132 					 GFP_KERNEL, node);
3133 		if (!zone_pcp(zone, cpu))
3134 			goto bad;
3135 
3136 		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3137 
3138 		if (percpu_pagelist_fraction)
3139 			setup_pagelist_highmark(zone_pcp(zone, cpu),
3140 			    (zone->present_pages / percpu_pagelist_fraction));
3141 	}
3142 
3143 	return 0;
3144 bad:
3145 	for_each_zone(dzone) {
3146 		if (!populated_zone(dzone))
3147 			continue;
3148 		if (dzone == zone)
3149 			break;
3150 		kfree(zone_pcp(dzone, cpu));
3151 		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3152 	}
3153 	return -ENOMEM;
3154 }
3155 
3156 static inline void free_zone_pagesets(int cpu)
3157 {
3158 	struct zone *zone;
3159 
3160 	for_each_zone(zone) {
3161 		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3162 
3163 		/* Free per_cpu_pageset if it is slab allocated */
3164 		if (pset != &boot_pageset[cpu])
3165 			kfree(pset);
3166 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3167 	}
3168 }
3169 
3170 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3171 		unsigned long action,
3172 		void *hcpu)
3173 {
3174 	int cpu = (long)hcpu;
3175 	int ret = NOTIFY_OK;
3176 
3177 	switch (action) {
3178 	case CPU_UP_PREPARE:
3179 	case CPU_UP_PREPARE_FROZEN:
3180 		if (process_zones(cpu))
3181 			ret = NOTIFY_BAD;
3182 		break;
3183 	case CPU_UP_CANCELED:
3184 	case CPU_UP_CANCELED_FROZEN:
3185 	case CPU_DEAD:
3186 	case CPU_DEAD_FROZEN:
3187 		free_zone_pagesets(cpu);
3188 		break;
3189 	default:
3190 		break;
3191 	}
3192 	return ret;
3193 }
3194 
3195 static struct notifier_block __cpuinitdata pageset_notifier =
3196 	{ &pageset_cpuup_callback, NULL, 0 };
3197 
3198 void __init setup_per_cpu_pageset(void)
3199 {
3200 	int err;
3201 
3202 	/* Initialize per_cpu_pageset for cpu 0.
3203 	 * A cpuup callback will do this for every cpu
3204 	 * as it comes online
3205 	 */
3206 	err = process_zones(smp_processor_id());
3207 	BUG_ON(err);
3208 	register_cpu_notifier(&pageset_notifier);
3209 }
3210 
3211 #endif
3212 
3213 static noinline __init_refok
3214 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3215 {
3216 	int i;
3217 	struct pglist_data *pgdat = zone->zone_pgdat;
3218 	size_t alloc_size;
3219 
3220 	/*
3221 	 * The per-page waitqueue mechanism uses hashed waitqueues
3222 	 * per zone.
3223 	 */
3224 	zone->wait_table_hash_nr_entries =
3225 		 wait_table_hash_nr_entries(zone_size_pages);
3226 	zone->wait_table_bits =
3227 		wait_table_bits(zone->wait_table_hash_nr_entries);
3228 	alloc_size = zone->wait_table_hash_nr_entries
3229 					* sizeof(wait_queue_head_t);
3230 
3231 	if (!slab_is_available()) {
3232 		zone->wait_table = (wait_queue_head_t *)
3233 			alloc_bootmem_node(pgdat, alloc_size);
3234 	} else {
3235 		/*
3236 		 * This case means that a zone whose size was 0 gets new memory
3237 		 * via memory hot-add.
3238 		 * But it may be the case that a new node was hot-added.  In
3239 		 * this case vmalloc() will not be able to use this new node's
3240 		 * memory - this wait_table must be initialized to use this new
3241 		 * node itself as well.
3242 		 * To use this new node's memory, further consideration will be
3243 		 * necessary.
3244 		 */
3245 		zone->wait_table = vmalloc(alloc_size);
3246 	}
3247 	if (!zone->wait_table)
3248 		return -ENOMEM;
3249 
3250 	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3251 		init_waitqueue_head(zone->wait_table + i);
3252 
3253 	return 0;
3254 }
3255 
3256 static int __zone_pcp_update(void *data)
3257 {
3258 	struct zone *zone = data;
3259 	int cpu;
3260 	unsigned long batch = zone_batchsize(zone), flags;
3261 
3262 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3263 		struct per_cpu_pageset *pset;
3264 		struct per_cpu_pages *pcp;
3265 
3266 		pset = zone_pcp(zone, cpu);
3267 		pcp = &pset->pcp;
3268 
3269 		local_irq_save(flags);
3270 		free_pcppages_bulk(zone, pcp->count, pcp);
3271 		setup_pageset(pset, batch);
3272 		local_irq_restore(flags);
3273 	}
3274 	return 0;
3275 }
3276 
3277 void zone_pcp_update(struct zone *zone)
3278 {
3279 	stop_machine(__zone_pcp_update, zone, NULL);
3280 }
3281 
3282 static __meminit void zone_pcp_init(struct zone *zone)
3283 {
3284 	int cpu;
3285 	unsigned long batch = zone_batchsize(zone);
3286 
3287 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
3288 #ifdef CONFIG_NUMA
3289 		/* Early boot. Slab allocator not functional yet */
3290 		zone_pcp(zone, cpu) = &boot_pageset[cpu];
3291 		setup_pageset(&boot_pageset[cpu], 0);
3292 #else
3293 		setup_pageset(zone_pcp(zone, cpu), batch);
3294 #endif
3295 	}
3296 	if (zone->present_pages)
3297 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3298 			zone->name, zone->present_pages, batch);
3299 }
3300 
3301 __meminit int init_currently_empty_zone(struct zone *zone,
3302 					unsigned long zone_start_pfn,
3303 					unsigned long size,
3304 					enum memmap_context context)
3305 {
3306 	struct pglist_data *pgdat = zone->zone_pgdat;
3307 	int ret;
3308 	ret = zone_wait_table_init(zone, size);
3309 	if (ret)
3310 		return ret;
3311 	pgdat->nr_zones = zone_idx(zone) + 1;
3312 
3313 	zone->zone_start_pfn = zone_start_pfn;
3314 
3315 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
3316 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
3317 			pgdat->node_id,
3318 			(unsigned long)zone_idx(zone),
3319 			zone_start_pfn, (zone_start_pfn + size));
3320 
3321 	zone_init_free_lists(zone);
3322 
3323 	return 0;
3324 }
3325 
3326 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3327 /*
3328  * Basic iterator support. Return the first range of PFNs for a node
3329  * Note: nid == MAX_NUMNODES returns first region regardless of node
3330  */
3331 static int __meminit first_active_region_index_in_nid(int nid)
3332 {
3333 	int i;
3334 
3335 	for (i = 0; i < nr_nodemap_entries; i++)
3336 		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3337 			return i;
3338 
3339 	return -1;
3340 }
3341 
3342 /*
3343  * Basic iterator support. Return the next active range of PFNs for a node
3344  * Note: nid == MAX_NUMNODES returns next region regardless of node
3345  */
3346 static int __meminit next_active_region_index_in_nid(int index, int nid)
3347 {
3348 	for (index = index + 1; index < nr_nodemap_entries; index++)
3349 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3350 			return index;
3351 
3352 	return -1;
3353 }
3354 
3355 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3356 /*
3357  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3358  * Architectures may implement their own version but if add_active_range()
3359  * was used and there are no special requirements, this is a convenient
3360  * alternative
3361  */
3362 int __meminit __early_pfn_to_nid(unsigned long pfn)
3363 {
3364 	int i;
3365 
3366 	for (i = 0; i < nr_nodemap_entries; i++) {
3367 		unsigned long start_pfn = early_node_map[i].start_pfn;
3368 		unsigned long end_pfn = early_node_map[i].end_pfn;
3369 
3370 		if (start_pfn <= pfn && pfn < end_pfn)
3371 			return early_node_map[i].nid;
3372 	}
3373 	/* This is a memory hole */
3374 	return -1;
3375 }
3376 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3377 
3378 int __meminit early_pfn_to_nid(unsigned long pfn)
3379 {
3380 	int nid;
3381 
3382 	nid = __early_pfn_to_nid(pfn);
3383 	if (nid >= 0)
3384 		return nid;
3385 	/* just returns 0 */
3386 	return 0;
3387 }
3388 
3389 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3390 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3391 {
3392 	int nid;
3393 
3394 	nid = __early_pfn_to_nid(pfn);
3395 	if (nid >= 0 && nid != node)
3396 		return false;
3397 	return true;
3398 }
3399 #endif
3400 
3401 /* Basic iterator support to walk early_node_map[] */
3402 #define for_each_active_range_index_in_nid(i, nid) \
3403 	for (i = first_active_region_index_in_nid(nid); i != -1; \
3404 				i = next_active_region_index_in_nid(i, nid))
3405 
3406 /**
3407  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3408  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3409  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3410  *
3411  * If an architecture guarantees that all ranges registered with
3412  * add_active_ranges() contain no holes and may be freed, this
3413  * function may be used instead of calling free_bootmem() manually.
3414  */
3415 void __init free_bootmem_with_active_regions(int nid,
3416 						unsigned long max_low_pfn)
3417 {
3418 	int i;
3419 
3420 	for_each_active_range_index_in_nid(i, nid) {
3421 		unsigned long size_pages = 0;
3422 		unsigned long end_pfn = early_node_map[i].end_pfn;
3423 
3424 		if (early_node_map[i].start_pfn >= max_low_pfn)
3425 			continue;
3426 
3427 		if (end_pfn > max_low_pfn)
3428 			end_pfn = max_low_pfn;
3429 
3430 		size_pages = end_pfn - early_node_map[i].start_pfn;
3431 		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3432 				PFN_PHYS(early_node_map[i].start_pfn),
3433 				size_pages << PAGE_SHIFT);
3434 	}
3435 }
3436 
3437 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3438 {
3439 	int i;
3440 	int ret;
3441 
3442 	for_each_active_range_index_in_nid(i, nid) {
3443 		ret = work_fn(early_node_map[i].start_pfn,
3444 			      early_node_map[i].end_pfn, data);
3445 		if (ret)
3446 			break;
3447 	}
3448 }
3449 /**
3450  * sparse_memory_present_with_active_regions - Call memory_present for each active range
3451  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3452  *
3453  * If an architecture guarantees that all ranges registered with
3454  * add_active_ranges() contain no holes and may be freed, this
3455  * function may be used instead of calling memory_present() manually.
3456  */
3457 void __init sparse_memory_present_with_active_regions(int nid)
3458 {
3459 	int i;
3460 
3461 	for_each_active_range_index_in_nid(i, nid)
3462 		memory_present(early_node_map[i].nid,
3463 				early_node_map[i].start_pfn,
3464 				early_node_map[i].end_pfn);
3465 }
3466 
3467 /**
3468  * get_pfn_range_for_nid - Return the start and end page frames for a node
3469  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3470  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3471  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3472  *
3473  * It returns the start and end page frame of a node based on information
3474  * provided by an arch calling add_active_range(). If called for a node
3475  * with no available memory, a warning is printed and the start and end
3476  * PFNs will be 0.
3477  */
3478 void __meminit get_pfn_range_for_nid(unsigned int nid,
3479 			unsigned long *start_pfn, unsigned long *end_pfn)
3480 {
3481 	int i;
3482 	*start_pfn = -1UL;
3483 	*end_pfn = 0;
3484 
3485 	for_each_active_range_index_in_nid(i, nid) {
3486 		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3487 		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3488 	}
3489 
3490 	if (*start_pfn == -1UL)
3491 		*start_pfn = 0;
3492 }
3493 
3494 /*
3495  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3496  * assumption is made that zones within a node are ordered in monotonic
3497  * increasing memory addresses so that the "highest" populated zone is used
3498  */
3499 static void __init find_usable_zone_for_movable(void)
3500 {
3501 	int zone_index;
3502 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3503 		if (zone_index == ZONE_MOVABLE)
3504 			continue;
3505 
3506 		if (arch_zone_highest_possible_pfn[zone_index] >
3507 				arch_zone_lowest_possible_pfn[zone_index])
3508 			break;
3509 	}
3510 
3511 	VM_BUG_ON(zone_index == -1);
3512 	movable_zone = zone_index;
3513 }
3514 
3515 /*
3516  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3517  * because it is sized independently of the architecture. Unlike the other zones,
3518  * the starting point for ZONE_MOVABLE is not fixed. It may be different
3519  * in each node depending on the size of each node and how evenly kernelcore
3520  * is distributed. This helper function adjusts the zone ranges
3521  * provided by the architecture for a given node by using the end of the
3522  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3523  * zones within a node are in order of monotonically increasing memory addresses
3524  */
3525 static void __meminit adjust_zone_range_for_zone_movable(int nid,
3526 					unsigned long zone_type,
3527 					unsigned long node_start_pfn,
3528 					unsigned long node_end_pfn,
3529 					unsigned long *zone_start_pfn,
3530 					unsigned long *zone_end_pfn)
3531 {
3532 	/* Only adjust if ZONE_MOVABLE is on this node */
3533 	if (zone_movable_pfn[nid]) {
3534 		/* Size ZONE_MOVABLE */
3535 		if (zone_type == ZONE_MOVABLE) {
3536 			*zone_start_pfn = zone_movable_pfn[nid];
3537 			*zone_end_pfn = min(node_end_pfn,
3538 				arch_zone_highest_possible_pfn[movable_zone]);
3539 
3540 		/* Adjust for ZONE_MOVABLE starting within this range */
3541 		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3542 				*zone_end_pfn > zone_movable_pfn[nid]) {
3543 			*zone_end_pfn = zone_movable_pfn[nid];
3544 
3545 		/* Check if this whole range is within ZONE_MOVABLE */
3546 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
3547 			*zone_start_pfn = *zone_end_pfn;
3548 	}
3549 }
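
/*
 * A worked example of the adjustment above (illustrative numbers only):
 * suppose a node spans PFNs 0-1000 and zone_movable_pfn[nid] is 600.
 *  - ZONE_MOVABLE itself becomes 600 -> min(1000, end of the highest
 *    usable zone).
 *  - A zone spanning 0-800 is clipped to 0-600.
 *  - A zone spanning 700-1000 collapses to the empty range 1000-1000.
 */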
3550 
3551 /*
3552  * Return the number of pages a zone spans in a node, including holes
3553  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3554  */
3555 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3556 					unsigned long zone_type,
3557 					unsigned long *ignored)
3558 {
3559 	unsigned long node_start_pfn, node_end_pfn;
3560 	unsigned long zone_start_pfn, zone_end_pfn;
3561 
3562 	/* Get the start and end of the node and zone */
3563 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3564 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3565 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3566 	adjust_zone_range_for_zone_movable(nid, zone_type,
3567 				node_start_pfn, node_end_pfn,
3568 				&zone_start_pfn, &zone_end_pfn);
3569 
3570 	/* Check that this node has pages within the zone's required range */
3571 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3572 		return 0;
3573 
3574 	/* Move the zone boundaries inside the node if necessary */
3575 	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3576 	zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3577 
3578 	/* Return the spanned pages */
3579 	return zone_end_pfn - zone_start_pfn;
3580 }
3581 
3582 /*
3583  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3584  * then all holes in the requested range will be accounted for.
3585  */
3586 unsigned long __meminit __absent_pages_in_range(int nid,
3587 				unsigned long range_start_pfn,
3588 				unsigned long range_end_pfn)
3589 {
3590 	int i = 0;
3591 	unsigned long prev_end_pfn = 0, hole_pages = 0;
3592 	unsigned long start_pfn;
3593 
3594 	/* Find the end_pfn of the first active range of pfns in the node */
3595 	i = first_active_region_index_in_nid(nid);
3596 	if (i == -1)
3597 		return 0;
3598 
3599 	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3600 
3601 	/* Account for ranges before physical memory on this node */
3602 	if (early_node_map[i].start_pfn > range_start_pfn)
3603 		hole_pages = prev_end_pfn - range_start_pfn;
3604 
3605 	/* Find all holes for the zone within the node */
3606 	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3607 
3608 		/* No need to continue if prev_end_pfn is outside the zone */
3609 		if (prev_end_pfn >= range_end_pfn)
3610 			break;
3611 
3612 		/* Make sure the end of the zone is not within the hole */
3613 		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3614 		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3615 
3616 		/* Update the hole size count and move on */
3617 		if (start_pfn > range_start_pfn) {
3618 			BUG_ON(prev_end_pfn > start_pfn);
3619 			hole_pages += start_pfn - prev_end_pfn;
3620 		}
3621 		prev_end_pfn = early_node_map[i].end_pfn;
3622 	}
3623 
3624 	/* Account for ranges past physical memory on this node */
3625 	if (range_end_pfn > prev_end_pfn)
3626 		hole_pages += range_end_pfn -
3627 				max(range_start_pfn, prev_end_pfn);
3628 
3629 	return hole_pages;
3630 }
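
/*
 * A worked example (illustrative numbers only): if a node has active ranges
 * [0, 100) and [200, 300), then __absent_pages_in_range(nid, 0, 300)
 * returns 100, the size of the hole between PFN 100 and PFN 200.
 */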
3631 
3632 /**
3633  * absent_pages_in_range - Return number of page frames in holes within a range
3634  * @start_pfn: The start PFN to start searching for holes
3635  * @end_pfn: The end PFN to stop searching for holes
3636  *
3637  * It returns the number of page frames in memory holes within a range.
3638  */
3639 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3640 							unsigned long end_pfn)
3641 {
3642 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3643 }
3644 
3645 /* Return the number of page frames in holes in a zone on a node */
3646 static unsigned long __meminit zone_absent_pages_in_node(int nid,
3647 					unsigned long zone_type,
3648 					unsigned long *ignored)
3649 {
3650 	unsigned long node_start_pfn, node_end_pfn;
3651 	unsigned long zone_start_pfn, zone_end_pfn;
3652 
3653 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3654 	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3655 							node_start_pfn);
3656 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3657 							node_end_pfn);
3658 
3659 	adjust_zone_range_for_zone_movable(nid, zone_type,
3660 			node_start_pfn, node_end_pfn,
3661 			&zone_start_pfn, &zone_end_pfn);
3662 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3663 }
3664 
3665 #else
3666 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3667 					unsigned long zone_type,
3668 					unsigned long *zones_size)
3669 {
3670 	return zones_size[zone_type];
3671 }
3672 
3673 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3674 						unsigned long zone_type,
3675 						unsigned long *zholes_size)
3676 {
3677 	if (!zholes_size)
3678 		return 0;
3679 
3680 	return zholes_size[zone_type];
3681 }
3682 
3683 #endif
3684 
3685 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3686 		unsigned long *zones_size, unsigned long *zholes_size)
3687 {
3688 	unsigned long realtotalpages, totalpages = 0;
3689 	enum zone_type i;
3690 
3691 	for (i = 0; i < MAX_NR_ZONES; i++)
3692 		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3693 								zones_size);
3694 	pgdat->node_spanned_pages = totalpages;
3695 
3696 	realtotalpages = totalpages;
3697 	for (i = 0; i < MAX_NR_ZONES; i++)
3698 		realtotalpages -=
3699 			zone_absent_pages_in_node(pgdat->node_id, i,
3700 								zholes_size);
3701 	pgdat->node_present_pages = realtotalpages;
3702 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3703 							realtotalpages);
3704 }
3705 
3706 #ifndef CONFIG_SPARSEMEM
3707 /*
3708  * Calculate the size of the zone->blockflags rounded to an unsigned long
3709  * Start by making sure zonesize is a multiple of pageblock_nr_pages by
3710  * rounding up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock,
3711  * finally round what is now in bits up to the nearest long in bits, then
3712  * return it in bytes.
3713  */
3714 static unsigned long __init usemap_size(unsigned long zonesize)
3715 {
3716 	unsigned long usemapsize;
3717 
3718 	usemapsize = roundup(zonesize, pageblock_nr_pages);
3719 	usemapsize = usemapsize >> pageblock_order;
3720 	usemapsize *= NR_PAGEBLOCK_BITS;
3721 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3722 
3723 	return usemapsize / 8;
3724 }
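
/*
 * A worked example of the calculation above, assuming illustrative values of
 * pageblock_order == 9 (512-page blocks), NR_PAGEBLOCK_BITS == 4 and 64-bit
 * longs: a zone of 1,000,000 pages rounds up to 1,000,448 pages, i.e. 1954
 * pageblocks needing 7816 bits; rounded up to 123 longs that is 7872 bits,
 * or 984 bytes of usemap.
 */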
3725 
3726 static void __init setup_usemap(struct pglist_data *pgdat,
3727 				struct zone *zone, unsigned long zonesize)
3728 {
3729 	unsigned long usemapsize = usemap_size(zonesize);
3730 	zone->pageblock_flags = NULL;
3731 	if (usemapsize)
3732 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3733 }
3734 #else
3735 static inline void setup_usemap(struct pglist_data *pgdat,
3736 				struct zone *zone, unsigned long zonesize) {}
3737 #endif /* CONFIG_SPARSEMEM */
3738 
3739 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3740 
3741 /* Return a sensible default order for the pageblock size. */
3742 static inline int pageblock_default_order(void)
3743 {
3744 	if (HPAGE_SHIFT > PAGE_SHIFT)
3745 		return HUGETLB_PAGE_ORDER;
3746 
3747 	return MAX_ORDER-1;
3748 }
3749 
3750 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3751 static inline void __init set_pageblock_order(unsigned int order)
3752 {
3753 	/* Check that pageblock_order has not already been set up */
3754 	if (pageblock_order)
3755 		return;
3756 
3757 	/*
3758 	 * Assume the largest contiguous order of interest is a huge page.
3759 	 * This value may be variable depending on boot parameters on IA64
3760 	 */
3761 	pageblock_order = order;
3762 }
3763 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3764 
3765 /*
3766  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3767  * and pageblock_default_order() are unused as pageblock_order is set
3768  * at compile-time. See include/linux/pageblock-flags.h for the values of
3769  * pageblock_order based on the kernel config
3770  */
3771 static inline int pageblock_default_order(unsigned int order)
3772 {
3773 	return MAX_ORDER-1;
3774 }
3775 #define set_pageblock_order(x)	do {} while (0)
3776 
3777 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3778 
3779 /*
3780  * Set up the zone data structures:
3781  *   - mark all pages reserved
3782  *   - mark all memory queues empty
3783  *   - clear the memory bitmaps
3784  */
3785 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3786 		unsigned long *zones_size, unsigned long *zholes_size)
3787 {
3788 	enum zone_type j;
3789 	int nid = pgdat->node_id;
3790 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
3791 	int ret;
3792 
3793 	pgdat_resize_init(pgdat);
3794 	pgdat->nr_zones = 0;
3795 	init_waitqueue_head(&pgdat->kswapd_wait);
3796 	pgdat->kswapd_max_order = 0;
3797 	pgdat_page_cgroup_init(pgdat);
3798 
3799 	for (j = 0; j < MAX_NR_ZONES; j++) {
3800 		struct zone *zone = pgdat->node_zones + j;
3801 		unsigned long size, realsize, memmap_pages;
3802 		enum lru_list l;
3803 
3804 		size = zone_spanned_pages_in_node(nid, j, zones_size);
3805 		realsize = size - zone_absent_pages_in_node(nid, j,
3806 								zholes_size);
3807 
3808 		/*
3809 		 * Adjust realsize so that it accounts for how much memory
3810 		 * is used by this zone for memmap. This affects the watermark
3811 		 * and per-cpu initialisations
3812 		 */
3813 		memmap_pages =
3814 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3815 		if (realsize >= memmap_pages) {
3816 			realsize -= memmap_pages;
3817 			if (memmap_pages)
3818 				printk(KERN_DEBUG
3819 				       "  %s zone: %lu pages used for memmap\n",
3820 				       zone_names[j], memmap_pages);
3821 		} else
3822 			printk(KERN_WARNING
3823 				"  %s zone: %lu pages exceeds realsize %lu\n",
3824 				zone_names[j], memmap_pages, realsize);
3825 
3826 		/* Account for reserved pages */
3827 		if (j == 0 && realsize > dma_reserve) {
3828 			realsize -= dma_reserve;
3829 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3830 					zone_names[0], dma_reserve);
3831 		}
3832 
3833 		if (!is_highmem_idx(j))
3834 			nr_kernel_pages += realsize;
3835 		nr_all_pages += realsize;
3836 
3837 		zone->spanned_pages = size;
3838 		zone->present_pages = realsize;
3839 #ifdef CONFIG_NUMA
3840 		zone->node = nid;
3841 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3842 						/ 100;
3843 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3844 #endif
3845 		zone->name = zone_names[j];
3846 		spin_lock_init(&zone->lock);
3847 		spin_lock_init(&zone->lru_lock);
3848 		zone_seqlock_init(zone);
3849 		zone->zone_pgdat = pgdat;
3850 
3851 		zone->prev_priority = DEF_PRIORITY;
3852 
3853 		zone_pcp_init(zone);
3854 		for_each_lru(l) {
3855 			INIT_LIST_HEAD(&zone->lru[l].list);
3856 			zone->reclaim_stat.nr_saved_scan[l] = 0;
3857 		}
3858 		zone->reclaim_stat.recent_rotated[0] = 0;
3859 		zone->reclaim_stat.recent_rotated[1] = 0;
3860 		zone->reclaim_stat.recent_scanned[0] = 0;
3861 		zone->reclaim_stat.recent_scanned[1] = 0;
3862 		zap_zone_vm_stats(zone);
3863 		zone->flags = 0;
3864 		if (!size)
3865 			continue;
3866 
3867 		set_pageblock_order(pageblock_default_order());
3868 		setup_usemap(pgdat, zone, size);
3869 		ret = init_currently_empty_zone(zone, zone_start_pfn,
3870 						size, MEMMAP_EARLY);
3871 		BUG_ON(ret);
3872 		memmap_init(size, nid, j, zone_start_pfn);
3873 		zone_start_pfn += size;
3874 	}
3875 }
3876 
3877 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3878 {
3879 	/* Skip empty nodes */
3880 	if (!pgdat->node_spanned_pages)
3881 		return;
3882 
3883 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3884 	/* ia64 gets its own node_mem_map, before this, without bootmem */
3885 	if (!pgdat->node_mem_map) {
3886 		unsigned long size, start, end;
3887 		struct page *map;
3888 
3889 		/*
3890 		 * The zone's endpoints aren't required to be MAX_ORDER
3891 		 * aligned, but the node_mem_map endpoints must be, in order
3892 		 * for the buddy allocator to function correctly.
3893 		 */
3894 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3895 		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3896 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
3897 		size =  (end - start) * sizeof(struct page);
3898 		map = alloc_remap(pgdat->node_id, size);
3899 		if (!map)
3900 			map = alloc_bootmem_node(pgdat, size);
3901 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3902 	}
3903 #ifndef CONFIG_NEED_MULTIPLE_NODES
3904 	/*
3905 	 * With no DISCONTIG, the global mem_map is just set as node 0's
3906 	 */
3907 	if (pgdat == NODE_DATA(0)) {
3908 		mem_map = NODE_DATA(0)->node_mem_map;
3909 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3910 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3911 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3912 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3913 	}
3914 #endif
3915 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
3916 }
3917 
3918 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3919 		unsigned long node_start_pfn, unsigned long *zholes_size)
3920 {
3921 	pg_data_t *pgdat = NODE_DATA(nid);
3922 
3923 	pgdat->node_id = nid;
3924 	pgdat->node_start_pfn = node_start_pfn;
3925 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
3926 
3927 	alloc_node_mem_map(pgdat);
3928 #ifdef CONFIG_FLAT_NODE_MEM_MAP
3929 	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3930 		nid, (unsigned long)pgdat,
3931 		(unsigned long)pgdat->node_mem_map);
3932 #endif
3933 
3934 	free_area_init_core(pgdat, zones_size, zholes_size);
3935 }
3936 
3937 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3938 
3939 #if MAX_NUMNODES > 1
3940 /*
3941  * Figure out the number of possible node ids.
3942  */
3943 static void __init setup_nr_node_ids(void)
3944 {
3945 	unsigned int node;
3946 	unsigned int highest = 0;
3947 
3948 	for_each_node_mask(node, node_possible_map)
3949 		highest = node;
3950 	nr_node_ids = highest + 1;
3951 }
3952 #else
3953 static inline void setup_nr_node_ids(void)
3954 {
3955 }
3956 #endif
3957 
3958 /**
3959  * add_active_range - Register a range of PFNs backed by physical memory
3960  * @nid: The node ID the range resides on
3961  * @start_pfn: The start PFN of the available physical memory
3962  * @end_pfn: The end PFN of the available physical memory
3963  *
3964  * These ranges are stored in an early_node_map[] and later used by
3965  * free_area_init_nodes() to calculate zone sizes and holes. If the
3966  * range spans a memory hole, it is up to the architecture to ensure
3967  * the memory is not freed by the bootmem allocator. If possible
3968  * the range being registered will be merged with existing ranges.
3969  */
3970 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3971 						unsigned long end_pfn)
3972 {
3973 	int i;
3974 
3975 	mminit_dprintk(MMINIT_TRACE, "memory_register",
3976 			"Entering add_active_range(%d, %#lx, %#lx) "
3977 			"%d entries of %d used\n",
3978 			nid, start_pfn, end_pfn,
3979 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3980 
3981 	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3982 
3983 	/* Merge with existing active regions if possible */
3984 	for (i = 0; i < nr_nodemap_entries; i++) {
3985 		if (early_node_map[i].nid != nid)
3986 			continue;
3987 
3988 		/* Skip if an existing region covers this new one */
3989 		if (start_pfn >= early_node_map[i].start_pfn &&
3990 				end_pfn <= early_node_map[i].end_pfn)
3991 			return;
3992 
3993 		/* Merge forward if suitable */
3994 		if (start_pfn <= early_node_map[i].end_pfn &&
3995 				end_pfn > early_node_map[i].end_pfn) {
3996 			early_node_map[i].end_pfn = end_pfn;
3997 			return;
3998 		}
3999 
4000 		/* Merge backward if suitable */
4001 		if (start_pfn < early_node_map[i].end_pfn &&
4002 				end_pfn >= early_node_map[i].start_pfn) {
4003 			early_node_map[i].start_pfn = start_pfn;
4004 			return;
4005 		}
4006 	}
4007 
4008 	/* Check that early_node_map is large enough */
4009 	if (i >= MAX_ACTIVE_REGIONS) {
4010 		printk(KERN_CRIT "More than %d memory regions, truncating\n",
4011 							MAX_ACTIVE_REGIONS);
4012 		return;
4013 	}
4014 
4015 	early_node_map[i].nid = nid;
4016 	early_node_map[i].start_pfn = start_pfn;
4017 	early_node_map[i].end_pfn = end_pfn;
4018 	nr_nodemap_entries = i + 1;
4019 }
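
/*
 * A minimal usage sketch (illustrative only; real callers are the various
 * architectures' boot code): each discovered range is registered, and the
 * zone sizing below is then driven from the accumulated early_node_map[].
 */
#if 0
	add_active_range(0, 0x10, 0x9f);	/* node 0: PFNs 0x10-0x9f */
	add_active_range(0, 0x100, 0x20000);	/* node 0: PFNs 0x100-0x20000 */
	/* ... free_area_init_nodes() later sizes the zones from these ranges */
#endif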
4020 
4021 /**
4022  * remove_active_range - Shrink an existing registered range of PFNs
4023  * @nid: The node id the range is on that should be shrunk
4024  * @start_pfn: The new start PFN of the range
4025  * @end_pfn: The new end PFN of the range
4026  *
4027  * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4028  * The map is kept near the end of the physical page range that has already been
4029  * registered. This function allows an arch to shrink an existing registered
4030  * range.
4031  */
4032 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4033 				unsigned long end_pfn)
4034 {
4035 	int i, j;
4036 	int removed = 0;
4037 
4038 	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4039 			  nid, start_pfn, end_pfn);
4040 
4041 	/* Find the old active region end and shrink */
4042 	for_each_active_range_index_in_nid(i, nid) {
4043 		if (early_node_map[i].start_pfn >= start_pfn &&
4044 		    early_node_map[i].end_pfn <= end_pfn) {
4045 			/* clear it */
4046 			early_node_map[i].start_pfn = 0;
4047 			early_node_map[i].end_pfn = 0;
4048 			removed = 1;
4049 			continue;
4050 		}
4051 		if (early_node_map[i].start_pfn < start_pfn &&
4052 		    early_node_map[i].end_pfn > start_pfn) {
4053 			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4054 			early_node_map[i].end_pfn = start_pfn;
4055 			if (temp_end_pfn > end_pfn)
4056 				add_active_range(nid, end_pfn, temp_end_pfn);
4057 			continue;
4058 		}
4059 		if (early_node_map[i].start_pfn >= start_pfn &&
4060 		    early_node_map[i].end_pfn > end_pfn &&
4061 		    early_node_map[i].start_pfn < end_pfn) {
4062 			early_node_map[i].start_pfn = end_pfn;
4063 			continue;
4064 		}
4065 	}
4066 
4067 	if (!removed)
4068 		return;
4069 
4070 	/* remove the blank ones */
4071 	for (i = nr_nodemap_entries - 1; i > 0; i--) {
4072 		if (early_node_map[i].nid != nid)
4073 			continue;
4074 		if (early_node_map[i].end_pfn)
4075 			continue;
4076 		/* we found it, get rid of it */
4077 		for (j = i; j < nr_nodemap_entries - 1; j++)
4078 			memcpy(&early_node_map[j], &early_node_map[j+1],
4079 				sizeof(early_node_map[j]));
4080 		j = nr_nodemap_entries - 1;
4081 		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4082 		nr_nodemap_entries--;
4083 	}
4084 }
4085 
4086 /**
4087  * remove_all_active_ranges - Remove all currently registered regions
4088  *
4089  * During discovery, it may be found that a table like SRAT is invalid
4090  * and an alternative discovery method must be used. This function removes
4091  * all currently registered regions.
4092  */
4093 void __init remove_all_active_ranges(void)
4094 {
4095 	memset(early_node_map, 0, sizeof(early_node_map));
4096 	nr_nodemap_entries = 0;
4097 }
4098 
4099 /* Compare two active node_active_regions */
4100 static int __init cmp_node_active_region(const void *a, const void *b)
4101 {
4102 	struct node_active_region *arange = (struct node_active_region *)a;
4103 	struct node_active_region *brange = (struct node_active_region *)b;
4104 
4105 	/* Done this way to avoid overflows */
4106 	if (arange->start_pfn > brange->start_pfn)
4107 		return 1;
4108 	if (arange->start_pfn < brange->start_pfn)
4109 		return -1;
4110 
4111 	return 0;
4112 }
4113 
4114 /* sort the node_map by start_pfn */
4115 void __init sort_node_map(void)
4116 {
4117 	sort(early_node_map, (size_t)nr_nodemap_entries,
4118 			sizeof(struct node_active_region),
4119 			cmp_node_active_region, NULL);
4120 }
4121 
4122 /* Find the lowest pfn for a node */
4123 static unsigned long __init find_min_pfn_for_node(int nid)
4124 {
4125 	int i;
4126 	unsigned long min_pfn = ULONG_MAX;
4127 
4128 	/* Assuming a sorted map, the first range found has the starting pfn */
4129 	for_each_active_range_index_in_nid(i, nid)
4130 		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4131 
4132 	if (min_pfn == ULONG_MAX) {
4133 		printk(KERN_WARNING
4134 			"Could not find start_pfn for node %d\n", nid);
4135 		return 0;
4136 	}
4137 
4138 	return min_pfn;
4139 }
4140 
4141 /**
4142  * find_min_pfn_with_active_regions - Find the minimum PFN registered
4143  *
4144  * It returns the minimum PFN based on information provided via
4145  * add_active_range().
4146  */
4147 unsigned long __init find_min_pfn_with_active_regions(void)
4148 {
4149 	return find_min_pfn_for_node(MAX_NUMNODES);
4150 }
4151 
4152 /*
4153  * early_calculate_totalpages()
4154  * Sum pages in active regions for movable zone.
4155  * Populate N_HIGH_MEMORY for calculating usable_nodes.
4156  */
4157 static unsigned long __init early_calculate_totalpages(void)
4158 {
4159 	int i;
4160 	unsigned long totalpages = 0;
4161 
4162 	for (i = 0; i < nr_nodemap_entries; i++) {
4163 		unsigned long pages = early_node_map[i].end_pfn -
4164 						early_node_map[i].start_pfn;
4165 		totalpages += pages;
4166 		if (pages)
4167 			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4168 	}
4169 	return totalpages;
4170 }
4171 
4172 /*
4173  * Find the PFN the Movable zone begins in each node. Kernel memory
4174  * is spread evenly between nodes as long as the nodes have enough
4175  * memory. When they don't, some nodes will have more kernelcore than
4176  * others
4177  */
4178 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4179 {
4180 	int i, nid;
4181 	unsigned long usable_startpfn;
4182 	unsigned long kernelcore_node, kernelcore_remaining;
4183 	/* save the state before borrowing the nodemask */
4184 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4185 	unsigned long totalpages = early_calculate_totalpages();
4186 	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4187 
4188 	/*
4189 	 * If movablecore was specified, calculate what size of
4190 	 * kernelcore that corresponds so that memory usable for
4191 	 * any allocation type is evenly spread. If both kernelcore
4192 	 * and movablecore are specified, then the value of kernelcore
4193 	 * will be used for required_kernelcore if it's greater than
4194 	 * what movablecore would have allowed.
4195 	 */
4196 	if (required_movablecore) {
4197 		unsigned long corepages;
4198 
4199 		/*
4200 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4201 		 * was requested by the user
4202 		 */
4203 		required_movablecore =
4204 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4205 		corepages = totalpages - required_movablecore;
4206 
4207 		required_kernelcore = max(required_kernelcore, corepages);
4208 	}
4209 
4210 	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
4211 	if (!required_kernelcore)
4212 		goto out;
4213 
4214 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4215 	find_usable_zone_for_movable();
4216 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4217 
4218 restart:
4219 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4220 	kernelcore_node = required_kernelcore / usable_nodes;
4221 	for_each_node_state(nid, N_HIGH_MEMORY) {
4222 		/*
4223 		 * Recalculate kernelcore_node if the division per node
4224 		 * now exceeds what is necessary to satisfy the requested
4225 		 * amount of memory for the kernel
4226 		 */
4227 		if (required_kernelcore < kernelcore_node)
4228 			kernelcore_node = required_kernelcore / usable_nodes;
4229 
4230 		/*
4231 		 * As the map is walked, we track how much memory is usable
4232 		 * by the kernel using kernelcore_remaining. When it is
4233 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4234 		 */
4235 		kernelcore_remaining = kernelcore_node;
4236 
4237 		/* Go through each range of PFNs within this node */
4238 		for_each_active_range_index_in_nid(i, nid) {
4239 			unsigned long start_pfn, end_pfn;
4240 			unsigned long size_pages;
4241 
4242 			start_pfn = max(early_node_map[i].start_pfn,
4243 						zone_movable_pfn[nid]);
4244 			end_pfn = early_node_map[i].end_pfn;
4245 			if (start_pfn >= end_pfn)
4246 				continue;
4247 
4248 			/* Account for what is only usable for kernelcore */
4249 			if (start_pfn < usable_startpfn) {
4250 				unsigned long kernel_pages;
4251 				kernel_pages = min(end_pfn, usable_startpfn)
4252 								- start_pfn;
4253 
4254 				kernelcore_remaining -= min(kernel_pages,
4255 							kernelcore_remaining);
4256 				required_kernelcore -= min(kernel_pages,
4257 							required_kernelcore);
4258 
4259 				/* Continue if range is now fully accounted */
4260 				if (end_pfn <= usable_startpfn) {
4261 
4262 					/*
4263 					 * Push zone_movable_pfn to the end so
4264 					 * that if we have to rebalance
4265 					 * kernelcore across nodes, we will
4266 					 * not double account here
4267 					 */
4268 					zone_movable_pfn[nid] = end_pfn;
4269 					continue;
4270 				}
4271 				start_pfn = usable_startpfn;
4272 			}
4273 
4274 			/*
4275 			 * The usable PFN range for ZONE_MOVABLE is from
4276 			 * start_pfn->end_pfn. Calculate size_pages as the
4277 			 * number of pages used as kernelcore
4278 			 */
4279 			size_pages = end_pfn - start_pfn;
4280 			if (size_pages > kernelcore_remaining)
4281 				size_pages = kernelcore_remaining;
4282 			zone_movable_pfn[nid] = start_pfn + size_pages;
4283 
4284 			/*
4285 			 * Some kernelcore has been met, update counts and
4286 			 * break if the kernelcore for this node has been
4287 			 * satisified
4288 			 * satisfied
4289 			required_kernelcore -= min(required_kernelcore,
4290 								size_pages);
4291 			kernelcore_remaining -= size_pages;
4292 			if (!kernelcore_remaining)
4293 				break;
4294 		}
4295 	}
4296 
4297 	/*
4298 	 * If there is still required_kernelcore, we do another pass with one
4299 	 * less node in the count. This will push zone_movable_pfn[nid] further
4300 	 * along on the nodes that still have memory until kernelcore is
4301 	 * satisified
4302 	 */
4303 	usable_nodes--;
4304 	if (usable_nodes && required_kernelcore > usable_nodes)
4305 		goto restart;
4306 
4307 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4308 	for (nid = 0; nid < MAX_NUMNODES; nid++)
4309 		zone_movable_pfn[nid] =
4310 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4311 
4312 out:
4313 	/* restore the node_state */
4314 	node_states[N_HIGH_MEMORY] = saved_node_state;
4315 }
4316 
4317 /* Any regular memory on that node? */
4318 static void check_for_regular_memory(pg_data_t *pgdat)
4319 {
4320 #ifdef CONFIG_HIGHMEM
4321 	enum zone_type zone_type;
4322 
4323 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4324 		struct zone *zone = &pgdat->node_zones[zone_type];
4325 		if (zone->present_pages)
4326 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4327 	}
4328 #endif
4329 }
4330 
4331 /**
4332  * free_area_init_nodes - Initialise all pg_data_t and zone data
4333  * @max_zone_pfn: an array of max PFNs for each zone
4334  *
4335  * This will call free_area_init_node() for each active node in the system.
4336  * Using the page ranges provided by add_active_range(), the size of each
4337  * zone in each node and their holes are calculated. If the maximum PFNs of
4338  * two adjacent zones match, it is assumed that the higher zone is empty.
4339  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4340  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4341  * starts where the previous one ended. For example, ZONE_DMA32 starts
4342  * at arch_max_dma_pfn.
4343  */
4344 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4345 {
4346 	unsigned long nid;
4347 	int i;
4348 
4349 	/* Sort early_node_map as initialisation assumes it is sorted */
4350 	sort_node_map();
4351 
4352 	/* Record where the zone boundaries are */
4353 	memset(arch_zone_lowest_possible_pfn, 0,
4354 				sizeof(arch_zone_lowest_possible_pfn));
4355 	memset(arch_zone_highest_possible_pfn, 0,
4356 				sizeof(arch_zone_highest_possible_pfn));
4357 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4358 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4359 	for (i = 1; i < MAX_NR_ZONES; i++) {
4360 		if (i == ZONE_MOVABLE)
4361 			continue;
4362 		arch_zone_lowest_possible_pfn[i] =
4363 			arch_zone_highest_possible_pfn[i-1];
4364 		arch_zone_highest_possible_pfn[i] =
4365 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4366 	}
4367 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4368 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4369 
4370 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4371 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4372 	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4373 
4374 	/* Print out the zone ranges */
4375 	printk("Zone PFN ranges:\n");
4376 	for (i = 0; i < MAX_NR_ZONES; i++) {
4377 		if (i == ZONE_MOVABLE)
4378 			continue;
4379 		printk("  %-8s %0#10lx -> %0#10lx\n",
4380 				zone_names[i],
4381 				arch_zone_lowest_possible_pfn[i],
4382 				arch_zone_highest_possible_pfn[i]);
4383 	}
4384 
4385 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4386 	printk("Movable zone start PFN for each node\n");
4387 	for (i = 0; i < MAX_NUMNODES; i++) {
4388 		if (zone_movable_pfn[i])
4389 			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4390 	}
4391 
4392 	/* Print out the early_node_map[] */
4393 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4394 	for (i = 0; i < nr_nodemap_entries; i++)
4395 		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4396 						early_node_map[i].start_pfn,
4397 						early_node_map[i].end_pfn);
4398 
4399 	/* Initialise every node */
4400 	mminit_verify_pageflags_layout();
4401 	setup_nr_node_ids();
4402 	for_each_online_node(nid) {
4403 		pg_data_t *pgdat = NODE_DATA(nid);
4404 		free_area_init_node(nid, NULL,
4405 				find_min_pfn_for_node(nid), NULL);
4406 
4407 		/* Any memory on that node */
4408 		if (pgdat->node_present_pages)
4409 			node_set_state(nid, N_HIGH_MEMORY);
4410 		check_for_regular_memory(pgdat);
4411 	}
4412 }
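
/*
 * A minimal usage sketch (illustrative only; the set of zones and the limit
 * names "dma_limit_pfn"/"last_usable_pfn" are hypothetical and depend on the
 * architecture and config): an arch builds a max-PFN table indexed by zone
 * and hands it to free_area_init_nodes().
 */
#if 0
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = dma_limit_pfn;	/* hypothetical limit */
	max_zone_pfns[ZONE_NORMAL] = last_usable_pfn;	/* hypothetical limit */
	free_area_init_nodes(max_zone_pfns);
#endif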
4413 
4414 static int __init cmdline_parse_core(char *p, unsigned long *core)
4415 {
4416 	unsigned long long coremem;
4417 	if (!p)
4418 		return -EINVAL;
4419 
4420 	coremem = memparse(p, &p);
4421 	*core = coremem >> PAGE_SHIFT;
4422 
4423 	/* Paranoid check that UL is enough for the coremem value */
4424 	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4425 
4426 	return 0;
4427 }
4428 
4429 /*
4430  * kernelcore=size sets the amount of memory for use for allocations that
4431  * cannot be reclaimed or migrated.
4432  */
4433 static int __init cmdline_parse_kernelcore(char *p)
4434 {
4435 	return cmdline_parse_core(p, &required_kernelcore);
4436 }
4437 
4438 /*
4439  * movablecore=size sets the amount of memory for use for allocations that
4440  * can be reclaimed or migrated.
4441  */
4442 static int __init cmdline_parse_movablecore(char *p)
4443 {
4444 	return cmdline_parse_core(p, &required_movablecore);
4445 }
4446 
4447 early_param("kernelcore", cmdline_parse_kernelcore);
4448 early_param("movablecore", cmdline_parse_movablecore);
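
/*
 * Example boot parameters (illustrative): "kernelcore=512M" or
 * "movablecore=2G".  memparse() accepts the usual K/M/G suffixes, and
 * cmdline_parse_core() converts the result to pages, so with 4KB pages
 * "kernelcore=512M" becomes required_kernelcore = 131072 pages.
 */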
4449 
4450 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4451 
4452 /**
4453  * set_dma_reserve - set the specified number of pages reserved in the first zone
4454  * @new_dma_reserve: The number of pages to mark reserved
4455  *
4456  * The per-cpu batchsize and zone watermarks are determined by present_pages.
4457  * In the DMA zone, a significant percentage may be consumed by kernel image
4458  * and other unfreeable allocations which can skew the watermarks badly. This
4459  * function may optionally be used to account for unfreeable pages in the
4460  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4461  * smaller per-cpu batchsize.
4462  */
4463 void __init set_dma_reserve(unsigned long new_dma_reserve)
4464 {
4465 	dma_reserve = new_dma_reserve;
4466 }
4467 
4468 #ifndef CONFIG_NEED_MULTIPLE_NODES
4469 struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4470 EXPORT_SYMBOL(contig_page_data);
4471 #endif
4472 
4473 void __init free_area_init(unsigned long *zones_size)
4474 {
4475 	free_area_init_node(0, zones_size,
4476 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4477 }
4478 
4479 static int page_alloc_cpu_notify(struct notifier_block *self,
4480 				 unsigned long action, void *hcpu)
4481 {
4482 	int cpu = (unsigned long)hcpu;
4483 
4484 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4485 		drain_pages(cpu);
4486 
4487 		/*
4488 		 * Spill the event counters of the dead processor
4489 		 * into the current processor's event counters.
4490 		 * This artificially elevates the count of the current
4491 		 * processor.
4492 		 */
4493 		vm_events_fold_cpu(cpu);
4494 
4495 		/*
4496 		 * Zero the differential counters of the dead processor
4497 		 * so that the vm statistics are consistent.
4498 		 *
4499 		 * This is only okay since the processor is dead and cannot
4500 		 * race with what we are doing.
4501 		 */
4502 		refresh_cpu_vm_stats(cpu);
4503 	}
4504 	return NOTIFY_OK;
4505 }
4506 
4507 void __init page_alloc_init(void)
4508 {
4509 	hotcpu_notifier(page_alloc_cpu_notify, 0);
4510 }
4511 
4512 /*
4513  * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
4514  *	or min_free_kbytes changes.
4515  */
4516 static void calculate_totalreserve_pages(void)
4517 {
4518 	struct pglist_data *pgdat;
4519 	unsigned long reserve_pages = 0;
4520 	enum zone_type i, j;
4521 
4522 	for_each_online_pgdat(pgdat) {
4523 		for (i = 0; i < MAX_NR_ZONES; i++) {
4524 			struct zone *zone = pgdat->node_zones + i;
4525 			unsigned long max = 0;
4526 
4527 			/* Find valid and maximum lowmem_reserve in the zone */
4528 			for (j = i; j < MAX_NR_ZONES; j++) {
4529 				if (zone->lowmem_reserve[j] > max)
4530 					max = zone->lowmem_reserve[j];
4531 			}
4532 
4533 			/* we treat the high watermark as reserved pages. */
4534 			max += high_wmark_pages(zone);
4535 
4536 			if (max > zone->present_pages)
4537 				max = zone->present_pages;
4538 			reserve_pages += max;
4539 		}
4540 	}
4541 	totalreserve_pages = reserve_pages;
4542 }
4543 
4544 /*
4545  * setup_per_zone_lowmem_reserve - called whenever
4546  *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
4547  *	has a correct pages reserved value, so an adequate number of
4548  *	pages are left in the zone after a successful __alloc_pages().
4549  */
4550 static void setup_per_zone_lowmem_reserve(void)
4551 {
4552 	struct pglist_data *pgdat;
4553 	enum zone_type j, idx;
4554 
4555 	for_each_online_pgdat(pgdat) {
4556 		for (j = 0; j < MAX_NR_ZONES; j++) {
4557 			struct zone *zone = pgdat->node_zones + j;
4558 			unsigned long present_pages = zone->present_pages;
4559 
4560 			zone->lowmem_reserve[j] = 0;
4561 
4562 			idx = j;
4563 			while (idx) {
4564 				struct zone *lower_zone;
4565 
4566 				idx--;
4567 
4568 				if (sysctl_lowmem_reserve_ratio[idx] < 1)
4569 					sysctl_lowmem_reserve_ratio[idx] = 1;
4570 
4571 				lower_zone = pgdat->node_zones + idx;
4572 				lower_zone->lowmem_reserve[j] = present_pages /
4573 					sysctl_lowmem_reserve_ratio[idx];
4574 				present_pages += lower_zone->present_pages;
4575 			}
4576 		}
4577 	}
4578 
4579 	/* update totalreserve_pages */
4580 	calculate_totalreserve_pages();
4581 }
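
/*
 * A worked example of the calculation above (illustrative numbers): with a
 * 4096-page DMA zone, a 200704-page NORMAL zone and
 * sysctl_lowmem_reserve_ratio[DMA] == 256, NORMAL allocations must leave
 * 200704 / 256 = 784 pages unused in the DMA zone, i.e.
 * DMA->lowmem_reserve[NORMAL] = 784.
 */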
4582 
4583 /**
4584  * setup_per_zone_wmarks - called when min_free_kbytes changes
4585  * or when memory is hot-{added|removed}
4586  *
4587  * Ensures that the watermark[min,low,high] values for each zone are set
4588  * correctly with respect to min_free_kbytes.
4589  */
4590 void setup_per_zone_wmarks(void)
4591 {
4592 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4593 	unsigned long lowmem_pages = 0;
4594 	struct zone *zone;
4595 	unsigned long flags;
4596 
4597 	/* Calculate total number of !ZONE_HIGHMEM pages */
4598 	for_each_zone(zone) {
4599 		if (!is_highmem(zone))
4600 			lowmem_pages += zone->present_pages;
4601 	}
4602 
4603 	for_each_zone(zone) {
4604 		u64 tmp;
4605 
4606 		spin_lock_irqsave(&zone->lock, flags);
4607 		tmp = (u64)pages_min * zone->present_pages;
4608 		do_div(tmp, lowmem_pages);
4609 		if (is_highmem(zone)) {
4610 			/*
4611 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4612 			 * need highmem pages, so cap pages_min to a small
4613 			 * value here.
4614 			 *
4615 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4616 			 * deltas control async page reclaim, and so should
4617 			 * not be capped for highmem.
4618 			 */
4619 			int min_pages;
4620 
4621 			min_pages = zone->present_pages / 1024;
4622 			if (min_pages < SWAP_CLUSTER_MAX)
4623 				min_pages = SWAP_CLUSTER_MAX;
4624 			if (min_pages > 128)
4625 				min_pages = 128;
4626 			zone->watermark[WMARK_MIN] = min_pages;
4627 		} else {
4628 			/*
4629 			 * If it's a lowmem zone, reserve a number of pages
4630 			 * proportionate to the zone's size.
4631 			 */
4632 			zone->watermark[WMARK_MIN] = tmp;
4633 		}
4634 
4635 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4636 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4637 		setup_zone_migrate_reserve(zone);
4638 		spin_unlock_irqrestore(&zone->lock, flags);
4639 	}
4640 
4641 	/* update totalreserve_pages */
4642 	calculate_totalreserve_pages();
4643 }
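
/*
 * A worked example of the lowmem branch above (illustrative numbers): with
 * min_free_kbytes = 4096 and 4KB pages, pages_min is 1024 pages.  If lowmem
 * is a single zone, that zone gets WMARK_MIN = 1024,
 * WMARK_LOW = 1024 + 256 = 1280 and WMARK_HIGH = 1024 + 512 = 1536 pages.
 */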
4644 
4645 /*
4646  * The inactive anon list should be small enough that the VM never has to
4647  * do too much work, but large enough that each inactive page has a chance
4648  * to be referenced again before it is swapped out.
4649  *
4650  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4651  * INACTIVE_ANON pages on this zone's LRU, maintained by the
4652  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4653  * the anonymous pages are kept on the inactive list.
4654  *
4655  * total     target    max
4656  * memory    ratio     inactive anon
4657  * -------------------------------------
4658  *   10MB       1         5MB
4659  *  100MB       1        50MB
4660  *    1GB       3       250MB
4661  *   10GB      10       0.9GB
4662  *  100GB      31         3GB
4663  *    1TB     101        10GB
4664  *   10TB     320        32GB
4665  */
4666 void calculate_zone_inactive_ratio(struct zone *zone)
4667 {
4668 	unsigned int gb, ratio;
4669 
4670 	/* Zone size in gigabytes */
4671 	gb = zone->present_pages >> (30 - PAGE_SHIFT);
4672 	if (gb)
4673 		ratio = int_sqrt(10 * gb);
4674 	else
4675 		ratio = 1;
4676 
4677 	zone->inactive_ratio = ratio;
4678 }
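
/*
 * A worked example (illustrative): a 4GB zone has gb = 4, so
 * ratio = int_sqrt(10 * 4) = 6, meaning roughly one anonymous page in seven
 * is kept on the inactive list, consistent with the table above.
 */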
4679 
4680 static void __init setup_per_zone_inactive_ratio(void)
4681 {
4682 	struct zone *zone;
4683 
4684 	for_each_zone(zone)
4685 		calculate_zone_inactive_ratio(zone);
4686 }
4687 
4688 /*
4689  * Initialise min_free_kbytes.
4690  *
4691  * For small machines we want it small (128k min).  For large machines
4692  * we want it large (64MB max).  But it is not linear, because network
4693  * bandwidth does not increase linearly with machine size.  We use
4694  *
4695  * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4696  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
4697  *
4698  * which yields
4699  *
4700  * 16MB:	512k
4701  * 32MB:	724k
4702  * 64MB:	1024k
4703  * 128MB:	1448k
4704  * 256MB:	2048k
4705  * 512MB:	2896k
4706  * 1024MB:	4096k
4707  * 2048MB:	5792k
4708  * 4096MB:	8192k
4709  * 8192MB:	11584k
4710  * 16384MB:	16384k
4711  */
4712 static int __init init_per_zone_wmark_min(void)
4713 {
4714 	unsigned long lowmem_kbytes;
4715 
4716 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4717 
4718 	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4719 	if (min_free_kbytes < 128)
4720 		min_free_kbytes = 128;
4721 	if (min_free_kbytes > 65536)
4722 		min_free_kbytes = 65536;
4723 	setup_per_zone_wmarks();
4724 	setup_per_zone_lowmem_reserve();
4725 	setup_per_zone_inactive_ratio();
4726 	return 0;
4727 }
4728 module_init(init_per_zone_wmark_min)
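
/*
 * A worked check of the table above (illustrative): with 512MB of lowmem,
 * lowmem_kbytes = 524288 and int_sqrt(524288 * 16) = int_sqrt(8388608) = 2896,
 * matching the 512MB row.
 */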
4729 
4730 /*
4731  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4732  *	that we can call two helper functions whenever min_free_kbytes
4733  *	changes.
4734  */
4735 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4736 	void __user *buffer, size_t *length, loff_t *ppos)
4737 {
4738 	proc_dointvec(table, write, buffer, length, ppos);
4739 	if (write)
4740 		setup_per_zone_wmarks();
4741 	return 0;
4742 }
4743 
4744 #ifdef CONFIG_NUMA
4745 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4746 	void __user *buffer, size_t *length, loff_t *ppos)
4747 {
4748 	struct zone *zone;
4749 	int rc;
4750 
4751 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4752 	if (rc)
4753 		return rc;
4754 
4755 	for_each_zone(zone)
4756 		zone->min_unmapped_pages = (zone->present_pages *
4757 				sysctl_min_unmapped_ratio) / 100;
4758 	return 0;
4759 }
4760 
4761 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4762 	void __user *buffer, size_t *length, loff_t *ppos)
4763 {
4764 	struct zone *zone;
4765 	int rc;
4766 
4767 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4768 	if (rc)
4769 		return rc;
4770 
4771 	for_each_zone(zone)
4772 		zone->min_slab_pages = (zone->present_pages *
4773 				sysctl_min_slab_ratio) / 100;
4774 	return 0;
4775 }
4776 #endif
4777 
4778 /*
4779  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4780  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4781  *	whenever sysctl_lowmem_reserve_ratio changes.
4782  *
4783  * The reserve ratio obviously has absolutely no relation with the
4784  * minimum watermarks. The lowmem reserve ratio can only make sense
4785  * as a function of the boot-time zone sizes.
4786  */
4787 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4788 	void __user *buffer, size_t *length, loff_t *ppos)
4789 {
4790 	proc_dointvec_minmax(table, write, buffer, length, ppos);
4791 	setup_per_zone_lowmem_reserve();
4792 	return 0;
4793 }
4794 
4795 /*
4796  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4797  * cpu.  It is the fraction of the total pages in each zone that a hot per-cpu
4798  * pagelist can have before it gets flushed back to the buddy allocator.
4799  */
4800 
4801 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4802 	void __user *buffer, size_t *length, loff_t *ppos)
4803 {
4804 	struct zone *zone;
4805 	unsigned int cpu;
4806 	int ret;
4807 
4808 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4809 	if (!write || (ret == -EINVAL))
4810 		return ret;
4811 	for_each_populated_zone(zone) {
4812 		for_each_online_cpu(cpu) {
4813 			unsigned long  high;
4814 			high = zone->present_pages / percpu_pagelist_fraction;
4815 			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4816 		}
4817 	}
4818 	return 0;
4819 }
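
/*
 * Example (illustrative): writing 8 to /proc/sys/vm/percpu_pagelist_fraction
 * caps each per-cpu pagelist high mark at zone->present_pages / 8 for its
 * zone.
 */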
4820 
4821 int hashdist = HASHDIST_DEFAULT;
4822 
4823 #ifdef CONFIG_NUMA
4824 static int __init set_hashdist(char *str)
4825 {
4826 	if (!str)
4827 		return 0;
4828 	hashdist = simple_strtoul(str, &str, 0);
4829 	return 1;
4830 }
4831 __setup("hashdist=", set_hashdist);
4832 #endif
4833 
4834 /*
4835  * allocate a large system hash table from bootmem
4836  * - it is assumed that the hash table must contain an exact power-of-2
4837  *   quantity of entries
4838  * - limit is the number of hash buckets, not the total allocation size
4839  */
4840 void *__init alloc_large_system_hash(const char *tablename,
4841 				     unsigned long bucketsize,
4842 				     unsigned long numentries,
4843 				     int scale,
4844 				     int flags,
4845 				     unsigned int *_hash_shift,
4846 				     unsigned int *_hash_mask,
4847 				     unsigned long limit)
4848 {
4849 	unsigned long long max = limit;
4850 	unsigned long log2qty, size;
4851 	void *table = NULL;
4852 
4853 	/* allow the kernel cmdline to have a say */
4854 	if (!numentries) {
4855 		/* round applicable memory size up to nearest megabyte */
4856 		numentries = nr_kernel_pages;
4857 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4858 		numentries >>= 20 - PAGE_SHIFT;
4859 		numentries <<= 20 - PAGE_SHIFT;
4860 
4861 		/* limit to 1 bucket per 2^scale bytes of low memory */
4862 		if (scale > PAGE_SHIFT)
4863 			numentries >>= (scale - PAGE_SHIFT);
4864 		else
4865 			numentries <<= (PAGE_SHIFT - scale);
4866 
4867 		/* Make sure we've got at least a 0-order allocation.. */
4868 		if (unlikely(flags & HASH_SMALL)) {
4869 			/* Makes no sense without HASH_EARLY */
4870 			WARN_ON(!(flags & HASH_EARLY));
4871 			if (!(numentries >> *_hash_shift)) {
4872 				numentries = 1UL << *_hash_shift;
4873 				BUG_ON(!numentries);
4874 			}
4875 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4876 			numentries = PAGE_SIZE / bucketsize;
4877 	}
4878 	numentries = roundup_pow_of_two(numentries);
4879 
4880 	/* limit allocation size to 1/16 total memory by default */
4881 	if (max == 0) {
4882 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4883 		do_div(max, bucketsize);
4884 	}
4885 
4886 	if (numentries > max)
4887 		numentries = max;
4888 
4889 	log2qty = ilog2(numentries);
4890 
4891 	do {
4892 		size = bucketsize << log2qty;
4893 		if (flags & HASH_EARLY)
4894 			table = alloc_bootmem_nopanic(size);
4895 		else if (hashdist)
4896 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4897 		else {
4898 			/*
4899 			 * If bucketsize is not a power of two, some pages at
4900 			 * the end of the hash table can be freed;
4901 			 * alloc_pages_exact() does that automatically.
4902 			 */
4903 			if (get_order(size) < MAX_ORDER) {
4904 				table = alloc_pages_exact(size, GFP_ATOMIC);
4905 				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4906 			}
4907 		}
4908 	} while (!table && size > PAGE_SIZE && --log2qty);
4909 
4910 	if (!table)
4911 		panic("Failed to allocate %s hash table\n", tablename);
4912 
4913 	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4914 	       tablename,
4915 	       (1U << log2qty),
4916 	       ilog2(size) - PAGE_SHIFT,
4917 	       size);
4918 
4919 	if (_hash_shift)
4920 		*_hash_shift = log2qty;
4921 	if (_hash_mask)
4922 		*_hash_mask = (1 << log2qty) - 1;
4923 
4924 	return table;
4925 }
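
/*
 * A minimal usage sketch (illustrative; "example_hash", the scale and the
 * limit values are hypothetical): a subsystem sizing an early-boot hash
 * table would call this roughly as follows.
 */
#if 0
static struct hlist_head *example_hash;
static unsigned int example_hash_shift;

void __init example_hash_init(void)
{
	example_hash = alloc_large_system_hash("example",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					14,		/* one bucket per 16KB */
					HASH_EARLY,	/* allocate from bootmem */
					&example_hash_shift,
					NULL,		/* hash mask not needed */
					0);		/* default limit */
}
#endif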
4926 
4927 /* Return a pointer to the bitmap storing bits affecting a block of pages */
4928 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4929 							unsigned long pfn)
4930 {
4931 #ifdef CONFIG_SPARSEMEM
4932 	return __pfn_to_section(pfn)->pageblock_flags;
4933 #else
4934 	return zone->pageblock_flags;
4935 #endif /* CONFIG_SPARSEMEM */
4936 }
4937 
4938 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4939 {
4940 #ifdef CONFIG_SPARSEMEM
4941 	pfn &= (PAGES_PER_SECTION-1);
4942 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4943 #else
4944 	pfn = pfn - zone->zone_start_pfn;
4945 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4946 #endif /* CONFIG_SPARSEMEM */
4947 }
4948 
4949 /**
4950  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4951  * @page: The page within the block of interest
4952  * @start_bitidx: The first bit of interest to retrieve
4953  * @end_bitidx: The last bit of interest
4954  * returns pageblock_bits flags
4955  */
4956 unsigned long get_pageblock_flags_group(struct page *page,
4957 					int start_bitidx, int end_bitidx)
4958 {
4959 	struct zone *zone;
4960 	unsigned long *bitmap;
4961 	unsigned long pfn, bitidx;
4962 	unsigned long flags = 0;
4963 	unsigned long value = 1;
4964 
4965 	zone = page_zone(page);
4966 	pfn = page_to_pfn(page);
4967 	bitmap = get_pageblock_bitmap(zone, pfn);
4968 	bitidx = pfn_to_bitidx(zone, pfn);
4969 
4970 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4971 		if (test_bit(bitidx + start_bitidx, bitmap))
4972 			flags |= value;
4973 
4974 	return flags;
4975 }
4976 
4977 /**
4978  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4979  * @page: The page within the block of interest
4980  * @start_bitidx: The first bit of interest
4981  * @end_bitidx: The last bit of interest
4982  * @flags: The flags to set
4983  */
4984 void set_pageblock_flags_group(struct page *page, unsigned long flags,
4985 					int start_bitidx, int end_bitidx)
4986 {
4987 	struct zone *zone;
4988 	unsigned long *bitmap;
4989 	unsigned long pfn, bitidx;
4990 	unsigned long value = 1;
4991 
4992 	zone = page_zone(page);
4993 	pfn = page_to_pfn(page);
4994 	bitmap = get_pageblock_bitmap(zone, pfn);
4995 	bitidx = pfn_to_bitidx(zone, pfn);
4996 	VM_BUG_ON(pfn < zone->zone_start_pfn);
4997 	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4998 
4999 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5000 		if (flags & value)
5001 			__set_bit(bitidx + start_bitidx, bitmap);
5002 		else
5003 			__clear_bit(bitidx + start_bitidx, bitmap);
5004 }
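
/*
 * Note: get_pageblock_migratetype() and set_pageblock_migratetype(), used
 * below, are thin wrappers around these two helpers operating on the
 * PB_migrate group of bits (see include/linux/pageblock-flags.h).
 */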
5005 
5006 /*
5007  * This is designed as a sub-function; please see page_isolation.c as well.
5008  * It sets/clears a page block's type to ISOLATE.
5009  * The page allocator never allocates memory from an ISOLATE block.
5010  */
5011 
5012 int set_migratetype_isolate(struct page *page)
5013 {
5014 	struct zone *zone;
5015 	struct page *curr_page;
5016 	unsigned long flags, pfn, iter;
5017 	unsigned long immobile = 0;
5018 	struct memory_isolate_notify arg;
5019 	int notifier_ret;
5020 	int ret = -EBUSY;
5021 	int zone_idx;
5022 
5023 	zone = page_zone(page);
5024 	zone_idx = zone_idx(zone);
5025 
5026 	spin_lock_irqsave(&zone->lock, flags);
5027 	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
5028 	    zone_idx == ZONE_MOVABLE) {
5029 		ret = 0;
5030 		goto out;
5031 	}
5032 
5033 	pfn = page_to_pfn(page);
5034 	arg.start_pfn = pfn;
5035 	arg.nr_pages = pageblock_nr_pages;
5036 	arg.pages_found = 0;
5037 
5038 	/*
5039 	 * It may be possible to isolate a pageblock even if the
5040 	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5041 	 * notifier chain is used by balloon drivers to return the
5042 	 * number of pages in a range that are held by the balloon
5043 	 * driver to shrink memory. If all the pages are accounted for
5044 	 * by balloons, are free, or on the LRU, isolation can continue.
5045 	 * Later, for example, when memory hotplug notifier runs, these
5046 	 * pages reported as "can be isolated" should be isolated (freed)
5047 	 * by the balloon driver through the memory notifier chain.
5048 	 */
5049 	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5050 	notifier_ret = notifier_to_errno(notifier_ret);
5051 	if (notifier_ret || !arg.pages_found)
5052 		goto out;
5053 
5054 	for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
5055 		if (!pfn_valid_within(iter))
5056 			continue;
5057 
5058 		curr_page = pfn_to_page(iter);
5059 		if (!page_count(curr_page) || PageLRU(curr_page))
5060 			continue;
5061 
5062 		immobile++;
5063 	}
5064 
5065 	if (arg.pages_found == immobile)
5066 		ret = 0;
5067 
5068 out:
5069 	if (!ret) {
5070 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5071 		move_freepages_block(zone, page, MIGRATE_ISOLATE);
5072 	}
5073 
5074 	spin_unlock_irqrestore(&zone->lock, flags);
5075 	if (!ret)
5076 		drain_all_pages();
5077 	return ret;
5078 }
5079 
5080 void unset_migratetype_isolate(struct page *page)
5081 {
5082 	struct zone *zone;
5083 	unsigned long flags;
5084 	zone = page_zone(page);
5085 	spin_lock_irqsave(&zone->lock, flags);
5086 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5087 		goto out;
5088 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5089 	move_freepages_block(zone, page, MIGRATE_MOVABLE);
5090 out:
5091 	spin_unlock_irqrestore(&zone->lock, flags);
5092 }
5093 
5094 #ifdef CONFIG_MEMORY_HOTREMOVE
5095 /*
5096  * All pages in the range must be isolated before calling this.
5097  */
5098 void
5099 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5100 {
5101 	struct page *page;
5102 	struct zone *zone;
5103 	int order, i;
5104 	unsigned long pfn;
5105 	unsigned long flags;
5106 	/* find the first valid pfn */
5107 	for (pfn = start_pfn; pfn < end_pfn; pfn++)
5108 		if (pfn_valid(pfn))
5109 			break;
5110 	if (pfn == end_pfn)
5111 		return;
5112 	zone = page_zone(pfn_to_page(pfn));
5113 	spin_lock_irqsave(&zone->lock, flags);
5114 	pfn = start_pfn;
5115 	while (pfn < end_pfn) {
5116 		if (!pfn_valid(pfn)) {
5117 			pfn++;
5118 			continue;
5119 		}
5120 		page = pfn_to_page(pfn);
5121 		BUG_ON(page_count(page));
5122 		BUG_ON(!PageBuddy(page));
5123 		order = page_order(page);
5124 #ifdef CONFIG_DEBUG_VM
5125 		printk(KERN_INFO "remove from free list %lx %d %lx\n",
5126 		       pfn, 1 << order, end_pfn);
5127 #endif
5128 		list_del(&page->lru);
5129 		rmv_page_order(page);
5130 		zone->free_area[order].nr_free--;
5131 		__mod_zone_page_state(zone, NR_FREE_PAGES,
5132 				      - (1UL << order));
5133 		for (i = 0; i < (1 << order); i++)
5134 			SetPageReserved((page+i));
5135 		pfn += (1 << order);
5136 	}
5137 	spin_unlock_irqrestore(&zone->lock, flags);
5138 }
5139 #endif
5140 
5141 #ifdef CONFIG_MEMORY_FAILURE
5142 bool is_free_buddy_page(struct page *page)
5143 {
5144 	struct zone *zone = page_zone(page);
5145 	unsigned long pfn = page_to_pfn(page);
5146 	unsigned long flags;
5147 	int order;
5148 
5149 	spin_lock_irqsave(&zone->lock, flags);
5150 	for (order = 0; order < MAX_ORDER; order++) {
5151 		struct page *page_head = page - (pfn & ((1 << order) - 1));
5152 
5153 		if (PageBuddy(page_head) && page_order(page_head) >= order)
5154 			break;
5155 	}
5156 	spin_unlock_irqrestore(&zone->lock, flags);
5157 
5158 	return order < MAX_ORDER;
5159 }
5160 #endif
5161