xref: /openbmc/linux/mm/hugetlb.c (revision 384740dc)
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 
21 #include <asm/page.h>
22 #include <asm/pgtable.h>
23 #include <asm/io.h>
24 
25 #include <linux/hugetlb.h>
26 #include "internal.h"
27 
28 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
29 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
30 unsigned long hugepages_treat_as_movable;
31 
32 static int max_hstate;
33 unsigned int default_hstate_idx;
34 struct hstate hstates[HUGE_MAX_HSTATE];
35 
36 __initdata LIST_HEAD(huge_boot_pages);
37 
38 /* for command line parsing */
39 static struct hstate * __initdata parsed_hstate;
40 static unsigned long __initdata default_hstate_max_huge_pages;
41 static unsigned long __initdata default_hstate_size;
42 
43 #define for_each_hstate(h) \
44 	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
45 
46 /*
47  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
48  */
49 static DEFINE_SPINLOCK(hugetlb_lock);
50 
51 /*
52  * Region tracking -- allows tracking of reservations and instantiated pages
53  *                    across the pages in a mapping.
54  *
55  * The region data structures are protected by a combination of the mmap_sem
56  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
57  * must either hold the mmap_sem for write, or the mmap_sem for read and
58  * the hugetlb_instantiation mutex:
59  *
60  * 	down_write(&mm->mmap_sem);
61  * or
62  * 	down_read(&mm->mmap_sem);
63  * 	mutex_lock(&hugetlb_instantiation_mutex);
64  */
65 struct file_region {
66 	struct list_head link;
67 	long from;
68 	long to;
69 };
70 
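/*
 * Add the range [f, t) to the reservation map, coalescing it with any
 * existing regions it touches or overlaps.  Callers are expected to have
 * called region_chg() for the same range first, so a suitable list entry
 * already exists and this function never allocates; it always returns 0.
 */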
71 static long region_add(struct list_head *head, long f, long t)
72 {
73 	struct file_region *rg, *nrg, *trg;
74 
75 	/* Locate the region we are either in or before. */
76 	list_for_each_entry(rg, head, link)
77 		if (f <= rg->to)
78 			break;
79 
80 	/* Round our left edge to the current segment if it encloses us. */
81 	if (f > rg->from)
82 		f = rg->from;
83 
84 	/* Check for and consume any regions we now overlap with. */
85 	nrg = rg;
86 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
87 		if (&rg->link == head)
88 			break;
89 		if (rg->from > t)
90 			break;
91 
92 		/* If this area reaches higher than ours then extend our area
93 		 * to include it completely.  If this is not the first area
94 		 * which we intend to reuse, free it. */
95 		if (rg->to > t)
96 			t = rg->to;
97 		if (rg != nrg) {
98 			list_del(&rg->link);
99 			kfree(rg);
100 		}
101 	}
102 	nrg->from = f;
103 	nrg->to = t;
104 	return 0;
105 }
106 
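/*
 * Work out how many new huge pages the range [f, t) would add to the
 * reservation map, without committing the change.  A zero-sized placeholder
 * entry may be allocated here so that a later region_add() for the same
 * range cannot fail.  Returns the number of pages needed, or -ENOMEM.
 */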
107 static long region_chg(struct list_head *head, long f, long t)
108 {
109 	struct file_region *rg, *nrg;
110 	long chg = 0;
111 
112 	/* Locate the region we are before or in. */
113 	list_for_each_entry(rg, head, link)
114 		if (f <= rg->to)
115 			break;
116 
117 	/* If we are below the current region then a new region is required.
118 	 * Subtle: allocate a new region at this position but make it zero
119 	 * sized so that we can guarantee to record the reservation. */
120 	if (&rg->link == head || t < rg->from) {
121 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
122 		if (!nrg)
123 			return -ENOMEM;
124 		nrg->from = f;
125 		nrg->to   = f;
126 		INIT_LIST_HEAD(&nrg->link);
127 		list_add(&nrg->link, rg->link.prev);
128 
129 		return t - f;
130 	}
131 
132 	/* Round our left edge to the current segment if it encloses us. */
133 	if (f > rg->from)
134 		f = rg->from;
135 	chg = t - f;
136 
137 	/* Check for and consume any regions we now overlap with. */
138 	list_for_each_entry(rg, rg->link.prev, link) {
139 		if (&rg->link == head)
140 			break;
141 		if (rg->from > t)
142 			return chg;
143 
144 		/* We overlap with this area; if it extends further than
145 		 * us then we must extend ourselves.  Account for its
146 		 * existing reservation. */
147 		if (rg->to > t) {
148 			chg += rg->to - t;
149 			t = rg->to;
150 		}
151 		chg -= rg->to - rg->from;
152 	}
153 	return chg;
154 }
155 
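/*
 * Truncate the reservation map at 'end': trim any region that straddles
 * it and free every region above it.  Returns the number of huge pages
 * removed from the map.
 */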
156 static long region_truncate(struct list_head *head, long end)
157 {
158 	struct file_region *rg, *trg;
159 	long chg = 0;
160 
161 	/* Locate the region we are either in or before. */
162 	list_for_each_entry(rg, head, link)
163 		if (end <= rg->to)
164 			break;
165 	if (&rg->link == head)
166 		return 0;
167 
168 	/* If we are in the middle of a region then adjust it. */
169 	if (end > rg->from) {
170 		chg = rg->to - end;
171 		rg->to = end;
172 		rg = list_entry(rg->link.next, typeof(*rg), link);
173 	}
174 
175 	/* Drop any remaining regions. */
176 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
177 		if (&rg->link == head)
178 			break;
179 		chg += rg->to - rg->from;
180 		list_del(&rg->link);
181 		kfree(rg);
182 	}
183 	return chg;
184 }
185 
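/*
 * Count how many huge pages in the range [f, t) are covered by regions
 * in the reservation map.
 */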
186 static long region_count(struct list_head *head, long f, long t)
187 {
188 	struct file_region *rg;
189 	long chg = 0;
190 
191 	/* Locate each segment we overlap with, and count that overlap. */
192 	list_for_each_entry(rg, head, link) {
193 		int seg_from;
194 		int seg_to;
195 
196 		if (rg->to <= f)
197 			continue;
198 		if (rg->from >= t)
199 			break;
200 
201 		seg_from = max(rg->from, f);
202 		seg_to = min(rg->to, t);
203 
204 		chg += seg_to - seg_from;
205 	}
206 
207 	return chg;
208 }
209 
210 /*
211  * Convert the address within this vma to the page offset within
212  * the mapping, in pagecache page units; huge pages here.
213  */
214 static pgoff_t vma_hugecache_offset(struct hstate *h,
215 			struct vm_area_struct *vma, unsigned long address)
216 {
217 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
218 			(vma->vm_pgoff >> huge_page_order(h));
219 }
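
/*
 * Illustrative example (assuming 4K base pages and a 2MB huge page size,
 * so huge_page_shift() == 21 and huge_page_order() == 9): for a VMA with
 * vm_pgoff == 1024 (a 4MB file offset), an address 4MB past vm_start maps
 * to pagecache index (4MB >> 21) + (1024 >> 9) == 2 + 2 == 4.
 */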
220 
221 /*
222  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
223  * bits of the reservation map pointer, which are always clear due to
224  * alignment.
225  */
226 #define HPAGE_RESV_OWNER    (1UL << 0)
227 #define HPAGE_RESV_UNMAPPED (1UL << 1)
228 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
229 
230 /*
231  * These helpers are used to track how many pages are reserved for
232  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
233  * is guaranteed to have its future faults succeed.
234  *
235  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
236  * the reserve counters are updated with the hugetlb_lock held. It is safe
237  * to reset the VMA at fork() time as it is not in use yet and there is no
238  * chance of the global counters getting corrupted as a result of the inherited values.
239  *
240  * The private mapping reservation is represented in a subtly different
241  * manner to a shared mapping.  A shared mapping has a region map associated
242  * with the underlying file; this region map represents the backing file
243  * pages which have ever had a reservation assigned, and this persists even
244  * after the page is instantiated.  A private mapping has a region map
245  * associated with the original mmap which is attached to all VMAs which
246  * reference it; this region map represents those offsets which have consumed
247  * a reservation, ie. where pages have been instantiated.
248  */
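
/*
 * For a private mapping, vm_private_data packs the resv_map pointer and the
 * HPAGE_RESV_* flags into a single word:
 *
 *	[ struct resv_map * (low bits zero due to alignment) | flags ]
 *
 * so vma_resv_map() masks off HPAGE_RESV_MASK to recover the pointer and
 * set_vma_resv_flags() ORs new flag bits into the low bits.
 */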
249 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
250 {
251 	return (unsigned long)vma->vm_private_data;
252 }
253 
254 static void set_vma_private_data(struct vm_area_struct *vma,
255 							unsigned long value)
256 {
257 	vma->vm_private_data = (void *)value;
258 }
259 
260 struct resv_map {
261 	struct kref refs;
262 	struct list_head regions;
263 };
264 
265 struct resv_map *resv_map_alloc(void)
266 {
267 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
268 	if (!resv_map)
269 		return NULL;
270 
271 	kref_init(&resv_map->refs);
272 	INIT_LIST_HEAD(&resv_map->regions);
273 
274 	return resv_map;
275 }
276 
277 void resv_map_release(struct kref *ref)
278 {
279 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
280 
281 	/* Clear out any active regions before we release the map. */
282 	region_truncate(&resv_map->regions, 0);
283 	kfree(resv_map);
284 }
285 
286 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
287 {
288 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
289 	if (!(vma->vm_flags & VM_SHARED))
290 		return (struct resv_map *)(get_vma_private_data(vma) &
291 							~HPAGE_RESV_MASK);
292 	return NULL;
293 }
294 
295 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
296 {
297 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
298 	VM_BUG_ON(vma->vm_flags & VM_SHARED);
299 
300 	set_vma_private_data(vma, (get_vma_private_data(vma) &
301 				HPAGE_RESV_MASK) | (unsigned long)map);
302 }
303 
304 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
305 {
306 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
307 	VM_BUG_ON(vma->vm_flags & VM_SHARED);
308 
309 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
310 }
311 
312 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
313 {
314 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
315 
316 	return (get_vma_private_data(vma) & flag) != 0;
317 }
318 
319 /* Decrement the reserved pages in the hugepage pool by one */
320 static void decrement_hugepage_resv_vma(struct hstate *h,
321 			struct vm_area_struct *vma)
322 {
323 	if (vma->vm_flags & VM_NORESERVE)
324 		return;
325 
326 	if (vma->vm_flags & VM_SHARED) {
327 		/* Shared mappings always use reserves */
328 		h->resv_huge_pages--;
329 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
330 		/*
331 		 * Only the process that called mmap() has reserves for
332 		 * private mappings.
333 		 */
334 		h->resv_huge_pages--;
335 	}
336 }
337 
338 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
339 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
340 {
341 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
342 	if (!(vma->vm_flags & VM_SHARED))
343 		vma->vm_private_data = (void *)0;
344 }
345 
346 /* Returns true if the VMA has associated reserve pages */
347 static int vma_has_reserves(struct vm_area_struct *vma)
348 {
349 	if (vma->vm_flags & VM_SHARED)
350 		return 1;
351 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
352 		return 1;
353 	return 0;
354 }
355 
356 static void clear_huge_page(struct page *page,
357 			unsigned long addr, unsigned long sz)
358 {
359 	int i;
360 
361 	might_sleep();
362 	for (i = 0; i < sz/PAGE_SIZE; i++) {
363 		cond_resched();
364 		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
365 	}
366 }
367 
368 static void copy_huge_page(struct page *dst, struct page *src,
369 			   unsigned long addr, struct vm_area_struct *vma)
370 {
371 	int i;
372 	struct hstate *h = hstate_vma(vma);
373 
374 	might_sleep();
375 	for (i = 0; i < pages_per_huge_page(h); i++) {
376 		cond_resched();
377 		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
378 	}
379 }
380 
381 static void enqueue_huge_page(struct hstate *h, struct page *page)
382 {
383 	int nid = page_to_nid(page);
384 	list_add(&page->lru, &h->hugepage_freelists[nid]);
385 	h->free_huge_pages++;
386 	h->free_huge_pages_node[nid]++;
387 }
388 
389 static struct page *dequeue_huge_page(struct hstate *h)
390 {
391 	int nid;
392 	struct page *page = NULL;
393 
394 	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
395 		if (!list_empty(&h->hugepage_freelists[nid])) {
396 			page = list_entry(h->hugepage_freelists[nid].next,
397 					  struct page, lru);
398 			list_del(&page->lru);
399 			h->free_huge_pages--;
400 			h->free_huge_pages_node[nid]--;
401 			break;
402 		}
403 	}
404 	return page;
405 }
406 
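/*
 * Dequeue a free huge page for a fault at 'address' in 'vma', walking the
 * zonelist dictated by the VMA's memory policy and the current cpuset.
 * Reserved pages are left for VMAs entitled to them, and the reserve count
 * is decremented when a reserve is consumed.
 */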
407 static struct page *dequeue_huge_page_vma(struct hstate *h,
408 				struct vm_area_struct *vma,
409 				unsigned long address, int avoid_reserve)
410 {
411 	int nid;
412 	struct page *page = NULL;
413 	struct mempolicy *mpol;
414 	nodemask_t *nodemask;
415 	struct zonelist *zonelist = huge_zonelist(vma, address,
416 					htlb_alloc_mask, &mpol, &nodemask);
417 	struct zone *zone;
418 	struct zoneref *z;
419 
420 	/*
421 	 * A child process with MAP_PRIVATE mappings created by its parent
422 	 * has no page reserves. This check ensures that reservations are
423 	 * not "stolen". The child may still get SIGKILLed.
424 	 */
425 	if (!vma_has_reserves(vma) &&
426 			h->free_huge_pages - h->resv_huge_pages == 0)
427 		return NULL;
428 
429 	/* If reserves cannot be used, ensure enough pages are in the pool */
430 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
431 		return NULL;
432 
433 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
434 						MAX_NR_ZONES - 1, nodemask) {
435 		nid = zone_to_nid(zone);
436 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
437 		    !list_empty(&h->hugepage_freelists[nid])) {
438 			page = list_entry(h->hugepage_freelists[nid].next,
439 					  struct page, lru);
440 			list_del(&page->lru);
441 			h->free_huge_pages--;
442 			h->free_huge_pages_node[nid]--;
443 
444 			if (!avoid_reserve)
445 				decrement_hugepage_resv_vma(h, vma);
446 
447 			break;
448 		}
449 	}
450 	mpol_cond_put(mpol);
451 	return page;
452 }
453 
454 static void update_and_free_page(struct hstate *h, struct page *page)
455 {
456 	int i;
457 
458 	h->nr_huge_pages--;
459 	h->nr_huge_pages_node[page_to_nid(page)]--;
460 	for (i = 0; i < pages_per_huge_page(h); i++) {
461 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
462 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
463 				1 << PG_private | 1<< PG_writeback);
464 	}
465 	set_compound_page_dtor(page, NULL);
466 	set_page_refcounted(page);
467 	arch_release_hugepage(page);
468 	__free_pages(page, huge_page_order(h));
469 }
470 
471 struct hstate *size_to_hstate(unsigned long size)
472 {
473 	struct hstate *h;
474 
475 	for_each_hstate(h) {
476 		if (huge_page_size(h) == size)
477 			return h;
478 	}
479 	return NULL;
480 }
481 
482 static void free_huge_page(struct page *page)
483 {
484 	/*
485 	 * Can't pass hstate in here because it is called from the
486 	 * compound page destructor.
487 	 */
488 	struct hstate *h = page_hstate(page);
489 	int nid = page_to_nid(page);
490 	struct address_space *mapping;
491 
492 	mapping = (struct address_space *) page_private(page);
493 	set_page_private(page, 0);
494 	BUG_ON(page_count(page));
495 	INIT_LIST_HEAD(&page->lru);
496 
497 	spin_lock(&hugetlb_lock);
498 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
499 		update_and_free_page(h, page);
500 		h->surplus_huge_pages--;
501 		h->surplus_huge_pages_node[nid]--;
502 	} else {
503 		enqueue_huge_page(h, page);
504 	}
505 	spin_unlock(&hugetlb_lock);
506 	if (mapping)
507 		hugetlb_put_quota(mapping, 1);
508 }
509 
510 /*
511  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
512  * balanced by operating on them in a round-robin fashion.
513  * Returns 1 if an adjustment was made.
514  */
515 static int adjust_pool_surplus(struct hstate *h, int delta)
516 {
517 	static int prev_nid;
518 	int nid = prev_nid;
519 	int ret = 0;
520 
521 	VM_BUG_ON(delta != -1 && delta != 1);
522 	do {
523 		nid = next_node(nid, node_online_map);
524 		if (nid == MAX_NUMNODES)
525 			nid = first_node(node_online_map);
526 
527 		/* To shrink on this node, there must be a surplus page */
528 		if (delta < 0 && !h->surplus_huge_pages_node[nid])
529 			continue;
530 		/* Surplus cannot exceed the total number of pages */
531 		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
532 						h->nr_huge_pages_node[nid])
533 			continue;
534 
535 		h->surplus_huge_pages += delta;
536 		h->surplus_huge_pages_node[nid] += delta;
537 		ret = 1;
538 		break;
539 	} while (nid != prev_nid);
540 
541 	prev_nid = nid;
542 	return ret;
543 }
544 
545 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
546 {
547 	set_compound_page_dtor(page, free_huge_page);
548 	spin_lock(&hugetlb_lock);
549 	h->nr_huge_pages++;
550 	h->nr_huge_pages_node[nid]++;
551 	spin_unlock(&hugetlb_lock);
552 	put_page(page); /* free it into the hugepage allocator */
553 }
554 
555 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
556 {
557 	struct page *page;
558 
559 	if (h->order >= MAX_ORDER)
560 		return NULL;
561 
562 	page = alloc_pages_node(nid,
563 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
564 						__GFP_REPEAT|__GFP_NOWARN,
565 		huge_page_order(h));
566 	if (page) {
567 		if (arch_prepare_hugepage(page)) {
568 			__free_pages(page, huge_page_order(h));
569 			return NULL;
570 		}
571 		prep_new_huge_page(h, page, nid);
572 	}
573 
574 	return page;
575 }
576 
577 /*
578  * Use a helper variable to find the next node and then
579  * copy it back to hugetlb_next_nid afterwards:
580  * otherwise there's a window in which a racer might
581  * pass invalid nid MAX_NUMNODES to alloc_pages_node.
582  * But we don't need to use a spin_lock here: it really
583  * doesn't matter if occasionally a racer chooses the
584  * same nid as we do.  Move nid forward in the mask even
585  * if we just successfully allocated a hugepage so that
586  * the next caller gets hugepages on the next node.
587  */
588 static int hstate_next_node(struct hstate *h)
589 {
590 	int next_nid;
591 	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
592 	if (next_nid == MAX_NUMNODES)
593 		next_nid = first_node(node_online_map);
594 	h->hugetlb_next_nid = next_nid;
595 	return next_nid;
596 }
597 
598 static int alloc_fresh_huge_page(struct hstate *h)
599 {
600 	struct page *page;
601 	int start_nid;
602 	int next_nid;
603 	int ret = 0;
604 
605 	start_nid = h->hugetlb_next_nid;
606 
607 	do {
608 		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
609 		if (page)
610 			ret = 1;
611 		next_nid = hstate_next_node(h);
612 	} while (!page && h->hugetlb_next_nid != start_nid);
613 
614 	if (ret)
615 		count_vm_event(HTLB_BUDDY_PGALLOC);
616 	else
617 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
618 
619 	return ret;
620 }
621 
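/*
 * Allocate a fresh "surplus" huge page straight from the buddy allocator,
 * used when the static pool cannot satisfy a request and the overcommit
 * limit (nr_overcommit_huge_pages) still allows growth.
 */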
622 static struct page *alloc_buddy_huge_page(struct hstate *h,
623 			struct vm_area_struct *vma, unsigned long address)
624 {
625 	struct page *page;
626 	unsigned int nid;
627 
628 	if (h->order >= MAX_ORDER)
629 		return NULL;
630 
631 	/*
632 	 * Assume we will successfully allocate the surplus page to
633 	 * prevent racing processes from causing the surplus to exceed
634 	 * overcommit
635 	 *
636 	 * This however introduces a different race, where a process B
637 	 * tries to grow the static hugepage pool while alloc_pages() is
638 	 * called by process A. B will only examine the per-node
639 	 * counters in determining if surplus huge pages can be
640 	 * converted to normal huge pages in adjust_pool_surplus(). A
641 	 * won't be able to increment the per-node counter, until the
642 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
643 	 * no more huge pages can be converted from surplus to normal
644 	 * state (and doesn't try to convert again). Thus, we have a
645 	 * case where a surplus huge page exists, the pool is grown, and
646 	 * the surplus huge page still exists after, even though it
647 	 * should just have been converted to a normal huge page. This
648 	 * does not leak memory, though, as the hugepage will be freed
649 	 * once it is out of use. It also does not allow the counters to
650 	 * go out of whack in adjust_pool_surplus() as we don't modify
651 	 * the node values until we've gotten the hugepage and only the
652 	 * per-node value is checked there.
653 	 */
654 	spin_lock(&hugetlb_lock);
655 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
656 		spin_unlock(&hugetlb_lock);
657 		return NULL;
658 	} else {
659 		h->nr_huge_pages++;
660 		h->surplus_huge_pages++;
661 	}
662 	spin_unlock(&hugetlb_lock);
663 
664 	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
665 					__GFP_REPEAT|__GFP_NOWARN,
666 					huge_page_order(h));
667 
668 	if (page && arch_prepare_hugepage(page)) {
669 		__free_pages(page, huge_page_order(h));
670 		return NULL;
671 	}
672 
673 	spin_lock(&hugetlb_lock);
674 	if (page) {
675 		/*
676 		 * This page is now managed by the hugetlb allocator and has
677 		 * no users -- drop the buddy allocator's reference.
678 		 */
679 		put_page_testzero(page);
680 		VM_BUG_ON(page_count(page));
681 		nid = page_to_nid(page);
682 		set_compound_page_dtor(page, free_huge_page);
683 		/*
684 		 * We incremented the global counters already
685 		 */
686 		h->nr_huge_pages_node[nid]++;
687 		h->surplus_huge_pages_node[nid]++;
688 		__count_vm_event(HTLB_BUDDY_PGALLOC);
689 	} else {
690 		h->nr_huge_pages--;
691 		h->surplus_huge_pages--;
692 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
693 	}
694 	spin_unlock(&hugetlb_lock);
695 
696 	return page;
697 }
698 
699 /*
700  * Increase the hugetlb pool such that it can accommodate a reservation
701  * of size 'delta'.
702  */
703 static int gather_surplus_pages(struct hstate *h, int delta)
704 {
705 	struct list_head surplus_list;
706 	struct page *page, *tmp;
707 	int ret, i;
708 	int needed, allocated;
709 
710 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
711 	if (needed <= 0) {
712 		h->resv_huge_pages += delta;
713 		return 0;
714 	}
715 
716 	allocated = 0;
717 	INIT_LIST_HEAD(&surplus_list);
718 
719 	ret = -ENOMEM;
720 retry:
721 	spin_unlock(&hugetlb_lock);
722 	for (i = 0; i < needed; i++) {
723 		page = alloc_buddy_huge_page(h, NULL, 0);
724 		if (!page) {
725 			/*
726 			 * We were not able to allocate enough pages to
727 			 * satisfy the entire reservation so we free what
728 			 * we've allocated so far.
729 			 */
730 			spin_lock(&hugetlb_lock);
731 			needed = 0;
732 			goto free;
733 		}
734 
735 		list_add(&page->lru, &surplus_list);
736 	}
737 	allocated += needed;
738 
739 	/*
740 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
741 	 * because either resv_huge_pages or free_huge_pages may have changed.
742 	 */
743 	spin_lock(&hugetlb_lock);
744 	needed = (h->resv_huge_pages + delta) -
745 			(h->free_huge_pages + allocated);
746 	if (needed > 0)
747 		goto retry;
748 
749 	/*
750 	 * The surplus_list now contains _at_least_ the number of extra pages
751  * needed to accommodate the reservation.  Add the appropriate number
752 	 * of pages to the hugetlb pool and free the extras back to the buddy
753 	 * allocator.  Commit the entire reservation here to prevent another
754 	 * process from stealing the pages as they are added to the pool but
755 	 * before they are reserved.
756 	 */
757 	needed += allocated;
758 	h->resv_huge_pages += delta;
759 	ret = 0;
760 free:
761 	/* Free the needed pages to the hugetlb pool */
762 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
763 		if ((--needed) < 0)
764 			break;
765 		list_del(&page->lru);
766 		enqueue_huge_page(h, page);
767 	}
768 
769 	/* Free unnecessary surplus pages to the buddy allocator */
770 	if (!list_empty(&surplus_list)) {
771 		spin_unlock(&hugetlb_lock);
772 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
773 			list_del(&page->lru);
774 			/*
775 			 * The page has a reference count of zero already, so
776 			 * call free_huge_page directly instead of using
777 			 * put_page.  This must be done with hugetlb_lock
778 			 * unlocked which is safe because free_huge_page takes
779 			 * hugetlb_lock before deciding how to free the page.
780 			 */
781 			free_huge_page(page);
782 		}
783 		spin_lock(&hugetlb_lock);
784 	}
785 
786 	return ret;
787 }
788 
789 /*
790  * When releasing a hugetlb pool reservation, any surplus pages that were
791  * allocated to satisfy the reservation must be explicitly freed if they were
792  * never used.
793  */
794 static void return_unused_surplus_pages(struct hstate *h,
795 					unsigned long unused_resv_pages)
796 {
797 	static int nid = -1;
798 	struct page *page;
799 	unsigned long nr_pages;
800 
801 	/*
802 	 * We want to release as many surplus pages as possible, spread
803 	 * evenly across all nodes. Iterate across all nodes until we
804 	 * can no longer free unreserved surplus pages. This occurs when
805 	 * the nodes with surplus pages have no free pages.
806 	 */
807 	unsigned long remaining_iterations = num_online_nodes();
808 
809 	/* Uncommit the reservation */
810 	h->resv_huge_pages -= unused_resv_pages;
811 
812 	/* Cannot return gigantic pages currently */
813 	if (h->order >= MAX_ORDER)
814 		return;
815 
816 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
817 
818 	while (remaining_iterations-- && nr_pages) {
819 		nid = next_node(nid, node_online_map);
820 		if (nid == MAX_NUMNODES)
821 			nid = first_node(node_online_map);
822 
823 		if (!h->surplus_huge_pages_node[nid])
824 			continue;
825 
826 		if (!list_empty(&h->hugepage_freelists[nid])) {
827 			page = list_entry(h->hugepage_freelists[nid].next,
828 					  struct page, lru);
829 			list_del(&page->lru);
830 			update_and_free_page(h, page);
831 			h->free_huge_pages--;
832 			h->free_huge_pages_node[nid]--;
833 			h->surplus_huge_pages--;
834 			h->surplus_huge_pages_node[nid]--;
835 			nr_pages--;
836 			remaining_iterations = num_online_nodes();
837 		}
838 	}
839 }
840 
841 /*
842  * Determine if the huge page at addr within the vma has an associated
843  * reservation.  Where it does not we will need to logically increase
844  * the reservation and actually increase the quota before an allocation
845  * can occur.  Where any new reservation would be required the change is
846  * prepared, but not committed.  Once the page has been quota'd, allocated
847  * and instantiated, the change should be committed via
848  * vma_commit_reservation.  No action is required on failure.
849  */
850 static int vma_needs_reservation(struct hstate *h,
851 			struct vm_area_struct *vma, unsigned long addr)
852 {
853 	struct address_space *mapping = vma->vm_file->f_mapping;
854 	struct inode *inode = mapping->host;
855 
856 	if (vma->vm_flags & VM_SHARED) {
857 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
858 		return region_chg(&inode->i_mapping->private_list,
859 							idx, idx + 1);
860 
861 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
862 		return 1;
863 
864 	} else  {
865 		int err;
866 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
867 		struct resv_map *reservations = vma_resv_map(vma);
868 
869 		err = region_chg(&reservations->regions, idx, idx + 1);
870 		if (err < 0)
871 			return err;
872 		return 0;
873 	}
874 }
875 static void vma_commit_reservation(struct hstate *h,
876 			struct vm_area_struct *vma, unsigned long addr)
877 {
878 	struct address_space *mapping = vma->vm_file->f_mapping;
879 	struct inode *inode = mapping->host;
880 
881 	if (vma->vm_flags & VM_SHARED) {
882 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
883 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
884 
885 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
886 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
887 		struct resv_map *reservations = vma_resv_map(vma);
888 
889 		/* Mark this page used in the map. */
890 		region_add(&reservations->regions, idx, idx + 1);
891 	}
892 }
893 
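/*
 * Allocate a huge page for the fault at 'addr'.  Charges the hugetlbfs
 * quota when no reservation covers the page, takes a page from the
 * pre-allocated pool if possible, falls back to a surplus page from the
 * buddy allocator, and finally commits the reservation.
 */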
894 static struct page *alloc_huge_page(struct vm_area_struct *vma,
895 				    unsigned long addr, int avoid_reserve)
896 {
897 	struct hstate *h = hstate_vma(vma);
898 	struct page *page;
899 	struct address_space *mapping = vma->vm_file->f_mapping;
900 	struct inode *inode = mapping->host;
901 	long chg;	/* may carry a negative errno from vma_needs_reservation() */
902 
903 	/*
904 	 * Processes that did not create the mapping will have no reserves and
905 	 * will not have been accounted against quota. Check that the quota
906 	 * can be charged before satisfying the allocation.
907 	 * MAP_NORESERVE mappings may also need pages and quota allocated
908 	 * if no reserve mapping overlaps.
909 	 */
910 	chg = vma_needs_reservation(h, vma, addr);
911 	if (chg < 0)
912 		return ERR_PTR(chg);
913 	if (chg)
914 		if (hugetlb_get_quota(inode->i_mapping, chg))
915 			return ERR_PTR(-ENOSPC);
916 
917 	spin_lock(&hugetlb_lock);
918 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
919 	spin_unlock(&hugetlb_lock);
920 
921 	if (!page) {
922 		page = alloc_buddy_huge_page(h, vma, addr);
923 		if (!page) {
924 			hugetlb_put_quota(inode->i_mapping, chg);
925 			return ERR_PTR(-VM_FAULT_OOM);
926 		}
927 	}
928 
929 	set_page_refcounted(page);
930 	set_page_private(page, (unsigned long) mapping);
931 
932 	vma_commit_reservation(h, vma, addr);
933 
934 	return page;
935 }
936 
937 __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
938 {
939 	struct huge_bootmem_page *m;
940 	int nr_nodes = nodes_weight(node_online_map);
941 
942 	while (nr_nodes) {
943 		void *addr;
944 
945 		addr = __alloc_bootmem_node_nopanic(
946 				NODE_DATA(h->hugetlb_next_nid),
947 				huge_page_size(h), huge_page_size(h), 0);
948 
949 		if (addr) {
950 			/*
951 			 * Use the beginning of the huge page to store the
952 			 * huge_bootmem_page struct (until gather_bootmem
953 			 * puts them into the mem_map).
954 			 */
955 			m = addr;
956 			if (m)
957 				goto found;
958 		}
959 		hstate_next_node(h);
960 		nr_nodes--;
961 	}
962 	return 0;
963 
964 found:
965 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
966 	/* Put them into a private list first because mem_map is not up yet */
967 	list_add(&m->list, &huge_boot_pages);
968 	m->hstate = h;
969 	return 1;
970 }
971 
972 /* Put bootmem huge pages into the standard lists after mem_map is up */
973 static void __init gather_bootmem_prealloc(void)
974 {
975 	struct huge_bootmem_page *m;
976 
977 	list_for_each_entry(m, &huge_boot_pages, list) {
978 		struct page *page = virt_to_page(m);
979 		struct hstate *h = m->hstate;
980 		__ClearPageReserved(page);
981 		WARN_ON(page_count(page) != 1);
982 		prep_compound_page(page, h->order);
983 		prep_new_huge_page(h, page, page_to_nid(page));
984 	}
985 }
986 
987 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
988 {
989 	unsigned long i;
990 
991 	for (i = 0; i < h->max_huge_pages; ++i) {
992 		if (h->order >= MAX_ORDER) {
993 			if (!alloc_bootmem_huge_page(h))
994 				break;
995 		} else if (!alloc_fresh_huge_page(h))
996 			break;
997 	}
998 	h->max_huge_pages = i;
999 }
1000 
1001 static void __init hugetlb_init_hstates(void)
1002 {
1003 	struct hstate *h;
1004 
1005 	for_each_hstate(h) {
1006 		/* oversize hugepages were init'ed in early boot */
1007 		if (h->order < MAX_ORDER)
1008 			hugetlb_hstate_alloc_pages(h);
1009 	}
1010 }
1011 
1012 static char * __init memfmt(char *buf, unsigned long n)
1013 {
1014 	if (n >= (1UL << 30))
1015 		sprintf(buf, "%lu GB", n >> 30);
1016 	else if (n >= (1UL << 20))
1017 		sprintf(buf, "%lu MB", n >> 20);
1018 	else
1019 		sprintf(buf, "%lu KB", n >> 10);
1020 	return buf;
1021 }
1022 
1023 static void __init report_hugepages(void)
1024 {
1025 	struct hstate *h;
1026 
1027 	for_each_hstate(h) {
1028 		char buf[32];
1029 		printk(KERN_INFO "HugeTLB registered %s page size, "
1030 				 "pre-allocated %ld pages\n",
1031 			memfmt(buf, huge_page_size(h)),
1032 			h->free_huge_pages);
1033 	}
1034 }
1035 
1036 #ifdef CONFIG_HIGHMEM
1037 static void try_to_free_low(struct hstate *h, unsigned long count)
1038 {
1039 	int i;
1040 
1041 	if (h->order >= MAX_ORDER)
1042 		return;
1043 
1044 	for (i = 0; i < MAX_NUMNODES; ++i) {
1045 		struct page *page, *next;
1046 		struct list_head *freel = &h->hugepage_freelists[i];
1047 		list_for_each_entry_safe(page, next, freel, lru) {
1048 			if (count >= h->nr_huge_pages)
1049 				return;
1050 			if (PageHighMem(page))
1051 				continue;
1052 			list_del(&page->lru);
1053 			update_and_free_page(h, page);
1054 			h->free_huge_pages--;
1055 			h->free_huge_pages_node[page_to_nid(page)]--;
1056 		}
1057 	}
1058 }
1059 #else
1060 static inline void try_to_free_low(struct hstate *h, unsigned long count)
1061 {
1062 }
1063 #endif
1064 
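/*
 * persistent_huge_pages() is the size of the pool excluding transient
 * surplus pages.  set_max_huge_pages() grows or shrinks that persistent
 * pool towards 'count', converting surplus pages where possible, and
 * returns the resulting persistent pool size.
 */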
1065 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1066 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1067 {
1068 	unsigned long min_count, ret;
1069 
1070 	if (h->order >= MAX_ORDER)
1071 		return h->max_huge_pages;
1072 
1073 	/*
1074 	 * Increase the pool size
1075 	 * First take pages out of surplus state.  Then make up the
1076 	 * remaining difference by allocating fresh huge pages.
1077 	 *
1078 	 * We might race with alloc_buddy_huge_page() here and be unable
1079 	 * to convert a surplus huge page to a normal huge page. That is
1080 	 * not critical, though, it just means the overall size of the
1081 	 * pool might be one hugepage larger than it needs to be, but
1082 	 * within all the constraints specified by the sysctls.
1083 	 */
1084 	spin_lock(&hugetlb_lock);
1085 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1086 		if (!adjust_pool_surplus(h, -1))
1087 			break;
1088 	}
1089 
1090 	while (count > persistent_huge_pages(h)) {
1091 		/*
1092 		 * If this allocation races such that we no longer need the
1093 		 * page, free_huge_page will handle it by freeing the page
1094 		 * and reducing the surplus.
1095 		 */
1096 		spin_unlock(&hugetlb_lock);
1097 		ret = alloc_fresh_huge_page(h);
1098 		spin_lock(&hugetlb_lock);
1099 		if (!ret)
1100 			goto out;
1101 
1102 	}
1103 
1104 	/*
1105 	 * Decrease the pool size
1106 	 * First return free pages to the buddy allocator (being careful
1107 	 * to keep enough around to satisfy reservations).  Then place
1108 	 * pages into surplus state as needed so the pool will shrink
1109 	 * to the desired size as pages become free.
1110 	 *
1111 	 * By placing pages into the surplus state independent of the
1112 	 * overcommit value, we are allowing the surplus pool size to
1113 	 * exceed overcommit. There are few sane options here. Since
1114 	 * alloc_buddy_huge_page() is checking the global counter,
1115 	 * though, we'll note that we're not allowed to exceed surplus
1116 	 * and won't grow the pool anywhere else. Not until one of the
1117 	 * sysctls is changed, or the surplus pages go out of use.
1118 	 */
1119 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1120 	min_count = max(count, min_count);
1121 	try_to_free_low(h, min_count);
1122 	while (min_count < persistent_huge_pages(h)) {
1123 		struct page *page = dequeue_huge_page(h);
1124 		if (!page)
1125 			break;
1126 		update_and_free_page(h, page);
1127 	}
1128 	while (count < persistent_huge_pages(h)) {
1129 		if (!adjust_pool_surplus(h, 1))
1130 			break;
1131 	}
1132 out:
1133 	ret = persistent_huge_pages(h);
1134 	spin_unlock(&hugetlb_lock);
1135 	return ret;
1136 }
1137 
1138 #define HSTATE_ATTR_RO(_name) \
1139 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1140 
1141 #define HSTATE_ATTR(_name) \
1142 	static struct kobj_attribute _name##_attr = \
1143 		__ATTR(_name, 0644, _name##_show, _name##_store)
1144 
1145 static struct kobject *hugepages_kobj;
1146 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1147 
1148 static struct hstate *kobj_to_hstate(struct kobject *kobj)
1149 {
1150 	int i;
1151 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1152 		if (hstate_kobjs[i] == kobj)
1153 			return &hstates[i];
1154 	BUG();
1155 	return NULL;
1156 }
1157 
1158 static ssize_t nr_hugepages_show(struct kobject *kobj,
1159 					struct kobj_attribute *attr, char *buf)
1160 {
1161 	struct hstate *h = kobj_to_hstate(kobj);
1162 	return sprintf(buf, "%lu\n", h->nr_huge_pages);
1163 }
1164 static ssize_t nr_hugepages_store(struct kobject *kobj,
1165 		struct kobj_attribute *attr, const char *buf, size_t count)
1166 {
1167 	int err;
1168 	unsigned long input;
1169 	struct hstate *h = kobj_to_hstate(kobj);
1170 
1171 	err = strict_strtoul(buf, 10, &input);
1172 	if (err)
1173 		return err;
1174 
1175 	h->max_huge_pages = set_max_huge_pages(h, input);
1176 
1177 	return count;
1178 }
1179 HSTATE_ATTR(nr_hugepages);
1180 
1181 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1182 					struct kobj_attribute *attr, char *buf)
1183 {
1184 	struct hstate *h = kobj_to_hstate(kobj);
1185 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1186 }
1187 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1188 		struct kobj_attribute *attr, const char *buf, size_t count)
1189 {
1190 	int err;
1191 	unsigned long input;
1192 	struct hstate *h = kobj_to_hstate(kobj);
1193 
1194 	err = strict_strtoul(buf, 10, &input);
1195 	if (err)
1196 		return err;
1197 
1198 	spin_lock(&hugetlb_lock);
1199 	h->nr_overcommit_huge_pages = input;
1200 	spin_unlock(&hugetlb_lock);
1201 
1202 	return count;
1203 }
1204 HSTATE_ATTR(nr_overcommit_hugepages);
1205 
1206 static ssize_t free_hugepages_show(struct kobject *kobj,
1207 					struct kobj_attribute *attr, char *buf)
1208 {
1209 	struct hstate *h = kobj_to_hstate(kobj);
1210 	return sprintf(buf, "%lu\n", h->free_huge_pages);
1211 }
1212 HSTATE_ATTR_RO(free_hugepages);
1213 
1214 static ssize_t resv_hugepages_show(struct kobject *kobj,
1215 					struct kobj_attribute *attr, char *buf)
1216 {
1217 	struct hstate *h = kobj_to_hstate(kobj);
1218 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1219 }
1220 HSTATE_ATTR_RO(resv_hugepages);
1221 
1222 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1223 					struct kobj_attribute *attr, char *buf)
1224 {
1225 	struct hstate *h = kobj_to_hstate(kobj);
1226 	return sprintf(buf, "%lu\n", h->surplus_huge_pages);
1227 }
1228 HSTATE_ATTR_RO(surplus_hugepages);
1229 
1230 static struct attribute *hstate_attrs[] = {
1231 	&nr_hugepages_attr.attr,
1232 	&nr_overcommit_hugepages_attr.attr,
1233 	&free_hugepages_attr.attr,
1234 	&resv_hugepages_attr.attr,
1235 	&surplus_hugepages_attr.attr,
1236 	NULL,
1237 };
1238 
1239 static struct attribute_group hstate_attr_group = {
1240 	.attrs = hstate_attrs,
1241 };
1242 
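/*
 * Each hstate gets a kobject named after h->name under the "hugepages"
 * kobject below mm_kobj, so the attributes above appear as e.g.
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages (the exact
 * directory names depend on the huge page sizes the system supports).
 */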
1243 static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
1244 {
1245 	int retval;
1246 
1247 	hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
1248 							hugepages_kobj);
1249 	if (!hstate_kobjs[h - hstates])
1250 		return -ENOMEM;
1251 
1252 	retval = sysfs_create_group(hstate_kobjs[h - hstates],
1253 							&hstate_attr_group);
1254 	if (retval)
1255 		kobject_put(hstate_kobjs[h - hstates]);
1256 
1257 	return retval;
1258 }
1259 
1260 static void __init hugetlb_sysfs_init(void)
1261 {
1262 	struct hstate *h;
1263 	int err;
1264 
1265 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1266 	if (!hugepages_kobj)
1267 		return;
1268 
1269 	for_each_hstate(h) {
1270 		err = hugetlb_sysfs_add_hstate(h);
1271 		if (err)
1272 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1273 								h->name);
1274 	}
1275 }
1276 
1277 static void __exit hugetlb_exit(void)
1278 {
1279 	struct hstate *h;
1280 
1281 	for_each_hstate(h) {
1282 		kobject_put(hstate_kobjs[h - hstates]);
1283 	}
1284 
1285 	kobject_put(hugepages_kobj);
1286 }
1287 module_exit(hugetlb_exit);
1288 
1289 static int __init hugetlb_init(void)
1290 {
1291 	/* Some platforms decide whether they support huge pages at boot
1292 	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1293 	 * there is no such support.
1294 	 */
1295 	if (HPAGE_SHIFT == 0)
1296 		return 0;
1297 
1298 	if (!size_to_hstate(default_hstate_size)) {
1299 		default_hstate_size = HPAGE_SIZE;
1300 		if (!size_to_hstate(default_hstate_size))
1301 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1302 	}
1303 	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1304 	if (default_hstate_max_huge_pages)
1305 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1306 
1307 	hugetlb_init_hstates();
1308 
1309 	gather_bootmem_prealloc();
1310 
1311 	report_hugepages();
1312 
1313 	hugetlb_sysfs_init();
1314 
1315 	return 0;
1316 }
1317 module_init(hugetlb_init);
1318 
1319 /* Should be called on processing a hugepagesz=... option */
1320 void __init hugetlb_add_hstate(unsigned order)
1321 {
1322 	struct hstate *h;
1323 	unsigned long i;
1324 
1325 	if (size_to_hstate(PAGE_SIZE << order)) {
1326 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1327 		return;
1328 	}
1329 	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1330 	BUG_ON(order == 0);
1331 	h = &hstates[max_hstate++];
1332 	h->order = order;
1333 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1334 	h->nr_huge_pages = 0;
1335 	h->free_huge_pages = 0;
1336 	for (i = 0; i < MAX_NUMNODES; ++i)
1337 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1338 	h->hugetlb_next_nid = first_node(node_online_map);
1339 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1340 					huge_page_size(h)/1024);
1341 
1342 	parsed_hstate = h;
1343 }
1344 
1345 static int __init hugetlb_nrpages_setup(char *s)
1346 {
1347 	unsigned long *mhp;
1348 	static unsigned long *last_mhp;
1349 
1350 	/*
1351 	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1352 	 * so this hugepages= parameter goes to the "default hstate".
1353 	 */
1354 	if (!max_hstate)
1355 		mhp = &default_hstate_max_huge_pages;
1356 	else
1357 		mhp = &parsed_hstate->max_huge_pages;
1358 
1359 	if (mhp == last_mhp) {
1360 		printk(KERN_WARNING "hugepages= specified twice without "
1361 			"interleaving hugepagesz=, ignoring\n");
1362 		return 1;
1363 	}
1364 
1365 	if (sscanf(s, "%lu", mhp) <= 0)
1366 		*mhp = 0;
1367 
1368 	/*
1369 	 * Global state is always initialized later in hugetlb_init.
1370 	 * But we need to allocate >= MAX_ORDER hstates here early to still
1371 	 * use the bootmem allocator.
1372 	 */
1373 	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1374 		hugetlb_hstate_alloc_pages(parsed_hstate);
1375 
1376 	last_mhp = mhp;
1377 
1378 	return 1;
1379 }
1380 __setup("hugepages=", hugetlb_nrpages_setup);
1381 
1382 static int __init hugetlb_default_setup(char *s)
1383 {
1384 	default_hstate_size = memparse(s, &s);
1385 	return 1;
1386 }
1387 __setup("default_hugepagesz=", hugetlb_default_setup);
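
/*
 * The boot command line can therefore look like, for example,
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512
 *
 * where hugepagesz= itself is parsed by architecture code that calls
 * hugetlb_add_hstate(), and each hugepages= applies to the most recently
 * parsed size (or to the default hstate if no size was given yet).
 */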
1388 
1389 static unsigned int cpuset_mems_nr(unsigned int *array)
1390 {
1391 	int node;
1392 	unsigned int nr = 0;
1393 
1394 	for_each_node_mask(node, cpuset_current_mems_allowed)
1395 		nr += array[node];
1396 
1397 	return nr;
1398 }
1399 
1400 #ifdef CONFIG_SYSCTL
1401 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1402 			   struct file *file, void __user *buffer,
1403 			   size_t *length, loff_t *ppos)
1404 {
1405 	struct hstate *h = &default_hstate;
1406 	unsigned long tmp;
1407 
1408 	if (!write)
1409 		tmp = h->max_huge_pages;
1410 
1411 	table->data = &tmp;
1412 	table->maxlen = sizeof(unsigned long);
1413 	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
1414 
1415 	if (write)
1416 		h->max_huge_pages = set_max_huge_pages(h, tmp);
1417 
1418 	return 0;
1419 }
1420 
1421 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1422 			struct file *file, void __user *buffer,
1423 			size_t *length, loff_t *ppos)
1424 {
1425 	proc_dointvec(table, write, file, buffer, length, ppos);
1426 	if (hugepages_treat_as_movable)
1427 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1428 	else
1429 		htlb_alloc_mask = GFP_HIGHUSER;
1430 	return 0;
1431 }
1432 
1433 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1434 			struct file *file, void __user *buffer,
1435 			size_t *length, loff_t *ppos)
1436 {
1437 	struct hstate *h = &default_hstate;
1438 	unsigned long tmp;
1439 
1440 	if (!write)
1441 		tmp = h->nr_overcommit_huge_pages;
1442 
1443 	table->data = &tmp;
1444 	table->maxlen = sizeof(unsigned long);
1445 	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
1446 
1447 	if (write) {
1448 		spin_lock(&hugetlb_lock);
1449 		h->nr_overcommit_huge_pages = tmp;
1450 		spin_unlock(&hugetlb_lock);
1451 	}
1452 
1453 	return 0;
1454 }
1455 
1456 #endif /* CONFIG_SYSCTL */
1457 
1458 int hugetlb_report_meminfo(char *buf)
1459 {
1460 	struct hstate *h = &default_hstate;
1461 	return sprintf(buf,
1462 			"HugePages_Total: %5lu\n"
1463 			"HugePages_Free:  %5lu\n"
1464 			"HugePages_Rsvd:  %5lu\n"
1465 			"HugePages_Surp:  %5lu\n"
1466 			"Hugepagesize:    %5lu kB\n",
1467 			h->nr_huge_pages,
1468 			h->free_huge_pages,
1469 			h->resv_huge_pages,
1470 			h->surplus_huge_pages,
1471 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1472 }
1473 
1474 int hugetlb_report_node_meminfo(int nid, char *buf)
1475 {
1476 	struct hstate *h = &default_hstate;
1477 	return sprintf(buf,
1478 		"Node %d HugePages_Total: %5u\n"
1479 		"Node %d HugePages_Free:  %5u\n"
1480 		"Node %d HugePages_Surp:  %5u\n",
1481 		nid, h->nr_huge_pages_node[nid],
1482 		nid, h->free_huge_pages_node[nid],
1483 		nid, h->surplus_huge_pages_node[nid]);
1484 }
1485 
1486 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1487 unsigned long hugetlb_total_pages(void)
1488 {
1489 	struct hstate *h = &default_hstate;
1490 	return h->nr_huge_pages * pages_per_huge_page(h);
1491 }
1492 
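/*
 * Charge (delta > 0) or uncharge (delta < 0) 'delta' huge pages against
 * the global reservation, growing the pool with surplus pages when the
 * free pool is too small and returning unused surplus pages on uncharge.
 * Returns 0 on success or -ENOMEM if the reservation cannot be backed.
 */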
1493 static int hugetlb_acct_memory(struct hstate *h, long delta)
1494 {
1495 	int ret = -ENOMEM;
1496 
1497 	spin_lock(&hugetlb_lock);
1498 	/*
1499 	 * When cpuset is configured, it breaks the strict hugetlb page
1500 	 * reservation as the accounting is done on a global variable. Such
1501 	 * reservation is completely rubbish in the presence of cpuset because
1502 	 * the reservation is not checked against page availability for the
1503 	 * current cpuset. Applications can still potentially be OOM-killed
1504 	 * by the kernel for lack of free hugetlb pages in the cpuset that the
1505 	 * task is in. Attempting to enforce strict accounting with cpusets is
1506 	 * almost impossible (or too ugly) because cpusets are so fluid that
1507 	 * tasks or memory nodes can be dynamically moved between cpusets.
1508 	 *
1509 	 * The change of semantics for shared hugetlb mapping with cpuset is
1510 	 * undesirable. However, in order to preserve some of the semantics,
1511 	 * we fall back to check against current free page availability as
1512 	 * a best attempt and hopefully to minimize the impact of changing
1513 	 * semantics that cpuset has.
1514 	 */
1515 	if (delta > 0) {
1516 		if (gather_surplus_pages(h, delta) < 0)
1517 			goto out;
1518 
1519 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1520 			return_unused_surplus_pages(h, delta);
1521 			goto out;
1522 		}
1523 	}
1524 
1525 	ret = 0;
1526 	if (delta < 0)
1527 		return_unused_surplus_pages(h, (unsigned long) -delta);
1528 
1529 out:
1530 	spin_unlock(&hugetlb_lock);
1531 	return ret;
1532 }
1533 
1534 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1535 {
1536 	struct resv_map *reservations = vma_resv_map(vma);
1537 
1538 	/*
1539 	 * This new VMA should share its sibling's reservation map if present.
1540 	 * The VMA will only ever have a valid reservation map pointer where
1541 	 * it is being copied for another still existing VMA.  As that VMA
1542 	 * has a reference to the reservation map it cannot disappear until
1543 	 * after this open call completes.  It is therefore safe to take a
1544 	 * new reference here without additional locking.
1545 	 */
1546 	if (reservations)
1547 		kref_get(&reservations->refs);
1548 }
1549 
1550 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1551 {
1552 	struct hstate *h = hstate_vma(vma);
1553 	struct resv_map *reservations = vma_resv_map(vma);
1554 	unsigned long reserve;
1555 	unsigned long start;
1556 	unsigned long end;
1557 
1558 	if (reservations) {
1559 		start = vma_hugecache_offset(h, vma, vma->vm_start);
1560 		end = vma_hugecache_offset(h, vma, vma->vm_end);
1561 
1562 		reserve = (end - start) -
1563 			region_count(&reservations->regions, start, end);
1564 
1565 		kref_put(&reservations->refs, resv_map_release);
1566 
1567 		if (reserve) {
1568 			hugetlb_acct_memory(h, -reserve);
1569 			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
1570 		}
1571 	}
1572 }
1573 
1574 /*
1575  * We cannot handle pagefaults against hugetlb pages at all.  They cause
1576  * handle_mm_fault() to try to instantiate regular-sized pages in the
1577  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
1578  * this far.
1579  */
1580 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1581 {
1582 	BUG();
1583 	return 0;
1584 }
1585 
1586 struct vm_operations_struct hugetlb_vm_ops = {
1587 	.fault = hugetlb_vm_op_fault,
1588 	.open = hugetlb_vm_op_open,
1589 	.close = hugetlb_vm_op_close,
1590 };
1591 
1592 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1593 				int writable)
1594 {
1595 	pte_t entry;
1596 
1597 	if (writable) {
1598 		entry =
1599 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1600 	} else {
1601 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
1602 	}
1603 	entry = pte_mkyoung(entry);
1604 	entry = pte_mkhuge(entry);
1605 
1606 	return entry;
1607 }
1608 
1609 static void set_huge_ptep_writable(struct vm_area_struct *vma,
1610 				   unsigned long address, pte_t *ptep)
1611 {
1612 	pte_t entry;
1613 
1614 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
1615 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
1616 		update_mmu_cache(vma, address, entry);
1617 	}
1618 }
1619 
1620 
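/*
 * Copy the huge page table entries of 'vma' from the parent mm 'src' to
 * the child mm 'dst' at fork time.  For private, writable mappings both
 * copies are write protected so that a later write faults and triggers
 * copy-on-write; shared page tables are skipped entirely.
 */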
1621 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1622 			    struct vm_area_struct *vma)
1623 {
1624 	pte_t *src_pte, *dst_pte, entry;
1625 	struct page *ptepage;
1626 	unsigned long addr;
1627 	int cow;
1628 	struct hstate *h = hstate_vma(vma);
1629 	unsigned long sz = huge_page_size(h);
1630 
1631 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1632 
1633 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
1634 		src_pte = huge_pte_offset(src, addr);
1635 		if (!src_pte)
1636 			continue;
1637 		dst_pte = huge_pte_alloc(dst, addr, sz);
1638 		if (!dst_pte)
1639 			goto nomem;
1640 
1641 		/* If the pagetables are shared don't copy or take references */
1642 		if (dst_pte == src_pte)
1643 			continue;
1644 
1645 		spin_lock(&dst->page_table_lock);
1646 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
1647 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
1648 			if (cow)
1649 				huge_ptep_set_wrprotect(src, addr, src_pte);
1650 			entry = huge_ptep_get(src_pte);
1651 			ptepage = pte_page(entry);
1652 			get_page(ptepage);
1653 			set_huge_pte_at(dst, addr, dst_pte, entry);
1654 		}
1655 		spin_unlock(&src->page_table_lock);
1656 		spin_unlock(&dst->page_table_lock);
1657 	}
1658 	return 0;
1659 
1660 nomem:
1661 	return -ENOMEM;
1662 }
1663 
1664 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1665 			    unsigned long end, struct page *ref_page)
1666 {
1667 	struct mm_struct *mm = vma->vm_mm;
1668 	unsigned long address;
1669 	pte_t *ptep;
1670 	pte_t pte;
1671 	struct page *page;
1672 	struct page *tmp;
1673 	struct hstate *h = hstate_vma(vma);
1674 	unsigned long sz = huge_page_size(h);
1675 
1676 	/*
1677 	 * A page gathering list, protected by the per-file i_mmap_lock. The
1678 	 * lock is used to avoid list corruption from multiple unmappings
1679 	 * of the same page since we are using page->lru.
1680 	 */
1681 	LIST_HEAD(page_list);
1682 
1683 	WARN_ON(!is_vm_hugetlb_page(vma));
1684 	BUG_ON(start & ~huge_page_mask(h));
1685 	BUG_ON(end & ~huge_page_mask(h));
1686 
1687 	mmu_notifier_invalidate_range_start(mm, start, end);
1688 	spin_lock(&mm->page_table_lock);
1689 	for (address = start; address < end; address += sz) {
1690 		ptep = huge_pte_offset(mm, address);
1691 		if (!ptep)
1692 			continue;
1693 
1694 		if (huge_pmd_unshare(mm, &address, ptep))
1695 			continue;
1696 
1697 		/*
1698 		 * If a reference page is supplied, it is because a specific
1699 		 * page is being unmapped, not a range. Ensure the page we
1700 		 * are about to unmap is the actual page of interest.
1701 		 */
1702 		if (ref_page) {
1703 			pte = huge_ptep_get(ptep);
1704 			if (huge_pte_none(pte))
1705 				continue;
1706 			page = pte_page(pte);
1707 			if (page != ref_page)
1708 				continue;
1709 
1710 			/*
1711 			 * Mark the VMA as having unmapped its page so that
1712 			 * future faults in this VMA will fail rather than
1713 			 * looking like data was lost
1714 			 */
1715 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1716 		}
1717 
1718 		pte = huge_ptep_get_and_clear(mm, address, ptep);
1719 		if (huge_pte_none(pte))
1720 			continue;
1721 
1722 		page = pte_page(pte);
1723 		if (pte_dirty(pte))
1724 			set_page_dirty(page);
1725 		list_add(&page->lru, &page_list);
1726 	}
1727 	spin_unlock(&mm->page_table_lock);
1728 	flush_tlb_range(vma, start, end);
1729 	mmu_notifier_invalidate_range_end(mm, start, end);
1730 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
1731 		list_del(&page->lru);
1732 		put_page(page);
1733 	}
1734 }
1735 
1736 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1737 			  unsigned long end, struct page *ref_page)
1738 {
1739 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1740 	__unmap_hugepage_range(vma, start, end, ref_page);
1741 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1742 }
1743 
1744 /*
1745  * This is called when the original mapper fails to COW a MAP_PRIVATE
1746  * mapping it owns the reserve page for. The intention is to unmap the page
1747  * from other VMAs and let the children be SIGKILLed if they are faulting the
1748  * same region.
1749  */
1750 int unmap_ref_private(struct mm_struct *mm,
1751 					struct vm_area_struct *vma,
1752 					struct page *page,
1753 					unsigned long address)
1754 {
1755 	struct vm_area_struct *iter_vma;
1756 	struct address_space *mapping;
1757 	struct prio_tree_iter iter;
1758 	pgoff_t pgoff;
1759 
1760 	/*
1761 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
1762 	 * from page cache lookup which is in HPAGE_SIZE units.
1763 	 */
1764 	address = address & huge_page_mask(hstate_vma(vma));
1765 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1766 		+ vma->vm_pgoff;	/* vm_pgoff is already in PAGE_SIZE units */
1767 	mapping = (struct address_space *)page_private(page);
1768 
1769 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1770 		/* Do not unmap the current VMA */
1771 		if (iter_vma == vma)
1772 			continue;
1773 
1774 		/*
1775 		 * Unmap the page from other VMAs without their own reserves.
1776 		 * They get marked to be SIGKILLed if they fault in these
1777 		 * areas. This is because a future no-page fault on this VMA
1778 		 * could insert a zeroed page instead of the data existing
1779 		 * from the time of fork. This would look like data corruption
1780 		 */
1781 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1782 			unmap_hugepage_range(iter_vma,
1783 				address, address + huge_page_size(hstate_vma(vma)),
1784 				page);
1785 	}
1786 
1787 	return 1;
1788 }
1789 
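/*
 * Handle a copy-on-write fault on a huge page: allocate a new huge page,
 * copy the old contents into it and swap it into the page table.  Called
 * with mm->page_table_lock held; the lock is dropped around the copy and
 * retaken before the PTE is rechecked and updated.
 */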
1790 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
1791 			unsigned long address, pte_t *ptep, pte_t pte,
1792 			struct page *pagecache_page)
1793 {
1794 	struct hstate *h = hstate_vma(vma);
1795 	struct page *old_page, *new_page;
1796 	int avoidcopy;
1797 	int outside_reserve = 0;
1798 
1799 	old_page = pte_page(pte);
1800 
1801 retry_avoidcopy:
1802 	/* If no-one else is actually using this page, avoid the copy
1803 	 * and just make the page writable */
1804 	avoidcopy = (page_count(old_page) == 1);
1805 	if (avoidcopy) {
1806 		set_huge_ptep_writable(vma, address, ptep);
1807 		return 0;
1808 	}
1809 
1810 	/*
1811 	 * If the process that created a MAP_PRIVATE mapping is about to
1812 	 * perform a COW due to a shared page count, attempt to satisfy
1813 	 * the allocation without using the existing reserves. The pagecache
1814 	 * page is used to determine if the reserve at this address was
1815 	 * consumed or not. If reserves were used, a partial faulted mapping
1816 	 * at the time of fork() could consume its reserves on COW instead
1817 	 * of the full address range.
1818 	 */
1819 	if (!(vma->vm_flags & VM_SHARED) &&
1820 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1821 			old_page != pagecache_page)
1822 		outside_reserve = 1;
1823 
1824 	page_cache_get(old_page);
1825 	new_page = alloc_huge_page(vma, address, outside_reserve);
1826 
1827 	if (IS_ERR(new_page)) {
1828 		page_cache_release(old_page);
1829 
1830 		/*
1831 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
1832 		 * it is due to references held by a child and an insufficient
1833 		 * huge page pool. To guarantee the original mapper's
1834 		 * reliability, unmap the page from child processes. The child
1835 		 * may get SIGKILLed if it later faults.
1836 		 */
1837 		if (outside_reserve) {
1838 			BUG_ON(huge_pte_none(pte));
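			/*
			 * Once the child mappings are gone, this mapping
			 * should hold the only reference to old_page, so the
			 * retry takes the avoidcopy path and simply makes the
			 * existing PTE writable.
			 */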
1839 			if (unmap_ref_private(mm, vma, old_page, address)) {
1840 				BUG_ON(page_count(old_page) != 1);
1841 				BUG_ON(huge_pte_none(pte));
1842 				goto retry_avoidcopy;
1843 			}
1844 			WARN_ON_ONCE(1);
1845 		}
1846 
1847 		return -PTR_ERR(new_page);
1848 	}
1849 
1850 	spin_unlock(&mm->page_table_lock);
1851 	copy_huge_page(new_page, old_page, address, vma);
1852 	__SetPageUptodate(new_page);
1853 	spin_lock(&mm->page_table_lock);
1854 
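	/*
	 * The page table lock was dropped for the copy, so the PTE may have
	 * changed under us. Re-find it and only install new_page if it
	 * still maps old_page; otherwise new_page is just released below.
	 */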
1855 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
1856 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
1857 		/* Break COW */
1858 		huge_ptep_clear_flush(vma, address, ptep);
1859 		set_huge_pte_at(mm, address, ptep,
1860 				make_huge_pte(vma, new_page, 1));
1861 		/* Make the old page be freed below */
1862 		new_page = old_page;
1863 	}
1864 	page_cache_release(new_page);
1865 	page_cache_release(old_page);
1866 	return 0;
1867 }
1868 
1869 /* Return the pagecache page at a given address within a VMA */
1870 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
1871 			struct vm_area_struct *vma, unsigned long address)
1872 {
1873 	struct address_space *mapping;
1874 	pgoff_t idx;
1875 
1876 	mapping = vma->vm_file->f_mapping;
1877 	idx = vma_hugecache_offset(h, vma, address);
1878 
1879 	return find_lock_page(mapping, idx);
1880 }
1881 
1882 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1883 			unsigned long address, pte_t *ptep, int write_access)
1884 {
1885 	struct hstate *h = hstate_vma(vma);
1886 	int ret = VM_FAULT_SIGBUS;
1887 	pgoff_t idx;
1888 	unsigned long size;
1889 	struct page *page;
1890 	struct address_space *mapping;
1891 	pte_t new_pte;
1892 
1893 	/*
1894 	 * Currently, we are forced to kill the process in the event the
1895 	 * original mapper has unmapped pages from the child due to a failed
1896 	 * COW. Warn that such a situation has occurred as it may not be obvious.
1897 	 */
1898 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
1899 		printk(KERN_WARNING
1900 			"PID %d killed due to inadequate hugepage pool\n",
1901 			current->pid);
1902 		return ret;
1903 	}
1904 
1905 	mapping = vma->vm_file->f_mapping;
1906 	idx = vma_hugecache_offset(h, vma, address);
1907 
1908 	/*
1909 	 * Use page lock to guard against racing truncation
1910 	 * before we get page_table_lock.
1911 	 */
1912 retry:
1913 	page = find_lock_page(mapping, idx);
1914 	if (!page) {
1915 		size = i_size_read(mapping->host) >> huge_page_shift(h);
1916 		if (idx >= size)
1917 			goto out;
1918 		page = alloc_huge_page(vma, address, 0);
1919 		if (IS_ERR(page)) {
1920 			ret = -PTR_ERR(page);
1921 			goto out;
1922 		}
1923 		clear_huge_page(page, address, huge_page_size(h));
1924 		__SetPageUptodate(page);
1925 
1926 		if (vma->vm_flags & VM_SHARED) {
1927 			int err;
1928 			struct inode *inode = mapping->host;
1929 
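			/*
			 * Insert the new page into the page cache. -EEXIST
			 * means another task instantiated this index first,
			 * so drop our page and retry the lookup above.
			 */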
1930 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
1931 			if (err) {
1932 				put_page(page);
1933 				if (err == -EEXIST)
1934 					goto retry;
1935 				goto out;
1936 			}
1937 
1938 			spin_lock(&inode->i_lock);
1939 			inode->i_blocks += blocks_per_huge_page(h);
1940 			spin_unlock(&inode->i_lock);
1941 		} else
1942 			lock_page(page);
1943 	}
1944 
1945 	/*
1946 	 * If we are going to COW a private mapping later, we examine the
1947 	 * pending reservations for this page now. This will ensure that
1948 	 * any allocations necessary to record that reservation occur outside
1949 	 * the spinlock.
1950 	 */
1951 	if (write_access && !(vma->vm_flags & VM_SHARED))
1952 		if (vma_needs_reservation(h, vma, address) < 0) {
1953 			ret = VM_FAULT_OOM;
1954 			goto backout_unlocked;
1955 		}
1956 
1957 	spin_lock(&mm->page_table_lock);
1958 	size = i_size_read(mapping->host) >> huge_page_shift(h);
1959 	if (idx >= size)
1960 		goto backout;
1961 
1962 	ret = 0;
1963 	if (!huge_pte_none(huge_ptep_get(ptep)))
1964 		goto backout;
1965 
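	/*
	 * Only a shared writable mapping gets a writable PTE here. A
	 * private mapping is mapped read-only so that writes go through
	 * hugetlb_cow(), either immediately below or on a later write fault.
	 */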
1966 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
1967 				&& (vma->vm_flags & VM_SHARED)));
1968 	set_huge_pte_at(mm, address, ptep, new_pte);
1969 
1970 	if (write_access && !(vma->vm_flags & VM_SHARED)) {
1971 		/* Optimization, do the COW without a second fault */
1972 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
1973 	}
1974 
1975 	spin_unlock(&mm->page_table_lock);
1976 	unlock_page(page);
1977 out:
1978 	return ret;
1979 
1980 backout:
1981 	spin_unlock(&mm->page_table_lock);
1982 backout_unlocked:
1983 	unlock_page(page);
1984 	put_page(page);
1985 	goto out;
1986 }
1987 
1988 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1989 			unsigned long address, int write_access)
1990 {
1991 	pte_t *ptep;
1992 	pte_t entry;
1993 	int ret;
1994 	struct page *pagecache_page = NULL;
1995 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
1996 	struct hstate *h = hstate_vma(vma);
1997 
1998 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
1999 	if (!ptep)
2000 		return VM_FAULT_OOM;
2001 
2002 	/*
2003 	 * Serialize hugepage allocation and instantiation, so that we don't
2004 	 * get spurious allocation failures if two CPUs race to instantiate
2005 	 * the same page in the page cache.
2006 	 */
2007 	mutex_lock(&hugetlb_instantiation_mutex);
2008 	entry = huge_ptep_get(ptep);
2009 	if (huge_pte_none(entry)) {
2010 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
2011 		goto out_unlock;
2012 	}
2013 
2014 	ret = 0;
2015 
2016 	/*
2017 	 * If we are going to COW the mapping later, we examine the pending
2018 	 * reservations for this page now. This will ensure that any
2019 	 * allocations necessary to record that reservation occur outside the
2020 	 * spinlock. For private mappings, we also look up the pagecache
2021 	 * page now as it is used to determine if a reservation has been
2022 	 * consumed.
2023 	 */
2024 	if (write_access && !pte_write(entry)) {
2025 		if (vma_needs_reservation(h, vma, address) < 0) {
2026 			ret = VM_FAULT_OOM;
2027 			goto out_unlock;
2028 		}
2029 
2030 		if (!(vma->vm_flags & VM_SHARED))
2031 			pagecache_page = hugetlbfs_pagecache_page(h,
2032 								vma, address);
2033 	}
2034 
2035 	spin_lock(&mm->page_table_lock);
2036 	/* Check for a racing update before calling hugetlb_cow */
2037 	if (likely(pte_same(entry, huge_ptep_get(ptep))))
2038 		if (write_access && !pte_write(entry))
2039 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2040 							pagecache_page);
2041 	spin_unlock(&mm->page_table_lock);
2042 
2043 	if (pagecache_page) {
2044 		unlock_page(pagecache_page);
2045 		put_page(pagecache_page);
2046 	}
2047 
2048 out_unlock:
2049 	mutex_unlock(&hugetlb_instantiation_mutex);
2050 
2051 	return ret;
2052 }
2053 
2054 /* Can be overridden by architectures */
2055 __attribute__((weak)) struct page *
2056 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2057 	       pud_t *pud, int write)
2058 {
2059 	BUG();
2060 	return NULL;
2061 }
2062 
2063 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2064 			struct page **pages, struct vm_area_struct **vmas,
2065 			unsigned long *position, int *length, int i,
2066 			int write)
2067 {
2068 	unsigned long pfn_offset;
2069 	unsigned long vaddr = *position;
2070 	int remainder = *length;
2071 	struct hstate *h = hstate_vma(vma);
2072 
2073 	spin_lock(&mm->page_table_lock);
2074 	while (vaddr < vma->vm_end && remainder) {
2075 		pte_t *pte;
2076 		struct page *page;
2077 
2078 		/*
2079 		 * Some archs (sparc64, sh*) have multiple pte_ts to
2080 		 * each hugepage.  We have to make * sure we get the
2081 		 * each hugepage.  We have to make sure we get the
2082 		 */
2083 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2084 
2085 		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
2086 		    (write && !pte_write(huge_ptep_get(pte)))) {
2087 			int ret;
2088 
2089 			spin_unlock(&mm->page_table_lock);
2090 			ret = hugetlb_fault(mm, vma, vaddr, write);
2091 			spin_lock(&mm->page_table_lock);
2092 			if (!(ret & VM_FAULT_ERROR))
2093 				continue;
2094 
2095 			remainder = 0;
2096 			if (!i)
2097 				i = -EFAULT;
2098 			break;
2099 		}
2100 
2101 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2102 		page = pte_page(huge_ptep_get(pte));
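		/*
		 * E.g. with 4KB base pages and 2MB huge pages,
		 * pages_per_huge_page(h) is 512, so a single huge PTE can
		 * satisfy up to 512 consecutive entries of pages[] via the
		 * same_page loop below.
		 */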
2103 same_page:
2104 		if (pages) {
2105 			get_page(page);
2106 			pages[i] = page + pfn_offset;
2107 		}
2108 
2109 		if (vmas)
2110 			vmas[i] = vma;
2111 
2112 		vaddr += PAGE_SIZE;
2113 		++pfn_offset;
2114 		--remainder;
2115 		++i;
2116 		if (vaddr < vma->vm_end && remainder &&
2117 				pfn_offset < pages_per_huge_page(h)) {
2118 			/*
2119 			 * We use pfn_offset to avoid touching the pageframes
2120 			 * of this compound page.
2121 			 */
2122 			goto same_page;
2123 		}
2124 	}
2125 	spin_unlock(&mm->page_table_lock);
2126 	*length = remainder;
2127 	*position = vaddr;
2128 
2129 	return i;
2130 }
2131 
2132 void hugetlb_change_protection(struct vm_area_struct *vma,
2133 		unsigned long address, unsigned long end, pgprot_t newprot)
2134 {
2135 	struct mm_struct *mm = vma->vm_mm;
2136 	unsigned long start = address;
2137 	pte_t *ptep;
2138 	pte_t pte;
2139 	struct hstate *h = hstate_vma(vma);
2140 
2141 	BUG_ON(address >= end);
2142 	flush_cache_range(vma, address, end);
2143 
2144 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2145 	spin_lock(&mm->page_table_lock);
2146 	for (; address < end; address += huge_page_size(h)) {
2147 		ptep = huge_pte_offset(mm, address);
2148 		if (!ptep)
2149 			continue;
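		/*
		 * If the PMD page table was shared with another mapping,
		 * huge_pmd_unshare() drops this mm's reference to it; there
		 * is then no PTE left here to update, so move on to the
		 * next huge page.
		 */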
2150 		if (huge_pmd_unshare(mm, &address, ptep))
2151 			continue;
2152 		if (!huge_pte_none(huge_ptep_get(ptep))) {
2153 			pte = huge_ptep_get_and_clear(mm, address, ptep);
2154 			pte = pte_mkhuge(pte_modify(pte, newprot));
2155 			set_huge_pte_at(mm, address, ptep, pte);
2156 		}
2157 	}
2158 	spin_unlock(&mm->page_table_lock);
2159 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2160 
2161 	flush_tlb_range(vma, start, end);
2162 }
2163 
2164 int hugetlb_reserve_pages(struct inode *inode,
2165 					long from, long to,
2166 					struct vm_area_struct *vma)
2167 {
2168 	long ret, chg;
2169 	struct hstate *h = hstate_inode(inode);
2170 
2171 	if (vma && vma->vm_flags & VM_NORESERVE)
2172 		return 0;
2173 
2174 	/*
2175 	 * Shared mappings base their reservation on the number of pages that
2176 	 * are already allocated on behalf of the file. Private mappings need
2177 	 * to reserve the full area even if read-only as mprotect() may be
2178 	 * called to make the mapping read-write. Assume !vma is a shm mapping
2179 	 */
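	/*
	 * For example, reserving huge page offsets [0, 10) on a file that
	 * already has a region [2, 4) recorded makes region_chg() return 8;
	 * the region_add() below then merges the range into a single
	 * [0, 10) region.
	 */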
2180 	if (!vma || vma->vm_flags & VM_SHARED)
2181 		chg = region_chg(&inode->i_mapping->private_list, from, to);
2182 	else {
2183 		struct resv_map *resv_map = resv_map_alloc();
2184 		if (!resv_map)
2185 			return -ENOMEM;
2186 
2187 		chg = to - from;
2188 
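		/*
		 * A private mapping reserves the whole range up front; the
		 * resv_map attached to the VMA records which offsets consume
		 * their reserve as they are faulted in.
		 */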
2189 		set_vma_resv_map(vma, resv_map);
2190 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2191 	}
2192 
2193 	if (chg < 0)
2194 		return chg;
2195 
2196 	if (hugetlb_get_quota(inode->i_mapping, chg))
2197 		return -ENOSPC;
2198 	ret = hugetlb_acct_memory(h, chg);
2199 	if (ret < 0) {
2200 		hugetlb_put_quota(inode->i_mapping, chg);
2201 		return ret;
2202 	}
2203 	if (!vma || vma->vm_flags & VM_SHARED)
2204 		region_add(&inode->i_mapping->private_list, from, to);
2205 	return 0;
2206 }
2207 
2208 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2209 {
2210 	struct hstate *h = hstate_inode(inode);
2211 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
2212 
2213 	spin_lock(&inode->i_lock);
2214 	inode->i_blocks -= blocks_per_huge_page(h) * freed;
2215 	spin_unlock(&inode->i_lock);
2216 
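	/*
	 * Pages that were actually freed return their quota when they are
	 * released; only reservations that were never faulted in
	 * (chg - freed) still need their quota and accounting dropped here.
	 */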
2217 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2218 	hugetlb_acct_memory(h, -(chg - freed));
2219 }
2220