xref: /openbmc/linux/mm/hugetlb.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/io.h>
28 
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32 
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36 
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40 
41 __initdata LIST_HEAD(huge_boot_pages);
42 
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47 
48 #define for_each_hstate(h) \
49 	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50 
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55 
56 /*
57  * Region tracking -- allows tracking of reservations and instantiated pages
58  *                    across the pages in a mapping.
59  *
60  * The region data structures are protected by a combination of the mmap_sem
61  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
62  * must either hold the mmap_sem for write, or the mmap_sem for read and
63  * the hugetlb_instantiation_mutex:
64  *
65  * 	down_write(&mm->mmap_sem);
66  * or
67  * 	down_read(&mm->mmap_sem);
68  * 	mutex_lock(&hugetlb_instantiation_mutex);
69  */
70 struct file_region {
71 	struct list_head link;
72 	long from;
73 	long to;
74 };
75 
76 static long region_add(struct list_head *head, long f, long t)
77 {
78 	struct file_region *rg, *nrg, *trg;
79 
80 	/* Locate the region we are either in or before. */
81 	list_for_each_entry(rg, head, link)
82 		if (f <= rg->to)
83 			break;
84 
85 	/* Round our left edge to the current segment if it encloses us. */
86 	if (f > rg->from)
87 		f = rg->from;
88 
89 	/* Check for and consume any regions we now overlap with. */
90 	nrg = rg;
91 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
92 		if (&rg->link == head)
93 			break;
94 		if (rg->from > t)
95 			break;
96 
97 		/* If this area reaches higher, then extend our area to
98 		 * include it completely.  If this is not the first area
99 		 * which we intend to reuse, free it. */
100 		if (rg->to > t)
101 			t = rg->to;
102 		if (rg != nrg) {
103 			list_del(&rg->link);
104 			kfree(rg);
105 		}
106 	}
107 	nrg->from = f;
108 	nrg->to = t;
109 	return 0;
110 }
111 
112 static long region_chg(struct list_head *head, long f, long t)
113 {
114 	struct file_region *rg, *nrg;
115 	long chg = 0;
116 
117 	/* Locate the region we are before or in. */
118 	list_for_each_entry(rg, head, link)
119 		if (f <= rg->to)
120 			break;
121 
122 	/* If we are below the current region then a new region is required.
123 	 * Subtle: allocate a new region at the position, but make it zero
124 	 * size such that we can guarantee to record the reservation. */
125 	if (&rg->link == head || t < rg->from) {
126 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
127 		if (!nrg)
128 			return -ENOMEM;
129 		nrg->from = f;
130 		nrg->to   = f;
131 		INIT_LIST_HEAD(&nrg->link);
132 		list_add(&nrg->link, rg->link.prev);
133 
134 		return t - f;
135 	}
136 
137 	/* Round our left edge to the current segment if it encloses us. */
138 	if (f > rg->from)
139 		f = rg->from;
140 	chg = t - f;
141 
142 	/* Check for and consume any regions we now overlap with. */
143 	list_for_each_entry(rg, rg->link.prev, link) {
144 		if (&rg->link == head)
145 			break;
146 		if (rg->from > t)
147 			return chg;
148 
149 		/* We overlap with this area; if it extends further than
150 		 * us then we must extend ourselves.  Account for its
151 		 * existing reservation. */
152 		if (rg->to > t) {
153 			chg += rg->to - t;
154 			t = rg->to;
155 		}
156 		chg -= rg->to - rg->from;
157 	}
158 	return chg;
159 }
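
/*
 * Editor's sketch (not kernel code): the region map tracks disjoint, sorted
 * [from, to) intervals; region_chg() reports how many pages of a new range
 * are not yet covered, and region_add() merges the range in.  The minimal
 * user-space program below, with hypothetical names, illustrates only the
 * "charge" computation, using a plain array instead of a list_head.
 */
#include <stdio.h>

struct region { long from, to; };

/* Pages in [f, t) not already covered by the existing, disjoint regions. */
static long region_charge(const struct region *r, int n, long f, long t)
{
	long chg = t - f;
	int i;

	for (i = 0; i < n; i++) {
		long lo = r[i].from > f ? r[i].from : f;
		long hi = r[i].to < t ? r[i].to : t;

		if (hi > lo)
			chg -= hi - lo;		/* overlap is already reserved */
	}
	return chg;
}

int main(void)
{
	struct region map[] = { { 0, 3 }, { 8, 10 } };

	/* Reserving [2, 9) only needs pages 3..7, so the charge is 5. */
	printf("charge = %ld\n", region_charge(map, 2, 2, 9));
	return 0;
}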
160 
161 static long region_truncate(struct list_head *head, long end)
162 {
163 	struct file_region *rg, *trg;
164 	long chg = 0;
165 
166 	/* Locate the region we are either in or before. */
167 	list_for_each_entry(rg, head, link)
168 		if (end <= rg->to)
169 			break;
170 	if (&rg->link == head)
171 		return 0;
172 
173 	/* If we are in the middle of a region then adjust it. */
174 	if (end > rg->from) {
175 		chg = rg->to - end;
176 		rg->to = end;
177 		rg = list_entry(rg->link.next, typeof(*rg), link);
178 	}
179 
180 	/* Drop any remaining regions. */
181 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
182 		if (&rg->link == head)
183 			break;
184 		chg += rg->to - rg->from;
185 		list_del(&rg->link);
186 		kfree(rg);
187 	}
188 	return chg;
189 }
190 
191 static long region_count(struct list_head *head, long f, long t)
192 {
193 	struct file_region *rg;
194 	long chg = 0;
195 
196 	/* Locate each segment we overlap with, and count that overlap. */
197 	list_for_each_entry(rg, head, link) {
198 		int seg_from;
199 		int seg_to;
200 
201 		if (rg->to <= f)
202 			continue;
203 		if (rg->from >= t)
204 			break;
205 
206 		seg_from = max(rg->from, f);
207 		seg_to = min(rg->to, t);
208 
209 		chg += seg_to - seg_from;
210 	}
211 
212 	return chg;
213 }
214 
215 /*
216  * Convert the address within this vma to the page offset within
217  * the mapping, in pagecache page units; huge pages here.
218  */
219 static pgoff_t vma_hugecache_offset(struct hstate *h,
220 			struct vm_area_struct *vma, unsigned long address)
221 {
222 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
223 			(vma->vm_pgoff >> huge_page_order(h));
224 }
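
/*
 * Editor's worked example (hypothetical numbers): with 2 MB huge pages
 * (huge_page_shift = 21, PAGE_SHIFT = 12, so huge_page_order = 9), a VMA
 * whose vm_pgoff is 2048 small pages starts 2048 >> 9 = 4 huge pages into
 * the file.  An address 6 MB past vm_start is (6 MB >> 21) = 3 huge pages
 * into the VMA, giving a page cache index of 4 + 3 = 7 huge pages.
 */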
225 
226 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
227 				     unsigned long address)
228 {
229 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
230 }
231 
232 /*
233  * Return the size of the pages allocated when backing a VMA. In the majority
234  * of cases this will be the same size as that used by the page table entries.
235  */
236 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
237 {
238 	struct hstate *hstate;
239 
240 	if (!is_vm_hugetlb_page(vma))
241 		return PAGE_SIZE;
242 
243 	hstate = hstate_vma(vma);
244 
245 	return 1UL << (hstate->order + PAGE_SHIFT);
246 }
247 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
248 
249 /*
250  * Return the page size being used by the MMU to back a VMA. In the majority
251  * of cases, the page size used by the kernel matches the MMU size. On
252  * architectures where it differs, an architecture-specific version of this
253  * function is required.
254  */
255 #ifndef vma_mmu_pagesize
256 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
257 {
258 	return vma_kernel_pagesize(vma);
259 }
260 #endif
261 
262 /*
263  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
264  * bits of the reservation map pointer, which are always clear due to
265  * alignment.
266  */
267 #define HPAGE_RESV_OWNER    (1UL << 0)
268 #define HPAGE_RESV_UNMAPPED (1UL << 1)
269 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
270 
271 /*
272  * These helpers are used to track how many pages are reserved for
273  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
274  * is guaranteed to have their future faults succeed.
275  *
276  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
277  * the reserve counters are updated with the hugetlb_lock held. It is safe
278  * to reset the VMA at fork() time as it is not in use yet and there is no
279  * chance of the global counters getting corrupted as a result of the cleared values.
280  *
281  * The private mapping reservation is represented in a subtly different
282  * manner to a shared mapping.  A shared mapping has a region map associated
283  * with the underlying file; this region map represents the backing file
284  * pages which have ever had a reservation assigned, and it persists even
285  * after the page is instantiated.  A private mapping has a region map
286  * associated with the original mmap which is attached to all VMAs which
287  * reference it; this region map represents those offsets which have consumed
288  * a reservation, i.e. where pages have been instantiated.
289  */
290 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
291 {
292 	return (unsigned long)vma->vm_private_data;
293 }
294 
295 static void set_vma_private_data(struct vm_area_struct *vma,
296 							unsigned long value)
297 {
298 	vma->vm_private_data = (void *)value;
299 }
300 
301 struct resv_map {
302 	struct kref refs;
303 	struct list_head regions;
304 };
305 
306 static struct resv_map *resv_map_alloc(void)
307 {
308 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
309 	if (!resv_map)
310 		return NULL;
311 
312 	kref_init(&resv_map->refs);
313 	INIT_LIST_HEAD(&resv_map->regions);
314 
315 	return resv_map;
316 }
317 
318 static void resv_map_release(struct kref *ref)
319 {
320 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
321 
322 	/* Clear out any active regions before we release the map. */
323 	region_truncate(&resv_map->regions, 0);
324 	kfree(resv_map);
325 }
326 
327 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
328 {
329 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
330 	if (!(vma->vm_flags & VM_MAYSHARE))
331 		return (struct resv_map *)(get_vma_private_data(vma) &
332 							~HPAGE_RESV_MASK);
333 	return NULL;
334 }
335 
336 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
337 {
338 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
339 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
340 
341 	set_vma_private_data(vma, (get_vma_private_data(vma) &
342 				HPAGE_RESV_MASK) | (unsigned long)map);
343 }
344 
345 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
346 {
347 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
348 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
349 
350 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
351 }
352 
353 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
354 {
355 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
356 
357 	return (get_vma_private_data(vma) & flag) != 0;
358 }
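
/*
 * Editor's sketch (user-space, hypothetical names): the helpers above pack
 * HPAGE_RESV_* flag bits into the low bits of the kmalloc-aligned resv_map
 * pointer stored in vm_private_data.  The generic pointer-tagging idea looks
 * roughly like this:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_OWNER    (1UL << 0)
#define TAG_UNMAPPED (1UL << 1)
#define TAG_MASK     (TAG_OWNER | TAG_UNMAPPED)

static void *tag_pack(void *ptr, unsigned long flags)
{
	/* The pointer must be at least 4-byte aligned so the low bits are free. */
	assert(((uintptr_t)ptr & TAG_MASK) == 0);
	return (void *)((uintptr_t)ptr | (flags & TAG_MASK));
}

static void *tag_unpack(void *tagged, unsigned long *flags)
{
	if (flags)
		*flags = (uintptr_t)tagged & TAG_MASK;
	return (void *)((uintptr_t)tagged & ~(uintptr_t)TAG_MASK);
}

int main(void)
{
	long map;			/* stands in for a kmalloc'd resv_map */
	unsigned long flags;
	void *tagged = tag_pack(&map, TAG_OWNER);

	printf("ptr ok: %d, owner: %lu\n",
	       tag_unpack(tagged, &flags) == (void *)&map, flags & TAG_OWNER);
	return 0;
}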
359 
360 /* Decrement the reserved pages in the hugepage pool by one */
361 static void decrement_hugepage_resv_vma(struct hstate *h,
362 			struct vm_area_struct *vma)
363 {
364 	if (vma->vm_flags & VM_NORESERVE)
365 		return;
366 
367 	if (vma->vm_flags & VM_MAYSHARE) {
368 		/* Shared mappings always use reserves */
369 		h->resv_huge_pages--;
370 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
371 		/*
372 		 * Only the process that called mmap() has reserves for
373 		 * private mappings.
374 		 */
375 		h->resv_huge_pages--;
376 	}
377 }
378 
379 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
380 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
381 {
382 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
383 	if (!(vma->vm_flags & VM_MAYSHARE))
384 		vma->vm_private_data = (void *)0;
385 }
386 
387 /* Returns true if the VMA has associated reserve pages */
388 static int vma_has_reserves(struct vm_area_struct *vma)
389 {
390 	if (vma->vm_flags & VM_MAYSHARE)
391 		return 1;
392 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
393 		return 1;
394 	return 0;
395 }
396 
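/*
 * Editor's note (as I read mem_map_next() in mm/internal.h): for gigantic
 * pages (order >= MAX_ORDER) the tail struct pages are not guaranteed to be
 * virtually contiguous in mem_map on SPARSEMEM configurations without
 * VMEMMAP, so plain "page + i" arithmetic would be wrong; mem_map_next()
 * recomputes the pointer via pfn_to_page() when crossing a MAX_ORDER-aligned
 * boundary, and cond_resched() keeps the long copy preemptible.
 */
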
397 static void copy_gigantic_page(struct page *dst, struct page *src)
398 {
399 	int i;
400 	struct hstate *h = page_hstate(src);
401 	struct page *dst_base = dst;
402 	struct page *src_base = src;
403 
404 	for (i = 0; i < pages_per_huge_page(h); ) {
405 		cond_resched();
406 		copy_highpage(dst, src);
407 
408 		i++;
409 		dst = mem_map_next(dst, dst_base, i);
410 		src = mem_map_next(src, src_base, i);
411 	}
412 }
413 
414 void copy_huge_page(struct page *dst, struct page *src)
415 {
416 	int i;
417 	struct hstate *h = page_hstate(src);
418 
419 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
420 		copy_gigantic_page(dst, src);
421 		return;
422 	}
423 
424 	might_sleep();
425 	for (i = 0; i < pages_per_huge_page(h); i++) {
426 		cond_resched();
427 		copy_highpage(dst + i, src + i);
428 	}
429 }
430 
431 static void enqueue_huge_page(struct hstate *h, struct page *page)
432 {
433 	int nid = page_to_nid(page);
434 	list_add(&page->lru, &h->hugepage_freelists[nid]);
435 	h->free_huge_pages++;
436 	h->free_huge_pages_node[nid]++;
437 }
438 
439 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
440 {
441 	struct page *page;
442 
443 	if (list_empty(&h->hugepage_freelists[nid]))
444 		return NULL;
445 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
446 	list_del(&page->lru);
447 	set_page_refcounted(page);
448 	h->free_huge_pages--;
449 	h->free_huge_pages_node[nid]--;
450 	return page;
451 }
452 
453 static struct page *dequeue_huge_page_vma(struct hstate *h,
454 				struct vm_area_struct *vma,
455 				unsigned long address, int avoid_reserve)
456 {
457 	struct page *page = NULL;
458 	struct mempolicy *mpol;
459 	nodemask_t *nodemask;
460 	struct zonelist *zonelist;
461 	struct zone *zone;
462 	struct zoneref *z;
463 
464 	get_mems_allowed();
465 	zonelist = huge_zonelist(vma, address,
466 					htlb_alloc_mask, &mpol, &nodemask);
467 	/*
468 	 * A child process with MAP_PRIVATE mappings created by its parent
469 	 * has no page reserves. This check ensures that reservations are
470 	 * not "stolen". The child may still get SIGKILLed
471 	 */
472 	if (!vma_has_reserves(vma) &&
473 			h->free_huge_pages - h->resv_huge_pages == 0)
474 		goto err;
475 
476 	/* If reserves cannot be used, ensure enough pages are in the pool */
477 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
478 		goto err;
479 
480 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
481 						MAX_NR_ZONES - 1, nodemask) {
482 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
483 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
484 			if (page) {
485 				if (!avoid_reserve)
486 					decrement_hugepage_resv_vma(h, vma);
487 				break;
488 			}
489 		}
490 	}
491 err:
492 	mpol_cond_put(mpol);
493 	put_mems_allowed();
494 	return page;
495 }
496 
497 static void update_and_free_page(struct hstate *h, struct page *page)
498 {
499 	int i;
500 
501 	VM_BUG_ON(h->order >= MAX_ORDER);
502 
503 	h->nr_huge_pages--;
504 	h->nr_huge_pages_node[page_to_nid(page)]--;
505 	for (i = 0; i < pages_per_huge_page(h); i++) {
506 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
507 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
508 				1 << PG_private | 1<< PG_writeback);
509 	}
510 	set_compound_page_dtor(page, NULL);
511 	set_page_refcounted(page);
512 	arch_release_hugepage(page);
513 	__free_pages(page, huge_page_order(h));
514 }
515 
516 struct hstate *size_to_hstate(unsigned long size)
517 {
518 	struct hstate *h;
519 
520 	for_each_hstate(h) {
521 		if (huge_page_size(h) == size)
522 			return h;
523 	}
524 	return NULL;
525 }
526 
527 static void free_huge_page(struct page *page)
528 {
529 	/*
530 	 * Can't pass hstate in here because it is called from the
531 	 * compound page destructor.
532 	 */
533 	struct hstate *h = page_hstate(page);
534 	int nid = page_to_nid(page);
535 	struct address_space *mapping;
536 
537 	mapping = (struct address_space *) page_private(page);
538 	set_page_private(page, 0);
539 	page->mapping = NULL;
540 	BUG_ON(page_count(page));
541 	BUG_ON(page_mapcount(page));
542 	INIT_LIST_HEAD(&page->lru);
543 
544 	spin_lock(&hugetlb_lock);
545 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
546 		update_and_free_page(h, page);
547 		h->surplus_huge_pages--;
548 		h->surplus_huge_pages_node[nid]--;
549 	} else {
550 		enqueue_huge_page(h, page);
551 	}
552 	spin_unlock(&hugetlb_lock);
553 	if (mapping)
554 		hugetlb_put_quota(mapping, 1);
555 }
556 
557 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
558 {
559 	set_compound_page_dtor(page, free_huge_page);
560 	spin_lock(&hugetlb_lock);
561 	h->nr_huge_pages++;
562 	h->nr_huge_pages_node[nid]++;
563 	spin_unlock(&hugetlb_lock);
564 	put_page(page); /* free it into the hugepage allocator */
565 }
566 
567 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
568 {
569 	int i;
570 	int nr_pages = 1 << order;
571 	struct page *p = page + 1;
572 
573 	/* we rely on prep_new_huge_page to set the destructor */
574 	set_compound_order(page, order);
575 	__SetPageHead(page);
576 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
577 		__SetPageTail(p);
578 		p->first_page = page;
579 	}
580 }
581 
582 int PageHuge(struct page *page)
583 {
584 	compound_page_dtor *dtor;
585 
586 	if (!PageCompound(page))
587 		return 0;
588 
589 	page = compound_head(page);
590 	dtor = get_compound_page_dtor(page);
591 
592 	return dtor == free_huge_page;
593 }
594 
595 EXPORT_SYMBOL_GPL(PageHuge);
596 
597 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
598 {
599 	struct page *page;
600 
601 	if (h->order >= MAX_ORDER)
602 		return NULL;
603 
604 	page = alloc_pages_exact_node(nid,
605 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
606 						__GFP_REPEAT|__GFP_NOWARN,
607 		huge_page_order(h));
608 	if (page) {
609 		if (arch_prepare_hugepage(page)) {
610 			__free_pages(page, huge_page_order(h));
611 			return NULL;
612 		}
613 		prep_new_huge_page(h, page, nid);
614 	}
615 
616 	return page;
617 }
618 
619 /*
620  * common helper functions for hstate_next_node_to_{alloc|free}.
621  * We may have allocated or freed a huge page based on a different
622  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
623  * be outside of *nodes_allowed.  Ensure that we use an allowed
624  * node for alloc or free.
625  */
626 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
627 {
628 	nid = next_node(nid, *nodes_allowed);
629 	if (nid == MAX_NUMNODES)
630 		nid = first_node(*nodes_allowed);
631 	VM_BUG_ON(nid >= MAX_NUMNODES);
632 
633 	return nid;
634 }
635 
636 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
637 {
638 	if (!node_isset(nid, *nodes_allowed))
639 		nid = next_node_allowed(nid, nodes_allowed);
640 	return nid;
641 }
642 
643 /*
644  * returns the previously saved node ["this node"] from which to
645  * allocate a persistent huge page for the pool and advance the
646  * next node from which to allocate, handling wrap at end of node
647  * mask.
648  */
649 static int hstate_next_node_to_alloc(struct hstate *h,
650 					nodemask_t *nodes_allowed)
651 {
652 	int nid;
653 
654 	VM_BUG_ON(!nodes_allowed);
655 
656 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
657 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
658 
659 	return nid;
660 }
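
/*
 * Editor's sketch (user-space, hypothetical names): the same round-robin,
 * wrap-at-the-end walk that hstate_next_node_to_alloc()/..._to_free() do
 * over a nodemask, here over a plain bitmask.  Assumes "allowed" is
 * non-empty, otherwise the loop would not terminate.
 */
#include <stdio.h>

#define MAX_NODES 8

static int next_allowed(int nid, unsigned int allowed)
{
	do {
		nid = (nid + 1) % MAX_NODES;
	} while (!(allowed & (1u << nid)));
	return nid;
}

int main(void)
{
	unsigned int allowed = 0x0b;	/* nodes 0, 1 and 3 */
	int nid = 0, i;

	for (i = 0; i < 5; i++) {
		nid = next_allowed(nid, allowed);
		printf("%d ", nid);	/* prints: 1 3 0 1 3 */
	}
	printf("\n");
	return 0;
}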
661 
662 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
663 {
664 	struct page *page;
665 	int start_nid;
666 	int next_nid;
667 	int ret = 0;
668 
669 	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
670 	next_nid = start_nid;
671 
672 	do {
673 		page = alloc_fresh_huge_page_node(h, next_nid);
674 		if (page) {
675 			ret = 1;
676 			break;
677 		}
678 		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
679 	} while (next_nid != start_nid);
680 
681 	if (ret)
682 		count_vm_event(HTLB_BUDDY_PGALLOC);
683 	else
684 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
685 
686 	return ret;
687 }
688 
689 /*
690  * helper for free_pool_huge_page() - return the previously saved
691  * node ["this node"] from which to free a huge page.  Advance the
692  * next node id whether or not we find a free huge page to free so
693  * that the next attempt to free addresses the next node.
694  */
695 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
696 {
697 	int nid;
698 
699 	VM_BUG_ON(!nodes_allowed);
700 
701 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
702 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
703 
704 	return nid;
705 }
706 
707 /*
708  * Free a huge page from the pool, starting at the saved next node to free from.
709  * Attempt to keep persistent huge pages more or less
710  * balanced over allowed nodes.
711  * Called with hugetlb_lock locked.
712  */
713 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
714 							 bool acct_surplus)
715 {
716 	int start_nid;
717 	int next_nid;
718 	int ret = 0;
719 
720 	start_nid = hstate_next_node_to_free(h, nodes_allowed);
721 	next_nid = start_nid;
722 
723 	do {
724 		/*
725 		 * If we're returning unused surplus pages, only examine
726 		 * nodes with surplus pages.
727 		 */
728 		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
729 		    !list_empty(&h->hugepage_freelists[next_nid])) {
730 			struct page *page =
731 				list_entry(h->hugepage_freelists[next_nid].next,
732 					  struct page, lru);
733 			list_del(&page->lru);
734 			h->free_huge_pages--;
735 			h->free_huge_pages_node[next_nid]--;
736 			if (acct_surplus) {
737 				h->surplus_huge_pages--;
738 				h->surplus_huge_pages_node[next_nid]--;
739 			}
740 			update_and_free_page(h, page);
741 			ret = 1;
742 			break;
743 		}
744 		next_nid = hstate_next_node_to_free(h, nodes_allowed);
745 	} while (next_nid != start_nid);
746 
747 	return ret;
748 }
749 
750 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
751 {
752 	struct page *page;
753 	unsigned int r_nid;
754 
755 	if (h->order >= MAX_ORDER)
756 		return NULL;
757 
758 	/*
759 	 * Assume we will successfully allocate the surplus page to
760 	 * prevent racing processes from causing the surplus to exceed
761 	 * overcommit
762 	 *
763 	 * This however introduces a different race, where a process B
764 	 * tries to grow the static hugepage pool while alloc_pages() is
765 	 * called by process A. B will only examine the per-node
766 	 * counters in determining if surplus huge pages can be
767 	 * converted to normal huge pages in adjust_pool_surplus(). A
768 	 * won't be able to increment the per-node counter, until the
769 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
770 	 * no more huge pages can be converted from surplus to normal
771 	 * state (and doesn't try to convert again). Thus, we have a
772 	 * case where a surplus huge page exists, the pool is grown, and
773 	 * the surplus huge page still exists after, even though it
774 	 * should just have been converted to a normal huge page. This
775 	 * does not leak memory, though, as the hugepage will be freed
776 	 * once it is out of use. It also does not allow the counters to
777 	 * go out of whack in adjust_pool_surplus() as we don't modify
778 	 * the node values until we've gotten the hugepage and only the
779 	 * per-node value is checked there.
780 	 */
781 	spin_lock(&hugetlb_lock);
782 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
783 		spin_unlock(&hugetlb_lock);
784 		return NULL;
785 	} else {
786 		h->nr_huge_pages++;
787 		h->surplus_huge_pages++;
788 	}
789 	spin_unlock(&hugetlb_lock);
790 
791 	if (nid == NUMA_NO_NODE)
792 		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
793 				   __GFP_REPEAT|__GFP_NOWARN,
794 				   huge_page_order(h));
795 	else
796 		page = alloc_pages_exact_node(nid,
797 			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
798 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
799 
800 	if (page && arch_prepare_hugepage(page)) {
801 		__free_pages(page, huge_page_order(h));
802 		return NULL;
803 	}
804 
805 	spin_lock(&hugetlb_lock);
806 	if (page) {
807 		r_nid = page_to_nid(page);
808 		set_compound_page_dtor(page, free_huge_page);
809 		/*
810 		 * We incremented the global counters already
811 		 */
812 		h->nr_huge_pages_node[r_nid]++;
813 		h->surplus_huge_pages_node[r_nid]++;
814 		__count_vm_event(HTLB_BUDDY_PGALLOC);
815 	} else {
816 		h->nr_huge_pages--;
817 		h->surplus_huge_pages--;
818 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
819 	}
820 	spin_unlock(&hugetlb_lock);
821 
822 	return page;
823 }
824 
825 /*
826  * This allocation function is useful in the context where vma is irrelevant.
827  * E.g. soft-offlining uses this function because it only cares about the
828  * physical address of the error page.
829  */
830 struct page *alloc_huge_page_node(struct hstate *h, int nid)
831 {
832 	struct page *page;
833 
834 	spin_lock(&hugetlb_lock);
835 	page = dequeue_huge_page_node(h, nid);
836 	spin_unlock(&hugetlb_lock);
837 
838 	if (!page)
839 		page = alloc_buddy_huge_page(h, nid);
840 
841 	return page;
842 }
843 
844 /*
845  * Increase the hugetlb pool such that it can accommodate a reservation
846  * of size 'delta'.
847  */
848 static int gather_surplus_pages(struct hstate *h, int delta)
849 {
850 	struct list_head surplus_list;
851 	struct page *page, *tmp;
852 	int ret, i;
853 	int needed, allocated;
854 
855 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
856 	if (needed <= 0) {
857 		h->resv_huge_pages += delta;
858 		return 0;
859 	}
860 
861 	allocated = 0;
862 	INIT_LIST_HEAD(&surplus_list);
863 
864 	ret = -ENOMEM;
865 retry:
866 	spin_unlock(&hugetlb_lock);
867 	for (i = 0; i < needed; i++) {
868 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
869 		if (!page)
870 			/*
871 			 * We were not able to allocate enough pages to
872 			 * satisfy the entire reservation so we free what
873 			 * we've allocated so far.
874 			 */
875 			goto free;
876 
877 		list_add(&page->lru, &surplus_list);
878 	}
879 	allocated += needed;
880 
881 	/*
882 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
883 	 * because either resv_huge_pages or free_huge_pages may have changed.
884 	 */
885 	spin_lock(&hugetlb_lock);
886 	needed = (h->resv_huge_pages + delta) -
887 			(h->free_huge_pages + allocated);
888 	if (needed > 0)
889 		goto retry;
890 
891 	/*
892 	 * The surplus_list now contains _at_least_ the number of extra pages
893 	 * needed to accommodate the reservation.  Add the appropriate number
894 	 * of pages to the hugetlb pool and free the extras back to the buddy
895 	 * allocator.  Commit the entire reservation here to prevent another
896 	 * process from stealing the pages as they are added to the pool but
897 	 * before they are reserved.
898 	 */
899 	needed += allocated;
900 	h->resv_huge_pages += delta;
901 	ret = 0;
902 
903 	spin_unlock(&hugetlb_lock);
904 	/* Free the needed pages to the hugetlb pool */
905 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
906 		if ((--needed) < 0)
907 			break;
908 		list_del(&page->lru);
909 		/*
910 		 * This page is now managed by the hugetlb allocator and has
911 		 * no users -- drop the buddy allocator's reference.
912 		 */
913 		put_page_testzero(page);
914 		VM_BUG_ON(page_count(page));
915 		enqueue_huge_page(h, page);
916 	}
917 
918 	/* Free unnecessary surplus pages to the buddy allocator */
919 free:
920 	if (!list_empty(&surplus_list)) {
921 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
922 			list_del(&page->lru);
923 			put_page(page);
924 		}
925 	}
926 	spin_lock(&hugetlb_lock);
927 
928 	return ret;
929 }
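
/*
 * Editor's worked example (hypothetical numbers): with resv_huge_pages = 2,
 * free_huge_pages = 1 and delta = 3, the first pass computes
 * needed = (2 + 3) - 1 = 4 and allocates 4 surplus pages from the buddy
 * allocator with the lock dropped.  If another CPU freed one huge page in
 * the meantime (free_huge_pages = 2), the recheck gives
 * needed = (2 + 3) - (2 + 4) = -1, so after "needed += allocated" three of
 * the new pages are enqueued into the pool, the fourth is handed back to the
 * buddy allocator from surplus_list, and resv_huge_pages is raised by delta.
 */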
930 
931 /*
932  * When releasing a hugetlb pool reservation, any surplus pages that were
933  * allocated to satisfy the reservation must be explicitly freed if they were
934  * never used.
935  * Called with hugetlb_lock held.
936  */
937 static void return_unused_surplus_pages(struct hstate *h,
938 					unsigned long unused_resv_pages)
939 {
940 	unsigned long nr_pages;
941 
942 	/* Uncommit the reservation */
943 	h->resv_huge_pages -= unused_resv_pages;
944 
945 	/* Cannot return gigantic pages currently */
946 	if (h->order >= MAX_ORDER)
947 		return;
948 
949 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
950 
951 	/*
952 	 * We want to release as many surplus pages as possible, spread
953 	 * evenly across all nodes with memory. Iterate across these nodes
954 	 * until we can no longer free unreserved surplus pages. This occurs
955 	 * when the nodes with surplus pages have no free pages.
956 	 * free_pool_huge_page() will balance the freed pages across the
957 	 * on-line nodes with memory and will handle the hstate accounting.
958 	 */
959 	while (nr_pages--) {
960 		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
961 			break;
962 	}
963 }
964 
965 /*
966  * Determine if the huge page at addr within the vma has an associated
967  * reservation.  Where it does not, we will need to logically increase
968  * reservation and actually increase quota before an allocation can occur.
969  * Where any new reservation would be required, the reservation change is
970  * prepared, but not committed.  Once the page has been quota'd, allocated
971  * and instantiated, the change should be committed via vma_commit_reservation.
972  * No action is required on failure.
973  */
974 static long vma_needs_reservation(struct hstate *h,
975 			struct vm_area_struct *vma, unsigned long addr)
976 {
977 	struct address_space *mapping = vma->vm_file->f_mapping;
978 	struct inode *inode = mapping->host;
979 
980 	if (vma->vm_flags & VM_MAYSHARE) {
981 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
982 		return region_chg(&inode->i_mapping->private_list,
983 							idx, idx + 1);
984 
985 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
986 		return 1;
987 
988 	} else  {
989 		long err;
990 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
991 		struct resv_map *reservations = vma_resv_map(vma);
992 
993 		err = region_chg(&reservations->regions, idx, idx + 1);
994 		if (err < 0)
995 			return err;
996 		return 0;
997 	}
998 }
999 static void vma_commit_reservation(struct hstate *h,
1000 			struct vm_area_struct *vma, unsigned long addr)
1001 {
1002 	struct address_space *mapping = vma->vm_file->f_mapping;
1003 	struct inode *inode = mapping->host;
1004 
1005 	if (vma->vm_flags & VM_MAYSHARE) {
1006 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1007 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1008 
1009 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1010 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1011 		struct resv_map *reservations = vma_resv_map(vma);
1012 
1013 		/* Mark this page used in the map. */
1014 		region_add(&reservations->regions, idx, idx + 1);
1015 	}
1016 }
1017 
1018 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1019 				    unsigned long addr, int avoid_reserve)
1020 {
1021 	struct hstate *h = hstate_vma(vma);
1022 	struct page *page;
1023 	struct address_space *mapping = vma->vm_file->f_mapping;
1024 	struct inode *inode = mapping->host;
1025 	long chg;
1026 
1027 	/*
1028 	 * Processes that did not create the mapping will have no reserves and
1029 	 * will not have accounted against quota. Check that the quota can be
1030 	 * made before satisfying the allocation.
1031 	 * MAP_NORESERVE mappings may also need pages and quota allocated
1032 	 * if no reserve mapping overlaps.
1033 	 */
1034 	chg = vma_needs_reservation(h, vma, addr);
1035 	if (chg < 0)
1036 		return ERR_PTR(chg);
1037 	if (chg)
1038 		if (hugetlb_get_quota(inode->i_mapping, chg))
1039 			return ERR_PTR(-ENOSPC);
1040 
1041 	spin_lock(&hugetlb_lock);
1042 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1043 	spin_unlock(&hugetlb_lock);
1044 
1045 	if (!page) {
1046 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1047 		if (!page) {
1048 			hugetlb_put_quota(inode->i_mapping, chg);
1049 			return ERR_PTR(-VM_FAULT_SIGBUS);
1050 		}
1051 	}
1052 
1053 	set_page_private(page, (unsigned long) mapping);
1054 
1055 	vma_commit_reservation(h, vma, addr);
1056 
1057 	return page;
1058 }
1059 
1060 int __weak alloc_bootmem_huge_page(struct hstate *h)
1061 {
1062 	struct huge_bootmem_page *m;
1063 	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1064 
1065 	while (nr_nodes) {
1066 		void *addr;
1067 
1068 		addr = __alloc_bootmem_node_nopanic(
1069 				NODE_DATA(hstate_next_node_to_alloc(h,
1070 						&node_states[N_HIGH_MEMORY])),
1071 				huge_page_size(h), huge_page_size(h), 0);
1072 
1073 		if (addr) {
1074 			/*
1075 			 * Use the beginning of the huge page to store the
1076 			 * huge_bootmem_page struct (until gather_bootmem
1077 			 * puts them into the mem_map).
1078 			 */
1079 			m = addr;
1080 			goto found;
1081 		}
1082 		nr_nodes--;
1083 	}
1084 	return 0;
1085 
1086 found:
1087 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1088 	/* Put them into a private list first because mem_map is not up yet */
1089 	list_add(&m->list, &huge_boot_pages);
1090 	m->hstate = h;
1091 	return 1;
1092 }
1093 
1094 static void prep_compound_huge_page(struct page *page, int order)
1095 {
1096 	if (unlikely(order > (MAX_ORDER - 1)))
1097 		prep_compound_gigantic_page(page, order);
1098 	else
1099 		prep_compound_page(page, order);
1100 }
1101 
1102 /* Put bootmem huge pages into the standard lists after mem_map is up */
1103 static void __init gather_bootmem_prealloc(void)
1104 {
1105 	struct huge_bootmem_page *m;
1106 
1107 	list_for_each_entry(m, &huge_boot_pages, list) {
1108 		struct page *page = virt_to_page(m);
1109 		struct hstate *h = m->hstate;
1110 		__ClearPageReserved(page);
1111 		WARN_ON(page_count(page) != 1);
1112 		prep_compound_huge_page(page, h->order);
1113 		prep_new_huge_page(h, page, page_to_nid(page));
1114 	}
1115 }
1116 
1117 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1118 {
1119 	unsigned long i;
1120 
1121 	for (i = 0; i < h->max_huge_pages; ++i) {
1122 		if (h->order >= MAX_ORDER) {
1123 			if (!alloc_bootmem_huge_page(h))
1124 				break;
1125 		} else if (!alloc_fresh_huge_page(h,
1126 					 &node_states[N_HIGH_MEMORY]))
1127 			break;
1128 	}
1129 	h->max_huge_pages = i;
1130 }
1131 
1132 static void __init hugetlb_init_hstates(void)
1133 {
1134 	struct hstate *h;
1135 
1136 	for_each_hstate(h) {
1137 		/* oversize hugepages were init'ed in early boot */
1138 		if (h->order < MAX_ORDER)
1139 			hugetlb_hstate_alloc_pages(h);
1140 	}
1141 }
1142 
1143 static char * __init memfmt(char *buf, unsigned long n)
1144 {
1145 	if (n >= (1UL << 30))
1146 		sprintf(buf, "%lu GB", n >> 30);
1147 	else if (n >= (1UL << 20))
1148 		sprintf(buf, "%lu MB", n >> 20);
1149 	else
1150 		sprintf(buf, "%lu KB", n >> 10);
1151 	return buf;
1152 }
1153 
1154 static void __init report_hugepages(void)
1155 {
1156 	struct hstate *h;
1157 
1158 	for_each_hstate(h) {
1159 		char buf[32];
1160 		printk(KERN_INFO "HugeTLB registered %s page size, "
1161 				 "pre-allocated %ld pages\n",
1162 			memfmt(buf, huge_page_size(h)),
1163 			h->free_huge_pages);
1164 	}
1165 }
1166 
1167 #ifdef CONFIG_HIGHMEM
1168 static void try_to_free_low(struct hstate *h, unsigned long count,
1169 						nodemask_t *nodes_allowed)
1170 {
1171 	int i;
1172 
1173 	if (h->order >= MAX_ORDER)
1174 		return;
1175 
1176 	for_each_node_mask(i, *nodes_allowed) {
1177 		struct page *page, *next;
1178 		struct list_head *freel = &h->hugepage_freelists[i];
1179 		list_for_each_entry_safe(page, next, freel, lru) {
1180 			if (count >= h->nr_huge_pages)
1181 				return;
1182 			if (PageHighMem(page))
1183 				continue;
1184 			list_del(&page->lru);
1185 			update_and_free_page(h, page);
1186 			h->free_huge_pages--;
1187 			h->free_huge_pages_node[page_to_nid(page)]--;
1188 		}
1189 	}
1190 }
1191 #else
1192 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1193 						nodemask_t *nodes_allowed)
1194 {
1195 }
1196 #endif
1197 
1198 /*
1199  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1200  * balanced by operating on them in a round-robin fashion.
1201  * Returns 1 if an adjustment was made.
1202  */
1203 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1204 				int delta)
1205 {
1206 	int start_nid, next_nid;
1207 	int ret = 0;
1208 
1209 	VM_BUG_ON(delta != -1 && delta != 1);
1210 
1211 	if (delta < 0)
1212 		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1213 	else
1214 		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1215 	next_nid = start_nid;
1216 
1217 	do {
1218 		int nid = next_nid;
1219 		if (delta < 0)  {
1220 			/*
1221 			 * To shrink on this node, there must be a surplus page
1222 			 */
1223 			if (!h->surplus_huge_pages_node[nid]) {
1224 				next_nid = hstate_next_node_to_alloc(h,
1225 								nodes_allowed);
1226 				continue;
1227 			}
1228 		}
1229 		if (delta > 0) {
1230 			/*
1231 			 * Surplus cannot exceed the total number of pages
1232 			 */
1233 			if (h->surplus_huge_pages_node[nid] >=
1234 						h->nr_huge_pages_node[nid]) {
1235 				next_nid = hstate_next_node_to_free(h,
1236 								nodes_allowed);
1237 				continue;
1238 			}
1239 		}
1240 
1241 		h->surplus_huge_pages += delta;
1242 		h->surplus_huge_pages_node[nid] += delta;
1243 		ret = 1;
1244 		break;
1245 	} while (next_nid != start_nid);
1246 
1247 	return ret;
1248 }
1249 
1250 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1251 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1252 						nodemask_t *nodes_allowed)
1253 {
1254 	unsigned long min_count, ret;
1255 
1256 	if (h->order >= MAX_ORDER)
1257 		return h->max_huge_pages;
1258 
1259 	/*
1260 	 * Increase the pool size
1261 	 * First take pages out of surplus state.  Then make up the
1262 	 * remaining difference by allocating fresh huge pages.
1263 	 *
1264 	 * We might race with alloc_buddy_huge_page() here and be unable
1265 	 * not critical, though; it just means the overall size of the
1266 	 * not critical, though, it just means the overall size of the
1267 	 * pool might be one hugepage larger than it needs to be, but
1268 	 * within all the constraints specified by the sysctls.
1269 	 */
1270 	spin_lock(&hugetlb_lock);
1271 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1272 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1273 			break;
1274 	}
1275 
1276 	while (count > persistent_huge_pages(h)) {
1277 		/*
1278 		 * If this allocation races such that we no longer need the
1279 		 * page, free_huge_page will handle it by freeing the page
1280 		 * and reducing the surplus.
1281 		 */
1282 		spin_unlock(&hugetlb_lock);
1283 		ret = alloc_fresh_huge_page(h, nodes_allowed);
1284 		spin_lock(&hugetlb_lock);
1285 		if (!ret)
1286 			goto out;
1287 
1288 		/* Bail for signals. Probably ctrl-c from user */
1289 		if (signal_pending(current))
1290 			goto out;
1291 	}
1292 
1293 	/*
1294 	 * Decrease the pool size
1295 	 * First return free pages to the buddy allocator (being careful
1296 	 * to keep enough around to satisfy reservations).  Then place
1297 	 * pages into surplus state as needed so the pool will shrink
1298 	 * to the desired size as pages become free.
1299 	 *
1300 	 * By placing pages into the surplus state independent of the
1301 	 * overcommit value, we are allowing the surplus pool size to
1302 	 * exceed overcommit. There are few sane options here. Since
1303 	 * alloc_buddy_huge_page() is checking the global counter,
1304 	 * though, we'll note that we're not allowed to exceed surplus
1305 	 * and won't grow the pool anywhere else. Not until one of the
1306 	 * sysctls is changed, or the surplus pages go out of use.
1307 	 */
1308 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1309 	min_count = max(count, min_count);
1310 	try_to_free_low(h, min_count, nodes_allowed);
1311 	while (min_count < persistent_huge_pages(h)) {
1312 		if (!free_pool_huge_page(h, nodes_allowed, 0))
1313 			break;
1314 	}
1315 	while (count < persistent_huge_pages(h)) {
1316 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1317 			break;
1318 	}
1319 out:
1320 	ret = persistent_huge_pages(h);
1321 	spin_unlock(&hugetlb_lock);
1322 	return ret;
1323 }
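
/*
 * Editor's worked example (hypothetical numbers): with nr_huge_pages = 10,
 * free_huge_pages = 4 and resv_huge_pages = 3, min_count = 3 + 10 - 4 = 9,
 * i.e. six pages are in use and three more are promised to reservations.
 * A request to shrink the pool to count = 2 can therefore free only one
 * page immediately; the rest of the difference is covered by moving pages
 * into surplus state so they are released later, as their users free them.
 */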
1324 
1325 #define HSTATE_ATTR_RO(_name) \
1326 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1327 
1328 #define HSTATE_ATTR(_name) \
1329 	static struct kobj_attribute _name##_attr = \
1330 		__ATTR(_name, 0644, _name##_show, _name##_store)
1331 
1332 static struct kobject *hugepages_kobj;
1333 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1334 
1335 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1336 
1337 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1338 {
1339 	int i;
1340 
1341 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1342 		if (hstate_kobjs[i] == kobj) {
1343 			if (nidp)
1344 				*nidp = NUMA_NO_NODE;
1345 			return &hstates[i];
1346 		}
1347 
1348 	return kobj_to_node_hstate(kobj, nidp);
1349 }
1350 
1351 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1352 					struct kobj_attribute *attr, char *buf)
1353 {
1354 	struct hstate *h;
1355 	unsigned long nr_huge_pages;
1356 	int nid;
1357 
1358 	h = kobj_to_hstate(kobj, &nid);
1359 	if (nid == NUMA_NO_NODE)
1360 		nr_huge_pages = h->nr_huge_pages;
1361 	else
1362 		nr_huge_pages = h->nr_huge_pages_node[nid];
1363 
1364 	return sprintf(buf, "%lu\n", nr_huge_pages);
1365 }
1366 
1367 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1368 			struct kobject *kobj, struct kobj_attribute *attr,
1369 			const char *buf, size_t len)
1370 {
1371 	int err;
1372 	int nid;
1373 	unsigned long count;
1374 	struct hstate *h;
1375 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1376 
1377 	err = strict_strtoul(buf, 10, &count);
1378 	if (err)
1379 		goto out;
1380 
1381 	h = kobj_to_hstate(kobj, &nid);
1382 	if (h->order >= MAX_ORDER) {
1383 		err = -EINVAL;
1384 		goto out;
1385 	}
1386 
1387 	if (nid == NUMA_NO_NODE) {
1388 		/*
1389 		 * global hstate attribute
1390 		 */
1391 		if (!(obey_mempolicy &&
1392 				init_nodemask_of_mempolicy(nodes_allowed))) {
1393 			NODEMASK_FREE(nodes_allowed);
1394 			nodes_allowed = &node_states[N_HIGH_MEMORY];
1395 		}
1396 	} else if (nodes_allowed) {
1397 		/*
1398 		 * per node hstate attribute: adjust count to global,
1399 		 * but restrict alloc/free to the specified node.
1400 		 */
1401 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1402 		init_nodemask_of_node(nodes_allowed, nid);
1403 	} else
1404 		nodes_allowed = &node_states[N_HIGH_MEMORY];
1405 
1406 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1407 
1408 	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1409 		NODEMASK_FREE(nodes_allowed);
1410 
1411 	return len;
1412 out:
1413 	NODEMASK_FREE(nodes_allowed);
1414 	return err;
1415 }
1416 
1417 static ssize_t nr_hugepages_show(struct kobject *kobj,
1418 				       struct kobj_attribute *attr, char *buf)
1419 {
1420 	return nr_hugepages_show_common(kobj, attr, buf);
1421 }
1422 
1423 static ssize_t nr_hugepages_store(struct kobject *kobj,
1424 	       struct kobj_attribute *attr, const char *buf, size_t len)
1425 {
1426 	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1427 }
1428 HSTATE_ATTR(nr_hugepages);
1429 
1430 #ifdef CONFIG_NUMA
1431 
1432 /*
1433  * hstate attribute for optionally mempolicy-based constraint on persistent
1434  * huge page alloc/free.
1435  */
1436 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1437 				       struct kobj_attribute *attr, char *buf)
1438 {
1439 	return nr_hugepages_show_common(kobj, attr, buf);
1440 }
1441 
1442 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1443 	       struct kobj_attribute *attr, const char *buf, size_t len)
1444 {
1445 	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1446 }
1447 HSTATE_ATTR(nr_hugepages_mempolicy);
1448 #endif
1449 
1450 
1451 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1452 					struct kobj_attribute *attr, char *buf)
1453 {
1454 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1455 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1456 }
1457 
1458 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1459 		struct kobj_attribute *attr, const char *buf, size_t count)
1460 {
1461 	int err;
1462 	unsigned long input;
1463 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1464 
1465 	if (h->order >= MAX_ORDER)
1466 		return -EINVAL;
1467 
1468 	err = strict_strtoul(buf, 10, &input);
1469 	if (err)
1470 		return err;
1471 
1472 	spin_lock(&hugetlb_lock);
1473 	h->nr_overcommit_huge_pages = input;
1474 	spin_unlock(&hugetlb_lock);
1475 
1476 	return count;
1477 }
1478 HSTATE_ATTR(nr_overcommit_hugepages);
1479 
1480 static ssize_t free_hugepages_show(struct kobject *kobj,
1481 					struct kobj_attribute *attr, char *buf)
1482 {
1483 	struct hstate *h;
1484 	unsigned long free_huge_pages;
1485 	int nid;
1486 
1487 	h = kobj_to_hstate(kobj, &nid);
1488 	if (nid == NUMA_NO_NODE)
1489 		free_huge_pages = h->free_huge_pages;
1490 	else
1491 		free_huge_pages = h->free_huge_pages_node[nid];
1492 
1493 	return sprintf(buf, "%lu\n", free_huge_pages);
1494 }
1495 HSTATE_ATTR_RO(free_hugepages);
1496 
1497 static ssize_t resv_hugepages_show(struct kobject *kobj,
1498 					struct kobj_attribute *attr, char *buf)
1499 {
1500 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1501 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1502 }
1503 HSTATE_ATTR_RO(resv_hugepages);
1504 
1505 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1506 					struct kobj_attribute *attr, char *buf)
1507 {
1508 	struct hstate *h;
1509 	unsigned long surplus_huge_pages;
1510 	int nid;
1511 
1512 	h = kobj_to_hstate(kobj, &nid);
1513 	if (nid == NUMA_NO_NODE)
1514 		surplus_huge_pages = h->surplus_huge_pages;
1515 	else
1516 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1517 
1518 	return sprintf(buf, "%lu\n", surplus_huge_pages);
1519 }
1520 HSTATE_ATTR_RO(surplus_hugepages);
1521 
1522 static struct attribute *hstate_attrs[] = {
1523 	&nr_hugepages_attr.attr,
1524 	&nr_overcommit_hugepages_attr.attr,
1525 	&free_hugepages_attr.attr,
1526 	&resv_hugepages_attr.attr,
1527 	&surplus_hugepages_attr.attr,
1528 #ifdef CONFIG_NUMA
1529 	&nr_hugepages_mempolicy_attr.attr,
1530 #endif
1531 	NULL,
1532 };
1533 
1534 static struct attribute_group hstate_attr_group = {
1535 	.attrs = hstate_attrs,
1536 };
1537 
1538 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1539 				    struct kobject **hstate_kobjs,
1540 				    struct attribute_group *hstate_attr_group)
1541 {
1542 	int retval;
1543 	int hi = h - hstates;
1544 
1545 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1546 	if (!hstate_kobjs[hi])
1547 		return -ENOMEM;
1548 
1549 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1550 	if (retval)
1551 		kobject_put(hstate_kobjs[hi]);
1552 
1553 	return retval;
1554 }
1555 
1556 static void __init hugetlb_sysfs_init(void)
1557 {
1558 	struct hstate *h;
1559 	int err;
1560 
1561 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1562 	if (!hugepages_kobj)
1563 		return;
1564 
1565 	for_each_hstate(h) {
1566 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1567 					 hstate_kobjs, &hstate_attr_group);
1568 		if (err)
1569 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1570 								h->name);
1571 	}
1572 }
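
/*
 * Editor's note: with a 2 MB default hstate this creates, for example,
 * /sys/kernel/mm/hugepages/hugepages-2048kB/ containing nr_hugepages,
 * nr_overcommit_hugepages, free_hugepages, resv_hugepages and
 * surplus_hugepages, so the pool can be resized with something like
 * "echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages".
 */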
1573 
1574 #ifdef CONFIG_NUMA
1575 
1576 /*
1577  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1578  * with node sysdevs in node_devices[] using a parallel array.  The array
1579  * index of a node sysdev or _hstate == node id.
1580  * This is here to avoid any static dependency of the node sysdev driver, in
1581  * the base kernel, on the hugetlb module.
1582  */
1583 struct node_hstate {
1584 	struct kobject		*hugepages_kobj;
1585 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1586 };
1587 struct node_hstate node_hstates[MAX_NUMNODES];
1588 
1589 /*
1590  * A subset of global hstate attributes for node sysdevs
1591  */
1592 static struct attribute *per_node_hstate_attrs[] = {
1593 	&nr_hugepages_attr.attr,
1594 	&free_hugepages_attr.attr,
1595 	&surplus_hugepages_attr.attr,
1596 	NULL,
1597 };
1598 
1599 static struct attribute_group per_node_hstate_attr_group = {
1600 	.attrs = per_node_hstate_attrs,
1601 };
1602 
1603 /*
1604  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1605  * Returns node id via non-NULL nidp.
1606  */
1607 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1608 {
1609 	int nid;
1610 
1611 	for (nid = 0; nid < nr_node_ids; nid++) {
1612 		struct node_hstate *nhs = &node_hstates[nid];
1613 		int i;
1614 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1615 			if (nhs->hstate_kobjs[i] == kobj) {
1616 				if (nidp)
1617 					*nidp = nid;
1618 				return &hstates[i];
1619 			}
1620 	}
1621 
1622 	BUG();
1623 	return NULL;
1624 }
1625 
1626 /*
1627  * Unregister hstate attributes from a single node sysdev.
1628  * No-op if no hstate attributes attached.
1629  */
1630 void hugetlb_unregister_node(struct node *node)
1631 {
1632 	struct hstate *h;
1633 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1634 
1635 	if (!nhs->hugepages_kobj)
1636 		return;		/* no hstate attributes */
1637 
1638 	for_each_hstate(h)
1639 		if (nhs->hstate_kobjs[h - hstates]) {
1640 			kobject_put(nhs->hstate_kobjs[h - hstates]);
1641 			nhs->hstate_kobjs[h - hstates] = NULL;
1642 		}
1643 
1644 	kobject_put(nhs->hugepages_kobj);
1645 	nhs->hugepages_kobj = NULL;
1646 }
1647 
1648 /*
1649  * hugetlb module exit:  unregister hstate attributes from node sysdevs
1650  * that have them.
1651  */
1652 static void hugetlb_unregister_all_nodes(void)
1653 {
1654 	int nid;
1655 
1656 	/*
1657 	 * disable node sysdev registrations.
1658 	 */
1659 	register_hugetlbfs_with_node(NULL, NULL);
1660 
1661 	/*
1662 	 * remove hstate attributes from any nodes that have them.
1663 	 */
1664 	for (nid = 0; nid < nr_node_ids; nid++)
1665 		hugetlb_unregister_node(&node_devices[nid]);
1666 }
1667 
1668 /*
1669  * Register hstate attributes for a single node sysdev.
1670  * No-op if attributes already registered.
1671  */
1672 void hugetlb_register_node(struct node *node)
1673 {
1674 	struct hstate *h;
1675 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1676 	int err;
1677 
1678 	if (nhs->hugepages_kobj)
1679 		return;		/* already allocated */
1680 
1681 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1682 							&node->sysdev.kobj);
1683 	if (!nhs->hugepages_kobj)
1684 		return;
1685 
1686 	for_each_hstate(h) {
1687 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1688 						nhs->hstate_kobjs,
1689 						&per_node_hstate_attr_group);
1690 		if (err) {
1691 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1692 					" for node %d\n",
1693 						h->name, node->sysdev.id);
1694 			hugetlb_unregister_node(node);
1695 			break;
1696 		}
1697 	}
1698 }
1699 
1700 /*
1701  * hugetlb init time:  register hstate attributes for all registered node
1702  * sysdevs of nodes that have memory.  All on-line nodes should have
1703  * registered their associated sysdev by this time.
1704  */
1705 static void hugetlb_register_all_nodes(void)
1706 {
1707 	int nid;
1708 
1709 	for_each_node_state(nid, N_HIGH_MEMORY) {
1710 		struct node *node = &node_devices[nid];
1711 		if (node->sysdev.id == nid)
1712 			hugetlb_register_node(node);
1713 	}
1714 
1715 	/*
1716 	 * Let the node sysdev driver know we're here so it can
1717 	 * [un]register hstate attributes on node hotplug.
1718 	 */
1719 	register_hugetlbfs_with_node(hugetlb_register_node,
1720 				     hugetlb_unregister_node);
1721 }
1722 #else	/* !CONFIG_NUMA */
1723 
1724 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1725 {
1726 	BUG();
1727 	if (nidp)
1728 		*nidp = -1;
1729 	return NULL;
1730 }
1731 
1732 static void hugetlb_unregister_all_nodes(void) { }
1733 
1734 static void hugetlb_register_all_nodes(void) { }
1735 
1736 #endif
1737 
1738 static void __exit hugetlb_exit(void)
1739 {
1740 	struct hstate *h;
1741 
1742 	hugetlb_unregister_all_nodes();
1743 
1744 	for_each_hstate(h) {
1745 		kobject_put(hstate_kobjs[h - hstates]);
1746 	}
1747 
1748 	kobject_put(hugepages_kobj);
1749 }
1750 module_exit(hugetlb_exit);
1751 
1752 static int __init hugetlb_init(void)
1753 {
1754 	/* Some platforms decide whether they support huge pages at boot
1755 	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1756 	 * there is no such support.
1757 	 */
1758 	if (HPAGE_SHIFT == 0)
1759 		return 0;
1760 
1761 	if (!size_to_hstate(default_hstate_size)) {
1762 		default_hstate_size = HPAGE_SIZE;
1763 		if (!size_to_hstate(default_hstate_size))
1764 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1765 	}
1766 	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1767 	if (default_hstate_max_huge_pages)
1768 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1769 
1770 	hugetlb_init_hstates();
1771 
1772 	gather_bootmem_prealloc();
1773 
1774 	report_hugepages();
1775 
1776 	hugetlb_sysfs_init();
1777 
1778 	hugetlb_register_all_nodes();
1779 
1780 	return 0;
1781 }
1782 module_init(hugetlb_init);
1783 
1784 /* Should be called on processing a hugepagesz=... option */
1785 void __init hugetlb_add_hstate(unsigned order)
1786 {
1787 	struct hstate *h;
1788 	unsigned long i;
1789 
1790 	if (size_to_hstate(PAGE_SIZE << order)) {
1791 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1792 		return;
1793 	}
1794 	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1795 	BUG_ON(order == 0);
1796 	h = &hstates[max_hstate++];
1797 	h->order = order;
1798 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1799 	h->nr_huge_pages = 0;
1800 	h->free_huge_pages = 0;
1801 	for (i = 0; i < MAX_NUMNODES; ++i)
1802 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1803 	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1804 	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1805 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1806 					huge_page_size(h)/1024);
1807 
1808 	parsed_hstate = h;
1809 }
1810 
1811 static int __init hugetlb_nrpages_setup(char *s)
1812 {
1813 	unsigned long *mhp;
1814 	static unsigned long *last_mhp;
1815 
1816 	/*
1817 	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1818 	 * so this hugepages= parameter goes to the "default hstate".
1819 	 */
1820 	if (!max_hstate)
1821 		mhp = &default_hstate_max_huge_pages;
1822 	else
1823 		mhp = &parsed_hstate->max_huge_pages;
1824 
1825 	if (mhp == last_mhp) {
1826 		printk(KERN_WARNING "hugepages= specified twice without "
1827 			"interleaving hugepagesz=, ignoring\n");
1828 		return 1;
1829 	}
1830 
1831 	if (sscanf(s, "%lu", mhp) <= 0)
1832 		*mhp = 0;
1833 
1834 	/*
1835 	 * Global state is always initialized later in hugetlb_init.
1836 	 * But we need to allocate >= MAX_ORDER hstates here early to still
1837 	 * But pages for >= MAX_ORDER hstates must be allocated here, early, to still
1838 	 */
1839 	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1840 		hugetlb_hstate_alloc_pages(parsed_hstate);
1841 
1842 	last_mhp = mhp;
1843 
1844 	return 1;
1845 }
1846 __setup("hugepages=", hugetlb_nrpages_setup);
1847 
1848 static int __init hugetlb_default_setup(char *s)
1849 {
1850 	default_hstate_size = memparse(s, &s);
1851 	return 1;
1852 }
1853 __setup("default_hugepagesz=", hugetlb_default_setup);
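
/*
 * Editor's note: these early-boot parameters compose; e.g. on x86_64 with
 * 1 GB page support,
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512 default_hugepagesz=1G
 * registers both hstates and pre-allocates 2 x 1 GB and 512 x 2 MB pages.
 * As the parsing above requires, each hugepages= count applies to the most
 * recently parsed hugepagesz= (or to the default hstate if none was given).
 */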
1854 
1855 static unsigned int cpuset_mems_nr(unsigned int *array)
1856 {
1857 	int node;
1858 	unsigned int nr = 0;
1859 
1860 	for_each_node_mask(node, cpuset_current_mems_allowed)
1861 		nr += array[node];
1862 
1863 	return nr;
1864 }
1865 
1866 #ifdef CONFIG_SYSCTL
1867 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1868 			 struct ctl_table *table, int write,
1869 			 void __user *buffer, size_t *length, loff_t *ppos)
1870 {
1871 	struct hstate *h = &default_hstate;
1872 	unsigned long tmp;
1873 	int ret;
1874 
1875 	if (!write)
1876 		tmp = h->max_huge_pages;
1877 
1878 	if (write && h->order >= MAX_ORDER)
1879 		return -EINVAL;
1880 
1881 	table->data = &tmp;
1882 	table->maxlen = sizeof(unsigned long);
1883 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1884 	if (ret)
1885 		goto out;
1886 
1887 	if (write) {
1888 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1889 						GFP_KERNEL | __GFP_NORETRY);
1890 		if (!(obey_mempolicy &&
1891 			       init_nodemask_of_mempolicy(nodes_allowed))) {
1892 			NODEMASK_FREE(nodes_allowed);
1893 			nodes_allowed = &node_states[N_HIGH_MEMORY];
1894 		}
1895 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1896 
1897 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1898 			NODEMASK_FREE(nodes_allowed);
1899 	}
1900 out:
1901 	return ret;
1902 }
1903 
1904 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1905 			  void __user *buffer, size_t *length, loff_t *ppos)
1906 {
1907 
1908 	return hugetlb_sysctl_handler_common(false, table, write,
1909 							buffer, length, ppos);
1910 }
1911 
1912 #ifdef CONFIG_NUMA
1913 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1914 			  void __user *buffer, size_t *length, loff_t *ppos)
1915 {
1916 	return hugetlb_sysctl_handler_common(true, table, write,
1917 							buffer, length, ppos);
1918 }
1919 #endif /* CONFIG_NUMA */
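/*
 * Example usage of these handlers (illustrative): writing to
 * /proc/sys/vm/nr_hugepages, e.g. "echo 1024 > /proc/sys/vm/nr_hugepages",
 * ends up in hugetlb_sysctl_handler() and resizes the default hstate's
 * pool across all allowed nodes; /proc/sys/vm/nr_hugepages_mempolicy
 * (with CONFIG_NUMA) does the same but constrains the allocation to the
 * nodes of the calling task's memory policy.
 */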
1920 
1921 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1922 			void __user *buffer,
1923 			size_t *length, loff_t *ppos)
1924 {
1925 	proc_dointvec(table, write, buffer, length, ppos);
1926 	if (hugepages_treat_as_movable)
1927 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1928 	else
1929 		htlb_alloc_mask = GFP_HIGHUSER;
1930 	return 0;
1931 }
1932 
1933 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1934 			void __user *buffer,
1935 			size_t *length, loff_t *ppos)
1936 {
1937 	struct hstate *h = &default_hstate;
1938 	unsigned long tmp;
1939 	int ret;
1940 
1941 	if (!write)
1942 		tmp = h->nr_overcommit_huge_pages;
1943 
1944 	if (write && h->order >= MAX_ORDER)
1945 		return -EINVAL;
1946 
1947 	table->data = &tmp;
1948 	table->maxlen = sizeof(unsigned long);
1949 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1950 	if (ret)
1951 		goto out;
1952 
1953 	if (write) {
1954 		spin_lock(&hugetlb_lock);
1955 		h->nr_overcommit_huge_pages = tmp;
1956 		spin_unlock(&hugetlb_lock);
1957 	}
1958 out:
1959 	return ret;
1960 }
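/*
 * Example (illustrative): "echo 64 > /proc/sys/vm/nr_overcommit_hugepages"
 * reaches hugetlb_overcommit_handler() and allows up to 64 surplus huge
 * pages of the default size to be allocated from the buddy allocator on
 * demand.  Writes are rejected with -EINVAL for gigantic hstates
 * (order >= MAX_ORDER), which cannot be allocated at runtime.
 */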
1961 
1962 #endif /* CONFIG_SYSCTL */
1963 
1964 void hugetlb_report_meminfo(struct seq_file *m)
1965 {
1966 	struct hstate *h = &default_hstate;
1967 	seq_printf(m,
1968 			"HugePages_Total:   %5lu\n"
1969 			"HugePages_Free:    %5lu\n"
1970 			"HugePages_Rsvd:    %5lu\n"
1971 			"HugePages_Surp:    %5lu\n"
1972 			"Hugepagesize:   %8lu kB\n",
1973 			h->nr_huge_pages,
1974 			h->free_huge_pages,
1975 			h->resv_huge_pages,
1976 			h->surplus_huge_pages,
1977 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1978 }
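/*
 * Example /proc/meminfo output produced by the above (values illustrative):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      510
 *	HugePages_Rsvd:       10
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */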
1979 
1980 int hugetlb_report_node_meminfo(int nid, char *buf)
1981 {
1982 	struct hstate *h = &default_hstate;
1983 	return sprintf(buf,
1984 		"Node %d HugePages_Total: %5u\n"
1985 		"Node %d HugePages_Free:  %5u\n"
1986 		"Node %d HugePages_Surp:  %5u\n",
1987 		nid, h->nr_huge_pages_node[nid],
1988 		nid, h->free_huge_pages_node[nid],
1989 		nid, h->surplus_huge_pages_node[nid]);
1990 }
1991 
1992 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1993 unsigned long hugetlb_total_pages(void)
1994 {
1995 	struct hstate *h = &default_hstate;
1996 	return h->nr_huge_pages * pages_per_huge_page(h);
1997 }
1998 
1999 static int hugetlb_acct_memory(struct hstate *h, long delta)
2000 {
2001 	int ret = -ENOMEM;
2002 
2003 	spin_lock(&hugetlb_lock);
2004 	/*
2005 	 * When cpuset is configured, it breaks the strict hugetlb page
2006 	 * reservation as the accounting is done on a global variable. Such
2007 	 * reservation is completely rubbish in the presence of cpuset because
2008 	 * the reservation is not checked against page availability for the
2009 	 * current cpuset. The application can still be OOM'ed by the kernel
2010 	 * for lack of free hugetlb pages in the cpuset that the task is in.
2011 	 * Attempting to enforce strict accounting with cpusets is almost
2012 	 * impossible (or too ugly) because cpusets are too fluid: tasks and
2013 	 * memory nodes can be dynamically moved between cpusets.
2014 	 *
2015 	 * The change of semantics for shared hugetlb mapping with cpuset is
2016 	 * undesirable. However, in order to preserve some of the semantics,
2017 	 * we fall back to checking against the current free page availability
2018 	 * as a best-effort attempt, hopefully minimizing the impact of the
2019 	 * semantic change that cpusets introduce.
2020 	 */
2021 	if (delta > 0) {
2022 		if (gather_surplus_pages(h, delta) < 0)
2023 			goto out;
2024 
2025 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2026 			return_unused_surplus_pages(h, delta);
2027 			goto out;
2028 		}
2029 	}
2030 
2031 	ret = 0;
2032 	if (delta < 0)
2033 		return_unused_surplus_pages(h, (unsigned long) -delta);
2034 
2035 out:
2036 	spin_unlock(&hugetlb_lock);
2037 	return ret;
2038 }
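/*
 * Example of the best-effort cpuset check above (illustrative numbers):
 * a mapping asks to reserve delta = 8 huge pages while the caller's
 * cpuset only has 2 free huge pages on its allowed nodes.
 * gather_surplus_pages() may still succeed globally, but the
 * cpuset_mems_nr() test fails, the surplus pages are handed back and
 * the reservation fails with -ENOMEM.
 */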
2039 
2040 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2041 {
2042 	struct resv_map *reservations = vma_resv_map(vma);
2043 
2044 	/*
2045 	 * This new VMA should share its sibling's reservation map if present.
2046 	 * The VMA will only ever have a valid reservation map pointer where
2047 	 * it is being copied for another still existing VMA.  As that VMA
2048 	 * has a reference to the reservation map it cannot disappear until
2049 	 * after this open call completes.  It is therefore safe to take a
2050 	 * new reference here without additional locking.
2051 	 */
2052 	if (reservations)
2053 		kref_get(&reservations->refs);
2054 }
2055 
2056 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2057 {
2058 	struct hstate *h = hstate_vma(vma);
2059 	struct resv_map *reservations = vma_resv_map(vma);
2060 	unsigned long reserve;
2061 	unsigned long start;
2062 	unsigned long end;
2063 
2064 	if (reservations) {
2065 		start = vma_hugecache_offset(h, vma, vma->vm_start);
2066 		end = vma_hugecache_offset(h, vma, vma->vm_end);
2067 
2068 		reserve = (end - start) -
2069 			region_count(&reservations->regions, start, end);
2070 
2071 		kref_put(&reservations->refs, resv_map_release);
2072 
2073 		if (reserve) {
2074 			hugetlb_acct_memory(h, -reserve);
2075 			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2076 		}
2077 	}
2078 }
2079 
2080 /*
2081  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2082  * handle_mm_fault() to try to instantiate regular-sized pages in the
2083  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2084  * this far.
2085  */
2086 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2087 {
2088 	BUG();
2089 	return 0;
2090 }
2091 
2092 const struct vm_operations_struct hugetlb_vm_ops = {
2093 	.fault = hugetlb_vm_op_fault,
2094 	.open = hugetlb_vm_op_open,
2095 	.close = hugetlb_vm_op_close,
2096 };
2097 
2098 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2099 				int writable)
2100 {
2101 	pte_t entry;
2102 
2103 	if (writable) {
2104 		entry =
2105 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2106 	} else {
2107 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2108 	}
2109 	entry = pte_mkyoung(entry);
2110 	entry = pte_mkhuge(entry);
2111 
2112 	return entry;
2113 }
2114 
2115 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2116 				   unsigned long address, pte_t *ptep)
2117 {
2118 	pte_t entry;
2119 
2120 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2121 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2122 		update_mmu_cache(vma, address, ptep);
2123 	}
2124 }
2125 
2126 
2127 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2128 			    struct vm_area_struct *vma)
2129 {
2130 	pte_t *src_pte, *dst_pte, entry;
2131 	struct page *ptepage;
2132 	unsigned long addr;
2133 	int cow;
2134 	struct hstate *h = hstate_vma(vma);
2135 	unsigned long sz = huge_page_size(h);
2136 
2137 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2138 
2139 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2140 		src_pte = huge_pte_offset(src, addr);
2141 		if (!src_pte)
2142 			continue;
2143 		dst_pte = huge_pte_alloc(dst, addr, sz);
2144 		if (!dst_pte)
2145 			goto nomem;
2146 
2147 		/* If the pagetables are shared don't copy or take references */
2148 		if (dst_pte == src_pte)
2149 			continue;
2150 
2151 		spin_lock(&dst->page_table_lock);
2152 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2153 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2154 			if (cow)
2155 				huge_ptep_set_wrprotect(src, addr, src_pte);
2156 			entry = huge_ptep_get(src_pte);
2157 			ptepage = pte_page(entry);
2158 			get_page(ptepage);
2159 			page_dup_rmap(ptepage);
2160 			set_huge_pte_at(dst, addr, dst_pte, entry);
2161 		}
2162 		spin_unlock(&src->page_table_lock);
2163 		spin_unlock(&dst->page_table_lock);
2164 	}
2165 	return 0;
2166 
2167 nomem:
2168 	return -ENOMEM;
2169 }
2170 
2171 static int is_hugetlb_entry_migration(pte_t pte)
2172 {
2173 	swp_entry_t swp;
2174 
2175 	if (huge_pte_none(pte) || pte_present(pte))
2176 		return 0;
2177 	swp = pte_to_swp_entry(pte);
2178 	if (non_swap_entry(swp) && is_migration_entry(swp)) {
2179 		return 1;
2180 	} else
2181 		return 0;
2182 }
2183 
2184 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2185 {
2186 	swp_entry_t swp;
2187 
2188 	if (huge_pte_none(pte) || pte_present(pte))
2189 		return 0;
2190 	swp = pte_to_swp_entry(pte);
2191 	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
2192 		return 1;
2193 	} else
2194 		return 0;
2195 }
2196 
2197 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2198 			    unsigned long end, struct page *ref_page)
2199 {
2200 	struct mm_struct *mm = vma->vm_mm;
2201 	unsigned long address;
2202 	pte_t *ptep;
2203 	pte_t pte;
2204 	struct page *page;
2205 	struct page *tmp;
2206 	struct hstate *h = hstate_vma(vma);
2207 	unsigned long sz = huge_page_size(h);
2208 
2209 	/*
2210 	 * A page gathering list, protected by the per-file i_mmap_lock. The
2211 	 * lock is used to avoid list corruption from multiple unmapping
2212 	 * of the same page since we are using page->lru.
2213 	 */
2214 	LIST_HEAD(page_list);
2215 
2216 	WARN_ON(!is_vm_hugetlb_page(vma));
2217 	BUG_ON(start & ~huge_page_mask(h));
2218 	BUG_ON(end & ~huge_page_mask(h));
2219 
2220 	mmu_notifier_invalidate_range_start(mm, start, end);
2221 	spin_lock(&mm->page_table_lock);
2222 	for (address = start; address < end; address += sz) {
2223 		ptep = huge_pte_offset(mm, address);
2224 		if (!ptep)
2225 			continue;
2226 
2227 		if (huge_pmd_unshare(mm, &address, ptep))
2228 			continue;
2229 
2230 		/*
2231 		 * If a reference page is supplied, it is because a specific
2232 		 * page is being unmapped, not a range. Ensure the page we
2233 		 * are about to unmap is the actual page of interest.
2234 		 */
2235 		if (ref_page) {
2236 			pte = huge_ptep_get(ptep);
2237 			if (huge_pte_none(pte))
2238 				continue;
2239 			page = pte_page(pte);
2240 			if (page != ref_page)
2241 				continue;
2242 
2243 			/*
2244 			 * Mark the VMA as having unmapped its page so that
2245 			 * future faults in this VMA will fail rather than
2246 			 * looking like data was lost
2247 			 */
2248 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2249 		}
2250 
2251 		pte = huge_ptep_get_and_clear(mm, address, ptep);
2252 		if (huge_pte_none(pte))
2253 			continue;
2254 
2255 		/*
2256 		 * An HWPoisoned hugepage has already been unmapped and its reference dropped.
2257 		 */
2258 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2259 			continue;
2260 
2261 		page = pte_page(pte);
2262 		if (pte_dirty(pte))
2263 			set_page_dirty(page);
2264 		list_add(&page->lru, &page_list);
2265 	}
2266 	spin_unlock(&mm->page_table_lock);
2267 	flush_tlb_range(vma, start, end);
2268 	mmu_notifier_invalidate_range_end(mm, start, end);
2269 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2270 		page_remove_rmap(page);
2271 		list_del(&page->lru);
2272 		put_page(page);
2273 	}
2274 }
2275 
2276 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2277 			  unsigned long end, struct page *ref_page)
2278 {
2279 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2280 	__unmap_hugepage_range(vma, start, end, ref_page);
2281 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2282 }
2283 
2284 /*
2285  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2286  * mapping it owns the reserve page for. The intention is to unmap the page
2287  * from other VMAs and let the children be SIGKILLed if they are faulting the
2288  * same region.
2289  */
2290 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2291 				struct page *page, unsigned long address)
2292 {
2293 	struct hstate *h = hstate_vma(vma);
2294 	struct vm_area_struct *iter_vma;
2295 	struct address_space *mapping;
2296 	struct prio_tree_iter iter;
2297 	pgoff_t pgoff;
2298 
2299 	/*
2300 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2301 	 * from page cache lookup which is in HPAGE_SIZE units.
2302 	 */
2303 	address = address & huge_page_mask(h);
2304 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2305 		+ (vma->vm_pgoff >> PAGE_SHIFT);
2306 	mapping = (struct address_space *)page_private(page);
2307 
2308 	/*
2309 	 * Take the mapping lock for the duration of the table walk. As
2310 	 * this mapping should be shared between all the VMAs,
2311 	 * __unmap_hugepage_range() is called with the lock already held.
2312 	 */
2313 	spin_lock(&mapping->i_mmap_lock);
2314 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2315 		/* Do not unmap the current VMA */
2316 		if (iter_vma == vma)
2317 			continue;
2318 
2319 		/*
2320 		 * Unmap the page from other VMAs without their own reserves.
2321 		 * They get marked to be SIGKILLed if they fault in these
2322 		 * areas. This is because a future no-page fault on this VMA
2323 		 * could insert a zeroed page instead of the data existing
2324 		 * from the time of fork. This would look like data corruption.
2325 		 */
2326 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2327 			__unmap_hugepage_range(iter_vma,
2328 				address, address + huge_page_size(h),
2329 				page);
2330 	}
2331 	spin_unlock(&mapping->i_mmap_lock);
2332 
2333 	return 1;
2334 }
2335 
2336 /*
2337  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2338  */
2339 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2340 			unsigned long address, pte_t *ptep, pte_t pte,
2341 			struct page *pagecache_page)
2342 {
2343 	struct hstate *h = hstate_vma(vma);
2344 	struct page *old_page, *new_page;
2345 	int avoidcopy;
2346 	int outside_reserve = 0;
2347 
2348 	old_page = pte_page(pte);
2349 
2350 retry_avoidcopy:
2351 	/* If no-one else is actually using this page, avoid the copy
2352 	 * and just make the page writable */
2353 	avoidcopy = (page_mapcount(old_page) == 1);
2354 	if (avoidcopy) {
2355 		if (PageAnon(old_page))
2356 			page_move_anon_rmap(old_page, vma, address);
2357 		set_huge_ptep_writable(vma, address, ptep);
2358 		return 0;
2359 	}
2360 
2361 	/*
2362 	 * If the process that created a MAP_PRIVATE mapping is about to
2363 	 * perform a COW due to a shared page count, attempt to satisfy
2364 	 * the allocation without using the existing reserves. The pagecache
2365 	 * page is used to determine if the reserve at this address was
2366 	 * consumed or not. If reserves were used, a partial faulted mapping
2367 	 * at the time of fork() could consume its reserves on COW instead
2368 	 * of the full address range.
2369 	 */
2370 	if (!(vma->vm_flags & VM_MAYSHARE) &&
2371 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2372 			old_page != pagecache_page)
2373 		outside_reserve = 1;
2374 
2375 	page_cache_get(old_page);
2376 
2377 	/* Drop page_table_lock as buddy allocator may be called */
2378 	spin_unlock(&mm->page_table_lock);
2379 	new_page = alloc_huge_page(vma, address, outside_reserve);
2380 
2381 	if (IS_ERR(new_page)) {
2382 		page_cache_release(old_page);
2383 
2384 		/*
2385 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2386 		 * it is due to references held by a child and an insufficient
2387 		 * huge page pool. To guarantee the original mapper's
2388 		 * reliability, unmap the page from child processes. The child
2389 		 * may get SIGKILLed if it later faults.
2390 		 */
2391 		if (outside_reserve) {
2392 			BUG_ON(huge_pte_none(pte));
2393 			if (unmap_ref_private(mm, vma, old_page, address)) {
2394 				BUG_ON(page_count(old_page) != 1);
2395 				BUG_ON(huge_pte_none(pte));
2396 				spin_lock(&mm->page_table_lock);
2397 				goto retry_avoidcopy;
2398 			}
2399 			WARN_ON_ONCE(1);
2400 		}
2401 
2402 		/* Caller expects lock to be held */
2403 		spin_lock(&mm->page_table_lock);
2404 		return -PTR_ERR(new_page);
2405 	}
2406 
2407 	/*
2408 	 * When the original hugepage is a shared one, it does not have
2409 	 * an anon_vma prepared.
2410 	 */
2411 	if (unlikely(anon_vma_prepare(vma))) {
2412 		/* Caller expects lock to be held */
2413 		spin_lock(&mm->page_table_lock);
2414 		return VM_FAULT_OOM;
2415 	}
2416 
2417 	copy_user_huge_page(new_page, old_page, address, vma,
2418 			    pages_per_huge_page(h));
2419 	__SetPageUptodate(new_page);
2420 
2421 	/*
2422 	 * Retake the page_table_lock to check for racing updates
2423 	 * before the page tables are altered
2424 	 */
2425 	spin_lock(&mm->page_table_lock);
2426 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2427 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2428 		/* Break COW */
2429 		mmu_notifier_invalidate_range_start(mm,
2430 			address & huge_page_mask(h),
2431 			(address & huge_page_mask(h)) + huge_page_size(h));
2432 		huge_ptep_clear_flush(vma, address, ptep);
2433 		set_huge_pte_at(mm, address, ptep,
2434 				make_huge_pte(vma, new_page, 1));
2435 		page_remove_rmap(old_page);
2436 		hugepage_add_new_anon_rmap(new_page, vma, address);
2437 		/* Make the old page be freed below */
2438 		new_page = old_page;
2439 		mmu_notifier_invalidate_range_end(mm,
2440 			address & huge_page_mask(h),
2441 			(address & huge_page_mask(h)) + huge_page_size(h));
2442 	}
2443 	page_cache_release(new_page);
2444 	page_cache_release(old_page);
2445 	return 0;
2446 }
2447 
2448 /* Return the pagecache page at a given address within a VMA */
2449 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2450 			struct vm_area_struct *vma, unsigned long address)
2451 {
2452 	struct address_space *mapping;
2453 	pgoff_t idx;
2454 
2455 	mapping = vma->vm_file->f_mapping;
2456 	idx = vma_hugecache_offset(h, vma, address);
2457 
2458 	return find_lock_page(mapping, idx);
2459 }
2460 
2461 /*
2462  * Return whether there is a pagecache page backing the given address in the VMA.
2463  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2464  */
2465 static bool hugetlbfs_pagecache_present(struct hstate *h,
2466 			struct vm_area_struct *vma, unsigned long address)
2467 {
2468 	struct address_space *mapping;
2469 	pgoff_t idx;
2470 	struct page *page;
2471 
2472 	mapping = vma->vm_file->f_mapping;
2473 	idx = vma_hugecache_offset(h, vma, address);
2474 
2475 	page = find_get_page(mapping, idx);
2476 	if (page)
2477 		put_page(page);
2478 	return page != NULL;
2479 }
2480 
2481 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2482 			unsigned long address, pte_t *ptep, unsigned int flags)
2483 {
2484 	struct hstate *h = hstate_vma(vma);
2485 	int ret = VM_FAULT_SIGBUS;
2486 	pgoff_t idx;
2487 	unsigned long size;
2488 	struct page *page;
2489 	struct address_space *mapping;
2490 	pte_t new_pte;
2491 
2492 	/*
2493 	 * Currently, we are forced to kill the process in the event the
2494 	 * original mapper has unmapped pages from the child due to a failed
2495 	 * COW. Warn that such a situation has occurred as it may not be obvious.
2496 	 */
2497 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2498 		printk(KERN_WARNING
2499 			"PID %d killed due to inadequate hugepage pool\n",
2500 			current->pid);
2501 		return ret;
2502 	}
2503 
2504 	mapping = vma->vm_file->f_mapping;
2505 	idx = vma_hugecache_offset(h, vma, address);
2506 
2507 	/*
2508 	 * Use page lock to guard against racing truncation
2509 	 * before we get page_table_lock.
2510 	 */
2511 retry:
2512 	page = find_lock_page(mapping, idx);
2513 	if (!page) {
2514 		size = i_size_read(mapping->host) >> huge_page_shift(h);
2515 		if (idx >= size)
2516 			goto out;
2517 		page = alloc_huge_page(vma, address, 0);
2518 		if (IS_ERR(page)) {
2519 			ret = -PTR_ERR(page);
2520 			goto out;
2521 		}
2522 		clear_huge_page(page, address, pages_per_huge_page(h));
2523 		__SetPageUptodate(page);
2524 
2525 		if (vma->vm_flags & VM_MAYSHARE) {
2526 			int err;
2527 			struct inode *inode = mapping->host;
2528 
2529 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2530 			if (err) {
2531 				put_page(page);
2532 				if (err == -EEXIST)
2533 					goto retry;
2534 				goto out;
2535 			}
2536 
2537 			spin_lock(&inode->i_lock);
2538 			inode->i_blocks += blocks_per_huge_page(h);
2539 			spin_unlock(&inode->i_lock);
2540 			page_dup_rmap(page);
2541 		} else {
2542 			lock_page(page);
2543 			if (unlikely(anon_vma_prepare(vma))) {
2544 				ret = VM_FAULT_OOM;
2545 				goto backout_unlocked;
2546 			}
2547 			hugepage_add_new_anon_rmap(page, vma, address);
2548 		}
2549 	} else {
2550 		/*
2551 		 * If a memory error occurs between mmap() and fault, some processes
2552 		 * don't have a hwpoisoned swap entry for the errored virtual address.
2553 		 * So we need to block hugepage faults with a PG_hwpoison bit check.
2554 		 */
2555 		if (unlikely(PageHWPoison(page))) {
2556 			ret = VM_FAULT_HWPOISON |
2557 			      VM_FAULT_SET_HINDEX(h - hstates);
2558 			goto backout_unlocked;
2559 		}
2560 		page_dup_rmap(page);
2561 	}
2562 
2563 	/*
2564 	 * If we are going to COW a private mapping later, we examine the
2565 	 * pending reservations for this page now. This will ensure that
2566 	 * any allocations necessary to record that reservation occur outside
2567 	 * the spinlock.
2568 	 */
2569 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2570 		if (vma_needs_reservation(h, vma, address) < 0) {
2571 			ret = VM_FAULT_OOM;
2572 			goto backout_unlocked;
2573 		}
2574 
2575 	spin_lock(&mm->page_table_lock);
2576 	size = i_size_read(mapping->host) >> huge_page_shift(h);
2577 	if (idx >= size)
2578 		goto backout;
2579 
2580 	ret = 0;
2581 	if (!huge_pte_none(huge_ptep_get(ptep)))
2582 		goto backout;
2583 
2584 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2585 				&& (vma->vm_flags & VM_SHARED)));
2586 	set_huge_pte_at(mm, address, ptep, new_pte);
2587 
2588 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2589 		/* Optimization, do the COW without a second fault */
2590 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2591 	}
2592 
2593 	spin_unlock(&mm->page_table_lock);
2594 	unlock_page(page);
2595 out:
2596 	return ret;
2597 
2598 backout:
2599 	spin_unlock(&mm->page_table_lock);
2600 backout_unlocked:
2601 	unlock_page(page);
2602 	put_page(page);
2603 	goto out;
2604 }
2605 
2606 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2607 			unsigned long address, unsigned int flags)
2608 {
2609 	pte_t *ptep;
2610 	pte_t entry;
2611 	int ret;
2612 	struct page *page = NULL;
2613 	struct page *pagecache_page = NULL;
2614 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2615 	struct hstate *h = hstate_vma(vma);
2616 
2617 	ptep = huge_pte_offset(mm, address);
2618 	if (ptep) {
2619 		entry = huge_ptep_get(ptep);
2620 		if (unlikely(is_hugetlb_entry_migration(entry))) {
2621 			migration_entry_wait(mm, (pmd_t *)ptep, address);
2622 			return 0;
2623 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2624 			return VM_FAULT_HWPOISON_LARGE |
2625 			       VM_FAULT_SET_HINDEX(h - hstates);
2626 	}
2627 
2628 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2629 	if (!ptep)
2630 		return VM_FAULT_OOM;
2631 
2632 	/*
2633 	 * Serialize hugepage allocation and instantiation, so that we don't
2634 	 * get spurious allocation failures if two CPUs race to instantiate
2635 	 * the same page in the page cache.
2636 	 */
2637 	mutex_lock(&hugetlb_instantiation_mutex);
2638 	entry = huge_ptep_get(ptep);
2639 	if (huge_pte_none(entry)) {
2640 		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2641 		goto out_mutex;
2642 	}
2643 
2644 	ret = 0;
2645 
2646 	/*
2647 	 * If we are going to COW the mapping later, we examine the pending
2648 	 * reservations for this page now. This will ensure that any
2649 	 * allocations necessary to record that reservation occur outside the
2650 	 * spinlock. For private mappings, we also lookup the pagecache
2651 	 * page now as it is used to determine if a reservation has been
2652 	 * consumed.
2653 	 */
2654 	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2655 		if (vma_needs_reservation(h, vma, address) < 0) {
2656 			ret = VM_FAULT_OOM;
2657 			goto out_mutex;
2658 		}
2659 
2660 		if (!(vma->vm_flags & VM_MAYSHARE))
2661 			pagecache_page = hugetlbfs_pagecache_page(h,
2662 								vma, address);
2663 	}
2664 
2665 	/*
2666 	 * hugetlb_cow() requires page locks of pte_page(entry) and
2667 	 * pagecache_page, so here we need to take the former one
2668 	 * when page != pagecache_page or !pagecache_page.
2669 	 * Note that locking order is always pagecache_page -> page,
2670 	 * so no worry about deadlock.
2671 	 */
2672 	page = pte_page(entry);
2673 	if (page != pagecache_page)
2674 		lock_page(page);
2675 
2676 	spin_lock(&mm->page_table_lock);
2677 	/* Check for a racing update before calling hugetlb_cow */
2678 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2679 		goto out_page_table_lock;
2680 
2681 
2682 	if (flags & FAULT_FLAG_WRITE) {
2683 		if (!pte_write(entry)) {
2684 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2685 							pagecache_page);
2686 			goto out_page_table_lock;
2687 		}
2688 		entry = pte_mkdirty(entry);
2689 	}
2690 	entry = pte_mkyoung(entry);
2691 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2692 						flags & FAULT_FLAG_WRITE))
2693 		update_mmu_cache(vma, address, ptep);
2694 
2695 out_page_table_lock:
2696 	spin_unlock(&mm->page_table_lock);
2697 
2698 	if (pagecache_page) {
2699 		unlock_page(pagecache_page);
2700 		put_page(pagecache_page);
2701 	}
2702 	if (page != pagecache_page)
2703 		unlock_page(page);
2704 
2705 out_mutex:
2706 	mutex_unlock(&hugetlb_instantiation_mutex);
2707 
2708 	return ret;
2709 }
2710 
2711 /* Can be overridden by architectures */
2712 __attribute__((weak)) struct page *
2713 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2714 	       pud_t *pud, int write)
2715 {
2716 	BUG();
2717 	return NULL;
2718 }
2719 
2720 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2721 			struct page **pages, struct vm_area_struct **vmas,
2722 			unsigned long *position, int *length, int i,
2723 			unsigned int flags)
2724 {
2725 	unsigned long pfn_offset;
2726 	unsigned long vaddr = *position;
2727 	int remainder = *length;
2728 	struct hstate *h = hstate_vma(vma);
2729 
2730 	spin_lock(&mm->page_table_lock);
2731 	while (vaddr < vma->vm_end && remainder) {
2732 		pte_t *pte;
2733 		int absent;
2734 		struct page *page;
2735 
2736 		/*
2737 		 * Some archs (sparc64, sh*) have multiple pte_ts for
2738 		 * each hugepage.  We have to make sure we get the
2739 		 * first, for the page indexing below to work.
2740 		 */
2741 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2742 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2743 
2744 		/*
2745 		 * When coredumping, it suits get_dump_page if we just return
2746 		 * an error where there's an empty slot with no huge pagecache
2747 		 * to back it.  This way, we avoid allocating a hugepage, and
2748 		 * the sparse dumpfile avoids allocating disk blocks, but its
2749 		 * huge holes still show up with zeroes where they need to be.
2750 		 */
2751 		if (absent && (flags & FOLL_DUMP) &&
2752 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2753 			remainder = 0;
2754 			break;
2755 		}
2756 
2757 		if (absent ||
2758 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2759 			int ret;
2760 
2761 			spin_unlock(&mm->page_table_lock);
2762 			ret = hugetlb_fault(mm, vma, vaddr,
2763 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2764 			spin_lock(&mm->page_table_lock);
2765 			if (!(ret & VM_FAULT_ERROR))
2766 				continue;
2767 
2768 			remainder = 0;
2769 			break;
2770 		}
2771 
2772 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2773 		page = pte_page(huge_ptep_get(pte));
2774 same_page:
2775 		if (pages) {
2776 			pages[i] = mem_map_offset(page, pfn_offset);
2777 			get_page(pages[i]);
2778 		}
2779 
2780 		if (vmas)
2781 			vmas[i] = vma;
2782 
2783 		vaddr += PAGE_SIZE;
2784 		++pfn_offset;
2785 		--remainder;
2786 		++i;
2787 		if (vaddr < vma->vm_end && remainder &&
2788 				pfn_offset < pages_per_huge_page(h)) {
2789 			/*
2790 			 * We use pfn_offset to avoid touching the pageframes
2791 			 * of this compound page.
2792 			 */
2793 			goto same_page;
2794 		}
2795 	}
2796 	spin_unlock(&mm->page_table_lock);
2797 	*length = remainder;
2798 	*position = vaddr;
2799 
2800 	return i ? i : -EFAULT;
2801 }
2802 
2803 void hugetlb_change_protection(struct vm_area_struct *vma,
2804 		unsigned long address, unsigned long end, pgprot_t newprot)
2805 {
2806 	struct mm_struct *mm = vma->vm_mm;
2807 	unsigned long start = address;
2808 	pte_t *ptep;
2809 	pte_t pte;
2810 	struct hstate *h = hstate_vma(vma);
2811 
2812 	BUG_ON(address >= end);
2813 	flush_cache_range(vma, address, end);
2814 
2815 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2816 	spin_lock(&mm->page_table_lock);
2817 	for (; address < end; address += huge_page_size(h)) {
2818 		ptep = huge_pte_offset(mm, address);
2819 		if (!ptep)
2820 			continue;
2821 		if (huge_pmd_unshare(mm, &address, ptep))
2822 			continue;
2823 		if (!huge_pte_none(huge_ptep_get(ptep))) {
2824 			pte = huge_ptep_get_and_clear(mm, address, ptep);
2825 			pte = pte_mkhuge(pte_modify(pte, newprot));
2826 			set_huge_pte_at(mm, address, ptep, pte);
2827 		}
2828 	}
2829 	spin_unlock(&mm->page_table_lock);
2830 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2831 
2832 	flush_tlb_range(vma, start, end);
2833 }
2834 
2835 int hugetlb_reserve_pages(struct inode *inode,
2836 					long from, long to,
2837 					struct vm_area_struct *vma,
2838 					int acctflag)
2839 {
2840 	long ret, chg;
2841 	struct hstate *h = hstate_inode(inode);
2842 
2843 	/*
2844 	 * Only apply hugepage reservation if asked. At fault time, an
2845 	 * attempt will be made for VM_NORESERVE mappings to allocate a page
2846 	 * and take filesystem quota without using the reserves.
2847 	 */
2848 	if (acctflag & VM_NORESERVE)
2849 		return 0;
2850 
2851 	/*
2852 	 * Shared mappings base their reservation on the number of pages that
2853 	 * are already allocated on behalf of the file. Private mappings need
2854 	 * to reserve the full area even if read-only as mprotect() may be
2855 	 * called to make the mapping read-write. Assume !vma is a shm mapping
2856 	 */
2857 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2858 		chg = region_chg(&inode->i_mapping->private_list, from, to);
2859 	else {
2860 		struct resv_map *resv_map = resv_map_alloc();
2861 		if (!resv_map)
2862 			return -ENOMEM;
2863 
2864 		chg = to - from;
2865 
2866 		set_vma_resv_map(vma, resv_map);
2867 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2868 	}
2869 
2870 	if (chg < 0)
2871 		return chg;
2872 
2873 	/* There must be enough filesystem quota for the mapping */
2874 	if (hugetlb_get_quota(inode->i_mapping, chg))
2875 		return -ENOSPC;
2876 
2877 	/*
2878 	 * Check that enough hugepages are available for the reservation.
2879 	 * Hand back the quota if there are not.
2880 	 */
2881 	ret = hugetlb_acct_memory(h, chg);
2882 	if (ret < 0) {
2883 		hugetlb_put_quota(inode->i_mapping, chg);
2884 		return ret;
2885 	}
2886 
2887 	/*
2888 	 * Account for the reservations made. Shared mappings record regions
2889 	 * that have reservations as they are shared by multiple VMAs.
2890 	 * When the last VMA disappears, the region map says how much
2891 	 * the reservation was and the page cache tells how much of
2892 	 * the reservation was consumed. Private mappings are per-VMA and
2893 	 * only the consumed reservations are tracked. When the VMA
2894 	 * disappears, the original reservation is the VMA size and the
2895 	 * consumed reservations are stored in the map. Hence, nothing
2896 	 * else has to be done for private mappings here
2897 	 */
2898 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2899 		region_add(&inode->i_mapping->private_list, from, to);
2900 	return 0;
2901 }
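/*
 * Worked example of the accounting above (illustrative): a shared mmap()
 * of huge pages 0-9 of a hugetlbfs file on which pages 0-4 already have
 * reservations recorded gives region_chg() = 5, so only 5 extra pages
 * are charged against the quota and the pool, and region_add() then
 * extends the file's region map to cover 0-9.  A private mapping of the
 * same range always charges the full 10 pages and tracks consumption in
 * its own resv_map instead.
 */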
2902 
2903 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2904 {
2905 	struct hstate *h = hstate_inode(inode);
2906 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
2907 
2908 	spin_lock(&inode->i_lock);
2909 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2910 	spin_unlock(&inode->i_lock);
2911 
2912 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2913 	hugetlb_acct_memory(h, -(chg - freed));
2914 }
2915 
2916 #ifdef CONFIG_MEMORY_FAILURE
2917 
2918 /* Should be called with hugetlb_lock held */
2919 static int is_hugepage_on_freelist(struct page *hpage)
2920 {
2921 	struct page *page;
2922 	struct page *tmp;
2923 	struct hstate *h = page_hstate(hpage);
2924 	int nid = page_to_nid(hpage);
2925 
2926 	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2927 		if (page == hpage)
2928 			return 1;
2929 	return 0;
2930 }
2931 
2932 /*
2933  * This function is called from memory failure code.
2934  * Assume the caller holds page lock of the head page.
2935  * Assume the caller holds the page lock of the head page.
2936 int dequeue_hwpoisoned_huge_page(struct page *hpage)
2937 {
2938 	struct hstate *h = page_hstate(hpage);
2939 	int nid = page_to_nid(hpage);
2940 	int ret = -EBUSY;
2941 
2942 	spin_lock(&hugetlb_lock);
2943 	if (is_hugepage_on_freelist(hpage)) {
2944 		list_del(&hpage->lru);
2945 		set_page_refcounted(hpage);
2946 		h->free_huge_pages--;
2947 		h->free_huge_pages_node[nid]--;
2948 		ret = 0;
2949 	}
2950 	spin_unlock(&hugetlb_lock);
2951 	return ret;
2952 }
2953 #endif
2954