xref: /openbmc/linux/mm/hugetlb.c (revision 97da55fc)
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/tlb.h>
28 
29 #include <linux/io.h>
30 #include <linux/hugetlb.h>
31 #include <linux/hugetlb_cgroup.h>
32 #include <linux/node.h>
33 #include "internal.h"
34 
35 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
36 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
37 unsigned long hugepages_treat_as_movable;
38 
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 
43 __initdata LIST_HEAD(huge_boot_pages);
44 
45 /* for command line parsing */
46 static struct hstate * __initdata parsed_hstate;
47 static unsigned long __initdata default_hstate_max_huge_pages;
48 static unsigned long __initdata default_hstate_size;
49 
50 /*
51  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
52  */
53 DEFINE_SPINLOCK(hugetlb_lock);
54 
55 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
56 {
57 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
58 
59 	spin_unlock(&spool->lock);
60 
61 	/* If no pages are used, and no other handles to the subpool
62 	 * remain, free the subpool. */
63 	if (free)
64 		kfree(spool);
65 }
66 
67 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
68 {
69 	struct hugepage_subpool *spool;
70 
71 	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
72 	if (!spool)
73 		return NULL;
74 
75 	spin_lock_init(&spool->lock);
76 	spool->count = 1;
77 	spool->max_hpages = nr_blocks;
78 	spool->used_hpages = 0;
79 
80 	return spool;
81 }
82 
83 void hugepage_put_subpool(struct hugepage_subpool *spool)
84 {
85 	spin_lock(&spool->lock);
86 	BUG_ON(!spool->count);
87 	spool->count--;
88 	unlock_or_release_subpool(spool);
89 }
90 
91 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
92 				      long delta)
93 {
94 	int ret = 0;
95 
96 	if (!spool)
97 		return 0;
98 
99 	spin_lock(&spool->lock);
100 	if ((spool->used_hpages + delta) <= spool->max_hpages) {
101 		spool->used_hpages += delta;
102 	} else {
103 		ret = -ENOMEM;
104 	}
105 	spin_unlock(&spool->lock);
106 
107 	return ret;
108 }
109 
110 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
111 				       long delta)
112 {
113 	if (!spool)
114 		return;
115 
116 	spin_lock(&spool->lock);
117 	spool->used_hpages -= delta;
118 	/* If hugetlbfs_put_super couldn't free spool due to
119 	 * an outstanding quota reference, free it now. */
120 	unlock_or_release_subpool(spool);
121 }
122 
123 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
124 {
125 	return HUGETLBFS_SB(inode->i_sb)->spool;
126 }
127 
128 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
129 {
130 	return subpool_inode(file_inode(vma->vm_file));
131 }
132 
133 /*
134  * Region tracking -- allows tracking of reservations and instantiated pages
135  *                    across the pages in a mapping.
136  *
137  * The region data structures are protected by a combination of the mmap_sem
138  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
139  * must either hold the mmap_sem for write, or the mmap_sem for read and
140  * the hugetlb_instantiation mutex:
141  *
142  *	down_write(&mm->mmap_sem);
143  * or
144  *	down_read(&mm->mmap_sem);
145  *	mutex_lock(&hugetlb_instantiation_mutex);
146  */
147 struct file_region {
148 	struct list_head link;
149 	long from;
150 	long to;
151 };
152 
153 static long region_add(struct list_head *head, long f, long t)
154 {
155 	struct file_region *rg, *nrg, *trg;
156 
157 	/* Locate the region we are either in or before. */
158 	list_for_each_entry(rg, head, link)
159 		if (f <= rg->to)
160 			break;
161 
162 	/* Round our left edge to the current segment if it encloses us. */
163 	if (f > rg->from)
164 		f = rg->from;
165 
166 	/* Check for and consume any regions we now overlap with. */
167 	nrg = rg;
168 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
169 		if (&rg->link == head)
170 			break;
171 		if (rg->from > t)
172 			break;
173 
174 		/* If this area reaches higher, extend our area to
175 		 * include it completely.  If this is not the first area
176 		 * which we intend to reuse, free it. */
177 		if (rg->to > t)
178 			t = rg->to;
179 		if (rg != nrg) {
180 			list_del(&rg->link);
181 			kfree(rg);
182 		}
183 	}
184 	nrg->from = f;
185 	nrg->to = t;
186 	return 0;
187 }
188 
189 static long region_chg(struct list_head *head, long f, long t)
190 {
191 	struct file_region *rg, *nrg;
192 	long chg = 0;
193 
194 	/* Locate the region we are before or in. */
195 	list_for_each_entry(rg, head, link)
196 		if (f <= rg->to)
197 			break;
198 
199 	/* If we are below the current region then a new region is required.
200 	 * Subtle: allocate a new region at the position but make it zero
201 	 * size so that we are guaranteed to record the reservation. */
202 	if (&rg->link == head || t < rg->from) {
203 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
204 		if (!nrg)
205 			return -ENOMEM;
206 		nrg->from = f;
207 		nrg->to   = f;
208 		INIT_LIST_HEAD(&nrg->link);
209 		list_add(&nrg->link, rg->link.prev);
210 
211 		return t - f;
212 	}
213 
214 	/* Round our left edge to the current segment if it encloses us. */
215 	if (f > rg->from)
216 		f = rg->from;
217 	chg = t - f;
218 
219 	/* Check for and consume any regions we now overlap with. */
220 	list_for_each_entry(rg, rg->link.prev, link) {
221 		if (&rg->link == head)
222 			break;
223 		if (rg->from > t)
224 			return chg;
225 
226 		/* We overlap with this area; if it extends further than
227 		 * we do, we must extend ourselves.  Account for its
228 		 * existing reservation. */
229 		if (rg->to > t) {
230 			chg += rg->to - t;
231 			t = rg->to;
232 		}
233 		chg -= rg->to - rg->from;
234 	}
235 	return chg;
236 }
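
/*
 * Worked example: with an existing region [0, 2), a reservation request
 * for offsets [1, 4) proceeds in two steps.  region_chg(head, 1, 4)
 * rounds the left edge down to 0, subtracts the already-reserved span
 * [0, 2) and returns 2 -- the number of new huge pages that must be
 * charged.  Once that charge succeeds, a matching region_add(head, 1, 4)
 * merges the overlap so the list ends up holding the single region [0, 4).
 */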
237 
238 static long region_truncate(struct list_head *head, long end)
239 {
240 	struct file_region *rg, *trg;
241 	long chg = 0;
242 
243 	/* Locate the region we are either in or before. */
244 	list_for_each_entry(rg, head, link)
245 		if (end <= rg->to)
246 			break;
247 	if (&rg->link == head)
248 		return 0;
249 
250 	/* If we are in the middle of a region then adjust it. */
251 	if (end > rg->from) {
252 		chg = rg->to - end;
253 		rg->to = end;
254 		rg = list_entry(rg->link.next, typeof(*rg), link);
255 	}
256 
257 	/* Drop any remaining regions. */
258 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
259 		if (&rg->link == head)
260 			break;
261 		chg += rg->to - rg->from;
262 		list_del(&rg->link);
263 		kfree(rg);
264 	}
265 	return chg;
266 }
267 
268 static long region_count(struct list_head *head, long f, long t)
269 {
270 	struct file_region *rg;
271 	long chg = 0;
272 
273 	/* Locate each segment we overlap with, and count that overlap. */
274 	list_for_each_entry(rg, head, link) {
275 		long seg_from;
276 		long seg_to;
277 
278 		if (rg->to <= f)
279 			continue;
280 		if (rg->from >= t)
281 			break;
282 
283 		seg_from = max(rg->from, f);
284 		seg_to = min(rg->to, t);
285 
286 		chg += seg_to - seg_from;
287 	}
288 
289 	return chg;
290 }
291 
292 /*
293  * Convert the address within this vma to the page offset within
294  * the mapping, in pagecache page units; huge pages here.
295  */
296 static pgoff_t vma_hugecache_offset(struct hstate *h,
297 			struct vm_area_struct *vma, unsigned long address)
298 {
299 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
300 			(vma->vm_pgoff >> huge_page_order(h));
301 }
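
/*
 * Example: with 2MB huge pages on x86-64 (huge_page_shift() == 21,
 * huge_page_order() == 9), an address 4MB past vm_start maps to the
 * pagecache index (vma->vm_pgoff >> 9) + 2.
 */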
302 
303 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
304 				     unsigned long address)
305 {
306 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
307 }
308 
309 /*
310  * Return the size of the pages allocated when backing a VMA. In the majority
311  * of cases this will be the same size as that used by the page table entries.
312  */
313 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
314 {
315 	struct hstate *hstate;
316 
317 	if (!is_vm_hugetlb_page(vma))
318 		return PAGE_SIZE;
319 
320 	hstate = hstate_vma(vma);
321 
322 	return 1UL << (hstate->order + PAGE_SHIFT);
323 }
324 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
325 
326 /*
327  * Return the page size being used by the MMU to back a VMA. In the majority
328  * of cases, the page size used by the kernel matches the MMU size. On
329  * architectures where it differs, an architecture-specific version of this
330  * function is required.
331  */
332 #ifndef vma_mmu_pagesize
333 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
334 {
335 	return vma_kernel_pagesize(vma);
336 }
337 #endif
338 
339 /*
340  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
341  * bits of the reservation map pointer, which are always clear due to
342  * alignment.
343  */
344 #define HPAGE_RESV_OWNER    (1UL << 0)
345 #define HPAGE_RESV_UNMAPPED (1UL << 1)
346 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
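
/*
 * For example, the task that created a private mapping typically has
 * vma->vm_private_data == ((unsigned long)resv_map | HPAGE_RESV_OWNER).
 * The helpers below pack and unpack this word; kmalloc() alignment of
 * struct resv_map guarantees the low two bits are free for the flags.
 */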
347 
348 /*
349  * These helpers are used to track how many pages are reserved for
350  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
351  * is guaranteed to have its future faults succeed.
352  *
353  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
354  * the reserve counters are updated with the hugetlb_lock held. It is safe
355  * to reset the VMA at fork() time as it is not yet in use, so there is no
356  * chance of the global counters becoming corrupted as a result of the reset.
357  *
358  * The private mapping reservation is represented in a subtly different
359  * manner from a shared mapping.  A shared mapping has a region map associated
360  * with the underlying file; this region map represents the backing file
361  * pages which have ever had a reservation assigned, and it persists even
362  * after the page is instantiated.  A private mapping has a region map
363  * associated with the original mmap() which is attached to all VMAs that
364  * reference it; this region map represents those offsets which have consumed
365  * a reservation, i.e. where pages have been instantiated.
366  */
367 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
368 {
369 	return (unsigned long)vma->vm_private_data;
370 }
371 
372 static void set_vma_private_data(struct vm_area_struct *vma,
373 							unsigned long value)
374 {
375 	vma->vm_private_data = (void *)value;
376 }
377 
378 struct resv_map {
379 	struct kref refs;
380 	struct list_head regions;
381 };
382 
383 static struct resv_map *resv_map_alloc(void)
384 {
385 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
386 	if (!resv_map)
387 		return NULL;
388 
389 	kref_init(&resv_map->refs);
390 	INIT_LIST_HEAD(&resv_map->regions);
391 
392 	return resv_map;
393 }
394 
395 static void resv_map_release(struct kref *ref)
396 {
397 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
398 
399 	/* Clear out any active regions before we release the map. */
400 	region_truncate(&resv_map->regions, 0);
401 	kfree(resv_map);
402 }
403 
404 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
405 {
406 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
407 	if (!(vma->vm_flags & VM_MAYSHARE))
408 		return (struct resv_map *)(get_vma_private_data(vma) &
409 							~HPAGE_RESV_MASK);
410 	return NULL;
411 }
412 
413 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
414 {
415 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
416 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
417 
418 	set_vma_private_data(vma, (get_vma_private_data(vma) &
419 				HPAGE_RESV_MASK) | (unsigned long)map);
420 }
421 
422 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
423 {
424 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
425 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
426 
427 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
428 }
429 
430 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
431 {
432 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
433 
434 	return (get_vma_private_data(vma) & flag) != 0;
435 }
436 
437 /* Decrement the reserved pages in the hugepage pool by one */
438 static void decrement_hugepage_resv_vma(struct hstate *h,
439 			struct vm_area_struct *vma)
440 {
441 	if (vma->vm_flags & VM_NORESERVE)
442 		return;
443 
444 	if (vma->vm_flags & VM_MAYSHARE) {
445 		/* Shared mappings always use reserves */
446 		h->resv_huge_pages--;
447 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
448 		/*
449 		 * Only the process that called mmap() has reserves for
450 		 * private mappings.
451 		 */
452 		h->resv_huge_pages--;
453 	}
454 }
455 
456 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
457 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
458 {
459 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
460 	if (!(vma->vm_flags & VM_MAYSHARE))
461 		vma->vm_private_data = (void *)0;
462 }
463 
464 /* Returns true if the VMA has associated reserve pages */
465 static int vma_has_reserves(struct vm_area_struct *vma)
466 {
467 	if (vma->vm_flags & VM_MAYSHARE)
468 		return 1;
469 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
470 		return 1;
471 	return 0;
472 }
473 
474 static void copy_gigantic_page(struct page *dst, struct page *src)
475 {
476 	int i;
477 	struct hstate *h = page_hstate(src);
478 	struct page *dst_base = dst;
479 	struct page *src_base = src;
480 
481 	for (i = 0; i < pages_per_huge_page(h); ) {
482 		cond_resched();
483 		copy_highpage(dst, src);
484 
485 		i++;
486 		dst = mem_map_next(dst, dst_base, i);
487 		src = mem_map_next(src, src_base, i);
488 	}
489 }
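
/*
 * Gigantic pages (order >= MAX_ORDER) may span mem_map sections that are
 * not virtually contiguous, so plain "page + i" arithmetic is not safe
 * for them; mem_map_next() is used above to step across section
 * boundaries, and copy_huge_page() below falls back to this variant for
 * the same reason.
 */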
490 
491 void copy_huge_page(struct page *dst, struct page *src)
492 {
493 	int i;
494 	struct hstate *h = page_hstate(src);
495 
496 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
497 		copy_gigantic_page(dst, src);
498 		return;
499 	}
500 
501 	might_sleep();
502 	for (i = 0; i < pages_per_huge_page(h); i++) {
503 		cond_resched();
504 		copy_highpage(dst + i, src + i);
505 	}
506 }
507 
508 static void enqueue_huge_page(struct hstate *h, struct page *page)
509 {
510 	int nid = page_to_nid(page);
511 	list_move(&page->lru, &h->hugepage_freelists[nid]);
512 	h->free_huge_pages++;
513 	h->free_huge_pages_node[nid]++;
514 }
515 
516 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
517 {
518 	struct page *page;
519 
520 	if (list_empty(&h->hugepage_freelists[nid]))
521 		return NULL;
522 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
523 	list_move(&page->lru, &h->hugepage_activelist);
524 	set_page_refcounted(page);
525 	h->free_huge_pages--;
526 	h->free_huge_pages_node[nid]--;
527 	return page;
528 }
529 
530 static struct page *dequeue_huge_page_vma(struct hstate *h,
531 				struct vm_area_struct *vma,
532 				unsigned long address, int avoid_reserve)
533 {
534 	struct page *page = NULL;
535 	struct mempolicy *mpol;
536 	nodemask_t *nodemask;
537 	struct zonelist *zonelist;
538 	struct zone *zone;
539 	struct zoneref *z;
540 	unsigned int cpuset_mems_cookie;
541 
542 retry_cpuset:
543 	cpuset_mems_cookie = get_mems_allowed();
544 	zonelist = huge_zonelist(vma, address,
545 					htlb_alloc_mask, &mpol, &nodemask);
546 	/*
547 	 * A child process with MAP_PRIVATE mappings created by its parent
548 	 * has no page reserves. This check ensures that reservations are
549 	 * not "stolen"; the child may still get SIGKILLed.
550 	 */
551 	if (!vma_has_reserves(vma) &&
552 			h->free_huge_pages - h->resv_huge_pages == 0)
553 		goto err;
554 
555 	/* If reserves cannot be used, ensure enough pages are in the pool */
556 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
557 		goto err;
558 
559 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
560 						MAX_NR_ZONES - 1, nodemask) {
561 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
562 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
563 			if (page) {
564 				if (!avoid_reserve)
565 					decrement_hugepage_resv_vma(h, vma);
566 				break;
567 			}
568 		}
569 	}
570 
571 	mpol_cond_put(mpol);
572 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
573 		goto retry_cpuset;
574 	return page;
575 
576 err:
577 	mpol_cond_put(mpol);
578 	return NULL;
579 }
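
/*
 * The get_mems_allowed()/put_mems_allowed() pair above is a seqcount-style
 * check: if the task's cpuset mems_allowed changed while the zonelist was
 * being walked and no page was found, put_mems_allowed() returns false and
 * the dequeue is retried against the updated node mask.
 */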
580 
581 static void update_and_free_page(struct hstate *h, struct page *page)
582 {
583 	int i;
584 
585 	VM_BUG_ON(h->order >= MAX_ORDER);
586 
587 	h->nr_huge_pages--;
588 	h->nr_huge_pages_node[page_to_nid(page)]--;
589 	for (i = 0; i < pages_per_huge_page(h); i++) {
590 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
591 				1 << PG_referenced | 1 << PG_dirty |
592 				1 << PG_active | 1 << PG_reserved |
593 				1 << PG_private | 1 << PG_writeback);
594 	}
595 	VM_BUG_ON(hugetlb_cgroup_from_page(page));
596 	set_compound_page_dtor(page, NULL);
597 	set_page_refcounted(page);
598 	arch_release_hugepage(page);
599 	__free_pages(page, huge_page_order(h));
600 }
601 
602 struct hstate *size_to_hstate(unsigned long size)
603 {
604 	struct hstate *h;
605 
606 	for_each_hstate(h) {
607 		if (huge_page_size(h) == size)
608 			return h;
609 	}
610 	return NULL;
611 }
612 
613 static void free_huge_page(struct page *page)
614 {
615 	/*
616 	 * Can't pass hstate in here because it is called from the
617 	 * compound page destructor.
618 	 */
619 	struct hstate *h = page_hstate(page);
620 	int nid = page_to_nid(page);
621 	struct hugepage_subpool *spool =
622 		(struct hugepage_subpool *)page_private(page);
623 
624 	set_page_private(page, 0);
625 	page->mapping = NULL;
626 	BUG_ON(page_count(page));
627 	BUG_ON(page_mapcount(page));
628 
629 	spin_lock(&hugetlb_lock);
630 	hugetlb_cgroup_uncharge_page(hstate_index(h),
631 				     pages_per_huge_page(h), page);
632 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
633 		/* remove the page from active list */
634 		list_del(&page->lru);
635 		update_and_free_page(h, page);
636 		h->surplus_huge_pages--;
637 		h->surplus_huge_pages_node[nid]--;
638 	} else {
639 		arch_clear_hugepage_flags(page);
640 		enqueue_huge_page(h, page);
641 	}
642 	spin_unlock(&hugetlb_lock);
643 	hugepage_subpool_put_pages(spool, 1);
644 }
645 
646 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
647 {
648 	INIT_LIST_HEAD(&page->lru);
649 	set_compound_page_dtor(page, free_huge_page);
650 	spin_lock(&hugetlb_lock);
651 	set_hugetlb_cgroup(page, NULL);
652 	h->nr_huge_pages++;
653 	h->nr_huge_pages_node[nid]++;
654 	spin_unlock(&hugetlb_lock);
655 	put_page(page); /* free it into the hugepage allocator */
656 }
657 
658 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
659 {
660 	int i;
661 	int nr_pages = 1 << order;
662 	struct page *p = page + 1;
663 
664 	/* we rely on prep_new_huge_page to set the destructor */
665 	set_compound_order(page, order);
666 	__SetPageHead(page);
667 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
668 		__SetPageTail(p);
669 		set_page_count(p, 0);
670 		p->first_page = page;
671 	}
672 }
673 
674 /*
675  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
676  * transparent huge pages.  See the PageTransHuge() documentation for more
677  * details.
678  */
679 int PageHuge(struct page *page)
680 {
681 	compound_page_dtor *dtor;
682 
683 	if (!PageCompound(page))
684 		return 0;
685 
686 	page = compound_head(page);
687 	dtor = get_compound_page_dtor(page);
688 
689 	return dtor == free_huge_page;
690 }
691 EXPORT_SYMBOL_GPL(PageHuge);
692 
693 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
694 {
695 	struct page *page;
696 
697 	if (h->order >= MAX_ORDER)
698 		return NULL;
699 
700 	page = alloc_pages_exact_node(nid,
701 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
702 						__GFP_REPEAT|__GFP_NOWARN,
703 		huge_page_order(h));
704 	if (page) {
705 		if (arch_prepare_hugepage(page)) {
706 			__free_pages(page, huge_page_order(h));
707 			return NULL;
708 		}
709 		prep_new_huge_page(h, page, nid);
710 	}
711 
712 	return page;
713 }
714 
715 /*
716  * common helper functions for hstate_next_node_to_{alloc|free}.
717  * We may have allocated or freed a huge page based on a different
718  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
719  * be outside of *nodes_allowed.  Ensure that we use an allowed
720  * node for alloc or free.
721  */
722 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
723 {
724 	nid = next_node(nid, *nodes_allowed);
725 	if (nid == MAX_NUMNODES)
726 		nid = first_node(*nodes_allowed);
727 	VM_BUG_ON(nid >= MAX_NUMNODES);
728 
729 	return nid;
730 }
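
/*
 * Example: with *nodes_allowed == {0, 2}, next_node_allowed(0, ...)
 * returns 2 and next_node_allowed(2, ...) wraps around to 0, which is the
 * round-robin order used by the alloc/free helpers below.
 */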
731 
732 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
733 {
734 	if (!node_isset(nid, *nodes_allowed))
735 		nid = next_node_allowed(nid, nodes_allowed);
736 	return nid;
737 }
738 
739 /*
740  * returns the previously saved node ["this node"] from which to
741  * allocate a persistent huge page for the pool and advance the
742  * next node from which to allocate, handling wrap at end of node
743  * mask.
744  */
745 static int hstate_next_node_to_alloc(struct hstate *h,
746 					nodemask_t *nodes_allowed)
747 {
748 	int nid;
749 
750 	VM_BUG_ON(!nodes_allowed);
751 
752 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
753 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
754 
755 	return nid;
756 }
757 
758 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
759 {
760 	struct page *page;
761 	int start_nid;
762 	int next_nid;
763 	int ret = 0;
764 
765 	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
766 	next_nid = start_nid;
767 
768 	do {
769 		page = alloc_fresh_huge_page_node(h, next_nid);
770 		if (page) {
771 			ret = 1;
772 			break;
773 		}
774 		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
775 	} while (next_nid != start_nid);
776 
777 	if (ret)
778 		count_vm_event(HTLB_BUDDY_PGALLOC);
779 	else
780 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
781 
782 	return ret;
783 }
784 
785 /*
786  * helper for free_pool_huge_page() - return the previously saved
787  * node ["this node"] from which to free a huge page.  Advance the
788  * next node id whether or not we find a free huge page to free so
789  * that the next attempt to free addresses the next node.
790  */
791 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
792 {
793 	int nid;
794 
795 	VM_BUG_ON(!nodes_allowed);
796 
797 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
798 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
799 
800 	return nid;
801 }
802 
803 /*
804  * Free huge page from pool from next node to free.
805  * Attempt to keep persistent huge pages more or less
806  * balanced over allowed nodes.
807  * Called with hugetlb_lock locked.
808  */
809 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
810 							 bool acct_surplus)
811 {
812 	int start_nid;
813 	int next_nid;
814 	int ret = 0;
815 
816 	start_nid = hstate_next_node_to_free(h, nodes_allowed);
817 	next_nid = start_nid;
818 
819 	do {
820 		/*
821 		 * If we're returning unused surplus pages, only examine
822 		 * nodes with surplus pages.
823 		 */
824 		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
825 		    !list_empty(&h->hugepage_freelists[next_nid])) {
826 			struct page *page =
827 				list_entry(h->hugepage_freelists[next_nid].next,
828 					  struct page, lru);
829 			list_del(&page->lru);
830 			h->free_huge_pages--;
831 			h->free_huge_pages_node[next_nid]--;
832 			if (acct_surplus) {
833 				h->surplus_huge_pages--;
834 				h->surplus_huge_pages_node[next_nid]--;
835 			}
836 			update_and_free_page(h, page);
837 			ret = 1;
838 			break;
839 		}
840 		next_nid = hstate_next_node_to_free(h, nodes_allowed);
841 	} while (next_nid != start_nid);
842 
843 	return ret;
844 }
845 
846 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
847 {
848 	struct page *page;
849 	unsigned int r_nid;
850 
851 	if (h->order >= MAX_ORDER)
852 		return NULL;
853 
854 	/*
855 	 * Assume we will successfully allocate the surplus page to
856 	 * prevent racing processes from causing the surplus to exceed
857 	 * overcommit.
858 	 *
859 	 * This however introduces a different race, where a process B
860 	 * tries to grow the static hugepage pool while alloc_pages() is
861 	 * called by process A. B will only examine the per-node
862 	 * counters in determining if surplus huge pages can be
863 	 * converted to normal huge pages in adjust_pool_surplus(). A
864 	 * won't be able to increment the per-node counter, until the
865 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
866 	 * no more huge pages can be converted from surplus to normal
867 	 * state (and doesn't try to convert again). Thus, we have a
868 	 * case where a surplus huge page exists, the pool is grown, and
869 	 * the surplus huge page still exists after, even though it
870 	 * should just have been converted to a normal huge page. This
871 	 * does not leak memory, though, as the hugepage will be freed
872 	 * once it is out of use. It also does not allow the counters to
873 	 * go out of whack in adjust_pool_surplus() as we don't modify
874 	 * the node values until we've gotten the hugepage and only the
875 	 * per-node value is checked there.
876 	 */
877 	spin_lock(&hugetlb_lock);
878 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
879 		spin_unlock(&hugetlb_lock);
880 		return NULL;
881 	} else {
882 		h->nr_huge_pages++;
883 		h->surplus_huge_pages++;
884 	}
885 	spin_unlock(&hugetlb_lock);
886 
887 	if (nid == NUMA_NO_NODE)
888 		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
889 				   __GFP_REPEAT|__GFP_NOWARN,
890 				   huge_page_order(h));
891 	else
892 		page = alloc_pages_exact_node(nid,
893 			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
894 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
895 
896 	if (page && arch_prepare_hugepage(page)) {
897 		__free_pages(page, huge_page_order(h));
898 		page = NULL;
899 	}
900 
901 	spin_lock(&hugetlb_lock);
902 	if (page) {
903 		INIT_LIST_HEAD(&page->lru);
904 		r_nid = page_to_nid(page);
905 		set_compound_page_dtor(page, free_huge_page);
906 		set_hugetlb_cgroup(page, NULL);
907 		/*
908 		 * We incremented the global counters already
909 		 */
910 		h->nr_huge_pages_node[r_nid]++;
911 		h->surplus_huge_pages_node[r_nid]++;
912 		__count_vm_event(HTLB_BUDDY_PGALLOC);
913 	} else {
914 		h->nr_huge_pages--;
915 		h->surplus_huge_pages--;
916 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
917 	}
918 	spin_unlock(&hugetlb_lock);
919 
920 	return page;
921 }
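
/*
 * Surplus pages allocated here are bounded by h->nr_overcommit_huge_pages,
 * which the administrator sets via /proc/sys/vm/nr_overcommit_hugepages or
 * the per-hstate nr_overcommit_hugepages sysfs attribute defined later in
 * this file.
 */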
922 
923 /*
924  * This allocation function is useful in the context where vma is irrelevant.
925  * E.g. soft-offlining uses this function because it only cares about the
926  * physical address of the error page.
927  */
928 struct page *alloc_huge_page_node(struct hstate *h, int nid)
929 {
930 	struct page *page;
931 
932 	spin_lock(&hugetlb_lock);
933 	page = dequeue_huge_page_node(h, nid);
934 	spin_unlock(&hugetlb_lock);
935 
936 	if (!page)
937 		page = alloc_buddy_huge_page(h, nid);
938 
939 	return page;
940 }
941 
942 /*
943  * Increase the hugetlb pool such that it can accommodate a reservation
944  * of size 'delta'.
945  */
946 static int gather_surplus_pages(struct hstate *h, int delta)
947 {
948 	struct list_head surplus_list;
949 	struct page *page, *tmp;
950 	int ret, i;
951 	int needed, allocated;
952 	bool alloc_ok = true;
953 
954 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
955 	if (needed <= 0) {
956 		h->resv_huge_pages += delta;
957 		return 0;
958 	}
959 
960 	allocated = 0;
961 	INIT_LIST_HEAD(&surplus_list);
962 
963 	ret = -ENOMEM;
964 retry:
965 	spin_unlock(&hugetlb_lock);
966 	for (i = 0; i < needed; i++) {
967 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
968 		if (!page) {
969 			alloc_ok = false;
970 			break;
971 		}
972 		list_add(&page->lru, &surplus_list);
973 	}
974 	allocated += i;
975 
976 	/*
977 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
978 	 * because either resv_huge_pages or free_huge_pages may have changed.
979 	 */
980 	spin_lock(&hugetlb_lock);
981 	needed = (h->resv_huge_pages + delta) -
982 			(h->free_huge_pages + allocated);
983 	if (needed > 0) {
984 		if (alloc_ok)
985 			goto retry;
986 		/*
987 		 * We were not able to allocate enough pages to
988 		 * satisfy the entire reservation so we free what
989 		 * we've allocated so far.
990 		 */
991 		goto free;
992 	}
993 	/*
994 	 * The surplus_list now contains _at_least_ the number of extra pages
995 	 * needed to accommodate the reservation.  Add the appropriate number
996 	 * of pages to the hugetlb pool and free the extras back to the buddy
997 	 * allocator.  Commit the entire reservation here to prevent another
998 	 * process from stealing the pages as they are added to the pool but
999 	 * before they are reserved.
1000 	 */
1001 	needed += allocated;
1002 	h->resv_huge_pages += delta;
1003 	ret = 0;
1004 
1005 	/* Free the needed pages to the hugetlb pool */
1006 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1007 		if ((--needed) < 0)
1008 			break;
1009 		/*
1010 		 * This page is now managed by the hugetlb allocator and has
1011 		 * no users -- drop the buddy allocator's reference.
1012 		 */
1013 		put_page_testzero(page);
1014 		VM_BUG_ON(page_count(page));
1015 		enqueue_huge_page(h, page);
1016 	}
1017 free:
1018 	spin_unlock(&hugetlb_lock);
1019 
1020 	/* Free unnecessary surplus pages to the buddy allocator */
1021 	if (!list_empty(&surplus_list)) {
1022 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1023 			put_page(page);
1024 		}
1025 	}
1026 	spin_lock(&hugetlb_lock);
1027 
1028 	return ret;
1029 }
1030 
1031 /*
1032  * When releasing a hugetlb pool reservation, any surplus pages that were
1033  * allocated to satisfy the reservation must be explicitly freed if they were
1034  * never used.
1035  * Called with hugetlb_lock held.
1036  */
1037 static void return_unused_surplus_pages(struct hstate *h,
1038 					unsigned long unused_resv_pages)
1039 {
1040 	unsigned long nr_pages;
1041 
1042 	/* Uncommit the reservation */
1043 	h->resv_huge_pages -= unused_resv_pages;
1044 
1045 	/* Cannot return gigantic pages currently */
1046 	if (h->order >= MAX_ORDER)
1047 		return;
1048 
1049 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1050 
1051 	/*
1052 	 * We want to release as many surplus pages as possible, spread
1053 	 * evenly across all nodes with memory. Iterate across these nodes
1054 	 * until we can no longer free unreserved surplus pages. This occurs
1055 	 * when the nodes with surplus pages have no free pages.
1056 	 * free_pool_huge_page() will balance the freed pages across the
1057 	 * on-line nodes with memory and will handle the hstate accounting.
1058 	 */
1059 	while (nr_pages--) {
1060 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1061 			break;
1062 	}
1063 }
1064 
1065 /*
1066  * Determine if the huge page at addr within the vma has an associated
1067  * reservation.  Where it does not we will need to logically increase
1068  * reservation and actually increase subpool usage before an allocation
1069  * can occur.  Where any new reservation would be required the
1070  * reservation change is prepared, but not committed.  Once the page
1071  * has been allocated from the subpool and instantiated the change should
1072  * be committed via vma_commit_reservation.  No action is required on
1073  * failure.
1074  */
1075 static long vma_needs_reservation(struct hstate *h,
1076 			struct vm_area_struct *vma, unsigned long addr)
1077 {
1078 	struct address_space *mapping = vma->vm_file->f_mapping;
1079 	struct inode *inode = mapping->host;
1080 
1081 	if (vma->vm_flags & VM_MAYSHARE) {
1082 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1083 		return region_chg(&inode->i_mapping->private_list,
1084 							idx, idx + 1);
1085 
1086 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1087 		return 1;
1088 
1089 	} else  {
1090 		long err;
1091 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1092 		struct resv_map *reservations = vma_resv_map(vma);
1093 
1094 		err = region_chg(&reservations->regions, idx, idx + 1);
1095 		if (err < 0)
1096 			return err;
1097 		return 0;
1098 	}
1099 }
1100 static void vma_commit_reservation(struct hstate *h,
1101 			struct vm_area_struct *vma, unsigned long addr)
1102 {
1103 	struct address_space *mapping = vma->vm_file->f_mapping;
1104 	struct inode *inode = mapping->host;
1105 
1106 	if (vma->vm_flags & VM_MAYSHARE) {
1107 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1108 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1109 
1110 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1111 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1112 		struct resv_map *reservations = vma_resv_map(vma);
1113 
1114 		/* Mark this page used in the map. */
1115 		region_add(&reservations->regions, idx, idx + 1);
1116 	}
1117 }
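
/*
 * The two helpers above form a prepare/commit pair: alloc_huge_page()
 * first calls vma_needs_reservation() to learn how many pages must be
 * charged to the subpool, and only once the huge page has actually been
 * obtained does it call vma_commit_reservation() to record the offset in
 * the region map.  A failed allocation therefore leaves the reservation
 * map untouched.
 */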
1118 
1119 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1120 				    unsigned long addr, int avoid_reserve)
1121 {
1122 	struct hugepage_subpool *spool = subpool_vma(vma);
1123 	struct hstate *h = hstate_vma(vma);
1124 	struct page *page;
1125 	long chg;
1126 	int ret, idx;
1127 	struct hugetlb_cgroup *h_cg;
1128 
1129 	idx = hstate_index(h);
1130 	/*
1131 	 * Processes that did not create the mapping will have no
1132 	 * reserves and will not have accounted against the subpool
1133 	 * limit. Check that the subpool limit can be made before
1134 	 * satisfying the allocation. MAP_NORESERVE mappings may also
1135 	 * need pages and subpool limit allocated if no reserve
1136 	 * mapping overlaps.
1137 	 */
1138 	chg = vma_needs_reservation(h, vma, addr);
1139 	if (chg < 0)
1140 		return ERR_PTR(-ENOMEM);
1141 	if (chg)
1142 		if (hugepage_subpool_get_pages(spool, chg))
1143 			return ERR_PTR(-ENOSPC);
1144 
1145 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1146 	if (ret) {
1147 		hugepage_subpool_put_pages(spool, chg);
1148 		return ERR_PTR(-ENOSPC);
1149 	}
1150 	spin_lock(&hugetlb_lock);
1151 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1152 	if (page) {
1153 		/* update page cgroup details */
1154 		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
1155 					     h_cg, page);
1156 		spin_unlock(&hugetlb_lock);
1157 	} else {
1158 		spin_unlock(&hugetlb_lock);
1159 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1160 		if (!page) {
1161 			hugetlb_cgroup_uncharge_cgroup(idx,
1162 						       pages_per_huge_page(h),
1163 						       h_cg);
1164 			hugepage_subpool_put_pages(spool, chg);
1165 			return ERR_PTR(-ENOSPC);
1166 		}
1167 		spin_lock(&hugetlb_lock);
1168 		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
1169 					     h_cg, page);
1170 		list_move(&page->lru, &h->hugepage_activelist);
1171 		spin_unlock(&hugetlb_lock);
1172 	}
1173 
1174 	set_page_private(page, (unsigned long)spool);
1175 
1176 	vma_commit_reservation(h, vma, addr);
1177 	return page;
1178 }
1179 
1180 int __weak alloc_bootmem_huge_page(struct hstate *h)
1181 {
1182 	struct huge_bootmem_page *m;
1183 	int nr_nodes = nodes_weight(node_states[N_MEMORY]);
1184 
1185 	while (nr_nodes) {
1186 		void *addr;
1187 
1188 		addr = __alloc_bootmem_node_nopanic(
1189 				NODE_DATA(hstate_next_node_to_alloc(h,
1190 						&node_states[N_MEMORY])),
1191 				huge_page_size(h), huge_page_size(h), 0);
1192 
1193 		if (addr) {
1194 			/*
1195 			 * Use the beginning of the huge page to store the
1196 			 * huge_bootmem_page struct (until gather_bootmem
1197 			 * puts them into the mem_map).
1198 			 */
1199 			m = addr;
1200 			goto found;
1201 		}
1202 		nr_nodes--;
1203 	}
1204 	return 0;
1205 
1206 found:
1207 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1208 	/* Put them into a private list first because mem_map is not up yet */
1209 	list_add(&m->list, &huge_boot_pages);
1210 	m->hstate = h;
1211 	return 1;
1212 }
1213 
1214 static void prep_compound_huge_page(struct page *page, int order)
1215 {
1216 	if (unlikely(order > (MAX_ORDER - 1)))
1217 		prep_compound_gigantic_page(page, order);
1218 	else
1219 		prep_compound_page(page, order);
1220 }
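
/*
 * Example: on x86-64 with the default MAX_ORDER of 11, a 2MB huge page
 * (order 9) is prepared with the normal prep_compound_page(), while a 1GB
 * gigantic page (order 18) takes the prep_compound_gigantic_page() path
 * and can only be allocated at boot time via alloc_bootmem_huge_page().
 */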
1221 
1222 /* Put bootmem huge pages into the standard lists after mem_map is up */
1223 static void __init gather_bootmem_prealloc(void)
1224 {
1225 	struct huge_bootmem_page *m;
1226 
1227 	list_for_each_entry(m, &huge_boot_pages, list) {
1228 		struct hstate *h = m->hstate;
1229 		struct page *page;
1230 
1231 #ifdef CONFIG_HIGHMEM
1232 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
1233 		free_bootmem_late((unsigned long)m,
1234 				  sizeof(struct huge_bootmem_page));
1235 #else
1236 		page = virt_to_page(m);
1237 #endif
1238 		__ClearPageReserved(page);
1239 		WARN_ON(page_count(page) != 1);
1240 		prep_compound_huge_page(page, h->order);
1241 		prep_new_huge_page(h, page, page_to_nid(page));
1242 		/*
1243 		 * If we had gigantic hugepages allocated at boot time, we need
1244 		 * to restore the 'stolen' pages to totalram_pages in order to
1245 		 * fix confusing memory reports from free(1) and other
1246 		 * side-effects, like CommitLimit going negative.
1247 		 */
1248 		if (h->order > (MAX_ORDER - 1))
1249 			totalram_pages += 1 << h->order;
1250 	}
1251 }
1252 
1253 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1254 {
1255 	unsigned long i;
1256 
1257 	for (i = 0; i < h->max_huge_pages; ++i) {
1258 		if (h->order >= MAX_ORDER) {
1259 			if (!alloc_bootmem_huge_page(h))
1260 				break;
1261 		} else if (!alloc_fresh_huge_page(h,
1262 					 &node_states[N_MEMORY]))
1263 			break;
1264 	}
1265 	h->max_huge_pages = i;
1266 }
1267 
1268 static void __init hugetlb_init_hstates(void)
1269 {
1270 	struct hstate *h;
1271 
1272 	for_each_hstate(h) {
1273 		/* oversize hugepages were init'ed in early boot */
1274 		if (h->order < MAX_ORDER)
1275 			hugetlb_hstate_alloc_pages(h);
1276 	}
1277 }
1278 
1279 static char * __init memfmt(char *buf, unsigned long n)
1280 {
1281 	if (n >= (1UL << 30))
1282 		sprintf(buf, "%lu GB", n >> 30);
1283 	else if (n >= (1UL << 20))
1284 		sprintf(buf, "%lu MB", n >> 20);
1285 	else
1286 		sprintf(buf, "%lu KB", n >> 10);
1287 	return buf;
1288 }
1289 
1290 static void __init report_hugepages(void)
1291 {
1292 	struct hstate *h;
1293 
1294 	for_each_hstate(h) {
1295 		char buf[32];
1296 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1297 			memfmt(buf, huge_page_size(h)),
1298 			h->free_huge_pages);
1299 	}
1300 }
1301 
1302 #ifdef CONFIG_HIGHMEM
1303 static void try_to_free_low(struct hstate *h, unsigned long count,
1304 						nodemask_t *nodes_allowed)
1305 {
1306 	int i;
1307 
1308 	if (h->order >= MAX_ORDER)
1309 		return;
1310 
1311 	for_each_node_mask(i, *nodes_allowed) {
1312 		struct page *page, *next;
1313 		struct list_head *freel = &h->hugepage_freelists[i];
1314 		list_for_each_entry_safe(page, next, freel, lru) {
1315 			if (count >= h->nr_huge_pages)
1316 				return;
1317 			if (PageHighMem(page))
1318 				continue;
1319 			list_del(&page->lru);
1320 			update_and_free_page(h, page);
1321 			h->free_huge_pages--;
1322 			h->free_huge_pages_node[page_to_nid(page)]--;
1323 		}
1324 	}
1325 }
1326 #else
1327 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1328 						nodemask_t *nodes_allowed)
1329 {
1330 }
1331 #endif
1332 
1333 /*
1334  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1335  * balanced by operating on them in a round-robin fashion.
1336  * Returns 1 if an adjustment was made.
1337  */
1338 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1339 				int delta)
1340 {
1341 	int start_nid, next_nid;
1342 	int ret = 0;
1343 
1344 	VM_BUG_ON(delta != -1 && delta != 1);
1345 
1346 	if (delta < 0)
1347 		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1348 	else
1349 		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1350 	next_nid = start_nid;
1351 
1352 	do {
1353 		int nid = next_nid;
1354 		if (delta < 0)  {
1355 			/*
1356 			 * To shrink on this node, there must be a surplus page
1357 			 */
1358 			if (!h->surplus_huge_pages_node[nid]) {
1359 				next_nid = hstate_next_node_to_alloc(h,
1360 								nodes_allowed);
1361 				continue;
1362 			}
1363 		}
1364 		if (delta > 0) {
1365 			/*
1366 			 * Surplus cannot exceed the total number of pages
1367 			 */
1368 			if (h->surplus_huge_pages_node[nid] >=
1369 						h->nr_huge_pages_node[nid]) {
1370 				next_nid = hstate_next_node_to_free(h,
1371 								nodes_allowed);
1372 				continue;
1373 			}
1374 		}
1375 
1376 		h->surplus_huge_pages += delta;
1377 		h->surplus_huge_pages_node[nid] += delta;
1378 		ret = 1;
1379 		break;
1380 	} while (next_nid != start_nid);
1381 
1382 	return ret;
1383 }
1384 
1385 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1386 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1387 						nodemask_t *nodes_allowed)
1388 {
1389 	unsigned long min_count, ret;
1390 
1391 	if (h->order >= MAX_ORDER)
1392 		return h->max_huge_pages;
1393 
1394 	/*
1395 	 * Increase the pool size
1396 	 * First take pages out of surplus state.  Then make up the
1397 	 * remaining difference by allocating fresh huge pages.
1398 	 *
1399 	 * We might race with alloc_buddy_huge_page() here and be unable
1400 	 * to convert a surplus huge page to a normal huge page. That is
1401 	 * not critical, though, it just means the overall size of the
1402 	 * pool might be one hugepage larger than it needs to be, but
1403 	 * within all the constraints specified by the sysctls.
1404 	 */
1405 	spin_lock(&hugetlb_lock);
1406 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1407 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1408 			break;
1409 	}
1410 
1411 	while (count > persistent_huge_pages(h)) {
1412 		/*
1413 		 * If this allocation races such that we no longer need the
1414 		 * page, free_huge_page will handle it by freeing the page
1415 		 * and reducing the surplus.
1416 		 */
1417 		spin_unlock(&hugetlb_lock);
1418 		ret = alloc_fresh_huge_page(h, nodes_allowed);
1419 		spin_lock(&hugetlb_lock);
1420 		if (!ret)
1421 			goto out;
1422 
1423 		/* Bail for signals. Probably ctrl-c from user */
1424 		if (signal_pending(current))
1425 			goto out;
1426 	}
1427 
1428 	/*
1429 	 * Decrease the pool size
1430 	 * First return free pages to the buddy allocator (being careful
1431 	 * to keep enough around to satisfy reservations).  Then place
1432 	 * pages into surplus state as needed so the pool will shrink
1433 	 * to the desired size as pages become free.
1434 	 *
1435 	 * By placing pages into the surplus state independent of the
1436 	 * overcommit value, we are allowing the surplus pool size to
1437 	 * exceed overcommit. There are few sane options here. Since
1438 	 * alloc_buddy_huge_page() is checking the global counter,
1439 	 * though, we'll note that we're not allowed to exceed surplus
1440 	 * and won't grow the pool anywhere else. Not until one of the
1441 	 * sysctls is changed, or the surplus pages go out of use.
1442 	 */
1443 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1444 	min_count = max(count, min_count);
1445 	try_to_free_low(h, min_count, nodes_allowed);
1446 	while (min_count < persistent_huge_pages(h)) {
1447 		if (!free_pool_huge_page(h, nodes_allowed, 0))
1448 			break;
1449 	}
1450 	while (count < persistent_huge_pages(h)) {
1451 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1452 			break;
1453 	}
1454 out:
1455 	ret = persistent_huge_pages(h);
1456 	spin_unlock(&hugetlb_lock);
1457 	return ret;
1458 }
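
/*
 * set_max_huge_pages() is the single entry point for resizing the pool:
 * it is reached from the nr_hugepages and nr_hugepages_mempolicy sysfs
 * attributes below and from the vm.nr_hugepages sysctl.  min_count is the
 * number of pages that are in use or reserved, so the pool is never shrunk
 * below outstanding commitments.
 */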
1459 
1460 #define HSTATE_ATTR_RO(_name) \
1461 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1462 
1463 #define HSTATE_ATTR(_name) \
1464 	static struct kobj_attribute _name##_attr = \
1465 		__ATTR(_name, 0644, _name##_show, _name##_store)
1466 
1467 static struct kobject *hugepages_kobj;
1468 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1469 
1470 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1471 
1472 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1477 		if (hstate_kobjs[i] == kobj) {
1478 			if (nidp)
1479 				*nidp = NUMA_NO_NODE;
1480 			return &hstates[i];
1481 		}
1482 
1483 	return kobj_to_node_hstate(kobj, nidp);
1484 }
1485 
1486 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1487 					struct kobj_attribute *attr, char *buf)
1488 {
1489 	struct hstate *h;
1490 	unsigned long nr_huge_pages;
1491 	int nid;
1492 
1493 	h = kobj_to_hstate(kobj, &nid);
1494 	if (nid == NUMA_NO_NODE)
1495 		nr_huge_pages = h->nr_huge_pages;
1496 	else
1497 		nr_huge_pages = h->nr_huge_pages_node[nid];
1498 
1499 	return sprintf(buf, "%lu\n", nr_huge_pages);
1500 }
1501 
1502 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1503 			struct kobject *kobj, struct kobj_attribute *attr,
1504 			const char *buf, size_t len)
1505 {
1506 	int err;
1507 	int nid;
1508 	unsigned long count;
1509 	struct hstate *h;
1510 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1511 
1512 	err = strict_strtoul(buf, 10, &count);
1513 	if (err)
1514 		goto out;
1515 
1516 	h = kobj_to_hstate(kobj, &nid);
1517 	if (h->order >= MAX_ORDER) {
1518 		err = -EINVAL;
1519 		goto out;
1520 	}
1521 
1522 	if (nid == NUMA_NO_NODE) {
1523 		/*
1524 		 * global hstate attribute
1525 		 */
1526 		if (!(obey_mempolicy &&
1527 				init_nodemask_of_mempolicy(nodes_allowed))) {
1528 			NODEMASK_FREE(nodes_allowed);
1529 			nodes_allowed = &node_states[N_MEMORY];
1530 		}
1531 	} else if (nodes_allowed) {
1532 		/*
1533 		 * per node hstate attribute: adjust count to global,
1534 		 * but restrict alloc/free to the specified node.
1535 		 */
1536 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1537 		init_nodemask_of_node(nodes_allowed, nid);
1538 	} else
1539 		nodes_allowed = &node_states[N_MEMORY];
1540 
1541 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1542 
1543 	if (nodes_allowed != &node_states[N_MEMORY])
1544 		NODEMASK_FREE(nodes_allowed);
1545 
1546 	return len;
1547 out:
1548 	NODEMASK_FREE(nodes_allowed);
1549 	return err;
1550 }
1551 
1552 static ssize_t nr_hugepages_show(struct kobject *kobj,
1553 				       struct kobj_attribute *attr, char *buf)
1554 {
1555 	return nr_hugepages_show_common(kobj, attr, buf);
1556 }
1557 
1558 static ssize_t nr_hugepages_store(struct kobject *kobj,
1559 	       struct kobj_attribute *attr, const char *buf, size_t len)
1560 {
1561 	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1562 }
1563 HSTATE_ATTR(nr_hugepages);
1564 
1565 #ifdef CONFIG_NUMA
1566 
1567 /*
1568  * hstate attribute for optionally mempolicy-based constraint on persistent
1569  * huge page alloc/free.
1570  */
1571 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1572 				       struct kobj_attribute *attr, char *buf)
1573 {
1574 	return nr_hugepages_show_common(kobj, attr, buf);
1575 }
1576 
1577 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1578 	       struct kobj_attribute *attr, const char *buf, size_t len)
1579 {
1580 	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1581 }
1582 HSTATE_ATTR(nr_hugepages_mempolicy);
1583 #endif
1584 
1585 
1586 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1587 					struct kobj_attribute *attr, char *buf)
1588 {
1589 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1590 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1591 }
1592 
1593 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1594 		struct kobj_attribute *attr, const char *buf, size_t count)
1595 {
1596 	int err;
1597 	unsigned long input;
1598 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1599 
1600 	if (h->order >= MAX_ORDER)
1601 		return -EINVAL;
1602 
1603 	err = strict_strtoul(buf, 10, &input);
1604 	if (err)
1605 		return err;
1606 
1607 	spin_lock(&hugetlb_lock);
1608 	h->nr_overcommit_huge_pages = input;
1609 	spin_unlock(&hugetlb_lock);
1610 
1611 	return count;
1612 }
1613 HSTATE_ATTR(nr_overcommit_hugepages);
1614 
1615 static ssize_t free_hugepages_show(struct kobject *kobj,
1616 					struct kobj_attribute *attr, char *buf)
1617 {
1618 	struct hstate *h;
1619 	unsigned long free_huge_pages;
1620 	int nid;
1621 
1622 	h = kobj_to_hstate(kobj, &nid);
1623 	if (nid == NUMA_NO_NODE)
1624 		free_huge_pages = h->free_huge_pages;
1625 	else
1626 		free_huge_pages = h->free_huge_pages_node[nid];
1627 
1628 	return sprintf(buf, "%lu\n", free_huge_pages);
1629 }
1630 HSTATE_ATTR_RO(free_hugepages);
1631 
1632 static ssize_t resv_hugepages_show(struct kobject *kobj,
1633 					struct kobj_attribute *attr, char *buf)
1634 {
1635 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1636 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1637 }
1638 HSTATE_ATTR_RO(resv_hugepages);
1639 
1640 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1641 					struct kobj_attribute *attr, char *buf)
1642 {
1643 	struct hstate *h;
1644 	unsigned long surplus_huge_pages;
1645 	int nid;
1646 
1647 	h = kobj_to_hstate(kobj, &nid);
1648 	if (nid == NUMA_NO_NODE)
1649 		surplus_huge_pages = h->surplus_huge_pages;
1650 	else
1651 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1652 
1653 	return sprintf(buf, "%lu\n", surplus_huge_pages);
1654 }
1655 HSTATE_ATTR_RO(surplus_hugepages);
1656 
1657 static struct attribute *hstate_attrs[] = {
1658 	&nr_hugepages_attr.attr,
1659 	&nr_overcommit_hugepages_attr.attr,
1660 	&free_hugepages_attr.attr,
1661 	&resv_hugepages_attr.attr,
1662 	&surplus_hugepages_attr.attr,
1663 #ifdef CONFIG_NUMA
1664 	&nr_hugepages_mempolicy_attr.attr,
1665 #endif
1666 	NULL,
1667 };
1668 
1669 static struct attribute_group hstate_attr_group = {
1670 	.attrs = hstate_attrs,
1671 };
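
/*
 * These attributes appear under /sys/kernel/mm/hugepages/hugepages-<size>kB/,
 * e.g. /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages for the 2MB
 * hstate on x86-64.
 */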
1672 
1673 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1674 				    struct kobject **hstate_kobjs,
1675 				    struct attribute_group *hstate_attr_group)
1676 {
1677 	int retval;
1678 	int hi = hstate_index(h);
1679 
1680 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1681 	if (!hstate_kobjs[hi])
1682 		return -ENOMEM;
1683 
1684 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1685 	if (retval)
1686 		kobject_put(hstate_kobjs[hi]);
1687 
1688 	return retval;
1689 }
1690 
1691 static void __init hugetlb_sysfs_init(void)
1692 {
1693 	struct hstate *h;
1694 	int err;
1695 
1696 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1697 	if (!hugepages_kobj)
1698 		return;
1699 
1700 	for_each_hstate(h) {
1701 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1702 					 hstate_kobjs, &hstate_attr_group);
1703 		if (err)
1704 			pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
1705 	}
1706 }
1707 
1708 #ifdef CONFIG_NUMA
1709 
1710 /*
1711  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1712  * with node devices in node_devices[] using a parallel array.  The array
1713  * index of a node device or _hstate == node id.
1714  * This is here to avoid any static dependency of the node device driver, in
1715  * the base kernel, on the hugetlb module.
1716  */
1717 struct node_hstate {
1718 	struct kobject		*hugepages_kobj;
1719 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1720 };
1721 struct node_hstate node_hstates[MAX_NUMNODES];
1722 
1723 /*
1724  * A subset of global hstate attributes for node devices
1725  */
1726 static struct attribute *per_node_hstate_attrs[] = {
1727 	&nr_hugepages_attr.attr,
1728 	&free_hugepages_attr.attr,
1729 	&surplus_hugepages_attr.attr,
1730 	NULL,
1731 };
1732 
1733 static struct attribute_group per_node_hstate_attr_group = {
1734 	.attrs = per_node_hstate_attrs,
1735 };
1736 
1737 /*
1738  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1739  * Returns node id via non-NULL nidp.
1740  */
1741 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1742 {
1743 	int nid;
1744 
1745 	for (nid = 0; nid < nr_node_ids; nid++) {
1746 		struct node_hstate *nhs = &node_hstates[nid];
1747 		int i;
1748 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1749 			if (nhs->hstate_kobjs[i] == kobj) {
1750 				if (nidp)
1751 					*nidp = nid;
1752 				return &hstates[i];
1753 			}
1754 	}
1755 
1756 	BUG();
1757 	return NULL;
1758 }
1759 
1760 /*
1761  * Unregister hstate attributes from a single node device.
1762  * No-op if no hstate attributes attached.
1763  */
1764 void hugetlb_unregister_node(struct node *node)
1765 {
1766 	struct hstate *h;
1767 	struct node_hstate *nhs = &node_hstates[node->dev.id];
1768 
1769 	if (!nhs->hugepages_kobj)
1770 		return;		/* no hstate attributes */
1771 
1772 	for_each_hstate(h) {
1773 		int idx = hstate_index(h);
1774 		if (nhs->hstate_kobjs[idx]) {
1775 			kobject_put(nhs->hstate_kobjs[idx]);
1776 			nhs->hstate_kobjs[idx] = NULL;
1777 		}
1778 	}
1779 
1780 	kobject_put(nhs->hugepages_kobj);
1781 	nhs->hugepages_kobj = NULL;
1782 }
1783 
1784 /*
1785  * hugetlb module exit:  unregister hstate attributes from node devices
1786  * that have them.
1787  */
1788 static void hugetlb_unregister_all_nodes(void)
1789 {
1790 	int nid;
1791 
1792 	/*
1793 	 * disable node device registrations.
1794 	 */
1795 	register_hugetlbfs_with_node(NULL, NULL);
1796 
1797 	/*
1798 	 * remove hstate attributes from any nodes that have them.
1799 	 */
1800 	for (nid = 0; nid < nr_node_ids; nid++)
1801 		hugetlb_unregister_node(node_devices[nid]);
1802 }
1803 
1804 /*
1805  * Register hstate attributes for a single node device.
1806  * No-op if attributes already registered.
1807  */
1808 void hugetlb_register_node(struct node *node)
1809 {
1810 	struct hstate *h;
1811 	struct node_hstate *nhs = &node_hstates[node->dev.id];
1812 	int err;
1813 
1814 	if (nhs->hugepages_kobj)
1815 		return;		/* already allocated */
1816 
1817 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1818 							&node->dev.kobj);
1819 	if (!nhs->hugepages_kobj)
1820 		return;
1821 
1822 	for_each_hstate(h) {
1823 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1824 						nhs->hstate_kobjs,
1825 						&per_node_hstate_attr_group);
1826 		if (err) {
1827 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
1828 				h->name, node->dev.id);
1829 			hugetlb_unregister_node(node);
1830 			break;
1831 		}
1832 	}
1833 }
1834 
1835 /*
1836  * hugetlb init time:  register hstate attributes for all registered node
1837  * devices of nodes that have memory.  All on-line nodes should have
1838  * registered their associated device by this time.
1839  */
1840 static void hugetlb_register_all_nodes(void)
1841 {
1842 	int nid;
1843 
1844 	for_each_node_state(nid, N_MEMORY) {
1845 		struct node *node = node_devices[nid];
1846 		if (node->dev.id == nid)
1847 			hugetlb_register_node(node);
1848 	}
1849 
1850 	/*
1851 	 * Let the node device driver know we're here so it can
1852 	 * [un]register hstate attributes on node hotplug.
1853 	 */
1854 	register_hugetlbfs_with_node(hugetlb_register_node,
1855 				     hugetlb_unregister_node);
1856 }
1857 #else	/* !CONFIG_NUMA */
1858 
1859 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1860 {
1861 	BUG();
1862 	if (nidp)
1863 		*nidp = -1;
1864 	return NULL;
1865 }
1866 
1867 static void hugetlb_unregister_all_nodes(void) { }
1868 
1869 static void hugetlb_register_all_nodes(void) { }
1870 
1871 #endif
1872 
1873 static void __exit hugetlb_exit(void)
1874 {
1875 	struct hstate *h;
1876 
1877 	hugetlb_unregister_all_nodes();
1878 
1879 	for_each_hstate(h) {
1880 		kobject_put(hstate_kobjs[hstate_index(h)]);
1881 	}
1882 
1883 	kobject_put(hugepages_kobj);
1884 }
1885 module_exit(hugetlb_exit);
1886 
1887 static int __init hugetlb_init(void)
1888 {
1889 	/* Some platforms decide whether they support huge pages at boot
1890 	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1891 	 * there is no such support.
1892 	 */
1893 	if (HPAGE_SHIFT == 0)
1894 		return 0;
1895 
1896 	if (!size_to_hstate(default_hstate_size)) {
1897 		default_hstate_size = HPAGE_SIZE;
1898 		if (!size_to_hstate(default_hstate_size))
1899 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1900 	}
1901 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1902 	if (default_hstate_max_huge_pages)
1903 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1904 
1905 	hugetlb_init_hstates();
1906 	gather_bootmem_prealloc();
1907 	report_hugepages();
1908 
1909 	hugetlb_sysfs_init();
1910 	hugetlb_register_all_nodes();
1911 	hugetlb_cgroup_file_init();
1912 
1913 	return 0;
1914 }
1915 module_init(hugetlb_init);
1916 
1917 /* Should be called on processing a hugepagesz=... option */
1918 void __init hugetlb_add_hstate(unsigned order)
1919 {
1920 	struct hstate *h;
1921 	unsigned long i;
1922 
1923 	if (size_to_hstate(PAGE_SIZE << order)) {
1924 		pr_warning("hugepagesz= specified twice, ignoring\n");
1925 		return;
1926 	}
1927 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1928 	BUG_ON(order == 0);
1929 	h = &hstates[hugetlb_max_hstate++];
1930 	h->order = order;
1931 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1932 	h->nr_huge_pages = 0;
1933 	h->free_huge_pages = 0;
1934 	for (i = 0; i < MAX_NUMNODES; ++i)
1935 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1936 	INIT_LIST_HEAD(&h->hugepage_activelist);
1937 	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
1938 	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
1939 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1940 					huge_page_size(h)/1024);
1941 
1942 	parsed_hstate = h;
1943 }
1944 
1945 static int __init hugetlb_nrpages_setup(char *s)
1946 {
1947 	unsigned long *mhp;
1948 	static unsigned long *last_mhp;
1949 
1950 	/*
1951 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1952 	 * so this hugepages= parameter goes to the "default hstate".
1953 	 */
1954 	if (!hugetlb_max_hstate)
1955 		mhp = &default_hstate_max_huge_pages;
1956 	else
1957 		mhp = &parsed_hstate->max_huge_pages;
1958 
1959 	if (mhp == last_mhp) {
1960 		pr_warning("hugepages= specified twice without "
1961 			   "interleaving hugepagesz=, ignoring\n");
1962 		return 1;
1963 	}
1964 
1965 	if (sscanf(s, "%lu", mhp) <= 0)
1966 		*mhp = 0;
1967 
1968 	/*
1969 	 * Global state is always initialized later in hugetlb_init.
1970 	 * But we need to allocate the pages for >= MAX_ORDER hstates here,
1971 	 * early enough to still use the bootmem allocator.
1972 	 */
1973 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1974 		hugetlb_hstate_alloc_pages(parsed_hstate);
1975 
1976 	last_mhp = mhp;
1977 
1978 	return 1;
1979 }
1980 __setup("hugepages=", hugetlb_nrpages_setup);
1981 
1982 static int __init hugetlb_default_setup(char *s)
1983 {
1984 	default_hstate_size = memparse(s, &s);
1985 	return 1;
1986 }
1987 __setup("default_hugepagesz=", hugetlb_default_setup);
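/*
 * Illustrative example: the three boot parameters handled above are
 * typically combined on the kernel command line, where each hugepages=
 * value applies to the most recently parsed hugepagesz= (or to the default
 * hstate if none has been given yet), e.g.:
 *
 *   default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * The sizes shown are x86_64 examples; the available sizes depend on the
 * architecture, and default_hugepagesz= accepts K/M/G suffixes via memparse().
 */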
1988 
1989 static unsigned int cpuset_mems_nr(unsigned int *array)
1990 {
1991 	int node;
1992 	unsigned int nr = 0;
1993 
1994 	for_each_node_mask(node, cpuset_current_mems_allowed)
1995 		nr += array[node];
1996 
1997 	return nr;
1998 }
1999 
2000 #ifdef CONFIG_SYSCTL
2001 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2002 			 struct ctl_table *table, int write,
2003 			 void __user *buffer, size_t *length, loff_t *ppos)
2004 {
2005 	struct hstate *h = &default_hstate;
2006 	unsigned long tmp;
2007 	int ret;
2008 
2009 	tmp = h->max_huge_pages;
2010 
2011 	if (write && h->order >= MAX_ORDER)
2012 		return -EINVAL;
2013 
2014 	table->data = &tmp;
2015 	table->maxlen = sizeof(unsigned long);
2016 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2017 	if (ret)
2018 		goto out;
2019 
2020 	if (write) {
2021 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2022 						GFP_KERNEL | __GFP_NORETRY);
2023 		if (!(obey_mempolicy &&
2024 			       init_nodemask_of_mempolicy(nodes_allowed))) {
2025 			NODEMASK_FREE(nodes_allowed);
2026 			nodes_allowed = &node_states[N_MEMORY];
2027 		}
2028 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2029 
2030 		if (nodes_allowed != &node_states[N_MEMORY])
2031 			NODEMASK_FREE(nodes_allowed);
2032 	}
2033 out:
2034 	return ret;
2035 }
2036 
2037 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2038 			  void __user *buffer, size_t *length, loff_t *ppos)
2039 {
2040 
2041 	return hugetlb_sysctl_handler_common(false, table, write,
2042 							buffer, length, ppos);
2043 }
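/*
 * Illustrative usage: this handler backs the vm.nr_hugepages sysctl, so the
 * persistent huge page pool can be resized at run time, e.g.:
 *
 *   echo 128 > /proc/sys/vm/nr_hugepages
 *   sysctl vm.nr_hugepages=128
 *
 * The value shown is an example only. Writes fail with -EINVAL when the
 * default huge page size is gigantic (order >= MAX_ORDER), since such pages
 * can only be allocated at boot time.
 */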
2044 
2045 #ifdef CONFIG_NUMA
2046 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2047 			  void __user *buffer, size_t *length, loff_t *ppos)
2048 {
2049 	return hugetlb_sysctl_handler_common(true, table, write,
2050 							buffer, length, ppos);
2051 }
2052 #endif /* CONFIG_NUMA */
2053 
2054 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2055 			void __user *buffer,
2056 			size_t *length, loff_t *ppos)
2057 {
2058 	proc_dointvec(table, write, buffer, length, ppos);
2059 	if (hugepages_treat_as_movable)
2060 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2061 	else
2062 		htlb_alloc_mask = GFP_HIGHUSER;
2063 	return 0;
2064 }
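/*
 * Illustrative note: hugepages_treat_as_movable is exposed as a sysctl;
 * setting it non-zero makes subsequent huge page allocations use
 * GFP_HIGHUSER_MOVABLE, so they may be placed in ZONE_MOVABLE, e.g.:
 *
 *   echo 1 > /proc/sys/vm/hugepages_treat_as_movable
 */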
2065 
2066 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2067 			void __user *buffer,
2068 			size_t *length, loff_t *ppos)
2069 {
2070 	struct hstate *h = &default_hstate;
2071 	unsigned long tmp;
2072 	int ret;
2073 
2074 	tmp = h->nr_overcommit_huge_pages;
2075 
2076 	if (write && h->order >= MAX_ORDER)
2077 		return -EINVAL;
2078 
2079 	table->data = &tmp;
2080 	table->maxlen = sizeof(unsigned long);
2081 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2082 	if (ret)
2083 		goto out;
2084 
2085 	if (write) {
2086 		spin_lock(&hugetlb_lock);
2087 		h->nr_overcommit_huge_pages = tmp;
2088 		spin_unlock(&hugetlb_lock);
2089 	}
2090 out:
2091 	return ret;
2092 }
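/*
 * Illustrative usage: this handler backs the vm.nr_overcommit_hugepages
 * sysctl, which bounds how many surplus huge pages may be allocated from
 * the buddy allocator beyond the persistent pool, e.g.:
 *
 *   echo 32 > /proc/sys/vm/nr_overcommit_hugepages
 *
 * The value shown is an example only.
 */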
2093 
2094 #endif /* CONFIG_SYSCTL */
2095 
2096 void hugetlb_report_meminfo(struct seq_file *m)
2097 {
2098 	struct hstate *h = &default_hstate;
2099 	seq_printf(m,
2100 			"HugePages_Total:   %5lu\n"
2101 			"HugePages_Free:    %5lu\n"
2102 			"HugePages_Rsvd:    %5lu\n"
2103 			"HugePages_Surp:    %5lu\n"
2104 			"Hugepagesize:   %8lu kB\n",
2105 			h->nr_huge_pages,
2106 			h->free_huge_pages,
2107 			h->resv_huge_pages,
2108 			h->surplus_huge_pages,
2109 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2110 }
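/*
 * Example of the /proc/meminfo section produced above (values and exact
 * spacing are illustrative only):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      384
 *   HugePages_Rsvd:       64
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */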
2111 
2112 int hugetlb_report_node_meminfo(int nid, char *buf)
2113 {
2114 	struct hstate *h = &default_hstate;
2115 	return sprintf(buf,
2116 		"Node %d HugePages_Total: %5u\n"
2117 		"Node %d HugePages_Free:  %5u\n"
2118 		"Node %d HugePages_Surp:  %5u\n",
2119 		nid, h->nr_huge_pages_node[nid],
2120 		nid, h->free_huge_pages_node[nid],
2121 		nid, h->surplus_huge_pages_node[nid]);
2122 }
2123 
2124 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2125 unsigned long hugetlb_total_pages(void)
2126 {
2127 	struct hstate *h = &default_hstate;
2128 	return h->nr_huge_pages * pages_per_huge_page(h);
2129 }
2130 
2131 static int hugetlb_acct_memory(struct hstate *h, long delta)
2132 {
2133 	int ret = -ENOMEM;
2134 
2135 	spin_lock(&hugetlb_lock);
2136 	/*
2137 	 * When cpuset is configured, it breaks the strict hugetlb page
2138 	 * reservation as the accounting is done on a global variable. Such
2139 	 * reservation is completely rubbish in the presence of cpuset because
2140 	 * the reservation is not checked against page availability for the
2141 	 * current cpuset. An application can still be OOM-killed by the
2142 	 * kernel for lack of free htlb pages in the cpuset that the task
2143 	 * is in. Attempting to enforce strict accounting with cpuset is
2144 	 * almost impossible (or too ugly) because cpusets are too fluid:
2145 	 * tasks and memory nodes can be dynamically moved between cpusets.
2146 	 *
2147 	 * The change of semantics for shared hugetlb mapping with cpuset is
2148 	 * undesirable. However, in order to preserve some of the semantics,
2149 	 * we fall back to checking against the current free page availability
2150 	 * as a best-effort attempt, hoping to minimize the impact of the
2151 	 * semantic change that cpuset introduces.
2152 	 */
2153 	if (delta > 0) {
2154 		if (gather_surplus_pages(h, delta) < 0)
2155 			goto out;
2156 
2157 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2158 			return_unused_surplus_pages(h, delta);
2159 			goto out;
2160 		}
2161 	}
2162 
2163 	ret = 0;
2164 	if (delta < 0)
2165 		return_unused_surplus_pages(h, (unsigned long) -delta);
2166 
2167 out:
2168 	spin_unlock(&hugetlb_lock);
2169 	return ret;
2170 }
2171 
2172 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2173 {
2174 	struct resv_map *reservations = vma_resv_map(vma);
2175 
2176 	/*
2177 	 * This new VMA should share its sibling's reservation map if present.
2178 	 * The VMA will only ever have a valid reservation map pointer where
2179 	 * it is being copied for another still existing VMA.  As that VMA
2180 	 * has a reference to the reservation map it cannot disappear until
2181 	 * after this open call completes.  It is therefore safe to take a
2182 	 * new reference here without additional locking.
2183 	 */
2184 	if (reservations)
2185 		kref_get(&reservations->refs);
2186 }
2187 
2188 static void resv_map_put(struct vm_area_struct *vma)
2189 {
2190 	struct resv_map *reservations = vma_resv_map(vma);
2191 
2192 	if (!reservations)
2193 		return;
2194 	kref_put(&reservations->refs, resv_map_release);
2195 }
2196 
2197 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2198 {
2199 	struct hstate *h = hstate_vma(vma);
2200 	struct resv_map *reservations = vma_resv_map(vma);
2201 	struct hugepage_subpool *spool = subpool_vma(vma);
2202 	unsigned long reserve;
2203 	unsigned long start;
2204 	unsigned long end;
2205 
2206 	if (reservations) {
2207 		start = vma_hugecache_offset(h, vma, vma->vm_start);
2208 		end = vma_hugecache_offset(h, vma, vma->vm_end);
2209 
2210 		reserve = (end - start) -
2211 			region_count(&reservations->regions, start, end);
2212 
2213 		resv_map_put(vma);
2214 
2215 		if (reserve) {
2216 			hugetlb_acct_memory(h, -reserve);
2217 			hugepage_subpool_put_pages(spool, reserve);
2218 		}
2219 	}
2220 }
2221 
2222 /*
2223  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2224  * handle_mm_fault() to try to instantiate regular-sized pages in the
2225  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2226  * this far.
2227  */
2228 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2229 {
2230 	BUG();
2231 	return 0;
2232 }
2233 
2234 const struct vm_operations_struct hugetlb_vm_ops = {
2235 	.fault = hugetlb_vm_op_fault,
2236 	.open = hugetlb_vm_op_open,
2237 	.close = hugetlb_vm_op_close,
2238 };
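/*
 * Note: hugetlbfs is expected to install these operations on huge page VMAs
 * when a huge page file is mmap()ed (vma->vm_ops = &hugetlb_vm_ops), so the
 * .fault handler above should never be reached through a normal fault path.
 */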
2239 
2240 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2241 				int writable)
2242 {
2243 	pte_t entry;
2244 
2245 	if (writable) {
2246 		entry =
2247 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2248 	} else {
2249 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2250 	}
2251 	entry = pte_mkyoung(entry);
2252 	entry = pte_mkhuge(entry);
2253 	entry = arch_make_huge_pte(entry, vma, page, writable);
2254 
2255 	return entry;
2256 }
2257 
2258 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2259 				   unsigned long address, pte_t *ptep)
2260 {
2261 	pte_t entry;
2262 
2263 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2264 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2265 		update_mmu_cache(vma, address, ptep);
2266 }
2267 
2268 
2269 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2270 			    struct vm_area_struct *vma)
2271 {
2272 	pte_t *src_pte, *dst_pte, entry;
2273 	struct page *ptepage;
2274 	unsigned long addr;
2275 	int cow;
2276 	struct hstate *h = hstate_vma(vma);
2277 	unsigned long sz = huge_page_size(h);
2278 
2279 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2280 
2281 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2282 		src_pte = huge_pte_offset(src, addr);
2283 		if (!src_pte)
2284 			continue;
2285 		dst_pte = huge_pte_alloc(dst, addr, sz);
2286 		if (!dst_pte)
2287 			goto nomem;
2288 
2289 		/* If the pagetables are shared don't copy or take references */
2290 		if (dst_pte == src_pte)
2291 			continue;
2292 
2293 		spin_lock(&dst->page_table_lock);
2294 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2295 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2296 			if (cow)
2297 				huge_ptep_set_wrprotect(src, addr, src_pte);
2298 			entry = huge_ptep_get(src_pte);
2299 			ptepage = pte_page(entry);
2300 			get_page(ptepage);
2301 			page_dup_rmap(ptepage);
2302 			set_huge_pte_at(dst, addr, dst_pte, entry);
2303 		}
2304 		spin_unlock(&src->page_table_lock);
2305 		spin_unlock(&dst->page_table_lock);
2306 	}
2307 	return 0;
2308 
2309 nomem:
2310 	return -ENOMEM;
2311 }
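/*
 * Note: copy_hugetlb_page_range() is invoked from copy_page_range() at
 * fork() time for hugetlb VMAs; for private writable mappings, the write
 * protection applied above is what later forces hugetlb_cow() on the first
 * write by either the parent or the child.
 */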
2312 
2313 static int is_hugetlb_entry_migration(pte_t pte)
2314 {
2315 	swp_entry_t swp;
2316 
2317 	if (huge_pte_none(pte) || pte_present(pte))
2318 		return 0;
2319 	swp = pte_to_swp_entry(pte);
2320 	if (non_swap_entry(swp) && is_migration_entry(swp))
2321 		return 1;
2322 	else
2323 		return 0;
2324 }
2325 
2326 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2327 {
2328 	swp_entry_t swp;
2329 
2330 	if (huge_pte_none(pte) || pte_present(pte))
2331 		return 0;
2332 	swp = pte_to_swp_entry(pte);
2333 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2334 		return 1;
2335 	else
2336 		return 0;
2337 }
2338 
2339 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2340 			    unsigned long start, unsigned long end,
2341 			    struct page *ref_page)
2342 {
2343 	int force_flush = 0;
2344 	struct mm_struct *mm = vma->vm_mm;
2345 	unsigned long address;
2346 	pte_t *ptep;
2347 	pte_t pte;
2348 	struct page *page;
2349 	struct hstate *h = hstate_vma(vma);
2350 	unsigned long sz = huge_page_size(h);
2351 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
2352 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
2353 
2354 	WARN_ON(!is_vm_hugetlb_page(vma));
2355 	BUG_ON(start & ~huge_page_mask(h));
2356 	BUG_ON(end & ~huge_page_mask(h));
2357 
2358 	tlb_start_vma(tlb, vma);
2359 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2360 again:
2361 	spin_lock(&mm->page_table_lock);
2362 	for (address = start; address < end; address += sz) {
2363 		ptep = huge_pte_offset(mm, address);
2364 		if (!ptep)
2365 			continue;
2366 
2367 		if (huge_pmd_unshare(mm, &address, ptep))
2368 			continue;
2369 
2370 		pte = huge_ptep_get(ptep);
2371 		if (huge_pte_none(pte))
2372 			continue;
2373 
2374 		/*
2375 		 * An HWPoisoned hugepage has already been unmapped and its reference dropped.
2376 		 */
2377 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2378 			pte_clear(mm, address, ptep);
2379 			continue;
2380 		}
2381 
2382 		page = pte_page(pte);
2383 		/*
2384 		 * If a reference page is supplied, it is because a specific
2385 		 * page is being unmapped, not a range. Ensure the page we
2386 		 * are about to unmap is the actual page of interest.
2387 		 */
2388 		if (ref_page) {
2389 			if (page != ref_page)
2390 				continue;
2391 
2392 			/*
2393 			 * Mark the VMA as having unmapped its page so that
2394 			 * future faults in this VMA will fail rather than
2395 			 * looking like data was lost
2396 			 */
2397 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2398 		}
2399 
2400 		pte = huge_ptep_get_and_clear(mm, address, ptep);
2401 		tlb_remove_tlb_entry(tlb, ptep, address);
2402 		if (pte_dirty(pte))
2403 			set_page_dirty(page);
2404 
2405 		page_remove_rmap(page);
2406 		force_flush = !__tlb_remove_page(tlb, page);
2407 		if (force_flush)
2408 			break;
2409 		/* Bail out after unmapping reference page if supplied */
2410 		if (ref_page)
2411 			break;
2412 	}
2413 	spin_unlock(&mm->page_table_lock);
2414 	/*
2415 	 * mmu_gather ran out of room to batch pages; we break out of
2416 	 * the PTE lock to avoid doing the potentially expensive TLB invalidate
2417 	 * and page-free while holding it.
2418 	 */
2419 	if (force_flush) {
2420 		force_flush = 0;
2421 		tlb_flush_mmu(tlb);
2422 		if (address < end && !ref_page)
2423 			goto again;
2424 	}
2425 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2426 	tlb_end_vma(tlb, vma);
2427 }
2428 
2429 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2430 			  struct vm_area_struct *vma, unsigned long start,
2431 			  unsigned long end, struct page *ref_page)
2432 {
2433 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
2434 
2435 	/*
2436 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2437 	 * test will fail on a vma being torn down, and not grab a page table
2438 	 * on its way out.  We're lucky that the flag has such an appropriate
2439 	 * name, and can in fact be safely cleared here. We could clear it
2440 	 * before the __unmap_hugepage_range above, but all that's necessary
2441 	 * is to clear it before releasing the i_mmap_mutex. This works
2442 	 * because in the context this is called, the VMA is about to be
2443 	 * destroyed and the i_mmap_mutex is held.
2444 	 */
2445 	vma->vm_flags &= ~VM_MAYSHARE;
2446 }
2447 
2448 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2449 			  unsigned long end, struct page *ref_page)
2450 {
2451 	struct mm_struct *mm;
2452 	struct mmu_gather tlb;
2453 
2454 	mm = vma->vm_mm;
2455 
2456 	tlb_gather_mmu(&tlb, mm, 0);
2457 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2458 	tlb_finish_mmu(&tlb, start, end);
2459 }
2460 
2461 /*
2462  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2463  * mapping it owns the reserve page for. The intention is to unmap the page
2464  * from other VMAs and let the children be SIGKILLed if they are faulting the
2465  * same region.
2466  */
2467 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2468 				struct page *page, unsigned long address)
2469 {
2470 	struct hstate *h = hstate_vma(vma);
2471 	struct vm_area_struct *iter_vma;
2472 	struct address_space *mapping;
2473 	pgoff_t pgoff;
2474 
2475 	/*
2476 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2477 	 * from page cache lookup which is in HPAGE_SIZE units.
2478 	 */
2479 	address = address & huge_page_mask(h);
2480 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2481 			vma->vm_pgoff;
2482 	mapping = file_inode(vma->vm_file)->i_mapping;
2483 
2484 	/*
2485 	 * Take the mapping lock for the duration of the table walk. As
2486 	 * this mapping should be shared between all the VMAs,
2487 	 * __unmap_hugepage_range() is called with the lock already held.
2488 	 */
2489 	mutex_lock(&mapping->i_mmap_mutex);
2490 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2491 		/* Do not unmap the current VMA */
2492 		if (iter_vma == vma)
2493 			continue;
2494 
2495 		/*
2496 		 * Unmap the page from other VMAs without their own reserves.
2497 		 * They get marked to be SIGKILLed if they fault in these
2498 		 * areas. This is because a future no-page fault on this VMA
2499 		 * could insert a zeroed page instead of the data existing
2500 		 * from the time of fork. This would look like data corruption
2501 		 */
2502 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2503 			unmap_hugepage_range(iter_vma, address,
2504 					     address + huge_page_size(h), page);
2505 	}
2506 	mutex_unlock(&mapping->i_mmap_mutex);
2507 
2508 	return 1;
2509 }
2510 
2511 /*
2512  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2513  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2514  * cannot race with other handlers or page migration.
2515  * Keep the pte_same checks anyway to make the transition from the mutex easier.
2516  */
2517 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2518 			unsigned long address, pte_t *ptep, pte_t pte,
2519 			struct page *pagecache_page)
2520 {
2521 	struct hstate *h = hstate_vma(vma);
2522 	struct page *old_page, *new_page;
2523 	int avoidcopy;
2524 	int outside_reserve = 0;
2525 	unsigned long mmun_start;	/* For mmu_notifiers */
2526 	unsigned long mmun_end;		/* For mmu_notifiers */
2527 
2528 	old_page = pte_page(pte);
2529 
2530 retry_avoidcopy:
2531 	/* If no-one else is actually using this page, avoid the copy
2532 	 * and just make the page writable */
2533 	avoidcopy = (page_mapcount(old_page) == 1);
2534 	if (avoidcopy) {
2535 		if (PageAnon(old_page))
2536 			page_move_anon_rmap(old_page, vma, address);
2537 		set_huge_ptep_writable(vma, address, ptep);
2538 		return 0;
2539 	}
2540 
2541 	/*
2542 	 * If the process that created a MAP_PRIVATE mapping is about to
2543 	 * perform a COW due to a shared page count, attempt to satisfy
2544 	 * the allocation without using the existing reserves. The pagecache
2545 	 * page is used to determine if the reserve at this address was
2546 	 * consumed or not. If reserves were used, a partial faulted mapping
2547 	 * at the time of fork() could consume its reserves on COW instead
2548 	 * of the full address range.
2549 	 */
2550 	if (!(vma->vm_flags & VM_MAYSHARE) &&
2551 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2552 			old_page != pagecache_page)
2553 		outside_reserve = 1;
2554 
2555 	page_cache_get(old_page);
2556 
2557 	/* Drop page_table_lock as buddy allocator may be called */
2558 	spin_unlock(&mm->page_table_lock);
2559 	new_page = alloc_huge_page(vma, address, outside_reserve);
2560 
2561 	if (IS_ERR(new_page)) {
2562 		long err = PTR_ERR(new_page);
2563 		page_cache_release(old_page);
2564 
2565 		/*
2566 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2567 		 * it is due to references held by a child and an insufficient
2568 		 * huge page pool. To guarantee the original mapper's
2569 		 * reliability, unmap the page from child processes. The child
2570 		 * may get SIGKILLed if it later faults.
2571 		 */
2572 		if (outside_reserve) {
2573 			BUG_ON(huge_pte_none(pte));
2574 			if (unmap_ref_private(mm, vma, old_page, address)) {
2575 				BUG_ON(huge_pte_none(pte));
2576 				spin_lock(&mm->page_table_lock);
2577 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2578 				if (likely(pte_same(huge_ptep_get(ptep), pte)))
2579 					goto retry_avoidcopy;
2580 				/*
2581 				 * A race occurred while re-acquiring the page_table_lock;
2582 				 * our job is done.
2583 				 */
2584 				return 0;
2585 			}
2586 			WARN_ON_ONCE(1);
2587 		}
2588 
2589 		/* Caller expects lock to be held */
2590 		spin_lock(&mm->page_table_lock);
2591 		if (err == -ENOMEM)
2592 			return VM_FAULT_OOM;
2593 		else
2594 			return VM_FAULT_SIGBUS;
2595 	}
2596 
2597 	/*
2598 	 * When the original hugepage is a shared one, it does not have an
2599 	 * anon_vma prepared.
2600 	 */
2601 	if (unlikely(anon_vma_prepare(vma))) {
2602 		page_cache_release(new_page);
2603 		page_cache_release(old_page);
2604 		/* Caller expects lock to be held */
2605 		spin_lock(&mm->page_table_lock);
2606 		return VM_FAULT_OOM;
2607 	}
2608 
2609 	copy_user_huge_page(new_page, old_page, address, vma,
2610 			    pages_per_huge_page(h));
2611 	__SetPageUptodate(new_page);
2612 
2613 	mmun_start = address & huge_page_mask(h);
2614 	mmun_end = mmun_start + huge_page_size(h);
2615 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2616 	/*
2617 	 * Retake the page_table_lock to check for racing updates
2618 	 * before the page tables are altered
2619 	 */
2620 	spin_lock(&mm->page_table_lock);
2621 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2622 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2623 		/* Break COW */
2624 		huge_ptep_clear_flush(vma, address, ptep);
2625 		set_huge_pte_at(mm, address, ptep,
2626 				make_huge_pte(vma, new_page, 1));
2627 		page_remove_rmap(old_page);
2628 		hugepage_add_new_anon_rmap(new_page, vma, address);
2629 		/* Make the old page be freed below */
2630 		new_page = old_page;
2631 	}
2632 	spin_unlock(&mm->page_table_lock);
2633 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2634 	/* Caller expects lock to be held */
2635 	spin_lock(&mm->page_table_lock);
2636 	page_cache_release(new_page);
2637 	page_cache_release(old_page);
2638 	return 0;
2639 }
2640 
2641 /* Return the pagecache page at a given address within a VMA */
2642 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2643 			struct vm_area_struct *vma, unsigned long address)
2644 {
2645 	struct address_space *mapping;
2646 	pgoff_t idx;
2647 
2648 	mapping = vma->vm_file->f_mapping;
2649 	idx = vma_hugecache_offset(h, vma, address);
2650 
2651 	return find_lock_page(mapping, idx);
2652 }
2653 
2654 /*
2655  * Return whether there is a pagecache page to back the given address within the VMA.
2656  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2657  */
2658 static bool hugetlbfs_pagecache_present(struct hstate *h,
2659 			struct vm_area_struct *vma, unsigned long address)
2660 {
2661 	struct address_space *mapping;
2662 	pgoff_t idx;
2663 	struct page *page;
2664 
2665 	mapping = vma->vm_file->f_mapping;
2666 	idx = vma_hugecache_offset(h, vma, address);
2667 
2668 	page = find_get_page(mapping, idx);
2669 	if (page)
2670 		put_page(page);
2671 	return page != NULL;
2672 }
2673 
2674 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2675 			unsigned long address, pte_t *ptep, unsigned int flags)
2676 {
2677 	struct hstate *h = hstate_vma(vma);
2678 	int ret = VM_FAULT_SIGBUS;
2679 	int anon_rmap = 0;
2680 	pgoff_t idx;
2681 	unsigned long size;
2682 	struct page *page;
2683 	struct address_space *mapping;
2684 	pte_t new_pte;
2685 
2686 	/*
2687 	 * Currently, we are forced to kill the process in the event the
2688 	 * original mapper has unmapped pages from the child due to a failed
2689 	 * COW. Warn that such a situation has occurred as it may not be obvious
2690 	 */
2691 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2692 		pr_warning("PID %d killed due to inadequate hugepage pool\n",
2693 			   current->pid);
2694 		return ret;
2695 	}
2696 
2697 	mapping = vma->vm_file->f_mapping;
2698 	idx = vma_hugecache_offset(h, vma, address);
2699 
2700 	/*
2701 	 * Use page lock to guard against racing truncation
2702 	 * before we get page_table_lock.
2703 	 */
2704 retry:
2705 	page = find_lock_page(mapping, idx);
2706 	if (!page) {
2707 		size = i_size_read(mapping->host) >> huge_page_shift(h);
2708 		if (idx >= size)
2709 			goto out;
2710 		page = alloc_huge_page(vma, address, 0);
2711 		if (IS_ERR(page)) {
2712 			ret = PTR_ERR(page);
2713 			if (ret == -ENOMEM)
2714 				ret = VM_FAULT_OOM;
2715 			else
2716 				ret = VM_FAULT_SIGBUS;
2717 			goto out;
2718 		}
2719 		clear_huge_page(page, address, pages_per_huge_page(h));
2720 		__SetPageUptodate(page);
2721 
2722 		if (vma->vm_flags & VM_MAYSHARE) {
2723 			int err;
2724 			struct inode *inode = mapping->host;
2725 
2726 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2727 			if (err) {
2728 				put_page(page);
2729 				if (err == -EEXIST)
2730 					goto retry;
2731 				goto out;
2732 			}
2733 
2734 			spin_lock(&inode->i_lock);
2735 			inode->i_blocks += blocks_per_huge_page(h);
2736 			spin_unlock(&inode->i_lock);
2737 		} else {
2738 			lock_page(page);
2739 			if (unlikely(anon_vma_prepare(vma))) {
2740 				ret = VM_FAULT_OOM;
2741 				goto backout_unlocked;
2742 			}
2743 			anon_rmap = 1;
2744 		}
2745 	} else {
2746 		/*
2747 		 * If a memory error occurs between mmap() and fault, some processes
2748 		 * may not have a hwpoisoned swap entry for the errored virtual address,
2749 		 * so we need to block hugepage faults with a PG_hwpoison bit check.
2750 		 */
2751 		if (unlikely(PageHWPoison(page))) {
2752 			ret = VM_FAULT_HWPOISON |
2753 				VM_FAULT_SET_HINDEX(hstate_index(h));
2754 			goto backout_unlocked;
2755 		}
2756 	}
2757 
2758 	/*
2759 	 * If we are going to COW a private mapping later, we examine the
2760 	 * pending reservations for this page now. This will ensure that
2761 	 * any allocations necessary to record that reservation occur outside
2762 	 * the spinlock.
2763 	 */
2764 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2765 		if (vma_needs_reservation(h, vma, address) < 0) {
2766 			ret = VM_FAULT_OOM;
2767 			goto backout_unlocked;
2768 		}
2769 
2770 	spin_lock(&mm->page_table_lock);
2771 	size = i_size_read(mapping->host) >> huge_page_shift(h);
2772 	if (idx >= size)
2773 		goto backout;
2774 
2775 	ret = 0;
2776 	if (!huge_pte_none(huge_ptep_get(ptep)))
2777 		goto backout;
2778 
2779 	if (anon_rmap)
2780 		hugepage_add_new_anon_rmap(page, vma, address);
2781 	else
2782 		page_dup_rmap(page);
2783 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2784 				&& (vma->vm_flags & VM_SHARED)));
2785 	set_huge_pte_at(mm, address, ptep, new_pte);
2786 
2787 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2788 		/* Optimization, do the COW without a second fault */
2789 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2790 	}
2791 
2792 	spin_unlock(&mm->page_table_lock);
2793 	unlock_page(page);
2794 out:
2795 	return ret;
2796 
2797 backout:
2798 	spin_unlock(&mm->page_table_lock);
2799 backout_unlocked:
2800 	unlock_page(page);
2801 	put_page(page);
2802 	goto out;
2803 }
2804 
2805 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2806 			unsigned long address, unsigned int flags)
2807 {
2808 	pte_t *ptep;
2809 	pte_t entry;
2810 	int ret;
2811 	struct page *page = NULL;
2812 	struct page *pagecache_page = NULL;
2813 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2814 	struct hstate *h = hstate_vma(vma);
2815 
2816 	address &= huge_page_mask(h);
2817 
2818 	ptep = huge_pte_offset(mm, address);
2819 	if (ptep) {
2820 		entry = huge_ptep_get(ptep);
2821 		if (unlikely(is_hugetlb_entry_migration(entry))) {
2822 			migration_entry_wait(mm, (pmd_t *)ptep, address);
2823 			return 0;
2824 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2825 			return VM_FAULT_HWPOISON_LARGE |
2826 				VM_FAULT_SET_HINDEX(hstate_index(h));
2827 	}
2828 
2829 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2830 	if (!ptep)
2831 		return VM_FAULT_OOM;
2832 
2833 	/*
2834 	 * Serialize hugepage allocation and instantiation, so that we don't
2835 	 * get spurious allocation failures if two CPUs race to instantiate
2836 	 * the same page in the page cache.
2837 	 */
2838 	mutex_lock(&hugetlb_instantiation_mutex);
2839 	entry = huge_ptep_get(ptep);
2840 	if (huge_pte_none(entry)) {
2841 		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2842 		goto out_mutex;
2843 	}
2844 
2845 	ret = 0;
2846 
2847 	/*
2848 	 * If we are going to COW the mapping later, we examine the pending
2849 	 * reservations for this page now. This will ensure that any
2850 	 * allocations necessary to record that reservation occur outside the
2851 	 * spinlock. For private mappings, we also lookup the pagecache
2852 	 * page now as it is used to determine if a reservation has been
2853 	 * consumed.
2854 	 */
2855 	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2856 		if (vma_needs_reservation(h, vma, address) < 0) {
2857 			ret = VM_FAULT_OOM;
2858 			goto out_mutex;
2859 		}
2860 
2861 		if (!(vma->vm_flags & VM_MAYSHARE))
2862 			pagecache_page = hugetlbfs_pagecache_page(h,
2863 								vma, address);
2864 	}
2865 
2866 	/*
2867 	 * hugetlb_cow() requires page locks of pte_page(entry) and
2868 	 * pagecache_page, so here we need to take the former one
2869 	 * when page != pagecache_page or !pagecache_page.
2870 	 * Note that locking order is always pagecache_page -> page,
2871 	 * so no worry about deadlock.
2872 	 */
2873 	page = pte_page(entry);
2874 	get_page(page);
2875 	if (page != pagecache_page)
2876 		lock_page(page);
2877 
2878 	spin_lock(&mm->page_table_lock);
2879 	/* Check for a racing update before calling hugetlb_cow */
2880 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2881 		goto out_page_table_lock;
2882 
2883 
2884 	if (flags & FAULT_FLAG_WRITE) {
2885 		if (!pte_write(entry)) {
2886 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2887 							pagecache_page);
2888 			goto out_page_table_lock;
2889 		}
2890 		entry = pte_mkdirty(entry);
2891 	}
2892 	entry = pte_mkyoung(entry);
2893 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2894 						flags & FAULT_FLAG_WRITE))
2895 		update_mmu_cache(vma, address, ptep);
2896 
2897 out_page_table_lock:
2898 	spin_unlock(&mm->page_table_lock);
2899 
2900 	if (pagecache_page) {
2901 		unlock_page(pagecache_page);
2902 		put_page(pagecache_page);
2903 	}
2904 	if (page != pagecache_page)
2905 		unlock_page(page);
2906 	put_page(page);
2907 
2908 out_mutex:
2909 	mutex_unlock(&hugetlb_instantiation_mutex);
2910 
2911 	return ret;
2912 }
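/*
 * Illustrative userspace sketch: touching an anonymous MAP_HUGETLB mapping
 * is one way to drive the path above; the first store to each huge page is
 * expected to reach hugetlb_fault() and then hugetlb_no_page(). Sizes and
 * flags below are examples only.
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2 * 1024 * 1024;	// one 2MB huge page (example size)
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		memset(p, 0, len);		// faults in the huge page
 *		return munmap(p, len);
 *	}
 */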
2913 
2914 /* Can be overridden by architectures */
2915 __attribute__((weak)) struct page *
2916 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2917 	       pud_t *pud, int write)
2918 {
2919 	BUG();
2920 	return NULL;
2921 }
2922 
2923 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2924 			 struct page **pages, struct vm_area_struct **vmas,
2925 			 unsigned long *position, unsigned long *nr_pages,
2926 			 long i, unsigned int flags)
2927 {
2928 	unsigned long pfn_offset;
2929 	unsigned long vaddr = *position;
2930 	unsigned long remainder = *nr_pages;
2931 	struct hstate *h = hstate_vma(vma);
2932 
2933 	spin_lock(&mm->page_table_lock);
2934 	while (vaddr < vma->vm_end && remainder) {
2935 		pte_t *pte;
2936 		int absent;
2937 		struct page *page;
2938 
2939 		/*
2940 		 * Some archs (sparc64, sh*) have multiple pte_ts to
2941 		 * each hugepage.  We have to make sure we get the
2942 		 * first, for the page indexing below to work.
2943 		 */
2944 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2945 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2946 
2947 		/*
2948 		 * When coredumping, it suits get_dump_page if we just return
2949 		 * an error where there's an empty slot with no huge pagecache
2950 		 * to back it.  This way, we avoid allocating a hugepage, and
2951 		 * the sparse dumpfile avoids allocating disk blocks, but its
2952 		 * huge holes still show up with zeroes where they need to be.
2953 		 */
2954 		if (absent && (flags & FOLL_DUMP) &&
2955 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2956 			remainder = 0;
2957 			break;
2958 		}
2959 
2960 		if (absent ||
2961 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2962 			int ret;
2963 
2964 			spin_unlock(&mm->page_table_lock);
2965 			ret = hugetlb_fault(mm, vma, vaddr,
2966 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2967 			spin_lock(&mm->page_table_lock);
2968 			if (!(ret & VM_FAULT_ERROR))
2969 				continue;
2970 
2971 			remainder = 0;
2972 			break;
2973 		}
2974 
2975 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2976 		page = pte_page(huge_ptep_get(pte));
2977 same_page:
2978 		if (pages) {
2979 			pages[i] = mem_map_offset(page, pfn_offset);
2980 			get_page(pages[i]);
2981 		}
2982 
2983 		if (vmas)
2984 			vmas[i] = vma;
2985 
2986 		vaddr += PAGE_SIZE;
2987 		++pfn_offset;
2988 		--remainder;
2989 		++i;
2990 		if (vaddr < vma->vm_end && remainder &&
2991 				pfn_offset < pages_per_huge_page(h)) {
2992 			/*
2993 			 * We use pfn_offset to avoid touching the pageframes
2994 			 * of this compound page.
2995 			 */
2996 			goto same_page;
2997 		}
2998 	}
2999 	spin_unlock(&mm->page_table_lock);
3000 	*nr_pages = remainder;
3001 	*position = vaddr;
3002 
3003 	return i ? i : -EFAULT;
3004 }
3005 
3006 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3007 		unsigned long address, unsigned long end, pgprot_t newprot)
3008 {
3009 	struct mm_struct *mm = vma->vm_mm;
3010 	unsigned long start = address;
3011 	pte_t *ptep;
3012 	pte_t pte;
3013 	struct hstate *h = hstate_vma(vma);
3014 	unsigned long pages = 0;
3015 
3016 	BUG_ON(address >= end);
3017 	flush_cache_range(vma, address, end);
3018 
3019 	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3020 	spin_lock(&mm->page_table_lock);
3021 	for (; address < end; address += huge_page_size(h)) {
3022 		ptep = huge_pte_offset(mm, address);
3023 		if (!ptep)
3024 			continue;
3025 		if (huge_pmd_unshare(mm, &address, ptep)) {
3026 			pages++;
3027 			continue;
3028 		}
3029 		if (!huge_pte_none(huge_ptep_get(ptep))) {
3030 			pte = huge_ptep_get_and_clear(mm, address, ptep);
3031 			pte = pte_mkhuge(pte_modify(pte, newprot));
3032 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
3033 			set_huge_pte_at(mm, address, ptep, pte);
3034 			pages++;
3035 		}
3036 	}
3037 	spin_unlock(&mm->page_table_lock);
3038 	/*
3039 	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3040 	 * may have cleared our pud entry and done put_page on the page table:
3041 	 * once we release i_mmap_mutex, another task can do the final put_page
3042 	 * and that page table be reused and filled with junk.
3043 	 */
3044 	flush_tlb_range(vma, start, end);
3045 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3046 
3047 	return pages << h->order;
3048 }
3049 
3050 int hugetlb_reserve_pages(struct inode *inode,
3051 					long from, long to,
3052 					struct vm_area_struct *vma,
3053 					vm_flags_t vm_flags)
3054 {
3055 	long ret, chg;
3056 	struct hstate *h = hstate_inode(inode);
3057 	struct hugepage_subpool *spool = subpool_inode(inode);
3058 
3059 	/*
3060 	 * Only apply hugepage reservation if asked. At fault time, an
3061 	 * attempt will be made for VM_NORESERVE to allocate a page
3062 	 * without using reserves
3063 	 */
3064 	if (vm_flags & VM_NORESERVE)
3065 		return 0;
3066 
3067 	/*
3068 	 * Shared mappings base their reservation on the number of pages that
3069 	 * are already allocated on behalf of the file. Private mappings need
3070 	 * to reserve the full area even if read-only as mprotect() may be
3071 	 * called to make the mapping read-write. Assume !vma is a shm mapping
3072 	 */
3073 	if (!vma || vma->vm_flags & VM_MAYSHARE)
3074 		chg = region_chg(&inode->i_mapping->private_list, from, to);
3075 	else {
3076 		struct resv_map *resv_map = resv_map_alloc();
3077 		if (!resv_map)
3078 			return -ENOMEM;
3079 
3080 		chg = to - from;
3081 
3082 		set_vma_resv_map(vma, resv_map);
3083 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3084 	}
3085 
3086 	if (chg < 0) {
3087 		ret = chg;
3088 		goto out_err;
3089 	}
3090 
3091 	/* There must be enough pages in the subpool for the mapping */
3092 	if (hugepage_subpool_get_pages(spool, chg)) {
3093 		ret = -ENOSPC;
3094 		goto out_err;
3095 	}
3096 
3097 	/*
3098 	 * Check enough hugepages are available for the reservation.
3099 	 * Hand the pages back to the subpool if there are not
3100 	 */
3101 	ret = hugetlb_acct_memory(h, chg);
3102 	if (ret < 0) {
3103 		hugepage_subpool_put_pages(spool, chg);
3104 		goto out_err;
3105 	}
3106 
3107 	/*
3108 	 * Account for the reservations made. Shared mappings record regions
3109 	 * that have reservations as they are shared by multiple VMAs.
3110 	 * When the last VMA disappears, the region map says how much
3111 	 * the reservation was and the page cache tells how much of
3112 	 * the reservation was consumed. Private mappings are per-VMA and
3113 	 * only the consumed reservations are tracked. When the VMA
3114 	 * disappears, the original reservation is the VMA size and the
3115 	 * consumed reservations are stored in the map. Hence, nothing
3116 	 * else has to be done for private mappings here
3117 	 */
3118 	if (!vma || vma->vm_flags & VM_MAYSHARE)
3119 		region_add(&inode->i_mapping->private_list, from, to);
3120 	return 0;
3121 out_err:
3122 	if (vma)
3123 		resv_map_put(vma);
3124 	return ret;
3125 }
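/*
 * Note: hugetlb_reserve_pages() is expected to be called from hugetlbfs at
 * mmap()/file setup time (e.g. hugetlbfs_file_mmap() and hugetlb_file_setup()),
 * so the "enough pages" check happens up front rather than at fault time;
 * hugetlb_unreserve_pages() below undoes the accounting when the file is
 * truncated or removed.
 */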
3126 
3127 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3128 {
3129 	struct hstate *h = hstate_inode(inode);
3130 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
3131 	struct hugepage_subpool *spool = subpool_inode(inode);
3132 
3133 	spin_lock(&inode->i_lock);
3134 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3135 	spin_unlock(&inode->i_lock);
3136 
3137 	hugepage_subpool_put_pages(spool, (chg - freed));
3138 	hugetlb_acct_memory(h, -(chg - freed));
3139 }
3140 
3141 #ifdef CONFIG_MEMORY_FAILURE
3142 
3143 /* Should be called with hugetlb_lock held */
3144 static int is_hugepage_on_freelist(struct page *hpage)
3145 {
3146 	struct page *page;
3147 	struct page *tmp;
3148 	struct hstate *h = page_hstate(hpage);
3149 	int nid = page_to_nid(hpage);
3150 
3151 	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3152 		if (page == hpage)
3153 			return 1;
3154 	return 0;
3155 }
3156 
3157 /*
3158  * This function is called from memory failure code.
3159  * Assume the caller holds page lock of the head page.
3160  */
3161 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3162 {
3163 	struct hstate *h = page_hstate(hpage);
3164 	int nid = page_to_nid(hpage);
3165 	int ret = -EBUSY;
3166 
3167 	spin_lock(&hugetlb_lock);
3168 	if (is_hugepage_on_freelist(hpage)) {
3169 		/*
3170 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
3171 		 * but dangling hpage->lru can trigger list-debug warnings
3172 		 * (this happens when we call unpoison_memory() on it),
3173 		 * so let it point to itself with list_del_init().
3174 		 */
3175 		list_del_init(&hpage->lru);
3176 		set_page_refcounted(hpage);
3177 		h->free_huge_pages--;
3178 		h->free_huge_pages_node[nid]--;
3179 		ret = 0;
3180 	}
3181 	spin_unlock(&hugetlb_lock);
3182 	return ret;
3183 }
3184 #endif
3185