xref: /openbmc/linux/mm/hugetlb.c (revision 9cfc5c90)
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27 
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31 
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37 
38 int hugepages_treat_as_movable;
39 
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43 /*
44  * Minimum page order among possible hugepage sizes, set to a proper value
45  * at boot time.
46  */
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
48 
49 __initdata LIST_HEAD(huge_boot_pages);
50 
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
55 
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61 
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68 
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71 
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
75 
76 	spin_unlock(&spool->lock);
77 
78 	/* If no pages are used, and no other handles to the subpool
79 	 * remain, give up any reservations based on minimum size and
80 	 * free the subpool */
81 	if (free) {
82 		if (spool->min_hpages != -1)
83 			hugetlb_acct_memory(spool->hstate,
84 						-spool->min_hpages);
85 		kfree(spool);
86 	}
87 }
88 
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 						long min_hpages)
91 {
92 	struct hugepage_subpool *spool;
93 
94 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95 	if (!spool)
96 		return NULL;
97 
98 	spin_lock_init(&spool->lock);
99 	spool->count = 1;
100 	spool->max_hpages = max_hpages;
101 	spool->hstate = h;
102 	spool->min_hpages = min_hpages;
103 
104 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 		kfree(spool);
106 		return NULL;
107 	}
108 	spool->rsv_hpages = min_hpages;
109 
110 	return spool;
111 }
112 
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115 	spin_lock(&spool->lock);
116 	BUG_ON(!spool->count);
117 	spool->count--;
118 	unlock_or_release_subpool(spool);
119 }
120 
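/*
 * Illustrative sketch (comment only, not part of the build): the expected
 * subpool lifecycle as seen from a caller such as hugetlbfs.  The limit
 * values below are hypothetical.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, 1024, 16);  // max/min in huge pages
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);  // freed once count and used_hpages
 *	                              // both reach zero
 */
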
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130 				      long delta)
131 {
132 	long ret = delta;
133 
134 	if (!spool)
135 		return ret;
136 
137 	spin_lock(&spool->lock);
138 
139 	if (spool->max_hpages != -1) {		/* maximum size accounting */
140 		if ((spool->used_hpages + delta) <= spool->max_hpages)
141 			spool->used_hpages += delta;
142 		else {
143 			ret = -ENOMEM;
144 			goto unlock_ret;
145 		}
146 	}
147 
148 	if (spool->min_hpages != -1) {		/* minimum size accounting */
149 		if (delta > spool->rsv_hpages) {
150 			/*
151 			 * Asking for more reserves than those already taken on
152 			 * behalf of the subpool.  Return the difference.
153 			 */
154 			ret = delta - spool->rsv_hpages;
155 			spool->rsv_hpages = 0;
156 		} else {
157 			ret = 0;	/* reserves already accounted for */
158 			spool->rsv_hpages -= delta;
159 		}
160 	}
161 
162 unlock_ret:
163 	spin_unlock(&spool->lock);
164 	return ret;
165 }
166 
167 /*
168  * Subpool accounting for freeing and unreserving pages.
169  * Return the number of global page reservations that must be dropped.
170  * The return value may only be different than the passed value (delta)
171  * in the case where a subpool minimum size must be maintained.
172  */
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
174 				       long delta)
175 {
176 	long ret = delta;
177 
178 	if (!spool)
179 		return delta;
180 
181 	spin_lock(&spool->lock);
182 
183 	if (spool->max_hpages != -1)		/* maximum size accounting */
184 		spool->used_hpages -= delta;
185 
186 	if (spool->min_hpages != -1) {		/* minimum size accounting */
187 		if (spool->rsv_hpages + delta <= spool->min_hpages)
188 			ret = 0;
189 		else
190 			ret = spool->rsv_hpages + delta - spool->min_hpages;
191 
192 		spool->rsv_hpages += delta;
193 		if (spool->rsv_hpages > spool->min_hpages)
194 			spool->rsv_hpages = spool->min_hpages;
195 	}
196 
197 	/*
198 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
199 	 * quota reference, free it now.
200 	 */
201 	unlock_or_release_subpool(spool);
202 
203 	return ret;
204 }
205 
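/*
 * Worked example of the accounting above (illustrative numbers), for a
 * subpool created with min_hpages = 2 and no maximum:
 *
 *	hugepage_subpool_get_pages(spool, 1);  // returns 0: fully covered by
 *	                                       // the reserve, rsv_hpages -> 1
 *	hugepage_subpool_get_pages(spool, 3);  // returns 2: one page from the
 *	                                       // remaining reserve, two must
 *	                                       // be charged to the global pools
 *	hugepage_subpool_put_pages(spool, 4);  // returns 2: rsv_hpages refills
 *	                                       // to min_hpages, two global
 *	                                       // reservations are dropped
 */
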
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
207 {
208 	return HUGETLBFS_SB(inode->i_sb)->spool;
209 }
210 
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
212 {
213 	return subpool_inode(file_inode(vma->vm_file));
214 }
215 
216 /*
217  * Region tracking -- allows tracking of reservations and instantiated pages
218  *                    across the pages in a mapping.
219  *
220  * The region data structures are embedded into a resv_map and protected
221  * by a resv_map's lock.  The set of regions within the resv_map represent
222  * reservations for huge pages, or huge pages that have already been
223  * instantiated within the map.  The from and to elements are huge page
224  * indices into the associated mapping.  from indicates the starting index
225  * of the region.  to represents the first index past the end of the region.
226  *
227  * For example, a file region structure with from == 0 and to == 4 represents
228  * four huge pages in a mapping.  It is important to note that the to element
229  * represents the first element past the end of the region. This is used in
230  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
231  *
232  * Interval notation of the form [from, to) will be used to indicate that
233  * the endpoint from is inclusive and to is exclusive.
234  */
235 struct file_region {
236 	struct list_head link;
237 	long from;
238 	long to;
239 };
240 
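/*
 * Worked example (illustrative): a resv_map whose region list holds
 * [0, 4) and [6, 9) represents 4 + 3 = 7 huge pages.  For that map,
 * region_count(resv, 2, 7) below would return 3: pages 2 and 3 from the
 * first region plus page 6 from the second.
 */
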
241 /*
242  * Add the huge page range represented by [f, t) to the reserve
243  * map.  In the normal case, existing regions will be expanded
244  * to accommodate the specified range.  Sufficient regions should
245  * exist for expansion due to the previous call to region_chg
246  * with the same range.  However, it is possible that region_del
247  * could have been called after region_chg and modified the map
248  * in such a way that no region exists to be expanded.  In this
249  * case, pull a region descriptor from the cache associated with
250  * the map and use that for the new range.
251  *
252  * Return the number of new huge pages added to the map.  This
253  * number is greater than or equal to zero.
254  */
255 static long region_add(struct resv_map *resv, long f, long t)
256 {
257 	struct list_head *head = &resv->regions;
258 	struct file_region *rg, *nrg, *trg;
259 	long add = 0;
260 
261 	spin_lock(&resv->lock);
262 	/* Locate the region we are either in or before. */
263 	list_for_each_entry(rg, head, link)
264 		if (f <= rg->to)
265 			break;
266 
267 	/*
268 	 * If no region exists which can be expanded to include the
269 	 * specified range, the list must have been modified by an
270 	 * interleaving call to region_del().  Pull a region descriptor
271 	 * from the cache and use it for this range.
272 	 */
273 	if (&rg->link == head || t < rg->from) {
274 		VM_BUG_ON(resv->region_cache_count <= 0);
275 
276 		resv->region_cache_count--;
277 		nrg = list_first_entry(&resv->region_cache, struct file_region,
278 					link);
279 		list_del(&nrg->link);
280 
281 		nrg->from = f;
282 		nrg->to = t;
283 		list_add(&nrg->link, rg->link.prev);
284 
285 		add += t - f;
286 		goto out_locked;
287 	}
288 
289 	/* Round our left edge to the current segment if it encloses us. */
290 	if (f > rg->from)
291 		f = rg->from;
292 
293 	/* Check for and consume any regions we now overlap with. */
294 	nrg = rg;
295 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
296 		if (&rg->link == head)
297 			break;
298 		if (rg->from > t)
299 			break;
300 
301 		/* If this area reaches higher, then extend our area to
302 		 * include it completely.  If this is not the first area
303 		 * which we intend to reuse, free it. */
304 		if (rg->to > t)
305 			t = rg->to;
306 		if (rg != nrg) {
307 			/* Decrement return value by the deleted range.
308 			 * Another range will span this area so that by the
309 			 * end of the routine add will be >= zero
310 			 */
311 			add -= (rg->to - rg->from);
312 			list_del(&rg->link);
313 			kfree(rg);
314 		}
315 	}
316 
317 	add += (nrg->from - f);		/* Added to beginning of region */
318 	nrg->from = f;
319 	add += t - nrg->to;		/* Added to end of region */
320 	nrg->to = t;
321 
322 out_locked:
323 	resv->adds_in_progress--;
324 	spin_unlock(&resv->lock);
325 	VM_BUG_ON(add < 0);
326 	return add;
327 }
328 
329 /*
330  * Examine the existing reserve map and determine how many
331  * huge pages in the specified range [f, t) are NOT currently
332  * represented.  This routine is called before a subsequent
333  * call to region_add that will actually modify the reserve
334  * map to add the specified range [f, t).  region_chg does
335  * not change the number of huge pages represented by the
336  * map.  However, if the existing regions in the map cannot
337  * be expanded to represent the new range, a new file_region
338  * structure is added to the map as a placeholder.  This is
339  * so that the subsequent region_add call will have all the
340  * regions it needs and will not fail.
341  *
342  * Upon entry, region_chg will also examine the cache of region descriptors
343  * associated with the map.  If there are not enough descriptors cached, one
344  * will be allocated for the in progress add operation.
345  *
346  * Returns the number of huge pages that need to be added to the existing
347  * reservation map for the range [f, t).  This number is greater than or
348  * equal to zero.  -ENOMEM is returned if a new file_region structure or
349  * cache entry is needed and cannot be allocated.
350  */
351 static long region_chg(struct resv_map *resv, long f, long t)
352 {
353 	struct list_head *head = &resv->regions;
354 	struct file_region *rg, *nrg = NULL;
355 	long chg = 0;
356 
357 retry:
358 	spin_lock(&resv->lock);
359 retry_locked:
360 	resv->adds_in_progress++;
361 
362 	/*
363 	 * Check for sufficient descriptors in the cache to accommodate
364 	 * the number of in progress add operations.
365 	 */
366 	if (resv->adds_in_progress > resv->region_cache_count) {
367 		struct file_region *trg;
368 
369 		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
370 		/* Must drop lock to allocate a new descriptor. */
371 		resv->adds_in_progress--;
372 		spin_unlock(&resv->lock);
373 
374 		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
375 		if (!trg)
376 			return -ENOMEM;
377 
378 		spin_lock(&resv->lock);
379 		list_add(&trg->link, &resv->region_cache);
380 		resv->region_cache_count++;
381 		goto retry_locked;
382 	}
383 
384 	/* Locate the region we are before or in. */
385 	list_for_each_entry(rg, head, link)
386 		if (f <= rg->to)
387 			break;
388 
389 	/* If we are below the current region then a new region is required.
390 	 * Subtle: allocate a new region at the position but make it zero
391 	 * size such that we are guaranteed to record the reservation. */
392 	if (&rg->link == head || t < rg->from) {
393 		if (!nrg) {
394 			resv->adds_in_progress--;
395 			spin_unlock(&resv->lock);
396 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
397 			if (!nrg)
398 				return -ENOMEM;
399 
400 			nrg->from = f;
401 			nrg->to   = f;
402 			INIT_LIST_HEAD(&nrg->link);
403 			goto retry;
404 		}
405 
406 		list_add(&nrg->link, rg->link.prev);
407 		chg = t - f;
408 		goto out_nrg;
409 	}
410 
411 	/* Round our left edge to the current segment if it encloses us. */
412 	if (f > rg->from)
413 		f = rg->from;
414 	chg = t - f;
415 
416 	/* Check for and consume any regions we now overlap with. */
417 	list_for_each_entry(rg, rg->link.prev, link) {
418 		if (&rg->link == head)
419 			break;
420 		if (rg->from > t)
421 			goto out;
422 
423 		/* We overlap with this area, if it extends further than
424 		 * us then we must extend ourselves.  Account for its
425 		 * existing reservation. */
426 		if (rg->to > t) {
427 			chg += rg->to - t;
428 			t = rg->to;
429 		}
430 		chg -= rg->to - rg->from;
431 	}
432 
433 out:
434 	spin_unlock(&resv->lock);
435 	/* We already know we raced and no longer need the new region */
436 	kfree(nrg);
437 	return chg;
438 out_nrg:
439 	spin_unlock(&resv->lock);
440 	return chg;
441 }
442 
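/*
 * Sketch of the usual region_chg/region_add pairing (illustrative only,
 * error handling abbreviated):
 *
 *	chg = region_chg(resv, f, t);	// may allocate, may return -ENOMEM
 *	if (chg < 0)
 *		return chg;
 *	// ... charge 'chg' pages to the global/subpool counters ...
 *	add = region_add(resv, f, t);	// cannot fail; a cached descriptor
 *					// is guaranteed to be available
 *	// if the operation is abandoned after region_chg, call
 *	// region_abort(resv, f, t) instead of region_add
 */
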
443 /*
444  * Abort the in progress add operation.  The adds_in_progress field
445  * of the resv_map keeps track of the operations in progress between
446  * calls to region_chg and region_add.  Operations are sometimes
447  * aborted after the call to region_chg.  In such cases, region_abort
448  * is called to decrement the adds_in_progress counter.
449  *
450  * NOTE: The range arguments [f, t) are not needed or used in this
451  * routine.  They are kept to make reading the calling code easier as
452  * arguments will match the associated region_chg call.
453  */
454 static void region_abort(struct resv_map *resv, long f, long t)
455 {
456 	spin_lock(&resv->lock);
457 	VM_BUG_ON(!resv->region_cache_count);
458 	resv->adds_in_progress--;
459 	spin_unlock(&resv->lock);
460 }
461 
462 /*
463  * Delete the specified range [f, t) from the reserve map.  If the
464  * t parameter is LONG_MAX, this indicates that ALL regions after f
465  * should be deleted.  Locate the regions which intersect [f, t)
466  * and either trim, delete or split the existing regions.
467  *
468  * Returns the number of huge pages deleted from the reserve map.
469  * In the normal case, the return value is zero or more.  In the
470  * case where a region must be split, a new region descriptor must
471  * be allocated.  If the allocation fails, -ENOMEM will be returned.
472  * NOTE: If the parameter t == LONG_MAX, then we will never split
473  * a region and possibly return -ENOMEM.  Callers specifying
474  * t == LONG_MAX do not need to check for -ENOMEM error.
475  */
476 static long region_del(struct resv_map *resv, long f, long t)
477 {
478 	struct list_head *head = &resv->regions;
479 	struct file_region *rg, *trg;
480 	struct file_region *nrg = NULL;
481 	long del = 0;
482 
483 retry:
484 	spin_lock(&resv->lock);
485 	list_for_each_entry_safe(rg, trg, head, link) {
486 		if (rg->to <= f)
487 			continue;
488 		if (rg->from >= t)
489 			break;
490 
491 		if (f > rg->from && t < rg->to) { /* Must split region */
492 			/*
493 			 * Check for an entry in the cache before dropping
494 			 * lock and attempting allocation.
495 			 */
496 			if (!nrg &&
497 			    resv->region_cache_count > resv->adds_in_progress) {
498 				nrg = list_first_entry(&resv->region_cache,
499 							struct file_region,
500 							link);
501 				list_del(&nrg->link);
502 				resv->region_cache_count--;
503 			}
504 
505 			if (!nrg) {
506 				spin_unlock(&resv->lock);
507 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
508 				if (!nrg)
509 					return -ENOMEM;
510 				goto retry;
511 			}
512 
513 			del += t - f;
514 
515 			/* New entry for end of split region */
516 			nrg->from = t;
517 			nrg->to = rg->to;
518 			INIT_LIST_HEAD(&nrg->link);
519 
520 			/* Original entry is trimmed */
521 			rg->to = f;
522 
523 			list_add(&nrg->link, &rg->link);
524 			nrg = NULL;
525 			break;
526 		}
527 
528 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
529 			del += rg->to - rg->from;
530 			list_del(&rg->link);
531 			kfree(rg);
532 			continue;
533 		}
534 
535 		if (f <= rg->from) {	/* Trim beginning of region */
536 			del += t - rg->from;
537 			rg->from = t;
538 		} else {		/* Trim end of region */
539 			del += rg->to - f;
540 			rg->to = f;
541 		}
542 	}
543 
544 	spin_unlock(&resv->lock);
545 	kfree(nrg);
546 	return del;
547 }
548 
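/*
 * Worked example of the split case above (illustrative): deleting [2, 3)
 * from a map holding [0, 5) trims the existing region to [0, 2), inserts
 * a new descriptor for [3, 5) and returns 1.  A call with t == LONG_MAX
 * can only trim or remove whole regions, never split one, which is why
 * such callers need not check for -ENOMEM.
 */
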
549 /*
550  * A rare out-of-memory error was encountered which prevented removal of
551  * the reserve map region for a page.  The huge page itself was freed
552  * and removed from the page cache.  This routine will adjust the subpool
553  * usage count, and the global reserve count if needed.  By incrementing
554  * these counts, the reserve map entry which could not be deleted will
555  * appear as a "reserved" entry instead of simply dangling with incorrect
556  * counts.
557  */
558 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
559 {
560 	struct hugepage_subpool *spool = subpool_inode(inode);
561 	long rsv_adjust;
562 
563 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
564 	if (restore_reserve && rsv_adjust) {
565 		struct hstate *h = hstate_inode(inode);
566 
567 		hugetlb_acct_memory(h, 1);
568 	}
569 }
570 
571 /*
572  * Count and return the number of huge pages in the reserve map
573  * that intersect with the range [f, t).
574  */
575 static long region_count(struct resv_map *resv, long f, long t)
576 {
577 	struct list_head *head = &resv->regions;
578 	struct file_region *rg;
579 	long chg = 0;
580 
581 	spin_lock(&resv->lock);
582 	/* Locate each segment we overlap with, and count that overlap. */
583 	list_for_each_entry(rg, head, link) {
584 		long seg_from;
585 		long seg_to;
586 
587 		if (rg->to <= f)
588 			continue;
589 		if (rg->from >= t)
590 			break;
591 
592 		seg_from = max(rg->from, f);
593 		seg_to = min(rg->to, t);
594 
595 		chg += seg_to - seg_from;
596 	}
597 	spin_unlock(&resv->lock);
598 
599 	return chg;
600 }
601 
602 /*
603  * Convert the address within this vma to the page offset within
604  * the mapping, in pagecache page units; huge pages here.
605  */
606 static pgoff_t vma_hugecache_offset(struct hstate *h,
607 			struct vm_area_struct *vma, unsigned long address)
608 {
609 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
610 			(vma->vm_pgoff >> huge_page_order(h));
611 }
612 
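/*
 * Example of the arithmetic (illustrative, assuming 2MB huge pages so
 * huge_page_shift() == 21): for a VMA with vm_start == 0x40000000 and
 * vm_pgoff == 0, address 0x40400000 yields huge page index
 * (0x400000 >> 21) + 0 == 2.
 */
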
613 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
614 				     unsigned long address)
615 {
616 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
617 }
618 
619 /*
620  * Return the size of the pages allocated when backing a VMA. In the majority
621  * of cases this will be the same size as used by the page table entries.
622  */
623 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
624 {
625 	struct hstate *hstate;
626 
627 	if (!is_vm_hugetlb_page(vma))
628 		return PAGE_SIZE;
629 
630 	hstate = hstate_vma(vma);
631 
632 	return 1UL << huge_page_shift(hstate);
633 }
634 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
635 
636 /*
637  * Return the page size being used by the MMU to back a VMA. In the majority
638  * of cases, the page size used by the kernel matches the MMU size. On
639  * architectures where it differs, an architecture-specific version of this
640  * function is required.
641  */
642 #ifndef vma_mmu_pagesize
643 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
644 {
645 	return vma_kernel_pagesize(vma);
646 }
647 #endif
648 
649 /*
650  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
651  * bits of the reservation map pointer, which are always clear due to
652  * alignment.
653  */
654 #define HPAGE_RESV_OWNER    (1UL << 0)
655 #define HPAGE_RESV_UNMAPPED (1UL << 1)
656 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
657 
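/*
 * Layout sketch (illustrative): for a private mapping, vm_private_data
 * packs a resv_map pointer together with the flag bits above, relying on
 * the pointer's alignment keeping its low bits clear:
 *
 *	set_vma_resv_map(vma, map);			// store pointer
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);	// tag low bit
 *	map = vma_resv_map(vma);			// masks flags back off
 */
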
658 /*
659  * These helpers are used to track how many pages are reserved for
660  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
661  * is guaranteed to have their future faults succeed.
662  *
663  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
664  * the reserve counters are updated with the hugetlb_lock held. It is safe
665  * to reset the VMA at fork() time as it is not in use yet and there is no
666  * chance of the global counters getting corrupted by the stale values.
667  *
668  * The private mapping reservation is represented in a subtly different
669  * manner to a shared mapping.  A shared mapping has a region map associated
670  * with the underlying file; this region map represents the backing file
671  * pages which have ever had a reservation assigned, and this persists even
672  * after the page is instantiated.  A private mapping has a region map
673  * associated with the original mmap which is attached to all VMAs which
674  * reference it; this region map represents those offsets which have
675  * consumed a reservation, i.e. where pages have been instantiated.
676  */
677 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
678 {
679 	return (unsigned long)vma->vm_private_data;
680 }
681 
682 static void set_vma_private_data(struct vm_area_struct *vma,
683 							unsigned long value)
684 {
685 	vma->vm_private_data = (void *)value;
686 }
687 
688 struct resv_map *resv_map_alloc(void)
689 {
690 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
691 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
692 
693 	if (!resv_map || !rg) {
694 		kfree(resv_map);
695 		kfree(rg);
696 		return NULL;
697 	}
698 
699 	kref_init(&resv_map->refs);
700 	spin_lock_init(&resv_map->lock);
701 	INIT_LIST_HEAD(&resv_map->regions);
702 
703 	resv_map->adds_in_progress = 0;
704 
705 	INIT_LIST_HEAD(&resv_map->region_cache);
706 	list_add(&rg->link, &resv_map->region_cache);
707 	resv_map->region_cache_count = 1;
708 
709 	return resv_map;
710 }
711 
712 void resv_map_release(struct kref *ref)
713 {
714 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
715 	struct list_head *head = &resv_map->region_cache;
716 	struct file_region *rg, *trg;
717 
718 	/* Clear out any active regions before we release the map. */
719 	region_del(resv_map, 0, LONG_MAX);
720 
721 	/* ... and any entries left in the cache */
722 	list_for_each_entry_safe(rg, trg, head, link) {
723 		list_del(&rg->link);
724 		kfree(rg);
725 	}
726 
727 	VM_BUG_ON(resv_map->adds_in_progress);
728 
729 	kfree(resv_map);
730 }
731 
732 static inline struct resv_map *inode_resv_map(struct inode *inode)
733 {
734 	return inode->i_mapping->private_data;
735 }
736 
737 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
738 {
739 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
740 	if (vma->vm_flags & VM_MAYSHARE) {
741 		struct address_space *mapping = vma->vm_file->f_mapping;
742 		struct inode *inode = mapping->host;
743 
744 		return inode_resv_map(inode);
745 
746 	} else {
747 		return (struct resv_map *)(get_vma_private_data(vma) &
748 							~HPAGE_RESV_MASK);
749 	}
750 }
751 
752 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
753 {
754 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
755 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
756 
757 	set_vma_private_data(vma, (get_vma_private_data(vma) &
758 				HPAGE_RESV_MASK) | (unsigned long)map);
759 }
760 
761 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
762 {
763 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765 
766 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
767 }
768 
769 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
770 {
771 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
772 
773 	return (get_vma_private_data(vma) & flag) != 0;
774 }
775 
776 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
777 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
778 {
779 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
780 	if (!(vma->vm_flags & VM_MAYSHARE))
781 		vma->vm_private_data = (void *)0;
782 }
783 
784 /* Returns true if the VMA has associated reserve pages */
785 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
786 {
787 	if (vma->vm_flags & VM_NORESERVE) {
788 		/*
789 		 * This address is already reserved by another process (chg == 0),
790 		 * so we should decrement the reserved count. Without decrementing,
791 		 * the reserve count remains after releasing the inode, because the
792 		 * allocated page will go into the page cache and be regarded as
793 		 * coming from the reserved pool in the releasing step.  Currently,
794 		 * we don't have any other solution to deal with this situation
795 		 * properly, so add a work-around here.
796 		 */
797 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
798 			return true;
799 		else
800 			return false;
801 	}
802 
803 	/* Shared mappings always use reserves */
804 	if (vma->vm_flags & VM_MAYSHARE) {
805 		/*
806 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
807 		 * be a region map for all pages.  The only situation where
808 		 * there is no region map is if a hole was punched via
809 		 * fallocate.  In this case, there really are no reserves to
810 		 * use.  This situation is indicated if chg != 0.
811 		 */
812 		if (chg)
813 			return false;
814 		else
815 			return true;
816 	}
817 
818 	/*
819 	 * Only the process that called mmap() has reserves for
820 	 * private mappings.
821 	 */
822 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
823 		return true;
824 
825 	return false;
826 }
827 
828 static void enqueue_huge_page(struct hstate *h, struct page *page)
829 {
830 	int nid = page_to_nid(page);
831 	list_move(&page->lru, &h->hugepage_freelists[nid]);
832 	h->free_huge_pages++;
833 	h->free_huge_pages_node[nid]++;
834 }
835 
836 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
837 {
838 	struct page *page;
839 
840 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
841 		if (!is_migrate_isolate_page(page))
842 			break;
843 	/*
844 	 * If no non-isolated free hugepage is found on the list,
845 	 * the allocation fails.
846 	 */
847 	if (&h->hugepage_freelists[nid] == &page->lru)
848 		return NULL;
849 	list_move(&page->lru, &h->hugepage_activelist);
850 	set_page_refcounted(page);
851 	h->free_huge_pages--;
852 	h->free_huge_pages_node[nid]--;
853 	return page;
854 }
855 
856 /* Movability of hugepages depends on migration support. */
857 static inline gfp_t htlb_alloc_mask(struct hstate *h)
858 {
859 	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
860 		return GFP_HIGHUSER_MOVABLE;
861 	else
862 		return GFP_HIGHUSER;
863 }
864 
865 static struct page *dequeue_huge_page_vma(struct hstate *h,
866 				struct vm_area_struct *vma,
867 				unsigned long address, int avoid_reserve,
868 				long chg)
869 {
870 	struct page *page = NULL;
871 	struct mempolicy *mpol;
872 	nodemask_t *nodemask;
873 	struct zonelist *zonelist;
874 	struct zone *zone;
875 	struct zoneref *z;
876 	unsigned int cpuset_mems_cookie;
877 
878 	/*
879 	 * A child process with MAP_PRIVATE mappings created by its parent
880 	 * has no page reserves. This check ensures that reservations are
881 	 * not "stolen". The child may still get SIGKILLed.
882 	 */
883 	if (!vma_has_reserves(vma, chg) &&
884 			h->free_huge_pages - h->resv_huge_pages == 0)
885 		goto err;
886 
887 	/* If reserves cannot be used, ensure enough pages are in the pool */
888 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
889 		goto err;
890 
891 retry_cpuset:
892 	cpuset_mems_cookie = read_mems_allowed_begin();
893 	zonelist = huge_zonelist(vma, address,
894 					htlb_alloc_mask(h), &mpol, &nodemask);
895 
896 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
897 						MAX_NR_ZONES - 1, nodemask) {
898 		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
899 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
900 			if (page) {
901 				if (avoid_reserve)
902 					break;
903 				if (!vma_has_reserves(vma, chg))
904 					break;
905 
906 				SetPagePrivate(page);
907 				h->resv_huge_pages--;
908 				break;
909 			}
910 		}
911 	}
912 
913 	mpol_cond_put(mpol);
914 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
915 		goto retry_cpuset;
916 	return page;
917 
918 err:
919 	return NULL;
920 }
921 
922 /*
923  * common helper functions for hstate_next_node_to_{alloc|free}.
924  * We may have allocated or freed a huge page based on a different
925  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
926  * be outside of *nodes_allowed.  Ensure that we use an allowed
927  * node for alloc or free.
928  */
929 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
930 {
931 	nid = next_node(nid, *nodes_allowed);
932 	if (nid == MAX_NUMNODES)
933 		nid = first_node(*nodes_allowed);
934 	VM_BUG_ON(nid >= MAX_NUMNODES);
935 
936 	return nid;
937 }
938 
939 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
940 {
941 	if (!node_isset(nid, *nodes_allowed))
942 		nid = next_node_allowed(nid, nodes_allowed);
943 	return nid;
944 }
945 
946 /*
947  * returns the previously saved node ["this node"] from which to
948  * allocate a persistent huge page for the pool and advance the
949  * next node from which to allocate, handling wrap at end of node
950  * mask.
951  */
952 static int hstate_next_node_to_alloc(struct hstate *h,
953 					nodemask_t *nodes_allowed)
954 {
955 	int nid;
956 
957 	VM_BUG_ON(!nodes_allowed);
958 
959 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
960 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
961 
962 	return nid;
963 }
964 
965 /*
966  * helper for free_pool_huge_page() - return the previously saved
967  * node ["this node"] from which to free a huge page.  Advance the
968  * next node id whether or not we find a free huge page to free so
969  * that the next attempt to free addresses the next node.
970  */
971 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
972 {
973 	int nid;
974 
975 	VM_BUG_ON(!nodes_allowed);
976 
977 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
978 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
979 
980 	return nid;
981 }
982 
983 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
984 	for (nr_nodes = nodes_weight(*mask);				\
985 		nr_nodes > 0 &&						\
986 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
987 		nr_nodes--)
988 
989 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
990 	for (nr_nodes = nodes_weight(*mask);				\
991 		nr_nodes > 0 &&						\
992 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
993 		nr_nodes--)
994 
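/*
 * Usage sketch (illustrative): try to allocate one fresh huge page,
 * visiting each allowed node at most once, starting from the saved
 * round-robin position:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 */
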
995 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
996 static void destroy_compound_gigantic_page(struct page *page,
997 					unsigned int order)
998 {
999 	int i;
1000 	int nr_pages = 1 << order;
1001 	struct page *p = page + 1;
1002 
1003 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1004 		clear_compound_head(p);
1005 		set_page_refcounted(p);
1006 	}
1007 
1008 	set_compound_order(page, 0);
1009 	__ClearPageHead(page);
1010 }
1011 
1012 static void free_gigantic_page(struct page *page, unsigned int order)
1013 {
1014 	free_contig_range(page_to_pfn(page), 1 << order);
1015 }
1016 
1017 static int __alloc_gigantic_page(unsigned long start_pfn,
1018 				unsigned long nr_pages)
1019 {
1020 	unsigned long end_pfn = start_pfn + nr_pages;
1021 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1022 }
1023 
1024 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1025 				unsigned long nr_pages)
1026 {
1027 	unsigned long i, end_pfn = start_pfn + nr_pages;
1028 	struct page *page;
1029 
1030 	for (i = start_pfn; i < end_pfn; i++) {
1031 		if (!pfn_valid(i))
1032 			return false;
1033 
1034 		page = pfn_to_page(i);
1035 
1036 		if (PageReserved(page))
1037 			return false;
1038 
1039 		if (page_count(page) > 0)
1040 			return false;
1041 
1042 		if (PageHuge(page))
1043 			return false;
1044 	}
1045 
1046 	return true;
1047 }
1048 
1049 static bool zone_spans_last_pfn(const struct zone *zone,
1050 			unsigned long start_pfn, unsigned long nr_pages)
1051 {
1052 	unsigned long last_pfn = start_pfn + nr_pages - 1;
1053 	return zone_spans_pfn(zone, last_pfn);
1054 }
1055 
1056 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1057 {
1058 	unsigned long nr_pages = 1 << order;
1059 	unsigned long ret, pfn, flags;
1060 	struct zone *z;
1061 
1062 	z = NODE_DATA(nid)->node_zones;
1063 	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1064 		spin_lock_irqsave(&z->lock, flags);
1065 
1066 		pfn = ALIGN(z->zone_start_pfn, nr_pages);
1067 		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1068 			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1069 				/*
1070 				 * We release the zone lock here because
1071 				 * alloc_contig_range() will also lock the zone
1072 				 * at some point. If there's an allocation
1073 				 * spinning on this lock, it may win the race
1074 				 * and cause alloc_contig_range() to fail...
1075 				 */
1076 				spin_unlock_irqrestore(&z->lock, flags);
1077 				ret = __alloc_gigantic_page(pfn, nr_pages);
1078 				if (!ret)
1079 					return pfn_to_page(pfn);
1080 				spin_lock_irqsave(&z->lock, flags);
1081 			}
1082 			pfn += nr_pages;
1083 		}
1084 
1085 		spin_unlock_irqrestore(&z->lock, flags);
1086 	}
1087 
1088 	return NULL;
1089 }
1090 
1091 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1092 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1093 
1094 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1095 {
1096 	struct page *page;
1097 
1098 	page = alloc_gigantic_page(nid, huge_page_order(h));
1099 	if (page) {
1100 		prep_compound_gigantic_page(page, huge_page_order(h));
1101 		prep_new_huge_page(h, page, nid);
1102 	}
1103 
1104 	return page;
1105 }
1106 
1107 static int alloc_fresh_gigantic_page(struct hstate *h,
1108 				nodemask_t *nodes_allowed)
1109 {
1110 	struct page *page = NULL;
1111 	int nr_nodes, node;
1112 
1113 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1114 		page = alloc_fresh_gigantic_page_node(h, node);
1115 		if (page)
1116 			return 1;
1117 	}
1118 
1119 	return 0;
1120 }
1121 
1122 static inline bool gigantic_page_supported(void) { return true; }
1123 #else
1124 static inline bool gigantic_page_supported(void) { return false; }
1125 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1126 static inline void destroy_compound_gigantic_page(struct page *page,
1127 						unsigned int order) { }
1128 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1129 					nodemask_t *nodes_allowed) { return 0; }
1130 #endif
1131 
1132 static void update_and_free_page(struct hstate *h, struct page *page)
1133 {
1134 	int i;
1135 
1136 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
1137 		return;
1138 
1139 	h->nr_huge_pages--;
1140 	h->nr_huge_pages_node[page_to_nid(page)]--;
1141 	for (i = 0; i < pages_per_huge_page(h); i++) {
1142 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1143 				1 << PG_referenced | 1 << PG_dirty |
1144 				1 << PG_active | 1 << PG_private |
1145 				1 << PG_writeback);
1146 	}
1147 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1148 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1149 	set_page_refcounted(page);
1150 	if (hstate_is_gigantic(h)) {
1151 		destroy_compound_gigantic_page(page, huge_page_order(h));
1152 		free_gigantic_page(page, huge_page_order(h));
1153 	} else {
1154 		__free_pages(page, huge_page_order(h));
1155 	}
1156 }
1157 
1158 struct hstate *size_to_hstate(unsigned long size)
1159 {
1160 	struct hstate *h;
1161 
1162 	for_each_hstate(h) {
1163 		if (huge_page_size(h) == size)
1164 			return h;
1165 	}
1166 	return NULL;
1167 }
1168 
1169 /*
1170  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1171  * to hstate->hugepage_activelist.)
1172  *
1173  * This function can be called for tail pages, but never returns true for them.
1174  */
1175 bool page_huge_active(struct page *page)
1176 {
1177 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1178 	return PageHead(page) && PagePrivate(&page[1]);
1179 }
1180 
1181 /* never called for tail page */
1182 static void set_page_huge_active(struct page *page)
1183 {
1184 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1185 	SetPagePrivate(&page[1]);
1186 }
1187 
1188 static void clear_page_huge_active(struct page *page)
1189 {
1190 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1191 	ClearPagePrivate(&page[1]);
1192 }
1193 
1194 void free_huge_page(struct page *page)
1195 {
1196 	/*
1197 	 * Can't pass hstate in here because it is called from the
1198 	 * compound page destructor.
1199 	 */
1200 	struct hstate *h = page_hstate(page);
1201 	int nid = page_to_nid(page);
1202 	struct hugepage_subpool *spool =
1203 		(struct hugepage_subpool *)page_private(page);
1204 	bool restore_reserve;
1205 
1206 	set_page_private(page, 0);
1207 	page->mapping = NULL;
1208 	BUG_ON(page_count(page));
1209 	BUG_ON(page_mapcount(page));
1210 	restore_reserve = PagePrivate(page);
1211 	ClearPagePrivate(page);
1212 
1213 	/*
1214 	 * A return code of zero implies that the subpool will be under its
1215 	 * minimum size if the reservation is not restored after the page is
1216 	 * freed.  Therefore, force the restore_reserve operation.
1217 	 */
1218 	if (hugepage_subpool_put_pages(spool, 1) == 0)
1219 		restore_reserve = true;
1220 
1221 	spin_lock(&hugetlb_lock);
1222 	clear_page_huge_active(page);
1223 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1224 				     pages_per_huge_page(h), page);
1225 	if (restore_reserve)
1226 		h->resv_huge_pages++;
1227 
1228 	if (h->surplus_huge_pages_node[nid]) {
1229 		/* remove the page from active list */
1230 		list_del(&page->lru);
1231 		update_and_free_page(h, page);
1232 		h->surplus_huge_pages--;
1233 		h->surplus_huge_pages_node[nid]--;
1234 	} else {
1235 		arch_clear_hugepage_flags(page);
1236 		enqueue_huge_page(h, page);
1237 	}
1238 	spin_unlock(&hugetlb_lock);
1239 }
1240 
1241 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1242 {
1243 	INIT_LIST_HEAD(&page->lru);
1244 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1245 	spin_lock(&hugetlb_lock);
1246 	set_hugetlb_cgroup(page, NULL);
1247 	h->nr_huge_pages++;
1248 	h->nr_huge_pages_node[nid]++;
1249 	spin_unlock(&hugetlb_lock);
1250 	put_page(page); /* free it into the hugepage allocator */
1251 }
1252 
1253 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1254 {
1255 	int i;
1256 	int nr_pages = 1 << order;
1257 	struct page *p = page + 1;
1258 
1259 	/* we rely on prep_new_huge_page to set the destructor */
1260 	set_compound_order(page, order);
1261 	__SetPageHead(page);
1262 	__ClearPageReserved(page);
1263 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1264 		/*
1265 		 * For gigantic hugepages allocated through bootmem at
1266 		 * boot, it's safer to be consistent with the not-gigantic
1267 		 * hugepages and clear the PG_reserved bit from all tail pages
1268 		 * too.  Otherwise drivers using get_user_pages() to access tail
1269 		 * pages may get the reference counting wrong if they see
1270 		 * PG_reserved set on a tail page (despite the head page not
1271 		 * having PG_reserved set).  Enforcing this consistency between
1272 		 * head and tail pages allows drivers to optimize away a check
1273 		 * on the head page when they need to know if put_page() is needed
1274 		 * after get_user_pages().
1275 		 */
1276 		__ClearPageReserved(p);
1277 		set_page_count(p, 0);
1278 		set_compound_head(p, page);
1279 	}
1280 }
1281 
1282 /*
1283  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1284  * transparent huge pages.  See the PageTransHuge() documentation for more
1285  * details.
1286  */
1287 int PageHuge(struct page *page)
1288 {
1289 	if (!PageCompound(page))
1290 		return 0;
1291 
1292 	page = compound_head(page);
1293 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1294 }
1295 EXPORT_SYMBOL_GPL(PageHuge);
1296 
1297 /*
1298  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1299  * normal or transparent huge pages.
1300  */
1301 int PageHeadHuge(struct page *page_head)
1302 {
1303 	if (!PageHead(page_head))
1304 		return 0;
1305 
1306 	return get_compound_page_dtor(page_head) == free_huge_page;
1307 }
1308 
1309 pgoff_t __basepage_index(struct page *page)
1310 {
1311 	struct page *page_head = compound_head(page);
1312 	pgoff_t index = page_index(page_head);
1313 	unsigned long compound_idx;
1314 
1315 	if (!PageHuge(page_head))
1316 		return page_index(page);
1317 
1318 	if (compound_order(page_head) >= MAX_ORDER)
1319 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1320 	else
1321 		compound_idx = page - page_head;
1322 
1323 	return (index << compound_order(page_head)) + compound_idx;
1324 }
1325 
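/*
 * Example (illustrative): for a 2MB huge page (compound order 9) at huge
 * page index 3 in the file, the base page at offset 17 within it has
 * basepage index (3 << 9) + 17 == 1553.
 */
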
1326 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1327 {
1328 	struct page *page;
1329 
1330 	page = __alloc_pages_node(nid,
1331 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1332 						__GFP_REPEAT|__GFP_NOWARN,
1333 		huge_page_order(h));
1334 	if (page) {
1335 		prep_new_huge_page(h, page, nid);
1336 	}
1337 
1338 	return page;
1339 }
1340 
1341 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1342 {
1343 	struct page *page;
1344 	int nr_nodes, node;
1345 	int ret = 0;
1346 
1347 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1348 		page = alloc_fresh_huge_page_node(h, node);
1349 		if (page) {
1350 			ret = 1;
1351 			break;
1352 		}
1353 	}
1354 
1355 	if (ret)
1356 		count_vm_event(HTLB_BUDDY_PGALLOC);
1357 	else
1358 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1359 
1360 	return ret;
1361 }
1362 
1363 /*
1364  * Free a huge page from the pool, starting from the next node to free.
1365  * Attempt to keep persistent huge pages more or less
1366  * balanced over allowed nodes.
1367  * Called with hugetlb_lock locked.
1368  */
1369 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1370 							 bool acct_surplus)
1371 {
1372 	int nr_nodes, node;
1373 	int ret = 0;
1374 
1375 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1376 		/*
1377 		 * If we're returning unused surplus pages, only examine
1378 		 * nodes with surplus pages.
1379 		 */
1380 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1381 		    !list_empty(&h->hugepage_freelists[node])) {
1382 			struct page *page =
1383 				list_entry(h->hugepage_freelists[node].next,
1384 					  struct page, lru);
1385 			list_del(&page->lru);
1386 			h->free_huge_pages--;
1387 			h->free_huge_pages_node[node]--;
1388 			if (acct_surplus) {
1389 				h->surplus_huge_pages--;
1390 				h->surplus_huge_pages_node[node]--;
1391 			}
1392 			update_and_free_page(h, page);
1393 			ret = 1;
1394 			break;
1395 		}
1396 	}
1397 
1398 	return ret;
1399 }
1400 
1401 /*
1402  * Dissolve a given free hugepage into free buddy pages. This function does
1403  * nothing for in-use (including surplus) hugepages.
1404  */
1405 static void dissolve_free_huge_page(struct page *page)
1406 {
1407 	spin_lock(&hugetlb_lock);
1408 	if (PageHuge(page) && !page_count(page)) {
1409 		struct hstate *h = page_hstate(page);
1410 		int nid = page_to_nid(page);
1411 		list_del(&page->lru);
1412 		h->free_huge_pages--;
1413 		h->free_huge_pages_node[nid]--;
1414 		update_and_free_page(h, page);
1415 	}
1416 	spin_unlock(&hugetlb_lock);
1417 }
1418 
1419 /*
1420  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1421  * make specified memory blocks removable from the system.
1422  * Note that start_pfn should aligned with (minimum) hugepage size.
1423  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1424 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1425 {
1426 	unsigned long pfn;
1427 
1428 	if (!hugepages_supported())
1429 		return;
1430 
1431 	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1432 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1433 		dissolve_free_huge_page(pfn_to_page(pfn));
1434 }
1435 
1436 /*
1437  * There are 3 ways this can get called:
1438  * 1. With vma+addr: we use the VMA's memory policy
1439  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1440  *    page from any node, and let the buddy allocator itself figure
1441  *    it out.
1442  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1443  *    strictly from 'nid'
1444  */
1445 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1446 		struct vm_area_struct *vma, unsigned long addr, int nid)
1447 {
1448 	int order = huge_page_order(h);
1449 	gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1450 	unsigned int cpuset_mems_cookie;
1451 
1452 	/*
1453 	 * We need a VMA to get a memory policy.  If we do not
1454 	 * have one, we use the 'nid' argument.
1455 	 *
1456 	 * The mempolicy stuff below has some non-inlined bits
1457 	 * and calls ->vm_ops.  That makes it hard to optimize at
1458 	 * compile-time, even when NUMA is off and it does
1459 	 * nothing.  This helps the compiler optimize it out.
1460 	 */
1461 	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1462 		/*
1463 		 * If a specific node is requested, make sure to
1464 		 * get memory from there, but only when a node
1465 		 * is explicitly specified.
1466 		 */
1467 		if (nid != NUMA_NO_NODE)
1468 			gfp |= __GFP_THISNODE;
1469 		/*
1470 		 * Make sure to call something that can handle
1471 		 * nid=NUMA_NO_NODE
1472 		 */
1473 		return alloc_pages_node(nid, gfp, order);
1474 	}
1475 
1476 	/*
1477 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
1478 	 * allocate a huge page with it.  We will only reach this
1479 	 * when CONFIG_NUMA=y.
1480 	 */
1481 	do {
1482 		struct page *page;
1483 		struct mempolicy *mpol;
1484 		struct zonelist *zl;
1485 		nodemask_t *nodemask;
1486 
1487 		cpuset_mems_cookie = read_mems_allowed_begin();
1488 		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1489 		mpol_cond_put(mpol);
1490 		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1491 		if (page)
1492 			return page;
1493 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1494 
1495 	return NULL;
1496 }
1497 
1498 /*
1499  * There are two ways to allocate a huge page:
1500  * 1. When you have a VMA and an address (like a fault)
1501  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1502  *
1503  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1504  * this case which signifies that the allocation should be done with
1505  * respect for the VMA's memory policy.
1506  *
1507  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1508  * implies that memory policies will not be taken into account.
1509  */
1510 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1511 		struct vm_area_struct *vma, unsigned long addr, int nid)
1512 {
1513 	struct page *page;
1514 	unsigned int r_nid;
1515 
1516 	if (hstate_is_gigantic(h))
1517 		return NULL;
1518 
1519 	/*
1520 	 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1521 	 * This makes sure the caller is picking _one_ of the modes with which
1522 	 * we can call this function, not both.
1523 	 */
1524 	if (vma || (addr != -1)) {
1525 		VM_WARN_ON_ONCE(addr == -1);
1526 		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1527 	}
1528 	/*
1529 	 * Assume we will successfully allocate the surplus page to
1530 	 * prevent racing processes from causing the surplus to exceed
1531 	 * overcommit.
1532 	 *
1533 	 * This however introduces a different race, where a process B
1534 	 * tries to grow the static hugepage pool while alloc_pages() is
1535 	 * called by process A. B will only examine the per-node
1536 	 * counters in determining if surplus huge pages can be
1537 	 * converted to normal huge pages in adjust_pool_surplus(). A
1538 	 * won't be able to increment the per-node counter, until the
1539 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1540 	 * no more huge pages can be converted from surplus to normal
1541 	 * state (and doesn't try to convert again). Thus, we have a
1542 	 * case where a surplus huge page exists, the pool is grown, and
1543 	 * the surplus huge page still exists after, even though it
1544 	 * should just have been converted to a normal huge page. This
1545 	 * does not leak memory, though, as the hugepage will be freed
1546 	 * once it is out of use. It also does not allow the counters to
1547 	 * go out of whack in adjust_pool_surplus() as we don't modify
1548 	 * the node values until we've gotten the hugepage and only the
1549 	 * per-node value is checked there.
1550 	 */
1551 	spin_lock(&hugetlb_lock);
1552 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1553 		spin_unlock(&hugetlb_lock);
1554 		return NULL;
1555 	} else {
1556 		h->nr_huge_pages++;
1557 		h->surplus_huge_pages++;
1558 	}
1559 	spin_unlock(&hugetlb_lock);
1560 
1561 	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1562 
1563 	spin_lock(&hugetlb_lock);
1564 	if (page) {
1565 		INIT_LIST_HEAD(&page->lru);
1566 		r_nid = page_to_nid(page);
1567 		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1568 		set_hugetlb_cgroup(page, NULL);
1569 		/*
1570 		 * We incremented the global counters already
1571 		 */
1572 		h->nr_huge_pages_node[r_nid]++;
1573 		h->surplus_huge_pages_node[r_nid]++;
1574 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1575 	} else {
1576 		h->nr_huge_pages--;
1577 		h->surplus_huge_pages--;
1578 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1579 	}
1580 	spin_unlock(&hugetlb_lock);
1581 
1582 	return page;
1583 }
1584 
1585 /*
1586  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1587  * NUMA_NO_NODE, which means that it may be allocated
1588  * anywhere.
1589  */
1590 static
1591 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1592 {
1593 	unsigned long addr = -1;
1594 
1595 	return __alloc_buddy_huge_page(h, NULL, addr, nid);
1596 }
1597 
1598 /*
1599  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1600  */
1601 static
1602 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1603 		struct vm_area_struct *vma, unsigned long addr)
1604 {
1605 	return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1606 }
1607 
1608 /*
1609  * This allocation function is useful in the context where vma is irrelevant.
1610  * E.g. soft-offlining uses this function because it only cares about the
1611  * physical address of the error page.
1612  */
1613 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1614 {
1615 	struct page *page = NULL;
1616 
1617 	spin_lock(&hugetlb_lock);
1618 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1619 		page = dequeue_huge_page_node(h, nid);
1620 	spin_unlock(&hugetlb_lock);
1621 
1622 	if (!page)
1623 		page = __alloc_buddy_huge_page_no_mpol(h, nid);
1624 
1625 	return page;
1626 }
1627 
1628 /*
1629  * Increase the hugetlb pool such that it can accommodate a reservation
1630  * of size 'delta'.
1631  */
1632 static int gather_surplus_pages(struct hstate *h, int delta)
1633 {
1634 	struct list_head surplus_list;
1635 	struct page *page, *tmp;
1636 	int ret, i;
1637 	int needed, allocated;
1638 	bool alloc_ok = true;
1639 
1640 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1641 	if (needed <= 0) {
1642 		h->resv_huge_pages += delta;
1643 		return 0;
1644 	}
1645 
1646 	allocated = 0;
1647 	INIT_LIST_HEAD(&surplus_list);
1648 
1649 	ret = -ENOMEM;
1650 retry:
1651 	spin_unlock(&hugetlb_lock);
1652 	for (i = 0; i < needed; i++) {
1653 		page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1654 		if (!page) {
1655 			alloc_ok = false;
1656 			break;
1657 		}
1658 		list_add(&page->lru, &surplus_list);
1659 	}
1660 	allocated += i;
1661 
1662 	/*
1663 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1664 	 * because either resv_huge_pages or free_huge_pages may have changed.
1665 	 */
1666 	spin_lock(&hugetlb_lock);
1667 	needed = (h->resv_huge_pages + delta) -
1668 			(h->free_huge_pages + allocated);
1669 	if (needed > 0) {
1670 		if (alloc_ok)
1671 			goto retry;
1672 		/*
1673 		 * We were not able to allocate enough pages to
1674 		 * satisfy the entire reservation so we free what
1675 		 * we've allocated so far.
1676 		 */
1677 		goto free;
1678 	}
1679 	/*
1680 	 * The surplus_list now contains _at_least_ the number of extra pages
1681 	 * needed to accommodate the reservation.  Add the appropriate number
1682 	 * of pages to the hugetlb pool and free the extras back to the buddy
1683 	 * allocator.  Commit the entire reservation here to prevent another
1684 	 * process from stealing the pages as they are added to the pool but
1685 	 * before they are reserved.
1686 	 */
1687 	needed += allocated;
1688 	h->resv_huge_pages += delta;
1689 	ret = 0;
1690 
1691 	/* Free the needed pages to the hugetlb pool */
1692 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1693 		if ((--needed) < 0)
1694 			break;
1695 		/*
1696 		 * This page is now managed by the hugetlb allocator and has
1697 		 * no users -- drop the buddy allocator's reference.
1698 		 */
1699 		put_page_testzero(page);
1700 		VM_BUG_ON_PAGE(page_count(page), page);
1701 		enqueue_huge_page(h, page);
1702 	}
1703 free:
1704 	spin_unlock(&hugetlb_lock);
1705 
1706 	/* Free unnecessary surplus pages to the buddy allocator */
1707 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1708 		put_page(page);
1709 	spin_lock(&hugetlb_lock);
1710 
1711 	return ret;
1712 }
1713 
1714 /*
1715  * When releasing a hugetlb pool reservation, any surplus pages that were
1716  * allocated to satisfy the reservation must be explicitly freed if they were
1717  * never used.
1718  * Called with hugetlb_lock held.
1719  */
1720 static void return_unused_surplus_pages(struct hstate *h,
1721 					unsigned long unused_resv_pages)
1722 {
1723 	unsigned long nr_pages;
1724 
1725 	/* Uncommit the reservation */
1726 	h->resv_huge_pages -= unused_resv_pages;
1727 
1728 	/* Cannot return gigantic pages currently */
1729 	if (hstate_is_gigantic(h))
1730 		return;
1731 
1732 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1733 
1734 	/*
1735 	 * We want to release as many surplus pages as possible, spread
1736 	 * evenly across all nodes with memory. Iterate across these nodes
1737 	 * until we can no longer free unreserved surplus pages. This occurs
1738 	 * when the nodes with surplus pages have no free pages.
1739 	 * free_pool_huge_page() will balance the freed pages across the
1740 	 * on-line nodes with memory and will handle the hstate accounting.
1741 	 */
1742 	while (nr_pages--) {
1743 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1744 			break;
1745 		cond_resched_lock(&hugetlb_lock);
1746 	}
1747 }
1748 
1749 
1750 /*
1751  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1752  * are used by the huge page allocation routines to manage reservations.
1753  *
1754  * vma_needs_reservation is called to determine if the huge page at addr
1755  * within the vma has an associated reservation.  If a reservation is
1756  * needed, the value 1 is returned.  The caller is then responsible for
1757  * managing the global reservation and subpool usage counts.  After
1758  * the huge page has been allocated, vma_commit_reservation is called
1759  * to add the page to the reservation map.  If the page allocation fails,
1760  * the reservation must be ended instead of committed.  vma_end_reservation
1761  * is called in such cases.
1762  *
1763  * In the normal case, vma_commit_reservation returns the same value
1764  * as the preceding vma_needs_reservation call.  The only time this
1765  * is not the case is if a reserve map was changed between calls.  It
1766  * is the responsibility of the caller to notice the difference and
1767  * take appropriate action.
1768  */
1769 enum vma_resv_mode {
1770 	VMA_NEEDS_RESV,
1771 	VMA_COMMIT_RESV,
1772 	VMA_END_RESV,
1773 };
1774 static long __vma_reservation_common(struct hstate *h,
1775 				struct vm_area_struct *vma, unsigned long addr,
1776 				enum vma_resv_mode mode)
1777 {
1778 	struct resv_map *resv;
1779 	pgoff_t idx;
1780 	long ret;
1781 
1782 	resv = vma_resv_map(vma);
1783 	if (!resv)
1784 		return 1;
1785 
1786 	idx = vma_hugecache_offset(h, vma, addr);
1787 	switch (mode) {
1788 	case VMA_NEEDS_RESV:
1789 		ret = region_chg(resv, idx, idx + 1);
1790 		break;
1791 	case VMA_COMMIT_RESV:
1792 		ret = region_add(resv, idx, idx + 1);
1793 		break;
1794 	case VMA_END_RESV:
1795 		region_abort(resv, idx, idx + 1);
1796 		ret = 0;
1797 		break;
1798 	default:
1799 		BUG();
1800 	}
1801 
1802 	if (vma->vm_flags & VM_MAYSHARE)
1803 		return ret;
1804 	else
1805 		return ret < 0 ? ret : 0;
1806 }
1807 
1808 static long vma_needs_reservation(struct hstate *h,
1809 			struct vm_area_struct *vma, unsigned long addr)
1810 {
1811 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1812 }
1813 
1814 static long vma_commit_reservation(struct hstate *h,
1815 			struct vm_area_struct *vma, unsigned long addr)
1816 {
1817 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1818 }
1819 
1820 static void vma_end_reservation(struct hstate *h,
1821 			struct vm_area_struct *vma, unsigned long addr)
1822 {
1823 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1824 }
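/*
 * Illustrative sketch (not kernel code): the typical calling sequence
 * for the reservation helpers above, with error handling elided.  The
 * canonical user of this protocol is alloc_huge_page() below.
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate the huge page...;
 *	if (page)
 *		(void)vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */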
1825 
1826 struct page *alloc_huge_page(struct vm_area_struct *vma,
1827 				    unsigned long addr, int avoid_reserve)
1828 {
1829 	struct hugepage_subpool *spool = subpool_vma(vma);
1830 	struct hstate *h = hstate_vma(vma);
1831 	struct page *page;
1832 	long map_chg, map_commit;
1833 	long gbl_chg;
1834 	int ret, idx;
1835 	struct hugetlb_cgroup *h_cg;
1836 
1837 	idx = hstate_index(h);
1838 	/*
1839 	 * Examine the region/reserve map to determine if the process
1840 	 * has a reservation for the page to be allocated.  A return
1841 	 * code of zero indicates a reservation exists (no change).
1842 	 */
1843 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1844 	if (map_chg < 0)
1845 		return ERR_PTR(-ENOMEM);
1846 
1847 	/*
1848 	 * Processes that did not create the mapping will have no
1849 	 * reserves as indicated by the region/reserve map. Check
1850 	 * that the allocation will not exceed the subpool limit.
1851 	 * Allocations for MAP_NORESERVE mappings also need to be
1852 	 * checked against any subpool limit.
1853 	 */
1854 	if (map_chg || avoid_reserve) {
1855 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
1856 		if (gbl_chg < 0) {
1857 			vma_end_reservation(h, vma, addr);
1858 			return ERR_PTR(-ENOSPC);
1859 		}
1860 
1861 		/*
1862 		 * Even though there was no reservation in the region/reserve
1863 		 * map, there could be reservations associated with the
1864 		 * subpool that can be used.  This would be indicated if the
1865 		 * return value of hugepage_subpool_get_pages() is zero.
1866 		 * However, if avoid_reserve is specified we still avoid even
1867 		 * the subpool reservations.
1868 		 */
1869 		if (avoid_reserve)
1870 			gbl_chg = 1;
1871 	}
1872 
1873 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1874 	if (ret)
1875 		goto out_subpool_put;
1876 
1877 	spin_lock(&hugetlb_lock);
1878 	/*
1879 	 * gbl_chg is passed to indicate whether or not a page must be taken
1880 	 * from the global free pool (global change).  gbl_chg == 0 indicates
1881 	 * a reservation exists for the allocation.
1882 	 */
1883 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1884 	if (!page) {
1885 		spin_unlock(&hugetlb_lock);
1886 		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1887 		if (!page)
1888 			goto out_uncharge_cgroup;
1889 
1890 		spin_lock(&hugetlb_lock);
1891 		list_move(&page->lru, &h->hugepage_activelist);
1892 		/* Fall through */
1893 	}
1894 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1895 	spin_unlock(&hugetlb_lock);
1896 
1897 	set_page_private(page, (unsigned long)spool);
1898 
1899 	map_commit = vma_commit_reservation(h, vma, addr);
1900 	if (unlikely(map_chg > map_commit)) {
1901 		/*
1902 		 * The page was added to the reservation map between
1903 		 * vma_needs_reservation and vma_commit_reservation.
1904 		 * This indicates a race with hugetlb_reserve_pages.
1905 		 * Adjust for the subpool count incremented above AND
1906 		 * in hugetlb_reserve_pages for the same page.  Also,
1907 		 * the reservation count added in hugetlb_reserve_pages
1908 		 * no longer applies.
1909 		 */
1910 		long rsv_adjust;
1911 
1912 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1913 		hugetlb_acct_memory(h, -rsv_adjust);
1914 	}
1915 	return page;
1916 
1917 out_uncharge_cgroup:
1918 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1919 out_subpool_put:
1920 	if (map_chg || avoid_reserve)
1921 		hugepage_subpool_put_pages(spool, 1);
1922 	vma_end_reservation(h, vma, addr);
1923 	return ERR_PTR(-ENOSPC);
1924 }
1925 
1926 /*
1927  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1928  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1929  * where no ERR_VALUE is expected to be returned.
1930  */
1931 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1932 				unsigned long addr, int avoid_reserve)
1933 {
1934 	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1935 	if (IS_ERR(page))
1936 		page = NULL;
1937 	return page;
1938 }
1939 
1940 int __weak alloc_bootmem_huge_page(struct hstate *h)
1941 {
1942 	struct huge_bootmem_page *m;
1943 	int nr_nodes, node;
1944 
1945 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1946 		void *addr;
1947 
1948 		addr = memblock_virt_alloc_try_nid_nopanic(
1949 				huge_page_size(h), huge_page_size(h),
1950 				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1951 		if (addr) {
1952 			/*
1953 			 * Use the beginning of the huge page to store the
1954 			 * huge_bootmem_page struct (until gather_bootmem
1955 			 * puts them into the mem_map).
1956 			 */
1957 			m = addr;
1958 			goto found;
1959 		}
1960 	}
1961 	return 0;
1962 
1963 found:
1964 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1965 	/* Put them into a private list first because mem_map is not up yet */
1966 	list_add(&m->list, &huge_boot_pages);
1967 	m->hstate = h;
1968 	return 1;
1969 }
1970 
1971 static void __init prep_compound_huge_page(struct page *page,
1972 		unsigned int order)
1973 {
1974 	if (unlikely(order > (MAX_ORDER - 1)))
1975 		prep_compound_gigantic_page(page, order);
1976 	else
1977 		prep_compound_page(page, order);
1978 }
1979 
1980 /* Put bootmem huge pages into the standard lists after mem_map is up */
1981 static void __init gather_bootmem_prealloc(void)
1982 {
1983 	struct huge_bootmem_page *m;
1984 
1985 	list_for_each_entry(m, &huge_boot_pages, list) {
1986 		struct hstate *h = m->hstate;
1987 		struct page *page;
1988 
1989 #ifdef CONFIG_HIGHMEM
1990 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
1991 		memblock_free_late(__pa(m),
1992 				   sizeof(struct huge_bootmem_page));
1993 #else
1994 		page = virt_to_page(m);
1995 #endif
1996 		WARN_ON(page_count(page) != 1);
1997 		prep_compound_huge_page(page, h->order);
1998 		WARN_ON(PageReserved(page));
1999 		prep_new_huge_page(h, page, page_to_nid(page));
2000 		/*
2001 		 * If we had gigantic hugepages allocated at boot time, we need
2002 		 * to restore the 'stolen' pages to totalram_pages in order to
2003 	 * fix confusing memory reports from free(1) and other
2004 		 * side-effects, like CommitLimit going negative.
2005 		 */
2006 		if (hstate_is_gigantic(h))
2007 			adjust_managed_page_count(page, 1 << h->order);
2008 	}
2009 }
2010 
2011 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2012 {
2013 	unsigned long i;
2014 
2015 	for (i = 0; i < h->max_huge_pages; ++i) {
2016 		if (hstate_is_gigantic(h)) {
2017 			if (!alloc_bootmem_huge_page(h))
2018 				break;
2019 		} else if (!alloc_fresh_huge_page(h,
2020 					 &node_states[N_MEMORY]))
2021 			break;
2022 	}
2023 	h->max_huge_pages = i;
2024 }
2025 
2026 static void __init hugetlb_init_hstates(void)
2027 {
2028 	struct hstate *h;
2029 
2030 	for_each_hstate(h) {
2031 		if (minimum_order > huge_page_order(h))
2032 			minimum_order = huge_page_order(h);
2033 
2034 		/* oversize hugepages were init'ed in early boot */
2035 		if (!hstate_is_gigantic(h))
2036 			hugetlb_hstate_alloc_pages(h);
2037 	}
2038 	VM_BUG_ON(minimum_order == UINT_MAX);
2039 }
2040 
2041 static char * __init memfmt(char *buf, unsigned long n)
2042 {
2043 	if (n >= (1UL << 30))
2044 		sprintf(buf, "%lu GB", n >> 30);
2045 	else if (n >= (1UL << 20))
2046 		sprintf(buf, "%lu MB", n >> 20);
2047 	else
2048 		sprintf(buf, "%lu KB", n >> 10);
2049 	return buf;
2050 }
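/*
 * Example: for a 2MB huge page size, memfmt(buf, 2UL << 20) produces
 * "2 MB"; for a 1GB gigantic page, memfmt(buf, 1UL << 30) produces
 * "1 GB".
 */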
2051 
2052 static void __init report_hugepages(void)
2053 {
2054 	struct hstate *h;
2055 
2056 	for_each_hstate(h) {
2057 		char buf[32];
2058 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2059 			memfmt(buf, huge_page_size(h)),
2060 			h->free_huge_pages);
2061 	}
2062 }
2063 
2064 #ifdef CONFIG_HIGHMEM
2065 static void try_to_free_low(struct hstate *h, unsigned long count,
2066 						nodemask_t *nodes_allowed)
2067 {
2068 	int i;
2069 
2070 	if (hstate_is_gigantic(h))
2071 		return;
2072 
2073 	for_each_node_mask(i, *nodes_allowed) {
2074 		struct page *page, *next;
2075 		struct list_head *freel = &h->hugepage_freelists[i];
2076 		list_for_each_entry_safe(page, next, freel, lru) {
2077 			if (count >= h->nr_huge_pages)
2078 				return;
2079 			if (PageHighMem(page))
2080 				continue;
2081 			list_del(&page->lru);
2082 			update_and_free_page(h, page);
2083 			h->free_huge_pages--;
2084 			h->free_huge_pages_node[page_to_nid(page)]--;
2085 		}
2086 	}
2087 }
2088 #else
2089 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2090 						nodemask_t *nodes_allowed)
2091 {
2092 }
2093 #endif
2094 
2095 /*
2096  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2097  * balanced by operating on them in a round-robin fashion.
2098  * Returns 1 if an adjustment was made.
2099  */
2100 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2101 				int delta)
2102 {
2103 	int nr_nodes, node;
2104 
2105 	VM_BUG_ON(delta != -1 && delta != 1);
2106 
2107 	if (delta < 0) {
2108 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2109 			if (h->surplus_huge_pages_node[node])
2110 				goto found;
2111 		}
2112 	} else {
2113 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2114 			if (h->surplus_huge_pages_node[node] <
2115 					h->nr_huge_pages_node[node])
2116 				goto found;
2117 		}
2118 	}
2119 	return 0;
2120 
2121 found:
2122 	h->surplus_huge_pages += delta;
2123 	h->surplus_huge_pages_node[node] += delta;
2124 	return 1;
2125 }
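/*
 * For example, while growing the pool set_max_huge_pages() below calls
 * adjust_pool_surplus(h, nodes_allowed, -1) to convert one surplus page
 * into a persistent one, and passes +1 while shrinking so that a
 * persistent page becomes surplus and is freed once its users are done.
 */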
2126 
2127 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2128 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2129 						nodemask_t *nodes_allowed)
2130 {
2131 	unsigned long min_count, ret;
2132 
2133 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
2134 		return h->max_huge_pages;
2135 
2136 	/*
2137 	 * Increase the pool size
2138 	 * First take pages out of surplus state.  Then make up the
2139 	 * remaining difference by allocating fresh huge pages.
2140 	 *
2141 	 * We might race with __alloc_buddy_huge_page() here and be unable
2142 	 * to convert a surplus huge page to a normal huge page. That is
2143 	 * not critical, though; it just means the overall size of the
2144 	 * pool might be one hugepage larger than it needs to be, but
2145 	 * within all the constraints specified by the sysctls.
2146 	 */
2147 	spin_lock(&hugetlb_lock);
2148 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2149 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2150 			break;
2151 	}
2152 
2153 	while (count > persistent_huge_pages(h)) {
2154 		/*
2155 		 * If this allocation races such that we no longer need the
2156 		 * page, free_huge_page will handle it by freeing the page
2157 		 * and reducing the surplus.
2158 		 */
2159 		spin_unlock(&hugetlb_lock);
2160 		if (hstate_is_gigantic(h))
2161 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2162 		else
2163 			ret = alloc_fresh_huge_page(h, nodes_allowed);
2164 		spin_lock(&hugetlb_lock);
2165 		if (!ret)
2166 			goto out;
2167 
2168 		/* Bail for signals. Probably ctrl-c from user */
2169 		if (signal_pending(current))
2170 			goto out;
2171 	}
2172 
2173 	/*
2174 	 * Decrease the pool size
2175 	 * First return free pages to the buddy allocator (being careful
2176 	 * to keep enough around to satisfy reservations).  Then place
2177 	 * pages into surplus state as needed so the pool will shrink
2178 	 * to the desired size as pages become free.
2179 	 *
2180 	 * By placing pages into the surplus state independent of the
2181 	 * overcommit value, we are allowing the surplus pool size to
2182 	 * exceed overcommit. There are few sane options here. Since
2183 	 * __alloc_buddy_huge_page() is checking the global counter,
2184 	 * though, we'll note that we're not allowed to exceed surplus
2185 	 * and won't grow the pool anywhere else. Not until one of the
2186 	 * sysctls is changed, or the surplus pages go out of use.
2187 	 */
2188 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2189 	min_count = max(count, min_count);
2190 	try_to_free_low(h, min_count, nodes_allowed);
2191 	while (min_count < persistent_huge_pages(h)) {
2192 		if (!free_pool_huge_page(h, nodes_allowed, 0))
2193 			break;
2194 		cond_resched_lock(&hugetlb_lock);
2195 	}
2196 	while (count < persistent_huge_pages(h)) {
2197 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2198 			break;
2199 	}
2200 out:
2201 	ret = persistent_huge_pages(h);
2202 	spin_unlock(&hugetlb_lock);
2203 	return ret;
2204 }
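/*
 * Example: both the nr_hugepages sysfs attribute (see below) and the
 * vm.nr_hugepages sysctl funnel into this function, so e.g.
 *
 *	echo 1024 > /proc/sys/vm/nr_hugepages
 *
 * grows (or shrinks) the default hstate's pool to 1024 persistent
 * pages using the two phases described above.
 */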
2205 
2206 #define HSTATE_ATTR_RO(_name) \
2207 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2208 
2209 #define HSTATE_ATTR(_name) \
2210 	static struct kobj_attribute _name##_attr = \
2211 		__ATTR(_name, 0644, _name##_show, _name##_store)
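/*
 * For reference, HSTATE_ATTR(nr_hugepages) expands to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 *
 * pairing the sysfs file with its show/store handlers defined below.
 */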
2212 
2213 static struct kobject *hugepages_kobj;
2214 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2215 
2216 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2217 
2218 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2219 {
2220 	int i;
2221 
2222 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2223 		if (hstate_kobjs[i] == kobj) {
2224 			if (nidp)
2225 				*nidp = NUMA_NO_NODE;
2226 			return &hstates[i];
2227 		}
2228 
2229 	return kobj_to_node_hstate(kobj, nidp);
2230 }
2231 
2232 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2233 					struct kobj_attribute *attr, char *buf)
2234 {
2235 	struct hstate *h;
2236 	unsigned long nr_huge_pages;
2237 	int nid;
2238 
2239 	h = kobj_to_hstate(kobj, &nid);
2240 	if (nid == NUMA_NO_NODE)
2241 		nr_huge_pages = h->nr_huge_pages;
2242 	else
2243 		nr_huge_pages = h->nr_huge_pages_node[nid];
2244 
2245 	return sprintf(buf, "%lu\n", nr_huge_pages);
2246 }
2247 
2248 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2249 					   struct hstate *h, int nid,
2250 					   unsigned long count, size_t len)
2251 {
2252 	int err;
2253 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2254 
2255 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2256 		err = -EINVAL;
2257 		goto out;
2258 	}
2259 
2260 	if (nid == NUMA_NO_NODE) {
2261 		/*
2262 		 * global hstate attribute
2263 		 */
2264 		if (!(obey_mempolicy &&
2265 				init_nodemask_of_mempolicy(nodes_allowed))) {
2266 			NODEMASK_FREE(nodes_allowed);
2267 			nodes_allowed = &node_states[N_MEMORY];
2268 		}
2269 	} else if (nodes_allowed) {
2270 		/*
2271 		 * per node hstate attribute: adjust count to global,
2272 		 * but restrict alloc/free to the specified node.
2273 		 */
2274 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2275 		init_nodemask_of_node(nodes_allowed, nid);
2276 	} else
2277 		nodes_allowed = &node_states[N_MEMORY];
2278 
2279 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2280 
2281 	if (nodes_allowed != &node_states[N_MEMORY])
2282 		NODEMASK_FREE(nodes_allowed);
2283 
2284 	return len;
2285 out:
2286 	NODEMASK_FREE(nodes_allowed);
2287 	return err;
2288 }
2289 
2290 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2291 					 struct kobject *kobj, const char *buf,
2292 					 size_t len)
2293 {
2294 	struct hstate *h;
2295 	unsigned long count;
2296 	int nid;
2297 	int err;
2298 
2299 	err = kstrtoul(buf, 10, &count);
2300 	if (err)
2301 		return err;
2302 
2303 	h = kobj_to_hstate(kobj, &nid);
2304 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2305 }
2306 
2307 static ssize_t nr_hugepages_show(struct kobject *kobj,
2308 				       struct kobj_attribute *attr, char *buf)
2309 {
2310 	return nr_hugepages_show_common(kobj, attr, buf);
2311 }
2312 
2313 static ssize_t nr_hugepages_store(struct kobject *kobj,
2314 	       struct kobj_attribute *attr, const char *buf, size_t len)
2315 {
2316 	return nr_hugepages_store_common(false, kobj, buf, len);
2317 }
2318 HSTATE_ATTR(nr_hugepages);
2319 
2320 #ifdef CONFIG_NUMA
2321 
2322 /*
2323  * hstate attribute for optionally mempolicy-based constraint on persistent
2324  * huge page alloc/free.
2325  */
2326 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2327 				       struct kobj_attribute *attr, char *buf)
2328 {
2329 	return nr_hugepages_show_common(kobj, attr, buf);
2330 }
2331 
2332 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2333 	       struct kobj_attribute *attr, const char *buf, size_t len)
2334 {
2335 	return nr_hugepages_store_common(true, kobj, buf, len);
2336 }
2337 HSTATE_ATTR(nr_hugepages_mempolicy);
2338 #endif
2339 
2340 
2341 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2342 					struct kobj_attribute *attr, char *buf)
2343 {
2344 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2345 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2346 }
2347 
2348 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2349 		struct kobj_attribute *attr, const char *buf, size_t count)
2350 {
2351 	int err;
2352 	unsigned long input;
2353 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2354 
2355 	if (hstate_is_gigantic(h))
2356 		return -EINVAL;
2357 
2358 	err = kstrtoul(buf, 10, &input);
2359 	if (err)
2360 		return err;
2361 
2362 	spin_lock(&hugetlb_lock);
2363 	h->nr_overcommit_huge_pages = input;
2364 	spin_unlock(&hugetlb_lock);
2365 
2366 	return count;
2367 }
2368 HSTATE_ATTR(nr_overcommit_hugepages);
2369 
2370 static ssize_t free_hugepages_show(struct kobject *kobj,
2371 					struct kobj_attribute *attr, char *buf)
2372 {
2373 	struct hstate *h;
2374 	unsigned long free_huge_pages;
2375 	int nid;
2376 
2377 	h = kobj_to_hstate(kobj, &nid);
2378 	if (nid == NUMA_NO_NODE)
2379 		free_huge_pages = h->free_huge_pages;
2380 	else
2381 		free_huge_pages = h->free_huge_pages_node[nid];
2382 
2383 	return sprintf(buf, "%lu\n", free_huge_pages);
2384 }
2385 HSTATE_ATTR_RO(free_hugepages);
2386 
2387 static ssize_t resv_hugepages_show(struct kobject *kobj,
2388 					struct kobj_attribute *attr, char *buf)
2389 {
2390 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2391 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2392 }
2393 HSTATE_ATTR_RO(resv_hugepages);
2394 
2395 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2396 					struct kobj_attribute *attr, char *buf)
2397 {
2398 	struct hstate *h;
2399 	unsigned long surplus_huge_pages;
2400 	int nid;
2401 
2402 	h = kobj_to_hstate(kobj, &nid);
2403 	if (nid == NUMA_NO_NODE)
2404 		surplus_huge_pages = h->surplus_huge_pages;
2405 	else
2406 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
2407 
2408 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2409 }
2410 HSTATE_ATTR_RO(surplus_hugepages);
2411 
2412 static struct attribute *hstate_attrs[] = {
2413 	&nr_hugepages_attr.attr,
2414 	&nr_overcommit_hugepages_attr.attr,
2415 	&free_hugepages_attr.attr,
2416 	&resv_hugepages_attr.attr,
2417 	&surplus_hugepages_attr.attr,
2418 #ifdef CONFIG_NUMA
2419 	&nr_hugepages_mempolicy_attr.attr,
2420 #endif
2421 	NULL,
2422 };
2423 
2424 static struct attribute_group hstate_attr_group = {
2425 	.attrs = hstate_attrs,
2426 };
2427 
2428 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2429 				    struct kobject **hstate_kobjs,
2430 				    struct attribute_group *hstate_attr_group)
2431 {
2432 	int retval;
2433 	int hi = hstate_index(h);
2434 
2435 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2436 	if (!hstate_kobjs[hi])
2437 		return -ENOMEM;
2438 
2439 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2440 	if (retval)
2441 		kobject_put(hstate_kobjs[hi]);
2442 
2443 	return retval;
2444 }
2445 
2446 static void __init hugetlb_sysfs_init(void)
2447 {
2448 	struct hstate *h;
2449 	int err;
2450 
2451 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2452 	if (!hugepages_kobj)
2453 		return;
2454 
2455 	for_each_hstate(h) {
2456 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2457 					 hstate_kobjs, &hstate_attr_group);
2458 		if (err)
2459 			pr_err("Hugetlb: Unable to add hstate %s", h->name);
2460 	}
2461 }
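/*
 * The resulting sysfs layout, assuming a single 2048kB hstate, is:
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */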
2462 
2463 #ifdef CONFIG_NUMA
2464 
2465 /*
2466  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2467  * with node devices in node_devices[] using a parallel array.  The array
2468  * index of a node device or _hstate == node id.
2469  * This is here to avoid any static dependency of the node device driver, in
2470  * the base kernel, on the hugetlb module.
2471  */
2472 struct node_hstate {
2473 	struct kobject		*hugepages_kobj;
2474 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
2475 };
2476 static struct node_hstate node_hstates[MAX_NUMNODES];
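/*
 * The per-node attributes registered through this table appear under
 * the node devices, e.g. for a 2048kB hstate on node 0:
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */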
2477 
2478 /*
2479  * A subset of global hstate attributes for node devices
2480  */
2481 static struct attribute *per_node_hstate_attrs[] = {
2482 	&nr_hugepages_attr.attr,
2483 	&free_hugepages_attr.attr,
2484 	&surplus_hugepages_attr.attr,
2485 	NULL,
2486 };
2487 
2488 static struct attribute_group per_node_hstate_attr_group = {
2489 	.attrs = per_node_hstate_attrs,
2490 };
2491 
2492 /*
2493  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2494  * Returns node id via non-NULL nidp.
2495  */
2496 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2497 {
2498 	int nid;
2499 
2500 	for (nid = 0; nid < nr_node_ids; nid++) {
2501 		struct node_hstate *nhs = &node_hstates[nid];
2502 		int i;
2503 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
2504 			if (nhs->hstate_kobjs[i] == kobj) {
2505 				if (nidp)
2506 					*nidp = nid;
2507 				return &hstates[i];
2508 			}
2509 	}
2510 
2511 	BUG();
2512 	return NULL;
2513 }
2514 
2515 /*
2516  * Unregister hstate attributes from a single node device.
2517  * No-op if no hstate attributes attached.
2518  */
2519 static void hugetlb_unregister_node(struct node *node)
2520 {
2521 	struct hstate *h;
2522 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2523 
2524 	if (!nhs->hugepages_kobj)
2525 		return;		/* no hstate attributes */
2526 
2527 	for_each_hstate(h) {
2528 		int idx = hstate_index(h);
2529 		if (nhs->hstate_kobjs[idx]) {
2530 			kobject_put(nhs->hstate_kobjs[idx]);
2531 			nhs->hstate_kobjs[idx] = NULL;
2532 		}
2533 	}
2534 
2535 	kobject_put(nhs->hugepages_kobj);
2536 	nhs->hugepages_kobj = NULL;
2537 }
2538 
2539 /*
2540  * hugetlb module exit:  unregister hstate attributes from node devices
2541  * that have them.
2542  */
2543 static void hugetlb_unregister_all_nodes(void)
2544 {
2545 	int nid;
2546 
2547 	/*
2548 	 * disable node device registrations.
2549 	 */
2550 	register_hugetlbfs_with_node(NULL, NULL);
2551 
2552 	/*
2553 	 * remove hstate attributes from any nodes that have them.
2554 	 */
2555 	for (nid = 0; nid < nr_node_ids; nid++)
2556 		hugetlb_unregister_node(node_devices[nid]);
2557 }
2558 
2559 /*
2560  * Register hstate attributes for a single node device.
2561  * No-op if attributes already registered.
2562  */
2563 static void hugetlb_register_node(struct node *node)
2564 {
2565 	struct hstate *h;
2566 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2567 	int err;
2568 
2569 	if (nhs->hugepages_kobj)
2570 		return;		/* already allocated */
2571 
2572 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2573 							&node->dev.kobj);
2574 	if (!nhs->hugepages_kobj)
2575 		return;
2576 
2577 	for_each_hstate(h) {
2578 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2579 						nhs->hstate_kobjs,
2580 						&per_node_hstate_attr_group);
2581 		if (err) {
2582 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2583 				h->name, node->dev.id);
2584 			hugetlb_unregister_node(node);
2585 			break;
2586 		}
2587 	}
2588 }
2589 
2590 /*
2591  * hugetlb init time:  register hstate attributes for all registered node
2592  * devices of nodes that have memory.  All on-line nodes should have
2593  * registered their associated device by this time.
2594  */
2595 static void __init hugetlb_register_all_nodes(void)
2596 {
2597 	int nid;
2598 
2599 	for_each_node_state(nid, N_MEMORY) {
2600 		struct node *node = node_devices[nid];
2601 		if (node->dev.id == nid)
2602 			hugetlb_register_node(node);
2603 	}
2604 
2605 	/*
2606 	 * Let the node device driver know we're here so it can
2607 	 * [un]register hstate attributes on node hotplug.
2608 	 */
2609 	register_hugetlbfs_with_node(hugetlb_register_node,
2610 				     hugetlb_unregister_node);
2611 }
2612 #else	/* !CONFIG_NUMA */
2613 
2614 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2615 {
2616 	BUG();
2617 	if (nidp)
2618 		*nidp = -1;
2619 	return NULL;
2620 }
2621 
2622 static void hugetlb_unregister_all_nodes(void) { }
2623 
2624 static void hugetlb_register_all_nodes(void) { }
2625 
2626 #endif
2627 
2628 static void __exit hugetlb_exit(void)
2629 {
2630 	struct hstate *h;
2631 
2632 	hugetlb_unregister_all_nodes();
2633 
2634 	for_each_hstate(h) {
2635 		kobject_put(hstate_kobjs[hstate_index(h)]);
2636 	}
2637 
2638 	kobject_put(hugepages_kobj);
2639 	kfree(hugetlb_fault_mutex_table);
2640 }
2641 module_exit(hugetlb_exit);
2642 
2643 static int __init hugetlb_init(void)
2644 {
2645 	int i;
2646 
2647 	if (!hugepages_supported())
2648 		return 0;
2649 
2650 	if (!size_to_hstate(default_hstate_size)) {
2651 		default_hstate_size = HPAGE_SIZE;
2652 		if (!size_to_hstate(default_hstate_size))
2653 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2654 	}
2655 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2656 	if (default_hstate_max_huge_pages)
2657 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2658 
2659 	hugetlb_init_hstates();
2660 	gather_bootmem_prealloc();
2661 	report_hugepages();
2662 
2663 	hugetlb_sysfs_init();
2664 	hugetlb_register_all_nodes();
2665 	hugetlb_cgroup_file_init();
2666 
2667 #ifdef CONFIG_SMP
2668 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2669 #else
2670 	num_fault_mutexes = 1;
2671 #endif
2672 	hugetlb_fault_mutex_table =
2673 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2674 	BUG_ON(!hugetlb_fault_mutex_table);
2675 
2676 	for (i = 0; i < num_fault_mutexes; i++)
2677 		mutex_init(&hugetlb_fault_mutex_table[i]);
2678 	return 0;
2679 }
2680 module_init(hugetlb_init);
2681 
2682 /* Should be called on processing a hugepagesz=... option */
2683 void __init hugetlb_add_hstate(unsigned int order)
2684 {
2685 	struct hstate *h;
2686 	unsigned long i;
2687 
2688 	if (size_to_hstate(PAGE_SIZE << order)) {
2689 		pr_warning("hugepagesz= specified twice, ignoring\n");
2690 		return;
2691 	}
2692 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2693 	BUG_ON(order == 0);
2694 	h = &hstates[hugetlb_max_hstate++];
2695 	h->order = order;
2696 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2697 	h->nr_huge_pages = 0;
2698 	h->free_huge_pages = 0;
2699 	for (i = 0; i < MAX_NUMNODES; ++i)
2700 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2701 	INIT_LIST_HEAD(&h->hugepage_activelist);
2702 	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2703 	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2704 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2705 					huge_page_size(h)/1024);
2706 
2707 	parsed_hstate = h;
2708 }
2709 
2710 static int __init hugetlb_nrpages_setup(char *s)
2711 {
2712 	unsigned long *mhp;
2713 	static unsigned long *last_mhp;
2714 
2715 	/*
2716 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2717 	 * so this hugepages= parameter goes to the "default hstate".
2718 	 */
2719 	if (!hugetlb_max_hstate)
2720 		mhp = &default_hstate_max_huge_pages;
2721 	else
2722 		mhp = &parsed_hstate->max_huge_pages;
2723 
2724 	if (mhp == last_mhp) {
2725 		pr_warning("hugepages= specified twice without "
2726 			   "interleaving hugepagesz=, ignoring\n");
2727 		return 1;
2728 	}
2729 
2730 	if (sscanf(s, "%lu", mhp) <= 0)
2731 		*mhp = 0;
2732 
2733 	/*
2734 	 * Global state is always initialized later in hugetlb_init.
2735 	 * But we need to allocate >= MAX_ORDER hstates here early to still
2736 	 * use the bootmem allocator.
2737 	 */
2738 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2739 		hugetlb_hstate_alloc_pages(parsed_hstate);
2740 
2741 	last_mhp = mhp;
2742 
2743 	return 1;
2744 }
2745 __setup("hugepages=", hugetlb_nrpages_setup);
2746 
2747 static int __init hugetlb_default_setup(char *s)
2748 {
2749 	default_hstate_size = memparse(s, &s);
2750 	return 1;
2751 }
2752 __setup("default_hugepagesz=", hugetlb_default_setup);
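/*
 * Example boot command line combining the options above: each
 * hugepages= count applies to the most recently parsed hugepagesz=, so
 *
 *	hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * pre-allocates 512 2MB pages and 4 1GB pages, while
 * default_hugepagesz= selects which of the sizes becomes the default
 * hstate.
 */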
2753 
2754 static unsigned int cpuset_mems_nr(unsigned int *array)
2755 {
2756 	int node;
2757 	unsigned int nr = 0;
2758 
2759 	for_each_node_mask(node, cpuset_current_mems_allowed)
2760 		nr += array[node];
2761 
2762 	return nr;
2763 }
2764 
2765 #ifdef CONFIG_SYSCTL
2766 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2767 			 struct ctl_table *table, int write,
2768 			 void __user *buffer, size_t *length, loff_t *ppos)
2769 {
2770 	struct hstate *h = &default_hstate;
2771 	unsigned long tmp = h->max_huge_pages;
2772 	int ret;
2773 
2774 	if (!hugepages_supported())
2775 		return -ENOTSUPP;
2776 
2777 	table->data = &tmp;
2778 	table->maxlen = sizeof(unsigned long);
2779 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2780 	if (ret)
2781 		goto out;
2782 
2783 	if (write)
2784 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
2785 						  NUMA_NO_NODE, tmp, *length);
2786 out:
2787 	return ret;
2788 }
2789 
2790 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2791 			  void __user *buffer, size_t *length, loff_t *ppos)
2792 {
2793 
2794 	return hugetlb_sysctl_handler_common(false, table, write,
2795 							buffer, length, ppos);
2796 }
2797 
2798 #ifdef CONFIG_NUMA
2799 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2800 			  void __user *buffer, size_t *length, loff_t *ppos)
2801 {
2802 	return hugetlb_sysctl_handler_common(true, table, write,
2803 							buffer, length, ppos);
2804 }
2805 #endif /* CONFIG_NUMA */
2806 
2807 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2808 			void __user *buffer,
2809 			size_t *length, loff_t *ppos)
2810 {
2811 	struct hstate *h = &default_hstate;
2812 	unsigned long tmp;
2813 	int ret;
2814 
2815 	if (!hugepages_supported())
2816 		return -ENOTSUPP;
2817 
2818 	tmp = h->nr_overcommit_huge_pages;
2819 
2820 	if (write && hstate_is_gigantic(h))
2821 		return -EINVAL;
2822 
2823 	table->data = &tmp;
2824 	table->maxlen = sizeof(unsigned long);
2825 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2826 	if (ret)
2827 		goto out;
2828 
2829 	if (write) {
2830 		spin_lock(&hugetlb_lock);
2831 		h->nr_overcommit_huge_pages = tmp;
2832 		spin_unlock(&hugetlb_lock);
2833 	}
2834 out:
2835 	return ret;
2836 }
2837 
2838 #endif /* CONFIG_SYSCTL */
2839 
2840 void hugetlb_report_meminfo(struct seq_file *m)
2841 {
2842 	struct hstate *h = &default_hstate;
2843 	if (!hugepages_supported())
2844 		return;
2845 	seq_printf(m,
2846 			"HugePages_Total:   %5lu\n"
2847 			"HugePages_Free:    %5lu\n"
2848 			"HugePages_Rsvd:    %5lu\n"
2849 			"HugePages_Surp:    %5lu\n"
2850 			"Hugepagesize:   %8lu kB\n",
2851 			h->nr_huge_pages,
2852 			h->free_huge_pages,
2853 			h->resv_huge_pages,
2854 			h->surplus_huge_pages,
2855 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2856 }
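/*
 * Sample /proc/meminfo output produced by the above, for a fully free
 * pool of 1024 2MB pages:
 *
 *	HugePages_Total:    1024
 *	HugePages_Free:     1024
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */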
2857 
2858 int hugetlb_report_node_meminfo(int nid, char *buf)
2859 {
2860 	struct hstate *h = &default_hstate;
2861 	if (!hugepages_supported())
2862 		return 0;
2863 	return sprintf(buf,
2864 		"Node %d HugePages_Total: %5u\n"
2865 		"Node %d HugePages_Free:  %5u\n"
2866 		"Node %d HugePages_Surp:  %5u\n",
2867 		nid, h->nr_huge_pages_node[nid],
2868 		nid, h->free_huge_pages_node[nid],
2869 		nid, h->surplus_huge_pages_node[nid]);
2870 }
2871 
2872 void hugetlb_show_meminfo(void)
2873 {
2874 	struct hstate *h;
2875 	int nid;
2876 
2877 	if (!hugepages_supported())
2878 		return;
2879 
2880 	for_each_node_state(nid, N_MEMORY)
2881 		for_each_hstate(h)
2882 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2883 				nid,
2884 				h->nr_huge_pages_node[nid],
2885 				h->free_huge_pages_node[nid],
2886 				h->surplus_huge_pages_node[nid],
2887 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2888 }
2889 
2890 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2891 {
2892 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2893 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2894 }
2895 
2896 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2897 unsigned long hugetlb_total_pages(void)
2898 {
2899 	struct hstate *h;
2900 	unsigned long nr_total_pages = 0;
2901 
2902 	for_each_hstate(h)
2903 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2904 	return nr_total_pages;
2905 }
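/*
 * Example: with 512 huge pages in a 2MB hstate on a 4kB PAGE_SIZE
 * system, pages_per_huge_page() is 512, so that hstate contributes
 * 512 * 512 = 262144 base pages (1GB) to the total.
 */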
2906 
2907 static int hugetlb_acct_memory(struct hstate *h, long delta)
2908 {
2909 	int ret = -ENOMEM;
2910 
2911 	spin_lock(&hugetlb_lock);
2912 	/*
2913 	 * When cpuset is configured, it breaks the strict hugetlb page
2914 	 * reservation as the accounting is done on a global variable. Such
2915 	 * reservation is completely rubbish in the presence of cpuset because
2916 	 * the reservation is not checked against page availability for the
2917 	 * current cpuset. Applications can still be OOM'ed by the kernel
2918 	 * for lack of free huge pages in the cpuset that the task is in.
2919 	 * Attempting to enforce strict accounting with cpuset is almost
2920 	 * impossible (or too ugly) because cpusets are so fluid that
2921 	 * tasks or memory nodes can be dynamically moved between cpusets.
2922 	 *
2923 	 * The change of semantics for shared hugetlb mapping with cpuset is
2924 	 * undesirable. However, in order to preserve some of the semantics,
2925 	 * we fall back to checking against current free page availability as
2926 	 * a best-effort attempt, hopefully minimizing the impact of the
2927 	 * semantics change that cpuset introduces.
2928 	 */
2929 	if (delta > 0) {
2930 		if (gather_surplus_pages(h, delta) < 0)
2931 			goto out;
2932 
2933 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2934 			return_unused_surplus_pages(h, delta);
2935 			goto out;
2936 		}
2937 	}
2938 
2939 	ret = 0;
2940 	if (delta < 0)
2941 		return_unused_surplus_pages(h, (unsigned long) -delta);
2942 
2943 out:
2944 	spin_unlock(&hugetlb_lock);
2945 	return ret;
2946 }
2947 
2948 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2949 {
2950 	struct resv_map *resv = vma_resv_map(vma);
2951 
2952 	/*
2953 	 * This new VMA should share its sibling's reservation map if present.
2954 	 * The VMA will only ever have a valid reservation map pointer where
2955 	 * it is being copied for another still existing VMA.  As that VMA
2956 	 * has a reference to the reservation map it cannot disappear until
2957 	 * after this open call completes.  It is therefore safe to take a
2958 	 * new reference here without additional locking.
2959 	 */
2960 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2961 		kref_get(&resv->refs);
2962 }
2963 
2964 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2965 {
2966 	struct hstate *h = hstate_vma(vma);
2967 	struct resv_map *resv = vma_resv_map(vma);
2968 	struct hugepage_subpool *spool = subpool_vma(vma);
2969 	unsigned long reserve, start, end;
2970 	long gbl_reserve;
2971 
2972 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2973 		return;
2974 
2975 	start = vma_hugecache_offset(h, vma, vma->vm_start);
2976 	end = vma_hugecache_offset(h, vma, vma->vm_end);
2977 
2978 	reserve = (end - start) - region_count(resv, start, end);
2979 
2980 	kref_put(&resv->refs, resv_map_release);
2981 
2982 	if (reserve) {
2983 		/*
2984 		 * Decrement reserve counts.  The global reserve count may be
2985 		 * adjusted if the subpool has a minimum size.
2986 		 */
2987 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2988 		hugetlb_acct_memory(h, -gbl_reserve);
2989 	}
2990 }
2991 
2992 /*
2993  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2994  * handle_mm_fault() to try to instantiate regular-sized pages in the
2995  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2996  * this far.
2997  */
2998 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2999 {
3000 	BUG();
3001 	return 0;
3002 }
3003 
3004 const struct vm_operations_struct hugetlb_vm_ops = {
3005 	.fault = hugetlb_vm_op_fault,
3006 	.open = hugetlb_vm_op_open,
3007 	.close = hugetlb_vm_op_close,
3008 };
3009 
3010 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3011 				int writable)
3012 {
3013 	pte_t entry;
3014 
3015 	if (writable) {
3016 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3017 					 vma->vm_page_prot)));
3018 	} else {
3019 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3020 					   vma->vm_page_prot));
3021 	}
3022 	entry = pte_mkyoung(entry);
3023 	entry = pte_mkhuge(entry);
3024 	entry = arch_make_huge_pte(entry, vma, page, writable);
3025 
3026 	return entry;
3027 }
3028 
3029 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3030 				   unsigned long address, pte_t *ptep)
3031 {
3032 	pte_t entry;
3033 
3034 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3035 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3036 		update_mmu_cache(vma, address, ptep);
3037 }
3038 
3039 static int is_hugetlb_entry_migration(pte_t pte)
3040 {
3041 	swp_entry_t swp;
3042 
3043 	if (huge_pte_none(pte) || pte_present(pte))
3044 		return 0;
3045 	swp = pte_to_swp_entry(pte);
3046 	if (non_swap_entry(swp) && is_migration_entry(swp))
3047 		return 1;
3048 	else
3049 		return 0;
3050 }
3051 
3052 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3053 {
3054 	swp_entry_t swp;
3055 
3056 	if (huge_pte_none(pte) || pte_present(pte))
3057 		return 0;
3058 	swp = pte_to_swp_entry(pte);
3059 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3060 		return 1;
3061 	else
3062 		return 0;
3063 }
3064 
3065 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3066 			    struct vm_area_struct *vma)
3067 {
3068 	pte_t *src_pte, *dst_pte, entry;
3069 	struct page *ptepage;
3070 	unsigned long addr;
3071 	int cow;
3072 	struct hstate *h = hstate_vma(vma);
3073 	unsigned long sz = huge_page_size(h);
3074 	unsigned long mmun_start;	/* For mmu_notifiers */
3075 	unsigned long mmun_end;		/* For mmu_notifiers */
3076 	int ret = 0;
3077 
3078 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3079 
3080 	mmun_start = vma->vm_start;
3081 	mmun_end = vma->vm_end;
3082 	if (cow)
3083 		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3084 
3085 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3086 		spinlock_t *src_ptl, *dst_ptl;
3087 		src_pte = huge_pte_offset(src, addr);
3088 		if (!src_pte)
3089 			continue;
3090 		dst_pte = huge_pte_alloc(dst, addr, sz);
3091 		if (!dst_pte) {
3092 			ret = -ENOMEM;
3093 			break;
3094 		}
3095 
3096 		/* If the pagetables are shared don't copy or take references */
3097 		if (dst_pte == src_pte)
3098 			continue;
3099 
3100 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3101 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3102 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3103 		entry = huge_ptep_get(src_pte);
3104 		if (huge_pte_none(entry)) { /* skip none entry */
3105 			;
3106 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
3107 				    is_hugetlb_entry_hwpoisoned(entry))) {
3108 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
3109 
3110 			if (is_write_migration_entry(swp_entry) && cow) {
3111 				/*
3112 				 * COW mappings require pages in both
3113 				 * parent and child to be marked read-only.
3114 				 */
3115 				make_migration_entry_read(&swp_entry);
3116 				entry = swp_entry_to_pte(swp_entry);
3117 				set_huge_pte_at(src, addr, src_pte, entry);
3118 			}
3119 			set_huge_pte_at(dst, addr, dst_pte, entry);
3120 		} else {
3121 			if (cow) {
3122 				huge_ptep_set_wrprotect(src, addr, src_pte);
3123 				mmu_notifier_invalidate_range(src, mmun_start,
3124 								   mmun_end);
3125 			}
3126 			entry = huge_ptep_get(src_pte);
3127 			ptepage = pte_page(entry);
3128 			get_page(ptepage);
3129 			page_dup_rmap(ptepage);
3130 			set_huge_pte_at(dst, addr, dst_pte, entry);
3131 			hugetlb_count_add(pages_per_huge_page(h), dst);
3132 		}
3133 		spin_unlock(src_ptl);
3134 		spin_unlock(dst_ptl);
3135 	}
3136 
3137 	if (cow)
3138 		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3139 
3140 	return ret;
3141 }
3142 
3143 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3144 			    unsigned long start, unsigned long end,
3145 			    struct page *ref_page)
3146 {
3147 	int force_flush = 0;
3148 	struct mm_struct *mm = vma->vm_mm;
3149 	unsigned long address;
3150 	pte_t *ptep;
3151 	pte_t pte;
3152 	spinlock_t *ptl;
3153 	struct page *page;
3154 	struct hstate *h = hstate_vma(vma);
3155 	unsigned long sz = huge_page_size(h);
3156 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
3157 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
3158 
3159 	WARN_ON(!is_vm_hugetlb_page(vma));
3160 	BUG_ON(start & ~huge_page_mask(h));
3161 	BUG_ON(end & ~huge_page_mask(h));
3162 
3163 	tlb_start_vma(tlb, vma);
3164 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3165 	address = start;
3166 again:
3167 	for (; address < end; address += sz) {
3168 		ptep = huge_pte_offset(mm, address);
3169 		if (!ptep)
3170 			continue;
3171 
3172 		ptl = huge_pte_lock(h, mm, ptep);
3173 		if (huge_pmd_unshare(mm, &address, ptep))
3174 			goto unlock;
3175 
3176 		pte = huge_ptep_get(ptep);
3177 		if (huge_pte_none(pte))
3178 			goto unlock;
3179 
3180 		/*
3181 		 * A migrating or HWPoisoned hugepage is already unmapped
3182 		 * and its refcount dropped, so just clear the pte here.
3183 		 */
3184 		if (unlikely(!pte_present(pte))) {
3185 			huge_pte_clear(mm, address, ptep);
3186 			goto unlock;
3187 		}
3188 
3189 		page = pte_page(pte);
3190 		/*
3191 		 * If a reference page is supplied, it is because a specific
3192 		 * page is being unmapped, not a range. Ensure the page we
3193 		 * are about to unmap is the actual page of interest.
3194 		 */
3195 		if (ref_page) {
3196 			if (page != ref_page)
3197 				goto unlock;
3198 
3199 			/*
3200 			 * Mark the VMA as having unmapped its page so that
3201 			 * future faults in this VMA will fail rather than
3202 			 * looking like data was lost
3203 			 */
3204 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3205 		}
3206 
3207 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3208 		tlb_remove_tlb_entry(tlb, ptep, address);
3209 		if (huge_pte_dirty(pte))
3210 			set_page_dirty(page);
3211 
3212 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3213 		page_remove_rmap(page);
3214 		force_flush = !__tlb_remove_page(tlb, page);
3215 		if (force_flush) {
3216 			address += sz;
3217 			spin_unlock(ptl);
3218 			break;
3219 		}
3220 		/* Bail out after unmapping reference page if supplied */
3221 		if (ref_page) {
3222 			spin_unlock(ptl);
3223 			break;
3224 		}
3225 unlock:
3226 		spin_unlock(ptl);
3227 	}
3228 	/*
3229 	 * mmu_gather ran out of room to batch pages, so we broke out of
3230 	 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3231 	 * and page-free while holding it.
3232 	 */
3233 	if (force_flush) {
3234 		force_flush = 0;
3235 		tlb_flush_mmu(tlb);
3236 		if (address < end && !ref_page)
3237 			goto again;
3238 	}
3239 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3240 	tlb_end_vma(tlb, vma);
3241 }
3242 
3243 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3244 			  struct vm_area_struct *vma, unsigned long start,
3245 			  unsigned long end, struct page *ref_page)
3246 {
3247 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3248 
3249 	/*
3250 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3251 	 * test will fail on a vma being torn down, and not grab a page table
3252 	 * on its way out.  We're lucky that the flag has such an appropriate
3253 	 * name, and can in fact be safely cleared here. We could clear it
3254 	 * before the __unmap_hugepage_range above, but all that's necessary
3255 	 * is to clear it before releasing the i_mmap_rwsem. This works
3256 	 * because in the context this is called, the VMA is about to be
3257 	 * destroyed and the i_mmap_rwsem is held.
3258 	 */
3259 	vma->vm_flags &= ~VM_MAYSHARE;
3260 }
3261 
3262 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3263 			  unsigned long end, struct page *ref_page)
3264 {
3265 	struct mm_struct *mm;
3266 	struct mmu_gather tlb;
3267 
3268 	mm = vma->vm_mm;
3269 
3270 	tlb_gather_mmu(&tlb, mm, start, end);
3271 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3272 	tlb_finish_mmu(&tlb, start, end);
3273 }
3274 
3275 /*
3276  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3277  * mapping it owns the reserve page for. The intention is to unmap the page
3278  * from other VMAs and let the children be SIGKILLed if they are faulting the
3279  * same region.
3280  */
3281 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3282 			      struct page *page, unsigned long address)
3283 {
3284 	struct hstate *h = hstate_vma(vma);
3285 	struct vm_area_struct *iter_vma;
3286 	struct address_space *mapping;
3287 	pgoff_t pgoff;
3288 
3289 	/*
3290 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3291 	 * from page cache lookup which is in HPAGE_SIZE units.
3292 	 */
3293 	address = address & huge_page_mask(h);
3294 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3295 			vma->vm_pgoff;
3296 	mapping = file_inode(vma->vm_file)->i_mapping;
3297 
3298 	/*
3299 	 * Take the mapping lock for the duration of the table walk. As
3300 	 * this mapping should be shared between all the VMAs,
3301  * __unmap_hugepage_range() is called with the lock already held.
3302 	 */
3303 	i_mmap_lock_write(mapping);
3304 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3305 		/* Do not unmap the current VMA */
3306 		if (iter_vma == vma)
3307 			continue;
3308 
3309 		/*
3310 		 * Shared VMAs have their own reserves and do not affect
3311 		 * MAP_PRIVATE accounting, but it is possible that a shared
3312 		 * VMA is using the same page, so check and skip such VMAs.
3313 		 */
3314 		if (iter_vma->vm_flags & VM_MAYSHARE)
3315 			continue;
3316 
3317 		/*
3318 		 * Unmap the page from other VMAs without their own reserves.
3319 		 * They get marked to be SIGKILLed if they fault in these
3320 		 * areas. This is because a future no-page fault on this VMA
3321 		 * could insert a zeroed page instead of the data existing
3322 		 * from the time of fork. This would look like data corruption
3323 		 */
3324 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3325 			unmap_hugepage_range(iter_vma, address,
3326 					     address + huge_page_size(h), page);
3327 	}
3328 	i_mmap_unlock_write(mapping);
3329 }
3330 
3331 /*
3332  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3333  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3334  * cannot race with other handlers or page migration.
3335  * Keep the pte_same checks anyway to make transition from the mutex easier.
3336  */
3337 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3338 			unsigned long address, pte_t *ptep, pte_t pte,
3339 			struct page *pagecache_page, spinlock_t *ptl)
3340 {
3341 	struct hstate *h = hstate_vma(vma);
3342 	struct page *old_page, *new_page;
3343 	int ret = 0, outside_reserve = 0;
3344 	unsigned long mmun_start;	/* For mmu_notifiers */
3345 	unsigned long mmun_end;		/* For mmu_notifiers */
3346 
3347 	old_page = pte_page(pte);
3348 
3349 retry_avoidcopy:
3350 	/* If no-one else is actually using this page, avoid the copy
3351 	 * and just make the page writable */
3352 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3353 		page_move_anon_rmap(old_page, vma, address);
3354 		set_huge_ptep_writable(vma, address, ptep);
3355 		return 0;
3356 	}
3357 
3358 	/*
3359 	 * If the process that created a MAP_PRIVATE mapping is about to
3360 	 * perform a COW due to a shared page count, attempt to satisfy
3361 	 * the allocation without using the existing reserves. The pagecache
3362 	 * page is used to determine if the reserve at this address was
3363 	 * consumed or not. If reserves were used, a partial faulted mapping
3364 	 * at the time of fork() could consume its reserves on COW instead
3365 	 * of the full address range.
3366 	 */
3367 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3368 			old_page != pagecache_page)
3369 		outside_reserve = 1;
3370 
3371 	page_cache_get(old_page);
3372 
3373 	/*
3374 	 * Drop page table lock as buddy allocator may be called. It will
3375 	 * be acquired again before returning to the caller, as expected.
3376 	 */
3377 	spin_unlock(ptl);
3378 	new_page = alloc_huge_page(vma, address, outside_reserve);
3379 
3380 	if (IS_ERR(new_page)) {
3381 		/*
3382 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
3383 		 * it is due to references held by a child and an insufficient
3384 		 * huge page pool. To guarantee the original mapper's
3385 		 * reliability, unmap the page from child processes. The child
3386 		 * may get SIGKILLed if it later faults.
3387 		 */
3388 		if (outside_reserve) {
3389 			page_cache_release(old_page);
3390 			BUG_ON(huge_pte_none(pte));
3391 			unmap_ref_private(mm, vma, old_page, address);
3392 			BUG_ON(huge_pte_none(pte));
3393 			spin_lock(ptl);
3394 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3395 			if (likely(ptep &&
3396 				   pte_same(huge_ptep_get(ptep), pte)))
3397 				goto retry_avoidcopy;
3398 			/*
3399 			 * A race occurred while re-acquiring the page
3400 			 * table lock, and our job is done.
3401 			 */
3402 			return 0;
3403 		}
3404 
3405 		ret = (PTR_ERR(new_page) == -ENOMEM) ?
3406 			VM_FAULT_OOM : VM_FAULT_SIGBUS;
3407 		goto out_release_old;
3408 	}
3409 
3410 	/*
3411 	 * When the original hugepage is a shared one, it does not have
3412 	 * an anon_vma prepared.
3413 	 */
3414 	if (unlikely(anon_vma_prepare(vma))) {
3415 		ret = VM_FAULT_OOM;
3416 		goto out_release_all;
3417 	}
3418 
3419 	copy_user_huge_page(new_page, old_page, address, vma,
3420 			    pages_per_huge_page(h));
3421 	__SetPageUptodate(new_page);
3422 	set_page_huge_active(new_page);
3423 
3424 	mmun_start = address & huge_page_mask(h);
3425 	mmun_end = mmun_start + huge_page_size(h);
3426 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3427 
3428 	/*
3429 	 * Retake the page table lock to check for racing updates
3430 	 * before the page tables are altered
3431 	 */
3432 	spin_lock(ptl);
3433 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3434 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3435 		ClearPagePrivate(new_page);
3436 
3437 		/* Break COW */
3438 		huge_ptep_clear_flush(vma, address, ptep);
3439 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3440 		set_huge_pte_at(mm, address, ptep,
3441 				make_huge_pte(vma, new_page, 1));
3442 		page_remove_rmap(old_page);
3443 		hugepage_add_new_anon_rmap(new_page, vma, address);
3444 		/* Make the old page be freed below */
3445 		new_page = old_page;
3446 	}
3447 	spin_unlock(ptl);
3448 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3449 out_release_all:
3450 	page_cache_release(new_page);
3451 out_release_old:
3452 	page_cache_release(old_page);
3453 
3454 	spin_lock(ptl); /* Caller expects lock to be held */
3455 	return ret;
3456 }
3457 
3458 /* Return the pagecache page at a given address within a VMA */
3459 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3460 			struct vm_area_struct *vma, unsigned long address)
3461 {
3462 	struct address_space *mapping;
3463 	pgoff_t idx;
3464 
3465 	mapping = vma->vm_file->f_mapping;
3466 	idx = vma_hugecache_offset(h, vma, address);
3467 
3468 	return find_lock_page(mapping, idx);
3469 }
3470 
3471 /*
3472  * Return whether there is a pagecache page to back given address within VMA.
3473  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3474  */
3475 static bool hugetlbfs_pagecache_present(struct hstate *h,
3476 			struct vm_area_struct *vma, unsigned long address)
3477 {
3478 	struct address_space *mapping;
3479 	pgoff_t idx;
3480 	struct page *page;
3481 
3482 	mapping = vma->vm_file->f_mapping;
3483 	idx = vma_hugecache_offset(h, vma, address);
3484 
3485 	page = find_get_page(mapping, idx);
3486 	if (page)
3487 		put_page(page);
3488 	return page != NULL;
3489 }
3490 
3491 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3492 			   pgoff_t idx)
3493 {
3494 	struct inode *inode = mapping->host;
3495 	struct hstate *h = hstate_inode(inode);
3496 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3497 
3498 	if (err)
3499 		return err;
3500 	ClearPagePrivate(page);
3501 
3502 	spin_lock(&inode->i_lock);
3503 	inode->i_blocks += blocks_per_huge_page(h);
3504 	spin_unlock(&inode->i_lock);
3505 	return 0;
3506 }
3507 
3508 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3509 			   struct address_space *mapping, pgoff_t idx,
3510 			   unsigned long address, pte_t *ptep, unsigned int flags)
3511 {
3512 	struct hstate *h = hstate_vma(vma);
3513 	int ret = VM_FAULT_SIGBUS;
3514 	int anon_rmap = 0;
3515 	unsigned long size;
3516 	struct page *page;
3517 	pte_t new_pte;
3518 	spinlock_t *ptl;
3519 
3520 	/*
3521 	 * Currently, we are forced to kill the process in the event the
3522 	 * original mapper has unmapped pages from the child due to a failed
3523 	 * COW. Warn that such a situation has occurred as it may not be obvious
3524 	 */
3525 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3526 		pr_warning("PID %d killed due to inadequate hugepage pool\n",
3527 			   current->pid);
3528 		return ret;
3529 	}
3530 
3531 	/*
3532 	 * Use page lock to guard against racing truncation
3533 	 * before we get page_table_lock.
3534 	 */
3535 retry:
3536 	page = find_lock_page(mapping, idx);
3537 	if (!page) {
3538 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3539 		if (idx >= size)
3540 			goto out;
3541 		page = alloc_huge_page(vma, address, 0);
3542 		if (IS_ERR(page)) {
3543 			ret = PTR_ERR(page);
3544 			if (ret == -ENOMEM)
3545 				ret = VM_FAULT_OOM;
3546 			else
3547 				ret = VM_FAULT_SIGBUS;
3548 			goto out;
3549 		}
3550 		clear_huge_page(page, address, pages_per_huge_page(h));
3551 		__SetPageUptodate(page);
3552 		set_page_huge_active(page);
3553 
3554 		if (vma->vm_flags & VM_MAYSHARE) {
3555 			int err = huge_add_to_page_cache(page, mapping, idx);
3556 			if (err) {
3557 				put_page(page);
3558 				if (err == -EEXIST)
3559 					goto retry;
3560 				goto out;
3561 			}
3562 		} else {
3563 			lock_page(page);
3564 			if (unlikely(anon_vma_prepare(vma))) {
3565 				ret = VM_FAULT_OOM;
3566 				goto backout_unlocked;
3567 			}
3568 			anon_rmap = 1;
3569 		}
3570 	} else {
3571 		/*
3572 		 * If a memory error occurs between mmap() and fault, some processes
3573 		 * don't have a hwpoisoned swap entry for the errored virtual address,
3574 		 * so we must block hugepage faults with a PG_hwpoison bit check.
3575 		 */
3576 		if (unlikely(PageHWPoison(page))) {
3577 			ret = VM_FAULT_HWPOISON |
3578 				VM_FAULT_SET_HINDEX(hstate_index(h));
3579 			goto backout_unlocked;
3580 		}
3581 	}
3582 
3583 	/*
3584 	 * If we are going to COW a private mapping later, we examine the
3585 	 * pending reservations for this page now. This will ensure that
3586 	 * any allocations necessary to record that reservation occur outside
3587 	 * the spinlock.
3588 	 */
3589 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3590 		if (vma_needs_reservation(h, vma, address) < 0) {
3591 			ret = VM_FAULT_OOM;
3592 			goto backout_unlocked;
3593 		}
3594 		/* Just decrements count, does not deallocate */
3595 		vma_end_reservation(h, vma, address);
3596 	}
3597 
3598 	ptl = huge_pte_lockptr(h, mm, ptep);
3599 	spin_lock(ptl);
3600 	size = i_size_read(mapping->host) >> huge_page_shift(h);
3601 	if (idx >= size)
3602 		goto backout;
3603 
3604 	ret = 0;
3605 	if (!huge_pte_none(huge_ptep_get(ptep)))
3606 		goto backout;
3607 
3608 	if (anon_rmap) {
3609 		ClearPagePrivate(page);
3610 		hugepage_add_new_anon_rmap(page, vma, address);
3611 	} else
3612 		page_dup_rmap(page);
3613 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3614 				&& (vma->vm_flags & VM_SHARED)));
3615 	set_huge_pte_at(mm, address, ptep, new_pte);
3616 
3617 	hugetlb_count_add(pages_per_huge_page(h), mm);
3618 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3619 		/* Optimization, do the COW without a second fault */
3620 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3621 	}
3622 
3623 	spin_unlock(ptl);
3624 	unlock_page(page);
3625 out:
3626 	return ret;
3627 
3628 backout:
3629 	spin_unlock(ptl);
3630 backout_unlocked:
3631 	unlock_page(page);
3632 	put_page(page);
3633 	goto out;
3634 }
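
/*
 * Note on the retry protocol above: when huge_add_to_page_cache()
 * returns -EEXIST, another task instantiated the same index between our
 * find_lock_page() and the insertion; we drop the freshly allocated
 * page and jump back to retry, picking up the winner's page instead.
 */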
3635 
3636 #ifdef CONFIG_SMP
3637 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3638 			    struct vm_area_struct *vma,
3639 			    struct address_space *mapping,
3640 			    pgoff_t idx, unsigned long address)
3641 {
3642 	unsigned long key[2];
3643 	u32 hash;
3644 
3645 	if (vma->vm_flags & VM_SHARED) {
3646 		key[0] = (unsigned long) mapping;
3647 		key[1] = idx;
3648 	} else {
3649 		key[0] = (unsigned long) mm;
3650 		key[1] = address >> huge_page_shift(h);
3651 	}
3652 
3653 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3654 
3655 	return hash & (num_fault_mutexes - 1);
3656 }
3657 #else
3658 /*
3659  * For uniprocessor systems we always use a single mutex, so just
3660  * return 0 and avoid the hashing overhead.
3661  */
3662 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3663 			    struct vm_area_struct *vma,
3664 			    struct address_space *mapping,
3665 			    pgoff_t idx, unsigned long address)
3666 {
3667 	return 0;
3668 }
3669 #endif
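
/*
 * Worked example for the hash above (illustrative values): with
 * num_fault_mutexes == 256, the jhash2() of the two-word key is masked
 * with 0xff, so concurrent faults on the same (mapping, idx) pair of a
 * shared mapping, or on the same huge page of the same mm for a private
 * one, always contend on the same mutex. Masking with
 * num_fault_mutexes - 1 relies on the table being sized to a power of
 * two at init time.
 */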
3670 
3671 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3672 			unsigned long address, unsigned int flags)
3673 {
3674 	pte_t *ptep, entry;
3675 	spinlock_t *ptl;
3676 	int ret;
3677 	u32 hash;
3678 	pgoff_t idx;
3679 	struct page *page = NULL;
3680 	struct page *pagecache_page = NULL;
3681 	struct hstate *h = hstate_vma(vma);
3682 	struct address_space *mapping;
3683 	int need_wait_lock = 0;
3684 
3685 	address &= huge_page_mask(h);
3686 
3687 	ptep = huge_pte_offset(mm, address);
3688 	if (ptep) {
3689 		entry = huge_ptep_get(ptep);
3690 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3691 			migration_entry_wait_huge(vma, mm, ptep);
3692 			return 0;
3693 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3694 			return VM_FAULT_HWPOISON_LARGE |
3695 				VM_FAULT_SET_HINDEX(hstate_index(h));
3696 	}
3697 
3698 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3699 	if (!ptep)
3700 		return VM_FAULT_OOM;
3701 
3702 	mapping = vma->vm_file->f_mapping;
3703 	idx = vma_hugecache_offset(h, vma, address);
3704 
3705 	/*
3706 	 * Serialize hugepage allocation and instantiation, so that we don't
3707 	 * get spurious allocation failures if two CPUs race to instantiate
3708 	 * the same page in the page cache.
3709 	 */
3710 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3711 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
3712 
3713 	entry = huge_ptep_get(ptep);
3714 	if (huge_pte_none(entry)) {
3715 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3716 		goto out_mutex;
3717 	}
3718 
3719 	ret = 0;
3720 
3721 	/*
3722 	 * entry could be a migration/hwpoison entry at this point, so this
3723 	 * check prevents the kernel from going below assuming that we have
3724 	 * an active hugepage in the pagecache. This goto expects the second
3725 	 * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3726 	 * check will properly handle it.
3727 	 */
3728 	if (!pte_present(entry))
3729 		goto out_mutex;
3730 
3731 	/*
3732 	 * If we are going to COW the mapping later, we examine the pending
3733 	 * reservations for this page now. This will ensure that any
3734 	 * allocations necessary to record that reservation occur outside the
3735 	 * spinlock. For private mappings, we also lookup the pagecache
3736 	 * page now as it is used to determine if a reservation has been
3737 	 * consumed.
3738 	 */
3739 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3740 		if (vma_needs_reservation(h, vma, address) < 0) {
3741 			ret = VM_FAULT_OOM;
3742 			goto out_mutex;
3743 		}
3744 		/* Just decrements count, does not deallocate */
3745 		vma_end_reservation(h, vma, address);
3746 
3747 		if (!(vma->vm_flags & VM_MAYSHARE))
3748 			pagecache_page = hugetlbfs_pagecache_page(h,
3749 								vma, address);
3750 	}
3751 
3752 	ptl = huge_pte_lock(h, mm, ptep);
3753 
3754 	/* Check for a racing update before calling hugetlb_cow */
3755 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3756 		goto out_ptl;
3757 
3758 	/*
3759 	 * hugetlb_cow() requires page locks of pte_page(entry) and
3760 	 * pagecache_page, so here we need to take the former one
3761 	 * when page != pagecache_page or !pagecache_page.
3762 	 */
3763 	page = pte_page(entry);
3764 	if (page != pagecache_page)
3765 		if (!trylock_page(page)) {
3766 			need_wait_lock = 1;
3767 			goto out_ptl;
3768 		}
3769 
3770 	get_page(page);
3771 
3772 	if (flags & FAULT_FLAG_WRITE) {
3773 		if (!huge_pte_write(entry)) {
3774 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
3775 					pagecache_page, ptl);
3776 			goto out_put_page;
3777 		}
3778 		entry = huge_pte_mkdirty(entry);
3779 	}
3780 	entry = pte_mkyoung(entry);
3781 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3782 						flags & FAULT_FLAG_WRITE))
3783 		update_mmu_cache(vma, address, ptep);
3784 out_put_page:
3785 	if (page != pagecache_page)
3786 		unlock_page(page);
3787 	put_page(page);
3788 out_ptl:
3789 	spin_unlock(ptl);
3790 
3791 	if (pagecache_page) {
3792 		unlock_page(pagecache_page);
3793 		put_page(pagecache_page);
3794 	}
3795 out_mutex:
3796 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3797 	/*
3798 	 * Generally it's safe to hold a refcount while waiting on a page lock.
3799 	 * But here we only wait to defer the next page fault and avoid a busy
3800 	 * loop; the page is not used between being unlocked and the current
3801 	 * page fault returning. So we are safe from accessing a freed page,
3802 	 * even though we wait here without taking a refcount.
3803 	 */
3804 	if (need_wait_lock)
3805 		wait_on_page_locked(page);
3806 	return ret;
3807 }
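
/*
 * Lock ordering as implemented above: the per-hash fault mutex is taken
 * first, then the page lock(s), then the page table lock; the mutex is
 * always released last. Note that hugetlb_cow() is entered with ptl
 * held and must return with it re-taken, per the comment at its tail.
 */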
3808 
3809 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3810 			 struct page **pages, struct vm_area_struct **vmas,
3811 			 unsigned long *position, unsigned long *nr_pages,
3812 			 long i, unsigned int flags)
3813 {
3814 	unsigned long pfn_offset;
3815 	unsigned long vaddr = *position;
3816 	unsigned long remainder = *nr_pages;
3817 	struct hstate *h = hstate_vma(vma);
3818 
3819 	while (vaddr < vma->vm_end && remainder) {
3820 		pte_t *pte;
3821 		spinlock_t *ptl = NULL;
3822 		int absent;
3823 		struct page *page;
3824 
3825 		/*
3826 		 * If we have a pending SIGKILL, don't keep faulting pages and
3827 		 * potentially allocating memory.
3828 		 */
3829 		if (unlikely(fatal_signal_pending(current))) {
3830 			remainder = 0;
3831 			break;
3832 		}
3833 
3834 		/*
3835 		 * Some archs (sparc64, sh*) have multiple pte_ts to
3836 		 * each hugepage.  We have to make sure we get the
3837 		 * first, for the page indexing below to work.
3838 		 *
3839 		 * Note that page table lock is not held when pte is null.
3840 		 */
3841 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3842 		if (pte)
3843 			ptl = huge_pte_lock(h, mm, pte);
3844 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
3845 
3846 		/*
3847 		 * When coredumping, it suits get_dump_page if we just return
3848 		 * an error where there's an empty slot with no huge pagecache
3849 		 * to back it.  This way, we avoid allocating a hugepage, and
3850 		 * the sparse dumpfile avoids allocating disk blocks, but its
3851 		 * huge holes still show up with zeroes where they need to be.
3852 		 */
3853 		if (absent && (flags & FOLL_DUMP) &&
3854 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3855 			if (pte)
3856 				spin_unlock(ptl);
3857 			remainder = 0;
3858 			break;
3859 		}
3860 
3861 		/*
3862 		 * We need to call hugetlb_fault for both hugepages under
3863 		 * migration (in which case hugetlb_fault waits for the
3864 		 * migration) and hwpoisoned hugepages (in which case we need
3865 		 * to prevent the caller from accessing them). To do this, we
3866 		 * use is_swap_pte here instead of is_hugetlb_entry_migration
3867 		 * and is_hugetlb_entry_hwpoisoned, because it simply covers
3868 		 * both cases and because we can't follow correct pages
3869 		 * directly from any kind of swap entry.
3870 		 */
3871 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3872 		    ((flags & FOLL_WRITE) &&
3873 		      !huge_pte_write(huge_ptep_get(pte)))) {
3874 			int ret;
3875 
3876 			if (pte)
3877 				spin_unlock(ptl);
3878 			ret = hugetlb_fault(mm, vma, vaddr,
3879 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3880 			if (!(ret & VM_FAULT_ERROR))
3881 				continue;
3882 
3883 			remainder = 0;
3884 			break;
3885 		}
3886 
3887 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3888 		page = pte_page(huge_ptep_get(pte));
3889 same_page:
3890 		if (pages) {
3891 			pages[i] = mem_map_offset(page, pfn_offset);
3892 			get_page_foll(pages[i]);
3893 		}
3894 
3895 		if (vmas)
3896 			vmas[i] = vma;
3897 
3898 		vaddr += PAGE_SIZE;
3899 		++pfn_offset;
3900 		--remainder;
3901 		++i;
3902 		if (vaddr < vma->vm_end && remainder &&
3903 				pfn_offset < pages_per_huge_page(h)) {
3904 			/*
3905 			 * We use pfn_offset to avoid touching the pageframes
3906 			 * of this compound page.
3907 			 */
3908 			goto same_page;
3909 		}
3910 		spin_unlock(ptl);
3911 	}
3912 	*nr_pages = remainder;
3913 	*position = vaddr;
3914 
3915 	return i ? i : -EFAULT;
3916 }
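
/*
 * Example of the same_page fast path above, assuming 4KB base pages and
 * 2MB huge pages: a single page-table walk can satisfy up to 512
 * consecutive GUP slots, with pfn_offset stepping through the subpages
 * of one compound page; the walk is only repeated at the next huge page
 * boundary or once remainder is exhausted.
 */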
3917 
3918 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3919 		unsigned long address, unsigned long end, pgprot_t newprot)
3920 {
3921 	struct mm_struct *mm = vma->vm_mm;
3922 	unsigned long start = address;
3923 	pte_t *ptep;
3924 	pte_t pte;
3925 	struct hstate *h = hstate_vma(vma);
3926 	unsigned long pages = 0;
3927 
3928 	BUG_ON(address >= end);
3929 	flush_cache_range(vma, address, end);
3930 
3931 	mmu_notifier_invalidate_range_start(mm, start, end);
3932 	i_mmap_lock_write(vma->vm_file->f_mapping);
3933 	for (; address < end; address += huge_page_size(h)) {
3934 		spinlock_t *ptl;
3935 		ptep = huge_pte_offset(mm, address);
3936 		if (!ptep)
3937 			continue;
3938 		ptl = huge_pte_lock(h, mm, ptep);
3939 		if (huge_pmd_unshare(mm, &address, ptep)) {
3940 			pages++;
3941 			spin_unlock(ptl);
3942 			continue;
3943 		}
3944 		pte = huge_ptep_get(ptep);
3945 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3946 			spin_unlock(ptl);
3947 			continue;
3948 		}
3949 		if (unlikely(is_hugetlb_entry_migration(pte))) {
3950 			swp_entry_t entry = pte_to_swp_entry(pte);
3951 
3952 			if (is_write_migration_entry(entry)) {
3953 				pte_t newpte;
3954 
3955 				make_migration_entry_read(&entry);
3956 				newpte = swp_entry_to_pte(entry);
3957 				set_huge_pte_at(mm, address, ptep, newpte);
3958 				pages++;
3959 			}
3960 			spin_unlock(ptl);
3961 			continue;
3962 		}
3963 		if (!huge_pte_none(pte)) {
3964 			pte = huge_ptep_get_and_clear(mm, address, ptep);
3965 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3966 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
3967 			set_huge_pte_at(mm, address, ptep, pte);
3968 			pages++;
3969 		}
3970 		spin_unlock(ptl);
3971 	}
3972 	/*
3973 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3974 	 * may have cleared our pud entry and done put_page on the page table:
3975 	 * once we release i_mmap_rwsem, another task can do the final put_page
3976 	 * and that page table be reused and filled with junk.
3977 	 */
3978 	flush_tlb_range(vma, start, end);
3979 	mmu_notifier_invalidate_range(mm, start, end);
3980 	i_mmap_unlock_write(vma->vm_file->f_mapping);
3981 	mmu_notifier_invalidate_range_end(mm, start, end);
3982 
3983 	return pages << h->order;
3984 }
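
/*
 * The return value above is in base pages: pages counts changed huge
 * PTEs (and unshared pmds), and pages << h->order rescales that, e.g.
 * one modified 2MB entry reports 512 assuming a 4KB base page size.
 */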
3985 
3986 int hugetlb_reserve_pages(struct inode *inode,
3987 					long from, long to,
3988 					struct vm_area_struct *vma,
3989 					vm_flags_t vm_flags)
3990 {
3991 	long ret, chg;
3992 	struct hstate *h = hstate_inode(inode);
3993 	struct hugepage_subpool *spool = subpool_inode(inode);
3994 	struct resv_map *resv_map;
3995 	long gbl_reserve;
3996 
3997 	/*
3998 	 * Only apply hugepage reservation if asked. At fault time, an
3999 	 * attempt will be made for VM_NORESERVE to allocate a page
4000 	 * without using reserves.
4001 	 */
4002 	if (vm_flags & VM_NORESERVE)
4003 		return 0;
4004 
4005 	/*
4006 	 * Shared mappings base their reservation on the number of pages that
4007 	 * are already allocated on behalf of the file. Private mappings need
4008 	 * to reserve the full area even if read-only as mprotect() may be
4009 	 * called to make the mapping read-write. Assume !vma is a shm mapping.
4010 	 */
4011 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4012 		resv_map = inode_resv_map(inode);
4013 
4014 		chg = region_chg(resv_map, from, to);
4015 
4016 	} else {
4017 		resv_map = resv_map_alloc();
4018 		if (!resv_map)
4019 			return -ENOMEM;
4020 
4021 		chg = to - from;
4022 
4023 		set_vma_resv_map(vma, resv_map);
4024 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4025 	}
4026 
4027 	if (chg < 0) {
4028 		ret = chg;
4029 		goto out_err;
4030 	}
4031 
4032 	/*
4033 	 * There must be enough pages in the subpool for the mapping. If
4034 	 * the subpool has a minimum size, there may be some global
4035 	 * reservations already in place (gbl_reserve).
4036 	 */
4037 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4038 	if (gbl_reserve < 0) {
4039 		ret = -ENOSPC;
4040 		goto out_err;
4041 	}
4042 
4043 	/*
4044 	 * Check that enough hugepages are available for the reservation.
4045 	 * Hand the pages back to the subpool if there are not.
4046 	 */
4047 	ret = hugetlb_acct_memory(h, gbl_reserve);
4048 	if (ret < 0) {
4049 		/* put back original number of pages, chg */
4050 		(void)hugepage_subpool_put_pages(spool, chg);
4051 		goto out_err;
4052 	}
4053 
4054 	/*
4055 	 * Account for the reservations made. Shared mappings record regions
4056 	 * that have reservations as they are shared by multiple VMAs.
4057 	 * When the last VMA disappears, the region map says how much
4058 	 * the reservation was and the page cache tells how much of
4059 	 * the reservation was consumed. Private mappings are per-VMA and
4060 	 * only the consumed reservations are tracked. When the VMA
4061 	 * disappears, the original reservation is the VMA size and the
4062 	 * consumed reservations are stored in the map. Hence, nothing
4063 	 * else has to be done for private mappings here.
4064 	 */
4065 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4066 		long add = region_add(resv_map, from, to);
4067 
4068 		if (unlikely(chg > add)) {
4069 			/*
4070 			 * pages in this range were added to the reserve
4071 			 * map between region_chg and region_add.  This
4072 			 * indicates a race with alloc_huge_page.  Adjust
4073 			 * the subpool and reserve counts modified above
4074 			 * based on the difference.
4075 			 */
4076 			long rsv_adjust;
4077 
4078 			rsv_adjust = hugepage_subpool_put_pages(spool,
4079 								chg - add);
4080 			hugetlb_acct_memory(h, -rsv_adjust);
4081 		}
4082 	}
4083 	return 0;
4084 out_err:
4085 	if (!vma || vma->vm_flags & VM_MAYSHARE)
4086 		region_abort(resv_map, from, to);
4087 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4088 		kref_put(&resv_map->refs, resv_map_release);
4089 	return ret;
4090 }
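
/*
 * Illustrative numbers for the race adjustment above: if region_chg()
 * reported chg == 3 but a racing alloc_huge_page() filled part of the
 * range so that region_add() returns add == 1, the surplus of 2 pages
 * is handed back to the subpool and the global reserve is shrunk by the
 * same rsv_adjust.
 */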
4091 
4092 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4093 								long freed)
4094 {
4095 	struct hstate *h = hstate_inode(inode);
4096 	struct resv_map *resv_map = inode_resv_map(inode);
4097 	long chg = 0;
4098 	struct hugepage_subpool *spool = subpool_inode(inode);
4099 	long gbl_reserve;
4100 
4101 	if (resv_map) {
4102 		chg = region_del(resv_map, start, end);
4103 		/*
4104 		 * region_del() can fail in the rare case where a region
4105 		 * must be split and another region descriptor cannot be
4106 		 * allocated.  If end == LONG_MAX, it will not fail.
4107 		 */
4108 		if (chg < 0)
4109 			return chg;
4110 	}
4111 
4112 	spin_lock(&inode->i_lock);
4113 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4114 	spin_unlock(&inode->i_lock);
4115 
4116 	/*
4117 	 * If the subpool has a minimum size, the number of global
4118 	 * reservations to be released may be adjusted.
4119 	 */
4120 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4121 	hugetlb_acct_memory(h, -gbl_reserve);
4122 
4123 	return 0;
4124 }
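
/*
 * Accounting sketch for the function above: chg is how many reserved
 * pages region_del() removed from the reserve map, freed is how many
 * pages truncation actually released, so (chg - freed) counts the
 * never-faulted reservations that must still be returned to the subpool
 * and, through hugetlb_acct_memory(), to the global reserve.
 */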
4125 
4126 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4127 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4128 				struct vm_area_struct *vma,
4129 				unsigned long addr, pgoff_t idx)
4130 {
4131 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4132 				svma->vm_start;
4133 	unsigned long sbase = saddr & PUD_MASK;
4134 	unsigned long s_end = sbase + PUD_SIZE;
4135 
4136 	/* Allow segments to share if only one is marked locked */
4137 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4138 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4139 
4140 	/*
4141 	 * match the virtual addresses, permissions and the alignment of the
4142 	 * page table page.
4143 	 */
4144 	if (pmd_index(addr) != pmd_index(saddr) ||
4145 	    vm_flags != svm_flags ||
4146 	    sbase < svma->vm_start || svma->vm_end < s_end)
4147 		return 0;
4148 
4149 	return saddr;
4150 }
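
/*
 * Worked example, assuming x86-64 defaults (PUD_SIZE == 1GB): two
 * mappings of the same hugetlbfs file can share a pmd page only if the
 * faulting address and the computed saddr land in the same pmd slot,
 * the VM_LOCKED-masked flags match, and svma spans the entire aligned
 * region [sbase, s_end); otherwise 0 is returned and no sharing occurs.
 */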
4151 
4152 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4153 {
4154 	unsigned long base = addr & PUD_MASK;
4155 	unsigned long end = base + PUD_SIZE;
4156 
4157 	/*
4158 	 * check on proper vm_flags and page table alignment
4159 	 */
4160 	if (vma->vm_flags & VM_MAYSHARE &&
4161 	    vma->vm_start <= base && end <= vma->vm_end)
4162 		return true;
4163 	return false;
4164 }
4165 
4166 /*
4167  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4168  * and returns the corresponding pte. While this is not necessary for the
4169  * !shared pmd case because we can allocate the pmd later as well, it makes the
4170  * code much cleaner. pmd allocation is essential for the shared case because
4171  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4172  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4173  * bad pmd for sharing.
4174  */
4175 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4176 {
4177 	struct vm_area_struct *vma = find_vma(mm, addr);
4178 	struct address_space *mapping = vma->vm_file->f_mapping;
4179 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4180 			vma->vm_pgoff;
4181 	struct vm_area_struct *svma;
4182 	unsigned long saddr;
4183 	pte_t *spte = NULL;
4184 	pte_t *pte;
4185 	spinlock_t *ptl;
4186 
4187 	if (!vma_shareable(vma, addr))
4188 		return (pte_t *)pmd_alloc(mm, pud, addr);
4189 
4190 	i_mmap_lock_write(mapping);
4191 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4192 		if (svma == vma)
4193 			continue;
4194 
4195 		saddr = page_table_shareable(svma, vma, addr, idx);
4196 		if (saddr) {
4197 			spte = huge_pte_offset(svma->vm_mm, saddr);
4198 			if (spte) {
4199 				mm_inc_nr_pmds(mm);
4200 				get_page(virt_to_page(spte));
4201 				break;
4202 			}
4203 		}
4204 	}
4205 
4206 	if (!spte)
4207 		goto out;
4208 
4209 	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4210 	spin_lock(ptl);
4211 	if (pud_none(*pud)) {
4212 		pud_populate(mm, pud,
4213 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
4214 	} else {
4215 		put_page(virt_to_page(spte));
4216 		mm_inc_nr_pmds(mm);
4217 	}
4218 	spin_unlock(ptl);
4219 out:
4220 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
4221 	i_mmap_unlock_write(mapping);
4222 	return pte;
4223 }
4224 
4225 /*
4226  * unmap huge page backed by shared pte.
4227  *
4228  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4229  * indicated by page_count > 1, unmap is achieved by clearing pud and
4230  * decrementing the ref count. If count == 1, the pte page is not shared.
4231  *
4232  * called with page table lock held.
4233  *
4234  * returns: 1 successfully unmapped a shared pte page
4235  *	    0 the underlying pte page is not shared, or it is the last user
4236  */
4237 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4238 {
4239 	pgd_t *pgd = pgd_offset(mm, *addr);
4240 	pud_t *pud = pud_offset(pgd, *addr);
4241 
4242 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
4243 	if (page_count(virt_to_page(ptep)) == 1)
4244 		return 0;
4245 
4246 	pud_clear(pud);
4247 	put_page(virt_to_page(ptep));
4248 	mm_dec_nr_pmds(mm);
4249 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4250 	return 1;
4251 }
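
/*
 * Note on the *addr adjustment above, assuming 2MB huge pages with 512
 * entries per pmd page (HPAGE_SIZE * PTRS_PER_PTE == PUD_SIZE == 1GB on
 * x86-64): ALIGN(*addr, 1GB) - 2MB parks the address one huge page
 * short of the PUD boundary, so a caller loop stepping by
 * huge_page_size(h) resumes exactly at the next, now-unshared range.
 */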
4252 #define want_pmd_share()	(1)
4253 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4254 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4255 {
4256 	return NULL;
4257 }
4258 
4259 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4260 {
4261 	return 0;
4262 }
4263 #define want_pmd_share()	(0)
4264 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4265 
4266 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4267 pte_t *huge_pte_alloc(struct mm_struct *mm,
4268 			unsigned long addr, unsigned long sz)
4269 {
4270 	pgd_t *pgd;
4271 	pud_t *pud;
4272 	pte_t *pte = NULL;
4273 
4274 	pgd = pgd_offset(mm, addr);
4275 	pud = pud_alloc(mm, pgd, addr);
4276 	if (pud) {
4277 		if (sz == PUD_SIZE) {
4278 			pte = (pte_t *)pud;
4279 		} else {
4280 			BUG_ON(sz != PMD_SIZE);
4281 			if (want_pmd_share() && pud_none(*pud))
4282 				pte = huge_pmd_share(mm, addr, pud);
4283 			else
4284 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
4285 		}
4286 	}
4287 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4288 
4289 	return pte;
4290 }
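
/*
 * Size dispatch above, with typical x86-64 values: a PUD_SIZE (1GB)
 * request returns the pud slot itself to hold the huge entry, while a
 * PMD_SIZE (2MB) request goes through huge_pmd_share() (which falls
 * back to a private pmd_alloc() when nothing is shareable); any other
 * size trips the BUG_ON().
 */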
4291 
4292 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4293 {
4294 	pgd_t *pgd;
4295 	pud_t *pud;
4296 	pmd_t *pmd = NULL;
4297 
4298 	pgd = pgd_offset(mm, addr);
4299 	if (pgd_present(*pgd)) {
4300 		pud = pud_offset(pgd, addr);
4301 		if (pud_present(*pud)) {
4302 			if (pud_huge(*pud))
4303 				return (pte_t *)pud;
4304 			pmd = pmd_offset(pud, addr);
4305 		}
4306 	}
4307 	return (pte_t *) pmd;
4308 }
4309 
4310 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4311 
4312 /*
4313  * These functions are overridable if your architecture needs its own
4314  * behavior.
4315  */
4316 struct page * __weak
4317 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4318 			      int write)
4319 {
4320 	return ERR_PTR(-EINVAL);
4321 }
4322 
4323 struct page * __weak
4324 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4325 		pmd_t *pmd, int flags)
4326 {
4327 	struct page *page = NULL;
4328 	spinlock_t *ptl;
4329 retry:
4330 	ptl = pmd_lockptr(mm, pmd);
4331 	spin_lock(ptl);
4332 	/*
4333 	 * make sure that the address range covered by this pmd is not
4334 	 * unmapped by other threads.
4335 	 */
4336 	if (!pmd_huge(*pmd))
4337 		goto out;
4338 	if (pmd_present(*pmd)) {
4339 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4340 		if (flags & FOLL_GET)
4341 			get_page(page);
4342 	} else {
4343 		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4344 			spin_unlock(ptl);
4345 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
4346 			goto retry;
4347 		}
4348 		/*
4349 		 * hwpoisoned entry is treated as no_page_table in
4350 		 * follow_page_mask().
4351 		 */
4352 	}
4353 out:
4354 	spin_unlock(ptl);
4355 	return page;
4356 }
4357 
4358 struct page * __weak
4359 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4360 		pud_t *pud, int flags)
4361 {
4362 	if (flags & FOLL_GET)
4363 		return NULL;
4364 
4365 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4366 }
4367 
4368 #ifdef CONFIG_MEMORY_FAILURE
4369 
4370 /*
4371  * This function is called from memory failure code.
4372  * Assume the caller holds page lock of the head page.
4373  */
4374 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4375 {
4376 	struct hstate *h = page_hstate(hpage);
4377 	int nid = page_to_nid(hpage);
4378 	int ret = -EBUSY;
4379 
4380 	spin_lock(&hugetlb_lock);
4381 	/*
4382 	 * Just checking !page_huge_active is not enough, because that could be
4383 	 * an isolated/hwpoisoned hugepage (which have >0 refcount).
4384 	 */
4385 	if (!page_huge_active(hpage) && !page_count(hpage)) {
4386 		/*
4387 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4388 		 * but dangling hpage->lru can trigger list-debug warnings
4389 		 * (this happens when we call unpoison_memory() on it),
4390 		 * so let it point to itself with list_del_init().
4391 		 */
4392 		list_del_init(&hpage->lru);
4393 		set_page_refcounted(hpage);
4394 		h->free_huge_pages--;
4395 		h->free_huge_pages_node[nid]--;
4396 		ret = 0;
4397 	}
4398 	spin_unlock(&hugetlb_lock);
4399 	return ret;
4400 }
4401 #endif
4402 
4403 bool isolate_huge_page(struct page *page, struct list_head *list)
4404 {
4405 	bool ret = true;
4406 
4407 	VM_BUG_ON_PAGE(!PageHead(page), page);
4408 	spin_lock(&hugetlb_lock);
4409 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4410 		ret = false;
4411 		goto unlock;
4412 	}
4413 	clear_page_huge_active(page);
4414 	list_move_tail(&page->lru, list);
4415 unlock:
4416 	spin_unlock(&hugetlb_lock);
4417 	return ret;
4418 }
4419 
4420 void putback_active_hugepage(struct page *page)
4421 {
4422 	VM_BUG_ON_PAGE(!PageHead(page), page);
4423 	spin_lock(&hugetlb_lock);
4424 	set_page_huge_active(page);
4425 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4426 	spin_unlock(&hugetlb_lock);
4427 	put_page(page);
4428 }
4429