xref: /openbmc/linux/mm/hugetlb.c (revision 3f2fb9a834cb1fcddbae22deca7fde136944dc89)
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26 
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30 
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36 
37 int hugepages_treat_as_movable;
38 
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47 
48 __initdata LIST_HEAD(huge_boot_pages);
49 
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 
55 /*
56  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
57  * free_huge_pages, and surplus_huge_pages.
58  */
59 DEFINE_SPINLOCK(hugetlb_lock);
60 
61 /*
62  * Serializes faults on the same logical page.  This is used to
63  * prevent spurious OOMs when the hugepage pool is fully utilized.
64  */
65 static int num_fault_mutexes;
66 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
67 
68 /* Forward declaration */
69 static int hugetlb_acct_memory(struct hstate *h, long delta);
70 
71 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
72 {
73 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
74 
75 	spin_unlock(&spool->lock);
76 
77 	/* If no pages are used, and no other handles to the subpool
78 	 * remain, give up any reservations based on minimum size and
79 	 * free the subpool */
80 	if (free) {
81 		if (spool->min_hpages != -1)
82 			hugetlb_acct_memory(spool->hstate,
83 						-spool->min_hpages);
84 		kfree(spool);
85 	}
86 }
87 
88 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
89 						long min_hpages)
90 {
91 	struct hugepage_subpool *spool;
92 
93 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
94 	if (!spool)
95 		return NULL;
96 
97 	spin_lock_init(&spool->lock);
98 	spool->count = 1;
99 	spool->max_hpages = max_hpages;
100 	spool->hstate = h;
101 	spool->min_hpages = min_hpages;
102 
103 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
104 		kfree(spool);
105 		return NULL;
106 	}
107 	spool->rsv_hpages = min_hpages;
108 
109 	return spool;
110 }
111 
112 void hugepage_put_subpool(struct hugepage_subpool *spool)
113 {
114 	spin_lock(&spool->lock);
115 	BUG_ON(!spool->count);
116 	spool->count--;
117 	unlock_or_release_subpool(spool);
118 }
119 
120 /*
121  * Subpool accounting for allocating and reserving pages.
122  * Return -ENOMEM if there are not enough resources to satisfy the
123  * request.  Otherwise, return the number of pages by which the
124  * global pools must be adjusted (upward).  The returned value may
125  * only be different than the passed value (delta) in the case where
126  * a subpool minimum size must be maintained.
127  */
128 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
129 				      long delta)
130 {
131 	long ret = delta;
132 
133 	if (!spool)
134 		return ret;
135 
136 	spin_lock(&spool->lock);
137 
138 	if (spool->max_hpages != -1) {		/* maximum size accounting */
139 		if ((spool->used_hpages + delta) <= spool->max_hpages)
140 			spool->used_hpages += delta;
141 		else {
142 			ret = -ENOMEM;
143 			goto unlock_ret;
144 		}
145 	}
146 
147 	if (spool->min_hpages != -1) {		/* minimum size accounting */
148 		if (delta > spool->rsv_hpages) {
149 			/*
150 			 * Asking for more reserves than those already taken on
151 			 * behalf of subpool.  Return difference.
152 			 */
153 			ret = delta - spool->rsv_hpages;
154 			spool->rsv_hpages = 0;
155 		} else {
156 			ret = 0;	/* reserves already accounted for */
157 			spool->rsv_hpages -= delta;
158 		}
159 	}
160 
161 unlock_ret:
162 	spin_unlock(&spool->lock);
163 	return ret;
164 }
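/*
 * Illustrative example (numbers hypothetical, not taken from the code
 * above): with min_hpages == 2, rsv_hpages == 2 and no maximum
 * configured, a request for delta == 3 consumes both reserved pages and
 * returns 1, so the caller must take only one additional page from the
 * global pool.  A second request for delta == 1 then finds
 * rsv_hpages == 0 and returns 1, i.e. the full delta.
 */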
165 
166 /*
167  * Subpool accounting for freeing and unreserving pages.
168  * Return the number of global page reservations that must be dropped.
169  * The return value may only be different than the passed value (delta)
170  * in the case where a subpool minimum size must be maintained.
171  */
172 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
173 				       long delta)
174 {
175 	long ret = delta;
176 
177 	if (!spool)
178 		return delta;
179 
180 	spin_lock(&spool->lock);
181 
182 	if (spool->max_hpages != -1)		/* maximum size accounting */
183 		spool->used_hpages -= delta;
184 
185 	if (spool->min_hpages != -1) {		/* minimum size accounting */
186 		if (spool->rsv_hpages + delta <= spool->min_hpages)
187 			ret = 0;
188 		else
189 			ret = spool->rsv_hpages + delta - spool->min_hpages;
190 
191 		spool->rsv_hpages += delta;
192 		if (spool->rsv_hpages > spool->min_hpages)
193 			spool->rsv_hpages = spool->min_hpages;
194 	}
195 
196 	/*
197 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
198 	 * quota reference, free it now.
199 	 */
200 	unlock_or_release_subpool(spool);
201 
202 	return ret;
203 }
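/*
 * Illustrative example (numbers hypothetical): continuing the scenario
 * above with min_hpages == 2 and rsv_hpages == 0, freeing delta == 3
 * pages yields ret = 0 + 3 - 2 = 1, so one global reservation is
 * dropped while rsv_hpages is refilled to (and capped at) the minimum
 * of 2.
 */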
204 
205 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
206 {
207 	return HUGETLBFS_SB(inode->i_sb)->spool;
208 }
209 
210 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
211 {
212 	return subpool_inode(file_inode(vma->vm_file));
213 }
214 
215 /*
216  * Region tracking -- allows tracking of reservations and instantiated pages
217  *                    across the pages in a mapping.
218  *
219  * The region data structures are embedded into a resv_map and protected
220  * by a resv_map's lock.  The set of regions within the resv_map represent
221  * reservations for huge pages, or huge pages that have already been
222  * instantiated within the map.  The from and to elements are huge page
223  * indices into the associated mapping.  from indicates the starting index
224  * of the region.  to represents the first index past the end of the region.
225  *
226  * For example, a file region structure with from == 0 and to == 4 represents
227  * four huge pages in a mapping.  It is important to note that the to element
228  * represents the first element past the end of the region. This is used in
229  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
230  *
231  * Interval notation of the form [from, to) will be used to indicate that
232  * the endpoint from is inclusive and to is exclusive.
233  */
234 struct file_region {
235 	struct list_head link;
236 	long from;
237 	long to;
238 };
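/*
 * Illustrative example: a reserve map holding the regions [0, 2) and
 * [5, 6) describes three huge pages at indices 0, 1 and 5, so
 * region_count(resv, 0, 6) (defined below) returns (2 - 0) + (6 - 5) = 3.
 */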
239 
240 /*
241  * Add the huge page range represented by [f, t) to the reserve
242  * map.  In the normal case, existing regions will be expanded
243  * to accommodate the specified range.  Sufficient regions should
244  * exist for expansion due to the previous call to region_chg
245  * with the same range.  However, it is possible that region_del
246  * could have been called after region_chg and modified the map
247  * in such a way that no region exists to be expanded.  In this
248  * case, pull a region descriptor from the cache associated with
249  * the map and use that for the new range.
250  *
251  * Return the number of new huge pages added to the map.  This
252  * number is greater than or equal to zero.
253  */
254 static long region_add(struct resv_map *resv, long f, long t)
255 {
256 	struct list_head *head = &resv->regions;
257 	struct file_region *rg, *nrg, *trg;
258 	long add = 0;
259 
260 	spin_lock(&resv->lock);
261 	/* Locate the region we are either in or before. */
262 	list_for_each_entry(rg, head, link)
263 		if (f <= rg->to)
264 			break;
265 
266 	/*
267 	 * If no region exists which can be expanded to include the
268 	 * specified range, the list must have been modified by an
269 	 * interleaving call to region_del().  Pull a region descriptor
270 	 * from the cache and use it for this range.
271 	 */
272 	if (&rg->link == head || t < rg->from) {
273 		VM_BUG_ON(resv->region_cache_count <= 0);
274 
275 		resv->region_cache_count--;
276 		nrg = list_first_entry(&resv->region_cache, struct file_region,
277 					link);
278 		list_del(&nrg->link);
279 
280 		nrg->from = f;
281 		nrg->to = t;
282 		list_add(&nrg->link, rg->link.prev);
283 
284 		add += t - f;
285 		goto out_locked;
286 	}
287 
288 	/* Round our left edge to the current segment if it encloses us. */
289 	if (f > rg->from)
290 		f = rg->from;
291 
292 	/* Check for and consume any regions we now overlap with. */
293 	nrg = rg;
294 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
295 		if (&rg->link == head)
296 			break;
297 		if (rg->from > t)
298 			break;
299 
300 		/* If this area reaches higher, extend our area to
301 		 * include it completely.  If this is not the first area
302 		 * which we intend to reuse, free it. */
303 		if (rg->to > t)
304 			t = rg->to;
305 		if (rg != nrg) {
306 			/* Decrement return value by the deleted range.
307 			 * Another range will span this area so that by
308 			 * the end of the routine add will be >= zero.
309 			 */
310 			add -= (rg->to - rg->from);
311 			list_del(&rg->link);
312 			kfree(rg);
313 		}
314 	}
315 
316 	add += (nrg->from - f);		/* Added to beginning of region */
317 	nrg->from = f;
318 	add += t - nrg->to;		/* Added to end of region */
319 	nrg->to = t;
320 
321 out_locked:
322 	resv->adds_in_progress--;
323 	spin_unlock(&resv->lock);
324 	VM_BUG_ON(add < 0);
325 	return add;
326 }
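/*
 * Illustrative example: calling region_add() for [2, 5) on a map
 * holding [0, 3) and [4, 6) expands the first region, consumes the
 * second, and leaves the single region [0, 6).  The return value is 1,
 * since index 3 is the only page not already represented.
 */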
327 
328 /*
329  * Examine the existing reserve map and determine how many
330  * huge pages in the specified range [f, t) are NOT currently
331  * represented.  This routine is called before a subsequent
332  * call to region_add that will actually modify the reserve
333  * map to add the specified range [f, t).  region_chg does
334  * not change the number of huge pages represented by the
335  * map.  However, if the existing regions in the map can not
336  * be expanded to represent the new range, a new file_region
337  * structure is added to the map as a placeholder.  This is
338  * so that the subsequent region_add call will have all the
339  * regions it needs and will not fail.
340  *
341  * Upon entry, region_chg will also examine the cache of region descriptors
342  * associated with the map.  If there are not enough descriptors cached, one
343  * will be allocated for the in progress add operation.
344  *
345  * Returns the number of huge pages that need to be added to the existing
346  * reservation map for the range [f, t).  This number is greater than or equal to
347  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
348  * is needed and can not be allocated.
349  */
350 static long region_chg(struct resv_map *resv, long f, long t)
351 {
352 	struct list_head *head = &resv->regions;
353 	struct file_region *rg, *nrg = NULL;
354 	long chg = 0;
355 
356 retry:
357 	spin_lock(&resv->lock);
358 retry_locked:
359 	resv->adds_in_progress++;
360 
361 	/*
362 	 * Check for sufficient descriptors in the cache to accommodate
363 	 * the number of in progress add operations.
364 	 */
365 	if (resv->adds_in_progress > resv->region_cache_count) {
366 		struct file_region *trg;
367 
368 		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
369 		/* Must drop lock to allocate a new descriptor. */
370 		resv->adds_in_progress--;
371 		spin_unlock(&resv->lock);
372 
373 		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
374 		if (!trg) {
375 			kfree(nrg);
376 			return -ENOMEM;
377 		}
378 
379 		spin_lock(&resv->lock);
380 		list_add(&trg->link, &resv->region_cache);
381 		resv->region_cache_count++;
382 		goto retry_locked;
383 	}
384 
385 	/* Locate the region we are before or in. */
386 	list_for_each_entry(rg, head, link)
387 		if (f <= rg->to)
388 			break;
389 
390 	/* If we are below the current region then a new region is required.
391 	 * Subtle: allocate a new region at the position but make it zero
392 	 * size such that we can guarantee to record the reservation. */
393 	if (&rg->link == head || t < rg->from) {
394 		if (!nrg) {
395 			resv->adds_in_progress--;
396 			spin_unlock(&resv->lock);
397 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
398 			if (!nrg)
399 				return -ENOMEM;
400 
401 			nrg->from = f;
402 			nrg->to   = f;
403 			INIT_LIST_HEAD(&nrg->link);
404 			goto retry;
405 		}
406 
407 		list_add(&nrg->link, rg->link.prev);
408 		chg = t - f;
409 		goto out_nrg;
410 	}
411 
412 	/* Round our left edge to the current segment if it encloses us. */
413 	if (f > rg->from)
414 		f = rg->from;
415 	chg = t - f;
416 
417 	/* Check for and consume any regions we now overlap with. */
418 	list_for_each_entry(rg, rg->link.prev, link) {
419 		if (&rg->link == head)
420 			break;
421 		if (rg->from > t)
422 			goto out;
423 
424 		/* We overlap with this area, if it extends further than
425 		 * us then we must extend ourselves.  Account for its
426 		 * existing reservation. */
427 		if (rg->to > t) {
428 			chg += rg->to - t;
429 			t = rg->to;
430 		}
431 		chg -= rg->to - rg->from;
432 	}
433 
434 out:
435 	spin_unlock(&resv->lock);
436 	/* We already know we raced and no longer need the new region */
437 	kfree(nrg);
438 	return chg;
439 out_nrg:
440 	spin_unlock(&resv->lock);
441 	return chg;
442 }
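/*
 * Illustrative example: with a map holding [0, 3) and [4, 6),
 * region_chg(resv, 2, 5) returns 1 (only index 3 is unrepresented)
 * without modifying the regions; the later region_add(resv, 2, 5)
 * performs the actual merge shown above.
 */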
443 
444 /*
445  * Abort the in progress add operation.  The adds_in_progress field
446  * of the resv_map keeps track of the operations in progress between
447  * calls to region_chg and region_add.  Operations are sometimes
448  * aborted after the call to region_chg.  In such cases, region_abort
449  * is called to decrement the adds_in_progress counter.
450  *
451  * NOTE: The range arguments [f, t) are not needed or used in this
452  * routine.  They are kept to make reading the calling code easier as
453  * arguments will match the associated region_chg call.
454  */
455 static void region_abort(struct resv_map *resv, long f, long t)
456 {
457 	spin_lock(&resv->lock);
458 	VM_BUG_ON(!resv->region_cache_count);
459 	resv->adds_in_progress--;
460 	spin_unlock(&resv->lock);
461 }
462 
463 /*
464  * Delete the specified range [f, t) from the reserve map.  If the
465  * t parameter is LONG_MAX, this indicates that ALL regions after f
466  * should be deleted.  Locate the regions which intersect [f, t)
467  * and either trim, delete or split the existing regions.
468  *
469  * Returns the number of huge pages deleted from the reserve map.
470  * In the normal case, the return value is zero or more.  In the
471  * case where a region must be split, a new region descriptor must
472  * be allocated.  If the allocation fails, -ENOMEM will be returned.
473  * NOTE: If the parameter t == LONG_MAX, then we will never split
474  * a region and possibly return -ENOMEM.  Callers specifying
475  * t == LONG_MAX do not need to check for -ENOMEM error.
476  */
477 static long region_del(struct resv_map *resv, long f, long t)
478 {
479 	struct list_head *head = &resv->regions;
480 	struct file_region *rg, *trg;
481 	struct file_region *nrg = NULL;
482 	long del = 0;
483 
484 retry:
485 	spin_lock(&resv->lock);
486 	list_for_each_entry_safe(rg, trg, head, link) {
487 		/*
488 		 * Skip regions before the range to be deleted.  file_region
489 		 * ranges are normally of the form [from, to).  However, there
490 		 * may be a "placeholder" entry in the map which is of the form
491 		 * (from, to) with from == to.  Check for placeholder entries
492 		 * at the beginning of the range to be deleted.
493 		 */
494 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
495 			continue;
496 
497 		if (rg->from >= t)
498 			break;
499 
500 		if (f > rg->from && t < rg->to) { /* Must split region */
501 			/*
502 			 * Check for an entry in the cache before dropping
503 			 * lock and attempting allocation.
504 			 */
505 			if (!nrg &&
506 			    resv->region_cache_count > resv->adds_in_progress) {
507 				nrg = list_first_entry(&resv->region_cache,
508 							struct file_region,
509 							link);
510 				list_del(&nrg->link);
511 				resv->region_cache_count--;
512 			}
513 
514 			if (!nrg) {
515 				spin_unlock(&resv->lock);
516 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
517 				if (!nrg)
518 					return -ENOMEM;
519 				goto retry;
520 			}
521 
522 			del += t - f;
523 
524 			/* New entry for end of split region */
525 			nrg->from = t;
526 			nrg->to = rg->to;
527 			INIT_LIST_HEAD(&nrg->link);
528 
529 			/* Original entry is trimmed */
530 			rg->to = f;
531 
532 			list_add(&nrg->link, &rg->link);
533 			nrg = NULL;
534 			break;
535 		}
536 
537 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
538 			del += rg->to - rg->from;
539 			list_del(&rg->link);
540 			kfree(rg);
541 			continue;
542 		}
543 
544 		if (f <= rg->from) {	/* Trim beginning of region */
545 			del += t - rg->from;
546 			rg->from = t;
547 		} else {		/* Trim end of region */
548 			del += rg->to - f;
549 			rg->to = f;
550 		}
551 	}
552 
553 	spin_unlock(&resv->lock);
554 	kfree(nrg);
555 	return del;
556 }
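/*
 * Illustrative example: region_del(resv, 1, 3) on a map holding [0, 6)
 * splits it into [0, 1) and [3, 6) and returns 2.  The split is the
 * only case that can fail with -ENOMEM, because a second descriptor is
 * needed for the trailing half.
 */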
557 
558 /*
559  * A rare out of memory error was encountered which prevented removal of
560  * the reserve map region for a page.  The huge page itself was freed
561  * and removed from the page cache.  This routine will adjust the subpool
562  * usage count, and the global reserve count if needed.  By incrementing
563  * these counts, the reserve map entry which could not be deleted will
564  * appear as a "reserved" entry instead of simply dangling with incorrect
565  * counts.
566  */
567 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
568 {
569 	struct hugepage_subpool *spool = subpool_inode(inode);
570 	long rsv_adjust;
571 
572 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
573 	if (restore_reserve && rsv_adjust) {
574 		struct hstate *h = hstate_inode(inode);
575 
576 		hugetlb_acct_memory(h, 1);
577 	}
578 }
579 
580 /*
581  * Count and return the number of huge pages in the reserve map
582  * that intersect with the range [f, t).
583  */
584 static long region_count(struct resv_map *resv, long f, long t)
585 {
586 	struct list_head *head = &resv->regions;
587 	struct file_region *rg;
588 	long chg = 0;
589 
590 	spin_lock(&resv->lock);
591 	/* Locate each segment we overlap with, and count that overlap. */
592 	list_for_each_entry(rg, head, link) {
593 		long seg_from;
594 		long seg_to;
595 
596 		if (rg->to <= f)
597 			continue;
598 		if (rg->from >= t)
599 			break;
600 
601 		seg_from = max(rg->from, f);
602 		seg_to = min(rg->to, t);
603 
604 		chg += seg_to - seg_from;
605 	}
606 	spin_unlock(&resv->lock);
607 
608 	return chg;
609 }
610 
611 /*
612  * Convert the address within this vma to the page offset within
613  * the mapping, in pagecache page units; huge pages here.
614  */
615 static pgoff_t vma_hugecache_offset(struct hstate *h,
616 			struct vm_area_struct *vma, unsigned long address)
617 {
618 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
619 			(vma->vm_pgoff >> huge_page_order(h));
620 }
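/*
 * Illustrative example (addresses hypothetical): for 2MB huge pages
 * (huge_page_shift(h) == 21), a vma with vm_start == 0x40000000 and
 * vm_pgoff == 0 maps address 0x40400000 to huge page index
 * (0x400000 >> 21) == 2.
 */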
621 
622 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
623 				     unsigned long address)
624 {
625 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
626 }
627 
628 /*
629  * Return the size of the pages allocated when backing a VMA. In the majority
630  * of cases this will be the same size as that used by the page table entries.
631  */
632 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
633 {
634 	struct hstate *hstate;
635 
636 	if (!is_vm_hugetlb_page(vma))
637 		return PAGE_SIZE;
638 
639 	hstate = hstate_vma(vma);
640 
641 	return 1UL << huge_page_shift(hstate);
642 }
643 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
644 
645 /*
646  * Return the page size being used by the MMU to back a VMA. In the majority
647  * of cases, the page size used by the kernel matches the MMU size. On
648  * architectures where it differs, an architecture-specific version of this
649  * function is required.
650  */
651 #ifndef vma_mmu_pagesize
652 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
653 {
654 	return vma_kernel_pagesize(vma);
655 }
656 #endif
657 
658 /*
659  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
660  * bits of the reservation map pointer, which are always clear due to
661  * alignment.
662  */
663 #define HPAGE_RESV_OWNER    (1UL << 0)
664 #define HPAGE_RESV_UNMAPPED (1UL << 1)
665 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
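/*
 * Illustrative example: because a resv_map is at least word-aligned,
 * vm_private_data can carry both the map pointer and the flag bits,
 * e.g. ((unsigned long)map | HPAGE_RESV_OWNER); the helpers below mask
 * with ~HPAGE_RESV_MASK to recover the pointer and with
 * HPAGE_RESV_MASK to recover the flags.
 */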
666 
667 /*
668  * These helpers are used to track how many pages are reserved for
669  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
670  * is guaranteed to have its future faults succeed.
671  *
672  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
673  * the reserve counters are updated with the hugetlb_lock held. It is safe
674  * to reset the VMA at fork() time as it is not in use yet and there is no
675  * chance of the global counters getting corrupted as a result of the values.
676  *
677  * The private mapping reservation is represented in a subtly different
678  * manner to a shared mapping.  A shared mapping has a region map associated
679  * with the underlying file; this region map represents the backing file
680  * pages which have ever had a reservation assigned, and it persists even
681  * after the page is instantiated.  A private mapping has a region map
682  * associated with the original mmap which is attached to all VMAs which
683  * reference it; this region map represents those offsets which have
684  * consumed a reservation, i.e. where pages have been instantiated.
685  */
686 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
687 {
688 	return (unsigned long)vma->vm_private_data;
689 }
690 
691 static void set_vma_private_data(struct vm_area_struct *vma,
692 							unsigned long value)
693 {
694 	vma->vm_private_data = (void *)value;
695 }
696 
697 struct resv_map *resv_map_alloc(void)
698 {
699 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
701 
702 	if (!resv_map || !rg) {
703 		kfree(resv_map);
704 		kfree(rg);
705 		return NULL;
706 	}
707 
708 	kref_init(&resv_map->refs);
709 	spin_lock_init(&resv_map->lock);
710 	INIT_LIST_HEAD(&resv_map->regions);
711 
712 	resv_map->adds_in_progress = 0;
713 
714 	INIT_LIST_HEAD(&resv_map->region_cache);
715 	list_add(&rg->link, &resv_map->region_cache);
716 	resv_map->region_cache_count = 1;
717 
718 	return resv_map;
719 }
720 
721 void resv_map_release(struct kref *ref)
722 {
723 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724 	struct list_head *head = &resv_map->region_cache;
725 	struct file_region *rg, *trg;
726 
727 	/* Clear out any active regions before we release the map. */
728 	region_del(resv_map, 0, LONG_MAX);
729 
730 	/* ... and any entries left in the cache */
731 	list_for_each_entry_safe(rg, trg, head, link) {
732 		list_del(&rg->link);
733 		kfree(rg);
734 	}
735 
736 	VM_BUG_ON(resv_map->adds_in_progress);
737 
738 	kfree(resv_map);
739 }
740 
741 static inline struct resv_map *inode_resv_map(struct inode *inode)
742 {
743 	return inode->i_mapping->private_data;
744 }
745 
746 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
747 {
748 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
749 	if (vma->vm_flags & VM_MAYSHARE) {
750 		struct address_space *mapping = vma->vm_file->f_mapping;
751 		struct inode *inode = mapping->host;
752 
753 		return inode_resv_map(inode);
754 
755 	} else {
756 		return (struct resv_map *)(get_vma_private_data(vma) &
757 							~HPAGE_RESV_MASK);
758 	}
759 }
760 
761 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
762 {
763 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765 
766 	set_vma_private_data(vma, (get_vma_private_data(vma) &
767 				HPAGE_RESV_MASK) | (unsigned long)map);
768 }
769 
770 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
771 {
772 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774 
775 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
776 }
777 
778 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
779 {
780 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781 
782 	return (get_vma_private_data(vma) & flag) != 0;
783 }
784 
785 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
786 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
787 {
788 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789 	if (!(vma->vm_flags & VM_MAYSHARE))
790 		vma->vm_private_data = (void *)0;
791 }
792 
793 /* Returns true if the VMA has associated reserve pages */
794 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
795 {
796 	if (vma->vm_flags & VM_NORESERVE) {
797 		/*
798 		 * This address is already reserved by another process (chg == 0),
799 		 * so we should decrement the reserved count. Without decrementing,
800 		 * the reserve count remains after releasing the inode, because this
801 		 * allocated page will go into the page cache and is regarded as
802 		 * coming from the reserved pool in the releasing step.  Currently, we
803 		 * don't have any other solution to deal with this situation
804 		 * properly, so add a work-around here.
805 		 */
806 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
807 			return true;
808 		else
809 			return false;
810 	}
811 
812 	/* Shared mappings always use reserves */
813 	if (vma->vm_flags & VM_MAYSHARE) {
814 		/*
815 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
816 		 * be a region map for all pages.  The only situation where
817 		 * there is no region map is if a hole was punched via
818 		 * fallocate.  In this case, there really are no reserves to
819 		 * use.  This situation is indicated if chg != 0.
820 		 */
821 		if (chg)
822 			return false;
823 		else
824 			return true;
825 	}
826 
827 	/*
828 	 * Only the process that called mmap() has reserves for
829 	 * private mappings.
830 	 */
831 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
832 		return true;
833 
834 	return false;
835 }
836 
837 static void enqueue_huge_page(struct hstate *h, struct page *page)
838 {
839 	int nid = page_to_nid(page);
840 	list_move(&page->lru, &h->hugepage_freelists[nid]);
841 	h->free_huge_pages++;
842 	h->free_huge_pages_node[nid]++;
843 }
844 
845 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
846 {
847 	struct page *page;
848 
849 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
850 		if (!is_migrate_isolate_page(page))
851 			break;
852 	/*
853 	 * If a 'non-isolated free hugepage' is not found on the list,
854 	 * the allocation fails.
855 	 */
856 	if (&h->hugepage_freelists[nid] == &page->lru)
857 		return NULL;
858 	list_move(&page->lru, &h->hugepage_activelist);
859 	set_page_refcounted(page);
860 	h->free_huge_pages--;
861 	h->free_huge_pages_node[nid]--;
862 	return page;
863 }
864 
865 /* Movability of hugepages depends on migration support. */
866 static inline gfp_t htlb_alloc_mask(struct hstate *h)
867 {
868 	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
869 		return GFP_HIGHUSER_MOVABLE;
870 	else
871 		return GFP_HIGHUSER;
872 }
873 
874 static struct page *dequeue_huge_page_vma(struct hstate *h,
875 				struct vm_area_struct *vma,
876 				unsigned long address, int avoid_reserve,
877 				long chg)
878 {
879 	struct page *page = NULL;
880 	struct mempolicy *mpol;
881 	nodemask_t *nodemask;
882 	struct zonelist *zonelist;
883 	struct zone *zone;
884 	struct zoneref *z;
885 	unsigned int cpuset_mems_cookie;
886 
887 	/*
888 	 * A child process with MAP_PRIVATE mappings created by its parent
889 	 * has no page reserves. This check ensures that reservations are
890 	 * not "stolen". The child may still get SIGKILLed.
891 	 */
892 	if (!vma_has_reserves(vma, chg) &&
893 			h->free_huge_pages - h->resv_huge_pages == 0)
894 		goto err;
895 
896 	/* If reserves cannot be used, ensure enough pages are in the pool */
897 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
898 		goto err;
899 
900 retry_cpuset:
901 	cpuset_mems_cookie = read_mems_allowed_begin();
902 	zonelist = huge_zonelist(vma, address,
903 					htlb_alloc_mask(h), &mpol, &nodemask);
904 
905 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
906 						MAX_NR_ZONES - 1, nodemask) {
907 		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
908 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
909 			if (page) {
910 				if (avoid_reserve)
911 					break;
912 				if (!vma_has_reserves(vma, chg))
913 					break;
914 
915 				SetPagePrivate(page);
916 				h->resv_huge_pages--;
917 				break;
918 			}
919 		}
920 	}
921 
922 	mpol_cond_put(mpol);
923 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
924 		goto retry_cpuset;
925 	return page;
926 
927 err:
928 	return NULL;
929 }
930 
931 /*
932  * common helper functions for hstate_next_node_to_{alloc|free}.
933  * We may have allocated or freed a huge page based on a different
934  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
935  * be outside of *nodes_allowed.  Ensure that we use an allowed
936  * node for alloc or free.
937  */
938 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
939 {
940 	nid = next_node(nid, *nodes_allowed);
941 	if (nid == MAX_NUMNODES)
942 		nid = first_node(*nodes_allowed);
943 	VM_BUG_ON(nid >= MAX_NUMNODES);
944 
945 	return nid;
946 }
947 
948 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
949 {
950 	if (!node_isset(nid, *nodes_allowed))
951 		nid = next_node_allowed(nid, nodes_allowed);
952 	return nid;
953 }
954 
955 /*
956  * returns the previously saved node ["this node"] from which to
957  * allocate a persistent huge page for the pool and advances the
958  * next node from which to allocate, handling wrap at end of node
959  * mask.
960  */
961 static int hstate_next_node_to_alloc(struct hstate *h,
962 					nodemask_t *nodes_allowed)
963 {
964 	int nid;
965 
966 	VM_BUG_ON(!nodes_allowed);
967 
968 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
969 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
970 
971 	return nid;
972 }
973 
974 /*
975  * helper for free_pool_huge_page() - return the previously saved
976  * node ["this node"] from which to free a huge page.  Advance the
977  * next node id whether or not we find a free huge page to free so
978  * that the next attempt to free addresses the next node.
979  */
980 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
981 {
982 	int nid;
983 
984 	VM_BUG_ON(!nodes_allowed);
985 
986 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
987 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
988 
989 	return nid;
990 }
991 
992 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
993 	for (nr_nodes = nodes_weight(*mask);				\
994 		nr_nodes > 0 &&						\
995 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
996 		nr_nodes--)
997 
998 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
999 	for (nr_nodes = nodes_weight(*mask);				\
1000 		nr_nodes > 0 &&						\
1001 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1002 		nr_nodes--)
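/*
 * Illustrative usage sketch (try_alloc_on() is a hypothetical stand-in
 * for a per-node allocation attempt):
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY])
 *		if (try_alloc_on(node))
 *			break;
 *
 * Each walk visits at most nodes_weight(*mask) nodes, resuming from the
 * hstate's saved position so that successive calls interleave across
 * the allowed nodes.
 */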
1003 
1004 #if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1005 static void destroy_compound_gigantic_page(struct page *page,
1006 					unsigned int order)
1007 {
1008 	int i;
1009 	int nr_pages = 1 << order;
1010 	struct page *p = page + 1;
1011 
1012 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1013 		clear_compound_head(p);
1014 		set_page_refcounted(p);
1015 	}
1016 
1017 	set_compound_order(page, 0);
1018 	__ClearPageHead(page);
1019 }
1020 
1021 static void free_gigantic_page(struct page *page, unsigned int order)
1022 {
1023 	free_contig_range(page_to_pfn(page), 1 << order);
1024 }
1025 
1026 static int __alloc_gigantic_page(unsigned long start_pfn,
1027 				unsigned long nr_pages)
1028 {
1029 	unsigned long end_pfn = start_pfn + nr_pages;
1030 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1031 }
1032 
1033 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1034 				unsigned long nr_pages)
1035 {
1036 	unsigned long i, end_pfn = start_pfn + nr_pages;
1037 	struct page *page;
1038 
1039 	for (i = start_pfn; i < end_pfn; i++) {
1040 		if (!pfn_valid(i))
1041 			return false;
1042 
1043 		page = pfn_to_page(i);
1044 
1045 		if (PageReserved(page))
1046 			return false;
1047 
1048 		if (page_count(page) > 0)
1049 			return false;
1050 
1051 		if (PageHuge(page))
1052 			return false;
1053 	}
1054 
1055 	return true;
1056 }
1057 
1058 static bool zone_spans_last_pfn(const struct zone *zone,
1059 			unsigned long start_pfn, unsigned long nr_pages)
1060 {
1061 	unsigned long last_pfn = start_pfn + nr_pages - 1;
1062 	return zone_spans_pfn(zone, last_pfn);
1063 }
1064 
1065 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1066 {
1067 	unsigned long nr_pages = 1 << order;
1068 	unsigned long ret, pfn, flags;
1069 	struct zone *z;
1070 
1071 	z = NODE_DATA(nid)->node_zones;
1072 	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1073 		spin_lock_irqsave(&z->lock, flags);
1074 
1075 		pfn = ALIGN(z->zone_start_pfn, nr_pages);
1076 		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1077 			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1078 				/*
1079 				 * We release the zone lock here because
1080 				 * alloc_contig_range() will also lock the zone
1081 				 * at some point. If there's an allocation
1082 				 * spinning on this lock, it may win the race
1083 				 * and cause alloc_contig_range() to fail...
1084 				 */
1085 				spin_unlock_irqrestore(&z->lock, flags);
1086 				ret = __alloc_gigantic_page(pfn, nr_pages);
1087 				if (!ret)
1088 					return pfn_to_page(pfn);
1089 				spin_lock_irqsave(&z->lock, flags);
1090 			}
1091 			pfn += nr_pages;
1092 		}
1093 
1094 		spin_unlock_irqrestore(&z->lock, flags);
1095 	}
1096 
1097 	return NULL;
1098 }
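/*
 * Illustrative example: for a 1GB gigantic page with a 4KB base page
 * size, order == 18 and nr_pages == 1 << 18, so the search above walks
 * candidate pfns in 1GB-aligned steps within each zone until
 * alloc_contig_range() succeeds.
 */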
1099 
1100 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1101 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1102 
1103 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1104 {
1105 	struct page *page;
1106 
1107 	page = alloc_gigantic_page(nid, huge_page_order(h));
1108 	if (page) {
1109 		prep_compound_gigantic_page(page, huge_page_order(h));
1110 		prep_new_huge_page(h, page, nid);
1111 	}
1112 
1113 	return page;
1114 }
1115 
1116 static int alloc_fresh_gigantic_page(struct hstate *h,
1117 				nodemask_t *nodes_allowed)
1118 {
1119 	struct page *page = NULL;
1120 	int nr_nodes, node;
1121 
1122 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1123 		page = alloc_fresh_gigantic_page_node(h, node);
1124 		if (page)
1125 			return 1;
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 static inline bool gigantic_page_supported(void) { return true; }
1132 #else
1133 static inline bool gigantic_page_supported(void) { return false; }
1134 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1135 static inline void destroy_compound_gigantic_page(struct page *page,
1136 						unsigned int order) { }
1137 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1138 					nodemask_t *nodes_allowed) { return 0; }
1139 #endif
1140 
1141 static void update_and_free_page(struct hstate *h, struct page *page)
1142 {
1143 	int i;
1144 
1145 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
1146 		return;
1147 
1148 	h->nr_huge_pages--;
1149 	h->nr_huge_pages_node[page_to_nid(page)]--;
1150 	for (i = 0; i < pages_per_huge_page(h); i++) {
1151 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1152 				1 << PG_referenced | 1 << PG_dirty |
1153 				1 << PG_active | 1 << PG_private |
1154 				1 << PG_writeback);
1155 	}
1156 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1157 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1158 	set_page_refcounted(page);
1159 	if (hstate_is_gigantic(h)) {
1160 		destroy_compound_gigantic_page(page, huge_page_order(h));
1161 		free_gigantic_page(page, huge_page_order(h));
1162 	} else {
1163 		__free_pages(page, huge_page_order(h));
1164 	}
1165 }
1166 
1167 struct hstate *size_to_hstate(unsigned long size)
1168 {
1169 	struct hstate *h;
1170 
1171 	for_each_hstate(h) {
1172 		if (huge_page_size(h) == size)
1173 			return h;
1174 	}
1175 	return NULL;
1176 }
1177 
1178 /*
1179  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1180  * to hstate->hugepage_activelist).
1181  *
1182  * This function can be called for tail pages, but never returns true for them.
1183  */
1184 bool page_huge_active(struct page *page)
1185 {
1186 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1187 	return PageHead(page) && PagePrivate(&page[1]);
1188 }
1189 
1190 /* never called for tail page */
1191 static void set_page_huge_active(struct page *page)
1192 {
1193 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1194 	SetPagePrivate(&page[1]);
1195 }
1196 
1197 static void clear_page_huge_active(struct page *page)
1198 {
1199 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1200 	ClearPagePrivate(&page[1]);
1201 }
1202 
1203 void free_huge_page(struct page *page)
1204 {
1205 	/*
1206 	 * Can't pass hstate in here because it is called from the
1207 	 * compound page destructor.
1208 	 */
1209 	struct hstate *h = page_hstate(page);
1210 	int nid = page_to_nid(page);
1211 	struct hugepage_subpool *spool =
1212 		(struct hugepage_subpool *)page_private(page);
1213 	bool restore_reserve;
1214 
1215 	set_page_private(page, 0);
1216 	page->mapping = NULL;
1217 	VM_BUG_ON_PAGE(page_count(page), page);
1218 	VM_BUG_ON_PAGE(page_mapcount(page), page);
1219 	restore_reserve = PagePrivate(page);
1220 	ClearPagePrivate(page);
1221 
1222 	/*
1223 	 * A return code of zero implies that the subpool will be under its
1224 	 * minimum size if the reservation is not restored after the page is freed.
1225 	 * Therefore, force restore_reserve operation.
1226 	 */
1227 	if (hugepage_subpool_put_pages(spool, 1) == 0)
1228 		restore_reserve = true;
1229 
1230 	spin_lock(&hugetlb_lock);
1231 	clear_page_huge_active(page);
1232 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1233 				     pages_per_huge_page(h), page);
1234 	if (restore_reserve)
1235 		h->resv_huge_pages++;
1236 
1237 	if (h->surplus_huge_pages_node[nid]) {
1238 		/* remove the page from active list */
1239 		list_del(&page->lru);
1240 		update_and_free_page(h, page);
1241 		h->surplus_huge_pages--;
1242 		h->surplus_huge_pages_node[nid]--;
1243 	} else {
1244 		arch_clear_hugepage_flags(page);
1245 		enqueue_huge_page(h, page);
1246 	}
1247 	spin_unlock(&hugetlb_lock);
1248 }
1249 
1250 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1251 {
1252 	INIT_LIST_HEAD(&page->lru);
1253 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1254 	spin_lock(&hugetlb_lock);
1255 	set_hugetlb_cgroup(page, NULL);
1256 	h->nr_huge_pages++;
1257 	h->nr_huge_pages_node[nid]++;
1258 	spin_unlock(&hugetlb_lock);
1259 	put_page(page); /* free it into the hugepage allocator */
1260 }
1261 
1262 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1263 {
1264 	int i;
1265 	int nr_pages = 1 << order;
1266 	struct page *p = page + 1;
1267 
1268 	/* we rely on prep_new_huge_page to set the destructor */
1269 	set_compound_order(page, order);
1270 	__ClearPageReserved(page);
1271 	__SetPageHead(page);
1272 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1273 		/*
1274 		 * For gigantic hugepages allocated through bootmem at
1275 		 * boot, it's safer to be consistent with the not-gigantic
1276 		 * hugepages and clear the PG_reserved bit from all tail pages
1277 		 * too.  Otherwise drivers using get_user_pages() to access tail
1278 		 * pages may get the reference counting wrong if they see
1279 		 * PG_reserved set on a tail page (despite the head page not
1280 		 * having PG_reserved set).  Enforcing this consistency between
1281 		 * head and tail pages allows drivers to optimize away a check
1282 		 * on the head page when they need to know if put_page() is needed
1283 		 * after get_user_pages().
1284 		 */
1285 		__ClearPageReserved(p);
1286 		set_page_count(p, 0);
1287 		set_compound_head(p, page);
1288 	}
1289 	atomic_set(compound_mapcount_ptr(page), -1);
1290 }
1291 
1292 /*
1293  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1294  * transparent huge pages.  See the PageTransHuge() documentation for more
1295  * details.
1296  */
1297 int PageHuge(struct page *page)
1298 {
1299 	if (!PageCompound(page))
1300 		return 0;
1301 
1302 	page = compound_head(page);
1303 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1304 }
1305 EXPORT_SYMBOL_GPL(PageHuge);
1306 
1307 /*
1308  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1309  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1310  */
1311 int PageHeadHuge(struct page *page_head)
1312 {
1313 	if (!PageHead(page_head))
1314 		return 0;
1315 
1316 	return get_compound_page_dtor(page_head) == free_huge_page;
1317 }
1318 
1319 pgoff_t __basepage_index(struct page *page)
1320 {
1321 	struct page *page_head = compound_head(page);
1322 	pgoff_t index = page_index(page_head);
1323 	unsigned long compound_idx;
1324 
1325 	if (!PageHuge(page_head))
1326 		return page_index(page);
1327 
1328 	if (compound_order(page_head) >= MAX_ORDER)
1329 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1330 	else
1331 		compound_idx = page - page_head;
1332 
1333 	return (index << compound_order(page_head)) + compound_idx;
1334 }
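/*
 * Illustrative example: for a 2MB compound page (order 9 with 4KB base
 * pages) at pagecache index 3, the tail page at offset 5 within the
 * compound page has base-page index (3 << 9) + 5 == 1541.
 */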
1335 
1336 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1337 {
1338 	struct page *page;
1339 
1340 	page = __alloc_pages_node(nid,
1341 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1342 						__GFP_REPEAT|__GFP_NOWARN,
1343 		huge_page_order(h));
1344 	if (page) {
1345 		prep_new_huge_page(h, page, nid);
1346 	}
1347 
1348 	return page;
1349 }
1350 
1351 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1352 {
1353 	struct page *page;
1354 	int nr_nodes, node;
1355 	int ret = 0;
1356 
1357 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1358 		page = alloc_fresh_huge_page_node(h, node);
1359 		if (page) {
1360 			ret = 1;
1361 			break;
1362 		}
1363 	}
1364 
1365 	if (ret)
1366 		count_vm_event(HTLB_BUDDY_PGALLOC);
1367 	else
1368 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1369 
1370 	return ret;
1371 }
1372 
1373 /*
1374  * Free huge page from pool from next node to free.
1375  * Attempt to keep persistent huge pages more or less
1376  * balanced over allowed nodes.
1377  * Called with hugetlb_lock locked.
1378  */
1379 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1380 							 bool acct_surplus)
1381 {
1382 	int nr_nodes, node;
1383 	int ret = 0;
1384 
1385 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1386 		/*
1387 		 * If we're returning unused surplus pages, only examine
1388 		 * nodes with surplus pages.
1389 		 */
1390 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1391 		    !list_empty(&h->hugepage_freelists[node])) {
1392 			struct page *page =
1393 				list_entry(h->hugepage_freelists[node].next,
1394 					  struct page, lru);
1395 			list_del(&page->lru);
1396 			h->free_huge_pages--;
1397 			h->free_huge_pages_node[node]--;
1398 			if (acct_surplus) {
1399 				h->surplus_huge_pages--;
1400 				h->surplus_huge_pages_node[node]--;
1401 			}
1402 			update_and_free_page(h, page);
1403 			ret = 1;
1404 			break;
1405 		}
1406 	}
1407 
1408 	return ret;
1409 }
1410 
1411 /*
1412  * Dissolve a given free hugepage into free buddy pages. This function does
1413  * nothing for in-use (including surplus) hugepages.
1414  */
1415 static void dissolve_free_huge_page(struct page *page)
1416 {
1417 	spin_lock(&hugetlb_lock);
1418 	if (PageHuge(page) && !page_count(page)) {
1419 		struct hstate *h = page_hstate(page);
1420 		int nid = page_to_nid(page);
1421 		list_del(&page->lru);
1422 		h->free_huge_pages--;
1423 		h->free_huge_pages_node[nid]--;
1424 		update_and_free_page(h, page);
1425 	}
1426 	spin_unlock(&hugetlb_lock);
1427 }
1428 
1429 /*
1430  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1431  * make specified memory blocks removable from the system.
1432  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1433  */
1434 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1435 {
1436 	unsigned long pfn;
1437 
1438 	if (!hugepages_supported())
1439 		return;
1440 
1441 	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1442 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1443 		dissolve_free_huge_page(pfn_to_page(pfn));
1444 }
1445 
1446 /*
1447  * There are 3 ways this can get called:
1448  * 1. With vma+addr: we use the VMA's memory policy
1449  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1450  *    page from any node, and let the buddy allocator itself figure
1451  *    it out.
1452  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1453  *    strictly from 'nid'
1454  */
1455 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1456 		struct vm_area_struct *vma, unsigned long addr, int nid)
1457 {
1458 	int order = huge_page_order(h);
1459 	gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1460 	unsigned int cpuset_mems_cookie;
1461 
1462 	/*
1463 	 * We need a VMA to get a memory policy.  If we do not
1464 	 * have one, we use the 'nid' argument.
1465 	 *
1466 	 * The mempolicy stuff below has some non-inlined bits
1467 	 * and calls ->vm_ops.  That makes it hard to optimize at
1468 	 * compile-time, even when NUMA is off and it does
1469 	 * nothing.  This helps the compiler optimize it out.
1470 	 */
1471 	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1472 		/*
1473 		 * If a specific node is requested, make sure to
1474 		 * get memory from there, but only when a node
1475 		 * is explicitly specified.
1476 		 */
1477 		if (nid != NUMA_NO_NODE)
1478 			gfp |= __GFP_THISNODE;
1479 		/*
1480 		 * Make sure to call something that can handle
1481 		 * nid=NUMA_NO_NODE
1482 		 */
1483 		return alloc_pages_node(nid, gfp, order);
1484 	}
1485 
1486 	/*
1487 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
1488 	 * allocate a huge page with it.  We will only reach this
1489 	 * when CONFIG_NUMA=y.
1490 	 */
1491 	do {
1492 		struct page *page;
1493 		struct mempolicy *mpol;
1494 		struct zonelist *zl;
1495 		nodemask_t *nodemask;
1496 
1497 		cpuset_mems_cookie = read_mems_allowed_begin();
1498 		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1499 		mpol_cond_put(mpol);
1500 		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1501 		if (page)
1502 			return page;
1503 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1504 
1505 	return NULL;
1506 }
1507 
1508 /*
1509  * There are two ways to allocate a huge page:
1510  * 1. When you have a VMA and an address (like a fault)
1511  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1512  *
1513  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1514  * this case which signifies that the allocation should be done with
1515  * respect for the VMA's memory policy.
1516  *
1517  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1518  * implies that memory policies will not be taken into account.
1519  */
1520 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1521 		struct vm_area_struct *vma, unsigned long addr, int nid)
1522 {
1523 	struct page *page;
1524 	unsigned int r_nid;
1525 
1526 	if (hstate_is_gigantic(h))
1527 		return NULL;
1528 
1529 	/*
1530 	 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1531 	 * This makes sure the caller is picking _one_ of the modes with which
1532 	 * we can call this function, not both.
1533 	 */
1534 	if (vma || (addr != -1)) {
1535 		VM_WARN_ON_ONCE(addr == -1);
1536 		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1537 	}
1538 	/*
1539 	 * Assume we will successfully allocate the surplus page to
1540 	 * prevent racing processes from causing the surplus to exceed
1541 	 * overcommit.
1542 	 *
1543 	 * This however introduces a different race, where a process B
1544 	 * tries to grow the static hugepage pool while alloc_pages() is
1545 	 * called by process A. B will only examine the per-node
1546 	 * counters in determining if surplus huge pages can be
1547 	 * converted to normal huge pages in adjust_pool_surplus(). A
1548 	 * won't be able to increment the per-node counter, until the
1549 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1550 	 * no more huge pages can be converted from surplus to normal
1551 	 * state (and doesn't try to convert again). Thus, we have a
1552 	 * case where a surplus huge page exists, the pool is grown, and
1553 	 * the surplus huge page still exists after, even though it
1554 	 * should just have been converted to a normal huge page. This
1555 	 * does not leak memory, though, as the hugepage will be freed
1556 	 * once it is out of use. It also does not allow the counters to
1557 	 * go out of whack in adjust_pool_surplus() as we don't modify
1558 	 * the node values until we've gotten the hugepage and only the
1559 	 * per-node value is checked there.
1560 	 */
1561 	spin_lock(&hugetlb_lock);
1562 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1563 		spin_unlock(&hugetlb_lock);
1564 		return NULL;
1565 	} else {
1566 		h->nr_huge_pages++;
1567 		h->surplus_huge_pages++;
1568 	}
1569 	spin_unlock(&hugetlb_lock);
1570 
1571 	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1572 
1573 	spin_lock(&hugetlb_lock);
1574 	if (page) {
1575 		INIT_LIST_HEAD(&page->lru);
1576 		r_nid = page_to_nid(page);
1577 		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1578 		set_hugetlb_cgroup(page, NULL);
1579 		/*
1580 		 * We incremented the global counters already
1581 		 */
1582 		h->nr_huge_pages_node[r_nid]++;
1583 		h->surplus_huge_pages_node[r_nid]++;
1584 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1585 	} else {
1586 		h->nr_huge_pages--;
1587 		h->surplus_huge_pages--;
1588 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1589 	}
1590 	spin_unlock(&hugetlb_lock);
1591 
1592 	return page;
1593 }
1594 
1595 /*
1596  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1597  * NUMA_NO_NODE, which means that it may be allocated
1598  * anywhere.
1599  */
1600 static
1601 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1602 {
1603 	unsigned long addr = -1;
1604 
1605 	return __alloc_buddy_huge_page(h, NULL, addr, nid);
1606 }
1607 
1608 /*
1609  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1610  */
1611 static
1612 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1613 		struct vm_area_struct *vma, unsigned long addr)
1614 {
1615 	return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1616 }
1617 
1618 /*
1619  * This allocation function is useful in the context where vma is irrelevant.
1620  * E.g. soft-offlining uses this function because it only cares about the
1621  * physical address of the error page.
1622  */
1623 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1624 {
1625 	struct page *page = NULL;
1626 
1627 	spin_lock(&hugetlb_lock);
1628 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1629 		page = dequeue_huge_page_node(h, nid);
1630 	spin_unlock(&hugetlb_lock);
1631 
1632 	if (!page)
1633 		page = __alloc_buddy_huge_page_no_mpol(h, nid);
1634 
1635 	return page;
1636 }
1637 
1638 /*
1639  * Increase the hugetlb pool such that it can accommodate a reservation
1640  * of size 'delta'.
1641  */
1642 static int gather_surplus_pages(struct hstate *h, int delta)
1643 {
1644 	struct list_head surplus_list;
1645 	struct page *page, *tmp;
1646 	int ret, i;
1647 	int needed, allocated;
1648 	bool alloc_ok = true;
1649 
1650 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1651 	if (needed <= 0) {
1652 		h->resv_huge_pages += delta;
1653 		return 0;
1654 	}
1655 
1656 	allocated = 0;
1657 	INIT_LIST_HEAD(&surplus_list);
1658 
1659 	ret = -ENOMEM;
1660 retry:
1661 	spin_unlock(&hugetlb_lock);
1662 	for (i = 0; i < needed; i++) {
1663 		page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1664 		if (!page) {
1665 			alloc_ok = false;
1666 			break;
1667 		}
1668 		list_add(&page->lru, &surplus_list);
1669 	}
1670 	allocated += i;
1671 
1672 	/*
1673 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1674 	 * because either resv_huge_pages or free_huge_pages may have changed.
1675 	 */
1676 	spin_lock(&hugetlb_lock);
1677 	needed = (h->resv_huge_pages + delta) -
1678 			(h->free_huge_pages + allocated);
1679 	if (needed > 0) {
1680 		if (alloc_ok)
1681 			goto retry;
1682 		/*
1683 		 * We were not able to allocate enough pages to
1684 		 * satisfy the entire reservation so we free what
1685 		 * we've allocated so far.
1686 		 */
1687 		goto free;
1688 	}
1689 	/*
1690 	 * The surplus_list now contains _at_least_ the number of extra pages
1691 	 * needed to accommodate the reservation.  Add the appropriate number
1692 	 * of pages to the hugetlb pool and free the extras back to the buddy
1693 	 * allocator.  Commit the entire reservation here to prevent another
1694 	 * process from stealing the pages as they are added to the pool but
1695 	 * before they are reserved.
1696 	 */
1697 	needed += allocated;
1698 	h->resv_huge_pages += delta;
1699 	ret = 0;
1700 
1701 	/* Free the needed pages to the hugetlb pool */
1702 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1703 		if ((--needed) < 0)
1704 			break;
1705 		/*
1706 		 * This page is now managed by the hugetlb allocator and has
1707 		 * no users -- drop the buddy allocator's reference.
1708 		 */
1709 		put_page_testzero(page);
1710 		VM_BUG_ON_PAGE(page_count(page), page);
1711 		enqueue_huge_page(h, page);
1712 	}
1713 free:
1714 	spin_unlock(&hugetlb_lock);
1715 
1716 	/* Free unnecessary surplus pages to the buddy allocator */
1717 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1718 		put_page(page);
1719 	spin_lock(&hugetlb_lock);
1720 
1721 	return ret;
1722 }
1723 
1724 /*
1725  * When releasing a hugetlb pool reservation, any surplus pages that were
1726  * allocated to satisfy the reservation must be explicitly freed if they were
1727  * never used.
1728  * Called with hugetlb_lock held.
1729  */
1730 static void return_unused_surplus_pages(struct hstate *h,
1731 					unsigned long unused_resv_pages)
1732 {
1733 	unsigned long nr_pages;
1734 
1735 	/* Uncommit the reservation */
1736 	h->resv_huge_pages -= unused_resv_pages;
1737 
1738 	/* Cannot return gigantic pages currently */
1739 	if (hstate_is_gigantic(h))
1740 		return;
1741 
1742 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1743 
1744 	/*
1745 	 * We want to release as many surplus pages as possible, spread
1746 	 * evenly across all nodes with memory. Iterate across these nodes
1747 	 * until we can no longer free unreserved surplus pages. This occurs
1748 	 * when the nodes with surplus pages have no free pages.
1749 	 * free_pool_huge_page() will balance the freed pages across the
1750 	 * on-line nodes with memory and will handle the hstate accounting.
1751 	 */
1752 	while (nr_pages--) {
1753 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1754 			break;
1755 		cond_resched_lock(&hugetlb_lock);
1756 	}
1757 }
1758 
1759 
1760 /*
1761  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1762  * are used by the huge page allocation routines to manage reservations.
1763  *
1764  * vma_needs_reservation is called to determine if the huge page at addr
1765  * within the vma has an associated reservation.  If a reservation is
1766  * needed, the value 1 is returned.  The caller is then responsible for
1767  * managing the global reservation and subpool usage counts.  After
1768  * the huge page has been allocated, vma_commit_reservation is called
1769  * to add the page to the reservation map.  If the page allocation fails,
1770  * the reservation must be ended instead of committed.  vma_end_reservation
1771  * is called in such cases.
1772  *
1773  * In the normal case, vma_commit_reservation returns the same value
1774  * as the preceding vma_needs_reservation call.  The only time this
1775  * is not the case is if a reserve map was changed between calls.  It
1776  * is the responsibility of the caller to notice the difference and
1777  * take appropriate action.
1778  */
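/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source) of the expected calling sequence, mirroring alloc_huge_page()
 * below; error handling is elided:
 *
 *	if (vma_needs_reservation(h, vma, addr) > 0) {
 *		... charge the subpool and global reserve counts ...
 *	}
 *	page = ... allocate the huge page ...;
 *	if (page)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */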
1779 enum vma_resv_mode {
1780 	VMA_NEEDS_RESV,
1781 	VMA_COMMIT_RESV,
1782 	VMA_END_RESV,
1783 };
1784 static long __vma_reservation_common(struct hstate *h,
1785 				struct vm_area_struct *vma, unsigned long addr,
1786 				enum vma_resv_mode mode)
1787 {
1788 	struct resv_map *resv;
1789 	pgoff_t idx;
1790 	long ret;
1791 
1792 	resv = vma_resv_map(vma);
1793 	if (!resv)
1794 		return 1;
1795 
1796 	idx = vma_hugecache_offset(h, vma, addr);
1797 	switch (mode) {
1798 	case VMA_NEEDS_RESV:
1799 		ret = region_chg(resv, idx, idx + 1);
1800 		break;
1801 	case VMA_COMMIT_RESV:
1802 		ret = region_add(resv, idx, idx + 1);
1803 		break;
1804 	case VMA_END_RESV:
1805 		region_abort(resv, idx, idx + 1);
1806 		ret = 0;
1807 		break;
1808 	default:
1809 		BUG();
1810 	}
1811 
1812 	if (vma->vm_flags & VM_MAYSHARE)
1813 		return ret;
1814 	else
1815 		return ret < 0 ? ret : 0;
1816 }
1817 
1818 static long vma_needs_reservation(struct hstate *h,
1819 			struct vm_area_struct *vma, unsigned long addr)
1820 {
1821 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1822 }
1823 
1824 static long vma_commit_reservation(struct hstate *h,
1825 			struct vm_area_struct *vma, unsigned long addr)
1826 {
1827 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1828 }
1829 
1830 static void vma_end_reservation(struct hstate *h,
1831 			struct vm_area_struct *vma, unsigned long addr)
1832 {
1833 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1834 }
1835 
1836 struct page *alloc_huge_page(struct vm_area_struct *vma,
1837 				    unsigned long addr, int avoid_reserve)
1838 {
1839 	struct hugepage_subpool *spool = subpool_vma(vma);
1840 	struct hstate *h = hstate_vma(vma);
1841 	struct page *page;
1842 	long map_chg, map_commit;
1843 	long gbl_chg;
1844 	int ret, idx;
1845 	struct hugetlb_cgroup *h_cg;
1846 
1847 	idx = hstate_index(h);
1848 	/*
1849 	 * Examine the region/reserve map to determine if the process
1850 	 * has a reservation for the page to be allocated.  A return
1851 	 * code of zero indicates a reservation exists (no change).
1852 	 */
1853 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1854 	if (map_chg < 0)
1855 		return ERR_PTR(-ENOMEM);
1856 
1857 	/*
1858 	 * Processes that did not create the mapping will have no
1859 	 * reserves as indicated by the region/reserve map. Check
1860 	 * that the allocation will not exceed the subpool limit.
1861 	 * Allocations for MAP_NORESERVE mappings also need to be
1862 	 * checked against any subpool limit.
1863 	 */
1864 	if (map_chg || avoid_reserve) {
1865 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
1866 		if (gbl_chg < 0) {
1867 			vma_end_reservation(h, vma, addr);
1868 			return ERR_PTR(-ENOSPC);
1869 		}
1870 
1871 		/*
1872 		 * Even though there was no reservation in the region/reserve
1873 		 * map, there could be reservations associated with the
1874 		 * subpool that can be used.  This would be indicated if the
1875 		 * return value of hugepage_subpool_get_pages() is zero.
1876 		 * However, if avoid_reserve is specified we still avoid even
1877 		 * the subpool reservations.
1878 		 */
1879 		if (avoid_reserve)
1880 			gbl_chg = 1;
1881 	}
1882 
1883 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1884 	if (ret)
1885 		goto out_subpool_put;
1886 
1887 	spin_lock(&hugetlb_lock);
1888 	/*
1889 	 * gbl_chg is passed to indicate whether or not a page must be taken
1890 	 * from the global free pool (global change).  gbl_chg == 0 indicates
1891 	 * a reservation exists for the allocation.
1892 	 */
1893 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1894 	if (!page) {
1895 		spin_unlock(&hugetlb_lock);
1896 		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1897 		if (!page)
1898 			goto out_uncharge_cgroup;
1899 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1900 			SetPagePrivate(page);
1901 			h->resv_huge_pages--;
1902 		}
1903 		spin_lock(&hugetlb_lock);
1904 		list_move(&page->lru, &h->hugepage_activelist);
1905 		/* Fall through */
1906 	}
1907 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1908 	spin_unlock(&hugetlb_lock);
1909 
1910 	set_page_private(page, (unsigned long)spool);
1911 
1912 	map_commit = vma_commit_reservation(h, vma, addr);
1913 	if (unlikely(map_chg > map_commit)) {
1914 		/*
1915 		 * The page was added to the reservation map between
1916 		 * vma_needs_reservation and vma_commit_reservation.
1917 		 * This indicates a race with hugetlb_reserve_pages.
1918 		 * Adjust for the subpool count incremented above AND
1919 		 * in hugetlb_reserve_pages for the same page.  Also,
1920 		 * the reservation count added in hugetlb_reserve_pages
1921 		 * no longer applies.
1922 		 */
1923 		long rsv_adjust;
1924 
1925 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1926 		hugetlb_acct_memory(h, -rsv_adjust);
1927 	}
1928 	return page;
1929 
1930 out_uncharge_cgroup:
1931 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1932 out_subpool_put:
1933 	if (map_chg || avoid_reserve)
1934 		hugepage_subpool_put_pages(spool, 1);
1935 	vma_end_reservation(h, vma, addr);
1936 	return ERR_PTR(-ENOSPC);
1937 }
1938 
1939 /*
1940  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1941  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1942  * where no ERR_VALUE is expected to be returned.
1943  */
1944 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1945 				unsigned long addr, int avoid_reserve)
1946 {
1947 	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1948 	if (IS_ERR(page))
1949 		page = NULL;
1950 	return page;
1951 }
1952 
1953 int __weak alloc_bootmem_huge_page(struct hstate *h)
1954 {
1955 	struct huge_bootmem_page *m;
1956 	int nr_nodes, node;
1957 
1958 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1959 		void *addr;
1960 
1961 		addr = memblock_virt_alloc_try_nid_nopanic(
1962 				huge_page_size(h), huge_page_size(h),
1963 				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1964 		if (addr) {
1965 			/*
1966 			 * Use the beginning of the huge page to store the
1967 			 * huge_bootmem_page struct (until gather_bootmem
1968 			 * puts them into the mem_map).
1969 			 */
1970 			m = addr;
1971 			goto found;
1972 		}
1973 	}
1974 	return 0;
1975 
1976 found:
1977 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1978 	/* Put them into a private list first because mem_map is not up yet */
1979 	list_add(&m->list, &huge_boot_pages);
1980 	m->hstate = h;
1981 	return 1;
1982 }
1983 
1984 static void __init prep_compound_huge_page(struct page *page,
1985 		unsigned int order)
1986 {
1987 	if (unlikely(order > (MAX_ORDER - 1)))
1988 		prep_compound_gigantic_page(page, order);
1989 	else
1990 		prep_compound_page(page, order);
1991 }
1992 
1993 /* Put bootmem huge pages into the standard lists after mem_map is up */
1994 static void __init gather_bootmem_prealloc(void)
1995 {
1996 	struct huge_bootmem_page *m;
1997 
1998 	list_for_each_entry(m, &huge_boot_pages, list) {
1999 		struct hstate *h = m->hstate;
2000 		struct page *page;
2001 
2002 #ifdef CONFIG_HIGHMEM
2003 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
2004 		memblock_free_late(__pa(m),
2005 				   sizeof(struct huge_bootmem_page));
2006 #else
2007 		page = virt_to_page(m);
2008 #endif
2009 		WARN_ON(page_count(page) != 1);
2010 		prep_compound_huge_page(page, h->order);
2011 		WARN_ON(PageReserved(page));
2012 		prep_new_huge_page(h, page, page_to_nid(page));
2013 		/*
2014 		 * If we had gigantic hugepages allocated at boot time, we need
2015 		 * to restore the 'stolen' pages to totalram_pages in order to
2016 	 * fix confusing memory reports from free(1) and other
2017 		 * side-effects, like CommitLimit going negative.
2018 		 */
2019 		if (hstate_is_gigantic(h))
2020 			adjust_managed_page_count(page, 1 << h->order);
2021 	}
2022 }
2023 
2024 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2025 {
2026 	unsigned long i;
2027 
2028 	for (i = 0; i < h->max_huge_pages; ++i) {
2029 		if (hstate_is_gigantic(h)) {
2030 			if (!alloc_bootmem_huge_page(h))
2031 				break;
2032 		} else if (!alloc_fresh_huge_page(h,
2033 					 &node_states[N_MEMORY]))
2034 			break;
2035 	}
2036 	h->max_huge_pages = i;
2037 }
2038 
2039 static void __init hugetlb_init_hstates(void)
2040 {
2041 	struct hstate *h;
2042 
2043 	for_each_hstate(h) {
2044 		if (minimum_order > huge_page_order(h))
2045 			minimum_order = huge_page_order(h);
2046 
2047 		/* oversized hugepages were initialized in early boot */
2048 		if (!hstate_is_gigantic(h))
2049 			hugetlb_hstate_alloc_pages(h);
2050 	}
2051 	VM_BUG_ON(minimum_order == UINT_MAX);
2052 }
2053 
2054 static char * __init memfmt(char *buf, unsigned long n)
2055 {
2056 	if (n >= (1UL << 30))
2057 		sprintf(buf, "%lu GB", n >> 30);
2058 	else if (n >= (1UL << 20))
2059 		sprintf(buf, "%lu MB", n >> 20);
2060 	else
2061 		sprintf(buf, "%lu KB", n >> 10);
2062 	return buf;
2063 }
2064 
2065 static void __init report_hugepages(void)
2066 {
2067 	struct hstate *h;
2068 
2069 	for_each_hstate(h) {
2070 		char buf[32];
2071 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2072 			memfmt(buf, huge_page_size(h)),
2073 			h->free_huge_pages);
2074 	}
2075 }
2076 
2077 #ifdef CONFIG_HIGHMEM
2078 static void try_to_free_low(struct hstate *h, unsigned long count,
2079 						nodemask_t *nodes_allowed)
2080 {
2081 	int i;
2082 
2083 	if (hstate_is_gigantic(h))
2084 		return;
2085 
2086 	for_each_node_mask(i, *nodes_allowed) {
2087 		struct page *page, *next;
2088 		struct list_head *freel = &h->hugepage_freelists[i];
2089 		list_for_each_entry_safe(page, next, freel, lru) {
2090 			if (count >= h->nr_huge_pages)
2091 				return;
2092 			if (PageHighMem(page))
2093 				continue;
2094 			list_del(&page->lru);
2095 			update_and_free_page(h, page);
2096 			h->free_huge_pages--;
2097 			h->free_huge_pages_node[page_to_nid(page)]--;
2098 		}
2099 	}
2100 }
2101 #else
2102 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2103 						nodemask_t *nodes_allowed)
2104 {
2105 }
2106 #endif
2107 
2108 /*
2109  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2110  * balanced by operating on them in a round-robin fashion.
2111  * Returns 1 if an adjustment was made.
2112  */
2113 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2114 				int delta)
2115 {
2116 	int nr_nodes, node;
2117 
2118 	VM_BUG_ON(delta != -1 && delta != 1);
2119 
2120 	if (delta < 0) {
2121 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2122 			if (h->surplus_huge_pages_node[node])
2123 				goto found;
2124 		}
2125 	} else {
2126 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2127 			if (h->surplus_huge_pages_node[node] <
2128 					h->nr_huge_pages_node[node])
2129 				goto found;
2130 		}
2131 	}
2132 	return 0;
2133 
2134 found:
2135 	h->surplus_huge_pages += delta;
2136 	h->surplus_huge_pages_node[node] += delta;
2137 	return 1;
2138 }
2139 
2140 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2141 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2142 						nodemask_t *nodes_allowed)
2143 {
2144 	unsigned long min_count, ret;
2145 
2146 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
2147 		return h->max_huge_pages;
2148 
2149 	/*
2150 	 * Increase the pool size
2151 	 * First take pages out of surplus state.  Then make up the
2152 	 * remaining difference by allocating fresh huge pages.
2153 	 *
2154 	 * We might race with __alloc_buddy_huge_page() here and be unable
2155 	 * to convert a surplus huge page to a normal huge page. That is
2156 	 * not critical, though, it just means the overall size of the
2157 	 * pool might be one hugepage larger than it needs to be, but
2158 	 * within all the constraints specified by the sysctls.
2159 	 */
2160 	spin_lock(&hugetlb_lock);
2161 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2162 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2163 			break;
2164 	}
2165 
2166 	while (count > persistent_huge_pages(h)) {
2167 		/*
2168 		 * If this allocation races such that we no longer need the
2169 		 * page, free_huge_page will handle it by freeing the page
2170 		 * and reducing the surplus.
2171 		 */
2172 		spin_unlock(&hugetlb_lock);
2173 		if (hstate_is_gigantic(h))
2174 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2175 		else
2176 			ret = alloc_fresh_huge_page(h, nodes_allowed);
2177 		spin_lock(&hugetlb_lock);
2178 		if (!ret)
2179 			goto out;
2180 
2181 		/* Bail for signals. Probably ctrl-c from user */
2182 		if (signal_pending(current))
2183 			goto out;
2184 	}
2185 
2186 	/*
2187 	 * Decrease the pool size
2188 	 * First return free pages to the buddy allocator (being careful
2189 	 * to keep enough around to satisfy reservations).  Then place
2190 	 * pages into surplus state as needed so the pool will shrink
2191 	 * to the desired size as pages become free.
2192 	 *
2193 	 * By placing pages into the surplus state independent of the
2194 	 * overcommit value, we are allowing the surplus pool size to
2195 	 * exceed overcommit. There are few sane options here. Since
2196 	 * __alloc_buddy_huge_page() is checking the global counter,
2197 	 * though, we'll note that we're not allowed to exceed surplus
2198 	 * and won't grow the pool anywhere else. Not until one of the
2199 	 * sysctls is changed, or the surplus pages go out of use.
2200 	 */
2201 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2202 	min_count = max(count, min_count);
2203 	try_to_free_low(h, min_count, nodes_allowed);
2204 	while (min_count < persistent_huge_pages(h)) {
2205 		if (!free_pool_huge_page(h, nodes_allowed, 0))
2206 			break;
2207 		cond_resched_lock(&hugetlb_lock);
2208 	}
2209 	while (count < persistent_huge_pages(h)) {
2210 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2211 			break;
2212 	}
2213 out:
2214 	ret = persistent_huge_pages(h);
2215 	spin_unlock(&hugetlb_lock);
2216 	return ret;
2217 }
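
/*
 * Illustrative usage (an editorial note, not part of the original source):
 * set_max_huge_pages() is ultimately reached when user space resizes the
 * persistent pool, e.g.
 *
 *	echo 64 > /proc/sys/vm/nr_hugepages
 *
 * via hugetlb_sysctl_handler(), or through the per-hstate sysfs
 * nr_hugepages attribute below.  Growing converts surplus pages first and
 * then allocates fresh ones; shrinking frees unreserved pages and pushes
 * any remainder into surplus state.
 */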
2218 
2219 #define HSTATE_ATTR_RO(_name) \
2220 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2221 
2222 #define HSTATE_ATTR(_name) \
2223 	static struct kobj_attribute _name##_attr = \
2224 		__ATTR(_name, 0644, _name##_show, _name##_store)
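
/*
 * Illustrative expansion (an editorial note, not part of the original
 * source): HSTATE_ATTR(nr_hugepages) expands to
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */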
2225 
2226 static struct kobject *hugepages_kobj;
2227 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2228 
2229 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2230 
2231 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2232 {
2233 	int i;
2234 
2235 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2236 		if (hstate_kobjs[i] == kobj) {
2237 			if (nidp)
2238 				*nidp = NUMA_NO_NODE;
2239 			return &hstates[i];
2240 		}
2241 
2242 	return kobj_to_node_hstate(kobj, nidp);
2243 }
2244 
2245 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2246 					struct kobj_attribute *attr, char *buf)
2247 {
2248 	struct hstate *h;
2249 	unsigned long nr_huge_pages;
2250 	int nid;
2251 
2252 	h = kobj_to_hstate(kobj, &nid);
2253 	if (nid == NUMA_NO_NODE)
2254 		nr_huge_pages = h->nr_huge_pages;
2255 	else
2256 		nr_huge_pages = h->nr_huge_pages_node[nid];
2257 
2258 	return sprintf(buf, "%lu\n", nr_huge_pages);
2259 }
2260 
2261 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2262 					   struct hstate *h, int nid,
2263 					   unsigned long count, size_t len)
2264 {
2265 	int err;
2266 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2267 
2268 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2269 		err = -EINVAL;
2270 		goto out;
2271 	}
2272 
2273 	if (nid == NUMA_NO_NODE) {
2274 		/*
2275 		 * global hstate attribute
2276 		 */
2277 		if (!(obey_mempolicy &&
2278 				init_nodemask_of_mempolicy(nodes_allowed))) {
2279 			NODEMASK_FREE(nodes_allowed);
2280 			nodes_allowed = &node_states[N_MEMORY];
2281 		}
2282 	} else if (nodes_allowed) {
2283 		/*
2284 		 * per node hstate attribute: adjust count to global,
2285 		 * but restrict alloc/free to the specified node.
2286 		 */
2287 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2288 		init_nodemask_of_node(nodes_allowed, nid);
2289 	} else
2290 		nodes_allowed = &node_states[N_MEMORY];
2291 
2292 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2293 
2294 	if (nodes_allowed != &node_states[N_MEMORY])
2295 		NODEMASK_FREE(nodes_allowed);
2296 
2297 	return len;
2298 out:
2299 	NODEMASK_FREE(nodes_allowed);
2300 	return err;
2301 }
2302 
2303 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2304 					 struct kobject *kobj, const char *buf,
2305 					 size_t len)
2306 {
2307 	struct hstate *h;
2308 	unsigned long count;
2309 	int nid;
2310 	int err;
2311 
2312 	err = kstrtoul(buf, 10, &count);
2313 	if (err)
2314 		return err;
2315 
2316 	h = kobj_to_hstate(kobj, &nid);
2317 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2318 }
2319 
2320 static ssize_t nr_hugepages_show(struct kobject *kobj,
2321 				       struct kobj_attribute *attr, char *buf)
2322 {
2323 	return nr_hugepages_show_common(kobj, attr, buf);
2324 }
2325 
2326 static ssize_t nr_hugepages_store(struct kobject *kobj,
2327 	       struct kobj_attribute *attr, const char *buf, size_t len)
2328 {
2329 	return nr_hugepages_store_common(false, kobj, buf, len);
2330 }
2331 HSTATE_ATTR(nr_hugepages);
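
/*
 * Illustrative usage (an editorial note, not part of the original source):
 * with the hstate name built as "hugepages-%lukB" below, this attribute
 * appears per huge page size, e.g. for 2 MB pages on x86:
 *
 *	cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */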
2332 
2333 #ifdef CONFIG_NUMA
2334 
2335 /*
2336  * hstate attribute for optionally mempolicy-based constraint on persistent
2337  * huge page alloc/free.
2338  */
2339 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2340 				       struct kobj_attribute *attr, char *buf)
2341 {
2342 	return nr_hugepages_show_common(kobj, attr, buf);
2343 }
2344 
2345 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2346 	       struct kobj_attribute *attr, const char *buf, size_t len)
2347 {
2348 	return nr_hugepages_store_common(true, kobj, buf, len);
2349 }
2350 HSTATE_ATTR(nr_hugepages_mempolicy);
2351 #endif
2352 
2353 
2354 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2355 					struct kobj_attribute *attr, char *buf)
2356 {
2357 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2358 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2359 }
2360 
2361 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2362 		struct kobj_attribute *attr, const char *buf, size_t count)
2363 {
2364 	int err;
2365 	unsigned long input;
2366 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2367 
2368 	if (hstate_is_gigantic(h))
2369 		return -EINVAL;
2370 
2371 	err = kstrtoul(buf, 10, &input);
2372 	if (err)
2373 		return err;
2374 
2375 	spin_lock(&hugetlb_lock);
2376 	h->nr_overcommit_huge_pages = input;
2377 	spin_unlock(&hugetlb_lock);
2378 
2379 	return count;
2380 }
2381 HSTATE_ATTR(nr_overcommit_hugepages);
2382 
2383 static ssize_t free_hugepages_show(struct kobject *kobj,
2384 					struct kobj_attribute *attr, char *buf)
2385 {
2386 	struct hstate *h;
2387 	unsigned long free_huge_pages;
2388 	int nid;
2389 
2390 	h = kobj_to_hstate(kobj, &nid);
2391 	if (nid == NUMA_NO_NODE)
2392 		free_huge_pages = h->free_huge_pages;
2393 	else
2394 		free_huge_pages = h->free_huge_pages_node[nid];
2395 
2396 	return sprintf(buf, "%lu\n", free_huge_pages);
2397 }
2398 HSTATE_ATTR_RO(free_hugepages);
2399 
2400 static ssize_t resv_hugepages_show(struct kobject *kobj,
2401 					struct kobj_attribute *attr, char *buf)
2402 {
2403 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2404 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2405 }
2406 HSTATE_ATTR_RO(resv_hugepages);
2407 
2408 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2409 					struct kobj_attribute *attr, char *buf)
2410 {
2411 	struct hstate *h;
2412 	unsigned long surplus_huge_pages;
2413 	int nid;
2414 
2415 	h = kobj_to_hstate(kobj, &nid);
2416 	if (nid == NUMA_NO_NODE)
2417 		surplus_huge_pages = h->surplus_huge_pages;
2418 	else
2419 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
2420 
2421 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2422 }
2423 HSTATE_ATTR_RO(surplus_hugepages);
2424 
2425 static struct attribute *hstate_attrs[] = {
2426 	&nr_hugepages_attr.attr,
2427 	&nr_overcommit_hugepages_attr.attr,
2428 	&free_hugepages_attr.attr,
2429 	&resv_hugepages_attr.attr,
2430 	&surplus_hugepages_attr.attr,
2431 #ifdef CONFIG_NUMA
2432 	&nr_hugepages_mempolicy_attr.attr,
2433 #endif
2434 	NULL,
2435 };
2436 
2437 static struct attribute_group hstate_attr_group = {
2438 	.attrs = hstate_attrs,
2439 };
2440 
2441 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2442 				    struct kobject **hstate_kobjs,
2443 				    struct attribute_group *hstate_attr_group)
2444 {
2445 	int retval;
2446 	int hi = hstate_index(h);
2447 
2448 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2449 	if (!hstate_kobjs[hi])
2450 		return -ENOMEM;
2451 
2452 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2453 	if (retval)
2454 		kobject_put(hstate_kobjs[hi]);
2455 
2456 	return retval;
2457 }
2458 
2459 static void __init hugetlb_sysfs_init(void)
2460 {
2461 	struct hstate *h;
2462 	int err;
2463 
2464 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2465 	if (!hugepages_kobj)
2466 		return;
2467 
2468 	for_each_hstate(h) {
2469 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2470 					 hstate_kobjs, &hstate_attr_group);
2471 		if (err)
2472 			pr_err("Hugetlb: Unable to add hstate %s", h->name);
2473 	}
2474 }
2475 
2476 #ifdef CONFIG_NUMA
2477 
2478 /*
2479  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2480  * with node devices in node_devices[] using a parallel array.  The array
2481  * index of a node device or node_hstate equals the node id.
2482  * This is here to avoid any static dependency of the node device driver, in
2483  * the base kernel, on the hugetlb module.
2484  */
2485 struct node_hstate {
2486 	struct kobject		*hugepages_kobj;
2487 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
2488 };
2489 static struct node_hstate node_hstates[MAX_NUMNODES];
2490 
2491 /*
2492  * A subset of global hstate attributes for node devices
2493  */
2494 static struct attribute *per_node_hstate_attrs[] = {
2495 	&nr_hugepages_attr.attr,
2496 	&free_hugepages_attr.attr,
2497 	&surplus_hugepages_attr.attr,
2498 	NULL,
2499 };
2500 
2501 static struct attribute_group per_node_hstate_attr_group = {
2502 	.attrs = per_node_hstate_attrs,
2503 };
2504 
2505 /*
2506  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2507  * Returns node id via non-NULL nidp.
2508  */
2509 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2510 {
2511 	int nid;
2512 
2513 	for (nid = 0; nid < nr_node_ids; nid++) {
2514 		struct node_hstate *nhs = &node_hstates[nid];
2515 		int i;
2516 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
2517 			if (nhs->hstate_kobjs[i] == kobj) {
2518 				if (nidp)
2519 					*nidp = nid;
2520 				return &hstates[i];
2521 			}
2522 	}
2523 
2524 	BUG();
2525 	return NULL;
2526 }
2527 
2528 /*
2529  * Unregister hstate attributes from a single node device.
2530  * No-op if no hstate attributes attached.
2531  */
2532 static void hugetlb_unregister_node(struct node *node)
2533 {
2534 	struct hstate *h;
2535 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2536 
2537 	if (!nhs->hugepages_kobj)
2538 		return;		/* no hstate attributes */
2539 
2540 	for_each_hstate(h) {
2541 		int idx = hstate_index(h);
2542 		if (nhs->hstate_kobjs[idx]) {
2543 			kobject_put(nhs->hstate_kobjs[idx]);
2544 			nhs->hstate_kobjs[idx] = NULL;
2545 		}
2546 	}
2547 
2548 	kobject_put(nhs->hugepages_kobj);
2549 	nhs->hugepages_kobj = NULL;
2550 }
2551 
2552 
2553 /*
2554  * Register hstate attributes for a single node device.
2555  * No-op if attributes already registered.
2556  */
2557 static void hugetlb_register_node(struct node *node)
2558 {
2559 	struct hstate *h;
2560 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2561 	int err;
2562 
2563 	if (nhs->hugepages_kobj)
2564 		return;		/* already allocated */
2565 
2566 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2567 							&node->dev.kobj);
2568 	if (!nhs->hugepages_kobj)
2569 		return;
2570 
2571 	for_each_hstate(h) {
2572 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2573 						nhs->hstate_kobjs,
2574 						&per_node_hstate_attr_group);
2575 		if (err) {
2576 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2577 				h->name, node->dev.id);
2578 			hugetlb_unregister_node(node);
2579 			break;
2580 		}
2581 	}
2582 }
2583 
2584 /*
2585  * hugetlb init time:  register hstate attributes for all registered node
2586  * devices of nodes that have memory.  All on-line nodes should have
2587  * registered their associated device by this time.
2588  */
2589 static void __init hugetlb_register_all_nodes(void)
2590 {
2591 	int nid;
2592 
2593 	for_each_node_state(nid, N_MEMORY) {
2594 		struct node *node = node_devices[nid];
2595 		if (node->dev.id == nid)
2596 			hugetlb_register_node(node);
2597 	}
2598 
2599 	/*
2600 	 * Let the node device driver know we're here so it can
2601 	 * [un]register hstate attributes on node hotplug.
2602 	 */
2603 	register_hugetlbfs_with_node(hugetlb_register_node,
2604 				     hugetlb_unregister_node);
2605 }
2606 #else	/* !CONFIG_NUMA */
2607 
2608 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2609 {
2610 	BUG();
2611 	if (nidp)
2612 		*nidp = -1;
2613 	return NULL;
2614 }
2615 
2616 static void hugetlb_register_all_nodes(void) { }
2617 
2618 #endif
2619 
2620 static int __init hugetlb_init(void)
2621 {
2622 	int i;
2623 
2624 	if (!hugepages_supported())
2625 		return 0;
2626 
2627 	if (!size_to_hstate(default_hstate_size)) {
2628 		default_hstate_size = HPAGE_SIZE;
2629 		if (!size_to_hstate(default_hstate_size))
2630 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2631 	}
2632 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2633 	if (default_hstate_max_huge_pages) {
2634 		if (!default_hstate.max_huge_pages)
2635 			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2636 	}
2637 
2638 	hugetlb_init_hstates();
2639 	gather_bootmem_prealloc();
2640 	report_hugepages();
2641 
2642 	hugetlb_sysfs_init();
2643 	hugetlb_register_all_nodes();
2644 	hugetlb_cgroup_file_init();
2645 
2646 #ifdef CONFIG_SMP
2647 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2648 #else
2649 	num_fault_mutexes = 1;
2650 #endif
2651 	hugetlb_fault_mutex_table =
2652 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2653 	BUG_ON(!hugetlb_fault_mutex_table);
2654 
2655 	for (i = 0; i < num_fault_mutexes; i++)
2656 		mutex_init(&hugetlb_fault_mutex_table[i]);
2657 	return 0;
2658 }
2659 subsys_initcall(hugetlb_init);
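
/*
 * Illustrative sizing example (an editorial note, not part of the original
 * source): on an SMP kernel with 6 possible CPUs,
 * roundup_pow_of_two(8 * 6) == 64, so 64 fault mutexes are allocated.
 * hugetlb_fault_mutex_hash() masks its jhash result with
 * (num_fault_mutexes - 1), which is why the count must be a power of two.
 */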
2660 
2661 /* Should be called on processing a hugepagesz=... option */
2662 void __init hugetlb_add_hstate(unsigned int order)
2663 {
2664 	struct hstate *h;
2665 	unsigned long i;
2666 
2667 	if (size_to_hstate(PAGE_SIZE << order)) {
2668 		pr_warning("hugepagesz= specified twice, ignoring\n");
2669 		return;
2670 	}
2671 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2672 	BUG_ON(order == 0);
2673 	h = &hstates[hugetlb_max_hstate++];
2674 	h->order = order;
2675 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2676 	h->nr_huge_pages = 0;
2677 	h->free_huge_pages = 0;
2678 	for (i = 0; i < MAX_NUMNODES; ++i)
2679 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2680 	INIT_LIST_HEAD(&h->hugepage_activelist);
2681 	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2682 	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2683 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2684 					huge_page_size(h)/1024);
2685 
2686 	parsed_hstate = h;
2687 }
2688 
2689 static int __init hugetlb_nrpages_setup(char *s)
2690 {
2691 	unsigned long *mhp;
2692 	static unsigned long *last_mhp;
2693 
2694 	/*
2695 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2696 	 * so this hugepages= parameter goes to the "default hstate".
2697 	 */
2698 	if (!hugetlb_max_hstate)
2699 		mhp = &default_hstate_max_huge_pages;
2700 	else
2701 		mhp = &parsed_hstate->max_huge_pages;
2702 
2703 	if (mhp == last_mhp) {
2704 		pr_warning("hugepages= specified twice without "
2705 			   "interleaving hugepagesz=, ignoring\n");
2706 		return 1;
2707 	}
2708 
2709 	if (sscanf(s, "%lu", mhp) <= 0)
2710 		*mhp = 0;
2711 
2712 	/*
2713 	 * Global state is always initialized later in hugetlb_init.
2714 	 * But we need to allocate >= MAX_ORDER hstates here early to still
2715 	 * use the bootmem allocator.
2716 	 */
2717 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2718 		hugetlb_hstate_alloc_pages(parsed_hstate);
2719 
2720 	last_mhp = mhp;
2721 
2722 	return 1;
2723 }
2724 __setup("hugepages=", hugetlb_nrpages_setup);
2725 
2726 static int __init hugetlb_default_setup(char *s)
2727 {
2728 	default_hstate_size = memparse(s, &s);
2729 	return 1;
2730 }
2731 __setup("default_hugepagesz=", hugetlb_default_setup);
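
/*
 * Illustrative example (an editorial note, not part of the original
 * source): the three options parsed above combine on the kernel command
 * line, e.g.
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepages= applies to the most recently parsed hugepagesz= (or to
 * the default hstate if no hugepagesz= has been seen yet).
 */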
2732 
2733 static unsigned int cpuset_mems_nr(unsigned int *array)
2734 {
2735 	int node;
2736 	unsigned int nr = 0;
2737 
2738 	for_each_node_mask(node, cpuset_current_mems_allowed)
2739 		nr += array[node];
2740 
2741 	return nr;
2742 }
2743 
2744 #ifdef CONFIG_SYSCTL
2745 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2746 			 struct ctl_table *table, int write,
2747 			 void __user *buffer, size_t *length, loff_t *ppos)
2748 {
2749 	struct hstate *h = &default_hstate;
2750 	unsigned long tmp = h->max_huge_pages;
2751 	int ret;
2752 
2753 	if (!hugepages_supported())
2754 		return -ENOTSUPP;
2755 
2756 	table->data = &tmp;
2757 	table->maxlen = sizeof(unsigned long);
2758 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2759 	if (ret)
2760 		goto out;
2761 
2762 	if (write)
2763 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
2764 						  NUMA_NO_NODE, tmp, *length);
2765 out:
2766 	return ret;
2767 }
2768 
2769 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2770 			  void __user *buffer, size_t *length, loff_t *ppos)
2771 {
2772 
2773 	return hugetlb_sysctl_handler_common(false, table, write,
2774 							buffer, length, ppos);
2775 }
2776 
2777 #ifdef CONFIG_NUMA
2778 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2779 			  void __user *buffer, size_t *length, loff_t *ppos)
2780 {
2781 	return hugetlb_sysctl_handler_common(true, table, write,
2782 							buffer, length, ppos);
2783 }
2784 #endif /* CONFIG_NUMA */
2785 
2786 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2787 			void __user *buffer,
2788 			size_t *length, loff_t *ppos)
2789 {
2790 	struct hstate *h = &default_hstate;
2791 	unsigned long tmp;
2792 	int ret;
2793 
2794 	if (!hugepages_supported())
2795 		return -ENOTSUPP;
2796 
2797 	tmp = h->nr_overcommit_huge_pages;
2798 
2799 	if (write && hstate_is_gigantic(h))
2800 		return -EINVAL;
2801 
2802 	table->data = &tmp;
2803 	table->maxlen = sizeof(unsigned long);
2804 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2805 	if (ret)
2806 		goto out;
2807 
2808 	if (write) {
2809 		spin_lock(&hugetlb_lock);
2810 		h->nr_overcommit_huge_pages = tmp;
2811 		spin_unlock(&hugetlb_lock);
2812 	}
2813 out:
2814 	return ret;
2815 }
2816 
2817 #endif /* CONFIG_SYSCTL */
2818 
2819 void hugetlb_report_meminfo(struct seq_file *m)
2820 {
2821 	struct hstate *h = &default_hstate;
2822 	if (!hugepages_supported())
2823 		return;
2824 	seq_printf(m,
2825 			"HugePages_Total:   %5lu\n"
2826 			"HugePages_Free:    %5lu\n"
2827 			"HugePages_Rsvd:    %5lu\n"
2828 			"HugePages_Surp:    %5lu\n"
2829 			"Hugepagesize:   %8lu kB\n",
2830 			h->nr_huge_pages,
2831 			h->free_huge_pages,
2832 			h->resv_huge_pages,
2833 			h->surplus_huge_pages,
2834 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2835 }
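
/*
 * Illustrative sample output (an editorial note, not part of the original
 * source), per the format strings above:
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */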
2836 
2837 int hugetlb_report_node_meminfo(int nid, char *buf)
2838 {
2839 	struct hstate *h = &default_hstate;
2840 	if (!hugepages_supported())
2841 		return 0;
2842 	return sprintf(buf,
2843 		"Node %d HugePages_Total: %5u\n"
2844 		"Node %d HugePages_Free:  %5u\n"
2845 		"Node %d HugePages_Surp:  %5u\n",
2846 		nid, h->nr_huge_pages_node[nid],
2847 		nid, h->free_huge_pages_node[nid],
2848 		nid, h->surplus_huge_pages_node[nid]);
2849 }
2850 
2851 void hugetlb_show_meminfo(void)
2852 {
2853 	struct hstate *h;
2854 	int nid;
2855 
2856 	if (!hugepages_supported())
2857 		return;
2858 
2859 	for_each_node_state(nid, N_MEMORY)
2860 		for_each_hstate(h)
2861 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2862 				nid,
2863 				h->nr_huge_pages_node[nid],
2864 				h->free_huge_pages_node[nid],
2865 				h->surplus_huge_pages_node[nid],
2866 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2867 }
2868 
2869 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2870 {
2871 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2872 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2873 }
2874 
2875 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2876 unsigned long hugetlb_total_pages(void)
2877 {
2878 	struct hstate *h;
2879 	unsigned long nr_total_pages = 0;
2880 
2881 	for_each_hstate(h)
2882 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2883 	return nr_total_pages;
2884 }
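
/*
 * Illustrative worked example (an editorial note, not part of the original
 * source): with 64 huge pages of 2 MB on a system with a 4 KB PAGE_SIZE,
 * pages_per_huge_page() is 512, so hugetlb_total_pages() returns
 * 64 * 512 = 32768 base pages.
 */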
2885 
2886 static int hugetlb_acct_memory(struct hstate *h, long delta)
2887 {
2888 	int ret = -ENOMEM;
2889 
2890 	spin_lock(&hugetlb_lock);
2891 	/*
2892 	 * When cpuset is configured, it breaks the strict hugetlb page
2893 	 * reservation as the accounting is done on a global variable. Such a
2894 	 * reservation is effectively meaningless in the presence of cpusets
2895 	 * because the reservation is not checked against page availability for
2896 	 * the current cpuset. An application can still be OOM-killed by the
2897 	 * kernel if there is no free hugetlb page in the cpuset that the task
2898 	 * is in. Enforcing strict accounting with cpusets is almost impossible
2899 	 * (or too ugly) because cpusets are too fluid: tasks and memory nodes
2900 	 * can be dynamically moved between cpusets.
2901 	 *
2902 	 * Changing the semantics of shared hugetlb mappings under cpusets is
2903 	 * undesirable. However, to preserve some of the semantics, we fall
2904 	 * back to checking against current free page availability as a best
2905 	 * effort, hopefully minimizing the impact of cpuset's changed
2906 	 * semantics.
2907 	 */
2908 	if (delta > 0) {
2909 		if (gather_surplus_pages(h, delta) < 0)
2910 			goto out;
2911 
2912 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2913 			return_unused_surplus_pages(h, delta);
2914 			goto out;
2915 		}
2916 	}
2917 
2918 	ret = 0;
2919 	if (delta < 0)
2920 		return_unused_surplus_pages(h, (unsigned long) -delta);
2921 
2922 out:
2923 	spin_unlock(&hugetlb_lock);
2924 	return ret;
2925 }
2926 
2927 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2928 {
2929 	struct resv_map *resv = vma_resv_map(vma);
2930 
2931 	/*
2932 	 * This new VMA should share its sibling's reservation map if present.
2933 	 * The VMA will only ever have a valid reservation map pointer where
2934 	 * it is being copied for another still existing VMA.  As that VMA
2935 	 * has a reference to the reservation map it cannot disappear until
2936 	 * after this open call completes.  It is therefore safe to take a
2937 	 * new reference here without additional locking.
2938 	 */
2939 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2940 		kref_get(&resv->refs);
2941 }
2942 
2943 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2944 {
2945 	struct hstate *h = hstate_vma(vma);
2946 	struct resv_map *resv = vma_resv_map(vma);
2947 	struct hugepage_subpool *spool = subpool_vma(vma);
2948 	unsigned long reserve, start, end;
2949 	long gbl_reserve;
2950 
2951 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2952 		return;
2953 
2954 	start = vma_hugecache_offset(h, vma, vma->vm_start);
2955 	end = vma_hugecache_offset(h, vma, vma->vm_end);
2956 
2957 	reserve = (end - start) - region_count(resv, start, end);
2958 
2959 	kref_put(&resv->refs, resv_map_release);
2960 
2961 	if (reserve) {
2962 		/*
2963 		 * Decrement reserve counts.  The global reserve count may be
2964 		 * adjusted if the subpool has a minimum size.
2965 		 */
2966 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2967 		hugetlb_acct_memory(h, -gbl_reserve);
2968 	}
2969 }
2970 
2971 /*
2972  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2973  * handle_mm_fault() to try to instantiate regular-sized pages in the
2974  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2975  * this far.
2976  */
2977 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2978 {
2979 	BUG();
2980 	return 0;
2981 }
2982 
2983 const struct vm_operations_struct hugetlb_vm_ops = {
2984 	.fault = hugetlb_vm_op_fault,
2985 	.open = hugetlb_vm_op_open,
2986 	.close = hugetlb_vm_op_close,
2987 };
2988 
2989 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2990 				int writable)
2991 {
2992 	pte_t entry;
2993 
2994 	if (writable) {
2995 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2996 					 vma->vm_page_prot)));
2997 	} else {
2998 		entry = huge_pte_wrprotect(mk_huge_pte(page,
2999 					   vma->vm_page_prot));
3000 	}
3001 	entry = pte_mkyoung(entry);
3002 	entry = pte_mkhuge(entry);
3003 	entry = arch_make_huge_pte(entry, vma, page, writable);
3004 
3005 	return entry;
3006 }
3007 
3008 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3009 				   unsigned long address, pte_t *ptep)
3010 {
3011 	pte_t entry;
3012 
3013 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3014 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3015 		update_mmu_cache(vma, address, ptep);
3016 }
3017 
3018 static int is_hugetlb_entry_migration(pte_t pte)
3019 {
3020 	swp_entry_t swp;
3021 
3022 	if (huge_pte_none(pte) || pte_present(pte))
3023 		return 0;
3024 	swp = pte_to_swp_entry(pte);
3025 	if (non_swap_entry(swp) && is_migration_entry(swp))
3026 		return 1;
3027 	else
3028 		return 0;
3029 }
3030 
3031 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3032 {
3033 	swp_entry_t swp;
3034 
3035 	if (huge_pte_none(pte) || pte_present(pte))
3036 		return 0;
3037 	swp = pte_to_swp_entry(pte);
3038 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3039 		return 1;
3040 	else
3041 		return 0;
3042 }
3043 
3044 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3045 			    struct vm_area_struct *vma)
3046 {
3047 	pte_t *src_pte, *dst_pte, entry;
3048 	struct page *ptepage;
3049 	unsigned long addr;
3050 	int cow;
3051 	struct hstate *h = hstate_vma(vma);
3052 	unsigned long sz = huge_page_size(h);
3053 	unsigned long mmun_start;	/* For mmu_notifiers */
3054 	unsigned long mmun_end;		/* For mmu_notifiers */
3055 	int ret = 0;
3056 
3057 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3058 
3059 	mmun_start = vma->vm_start;
3060 	mmun_end = vma->vm_end;
3061 	if (cow)
3062 		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3063 
3064 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3065 		spinlock_t *src_ptl, *dst_ptl;
3066 		src_pte = huge_pte_offset(src, addr);
3067 		if (!src_pte)
3068 			continue;
3069 		dst_pte = huge_pte_alloc(dst, addr, sz);
3070 		if (!dst_pte) {
3071 			ret = -ENOMEM;
3072 			break;
3073 		}
3074 
3075 		/* If the pagetables are shared, don't copy or take references */
3076 		if (dst_pte == src_pte)
3077 			continue;
3078 
3079 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3080 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3081 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3082 		entry = huge_ptep_get(src_pte);
3083 		if (huge_pte_none(entry)) { /* skip none entry */
3084 			;
3085 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
3086 				    is_hugetlb_entry_hwpoisoned(entry))) {
3087 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
3088 
3089 			if (is_write_migration_entry(swp_entry) && cow) {
3090 				/*
3091 				 * COW mappings require pages in both
3092 				 * parent and child to be set to read.
3093 				 */
3094 				make_migration_entry_read(&swp_entry);
3095 				entry = swp_entry_to_pte(swp_entry);
3096 				set_huge_pte_at(src, addr, src_pte, entry);
3097 			}
3098 			set_huge_pte_at(dst, addr, dst_pte, entry);
3099 		} else {
3100 			if (cow) {
3101 				huge_ptep_set_wrprotect(src, addr, src_pte);
3102 				mmu_notifier_invalidate_range(src, mmun_start,
3103 								   mmun_end);
3104 			}
3105 			entry = huge_ptep_get(src_pte);
3106 			ptepage = pte_page(entry);
3107 			get_page(ptepage);
3108 			page_dup_rmap(ptepage, true);
3109 			set_huge_pte_at(dst, addr, dst_pte, entry);
3110 			hugetlb_count_add(pages_per_huge_page(h), dst);
3111 		}
3112 		spin_unlock(src_ptl);
3113 		spin_unlock(dst_ptl);
3114 	}
3115 
3116 	if (cow)
3117 		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3118 
3119 	return ret;
3120 }
3121 
3122 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3123 			    unsigned long start, unsigned long end,
3124 			    struct page *ref_page)
3125 {
3126 	int force_flush = 0;
3127 	struct mm_struct *mm = vma->vm_mm;
3128 	unsigned long address;
3129 	pte_t *ptep;
3130 	pte_t pte;
3131 	spinlock_t *ptl;
3132 	struct page *page;
3133 	struct hstate *h = hstate_vma(vma);
3134 	unsigned long sz = huge_page_size(h);
3135 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
3136 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
3137 
3138 	WARN_ON(!is_vm_hugetlb_page(vma));
3139 	BUG_ON(start & ~huge_page_mask(h));
3140 	BUG_ON(end & ~huge_page_mask(h));
3141 
3142 	tlb_start_vma(tlb, vma);
3143 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3144 	address = start;
3145 again:
3146 	for (; address < end; address += sz) {
3147 		ptep = huge_pte_offset(mm, address);
3148 		if (!ptep)
3149 			continue;
3150 
3151 		ptl = huge_pte_lock(h, mm, ptep);
3152 		if (huge_pmd_unshare(mm, &address, ptep))
3153 			goto unlock;
3154 
3155 		pte = huge_ptep_get(ptep);
3156 		if (huge_pte_none(pte))
3157 			goto unlock;
3158 
3159 		/*
3160 		 * A migrating or HWPoisoned hugepage is already unmapped
3161 		 * and its refcount dropped, so just clear the pte here.
3162 		 */
3163 		if (unlikely(!pte_present(pte))) {
3164 			huge_pte_clear(mm, address, ptep);
3165 			goto unlock;
3166 		}
3167 
3168 		page = pte_page(pte);
3169 		/*
3170 		 * If a reference page is supplied, it is because a specific
3171 		 * page is being unmapped, not a range. Ensure the page we
3172 		 * are about to unmap is the actual page of interest.
3173 		 */
3174 		if (ref_page) {
3175 			if (page != ref_page)
3176 				goto unlock;
3177 
3178 			/*
3179 			 * Mark the VMA as having unmapped its page so that
3180 			 * future faults in this VMA will fail rather than
3181 			 * looking like data was lost
3182 			 */
3183 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3184 		}
3185 
3186 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3187 		tlb_remove_tlb_entry(tlb, ptep, address);
3188 		if (huge_pte_dirty(pte))
3189 			set_page_dirty(page);
3190 
3191 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3192 		page_remove_rmap(page, true);
3193 		force_flush = !__tlb_remove_page(tlb, page);
3194 		if (force_flush) {
3195 			address += sz;
3196 			spin_unlock(ptl);
3197 			break;
3198 		}
3199 		/* Bail out after unmapping reference page if supplied */
3200 		if (ref_page) {
3201 			spin_unlock(ptl);
3202 			break;
3203 		}
3204 unlock:
3205 		spin_unlock(ptl);
3206 	}
3207 	/*
3208 	 * mmu_gather ran out of room to batch pages: we break out of
3209 	 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3210 	 * and page-free while holding it.
3211 	 */
3212 	if (force_flush) {
3213 		force_flush = 0;
3214 		tlb_flush_mmu(tlb);
3215 		if (address < end && !ref_page)
3216 			goto again;
3217 	}
3218 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3219 	tlb_end_vma(tlb, vma);
3220 }
3221 
3222 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3223 			  struct vm_area_struct *vma, unsigned long start,
3224 			  unsigned long end, struct page *ref_page)
3225 {
3226 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3227 
3228 	/*
3229 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3230 	 * test will fail on a vma being torn down, and not grab a page table
3231 	 * on its way out.  We're lucky that the flag has such an appropriate
3232 	 * name, and can in fact be safely cleared here. We could clear it
3233 	 * before the __unmap_hugepage_range above, but all that's necessary
3234 	 * is to clear it before releasing the i_mmap_rwsem. This works
3235 	 * because in the context this is called, the VMA is about to be
3236 	 * destroyed and the i_mmap_rwsem is held.
3237 	 */
3238 	vma->vm_flags &= ~VM_MAYSHARE;
3239 }
3240 
3241 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3242 			  unsigned long end, struct page *ref_page)
3243 {
3244 	struct mm_struct *mm;
3245 	struct mmu_gather tlb;
3246 
3247 	mm = vma->vm_mm;
3248 
3249 	tlb_gather_mmu(&tlb, mm, start, end);
3250 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3251 	tlb_finish_mmu(&tlb, start, end);
3252 }
3253 
3254 /*
3255  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3256  * mapping it owns the reserve page for. The intention is to unmap the page
3257  * from other VMAs and let the children be SIGKILLed if they are faulting the
3258  * same region.
3259  */
3260 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3261 			      struct page *page, unsigned long address)
3262 {
3263 	struct hstate *h = hstate_vma(vma);
3264 	struct vm_area_struct *iter_vma;
3265 	struct address_space *mapping;
3266 	pgoff_t pgoff;
3267 
3268 	/*
3269 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3270 	 * from page cache lookup which is in HPAGE_SIZE units.
3271 	 */
3272 	address = address & huge_page_mask(h);
3273 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3274 			vma->vm_pgoff;
3275 	mapping = file_inode(vma->vm_file)->i_mapping;
3276 
3277 	/*
3278 	 * Take the mapping lock for the duration of the table walk. As
3279 	 * this mapping should be shared between all the VMAs,
3280 	 * __unmap_hugepage_range() is called as the lock is already held
3281 	 */
3282 	i_mmap_lock_write(mapping);
3283 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3284 		/* Do not unmap the current VMA */
3285 		if (iter_vma == vma)
3286 			continue;
3287 
3288 		/*
3289 		 * Shared VMAs have their own reserves and do not affect
3290 		 * MAP_PRIVATE accounting but it is possible that a shared
3291 		 * VMA is using the same page so check and skip such VMAs.
3292 		 */
3293 		if (iter_vma->vm_flags & VM_MAYSHARE)
3294 			continue;
3295 
3296 		/*
3297 		 * Unmap the page from other VMAs without their own reserves.
3298 		 * They get marked to be SIGKILLed if they fault in these
3299 		 * areas. This is because a future no-page fault on this VMA
3300 		 * could insert a zeroed page instead of the data existing
3301 		 * from the time of fork. This would look like data corruption
3302 		 * from the time of fork. This would look like data corruption.
3303 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3304 			unmap_hugepage_range(iter_vma, address,
3305 					     address + huge_page_size(h), page);
3306 	}
3307 	i_mmap_unlock_write(mapping);
3308 }
3309 
3310 /*
3311  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3312  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3313  * cannot race with other handlers or page migration.
3314  * Keep the pte_same checks anyway to make the transition from the mutex easier.
3315  */
3316 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3317 			unsigned long address, pte_t *ptep, pte_t pte,
3318 			struct page *pagecache_page, spinlock_t *ptl)
3319 {
3320 	struct hstate *h = hstate_vma(vma);
3321 	struct page *old_page, *new_page;
3322 	int ret = 0, outside_reserve = 0;
3323 	unsigned long mmun_start;	/* For mmu_notifiers */
3324 	unsigned long mmun_end;		/* For mmu_notifiers */
3325 
3326 	old_page = pte_page(pte);
3327 
3328 retry_avoidcopy:
3329 	/* If no-one else is actually using this page, avoid the copy
3330 	 * and just make the page writable */
3331 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3332 		page_move_anon_rmap(old_page, vma, address);
3333 		set_huge_ptep_writable(vma, address, ptep);
3334 		return 0;
3335 	}
3336 
3337 	/*
3338 	 * If the process that created a MAP_PRIVATE mapping is about to
3339 	 * perform a COW due to a shared page count, attempt to satisfy
3340 	 * the allocation without using the existing reserves. The pagecache
3341 	 * page is used to determine if the reserve at this address was
3342 	 * consumed or not. If reserves were used, a partial faulted mapping
3343 	 * at the time of fork() could consume its reserves on COW instead
3344 	 * of the full address range.
3345 	 */
3346 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3347 			old_page != pagecache_page)
3348 		outside_reserve = 1;
3349 
3350 	page_cache_get(old_page);
3351 
3352 	/*
3353 	 * Drop page table lock as buddy allocator may be called. It will
3354 	 * be acquired again before returning to the caller, as expected.
3355 	 */
3356 	spin_unlock(ptl);
3357 	new_page = alloc_huge_page(vma, address, outside_reserve);
3358 
3359 	if (IS_ERR(new_page)) {
3360 		/*
3361 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
3362 		 * it is due to references held by a child and an insufficient
3363 		 * huge page pool. To guarantee the original mapper's
3364 		 * reliability, unmap the page from child processes. The child
3365 		 * may get SIGKILLed if it later faults.
3366 		 */
3367 		if (outside_reserve) {
3368 			page_cache_release(old_page);
3369 			BUG_ON(huge_pte_none(pte));
3370 			unmap_ref_private(mm, vma, old_page, address);
3371 			BUG_ON(huge_pte_none(pte));
3372 			spin_lock(ptl);
3373 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3374 			if (likely(ptep &&
3375 				   pte_same(huge_ptep_get(ptep), pte)))
3376 				goto retry_avoidcopy;
3377 			/*
3378 			 * A race occurred while re-acquiring the page
3379 			 * table lock, and our job is done.
3380 			 */
3381 			return 0;
3382 		}
3383 
3384 		ret = (PTR_ERR(new_page) == -ENOMEM) ?
3385 			VM_FAULT_OOM : VM_FAULT_SIGBUS;
3386 		goto out_release_old;
3387 	}
3388 
3389 	/*
3390 	 * When the original hugepage is a shared one, it does not have
3391 	 * an anon_vma prepared.
3392 	 */
3393 	if (unlikely(anon_vma_prepare(vma))) {
3394 		ret = VM_FAULT_OOM;
3395 		goto out_release_all;
3396 	}
3397 
3398 	copy_user_huge_page(new_page, old_page, address, vma,
3399 			    pages_per_huge_page(h));
3400 	__SetPageUptodate(new_page);
3401 	set_page_huge_active(new_page);
3402 
3403 	mmun_start = address & huge_page_mask(h);
3404 	mmun_end = mmun_start + huge_page_size(h);
3405 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3406 
3407 	/*
3408 	 * Retake the page table lock to check for racing updates
3409 	 * before the page tables are altered
3410 	 */
3411 	spin_lock(ptl);
3412 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3413 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3414 		ClearPagePrivate(new_page);
3415 
3416 		/* Break COW */
3417 		huge_ptep_clear_flush(vma, address, ptep);
3418 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3419 		set_huge_pte_at(mm, address, ptep,
3420 				make_huge_pte(vma, new_page, 1));
3421 		page_remove_rmap(old_page, true);
3422 		hugepage_add_new_anon_rmap(new_page, vma, address);
3423 		/* Make the old page be freed below */
3424 		new_page = old_page;
3425 	}
3426 	spin_unlock(ptl);
3427 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3428 out_release_all:
3429 	page_cache_release(new_page);
3430 out_release_old:
3431 	page_cache_release(old_page);
3432 
3433 	spin_lock(ptl); /* Caller expects lock to be held */
3434 	return ret;
3435 }
3436 
3437 /* Return the pagecache page at a given address within a VMA */
3438 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3439 			struct vm_area_struct *vma, unsigned long address)
3440 {
3441 	struct address_space *mapping;
3442 	pgoff_t idx;
3443 
3444 	mapping = vma->vm_file->f_mapping;
3445 	idx = vma_hugecache_offset(h, vma, address);
3446 
3447 	return find_lock_page(mapping, idx);
3448 }
3449 
3450 /*
3451  * Return whether a pagecache page backs the given address within the VMA.
3452  * Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page.
3453  */
3454 static bool hugetlbfs_pagecache_present(struct hstate *h,
3455 			struct vm_area_struct *vma, unsigned long address)
3456 {
3457 	struct address_space *mapping;
3458 	pgoff_t idx;
3459 	struct page *page;
3460 
3461 	mapping = vma->vm_file->f_mapping;
3462 	idx = vma_hugecache_offset(h, vma, address);
3463 
3464 	page = find_get_page(mapping, idx);
3465 	if (page)
3466 		put_page(page);
3467 	return page != NULL;
3468 }
3469 
3470 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3471 			   pgoff_t idx)
3472 {
3473 	struct inode *inode = mapping->host;
3474 	struct hstate *h = hstate_inode(inode);
3475 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3476 
3477 	if (err)
3478 		return err;
3479 	ClearPagePrivate(page);
3480 
3481 	spin_lock(&inode->i_lock);
3482 	inode->i_blocks += blocks_per_huge_page(h);
3483 	spin_unlock(&inode->i_lock);
3484 	return 0;
3485 }
3486 
3487 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3488 			   struct address_space *mapping, pgoff_t idx,
3489 			   unsigned long address, pte_t *ptep, unsigned int flags)
3490 {
3491 	struct hstate *h = hstate_vma(vma);
3492 	int ret = VM_FAULT_SIGBUS;
3493 	int anon_rmap = 0;
3494 	unsigned long size;
3495 	struct page *page;
3496 	pte_t new_pte;
3497 	spinlock_t *ptl;
3498 
3499 	/*
3500 	 * Currently, we are forced to kill the process in the event the
3501 	 * original mapper has unmapped pages from the child due to a failed
3502 	 * COW. Warn that such a situation has occurred, as it may not be obvious.
3503 	 */
3504 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3505 		pr_warning("PID %d killed due to inadequate hugepage pool\n",
3506 			   current->pid);
3507 		return ret;
3508 	}
3509 
3510 	/*
3511 	 * Use page lock to guard against racing truncation
3512 	 * before we get page_table_lock.
3513 	 */
3514 retry:
3515 	page = find_lock_page(mapping, idx);
3516 	if (!page) {
3517 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3518 		if (idx >= size)
3519 			goto out;
3520 		page = alloc_huge_page(vma, address, 0);
3521 		if (IS_ERR(page)) {
3522 			ret = PTR_ERR(page);
3523 			if (ret == -ENOMEM)
3524 				ret = VM_FAULT_OOM;
3525 			else
3526 				ret = VM_FAULT_SIGBUS;
3527 			goto out;
3528 		}
3529 		clear_huge_page(page, address, pages_per_huge_page(h));
3530 		__SetPageUptodate(page);
3531 		set_page_huge_active(page);
3532 
3533 		if (vma->vm_flags & VM_MAYSHARE) {
3534 			int err = huge_add_to_page_cache(page, mapping, idx);
3535 			if (err) {
3536 				put_page(page);
3537 				if (err == -EEXIST)
3538 					goto retry;
3539 				goto out;
3540 			}
3541 		} else {
3542 			lock_page(page);
3543 			if (unlikely(anon_vma_prepare(vma))) {
3544 				ret = VM_FAULT_OOM;
3545 				goto backout_unlocked;
3546 			}
3547 			anon_rmap = 1;
3548 		}
3549 	} else {
3550 		/*
3551 		 * If a memory error occurs between mmap() and fault, some processes
3552 		 * don't have a hwpoisoned swap entry for the errored virtual address.
3553 		 * So we need to block hugepage faults with a PG_hwpoison bit check.
3554 		 */
3555 		if (unlikely(PageHWPoison(page))) {
3556 			ret = VM_FAULT_HWPOISON |
3557 				VM_FAULT_SET_HINDEX(hstate_index(h));
3558 			goto backout_unlocked;
3559 		}
3560 	}
3561 
3562 	/*
3563 	 * If we are going to COW a private mapping later, we examine the
3564 	 * pending reservations for this page now. This will ensure that
3565 	 * any allocations necessary to record that reservation occur outside
3566 	 * the spinlock.
3567 	 */
3568 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3569 		if (vma_needs_reservation(h, vma, address) < 0) {
3570 			ret = VM_FAULT_OOM;
3571 			goto backout_unlocked;
3572 		}
3573 		/* Just decrements count, does not deallocate */
3574 		vma_end_reservation(h, vma, address);
3575 	}
3576 
3577 	ptl = huge_pte_lockptr(h, mm, ptep);
3578 	spin_lock(ptl);
3579 	size = i_size_read(mapping->host) >> huge_page_shift(h);
3580 	if (idx >= size)
3581 		goto backout;
3582 
3583 	ret = 0;
3584 	if (!huge_pte_none(huge_ptep_get(ptep)))
3585 		goto backout;
3586 
3587 	if (anon_rmap) {
3588 		ClearPagePrivate(page);
3589 		hugepage_add_new_anon_rmap(page, vma, address);
3590 	} else
3591 		page_dup_rmap(page, true);
3592 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3593 				&& (vma->vm_flags & VM_SHARED)));
3594 	set_huge_pte_at(mm, address, ptep, new_pte);
3595 
3596 	hugetlb_count_add(pages_per_huge_page(h), mm);
3597 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3598 		/* Optimization, do the COW without a second fault */
3599 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3600 	}
3601 
3602 	spin_unlock(ptl);
3603 	unlock_page(page);
3604 out:
3605 	return ret;
3606 
3607 backout:
3608 	spin_unlock(ptl);
3609 backout_unlocked:
3610 	unlock_page(page);
3611 	put_page(page);
3612 	goto out;
3613 }
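/*
 * Illustrative sketch of the insert-or-retry idiom used in
 * hugetlb_no_page() above: -EEXIST from huge_add_to_page_cache() means a
 * racing thread instantiated the page first, so we drop our copy and
 * retry the locked lookup.  sketch_find_or_create is a hypothetical
 * helper reduced to the skeleton of that idiom.
 */
static struct page *sketch_find_or_create(struct address_space *mapping,
		pgoff_t idx, struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	int err;

retry:
	page = find_lock_page(mapping, idx);
	if (page)
		return page;			/* someone else won the race */
	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page))
		return page;
	err = huge_add_to_page_cache(page, mapping, idx);
	if (err) {
		put_page(page);
		if (err == -EEXIST)
			goto retry;		/* lost the race; take theirs */
		return ERR_PTR(err);
	}
	return page;
}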
3614 
3615 #ifdef CONFIG_SMP
3616 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3617 			    struct vm_area_struct *vma,
3618 			    struct address_space *mapping,
3619 			    pgoff_t idx, unsigned long address)
3620 {
3621 	unsigned long key[2];
3622 	u32 hash;
3623 
3624 	if (vma->vm_flags & VM_SHARED) {
3625 		key[0] = (unsigned long) mapping;
3626 		key[1] = idx;
3627 	} else {
3628 		key[0] = (unsigned long) mm;
3629 		key[1] = address >> huge_page_shift(h);
3630 	}
3631 
3632 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3633 
3634 	return hash & (num_fault_mutexes - 1);
3635 }
3636 #else
3637 /*
3638  * For uniprocessor systems we always use a single mutex, so just
3639  * return 0 and avoid the hashing overhead.
3640  */
3641 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3642 			    struct vm_area_struct *vma,
3643 			    struct address_space *mapping,
3644 			    pgoff_t idx, unsigned long address)
3645 {
3646 	return 0;
3647 }
3648 #endif
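/*
 * Sketch of the setup the hash above relies on: the mask
 * 'hash & (num_fault_mutexes - 1)' is only a valid modulo if
 * num_fault_mutexes is a power of two.  The real initialization is
 * expected to live in hugetlb_init(), earlier in this file; this is a
 * rough, assumed shape of it (hypothetical function name).
 */
static void __init sketch_fault_mutex_init(void)
{
	int i;

	/* power of two, so 'hash & (num_fault_mutexes - 1)' wraps cleanly */
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
	hugetlb_fault_mutex_table =
		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
}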
3649 
3650 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3651 			unsigned long address, unsigned int flags)
3652 {
3653 	pte_t *ptep, entry;
3654 	spinlock_t *ptl;
3655 	int ret;
3656 	u32 hash;
3657 	pgoff_t idx;
3658 	struct page *page = NULL;
3659 	struct page *pagecache_page = NULL;
3660 	struct hstate *h = hstate_vma(vma);
3661 	struct address_space *mapping;
3662 	int need_wait_lock = 0;
3663 
3664 	address &= huge_page_mask(h);
3665 
3666 	ptep = huge_pte_offset(mm, address);
3667 	if (ptep) {
3668 		entry = huge_ptep_get(ptep);
3669 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3670 			migration_entry_wait_huge(vma, mm, ptep);
3671 			return 0;
3672 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3673 			return VM_FAULT_HWPOISON_LARGE |
3674 				VM_FAULT_SET_HINDEX(hstate_index(h));
3675 	} else {
3676 		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3677 		if (!ptep)
3678 			return VM_FAULT_OOM;
3679 	}
3680 
3681 	mapping = vma->vm_file->f_mapping;
3682 	idx = vma_hugecache_offset(h, vma, address);
3683 
3684 	/*
3685 	 * Serialize hugepage allocation and instantiation, so that we don't
3686 	 * get spurious allocation failures if two CPUs race to instantiate
3687 	 * the same page in the page cache.
3688 	 */
3689 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3690 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
3691 
3692 	entry = huge_ptep_get(ptep);
3693 	if (huge_pte_none(entry)) {
3694 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3695 		goto out_mutex;
3696 	}
3697 
3698 	ret = 0;
3699 
3700 	/*
3701 	 * entry could be a migration/hwpoison entry at this point, so this
3702 	 * check prevents the code below from assuming that we have an
3703 	 * active hugepage in the pagecache. This goto expects a second page
3704 	 * fault, where the is_hugetlb_entry_(migration|hwpoisoned) checks
3705 	 * will properly handle it.
3706 	 */
3707 	if (!pte_present(entry))
3708 		goto out_mutex;
3709 
3710 	/*
3711 	 * If we are going to COW the mapping later, we examine the pending
3712 	 * reservations for this page now. This will ensure that any
3713 	 * allocations necessary to record that reservation occur outside the
3714 	 * spinlock. For private mappings, we also look up the pagecache
3715 	 * page now as it is used to determine if a reservation has been
3716 	 * consumed.
3717 	 */
3718 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3719 		if (vma_needs_reservation(h, vma, address) < 0) {
3720 			ret = VM_FAULT_OOM;
3721 			goto out_mutex;
3722 		}
3723 		/* Just decrements count, does not deallocate */
3724 		vma_end_reservation(h, vma, address);
3725 
3726 		if (!(vma->vm_flags & VM_MAYSHARE))
3727 			pagecache_page = hugetlbfs_pagecache_page(h,
3728 								vma, address);
3729 	}
3730 
3731 	ptl = huge_pte_lock(h, mm, ptep);
3732 
3733 	/* Check for a racing update before calling hugetlb_cow */
3734 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3735 		goto out_ptl;
3736 
3737 	/*
3738 	 * hugetlb_cow() requires page locks of pte_page(entry) and
3739 	 * pagecache_page, so here we need to take the former one
3740 	 * when page != pagecache_page or !pagecache_page.
3741 	 */
3742 	page = pte_page(entry);
3743 	if (page != pagecache_page)
3744 		if (!trylock_page(page)) {
3745 			need_wait_lock = 1;
3746 			goto out_ptl;
3747 		}
3748 
3749 	get_page(page);
3750 
3751 	if (flags & FAULT_FLAG_WRITE) {
3752 		if (!huge_pte_write(entry)) {
3753 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
3754 					pagecache_page, ptl);
3755 			goto out_put_page;
3756 		}
3757 		entry = huge_pte_mkdirty(entry);
3758 	}
3759 	entry = pte_mkyoung(entry);
3760 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3761 						flags & FAULT_FLAG_WRITE))
3762 		update_mmu_cache(vma, address, ptep);
3763 out_put_page:
3764 	if (page != pagecache_page)
3765 		unlock_page(page);
3766 	put_page(page);
3767 out_ptl:
3768 	spin_unlock(ptl);
3769 
3770 	if (pagecache_page) {
3771 		unlock_page(pagecache_page);
3772 		put_page(pagecache_page);
3773 	}
3774 out_mutex:
3775 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3776 	/*
3777 	 * Generally it's safe to hold a refcount while waiting on a page lock.
3778 	 * But here we only wait to defer the next page fault and avoid a busy
3779 	 * loop, and the page is not used after being unlocked before the current
3780 	 * page fault returns. So we are safe from accessing a freed page, even
3781 	 * though we wait here without taking a refcount.
3782 	 */
3783 	if (need_wait_lock)
3784 		wait_on_page_locked(page);
3785 	return ret;
3786 }
3787 
3788 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3789 			 struct page **pages, struct vm_area_struct **vmas,
3790 			 unsigned long *position, unsigned long *nr_pages,
3791 			 long i, unsigned int flags)
3792 {
3793 	unsigned long pfn_offset;
3794 	unsigned long vaddr = *position;
3795 	unsigned long remainder = *nr_pages;
3796 	struct hstate *h = hstate_vma(vma);
3797 
3798 	while (vaddr < vma->vm_end && remainder) {
3799 		pte_t *pte;
3800 		spinlock_t *ptl = NULL;
3801 		int absent;
3802 		struct page *page;
3803 
3804 		/*
3805 		 * If we have a pending SIGKILL, don't keep faulting pages and
3806 		 * potentially allocating memory.
3807 		 */
3808 		if (unlikely(fatal_signal_pending(current))) {
3809 			remainder = 0;
3810 			break;
3811 		}
3812 
3813 		/*
3814 		 * Some archs (sparc64, sh*) have multiple pte_t entries for
3815 		 * each hugepage.  We have to make sure we get the
3816 		 * first, for the page indexing below to work.
3817 		 *
3818 		 * Note that the page table lock is not held when pte is NULL.
3819 		 */
3820 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3821 		if (pte)
3822 			ptl = huge_pte_lock(h, mm, pte);
3823 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
3824 
3825 		/*
3826 		 * When coredumping, it suits get_dump_page if we just return
3827 		 * an error where there's an empty slot with no huge pagecache
3828 		 * to back it.  This way, we avoid allocating a hugepage, and
3829 		 * the sparse dumpfile avoids allocating disk blocks, but its
3830 		 * huge holes still show up with zeroes where they need to be.
3831 		 */
3832 		if (absent && (flags & FOLL_DUMP) &&
3833 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3834 			if (pte)
3835 				spin_unlock(ptl);
3836 			remainder = 0;
3837 			break;
3838 		}
3839 
3840 		/*
3841 		 * We need to call hugetlb_fault for both hugepages under migration
3842 		 * (in which case hugetlb_fault waits for the migration) and
3843 		 * hwpoisoned hugepages (in which case we need to prevent the
3844 		 * caller from accessing them). To do this, we use is_swap_pte
3845 		 * here instead of is_hugetlb_entry_migration and
3846 		 * is_hugetlb_entry_hwpoisoned, both because it simply covers
3847 		 * both cases and because we can't follow correct pages
3848 		 * directly from any kind of swap entry.
3849 		 */
3850 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3851 		    ((flags & FOLL_WRITE) &&
3852 		      !huge_pte_write(huge_ptep_get(pte)))) {
3853 			int ret;
3854 
3855 			if (pte)
3856 				spin_unlock(ptl);
3857 			ret = hugetlb_fault(mm, vma, vaddr,
3858 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3859 			if (!(ret & VM_FAULT_ERROR))
3860 				continue;
3861 
3862 			remainder = 0;
3863 			break;
3864 		}
3865 
3866 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3867 		page = pte_page(huge_ptep_get(pte));
3868 same_page:
3869 		if (pages) {
3870 			pages[i] = mem_map_offset(page, pfn_offset);
3871 			get_page(pages[i]);
3872 		}
3873 
3874 		if (vmas)
3875 			vmas[i] = vma;
3876 
3877 		vaddr += PAGE_SIZE;
3878 		++pfn_offset;
3879 		--remainder;
3880 		++i;
3881 		if (vaddr < vma->vm_end && remainder &&
3882 				pfn_offset < pages_per_huge_page(h)) {
3883 			/*
3884 			 * We use pfn_offset to avoid touching the pageframes
3885 			 * of this compound page.
3886 			 */
3887 			goto same_page;
3888 		}
3889 		spin_unlock(ptl);
3890 	}
3891 	*nr_pages = remainder;
3892 	*position = vaddr;
3893 
3894 	return i ? i : -EFAULT;
3895 }
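/*
 * Illustrative sketch of the subpage indexing used by
 * follow_hugetlb_page(): pfn_offset counts base pages into the compound
 * huge page, so consecutive user addresses map to consecutive subpages
 * without another page table walk.  Hypothetical helper name.
 */
static unsigned long sketch_subpage_index(struct hstate *h,
					  unsigned long vaddr)
{
	/* e.g. 2MB pages: vaddr 0x...203000 -> 0x3000 >> 12 = subpage 3 */
	return (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
}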
3896 
3897 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3898 		unsigned long address, unsigned long end, pgprot_t newprot)
3899 {
3900 	struct mm_struct *mm = vma->vm_mm;
3901 	unsigned long start = address;
3902 	pte_t *ptep;
3903 	pte_t pte;
3904 	struct hstate *h = hstate_vma(vma);
3905 	unsigned long pages = 0;
3906 
3907 	BUG_ON(address >= end);
3908 	flush_cache_range(vma, address, end);
3909 
3910 	mmu_notifier_invalidate_range_start(mm, start, end);
3911 	i_mmap_lock_write(vma->vm_file->f_mapping);
3912 	for (; address < end; address += huge_page_size(h)) {
3913 		spinlock_t *ptl;
3914 		ptep = huge_pte_offset(mm, address);
3915 		if (!ptep)
3916 			continue;
3917 		ptl = huge_pte_lock(h, mm, ptep);
3918 		if (huge_pmd_unshare(mm, &address, ptep)) {
3919 			pages++;
3920 			spin_unlock(ptl);
3921 			continue;
3922 		}
3923 		pte = huge_ptep_get(ptep);
3924 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3925 			spin_unlock(ptl);
3926 			continue;
3927 		}
3928 		if (unlikely(is_hugetlb_entry_migration(pte))) {
3929 			swp_entry_t entry = pte_to_swp_entry(pte);
3930 
3931 			if (is_write_migration_entry(entry)) {
3932 				pte_t newpte;
3933 
3934 				make_migration_entry_read(&entry);
3935 				newpte = swp_entry_to_pte(entry);
3936 				set_huge_pte_at(mm, address, ptep, newpte);
3937 				pages++;
3938 			}
3939 			spin_unlock(ptl);
3940 			continue;
3941 		}
3942 		if (!huge_pte_none(pte)) {
3943 			pte = huge_ptep_get_and_clear(mm, address, ptep);
3944 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3945 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
3946 			set_huge_pte_at(mm, address, ptep, pte);
3947 			pages++;
3948 		}
3949 		spin_unlock(ptl);
3950 	}
3951 	/*
3952 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3953 	 * may have cleared our pud entry and done put_page on the page table:
3954 	 * once we release i_mmap_rwsem, another task can do the final put_page
3955 	 * and that page table be reused and filled with junk.
3956 	 */
3957 	flush_tlb_range(vma, start, end);
3958 	mmu_notifier_invalidate_range(mm, start, end);
3959 	i_mmap_unlock_write(vma->vm_file->f_mapping);
3960 	mmu_notifier_invalidate_range_end(mm, start, end);
3961 
3962 	return pages << h->order;
3963 }
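/*
 * Worked example for the return value above: 'pages' counts huge ptes
 * changed, and 'pages << h->order' converts that to base pages.  With
 * 2MB huge pages (order 9), changing 3 huge ptes reports
 * 3 << 9 = 1536 base pages.
 */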
3964 
3965 int hugetlb_reserve_pages(struct inode *inode,
3966 					long from, long to,
3967 					struct vm_area_struct *vma,
3968 					vm_flags_t vm_flags)
3969 {
3970 	long ret, chg;
3971 	struct hstate *h = hstate_inode(inode);
3972 	struct hugepage_subpool *spool = subpool_inode(inode);
3973 	struct resv_map *resv_map;
3974 	long gbl_reserve;
3975 
3976 	/*
3977 	 * Only apply hugepage reservation if asked. At fault time, an
3978 	 * attempt will be made for VM_NORESERVE to allocate a page
3979 	 * without using reserves
3980 	 */
3981 	if (vm_flags & VM_NORESERVE)
3982 		return 0;
3983 
3984 	/*
3985 	 * Shared mappings base their reservation on the number of pages that
3986 	 * are already allocated on behalf of the file. Private mappings need
3987 	 * to reserve the full area even if read-only as mprotect() may be
3988 	 * called to make the mapping read-write. Assume !vma is a shm mapping
3989 	 */
3990 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
3991 		resv_map = inode_resv_map(inode);
3992 
3993 		chg = region_chg(resv_map, from, to);
3994 
3995 	} else {
3996 		resv_map = resv_map_alloc();
3997 		if (!resv_map)
3998 			return -ENOMEM;
3999 
4000 		chg = to - from;
4001 
4002 		set_vma_resv_map(vma, resv_map);
4003 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4004 	}
4005 
4006 	if (chg < 0) {
4007 		ret = chg;
4008 		goto out_err;
4009 	}
4010 
4011 	/*
4012 	 * There must be enough pages in the subpool for the mapping. If
4013 	 * the subpool has a minimum size, there may be some global
4014 	 * reservations already in place (gbl_reserve).
4015 	 */
4016 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4017 	if (gbl_reserve < 0) {
4018 		ret = -ENOSPC;
4019 		goto out_err;
4020 	}
4021 
4022 	/*
4023 	 * Check that enough hugepages are available for the reservation.
4024 	 * Hand the pages back to the subpool if there are not enough.
4025 	 */
4026 	ret = hugetlb_acct_memory(h, gbl_reserve);
4027 	if (ret < 0) {
4028 		/* put back original number of pages, chg */
4029 		(void)hugepage_subpool_put_pages(spool, chg);
4030 		goto out_err;
4031 	}
4032 
4033 	/*
4034 	 * Account for the reservations made. Shared mappings record regions
4035 	 * that have reservations as they are shared by multiple VMAs.
4036 	 * When the last VMA disappears, the region map says how much
4037 	 * the reservation was and the page cache tells how much of
4038 	 * the reservation was consumed. Private mappings are per-VMA and
4039 	 * only the consumed reservations are tracked. When the VMA
4040 	 * disappears, the original reservation is the VMA size and the
4041 	 * consumed reservations are stored in the map. Hence, nothing
4042 	 * else has to be done for private mappings here
4043 	 */
4044 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4045 		long add = region_add(resv_map, from, to);
4046 
4047 		if (unlikely(chg > add)) {
4048 			/*
4049 			 * pages in this range were added to the reserve
4050 			 * map between region_chg and region_add.  This
4051 			 * indicates a race with alloc_huge_page.  Adjust
4052 			 * the subpool and reserve counts modified above
4053 			 * based on the difference.
4054 			 */
4055 			long rsv_adjust;
4056 
4057 			rsv_adjust = hugepage_subpool_put_pages(spool,
4058 								chg - add);
4059 			hugetlb_acct_memory(h, -rsv_adjust);
4060 		}
4061 	}
4062 	return 0;
4063 out_err:
4064 	if (!vma || vma->vm_flags & VM_MAYSHARE)
4065 		region_abort(resv_map, from, to);
4066 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4067 		kref_put(&resv_map->refs, resv_map_release);
4068 	return ret;
4069 }
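/*
 * Worked example of the chg > add adjustment above: suppose region_chg()
 * reports chg = 10 pages needed, but a racing alloc_huge_page() records
 * 2 pages of the range in the reserve map before region_add() runs, so
 * region_add() returns add = 8.  The chg - add = 2 pages over-charged to
 * the subpool and the global reserve above are then handed back:
 *
 *	rsv_adjust = hugepage_subpool_put_pages(spool, 10 - 8);
 *	hugetlb_acct_memory(h, -rsv_adjust);
 */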
4070 
4071 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4072 								long freed)
4073 {
4074 	struct hstate *h = hstate_inode(inode);
4075 	struct resv_map *resv_map = inode_resv_map(inode);
4076 	long chg = 0;
4077 	struct hugepage_subpool *spool = subpool_inode(inode);
4078 	long gbl_reserve;
4079 
4080 	if (resv_map) {
4081 		chg = region_del(resv_map, start, end);
4082 		/*
4083 		 * region_del() can fail in the rare case where a region
4084 		 * must be split and another region descriptor cannot be
4085 		 * allocated.  If end == LONG_MAX, it will not fail.
4086 		 */
4087 		if (chg < 0)
4088 			return chg;
4089 	}
4090 
4091 	spin_lock(&inode->i_lock);
4092 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4093 	spin_unlock(&inode->i_lock);
4094 
4095 	/*
4096 	 * If the subpool has a minimum size, the number of global
4097 	 * reservations to be released may be adjusted.
4098 	 */
4099 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4100 	hugetlb_acct_memory(h, -gbl_reserve);
4101 
4102 	return 0;
4103 }
4104 
4105 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4106 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4107 				struct vm_area_struct *vma,
4108 				unsigned long addr, pgoff_t idx)
4109 {
4110 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4111 				svma->vm_start;
4112 	unsigned long sbase = saddr & PUD_MASK;
4113 	unsigned long s_end = sbase + PUD_SIZE;
4114 
4115 	/* Allow segments to share if only one is marked locked */
4116 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4117 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4118 
4119 	/*
4120 	 * match the virtual addresses, permissions and the alignment of the
4121 	 * page table page.
4122 	 */
4123 	if (pmd_index(addr) != pmd_index(saddr) ||
4124 	    vm_flags != svm_flags ||
4125 	    sbase < svma->vm_start || svma->vm_end < s_end)
4126 		return 0;
4127 
4128 	return saddr;
4129 }
4130 
4131 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4132 {
4133 	unsigned long base = addr & PUD_MASK;
4134 	unsigned long end = base + PUD_SIZE;
4135 
4136 	/*
4137 	 * check on proper vm_flags and page table alignment
4138 	 */
4139 	if (vma->vm_flags & VM_MAYSHARE &&
4140 	    vma->vm_start <= base && end <= vma->vm_end)
4141 		return true;
4142 	return false;
4143 }
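/*
 * Worked example for the check above, assuming x86-64 where PUD_SIZE is
 * 1GB: a VM_MAYSHARE mapping spanning 0x40000000-0xc0000000 fully covers
 * the aligned ranges [0x40000000, 0x80000000) and
 * [0x80000000, 0xc0000000), so addresses in it are shareable.  An
 * otherwise identical mapping spanning 0x50000000-0x90000000 covers no
 * aligned 1GB range, so vma_shareable() is false everywhere in it.
 */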
4144 
4145 /*
4146  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
4147  * and returns the corresponding pte. While this is not necessary for the
4148  * !shared pmd case because we can allocate the pmd later as well, it makes the
4149  * code much cleaner. pmd allocation is essential for the shared case because
4150  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4151  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4152  * bad pmd for sharing.
4153  */
4154 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4155 {
4156 	struct vm_area_struct *vma = find_vma(mm, addr);
4157 	struct address_space *mapping = vma->vm_file->f_mapping;
4158 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4159 			vma->vm_pgoff;
4160 	struct vm_area_struct *svma;
4161 	unsigned long saddr;
4162 	pte_t *spte = NULL;
4163 	pte_t *pte;
4164 	spinlock_t *ptl;
4165 
4166 	if (!vma_shareable(vma, addr))
4167 		return (pte_t *)pmd_alloc(mm, pud, addr);
4168 
4169 	i_mmap_lock_write(mapping);
4170 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4171 		if (svma == vma)
4172 			continue;
4173 
4174 		saddr = page_table_shareable(svma, vma, addr, idx);
4175 		if (saddr) {
4176 			spte = huge_pte_offset(svma->vm_mm, saddr);
4177 			if (spte) {
4178 				mm_inc_nr_pmds(mm);
4179 				get_page(virt_to_page(spte));
4180 				break;
4181 			}
4182 		}
4183 	}
4184 
4185 	if (!spte)
4186 		goto out;
4187 
4188 	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4189 	spin_lock(ptl);
4190 	if (pud_none(*pud)) {
4191 		pud_populate(mm, pud,
4192 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
4193 	} else {
4194 		put_page(virt_to_page(spte));
4195 		mm_inc_nr_pmds(mm);
4196 	}
4197 	spin_unlock(ptl);
4198 out:
4199 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
4200 	i_mmap_unlock_write(mapping);
4201 	return pte;
4202 }
4203 
4204 /*
4205  * unmap huge page backed by shared pte.
4206  *
4207  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
4208  * shared, indicated by page_count > 1, unmapping is achieved by clearing the
4209  * pud and decrementing the refcount. If count == 1, the pte page is not shared.
4210  *
4211  * Called with the page table lock held.
4212  *
4213  * Returns: 1 successfully unmapped a shared pte page
4214  *	    0 the underlying pte page is not shared, or it is the last user
4215  */
4216 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4217 {
4218 	pgd_t *pgd = pgd_offset(mm, *addr);
4219 	pud_t *pud = pud_offset(pgd, *addr);
4220 
4221 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
4222 	if (page_count(virt_to_page(ptep)) == 1)
4223 		return 0;
4224 
4225 	pud_clear(pud);
4226 	put_page(virt_to_page(ptep));
4227 	mm_dec_nr_pmds(mm);
4228 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4229 	return 1;
4230 }
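/*
 * Worked example of the *addr rewind above, assuming x86-64 2MB pages
 * where HPAGE_SIZE * PTRS_PER_PTE = 1GB: unsharing at *addr = 0x40200000
 * yields ALIGN(0x40200000, 1GB) - HPAGE_SIZE = 0x80000000 - 0x200000 =
 * 0x7fe00000.  A caller that advances by one huge page per iteration
 * therefore resumes at 0x80000000, the start of the next 1GB region,
 * skipping the rest of the range the shared pmd covered.
 */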
4231 #define want_pmd_share()	(1)
4232 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4233 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4234 {
4235 	return NULL;
4236 }
4237 
4238 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4239 {
4240 	return 0;
4241 }
4242 #define want_pmd_share()	(0)
4243 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4244 
4245 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4246 pte_t *huge_pte_alloc(struct mm_struct *mm,
4247 			unsigned long addr, unsigned long sz)
4248 {
4249 	pgd_t *pgd;
4250 	pud_t *pud;
4251 	pte_t *pte = NULL;
4252 
4253 	pgd = pgd_offset(mm, addr);
4254 	pud = pud_alloc(mm, pgd, addr);
4255 	if (pud) {
4256 		if (sz == PUD_SIZE) {
4257 			pte = (pte_t *)pud;
4258 		} else {
4259 			BUG_ON(sz != PMD_SIZE);
4260 			if (want_pmd_share() && pud_none(*pud))
4261 				pte = huge_pmd_share(mm, addr, pud);
4262 			else
4263 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
4264 		}
4265 	}
4266 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4267 
4268 	return pte;
4269 }
4270 
4271 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4272 {
4273 	pgd_t *pgd;
4274 	pud_t *pud;
4275 	pmd_t *pmd = NULL;
4276 
4277 	pgd = pgd_offset(mm, addr);
4278 	if (pgd_present(*pgd)) {
4279 		pud = pud_offset(pgd, addr);
4280 		if (pud_present(*pud)) {
4281 			if (pud_huge(*pud))
4282 				return (pte_t *)pud;
4283 			pmd = pmd_offset(pud, addr);
4284 		}
4285 	}
4286 	return (pte_t *) pmd;
4287 }
4288 
4289 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4290 
4291 /*
4292  * These functions are overridable if your architecture needs its own
4293  * behavior.
4294  */
4295 struct page * __weak
4296 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4297 			      int write)
4298 {
4299 	return ERR_PTR(-EINVAL);
4300 }
4301 
4302 struct page * __weak
4303 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4304 		pmd_t *pmd, int flags)
4305 {
4306 	struct page *page = NULL;
4307 	spinlock_t *ptl;
4308 retry:
4309 	ptl = pmd_lockptr(mm, pmd);
4310 	spin_lock(ptl);
4311 	/*
4312 	 * make sure that the address range covered by this pmd is not
4313 	 * unmapped by other threads.
4314 	 */
4315 	if (!pmd_huge(*pmd))
4316 		goto out;
4317 	if (pmd_present(*pmd)) {
4318 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4319 		if (flags & FOLL_GET)
4320 			get_page(page);
4321 	} else {
4322 		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4323 			spin_unlock(ptl);
4324 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
4325 			goto retry;
4326 		}
4327 		/*
4328 		 * hwpoisoned entry is treated as no_page_table in
4329 		 * follow_page_mask().
4330 		 */
4331 	}
4332 out:
4333 	spin_unlock(ptl);
4334 	return page;
4335 }
4336 
4337 struct page * __weak
4338 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4339 		pud_t *pud, int flags)
4340 {
4341 	if (flags & FOLL_GET)
4342 		return NULL;
4343 
4344 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4345 }
4346 
4347 #ifdef CONFIG_MEMORY_FAILURE
4348 
4349 /*
4350  * This function is called from memory failure code.
4351  * Assume the caller holds page lock of the head page.
4352  */
4353 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4354 {
4355 	struct hstate *h = page_hstate(hpage);
4356 	int nid = page_to_nid(hpage);
4357 	int ret = -EBUSY;
4358 
4359 	spin_lock(&hugetlb_lock);
4360 	/*
4361 	 * Just checking !page_huge_active is not enough, because that could be
4362 	 * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4363 	 */
4364 	if (!page_huge_active(hpage) && !page_count(hpage)) {
4365 		/*
4366 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4367 		 * but dangling hpage->lru can trigger list-debug warnings
4368 		 * (this happens when we call unpoison_memory() on it),
4369 		 * so let it point to itself with list_del_init().
4370 		 */
4371 		list_del_init(&hpage->lru);
4372 		set_page_refcounted(hpage);
4373 		h->free_huge_pages--;
4374 		h->free_huge_pages_node[nid]--;
4375 		ret = 0;
4376 	}
4377 	spin_unlock(&hugetlb_lock);
4378 	return ret;
4379 }
4380 #endif
4381 
4382 bool isolate_huge_page(struct page *page, struct list_head *list)
4383 {
4384 	bool ret = true;
4385 
4386 	VM_BUG_ON_PAGE(!PageHead(page), page);
4387 	spin_lock(&hugetlb_lock);
4388 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4389 		ret = false;
4390 		goto unlock;
4391 	}
4392 	clear_page_huge_active(page);
4393 	list_move_tail(&page->lru, list);
4394 unlock:
4395 	spin_unlock(&hugetlb_lock);
4396 	return ret;
4397 }
4398 
4399 void putback_active_hugepage(struct page *page)
4400 {
4401 	VM_BUG_ON_PAGE(!PageHead(page), page);
4402 	spin_lock(&hugetlb_lock);
4403 	set_page_huge_active(page);
4404 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4405 	spin_unlock(&hugetlb_lock);
4406 	put_page(page);
4407 }
4408