xref: /openbmc/linux/mm/hugetlb.c (revision abfbd895)
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27 
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31 
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37 
38 int hugepages_treat_as_movable;
39 
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43 /*
44  * Minimum page order among possible hugepage sizes, set to a proper value
45  * at boot time.
46  */
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
48 
49 __initdata LIST_HEAD(huge_boot_pages);
50 
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
55 
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61 
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68 
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71 
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
75 
76 	spin_unlock(&spool->lock);
77 
78 	/* If no pages are used, and no other handles to the subpool
79 	 * remain, give up any reservations mased on minimum size and
80 	 * free the subpool */
81 	if (free) {
82 		if (spool->min_hpages != -1)
83 			hugetlb_acct_memory(spool->hstate,
84 						-spool->min_hpages);
85 		kfree(spool);
86 	}
87 }
88 
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 						long min_hpages)
91 {
92 	struct hugepage_subpool *spool;
93 
94 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95 	if (!spool)
96 		return NULL;
97 
98 	spin_lock_init(&spool->lock);
99 	spool->count = 1;
100 	spool->max_hpages = max_hpages;
101 	spool->hstate = h;
102 	spool->min_hpages = min_hpages;
103 
104 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 		kfree(spool);
106 		return NULL;
107 	}
108 	spool->rsv_hpages = min_hpages;
109 
110 	return spool;
111 }
112 
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115 	spin_lock(&spool->lock);
116 	BUG_ON(!spool->count);
117 	spool->count--;
118 	unlock_or_release_subpool(spool);
119 }
120 
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy the
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be manitained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130 				      long delta)
131 {
132 	long ret = delta;
133 
134 	if (!spool)
135 		return ret;
136 
137 	spin_lock(&spool->lock);
138 
139 	if (spool->max_hpages != -1) {		/* maximum size accounting */
140 		if ((spool->used_hpages + delta) <= spool->max_hpages)
141 			spool->used_hpages += delta;
142 		else {
143 			ret = -ENOMEM;
144 			goto unlock_ret;
145 		}
146 	}
147 
148 	if (spool->min_hpages != -1) {		/* minimum size accounting */
149 		if (delta > spool->rsv_hpages) {
150 			/*
151 			 * Asking for more reserves than those already taken on
152 			 * behalf of subpool.  Return difference.
153 			 */
154 			ret = delta - spool->rsv_hpages;
155 			spool->rsv_hpages = 0;
156 		} else {
157 			ret = 0;	/* reserves already accounted for */
158 			spool->rsv_hpages -= delta;
159 		}
160 	}
161 
162 unlock_ret:
163 	spin_unlock(&spool->lock);
164 	return ret;
165 }
166 
167 /*
168  * Subpool accounting for freeing and unreserving pages.
169  * Return the number of global page reservations that must be dropped.
170  * The return value may only be different than the passed value (delta)
171  * in the case where a subpool minimum size must be maintained.
172  */
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
174 				       long delta)
175 {
176 	long ret = delta;
177 
178 	if (!spool)
179 		return delta;
180 
181 	spin_lock(&spool->lock);
182 
183 	if (spool->max_hpages != -1)		/* maximum size accounting */
184 		spool->used_hpages -= delta;
185 
186 	if (spool->min_hpages != -1) {		/* minimum size accounting */
187 		if (spool->rsv_hpages + delta <= spool->min_hpages)
188 			ret = 0;
189 		else
190 			ret = spool->rsv_hpages + delta - spool->min_hpages;
191 
192 		spool->rsv_hpages += delta;
193 		if (spool->rsv_hpages > spool->min_hpages)
194 			spool->rsv_hpages = spool->min_hpages;
195 	}
196 
197 	/*
198 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
199 	 * quota reference, free it now.
200 	 */
201 	unlock_or_release_subpool(spool);
202 
203 	return ret;
204 }
205 
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
207 {
208 	return HUGETLBFS_SB(inode->i_sb)->spool;
209 }
210 
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
212 {
213 	return subpool_inode(file_inode(vma->vm_file));
214 }
215 
216 /*
217  * Region tracking -- allows tracking of reservations and instantiated pages
218  *                    across the pages in a mapping.
219  *
220  * The region data structures are embedded into a resv_map and protected
221  * by a resv_map's lock.  The set of regions within the resv_map represent
222  * reservations for huge pages, or huge pages that have already been
223  * instantiated within the map.  The from and to elements are huge page
224  * indicies into the associated mapping.  from indicates the starting index
225  * of the region.  to represents the first index past the end of  the region.
226  *
227  * For example, a file region structure with from == 0 and to == 4 represents
228  * four huge pages in a mapping.  It is important to note that the to element
229  * represents the first element past the end of the region. This is used in
230  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
231  *
232  * Interval notation of the form [from, to) will be used to indicate that
233  * the endpoint from is inclusive and to is exclusive.
234  */
235 struct file_region {
236 	struct list_head link;
237 	long from;
238 	long to;
239 };
240 
241 /*
242  * Add the huge page range represented by [f, t) to the reserve
243  * map.  In the normal case, existing regions will be expanded
244  * to accommodate the specified range.  Sufficient regions should
245  * exist for expansion due to the previous call to region_chg
246  * with the same range.  However, it is possible that region_del
247  * could have been called after region_chg and modifed the map
248  * in such a way that no region exists to be expanded.  In this
249  * case, pull a region descriptor from the cache associated with
250  * the map and use that for the new range.
251  *
252  * Return the number of new huge pages added to the map.  This
253  * number is greater than or equal to zero.
254  */
255 static long region_add(struct resv_map *resv, long f, long t)
256 {
257 	struct list_head *head = &resv->regions;
258 	struct file_region *rg, *nrg, *trg;
259 	long add = 0;
260 
261 	spin_lock(&resv->lock);
262 	/* Locate the region we are either in or before. */
263 	list_for_each_entry(rg, head, link)
264 		if (f <= rg->to)
265 			break;
266 
267 	/*
268 	 * If no region exists which can be expanded to include the
269 	 * specified range, the list must have been modified by an
270 	 * interleving call to region_del().  Pull a region descriptor
271 	 * from the cache and use it for this range.
272 	 */
273 	if (&rg->link == head || t < rg->from) {
274 		VM_BUG_ON(resv->region_cache_count <= 0);
275 
276 		resv->region_cache_count--;
277 		nrg = list_first_entry(&resv->region_cache, struct file_region,
278 					link);
279 		list_del(&nrg->link);
280 
281 		nrg->from = f;
282 		nrg->to = t;
283 		list_add(&nrg->link, rg->link.prev);
284 
285 		add += t - f;
286 		goto out_locked;
287 	}
288 
289 	/* Round our left edge to the current segment if it encloses us. */
290 	if (f > rg->from)
291 		f = rg->from;
292 
293 	/* Check for and consume any regions we now overlap with. */
294 	nrg = rg;
295 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
296 		if (&rg->link == head)
297 			break;
298 		if (rg->from > t)
299 			break;
300 
301 		/* If this area reaches higher then extend our area to
302 		 * include it completely.  If this is not the first area
303 		 * which we intend to reuse, free it. */
304 		if (rg->to > t)
305 			t = rg->to;
306 		if (rg != nrg) {
307 			/* Decrement return value by the deleted range.
308 			 * Another range will span this area so that by
309 			 * end of routine add will be >= zero
310 			 */
311 			add -= (rg->to - rg->from);
312 			list_del(&rg->link);
313 			kfree(rg);
314 		}
315 	}
316 
317 	add += (nrg->from - f);		/* Added to beginning of region */
318 	nrg->from = f;
319 	add += t - nrg->to;		/* Added to end of region */
320 	nrg->to = t;
321 
322 out_locked:
323 	resv->adds_in_progress--;
324 	spin_unlock(&resv->lock);
325 	VM_BUG_ON(add < 0);
326 	return add;
327 }
328 
329 /*
330  * Examine the existing reserve map and determine how many
331  * huge pages in the specified range [f, t) are NOT currently
332  * represented.  This routine is called before a subsequent
333  * call to region_add that will actually modify the reserve
334  * map to add the specified range [f, t).  region_chg does
335  * not change the number of huge pages represented by the
336  * map.  However, if the existing regions in the map can not
337  * be expanded to represent the new range, a new file_region
338  * structure is added to the map as a placeholder.  This is
339  * so that the subsequent region_add call will have all the
340  * regions it needs and will not fail.
341  *
342  * Upon entry, region_chg will also examine the cache of region descriptors
343  * associated with the map.  If there are not enough descriptors cached, one
344  * will be allocated for the in progress add operation.
345  *
346  * Returns the number of huge pages that need to be added to the existing
347  * reservation map for the range [f, t).  This number is greater or equal to
348  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
349  * is needed and can not be allocated.
350  */
351 static long region_chg(struct resv_map *resv, long f, long t)
352 {
353 	struct list_head *head = &resv->regions;
354 	struct file_region *rg, *nrg = NULL;
355 	long chg = 0;
356 
357 retry:
358 	spin_lock(&resv->lock);
359 retry_locked:
360 	resv->adds_in_progress++;
361 
362 	/*
363 	 * Check for sufficient descriptors in the cache to accommodate
364 	 * the number of in progress add operations.
365 	 */
366 	if (resv->adds_in_progress > resv->region_cache_count) {
367 		struct file_region *trg;
368 
369 		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
370 		/* Must drop lock to allocate a new descriptor. */
371 		resv->adds_in_progress--;
372 		spin_unlock(&resv->lock);
373 
374 		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
375 		if (!trg) {
376 			kfree(nrg);
377 			return -ENOMEM;
378 		}
379 
380 		spin_lock(&resv->lock);
381 		list_add(&trg->link, &resv->region_cache);
382 		resv->region_cache_count++;
383 		goto retry_locked;
384 	}
385 
386 	/* Locate the region we are before or in. */
387 	list_for_each_entry(rg, head, link)
388 		if (f <= rg->to)
389 			break;
390 
391 	/* If we are below the current region then a new region is required.
392 	 * Subtle, allocate a new region at the position but make it zero
393 	 * size such that we can guarantee to record the reservation. */
394 	if (&rg->link == head || t < rg->from) {
395 		if (!nrg) {
396 			resv->adds_in_progress--;
397 			spin_unlock(&resv->lock);
398 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
399 			if (!nrg)
400 				return -ENOMEM;
401 
402 			nrg->from = f;
403 			nrg->to   = f;
404 			INIT_LIST_HEAD(&nrg->link);
405 			goto retry;
406 		}
407 
408 		list_add(&nrg->link, rg->link.prev);
409 		chg = t - f;
410 		goto out_nrg;
411 	}
412 
413 	/* Round our left edge to the current segment if it encloses us. */
414 	if (f > rg->from)
415 		f = rg->from;
416 	chg = t - f;
417 
418 	/* Check for and consume any regions we now overlap with. */
419 	list_for_each_entry(rg, rg->link.prev, link) {
420 		if (&rg->link == head)
421 			break;
422 		if (rg->from > t)
423 			goto out;
424 
425 		/* We overlap with this area, if it extends further than
426 		 * us then we must extend ourselves.  Account for its
427 		 * existing reservation. */
428 		if (rg->to > t) {
429 			chg += rg->to - t;
430 			t = rg->to;
431 		}
432 		chg -= rg->to - rg->from;
433 	}
434 
435 out:
436 	spin_unlock(&resv->lock);
437 	/*  We already know we raced and no longer need the new region */
438 	kfree(nrg);
439 	return chg;
440 out_nrg:
441 	spin_unlock(&resv->lock);
442 	return chg;
443 }
444 
445 /*
446  * Abort the in progress add operation.  The adds_in_progress field
447  * of the resv_map keeps track of the operations in progress between
448  * calls to region_chg and region_add.  Operations are sometimes
449  * aborted after the call to region_chg.  In such cases, region_abort
450  * is called to decrement the adds_in_progress counter.
451  *
452  * NOTE: The range arguments [f, t) are not needed or used in this
453  * routine.  They are kept to make reading the calling code easier as
454  * arguments will match the associated region_chg call.
455  */
456 static void region_abort(struct resv_map *resv, long f, long t)
457 {
458 	spin_lock(&resv->lock);
459 	VM_BUG_ON(!resv->region_cache_count);
460 	resv->adds_in_progress--;
461 	spin_unlock(&resv->lock);
462 }
463 
464 /*
465  * Delete the specified range [f, t) from the reserve map.  If the
466  * t parameter is LONG_MAX, this indicates that ALL regions after f
467  * should be deleted.  Locate the regions which intersect [f, t)
468  * and either trim, delete or split the existing regions.
469  *
470  * Returns the number of huge pages deleted from the reserve map.
471  * In the normal case, the return value is zero or more.  In the
472  * case where a region must be split, a new region descriptor must
473  * be allocated.  If the allocation fails, -ENOMEM will be returned.
474  * NOTE: If the parameter t == LONG_MAX, then we will never split
475  * a region and possibly return -ENOMEM.  Callers specifying
476  * t == LONG_MAX do not need to check for -ENOMEM error.
477  */
478 static long region_del(struct resv_map *resv, long f, long t)
479 {
480 	struct list_head *head = &resv->regions;
481 	struct file_region *rg, *trg;
482 	struct file_region *nrg = NULL;
483 	long del = 0;
484 
485 retry:
486 	spin_lock(&resv->lock);
487 	list_for_each_entry_safe(rg, trg, head, link) {
488 		/*
489 		 * Skip regions before the range to be deleted.  file_region
490 		 * ranges are normally of the form [from, to).  However, there
491 		 * may be a "placeholder" entry in the map which is of the form
492 		 * (from, to) with from == to.  Check for placeholder entries
493 		 * at the beginning of the range to be deleted.
494 		 */
495 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
496 			continue;
497 
498 		if (rg->from >= t)
499 			break;
500 
501 		if (f > rg->from && t < rg->to) { /* Must split region */
502 			/*
503 			 * Check for an entry in the cache before dropping
504 			 * lock and attempting allocation.
505 			 */
506 			if (!nrg &&
507 			    resv->region_cache_count > resv->adds_in_progress) {
508 				nrg = list_first_entry(&resv->region_cache,
509 							struct file_region,
510 							link);
511 				list_del(&nrg->link);
512 				resv->region_cache_count--;
513 			}
514 
515 			if (!nrg) {
516 				spin_unlock(&resv->lock);
517 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
518 				if (!nrg)
519 					return -ENOMEM;
520 				goto retry;
521 			}
522 
523 			del += t - f;
524 
525 			/* New entry for end of split region */
526 			nrg->from = t;
527 			nrg->to = rg->to;
528 			INIT_LIST_HEAD(&nrg->link);
529 
530 			/* Original entry is trimmed */
531 			rg->to = f;
532 
533 			list_add(&nrg->link, &rg->link);
534 			nrg = NULL;
535 			break;
536 		}
537 
538 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
539 			del += rg->to - rg->from;
540 			list_del(&rg->link);
541 			kfree(rg);
542 			continue;
543 		}
544 
545 		if (f <= rg->from) {	/* Trim beginning of region */
546 			del += t - rg->from;
547 			rg->from = t;
548 		} else {		/* Trim end of region */
549 			del += rg->to - f;
550 			rg->to = f;
551 		}
552 	}
553 
554 	spin_unlock(&resv->lock);
555 	kfree(nrg);
556 	return del;
557 }
558 
559 /*
560  * A rare out of memory error was encountered which prevented removal of
561  * the reserve map region for a page.  The huge page itself was free'ed
562  * and removed from the page cache.  This routine will adjust the subpool
563  * usage count, and the global reserve count if needed.  By incrementing
564  * these counts, the reserve map entry which could not be deleted will
565  * appear as a "reserved" entry instead of simply dangling with incorrect
566  * counts.
567  */
568 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
569 {
570 	struct hugepage_subpool *spool = subpool_inode(inode);
571 	long rsv_adjust;
572 
573 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
574 	if (restore_reserve && rsv_adjust) {
575 		struct hstate *h = hstate_inode(inode);
576 
577 		hugetlb_acct_memory(h, 1);
578 	}
579 }
580 
581 /*
582  * Count and return the number of huge pages in the reserve map
583  * that intersect with the range [f, t).
584  */
585 static long region_count(struct resv_map *resv, long f, long t)
586 {
587 	struct list_head *head = &resv->regions;
588 	struct file_region *rg;
589 	long chg = 0;
590 
591 	spin_lock(&resv->lock);
592 	/* Locate each segment we overlap with, and count that overlap. */
593 	list_for_each_entry(rg, head, link) {
594 		long seg_from;
595 		long seg_to;
596 
597 		if (rg->to <= f)
598 			continue;
599 		if (rg->from >= t)
600 			break;
601 
602 		seg_from = max(rg->from, f);
603 		seg_to = min(rg->to, t);
604 
605 		chg += seg_to - seg_from;
606 	}
607 	spin_unlock(&resv->lock);
608 
609 	return chg;
610 }
611 
612 /*
613  * Convert the address within this vma to the page offset within
614  * the mapping, in pagecache page units; huge pages here.
615  */
616 static pgoff_t vma_hugecache_offset(struct hstate *h,
617 			struct vm_area_struct *vma, unsigned long address)
618 {
619 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
620 			(vma->vm_pgoff >> huge_page_order(h));
621 }
622 
623 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
624 				     unsigned long address)
625 {
626 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
627 }
628 
629 /*
630  * Return the size of the pages allocated when backing a VMA. In the majority
631  * cases this will be same size as used by the page table entries.
632  */
633 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
634 {
635 	struct hstate *hstate;
636 
637 	if (!is_vm_hugetlb_page(vma))
638 		return PAGE_SIZE;
639 
640 	hstate = hstate_vma(vma);
641 
642 	return 1UL << huge_page_shift(hstate);
643 }
644 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
645 
646 /*
647  * Return the page size being used by the MMU to back a VMA. In the majority
648  * of cases, the page size used by the kernel matches the MMU size. On
649  * architectures where it differs, an architecture-specific version of this
650  * function is required.
651  */
652 #ifndef vma_mmu_pagesize
653 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
654 {
655 	return vma_kernel_pagesize(vma);
656 }
657 #endif
658 
659 /*
660  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
661  * bits of the reservation map pointer, which are always clear due to
662  * alignment.
663  */
664 #define HPAGE_RESV_OWNER    (1UL << 0)
665 #define HPAGE_RESV_UNMAPPED (1UL << 1)
666 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
667 
668 /*
669  * These helpers are used to track how many pages are reserved for
670  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
671  * is guaranteed to have their future faults succeed.
672  *
673  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
674  * the reserve counters are updated with the hugetlb_lock held. It is safe
675  * to reset the VMA at fork() time as it is not in use yet and there is no
676  * chance of the global counters getting corrupted as a result of the values.
677  *
678  * The private mapping reservation is represented in a subtly different
679  * manner to a shared mapping.  A shared mapping has a region map associated
680  * with the underlying file, this region map represents the backing file
681  * pages which have ever had a reservation assigned which this persists even
682  * after the page is instantiated.  A private mapping has a region map
683  * associated with the original mmap which is attached to all VMAs which
684  * reference it, this region map represents those offsets which have consumed
685  * reservation ie. where pages have been instantiated.
686  */
687 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
688 {
689 	return (unsigned long)vma->vm_private_data;
690 }
691 
692 static void set_vma_private_data(struct vm_area_struct *vma,
693 							unsigned long value)
694 {
695 	vma->vm_private_data = (void *)value;
696 }
697 
698 struct resv_map *resv_map_alloc(void)
699 {
700 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
701 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
702 
703 	if (!resv_map || !rg) {
704 		kfree(resv_map);
705 		kfree(rg);
706 		return NULL;
707 	}
708 
709 	kref_init(&resv_map->refs);
710 	spin_lock_init(&resv_map->lock);
711 	INIT_LIST_HEAD(&resv_map->regions);
712 
713 	resv_map->adds_in_progress = 0;
714 
715 	INIT_LIST_HEAD(&resv_map->region_cache);
716 	list_add(&rg->link, &resv_map->region_cache);
717 	resv_map->region_cache_count = 1;
718 
719 	return resv_map;
720 }
721 
722 void resv_map_release(struct kref *ref)
723 {
724 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
725 	struct list_head *head = &resv_map->region_cache;
726 	struct file_region *rg, *trg;
727 
728 	/* Clear out any active regions before we release the map. */
729 	region_del(resv_map, 0, LONG_MAX);
730 
731 	/* ... and any entries left in the cache */
732 	list_for_each_entry_safe(rg, trg, head, link) {
733 		list_del(&rg->link);
734 		kfree(rg);
735 	}
736 
737 	VM_BUG_ON(resv_map->adds_in_progress);
738 
739 	kfree(resv_map);
740 }
741 
742 static inline struct resv_map *inode_resv_map(struct inode *inode)
743 {
744 	return inode->i_mapping->private_data;
745 }
746 
747 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
748 {
749 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
750 	if (vma->vm_flags & VM_MAYSHARE) {
751 		struct address_space *mapping = vma->vm_file->f_mapping;
752 		struct inode *inode = mapping->host;
753 
754 		return inode_resv_map(inode);
755 
756 	} else {
757 		return (struct resv_map *)(get_vma_private_data(vma) &
758 							~HPAGE_RESV_MASK);
759 	}
760 }
761 
762 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
763 {
764 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
765 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
766 
767 	set_vma_private_data(vma, (get_vma_private_data(vma) &
768 				HPAGE_RESV_MASK) | (unsigned long)map);
769 }
770 
771 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
772 {
773 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
774 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
775 
776 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
777 }
778 
779 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
780 {
781 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
782 
783 	return (get_vma_private_data(vma) & flag) != 0;
784 }
785 
786 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
787 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
788 {
789 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
790 	if (!(vma->vm_flags & VM_MAYSHARE))
791 		vma->vm_private_data = (void *)0;
792 }
793 
794 /* Returns true if the VMA has associated reserve pages */
795 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
796 {
797 	if (vma->vm_flags & VM_NORESERVE) {
798 		/*
799 		 * This address is already reserved by other process(chg == 0),
800 		 * so, we should decrement reserved count. Without decrementing,
801 		 * reserve count remains after releasing inode, because this
802 		 * allocated page will go into page cache and is regarded as
803 		 * coming from reserved pool in releasing step.  Currently, we
804 		 * don't have any other solution to deal with this situation
805 		 * properly, so add work-around here.
806 		 */
807 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
808 			return true;
809 		else
810 			return false;
811 	}
812 
813 	/* Shared mappings always use reserves */
814 	if (vma->vm_flags & VM_MAYSHARE) {
815 		/*
816 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
817 		 * be a region map for all pages.  The only situation where
818 		 * there is no region map is if a hole was punched via
819 		 * fallocate.  In this case, there really are no reverves to
820 		 * use.  This situation is indicated if chg != 0.
821 		 */
822 		if (chg)
823 			return false;
824 		else
825 			return true;
826 	}
827 
828 	/*
829 	 * Only the process that called mmap() has reserves for
830 	 * private mappings.
831 	 */
832 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
833 		return true;
834 
835 	return false;
836 }
837 
838 static void enqueue_huge_page(struct hstate *h, struct page *page)
839 {
840 	int nid = page_to_nid(page);
841 	list_move(&page->lru, &h->hugepage_freelists[nid]);
842 	h->free_huge_pages++;
843 	h->free_huge_pages_node[nid]++;
844 }
845 
846 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
847 {
848 	struct page *page;
849 
850 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
851 		if (!is_migrate_isolate_page(page))
852 			break;
853 	/*
854 	 * if 'non-isolated free hugepage' not found on the list,
855 	 * the allocation fails.
856 	 */
857 	if (&h->hugepage_freelists[nid] == &page->lru)
858 		return NULL;
859 	list_move(&page->lru, &h->hugepage_activelist);
860 	set_page_refcounted(page);
861 	h->free_huge_pages--;
862 	h->free_huge_pages_node[nid]--;
863 	return page;
864 }
865 
866 /* Movability of hugepages depends on migration support. */
867 static inline gfp_t htlb_alloc_mask(struct hstate *h)
868 {
869 	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
870 		return GFP_HIGHUSER_MOVABLE;
871 	else
872 		return GFP_HIGHUSER;
873 }
874 
875 static struct page *dequeue_huge_page_vma(struct hstate *h,
876 				struct vm_area_struct *vma,
877 				unsigned long address, int avoid_reserve,
878 				long chg)
879 {
880 	struct page *page = NULL;
881 	struct mempolicy *mpol;
882 	nodemask_t *nodemask;
883 	struct zonelist *zonelist;
884 	struct zone *zone;
885 	struct zoneref *z;
886 	unsigned int cpuset_mems_cookie;
887 
888 	/*
889 	 * A child process with MAP_PRIVATE mappings created by their parent
890 	 * have no page reserves. This check ensures that reservations are
891 	 * not "stolen". The child may still get SIGKILLed
892 	 */
893 	if (!vma_has_reserves(vma, chg) &&
894 			h->free_huge_pages - h->resv_huge_pages == 0)
895 		goto err;
896 
897 	/* If reserves cannot be used, ensure enough pages are in the pool */
898 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
899 		goto err;
900 
901 retry_cpuset:
902 	cpuset_mems_cookie = read_mems_allowed_begin();
903 	zonelist = huge_zonelist(vma, address,
904 					htlb_alloc_mask(h), &mpol, &nodemask);
905 
906 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
907 						MAX_NR_ZONES - 1, nodemask) {
908 		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
909 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
910 			if (page) {
911 				if (avoid_reserve)
912 					break;
913 				if (!vma_has_reserves(vma, chg))
914 					break;
915 
916 				SetPagePrivate(page);
917 				h->resv_huge_pages--;
918 				break;
919 			}
920 		}
921 	}
922 
923 	mpol_cond_put(mpol);
924 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
925 		goto retry_cpuset;
926 	return page;
927 
928 err:
929 	return NULL;
930 }
931 
932 /*
933  * common helper functions for hstate_next_node_to_{alloc|free}.
934  * We may have allocated or freed a huge page based on a different
935  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
936  * be outside of *nodes_allowed.  Ensure that we use an allowed
937  * node for alloc or free.
938  */
939 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
940 {
941 	nid = next_node(nid, *nodes_allowed);
942 	if (nid == MAX_NUMNODES)
943 		nid = first_node(*nodes_allowed);
944 	VM_BUG_ON(nid >= MAX_NUMNODES);
945 
946 	return nid;
947 }
948 
949 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
950 {
951 	if (!node_isset(nid, *nodes_allowed))
952 		nid = next_node_allowed(nid, nodes_allowed);
953 	return nid;
954 }
955 
956 /*
957  * returns the previously saved node ["this node"] from which to
958  * allocate a persistent huge page for the pool and advance the
959  * next node from which to allocate, handling wrap at end of node
960  * mask.
961  */
962 static int hstate_next_node_to_alloc(struct hstate *h,
963 					nodemask_t *nodes_allowed)
964 {
965 	int nid;
966 
967 	VM_BUG_ON(!nodes_allowed);
968 
969 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
970 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
971 
972 	return nid;
973 }
974 
975 /*
976  * helper for free_pool_huge_page() - return the previously saved
977  * node ["this node"] from which to free a huge page.  Advance the
978  * next node id whether or not we find a free huge page to free so
979  * that the next attempt to free addresses the next node.
980  */
981 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
982 {
983 	int nid;
984 
985 	VM_BUG_ON(!nodes_allowed);
986 
987 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
988 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
989 
990 	return nid;
991 }
992 
993 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
994 	for (nr_nodes = nodes_weight(*mask);				\
995 		nr_nodes > 0 &&						\
996 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
997 		nr_nodes--)
998 
999 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1000 	for (nr_nodes = nodes_weight(*mask);				\
1001 		nr_nodes > 0 &&						\
1002 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1003 		nr_nodes--)
1004 
1005 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
1006 static void destroy_compound_gigantic_page(struct page *page,
1007 					unsigned int order)
1008 {
1009 	int i;
1010 	int nr_pages = 1 << order;
1011 	struct page *p = page + 1;
1012 
1013 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1014 		clear_compound_head(p);
1015 		set_page_refcounted(p);
1016 	}
1017 
1018 	set_compound_order(page, 0);
1019 	__ClearPageHead(page);
1020 }
1021 
1022 static void free_gigantic_page(struct page *page, unsigned int order)
1023 {
1024 	free_contig_range(page_to_pfn(page), 1 << order);
1025 }
1026 
1027 static int __alloc_gigantic_page(unsigned long start_pfn,
1028 				unsigned long nr_pages)
1029 {
1030 	unsigned long end_pfn = start_pfn + nr_pages;
1031 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1032 }
1033 
1034 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1035 				unsigned long nr_pages)
1036 {
1037 	unsigned long i, end_pfn = start_pfn + nr_pages;
1038 	struct page *page;
1039 
1040 	for (i = start_pfn; i < end_pfn; i++) {
1041 		if (!pfn_valid(i))
1042 			return false;
1043 
1044 		page = pfn_to_page(i);
1045 
1046 		if (PageReserved(page))
1047 			return false;
1048 
1049 		if (page_count(page) > 0)
1050 			return false;
1051 
1052 		if (PageHuge(page))
1053 			return false;
1054 	}
1055 
1056 	return true;
1057 }
1058 
1059 static bool zone_spans_last_pfn(const struct zone *zone,
1060 			unsigned long start_pfn, unsigned long nr_pages)
1061 {
1062 	unsigned long last_pfn = start_pfn + nr_pages - 1;
1063 	return zone_spans_pfn(zone, last_pfn);
1064 }
1065 
1066 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1067 {
1068 	unsigned long nr_pages = 1 << order;
1069 	unsigned long ret, pfn, flags;
1070 	struct zone *z;
1071 
1072 	z = NODE_DATA(nid)->node_zones;
1073 	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1074 		spin_lock_irqsave(&z->lock, flags);
1075 
1076 		pfn = ALIGN(z->zone_start_pfn, nr_pages);
1077 		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1078 			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1079 				/*
1080 				 * We release the zone lock here because
1081 				 * alloc_contig_range() will also lock the zone
1082 				 * at some point. If there's an allocation
1083 				 * spinning on this lock, it may win the race
1084 				 * and cause alloc_contig_range() to fail...
1085 				 */
1086 				spin_unlock_irqrestore(&z->lock, flags);
1087 				ret = __alloc_gigantic_page(pfn, nr_pages);
1088 				if (!ret)
1089 					return pfn_to_page(pfn);
1090 				spin_lock_irqsave(&z->lock, flags);
1091 			}
1092 			pfn += nr_pages;
1093 		}
1094 
1095 		spin_unlock_irqrestore(&z->lock, flags);
1096 	}
1097 
1098 	return NULL;
1099 }
1100 
1101 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1102 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1103 
1104 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1105 {
1106 	struct page *page;
1107 
1108 	page = alloc_gigantic_page(nid, huge_page_order(h));
1109 	if (page) {
1110 		prep_compound_gigantic_page(page, huge_page_order(h));
1111 		prep_new_huge_page(h, page, nid);
1112 	}
1113 
1114 	return page;
1115 }
1116 
1117 static int alloc_fresh_gigantic_page(struct hstate *h,
1118 				nodemask_t *nodes_allowed)
1119 {
1120 	struct page *page = NULL;
1121 	int nr_nodes, node;
1122 
1123 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1124 		page = alloc_fresh_gigantic_page_node(h, node);
1125 		if (page)
1126 			return 1;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static inline bool gigantic_page_supported(void) { return true; }
1133 #else
1134 static inline bool gigantic_page_supported(void) { return false; }
1135 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1136 static inline void destroy_compound_gigantic_page(struct page *page,
1137 						unsigned int order) { }
1138 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1139 					nodemask_t *nodes_allowed) { return 0; }
1140 #endif
1141 
1142 static void update_and_free_page(struct hstate *h, struct page *page)
1143 {
1144 	int i;
1145 
1146 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
1147 		return;
1148 
1149 	h->nr_huge_pages--;
1150 	h->nr_huge_pages_node[page_to_nid(page)]--;
1151 	for (i = 0; i < pages_per_huge_page(h); i++) {
1152 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1153 				1 << PG_referenced | 1 << PG_dirty |
1154 				1 << PG_active | 1 << PG_private |
1155 				1 << PG_writeback);
1156 	}
1157 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1158 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1159 	set_page_refcounted(page);
1160 	if (hstate_is_gigantic(h)) {
1161 		destroy_compound_gigantic_page(page, huge_page_order(h));
1162 		free_gigantic_page(page, huge_page_order(h));
1163 	} else {
1164 		__free_pages(page, huge_page_order(h));
1165 	}
1166 }
1167 
1168 struct hstate *size_to_hstate(unsigned long size)
1169 {
1170 	struct hstate *h;
1171 
1172 	for_each_hstate(h) {
1173 		if (huge_page_size(h) == size)
1174 			return h;
1175 	}
1176 	return NULL;
1177 }
1178 
1179 /*
1180  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1181  * to hstate->hugepage_activelist.)
1182  *
1183  * This function can be called for tail pages, but never returns true for them.
1184  */
1185 bool page_huge_active(struct page *page)
1186 {
1187 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1188 	return PageHead(page) && PagePrivate(&page[1]);
1189 }
1190 
1191 /* never called for tail page */
1192 static void set_page_huge_active(struct page *page)
1193 {
1194 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1195 	SetPagePrivate(&page[1]);
1196 }
1197 
1198 static void clear_page_huge_active(struct page *page)
1199 {
1200 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1201 	ClearPagePrivate(&page[1]);
1202 }
1203 
1204 void free_huge_page(struct page *page)
1205 {
1206 	/*
1207 	 * Can't pass hstate in here because it is called from the
1208 	 * compound page destructor.
1209 	 */
1210 	struct hstate *h = page_hstate(page);
1211 	int nid = page_to_nid(page);
1212 	struct hugepage_subpool *spool =
1213 		(struct hugepage_subpool *)page_private(page);
1214 	bool restore_reserve;
1215 
1216 	set_page_private(page, 0);
1217 	page->mapping = NULL;
1218 	BUG_ON(page_count(page));
1219 	BUG_ON(page_mapcount(page));
1220 	restore_reserve = PagePrivate(page);
1221 	ClearPagePrivate(page);
1222 
1223 	/*
1224 	 * A return code of zero implies that the subpool will be under its
1225 	 * minimum size if the reservation is not restored after page is free.
1226 	 * Therefore, force restore_reserve operation.
1227 	 */
1228 	if (hugepage_subpool_put_pages(spool, 1) == 0)
1229 		restore_reserve = true;
1230 
1231 	spin_lock(&hugetlb_lock);
1232 	clear_page_huge_active(page);
1233 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1234 				     pages_per_huge_page(h), page);
1235 	if (restore_reserve)
1236 		h->resv_huge_pages++;
1237 
1238 	if (h->surplus_huge_pages_node[nid]) {
1239 		/* remove the page from active list */
1240 		list_del(&page->lru);
1241 		update_and_free_page(h, page);
1242 		h->surplus_huge_pages--;
1243 		h->surplus_huge_pages_node[nid]--;
1244 	} else {
1245 		arch_clear_hugepage_flags(page);
1246 		enqueue_huge_page(h, page);
1247 	}
1248 	spin_unlock(&hugetlb_lock);
1249 }
1250 
1251 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1252 {
1253 	INIT_LIST_HEAD(&page->lru);
1254 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1255 	spin_lock(&hugetlb_lock);
1256 	set_hugetlb_cgroup(page, NULL);
1257 	h->nr_huge_pages++;
1258 	h->nr_huge_pages_node[nid]++;
1259 	spin_unlock(&hugetlb_lock);
1260 	put_page(page); /* free it into the hugepage allocator */
1261 }
1262 
1263 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1264 {
1265 	int i;
1266 	int nr_pages = 1 << order;
1267 	struct page *p = page + 1;
1268 
1269 	/* we rely on prep_new_huge_page to set the destructor */
1270 	set_compound_order(page, order);
1271 	__SetPageHead(page);
1272 	__ClearPageReserved(page);
1273 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1274 		/*
1275 		 * For gigantic hugepages allocated through bootmem at
1276 		 * boot, it's safer to be consistent with the not-gigantic
1277 		 * hugepages and clear the PG_reserved bit from all tail pages
1278 		 * too.  Otherwse drivers using get_user_pages() to access tail
1279 		 * pages may get the reference counting wrong if they see
1280 		 * PG_reserved set on a tail page (despite the head page not
1281 		 * having PG_reserved set).  Enforcing this consistency between
1282 		 * head and tail pages allows drivers to optimize away a check
1283 		 * on the head page when they need know if put_page() is needed
1284 		 * after get_user_pages().
1285 		 */
1286 		__ClearPageReserved(p);
1287 		set_page_count(p, 0);
1288 		set_compound_head(p, page);
1289 	}
1290 }
1291 
1292 /*
1293  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1294  * transparent huge pages.  See the PageTransHuge() documentation for more
1295  * details.
1296  */
1297 int PageHuge(struct page *page)
1298 {
1299 	if (!PageCompound(page))
1300 		return 0;
1301 
1302 	page = compound_head(page);
1303 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1304 }
1305 EXPORT_SYMBOL_GPL(PageHuge);
1306 
1307 /*
1308  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1309  * normal or transparent huge pages.
1310  */
1311 int PageHeadHuge(struct page *page_head)
1312 {
1313 	if (!PageHead(page_head))
1314 		return 0;
1315 
1316 	return get_compound_page_dtor(page_head) == free_huge_page;
1317 }
1318 
1319 pgoff_t __basepage_index(struct page *page)
1320 {
1321 	struct page *page_head = compound_head(page);
1322 	pgoff_t index = page_index(page_head);
1323 	unsigned long compound_idx;
1324 
1325 	if (!PageHuge(page_head))
1326 		return page_index(page);
1327 
1328 	if (compound_order(page_head) >= MAX_ORDER)
1329 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1330 	else
1331 		compound_idx = page - page_head;
1332 
1333 	return (index << compound_order(page_head)) + compound_idx;
1334 }
1335 
1336 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1337 {
1338 	struct page *page;
1339 
1340 	page = __alloc_pages_node(nid,
1341 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1342 						__GFP_REPEAT|__GFP_NOWARN,
1343 		huge_page_order(h));
1344 	if (page) {
1345 		prep_new_huge_page(h, page, nid);
1346 	}
1347 
1348 	return page;
1349 }
1350 
1351 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1352 {
1353 	struct page *page;
1354 	int nr_nodes, node;
1355 	int ret = 0;
1356 
1357 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1358 		page = alloc_fresh_huge_page_node(h, node);
1359 		if (page) {
1360 			ret = 1;
1361 			break;
1362 		}
1363 	}
1364 
1365 	if (ret)
1366 		count_vm_event(HTLB_BUDDY_PGALLOC);
1367 	else
1368 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1369 
1370 	return ret;
1371 }
1372 
1373 /*
1374  * Free huge page from pool from next node to free.
1375  * Attempt to keep persistent huge pages more or less
1376  * balanced over allowed nodes.
1377  * Called with hugetlb_lock locked.
1378  */
1379 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1380 							 bool acct_surplus)
1381 {
1382 	int nr_nodes, node;
1383 	int ret = 0;
1384 
1385 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1386 		/*
1387 		 * If we're returning unused surplus pages, only examine
1388 		 * nodes with surplus pages.
1389 		 */
1390 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1391 		    !list_empty(&h->hugepage_freelists[node])) {
1392 			struct page *page =
1393 				list_entry(h->hugepage_freelists[node].next,
1394 					  struct page, lru);
1395 			list_del(&page->lru);
1396 			h->free_huge_pages--;
1397 			h->free_huge_pages_node[node]--;
1398 			if (acct_surplus) {
1399 				h->surplus_huge_pages--;
1400 				h->surplus_huge_pages_node[node]--;
1401 			}
1402 			update_and_free_page(h, page);
1403 			ret = 1;
1404 			break;
1405 		}
1406 	}
1407 
1408 	return ret;
1409 }
1410 
1411 /*
1412  * Dissolve a given free hugepage into free buddy pages. This function does
1413  * nothing for in-use (including surplus) hugepages.
1414  */
1415 static void dissolve_free_huge_page(struct page *page)
1416 {
1417 	spin_lock(&hugetlb_lock);
1418 	if (PageHuge(page) && !page_count(page)) {
1419 		struct hstate *h = page_hstate(page);
1420 		int nid = page_to_nid(page);
1421 		list_del(&page->lru);
1422 		h->free_huge_pages--;
1423 		h->free_huge_pages_node[nid]--;
1424 		update_and_free_page(h, page);
1425 	}
1426 	spin_unlock(&hugetlb_lock);
1427 }
1428 
1429 /*
1430  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1431  * make specified memory blocks removable from the system.
1432  * Note that start_pfn should aligned with (minimum) hugepage size.
1433  */
1434 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1435 {
1436 	unsigned long pfn;
1437 
1438 	if (!hugepages_supported())
1439 		return;
1440 
1441 	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1442 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1443 		dissolve_free_huge_page(pfn_to_page(pfn));
1444 }
1445 
1446 /*
1447  * There are 3 ways this can get called:
1448  * 1. With vma+addr: we use the VMA's memory policy
1449  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1450  *    page from any node, and let the buddy allocator itself figure
1451  *    it out.
1452  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1453  *    strictly from 'nid'
1454  */
1455 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1456 		struct vm_area_struct *vma, unsigned long addr, int nid)
1457 {
1458 	int order = huge_page_order(h);
1459 	gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1460 	unsigned int cpuset_mems_cookie;
1461 
1462 	/*
1463 	 * We need a VMA to get a memory policy.  If we do not
1464 	 * have one, we use the 'nid' argument.
1465 	 *
1466 	 * The mempolicy stuff below has some non-inlined bits
1467 	 * and calls ->vm_ops.  That makes it hard to optimize at
1468 	 * compile-time, even when NUMA is off and it does
1469 	 * nothing.  This helps the compiler optimize it out.
1470 	 */
1471 	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1472 		/*
1473 		 * If a specific node is requested, make sure to
1474 		 * get memory from there, but only when a node
1475 		 * is explicitly specified.
1476 		 */
1477 		if (nid != NUMA_NO_NODE)
1478 			gfp |= __GFP_THISNODE;
1479 		/*
1480 		 * Make sure to call something that can handle
1481 		 * nid=NUMA_NO_NODE
1482 		 */
1483 		return alloc_pages_node(nid, gfp, order);
1484 	}
1485 
1486 	/*
1487 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
1488 	 * allocate a huge page with it.  We will only reach this
1489 	 * when CONFIG_NUMA=y.
1490 	 */
1491 	do {
1492 		struct page *page;
1493 		struct mempolicy *mpol;
1494 		struct zonelist *zl;
1495 		nodemask_t *nodemask;
1496 
1497 		cpuset_mems_cookie = read_mems_allowed_begin();
1498 		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1499 		mpol_cond_put(mpol);
1500 		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1501 		if (page)
1502 			return page;
1503 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1504 
1505 	return NULL;
1506 }
1507 
1508 /*
1509  * There are two ways to allocate a huge page:
1510  * 1. When you have a VMA and an address (like a fault)
1511  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1512  *
1513  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1514  * this case which signifies that the allocation should be done with
1515  * respect for the VMA's memory policy.
1516  *
1517  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1518  * implies that memory policies will not be taken in to account.
1519  */
1520 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1521 		struct vm_area_struct *vma, unsigned long addr, int nid)
1522 {
1523 	struct page *page;
1524 	unsigned int r_nid;
1525 
1526 	if (hstate_is_gigantic(h))
1527 		return NULL;
1528 
1529 	/*
1530 	 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1531 	 * This makes sure the caller is picking _one_ of the modes with which
1532 	 * we can call this function, not both.
1533 	 */
1534 	if (vma || (addr != -1)) {
1535 		VM_WARN_ON_ONCE(addr == -1);
1536 		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1537 	}
1538 	/*
1539 	 * Assume we will successfully allocate the surplus page to
1540 	 * prevent racing processes from causing the surplus to exceed
1541 	 * overcommit
1542 	 *
1543 	 * This however introduces a different race, where a process B
1544 	 * tries to grow the static hugepage pool while alloc_pages() is
1545 	 * called by process A. B will only examine the per-node
1546 	 * counters in determining if surplus huge pages can be
1547 	 * converted to normal huge pages in adjust_pool_surplus(). A
1548 	 * won't be able to increment the per-node counter, until the
1549 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1550 	 * no more huge pages can be converted from surplus to normal
1551 	 * state (and doesn't try to convert again). Thus, we have a
1552 	 * case where a surplus huge page exists, the pool is grown, and
1553 	 * the surplus huge page still exists after, even though it
1554 	 * should just have been converted to a normal huge page. This
1555 	 * does not leak memory, though, as the hugepage will be freed
1556 	 * once it is out of use. It also does not allow the counters to
1557 	 * go out of whack in adjust_pool_surplus() as we don't modify
1558 	 * the node values until we've gotten the hugepage and only the
1559 	 * per-node value is checked there.
1560 	 */
1561 	spin_lock(&hugetlb_lock);
1562 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1563 		spin_unlock(&hugetlb_lock);
1564 		return NULL;
1565 	} else {
1566 		h->nr_huge_pages++;
1567 		h->surplus_huge_pages++;
1568 	}
1569 	spin_unlock(&hugetlb_lock);
1570 
1571 	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1572 
1573 	spin_lock(&hugetlb_lock);
1574 	if (page) {
1575 		INIT_LIST_HEAD(&page->lru);
1576 		r_nid = page_to_nid(page);
1577 		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1578 		set_hugetlb_cgroup(page, NULL);
1579 		/*
1580 		 * We incremented the global counters already
1581 		 */
1582 		h->nr_huge_pages_node[r_nid]++;
1583 		h->surplus_huge_pages_node[r_nid]++;
1584 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1585 	} else {
1586 		h->nr_huge_pages--;
1587 		h->surplus_huge_pages--;
1588 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1589 	}
1590 	spin_unlock(&hugetlb_lock);
1591 
1592 	return page;
1593 }
1594 
1595 /*
1596  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1597  * NUMA_NO_NODE, which means that it may be allocated
1598  * anywhere.
1599  */
1600 static
1601 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1602 {
1603 	unsigned long addr = -1;
1604 
1605 	return __alloc_buddy_huge_page(h, NULL, addr, nid);
1606 }
1607 
1608 /*
1609  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1610  */
1611 static
1612 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1613 		struct vm_area_struct *vma, unsigned long addr)
1614 {
1615 	return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1616 }
1617 
1618 /*
1619  * This allocation function is useful in the context where vma is irrelevant.
1620  * E.g. soft-offlining uses this function because it only cares physical
1621  * address of error page.
1622  */
1623 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1624 {
1625 	struct page *page = NULL;
1626 
1627 	spin_lock(&hugetlb_lock);
1628 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1629 		page = dequeue_huge_page_node(h, nid);
1630 	spin_unlock(&hugetlb_lock);
1631 
1632 	if (!page)
1633 		page = __alloc_buddy_huge_page_no_mpol(h, nid);
1634 
1635 	return page;
1636 }
1637 
1638 /*
1639  * Increase the hugetlb pool such that it can accommodate a reservation
1640  * of size 'delta'.
1641  */
1642 static int gather_surplus_pages(struct hstate *h, int delta)
1643 {
1644 	struct list_head surplus_list;
1645 	struct page *page, *tmp;
1646 	int ret, i;
1647 	int needed, allocated;
1648 	bool alloc_ok = true;
1649 
1650 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1651 	if (needed <= 0) {
1652 		h->resv_huge_pages += delta;
1653 		return 0;
1654 	}
1655 
1656 	allocated = 0;
1657 	INIT_LIST_HEAD(&surplus_list);
1658 
1659 	ret = -ENOMEM;
1660 retry:
1661 	spin_unlock(&hugetlb_lock);
1662 	for (i = 0; i < needed; i++) {
1663 		page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1664 		if (!page) {
1665 			alloc_ok = false;
1666 			break;
1667 		}
1668 		list_add(&page->lru, &surplus_list);
1669 	}
1670 	allocated += i;
1671 
1672 	/*
1673 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1674 	 * because either resv_huge_pages or free_huge_pages may have changed.
1675 	 */
1676 	spin_lock(&hugetlb_lock);
1677 	needed = (h->resv_huge_pages + delta) -
1678 			(h->free_huge_pages + allocated);
1679 	if (needed > 0) {
1680 		if (alloc_ok)
1681 			goto retry;
1682 		/*
1683 		 * We were not able to allocate enough pages to
1684 		 * satisfy the entire reservation so we free what
1685 		 * we've allocated so far.
1686 		 */
1687 		goto free;
1688 	}
1689 	/*
1690 	 * The surplus_list now contains _at_least_ the number of extra pages
1691 	 * needed to accommodate the reservation.  Add the appropriate number
1692 	 * of pages to the hugetlb pool and free the extras back to the buddy
1693 	 * allocator.  Commit the entire reservation here to prevent another
1694 	 * process from stealing the pages as they are added to the pool but
1695 	 * before they are reserved.
1696 	 */
1697 	needed += allocated;
1698 	h->resv_huge_pages += delta;
1699 	ret = 0;
1700 
1701 	/* Free the needed pages to the hugetlb pool */
1702 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1703 		if ((--needed) < 0)
1704 			break;
1705 		/*
1706 		 * This page is now managed by the hugetlb allocator and has
1707 		 * no users -- drop the buddy allocator's reference.
1708 		 */
1709 		put_page_testzero(page);
1710 		VM_BUG_ON_PAGE(page_count(page), page);
1711 		enqueue_huge_page(h, page);
1712 	}
1713 free:
1714 	spin_unlock(&hugetlb_lock);
1715 
1716 	/* Free unnecessary surplus pages to the buddy allocator */
1717 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1718 		put_page(page);
1719 	spin_lock(&hugetlb_lock);
1720 
1721 	return ret;
1722 }
1723 
1724 /*
1725  * When releasing a hugetlb pool reservation, any surplus pages that were
1726  * allocated to satisfy the reservation must be explicitly freed if they were
1727  * never used.
1728  * Called with hugetlb_lock held.
1729  */
1730 static void return_unused_surplus_pages(struct hstate *h,
1731 					unsigned long unused_resv_pages)
1732 {
1733 	unsigned long nr_pages;
1734 
1735 	/* Uncommit the reservation */
1736 	h->resv_huge_pages -= unused_resv_pages;
1737 
1738 	/* Cannot return gigantic pages currently */
1739 	if (hstate_is_gigantic(h))
1740 		return;
1741 
1742 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1743 
1744 	/*
1745 	 * We want to release as many surplus pages as possible, spread
1746 	 * evenly across all nodes with memory. Iterate across these nodes
1747 	 * until we can no longer free unreserved surplus pages. This occurs
1748 	 * when the nodes with surplus pages have no free pages.
1749 	 * free_pool_huge_page() will balance the the freed pages across the
1750 	 * on-line nodes with memory and will handle the hstate accounting.
1751 	 */
1752 	while (nr_pages--) {
1753 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1754 			break;
1755 		cond_resched_lock(&hugetlb_lock);
1756 	}
1757 }
1758 
1759 
1760 /*
1761  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1762  * are used by the huge page allocation routines to manage reservations.
1763  *
1764  * vma_needs_reservation is called to determine if the huge page at addr
1765  * within the vma has an associated reservation.  If a reservation is
1766  * needed, the value 1 is returned.  The caller is then responsible for
1767  * managing the global reservation and subpool usage counts.  After
1768  * the huge page has been allocated, vma_commit_reservation is called
1769  * to add the page to the reservation map.  If the page allocation fails,
1770  * the reservation must be ended instead of committed.  vma_end_reservation
1771  * is called in such cases.
1772  *
1773  * In the normal case, vma_commit_reservation returns the same value
1774  * as the preceding vma_needs_reservation call.  The only time this
1775  * is not the case is if a reserve map was changed between calls.  It
1776  * is the responsibility of the caller to notice the difference and
1777  * take appropriate action.
1778  */
1779 enum vma_resv_mode {
1780 	VMA_NEEDS_RESV,
1781 	VMA_COMMIT_RESV,
1782 	VMA_END_RESV,
1783 };
1784 static long __vma_reservation_common(struct hstate *h,
1785 				struct vm_area_struct *vma, unsigned long addr,
1786 				enum vma_resv_mode mode)
1787 {
1788 	struct resv_map *resv;
1789 	pgoff_t idx;
1790 	long ret;
1791 
1792 	resv = vma_resv_map(vma);
1793 	if (!resv)
1794 		return 1;
1795 
1796 	idx = vma_hugecache_offset(h, vma, addr);
1797 	switch (mode) {
1798 	case VMA_NEEDS_RESV:
1799 		ret = region_chg(resv, idx, idx + 1);
1800 		break;
1801 	case VMA_COMMIT_RESV:
1802 		ret = region_add(resv, idx, idx + 1);
1803 		break;
1804 	case VMA_END_RESV:
1805 		region_abort(resv, idx, idx + 1);
1806 		ret = 0;
1807 		break;
1808 	default:
1809 		BUG();
1810 	}
1811 
1812 	if (vma->vm_flags & VM_MAYSHARE)
1813 		return ret;
1814 	else
1815 		return ret < 0 ? ret : 0;
1816 }
1817 
1818 static long vma_needs_reservation(struct hstate *h,
1819 			struct vm_area_struct *vma, unsigned long addr)
1820 {
1821 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1822 }
1823 
1824 static long vma_commit_reservation(struct hstate *h,
1825 			struct vm_area_struct *vma, unsigned long addr)
1826 {
1827 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1828 }
1829 
1830 static void vma_end_reservation(struct hstate *h,
1831 			struct vm_area_struct *vma, unsigned long addr)
1832 {
1833 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1834 }
1835 
1836 struct page *alloc_huge_page(struct vm_area_struct *vma,
1837 				    unsigned long addr, int avoid_reserve)
1838 {
1839 	struct hugepage_subpool *spool = subpool_vma(vma);
1840 	struct hstate *h = hstate_vma(vma);
1841 	struct page *page;
1842 	long map_chg, map_commit;
1843 	long gbl_chg;
1844 	int ret, idx;
1845 	struct hugetlb_cgroup *h_cg;
1846 
1847 	idx = hstate_index(h);
1848 	/*
1849 	 * Examine the region/reserve map to determine if the process
1850 	 * has a reservation for the page to be allocated.  A return
1851 	 * code of zero indicates a reservation exists (no change).
1852 	 */
1853 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1854 	if (map_chg < 0)
1855 		return ERR_PTR(-ENOMEM);
1856 
1857 	/*
1858 	 * Processes that did not create the mapping will have no
1859 	 * reserves as indicated by the region/reserve map. Check
1860 	 * that the allocation will not exceed the subpool limit.
1861 	 * Allocations for MAP_NORESERVE mappings also need to be
1862 	 * checked against any subpool limit.
1863 	 */
1864 	if (map_chg || avoid_reserve) {
1865 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
1866 		if (gbl_chg < 0) {
1867 			vma_end_reservation(h, vma, addr);
1868 			return ERR_PTR(-ENOSPC);
1869 		}
1870 
1871 		/*
1872 		 * Even though there was no reservation in the region/reserve
1873 		 * map, there could be reservations associated with the
1874 		 * subpool that can be used.  This would be indicated if the
1875 		 * return value of hugepage_subpool_get_pages() is zero.
1876 		 * However, if avoid_reserve is specified we still avoid even
1877 		 * the subpool reservations.
1878 		 */
1879 		if (avoid_reserve)
1880 			gbl_chg = 1;
1881 	}
1882 
1883 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1884 	if (ret)
1885 		goto out_subpool_put;
1886 
1887 	spin_lock(&hugetlb_lock);
1888 	/*
1889 	 * glb_chg is passed to indicate whether or not a page must be taken
1890 	 * from the global free pool (global change).  gbl_chg == 0 indicates
1891 	 * a reservation exists for the allocation.
1892 	 */
1893 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1894 	if (!page) {
1895 		spin_unlock(&hugetlb_lock);
1896 		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1897 		if (!page)
1898 			goto out_uncharge_cgroup;
1899 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1900 			SetPagePrivate(page);
1901 			h->resv_huge_pages--;
1902 		}
1903 		spin_lock(&hugetlb_lock);
1904 		list_move(&page->lru, &h->hugepage_activelist);
1905 		/* Fall through */
1906 	}
1907 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1908 	spin_unlock(&hugetlb_lock);
1909 
1910 	set_page_private(page, (unsigned long)spool);
1911 
1912 	map_commit = vma_commit_reservation(h, vma, addr);
1913 	if (unlikely(map_chg > map_commit)) {
1914 		/*
1915 		 * The page was added to the reservation map between
1916 		 * vma_needs_reservation and vma_commit_reservation.
1917 		 * This indicates a race with hugetlb_reserve_pages.
1918 		 * Adjust for the subpool count incremented above AND
1919 		 * in hugetlb_reserve_pages for the same page.  Also,
1920 		 * the reservation count added in hugetlb_reserve_pages
1921 		 * no longer applies.
1922 		 */
1923 		long rsv_adjust;
1924 
1925 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1926 		hugetlb_acct_memory(h, -rsv_adjust);
1927 	}
1928 	return page;
1929 
1930 out_uncharge_cgroup:
1931 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1932 out_subpool_put:
1933 	if (map_chg || avoid_reserve)
1934 		hugepage_subpool_put_pages(spool, 1);
1935 	vma_end_reservation(h, vma, addr);
1936 	return ERR_PTR(-ENOSPC);
1937 }
1938 
1939 /*
1940  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1941  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1942  * where no ERR_VALUE is expected to be returned.
1943  */
1944 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1945 				unsigned long addr, int avoid_reserve)
1946 {
1947 	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1948 	if (IS_ERR(page))
1949 		page = NULL;
1950 	return page;
1951 }
1952 
1953 int __weak alloc_bootmem_huge_page(struct hstate *h)
1954 {
1955 	struct huge_bootmem_page *m;
1956 	int nr_nodes, node;
1957 
1958 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1959 		void *addr;
1960 
1961 		addr = memblock_virt_alloc_try_nid_nopanic(
1962 				huge_page_size(h), huge_page_size(h),
1963 				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1964 		if (addr) {
1965 			/*
1966 			 * Use the beginning of the huge page to store the
1967 			 * huge_bootmem_page struct (until gather_bootmem
1968 			 * puts them into the mem_map).
1969 			 */
1970 			m = addr;
1971 			goto found;
1972 		}
1973 	}
1974 	return 0;
1975 
1976 found:
1977 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1978 	/* Put them into a private list first because mem_map is not up yet */
1979 	list_add(&m->list, &huge_boot_pages);
1980 	m->hstate = h;
1981 	return 1;
1982 }
1983 
1984 static void __init prep_compound_huge_page(struct page *page,
1985 		unsigned int order)
1986 {
1987 	if (unlikely(order > (MAX_ORDER - 1)))
1988 		prep_compound_gigantic_page(page, order);
1989 	else
1990 		prep_compound_page(page, order);
1991 }
1992 
1993 /* Put bootmem huge pages into the standard lists after mem_map is up */
1994 static void __init gather_bootmem_prealloc(void)
1995 {
1996 	struct huge_bootmem_page *m;
1997 
1998 	list_for_each_entry(m, &huge_boot_pages, list) {
1999 		struct hstate *h = m->hstate;
2000 		struct page *page;
2001 
2002 #ifdef CONFIG_HIGHMEM
2003 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
2004 		memblock_free_late(__pa(m),
2005 				   sizeof(struct huge_bootmem_page));
2006 #else
2007 		page = virt_to_page(m);
2008 #endif
2009 		WARN_ON(page_count(page) != 1);
2010 		prep_compound_huge_page(page, h->order);
2011 		WARN_ON(PageReserved(page));
2012 		prep_new_huge_page(h, page, page_to_nid(page));
2013 		/*
2014 		 * If we had gigantic hugepages allocated at boot time, we need
2015 		 * to restore the 'stolen' pages to totalram_pages in order to
2016 		 * fix confusing memory reports from free(1) and another
2017 		 * side-effects, like CommitLimit going negative.
2018 		 */
2019 		if (hstate_is_gigantic(h))
2020 			adjust_managed_page_count(page, 1 << h->order);
2021 	}
2022 }
2023 
2024 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2025 {
2026 	unsigned long i;
2027 
2028 	for (i = 0; i < h->max_huge_pages; ++i) {
2029 		if (hstate_is_gigantic(h)) {
2030 			if (!alloc_bootmem_huge_page(h))
2031 				break;
2032 		} else if (!alloc_fresh_huge_page(h,
2033 					 &node_states[N_MEMORY]))
2034 			break;
2035 	}
2036 	h->max_huge_pages = i;
2037 }
2038 
2039 static void __init hugetlb_init_hstates(void)
2040 {
2041 	struct hstate *h;
2042 
2043 	for_each_hstate(h) {
2044 		if (minimum_order > huge_page_order(h))
2045 			minimum_order = huge_page_order(h);
2046 
2047 		/* oversize hugepages were init'ed in early boot */
2048 		if (!hstate_is_gigantic(h))
2049 			hugetlb_hstate_alloc_pages(h);
2050 	}
2051 	VM_BUG_ON(minimum_order == UINT_MAX);
2052 }
2053 
2054 static char * __init memfmt(char *buf, unsigned long n)
2055 {
2056 	if (n >= (1UL << 30))
2057 		sprintf(buf, "%lu GB", n >> 30);
2058 	else if (n >= (1UL << 20))
2059 		sprintf(buf, "%lu MB", n >> 20);
2060 	else
2061 		sprintf(buf, "%lu KB", n >> 10);
2062 	return buf;
2063 }
2064 
2065 static void __init report_hugepages(void)
2066 {
2067 	struct hstate *h;
2068 
2069 	for_each_hstate(h) {
2070 		char buf[32];
2071 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2072 			memfmt(buf, huge_page_size(h)),
2073 			h->free_huge_pages);
2074 	}
2075 }
2076 
2077 #ifdef CONFIG_HIGHMEM
2078 static void try_to_free_low(struct hstate *h, unsigned long count,
2079 						nodemask_t *nodes_allowed)
2080 {
2081 	int i;
2082 
2083 	if (hstate_is_gigantic(h))
2084 		return;
2085 
2086 	for_each_node_mask(i, *nodes_allowed) {
2087 		struct page *page, *next;
2088 		struct list_head *freel = &h->hugepage_freelists[i];
2089 		list_for_each_entry_safe(page, next, freel, lru) {
2090 			if (count >= h->nr_huge_pages)
2091 				return;
2092 			if (PageHighMem(page))
2093 				continue;
2094 			list_del(&page->lru);
2095 			update_and_free_page(h, page);
2096 			h->free_huge_pages--;
2097 			h->free_huge_pages_node[page_to_nid(page)]--;
2098 		}
2099 	}
2100 }
2101 #else
2102 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2103 						nodemask_t *nodes_allowed)
2104 {
2105 }
2106 #endif
2107 
2108 /*
2109  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2110  * balanced by operating on them in a round-robin fashion.
2111  * Returns 1 if an adjustment was made.
2112  */
2113 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2114 				int delta)
2115 {
2116 	int nr_nodes, node;
2117 
2118 	VM_BUG_ON(delta != -1 && delta != 1);
2119 
2120 	if (delta < 0) {
2121 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2122 			if (h->surplus_huge_pages_node[node])
2123 				goto found;
2124 		}
2125 	} else {
2126 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2127 			if (h->surplus_huge_pages_node[node] <
2128 					h->nr_huge_pages_node[node])
2129 				goto found;
2130 		}
2131 	}
2132 	return 0;
2133 
2134 found:
2135 	h->surplus_huge_pages += delta;
2136 	h->surplus_huge_pages_node[node] += delta;
2137 	return 1;
2138 }
2139 
2140 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2141 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2142 						nodemask_t *nodes_allowed)
2143 {
2144 	unsigned long min_count, ret;
2145 
2146 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
2147 		return h->max_huge_pages;
2148 
2149 	/*
2150 	 * Increase the pool size
2151 	 * First take pages out of surplus state.  Then make up the
2152 	 * remaining difference by allocating fresh huge pages.
2153 	 *
2154 	 * We might race with __alloc_buddy_huge_page() here and be unable
2155 	 * to convert a surplus huge page to a normal huge page. That is
2156 	 * not critical, though, it just means the overall size of the
2157 	 * pool might be one hugepage larger than it needs to be, but
2158 	 * within all the constraints specified by the sysctls.
2159 	 */
2160 	spin_lock(&hugetlb_lock);
2161 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2162 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2163 			break;
2164 	}
2165 
2166 	while (count > persistent_huge_pages(h)) {
2167 		/*
2168 		 * If this allocation races such that we no longer need the
2169 		 * page, free_huge_page will handle it by freeing the page
2170 		 * and reducing the surplus.
2171 		 */
2172 		spin_unlock(&hugetlb_lock);
2173 		if (hstate_is_gigantic(h))
2174 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2175 		else
2176 			ret = alloc_fresh_huge_page(h, nodes_allowed);
2177 		spin_lock(&hugetlb_lock);
2178 		if (!ret)
2179 			goto out;
2180 
2181 		/* Bail for signals. Probably ctrl-c from user */
2182 		if (signal_pending(current))
2183 			goto out;
2184 	}
2185 
2186 	/*
2187 	 * Decrease the pool size
2188 	 * First return free pages to the buddy allocator (being careful
2189 	 * to keep enough around to satisfy reservations).  Then place
2190 	 * pages into surplus state as needed so the pool will shrink
2191 	 * to the desired size as pages become free.
2192 	 *
2193 	 * By placing pages into the surplus state independent of the
2194 	 * overcommit value, we are allowing the surplus pool size to
2195 	 * exceed overcommit. There are few sane options here. Since
2196 	 * __alloc_buddy_huge_page() is checking the global counter,
2197 	 * though, we'll note that we're not allowed to exceed surplus
2198 	 * and won't grow the pool anywhere else. Not until one of the
2199 	 * sysctls are changed, or the surplus pages go out of use.
2200 	 */
2201 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2202 	min_count = max(count, min_count);
2203 	try_to_free_low(h, min_count, nodes_allowed);
2204 	while (min_count < persistent_huge_pages(h)) {
2205 		if (!free_pool_huge_page(h, nodes_allowed, 0))
2206 			break;
2207 		cond_resched_lock(&hugetlb_lock);
2208 	}
2209 	while (count < persistent_huge_pages(h)) {
2210 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2211 			break;
2212 	}
2213 out:
2214 	ret = persistent_huge_pages(h);
2215 	spin_unlock(&hugetlb_lock);
2216 	return ret;
2217 }
2218 
2219 #define HSTATE_ATTR_RO(_name) \
2220 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2221 
2222 #define HSTATE_ATTR(_name) \
2223 	static struct kobj_attribute _name##_attr = \
2224 		__ATTR(_name, 0644, _name##_show, _name##_store)
2225 
2226 static struct kobject *hugepages_kobj;
2227 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2228 
2229 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2230 
2231 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2232 {
2233 	int i;
2234 
2235 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2236 		if (hstate_kobjs[i] == kobj) {
2237 			if (nidp)
2238 				*nidp = NUMA_NO_NODE;
2239 			return &hstates[i];
2240 		}
2241 
2242 	return kobj_to_node_hstate(kobj, nidp);
2243 }
2244 
2245 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2246 					struct kobj_attribute *attr, char *buf)
2247 {
2248 	struct hstate *h;
2249 	unsigned long nr_huge_pages;
2250 	int nid;
2251 
2252 	h = kobj_to_hstate(kobj, &nid);
2253 	if (nid == NUMA_NO_NODE)
2254 		nr_huge_pages = h->nr_huge_pages;
2255 	else
2256 		nr_huge_pages = h->nr_huge_pages_node[nid];
2257 
2258 	return sprintf(buf, "%lu\n", nr_huge_pages);
2259 }
2260 
2261 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2262 					   struct hstate *h, int nid,
2263 					   unsigned long count, size_t len)
2264 {
2265 	int err;
2266 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2267 
2268 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2269 		err = -EINVAL;
2270 		goto out;
2271 	}
2272 
2273 	if (nid == NUMA_NO_NODE) {
2274 		/*
2275 		 * global hstate attribute
2276 		 */
2277 		if (!(obey_mempolicy &&
2278 				init_nodemask_of_mempolicy(nodes_allowed))) {
2279 			NODEMASK_FREE(nodes_allowed);
2280 			nodes_allowed = &node_states[N_MEMORY];
2281 		}
2282 	} else if (nodes_allowed) {
2283 		/*
2284 		 * per node hstate attribute: adjust count to global,
2285 		 * but restrict alloc/free to the specified node.
2286 		 */
2287 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2288 		init_nodemask_of_node(nodes_allowed, nid);
2289 	} else
2290 		nodes_allowed = &node_states[N_MEMORY];
2291 
2292 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2293 
2294 	if (nodes_allowed != &node_states[N_MEMORY])
2295 		NODEMASK_FREE(nodes_allowed);
2296 
2297 	return len;
2298 out:
2299 	NODEMASK_FREE(nodes_allowed);
2300 	return err;
2301 }
2302 
2303 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2304 					 struct kobject *kobj, const char *buf,
2305 					 size_t len)
2306 {
2307 	struct hstate *h;
2308 	unsigned long count;
2309 	int nid;
2310 	int err;
2311 
2312 	err = kstrtoul(buf, 10, &count);
2313 	if (err)
2314 		return err;
2315 
2316 	h = kobj_to_hstate(kobj, &nid);
2317 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2318 }
2319 
2320 static ssize_t nr_hugepages_show(struct kobject *kobj,
2321 				       struct kobj_attribute *attr, char *buf)
2322 {
2323 	return nr_hugepages_show_common(kobj, attr, buf);
2324 }
2325 
2326 static ssize_t nr_hugepages_store(struct kobject *kobj,
2327 	       struct kobj_attribute *attr, const char *buf, size_t len)
2328 {
2329 	return nr_hugepages_store_common(false, kobj, buf, len);
2330 }
2331 HSTATE_ATTR(nr_hugepages);
2332 
2333 #ifdef CONFIG_NUMA
2334 
2335 /*
2336  * hstate attribute for optionally mempolicy-based constraint on persistent
2337  * huge page alloc/free.
2338  */
2339 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2340 				       struct kobj_attribute *attr, char *buf)
2341 {
2342 	return nr_hugepages_show_common(kobj, attr, buf);
2343 }
2344 
2345 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2346 	       struct kobj_attribute *attr, const char *buf, size_t len)
2347 {
2348 	return nr_hugepages_store_common(true, kobj, buf, len);
2349 }
2350 HSTATE_ATTR(nr_hugepages_mempolicy);
2351 #endif
2352 
2353 
2354 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2355 					struct kobj_attribute *attr, char *buf)
2356 {
2357 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2358 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2359 }
2360 
2361 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2362 		struct kobj_attribute *attr, const char *buf, size_t count)
2363 {
2364 	int err;
2365 	unsigned long input;
2366 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2367 
2368 	if (hstate_is_gigantic(h))
2369 		return -EINVAL;
2370 
2371 	err = kstrtoul(buf, 10, &input);
2372 	if (err)
2373 		return err;
2374 
2375 	spin_lock(&hugetlb_lock);
2376 	h->nr_overcommit_huge_pages = input;
2377 	spin_unlock(&hugetlb_lock);
2378 
2379 	return count;
2380 }
2381 HSTATE_ATTR(nr_overcommit_hugepages);
2382 
2383 static ssize_t free_hugepages_show(struct kobject *kobj,
2384 					struct kobj_attribute *attr, char *buf)
2385 {
2386 	struct hstate *h;
2387 	unsigned long free_huge_pages;
2388 	int nid;
2389 
2390 	h = kobj_to_hstate(kobj, &nid);
2391 	if (nid == NUMA_NO_NODE)
2392 		free_huge_pages = h->free_huge_pages;
2393 	else
2394 		free_huge_pages = h->free_huge_pages_node[nid];
2395 
2396 	return sprintf(buf, "%lu\n", free_huge_pages);
2397 }
2398 HSTATE_ATTR_RO(free_hugepages);
2399 
2400 static ssize_t resv_hugepages_show(struct kobject *kobj,
2401 					struct kobj_attribute *attr, char *buf)
2402 {
2403 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2404 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2405 }
2406 HSTATE_ATTR_RO(resv_hugepages);
2407 
2408 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2409 					struct kobj_attribute *attr, char *buf)
2410 {
2411 	struct hstate *h;
2412 	unsigned long surplus_huge_pages;
2413 	int nid;
2414 
2415 	h = kobj_to_hstate(kobj, &nid);
2416 	if (nid == NUMA_NO_NODE)
2417 		surplus_huge_pages = h->surplus_huge_pages;
2418 	else
2419 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
2420 
2421 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2422 }
2423 HSTATE_ATTR_RO(surplus_hugepages);
2424 
2425 static struct attribute *hstate_attrs[] = {
2426 	&nr_hugepages_attr.attr,
2427 	&nr_overcommit_hugepages_attr.attr,
2428 	&free_hugepages_attr.attr,
2429 	&resv_hugepages_attr.attr,
2430 	&surplus_hugepages_attr.attr,
2431 #ifdef CONFIG_NUMA
2432 	&nr_hugepages_mempolicy_attr.attr,
2433 #endif
2434 	NULL,
2435 };
2436 
2437 static struct attribute_group hstate_attr_group = {
2438 	.attrs = hstate_attrs,
2439 };
2440 
2441 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2442 				    struct kobject **hstate_kobjs,
2443 				    struct attribute_group *hstate_attr_group)
2444 {
2445 	int retval;
2446 	int hi = hstate_index(h);
2447 
2448 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2449 	if (!hstate_kobjs[hi])
2450 		return -ENOMEM;
2451 
2452 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2453 	if (retval)
2454 		kobject_put(hstate_kobjs[hi]);
2455 
2456 	return retval;
2457 }
2458 
2459 static void __init hugetlb_sysfs_init(void)
2460 {
2461 	struct hstate *h;
2462 	int err;
2463 
2464 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2465 	if (!hugepages_kobj)
2466 		return;
2467 
2468 	for_each_hstate(h) {
2469 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2470 					 hstate_kobjs, &hstate_attr_group);
2471 		if (err)
2472 			pr_err("Hugetlb: Unable to add hstate %s", h->name);
2473 	}
2474 }
2475 
2476 #ifdef CONFIG_NUMA
2477 
2478 /*
2479  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2480  * with node devices in node_devices[] using a parallel array.  The array
2481  * index of a node device or _hstate == node id.
2482  * This is here to avoid any static dependency of the node device driver, in
2483  * the base kernel, on the hugetlb module.
2484  */
2485 struct node_hstate {
2486 	struct kobject		*hugepages_kobj;
2487 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
2488 };
2489 static struct node_hstate node_hstates[MAX_NUMNODES];
2490 
2491 /*
2492  * A subset of global hstate attributes for node devices
2493  */
2494 static struct attribute *per_node_hstate_attrs[] = {
2495 	&nr_hugepages_attr.attr,
2496 	&free_hugepages_attr.attr,
2497 	&surplus_hugepages_attr.attr,
2498 	NULL,
2499 };
2500 
2501 static struct attribute_group per_node_hstate_attr_group = {
2502 	.attrs = per_node_hstate_attrs,
2503 };
2504 
2505 /*
2506  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2507  * Returns node id via non-NULL nidp.
2508  */
2509 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2510 {
2511 	int nid;
2512 
2513 	for (nid = 0; nid < nr_node_ids; nid++) {
2514 		struct node_hstate *nhs = &node_hstates[nid];
2515 		int i;
2516 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
2517 			if (nhs->hstate_kobjs[i] == kobj) {
2518 				if (nidp)
2519 					*nidp = nid;
2520 				return &hstates[i];
2521 			}
2522 	}
2523 
2524 	BUG();
2525 	return NULL;
2526 }
2527 
2528 /*
2529  * Unregister hstate attributes from a single node device.
2530  * No-op if no hstate attributes attached.
2531  */
2532 static void hugetlb_unregister_node(struct node *node)
2533 {
2534 	struct hstate *h;
2535 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2536 
2537 	if (!nhs->hugepages_kobj)
2538 		return;		/* no hstate attributes */
2539 
2540 	for_each_hstate(h) {
2541 		int idx = hstate_index(h);
2542 		if (nhs->hstate_kobjs[idx]) {
2543 			kobject_put(nhs->hstate_kobjs[idx]);
2544 			nhs->hstate_kobjs[idx] = NULL;
2545 		}
2546 	}
2547 
2548 	kobject_put(nhs->hugepages_kobj);
2549 	nhs->hugepages_kobj = NULL;
2550 }
2551 
2552 /*
2553  * hugetlb module exit:  unregister hstate attributes from node devices
2554  * that have them.
2555  */
2556 static void hugetlb_unregister_all_nodes(void)
2557 {
2558 	int nid;
2559 
2560 	/*
2561 	 * disable node device registrations.
2562 	 */
2563 	register_hugetlbfs_with_node(NULL, NULL);
2564 
2565 	/*
2566 	 * remove hstate attributes from any nodes that have them.
2567 	 */
2568 	for (nid = 0; nid < nr_node_ids; nid++)
2569 		hugetlb_unregister_node(node_devices[nid]);
2570 }
2571 
2572 /*
2573  * Register hstate attributes for a single node device.
2574  * No-op if attributes already registered.
2575  */
2576 static void hugetlb_register_node(struct node *node)
2577 {
2578 	struct hstate *h;
2579 	struct node_hstate *nhs = &node_hstates[node->dev.id];
2580 	int err;
2581 
2582 	if (nhs->hugepages_kobj)
2583 		return;		/* already allocated */
2584 
2585 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2586 							&node->dev.kobj);
2587 	if (!nhs->hugepages_kobj)
2588 		return;
2589 
2590 	for_each_hstate(h) {
2591 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2592 						nhs->hstate_kobjs,
2593 						&per_node_hstate_attr_group);
2594 		if (err) {
2595 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2596 				h->name, node->dev.id);
2597 			hugetlb_unregister_node(node);
2598 			break;
2599 		}
2600 	}
2601 }
2602 
2603 /*
2604  * hugetlb init time:  register hstate attributes for all registered node
2605  * devices of nodes that have memory.  All on-line nodes should have
2606  * registered their associated device by this time.
2607  */
2608 static void __init hugetlb_register_all_nodes(void)
2609 {
2610 	int nid;
2611 
2612 	for_each_node_state(nid, N_MEMORY) {
2613 		struct node *node = node_devices[nid];
2614 		if (node->dev.id == nid)
2615 			hugetlb_register_node(node);
2616 	}
2617 
2618 	/*
2619 	 * Let the node device driver know we're here so it can
2620 	 * [un]register hstate attributes on node hotplug.
2621 	 */
2622 	register_hugetlbfs_with_node(hugetlb_register_node,
2623 				     hugetlb_unregister_node);
2624 }
2625 #else	/* !CONFIG_NUMA */
2626 
2627 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2628 {
2629 	BUG();
2630 	if (nidp)
2631 		*nidp = -1;
2632 	return NULL;
2633 }
2634 
2635 static void hugetlb_unregister_all_nodes(void) { }
2636 
2637 static void hugetlb_register_all_nodes(void) { }
2638 
2639 #endif
2640 
2641 static void __exit hugetlb_exit(void)
2642 {
2643 	struct hstate *h;
2644 
2645 	hugetlb_unregister_all_nodes();
2646 
2647 	for_each_hstate(h) {
2648 		kobject_put(hstate_kobjs[hstate_index(h)]);
2649 	}
2650 
2651 	kobject_put(hugepages_kobj);
2652 	kfree(hugetlb_fault_mutex_table);
2653 }
2654 module_exit(hugetlb_exit);
2655 
2656 static int __init hugetlb_init(void)
2657 {
2658 	int i;
2659 
2660 	if (!hugepages_supported())
2661 		return 0;
2662 
2663 	if (!size_to_hstate(default_hstate_size)) {
2664 		default_hstate_size = HPAGE_SIZE;
2665 		if (!size_to_hstate(default_hstate_size))
2666 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2667 	}
2668 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2669 	if (default_hstate_max_huge_pages)
2670 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2671 
2672 	hugetlb_init_hstates();
2673 	gather_bootmem_prealloc();
2674 	report_hugepages();
2675 
2676 	hugetlb_sysfs_init();
2677 	hugetlb_register_all_nodes();
2678 	hugetlb_cgroup_file_init();
2679 
2680 #ifdef CONFIG_SMP
2681 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2682 #else
2683 	num_fault_mutexes = 1;
2684 #endif
2685 	hugetlb_fault_mutex_table =
2686 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2687 	BUG_ON(!hugetlb_fault_mutex_table);
2688 
2689 	for (i = 0; i < num_fault_mutexes; i++)
2690 		mutex_init(&hugetlb_fault_mutex_table[i]);
2691 	return 0;
2692 }
2693 module_init(hugetlb_init);
2694 
2695 /* Should be called on processing a hugepagesz=... option */
2696 void __init hugetlb_add_hstate(unsigned int order)
2697 {
2698 	struct hstate *h;
2699 	unsigned long i;
2700 
2701 	if (size_to_hstate(PAGE_SIZE << order)) {
2702 		pr_warning("hugepagesz= specified twice, ignoring\n");
2703 		return;
2704 	}
2705 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2706 	BUG_ON(order == 0);
2707 	h = &hstates[hugetlb_max_hstate++];
2708 	h->order = order;
2709 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2710 	h->nr_huge_pages = 0;
2711 	h->free_huge_pages = 0;
2712 	for (i = 0; i < MAX_NUMNODES; ++i)
2713 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2714 	INIT_LIST_HEAD(&h->hugepage_activelist);
2715 	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2716 	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2717 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2718 					huge_page_size(h)/1024);
2719 
2720 	parsed_hstate = h;
2721 }
2722 
2723 static int __init hugetlb_nrpages_setup(char *s)
2724 {
2725 	unsigned long *mhp;
2726 	static unsigned long *last_mhp;
2727 
2728 	/*
2729 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2730 	 * so this hugepages= parameter goes to the "default hstate".
2731 	 */
2732 	if (!hugetlb_max_hstate)
2733 		mhp = &default_hstate_max_huge_pages;
2734 	else
2735 		mhp = &parsed_hstate->max_huge_pages;
2736 
2737 	if (mhp == last_mhp) {
2738 		pr_warning("hugepages= specified twice without "
2739 			   "interleaving hugepagesz=, ignoring\n");
2740 		return 1;
2741 	}
2742 
2743 	if (sscanf(s, "%lu", mhp) <= 0)
2744 		*mhp = 0;
2745 
2746 	/*
2747 	 * Global state is always initialized later in hugetlb_init.
2748 	 * But we need to allocate >= MAX_ORDER hstates here early to still
2749 	 * use the bootmem allocator.
2750 	 */
2751 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2752 		hugetlb_hstate_alloc_pages(parsed_hstate);
2753 
2754 	last_mhp = mhp;
2755 
2756 	return 1;
2757 }
2758 __setup("hugepages=", hugetlb_nrpages_setup);
2759 
2760 static int __init hugetlb_default_setup(char *s)
2761 {
2762 	default_hstate_size = memparse(s, &s);
2763 	return 1;
2764 }
2765 __setup("default_hugepagesz=", hugetlb_default_setup);
2766 
2767 static unsigned int cpuset_mems_nr(unsigned int *array)
2768 {
2769 	int node;
2770 	unsigned int nr = 0;
2771 
2772 	for_each_node_mask(node, cpuset_current_mems_allowed)
2773 		nr += array[node];
2774 
2775 	return nr;
2776 }
2777 
2778 #ifdef CONFIG_SYSCTL
2779 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2780 			 struct ctl_table *table, int write,
2781 			 void __user *buffer, size_t *length, loff_t *ppos)
2782 {
2783 	struct hstate *h = &default_hstate;
2784 	unsigned long tmp = h->max_huge_pages;
2785 	int ret;
2786 
2787 	if (!hugepages_supported())
2788 		return -ENOTSUPP;
2789 
2790 	table->data = &tmp;
2791 	table->maxlen = sizeof(unsigned long);
2792 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2793 	if (ret)
2794 		goto out;
2795 
2796 	if (write)
2797 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
2798 						  NUMA_NO_NODE, tmp, *length);
2799 out:
2800 	return ret;
2801 }
2802 
2803 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2804 			  void __user *buffer, size_t *length, loff_t *ppos)
2805 {
2806 
2807 	return hugetlb_sysctl_handler_common(false, table, write,
2808 							buffer, length, ppos);
2809 }
2810 
2811 #ifdef CONFIG_NUMA
2812 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2813 			  void __user *buffer, size_t *length, loff_t *ppos)
2814 {
2815 	return hugetlb_sysctl_handler_common(true, table, write,
2816 							buffer, length, ppos);
2817 }
2818 #endif /* CONFIG_NUMA */
2819 
2820 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2821 			void __user *buffer,
2822 			size_t *length, loff_t *ppos)
2823 {
2824 	struct hstate *h = &default_hstate;
2825 	unsigned long tmp;
2826 	int ret;
2827 
2828 	if (!hugepages_supported())
2829 		return -ENOTSUPP;
2830 
2831 	tmp = h->nr_overcommit_huge_pages;
2832 
2833 	if (write && hstate_is_gigantic(h))
2834 		return -EINVAL;
2835 
2836 	table->data = &tmp;
2837 	table->maxlen = sizeof(unsigned long);
2838 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2839 	if (ret)
2840 		goto out;
2841 
2842 	if (write) {
2843 		spin_lock(&hugetlb_lock);
2844 		h->nr_overcommit_huge_pages = tmp;
2845 		spin_unlock(&hugetlb_lock);
2846 	}
2847 out:
2848 	return ret;
2849 }
2850 
2851 #endif /* CONFIG_SYSCTL */
2852 
2853 void hugetlb_report_meminfo(struct seq_file *m)
2854 {
2855 	struct hstate *h = &default_hstate;
2856 	if (!hugepages_supported())
2857 		return;
2858 	seq_printf(m,
2859 			"HugePages_Total:   %5lu\n"
2860 			"HugePages_Free:    %5lu\n"
2861 			"HugePages_Rsvd:    %5lu\n"
2862 			"HugePages_Surp:    %5lu\n"
2863 			"Hugepagesize:   %8lu kB\n",
2864 			h->nr_huge_pages,
2865 			h->free_huge_pages,
2866 			h->resv_huge_pages,
2867 			h->surplus_huge_pages,
2868 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2869 }
2870 
2871 int hugetlb_report_node_meminfo(int nid, char *buf)
2872 {
2873 	struct hstate *h = &default_hstate;
2874 	if (!hugepages_supported())
2875 		return 0;
2876 	return sprintf(buf,
2877 		"Node %d HugePages_Total: %5u\n"
2878 		"Node %d HugePages_Free:  %5u\n"
2879 		"Node %d HugePages_Surp:  %5u\n",
2880 		nid, h->nr_huge_pages_node[nid],
2881 		nid, h->free_huge_pages_node[nid],
2882 		nid, h->surplus_huge_pages_node[nid]);
2883 }
2884 
2885 void hugetlb_show_meminfo(void)
2886 {
2887 	struct hstate *h;
2888 	int nid;
2889 
2890 	if (!hugepages_supported())
2891 		return;
2892 
2893 	for_each_node_state(nid, N_MEMORY)
2894 		for_each_hstate(h)
2895 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2896 				nid,
2897 				h->nr_huge_pages_node[nid],
2898 				h->free_huge_pages_node[nid],
2899 				h->surplus_huge_pages_node[nid],
2900 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2901 }
2902 
2903 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2904 {
2905 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2906 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2907 }
2908 
2909 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2910 unsigned long hugetlb_total_pages(void)
2911 {
2912 	struct hstate *h;
2913 	unsigned long nr_total_pages = 0;
2914 
2915 	for_each_hstate(h)
2916 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2917 	return nr_total_pages;
2918 }
2919 
2920 static int hugetlb_acct_memory(struct hstate *h, long delta)
2921 {
2922 	int ret = -ENOMEM;
2923 
2924 	spin_lock(&hugetlb_lock);
2925 	/*
2926 	 * When cpuset is configured, it breaks the strict hugetlb page
2927 	 * reservation as the accounting is done on a global variable. Such
2928 	 * reservation is completely rubbish in the presence of cpuset because
2929 	 * the reservation is not checked against page availability for the
2930 	 * current cpuset. Application can still potentially OOM'ed by kernel
2931 	 * with lack of free htlb page in cpuset that the task is in.
2932 	 * Attempt to enforce strict accounting with cpuset is almost
2933 	 * impossible (or too ugly) because cpuset is too fluid that
2934 	 * task or memory node can be dynamically moved between cpusets.
2935 	 *
2936 	 * The change of semantics for shared hugetlb mapping with cpuset is
2937 	 * undesirable. However, in order to preserve some of the semantics,
2938 	 * we fall back to check against current free page availability as
2939 	 * a best attempt and hopefully to minimize the impact of changing
2940 	 * semantics that cpuset has.
2941 	 */
2942 	if (delta > 0) {
2943 		if (gather_surplus_pages(h, delta) < 0)
2944 			goto out;
2945 
2946 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2947 			return_unused_surplus_pages(h, delta);
2948 			goto out;
2949 		}
2950 	}
2951 
2952 	ret = 0;
2953 	if (delta < 0)
2954 		return_unused_surplus_pages(h, (unsigned long) -delta);
2955 
2956 out:
2957 	spin_unlock(&hugetlb_lock);
2958 	return ret;
2959 }
2960 
2961 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2962 {
2963 	struct resv_map *resv = vma_resv_map(vma);
2964 
2965 	/*
2966 	 * This new VMA should share its siblings reservation map if present.
2967 	 * The VMA will only ever have a valid reservation map pointer where
2968 	 * it is being copied for another still existing VMA.  As that VMA
2969 	 * has a reference to the reservation map it cannot disappear until
2970 	 * after this open call completes.  It is therefore safe to take a
2971 	 * new reference here without additional locking.
2972 	 */
2973 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2974 		kref_get(&resv->refs);
2975 }
2976 
2977 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2978 {
2979 	struct hstate *h = hstate_vma(vma);
2980 	struct resv_map *resv = vma_resv_map(vma);
2981 	struct hugepage_subpool *spool = subpool_vma(vma);
2982 	unsigned long reserve, start, end;
2983 	long gbl_reserve;
2984 
2985 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2986 		return;
2987 
2988 	start = vma_hugecache_offset(h, vma, vma->vm_start);
2989 	end = vma_hugecache_offset(h, vma, vma->vm_end);
2990 
2991 	reserve = (end - start) - region_count(resv, start, end);
2992 
2993 	kref_put(&resv->refs, resv_map_release);
2994 
2995 	if (reserve) {
2996 		/*
2997 		 * Decrement reserve counts.  The global reserve count may be
2998 		 * adjusted if the subpool has a minimum size.
2999 		 */
3000 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3001 		hugetlb_acct_memory(h, -gbl_reserve);
3002 	}
3003 }
3004 
3005 /*
3006  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3007  * handle_mm_fault() to try to instantiate regular-sized pages in the
3008  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
3009  * this far.
3010  */
3011 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3012 {
3013 	BUG();
3014 	return 0;
3015 }
3016 
3017 const struct vm_operations_struct hugetlb_vm_ops = {
3018 	.fault = hugetlb_vm_op_fault,
3019 	.open = hugetlb_vm_op_open,
3020 	.close = hugetlb_vm_op_close,
3021 };
3022 
3023 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3024 				int writable)
3025 {
3026 	pte_t entry;
3027 
3028 	if (writable) {
3029 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3030 					 vma->vm_page_prot)));
3031 	} else {
3032 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3033 					   vma->vm_page_prot));
3034 	}
3035 	entry = pte_mkyoung(entry);
3036 	entry = pte_mkhuge(entry);
3037 	entry = arch_make_huge_pte(entry, vma, page, writable);
3038 
3039 	return entry;
3040 }
3041 
3042 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3043 				   unsigned long address, pte_t *ptep)
3044 {
3045 	pte_t entry;
3046 
3047 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3048 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3049 		update_mmu_cache(vma, address, ptep);
3050 }
3051 
3052 static int is_hugetlb_entry_migration(pte_t pte)
3053 {
3054 	swp_entry_t swp;
3055 
3056 	if (huge_pte_none(pte) || pte_present(pte))
3057 		return 0;
3058 	swp = pte_to_swp_entry(pte);
3059 	if (non_swap_entry(swp) && is_migration_entry(swp))
3060 		return 1;
3061 	else
3062 		return 0;
3063 }
3064 
3065 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3066 {
3067 	swp_entry_t swp;
3068 
3069 	if (huge_pte_none(pte) || pte_present(pte))
3070 		return 0;
3071 	swp = pte_to_swp_entry(pte);
3072 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3073 		return 1;
3074 	else
3075 		return 0;
3076 }
3077 
3078 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3079 			    struct vm_area_struct *vma)
3080 {
3081 	pte_t *src_pte, *dst_pte, entry;
3082 	struct page *ptepage;
3083 	unsigned long addr;
3084 	int cow;
3085 	struct hstate *h = hstate_vma(vma);
3086 	unsigned long sz = huge_page_size(h);
3087 	unsigned long mmun_start;	/* For mmu_notifiers */
3088 	unsigned long mmun_end;		/* For mmu_notifiers */
3089 	int ret = 0;
3090 
3091 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3092 
3093 	mmun_start = vma->vm_start;
3094 	mmun_end = vma->vm_end;
3095 	if (cow)
3096 		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3097 
3098 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3099 		spinlock_t *src_ptl, *dst_ptl;
3100 		src_pte = huge_pte_offset(src, addr);
3101 		if (!src_pte)
3102 			continue;
3103 		dst_pte = huge_pte_alloc(dst, addr, sz);
3104 		if (!dst_pte) {
3105 			ret = -ENOMEM;
3106 			break;
3107 		}
3108 
3109 		/* If the pagetables are shared don't copy or take references */
3110 		if (dst_pte == src_pte)
3111 			continue;
3112 
3113 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3114 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3115 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3116 		entry = huge_ptep_get(src_pte);
3117 		if (huge_pte_none(entry)) { /* skip none entry */
3118 			;
3119 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
3120 				    is_hugetlb_entry_hwpoisoned(entry))) {
3121 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
3122 
3123 			if (is_write_migration_entry(swp_entry) && cow) {
3124 				/*
3125 				 * COW mappings require pages in both
3126 				 * parent and child to be set to read.
3127 				 */
3128 				make_migration_entry_read(&swp_entry);
3129 				entry = swp_entry_to_pte(swp_entry);
3130 				set_huge_pte_at(src, addr, src_pte, entry);
3131 			}
3132 			set_huge_pte_at(dst, addr, dst_pte, entry);
3133 		} else {
3134 			if (cow) {
3135 				huge_ptep_set_wrprotect(src, addr, src_pte);
3136 				mmu_notifier_invalidate_range(src, mmun_start,
3137 								   mmun_end);
3138 			}
3139 			entry = huge_ptep_get(src_pte);
3140 			ptepage = pte_page(entry);
3141 			get_page(ptepage);
3142 			page_dup_rmap(ptepage);
3143 			set_huge_pte_at(dst, addr, dst_pte, entry);
3144 			hugetlb_count_add(pages_per_huge_page(h), dst);
3145 		}
3146 		spin_unlock(src_ptl);
3147 		spin_unlock(dst_ptl);
3148 	}
3149 
3150 	if (cow)
3151 		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3152 
3153 	return ret;
3154 }
3155 
3156 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3157 			    unsigned long start, unsigned long end,
3158 			    struct page *ref_page)
3159 {
3160 	int force_flush = 0;
3161 	struct mm_struct *mm = vma->vm_mm;
3162 	unsigned long address;
3163 	pte_t *ptep;
3164 	pte_t pte;
3165 	spinlock_t *ptl;
3166 	struct page *page;
3167 	struct hstate *h = hstate_vma(vma);
3168 	unsigned long sz = huge_page_size(h);
3169 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
3170 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
3171 
3172 	WARN_ON(!is_vm_hugetlb_page(vma));
3173 	BUG_ON(start & ~huge_page_mask(h));
3174 	BUG_ON(end & ~huge_page_mask(h));
3175 
3176 	tlb_start_vma(tlb, vma);
3177 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3178 	address = start;
3179 again:
3180 	for (; address < end; address += sz) {
3181 		ptep = huge_pte_offset(mm, address);
3182 		if (!ptep)
3183 			continue;
3184 
3185 		ptl = huge_pte_lock(h, mm, ptep);
3186 		if (huge_pmd_unshare(mm, &address, ptep))
3187 			goto unlock;
3188 
3189 		pte = huge_ptep_get(ptep);
3190 		if (huge_pte_none(pte))
3191 			goto unlock;
3192 
3193 		/*
3194 		 * Migrating hugepage or HWPoisoned hugepage is already
3195 		 * unmapped and its refcount is dropped, so just clear pte here.
3196 		 */
3197 		if (unlikely(!pte_present(pte))) {
3198 			huge_pte_clear(mm, address, ptep);
3199 			goto unlock;
3200 		}
3201 
3202 		page = pte_page(pte);
3203 		/*
3204 		 * If a reference page is supplied, it is because a specific
3205 		 * page is being unmapped, not a range. Ensure the page we
3206 		 * are about to unmap is the actual page of interest.
3207 		 */
3208 		if (ref_page) {
3209 			if (page != ref_page)
3210 				goto unlock;
3211 
3212 			/*
3213 			 * Mark the VMA as having unmapped its page so that
3214 			 * future faults in this VMA will fail rather than
3215 			 * looking like data was lost
3216 			 */
3217 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3218 		}
3219 
3220 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3221 		tlb_remove_tlb_entry(tlb, ptep, address);
3222 		if (huge_pte_dirty(pte))
3223 			set_page_dirty(page);
3224 
3225 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3226 		page_remove_rmap(page);
3227 		force_flush = !__tlb_remove_page(tlb, page);
3228 		if (force_flush) {
3229 			address += sz;
3230 			spin_unlock(ptl);
3231 			break;
3232 		}
3233 		/* Bail out after unmapping reference page if supplied */
3234 		if (ref_page) {
3235 			spin_unlock(ptl);
3236 			break;
3237 		}
3238 unlock:
3239 		spin_unlock(ptl);
3240 	}
3241 	/*
3242 	 * mmu_gather ran out of room to batch pages, we break out of
3243 	 * the PTE lock to avoid doing the potential expensive TLB invalidate
3244 	 * and page-free while holding it.
3245 	 */
3246 	if (force_flush) {
3247 		force_flush = 0;
3248 		tlb_flush_mmu(tlb);
3249 		if (address < end && !ref_page)
3250 			goto again;
3251 	}
3252 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3253 	tlb_end_vma(tlb, vma);
3254 }
3255 
3256 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3257 			  struct vm_area_struct *vma, unsigned long start,
3258 			  unsigned long end, struct page *ref_page)
3259 {
3260 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3261 
3262 	/*
3263 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3264 	 * test will fail on a vma being torn down, and not grab a page table
3265 	 * on its way out.  We're lucky that the flag has such an appropriate
3266 	 * name, and can in fact be safely cleared here. We could clear it
3267 	 * before the __unmap_hugepage_range above, but all that's necessary
3268 	 * is to clear it before releasing the i_mmap_rwsem. This works
3269 	 * because in the context this is called, the VMA is about to be
3270 	 * destroyed and the i_mmap_rwsem is held.
3271 	 */
3272 	vma->vm_flags &= ~VM_MAYSHARE;
3273 }
3274 
3275 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3276 			  unsigned long end, struct page *ref_page)
3277 {
3278 	struct mm_struct *mm;
3279 	struct mmu_gather tlb;
3280 
3281 	mm = vma->vm_mm;
3282 
3283 	tlb_gather_mmu(&tlb, mm, start, end);
3284 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3285 	tlb_finish_mmu(&tlb, start, end);
3286 }
3287 
3288 /*
3289  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3290  * mappping it owns the reserve page for. The intention is to unmap the page
3291  * from other VMAs and let the children be SIGKILLed if they are faulting the
3292  * same region.
3293  */
3294 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3295 			      struct page *page, unsigned long address)
3296 {
3297 	struct hstate *h = hstate_vma(vma);
3298 	struct vm_area_struct *iter_vma;
3299 	struct address_space *mapping;
3300 	pgoff_t pgoff;
3301 
3302 	/*
3303 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3304 	 * from page cache lookup which is in HPAGE_SIZE units.
3305 	 */
3306 	address = address & huge_page_mask(h);
3307 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3308 			vma->vm_pgoff;
3309 	mapping = file_inode(vma->vm_file)->i_mapping;
3310 
3311 	/*
3312 	 * Take the mapping lock for the duration of the table walk. As
3313 	 * this mapping should be shared between all the VMAs,
3314 	 * __unmap_hugepage_range() is called as the lock is already held
3315 	 */
3316 	i_mmap_lock_write(mapping);
3317 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3318 		/* Do not unmap the current VMA */
3319 		if (iter_vma == vma)
3320 			continue;
3321 
3322 		/*
3323 		 * Shared VMAs have their own reserves and do not affect
3324 		 * MAP_PRIVATE accounting but it is possible that a shared
3325 		 * VMA is using the same page so check and skip such VMAs.
3326 		 */
3327 		if (iter_vma->vm_flags & VM_MAYSHARE)
3328 			continue;
3329 
3330 		/*
3331 		 * Unmap the page from other VMAs without their own reserves.
3332 		 * They get marked to be SIGKILLed if they fault in these
3333 		 * areas. This is because a future no-page fault on this VMA
3334 		 * could insert a zeroed page instead of the data existing
3335 		 * from the time of fork. This would look like data corruption
3336 		 */
3337 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3338 			unmap_hugepage_range(iter_vma, address,
3339 					     address + huge_page_size(h), page);
3340 	}
3341 	i_mmap_unlock_write(mapping);
3342 }
3343 
3344 /*
3345  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3346  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3347  * cannot race with other handlers or page migration.
3348  * Keep the pte_same checks anyway to make transition from the mutex easier.
3349  */
3350 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3351 			unsigned long address, pte_t *ptep, pte_t pte,
3352 			struct page *pagecache_page, spinlock_t *ptl)
3353 {
3354 	struct hstate *h = hstate_vma(vma);
3355 	struct page *old_page, *new_page;
3356 	int ret = 0, outside_reserve = 0;
3357 	unsigned long mmun_start;	/* For mmu_notifiers */
3358 	unsigned long mmun_end;		/* For mmu_notifiers */
3359 
3360 	old_page = pte_page(pte);
3361 
3362 retry_avoidcopy:
3363 	/* If no-one else is actually using this page, avoid the copy
3364 	 * and just make the page writable */
3365 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3366 		page_move_anon_rmap(old_page, vma, address);
3367 		set_huge_ptep_writable(vma, address, ptep);
3368 		return 0;
3369 	}
3370 
3371 	/*
3372 	 * If the process that created a MAP_PRIVATE mapping is about to
3373 	 * perform a COW due to a shared page count, attempt to satisfy
3374 	 * the allocation without using the existing reserves. The pagecache
3375 	 * page is used to determine if the reserve at this address was
3376 	 * consumed or not. If reserves were used, a partial faulted mapping
3377 	 * at the time of fork() could consume its reserves on COW instead
3378 	 * of the full address range.
3379 	 */
3380 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3381 			old_page != pagecache_page)
3382 		outside_reserve = 1;
3383 
3384 	page_cache_get(old_page);
3385 
3386 	/*
3387 	 * Drop page table lock as buddy allocator may be called. It will
3388 	 * be acquired again before returning to the caller, as expected.
3389 	 */
3390 	spin_unlock(ptl);
3391 	new_page = alloc_huge_page(vma, address, outside_reserve);
3392 
3393 	if (IS_ERR(new_page)) {
3394 		/*
3395 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
3396 		 * it is due to references held by a child and an insufficient
3397 		 * huge page pool. To guarantee the original mappers
3398 		 * reliability, unmap the page from child processes. The child
3399 		 * may get SIGKILLed if it later faults.
3400 		 */
3401 		if (outside_reserve) {
3402 			page_cache_release(old_page);
3403 			BUG_ON(huge_pte_none(pte));
3404 			unmap_ref_private(mm, vma, old_page, address);
3405 			BUG_ON(huge_pte_none(pte));
3406 			spin_lock(ptl);
3407 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3408 			if (likely(ptep &&
3409 				   pte_same(huge_ptep_get(ptep), pte)))
3410 				goto retry_avoidcopy;
3411 			/*
3412 			 * race occurs while re-acquiring page table
3413 			 * lock, and our job is done.
3414 			 */
3415 			return 0;
3416 		}
3417 
3418 		ret = (PTR_ERR(new_page) == -ENOMEM) ?
3419 			VM_FAULT_OOM : VM_FAULT_SIGBUS;
3420 		goto out_release_old;
3421 	}
3422 
3423 	/*
3424 	 * When the original hugepage is shared one, it does not have
3425 	 * anon_vma prepared.
3426 	 */
3427 	if (unlikely(anon_vma_prepare(vma))) {
3428 		ret = VM_FAULT_OOM;
3429 		goto out_release_all;
3430 	}
3431 
3432 	copy_user_huge_page(new_page, old_page, address, vma,
3433 			    pages_per_huge_page(h));
3434 	__SetPageUptodate(new_page);
3435 	set_page_huge_active(new_page);
3436 
3437 	mmun_start = address & huge_page_mask(h);
3438 	mmun_end = mmun_start + huge_page_size(h);
3439 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3440 
3441 	/*
3442 	 * Retake the page table lock to check for racing updates
3443 	 * before the page tables are altered
3444 	 */
3445 	spin_lock(ptl);
3446 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3447 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3448 		ClearPagePrivate(new_page);
3449 
3450 		/* Break COW */
3451 		huge_ptep_clear_flush(vma, address, ptep);
3452 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3453 		set_huge_pte_at(mm, address, ptep,
3454 				make_huge_pte(vma, new_page, 1));
3455 		page_remove_rmap(old_page);
3456 		hugepage_add_new_anon_rmap(new_page, vma, address);
3457 		/* Make the old page be freed below */
3458 		new_page = old_page;
3459 	}
3460 	spin_unlock(ptl);
3461 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3462 out_release_all:
3463 	page_cache_release(new_page);
3464 out_release_old:
3465 	page_cache_release(old_page);
3466 
3467 	spin_lock(ptl); /* Caller expects lock to be held */
3468 	return ret;
3469 }
3470 
3471 /* Return the pagecache page at a given address within a VMA */
3472 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3473 			struct vm_area_struct *vma, unsigned long address)
3474 {
3475 	struct address_space *mapping;
3476 	pgoff_t idx;
3477 
3478 	mapping = vma->vm_file->f_mapping;
3479 	idx = vma_hugecache_offset(h, vma, address);
3480 
3481 	return find_lock_page(mapping, idx);
3482 }
3483 
3484 /*
3485  * Return whether there is a pagecache page to back given address within VMA.
3486  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3487  */
3488 static bool hugetlbfs_pagecache_present(struct hstate *h,
3489 			struct vm_area_struct *vma, unsigned long address)
3490 {
3491 	struct address_space *mapping;
3492 	pgoff_t idx;
3493 	struct page *page;
3494 
3495 	mapping = vma->vm_file->f_mapping;
3496 	idx = vma_hugecache_offset(h, vma, address);
3497 
3498 	page = find_get_page(mapping, idx);
3499 	if (page)
3500 		put_page(page);
3501 	return page != NULL;
3502 }
3503 
3504 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3505 			   pgoff_t idx)
3506 {
3507 	struct inode *inode = mapping->host;
3508 	struct hstate *h = hstate_inode(inode);
3509 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3510 
3511 	if (err)
3512 		return err;
3513 	ClearPagePrivate(page);
3514 
3515 	spin_lock(&inode->i_lock);
3516 	inode->i_blocks += blocks_per_huge_page(h);
3517 	spin_unlock(&inode->i_lock);
3518 	return 0;
3519 }
3520 
3521 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3522 			   struct address_space *mapping, pgoff_t idx,
3523 			   unsigned long address, pte_t *ptep, unsigned int flags)
3524 {
3525 	struct hstate *h = hstate_vma(vma);
3526 	int ret = VM_FAULT_SIGBUS;
3527 	int anon_rmap = 0;
3528 	unsigned long size;
3529 	struct page *page;
3530 	pte_t new_pte;
3531 	spinlock_t *ptl;
3532 
3533 	/*
3534 	 * Currently, we are forced to kill the process in the event the
3535 	 * original mapper has unmapped pages from the child due to a failed
3536 	 * COW. Warn that such a situation has occurred as it may not be obvious
3537 	 */
3538 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3539 		pr_warning("PID %d killed due to inadequate hugepage pool\n",
3540 			   current->pid);
3541 		return ret;
3542 	}
3543 
3544 	/*
3545 	 * Use page lock to guard against racing truncation
3546 	 * before we get page_table_lock.
3547 	 */
3548 retry:
3549 	page = find_lock_page(mapping, idx);
3550 	if (!page) {
3551 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3552 		if (idx >= size)
3553 			goto out;
3554 		page = alloc_huge_page(vma, address, 0);
3555 		if (IS_ERR(page)) {
3556 			ret = PTR_ERR(page);
3557 			if (ret == -ENOMEM)
3558 				ret = VM_FAULT_OOM;
3559 			else
3560 				ret = VM_FAULT_SIGBUS;
3561 			goto out;
3562 		}
3563 		clear_huge_page(page, address, pages_per_huge_page(h));
3564 		__SetPageUptodate(page);
3565 		set_page_huge_active(page);
3566 
3567 		if (vma->vm_flags & VM_MAYSHARE) {
3568 			int err = huge_add_to_page_cache(page, mapping, idx);
3569 			if (err) {
3570 				put_page(page);
3571 				if (err == -EEXIST)
3572 					goto retry;
3573 				goto out;
3574 			}
3575 		} else {
3576 			lock_page(page);
3577 			if (unlikely(anon_vma_prepare(vma))) {
3578 				ret = VM_FAULT_OOM;
3579 				goto backout_unlocked;
3580 			}
3581 			anon_rmap = 1;
3582 		}
3583 	} else {
3584 		/*
3585 		 * If memory error occurs between mmap() and fault, some process
3586 		 * don't have hwpoisoned swap entry for errored virtual address.
3587 		 * So we need to block hugepage fault by PG_hwpoison bit check.
3588 		 */
3589 		if (unlikely(PageHWPoison(page))) {
3590 			ret = VM_FAULT_HWPOISON |
3591 				VM_FAULT_SET_HINDEX(hstate_index(h));
3592 			goto backout_unlocked;
3593 		}
3594 	}
3595 
3596 	/*
3597 	 * If we are going to COW a private mapping later, we examine the
3598 	 * pending reservations for this page now. This will ensure that
3599 	 * any allocations necessary to record that reservation occur outside
3600 	 * the spinlock.
3601 	 */
3602 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3603 		if (vma_needs_reservation(h, vma, address) < 0) {
3604 			ret = VM_FAULT_OOM;
3605 			goto backout_unlocked;
3606 		}
3607 		/* Just decrements count, does not deallocate */
3608 		vma_end_reservation(h, vma, address);
3609 	}
3610 
3611 	ptl = huge_pte_lockptr(h, mm, ptep);
3612 	spin_lock(ptl);
3613 	size = i_size_read(mapping->host) >> huge_page_shift(h);
3614 	if (idx >= size)
3615 		goto backout;
3616 
3617 	ret = 0;
3618 	if (!huge_pte_none(huge_ptep_get(ptep)))
3619 		goto backout;
3620 
3621 	if (anon_rmap) {
3622 		ClearPagePrivate(page);
3623 		hugepage_add_new_anon_rmap(page, vma, address);
3624 	} else
3625 		page_dup_rmap(page);
3626 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3627 				&& (vma->vm_flags & VM_SHARED)));
3628 	set_huge_pte_at(mm, address, ptep, new_pte);
3629 
3630 	hugetlb_count_add(pages_per_huge_page(h), mm);
3631 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3632 		/* Optimization, do the COW without a second fault */
3633 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3634 	}
3635 
3636 	spin_unlock(ptl);
3637 	unlock_page(page);
3638 out:
3639 	return ret;
3640 
3641 backout:
3642 	spin_unlock(ptl);
3643 backout_unlocked:
3644 	unlock_page(page);
3645 	put_page(page);
3646 	goto out;
3647 }
3648 
3649 #ifdef CONFIG_SMP
3650 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3651 			    struct vm_area_struct *vma,
3652 			    struct address_space *mapping,
3653 			    pgoff_t idx, unsigned long address)
3654 {
3655 	unsigned long key[2];
3656 	u32 hash;
3657 
3658 	if (vma->vm_flags & VM_SHARED) {
3659 		key[0] = (unsigned long) mapping;
3660 		key[1] = idx;
3661 	} else {
3662 		key[0] = (unsigned long) mm;
3663 		key[1] = address >> huge_page_shift(h);
3664 	}
3665 
3666 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3667 
3668 	return hash & (num_fault_mutexes - 1);
3669 }
3670 #else
3671 /*
3672  * For uniprocesor systems we always use a single mutex, so just
3673  * return 0 and avoid the hashing overhead.
3674  */
3675 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3676 			    struct vm_area_struct *vma,
3677 			    struct address_space *mapping,
3678 			    pgoff_t idx, unsigned long address)
3679 {
3680 	return 0;
3681 }
3682 #endif
3683 
3684 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3685 			unsigned long address, unsigned int flags)
3686 {
3687 	pte_t *ptep, entry;
3688 	spinlock_t *ptl;
3689 	int ret;
3690 	u32 hash;
3691 	pgoff_t idx;
3692 	struct page *page = NULL;
3693 	struct page *pagecache_page = NULL;
3694 	struct hstate *h = hstate_vma(vma);
3695 	struct address_space *mapping;
3696 	int need_wait_lock = 0;
3697 
3698 	address &= huge_page_mask(h);
3699 
3700 	ptep = huge_pte_offset(mm, address);
3701 	if (ptep) {
3702 		entry = huge_ptep_get(ptep);
3703 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3704 			migration_entry_wait_huge(vma, mm, ptep);
3705 			return 0;
3706 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3707 			return VM_FAULT_HWPOISON_LARGE |
3708 				VM_FAULT_SET_HINDEX(hstate_index(h));
3709 	} else {
3710 		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3711 		if (!ptep)
3712 			return VM_FAULT_OOM;
3713 	}
3714 
3715 	mapping = vma->vm_file->f_mapping;
3716 	idx = vma_hugecache_offset(h, vma, address);
3717 
3718 	/*
3719 	 * Serialize hugepage allocation and instantiation, so that we don't
3720 	 * get spurious allocation failures if two CPUs race to instantiate
3721 	 * the same page in the page cache.
3722 	 */
3723 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3724 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
3725 
3726 	entry = huge_ptep_get(ptep);
3727 	if (huge_pte_none(entry)) {
3728 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3729 		goto out_mutex;
3730 	}
3731 
3732 	ret = 0;
3733 
3734 	/*
3735 	 * entry could be a migration/hwpoison entry at this point, so this
3736 	 * check prevents the kernel from going below assuming that we have
3737 	 * a active hugepage in pagecache. This goto expects the 2nd page fault,
3738 	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3739 	 * handle it.
3740 	 */
3741 	if (!pte_present(entry))
3742 		goto out_mutex;
3743 
3744 	/*
3745 	 * If we are going to COW the mapping later, we examine the pending
3746 	 * reservations for this page now. This will ensure that any
3747 	 * allocations necessary to record that reservation occur outside the
3748 	 * spinlock. For private mappings, we also lookup the pagecache
3749 	 * page now as it is used to determine if a reservation has been
3750 	 * consumed.
3751 	 */
3752 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3753 		if (vma_needs_reservation(h, vma, address) < 0) {
3754 			ret = VM_FAULT_OOM;
3755 			goto out_mutex;
3756 		}
3757 		/* Just decrements count, does not deallocate */
3758 		vma_end_reservation(h, vma, address);
3759 
3760 		if (!(vma->vm_flags & VM_MAYSHARE))
3761 			pagecache_page = hugetlbfs_pagecache_page(h,
3762 								vma, address);
3763 	}
3764 
3765 	ptl = huge_pte_lock(h, mm, ptep);
3766 
3767 	/* Check for a racing update before calling hugetlb_cow */
3768 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3769 		goto out_ptl;
3770 
3771 	/*
3772 	 * hugetlb_cow() requires page locks of pte_page(entry) and
3773 	 * pagecache_page, so here we need take the former one
3774 	 * when page != pagecache_page or !pagecache_page.
3775 	 */
3776 	page = pte_page(entry);
3777 	if (page != pagecache_page)
3778 		if (!trylock_page(page)) {
3779 			need_wait_lock = 1;
3780 			goto out_ptl;
3781 		}
3782 
3783 	get_page(page);
3784 
3785 	if (flags & FAULT_FLAG_WRITE) {
3786 		if (!huge_pte_write(entry)) {
3787 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
3788 					pagecache_page, ptl);
3789 			goto out_put_page;
3790 		}
3791 		entry = huge_pte_mkdirty(entry);
3792 	}
3793 	entry = pte_mkyoung(entry);
3794 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3795 						flags & FAULT_FLAG_WRITE))
3796 		update_mmu_cache(vma, address, ptep);
3797 out_put_page:
3798 	if (page != pagecache_page)
3799 		unlock_page(page);
3800 	put_page(page);
3801 out_ptl:
3802 	spin_unlock(ptl);
3803 
3804 	if (pagecache_page) {
3805 		unlock_page(pagecache_page);
3806 		put_page(pagecache_page);
3807 	}
3808 out_mutex:
3809 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3810 	/*
3811 	 * Generally it's safe to hold refcount during waiting page lock. But
3812 	 * here we just wait to defer the next page fault to avoid busy loop and
3813 	 * the page is not used after unlocked before returning from the current
3814 	 * page fault. So we are safe from accessing freed page, even if we wait
3815 	 * here without taking refcount.
3816 	 */
3817 	if (need_wait_lock)
3818 		wait_on_page_locked(page);
3819 	return ret;
3820 }
3821 
3822 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3823 			 struct page **pages, struct vm_area_struct **vmas,
3824 			 unsigned long *position, unsigned long *nr_pages,
3825 			 long i, unsigned int flags)
3826 {
3827 	unsigned long pfn_offset;
3828 	unsigned long vaddr = *position;
3829 	unsigned long remainder = *nr_pages;
3830 	struct hstate *h = hstate_vma(vma);
3831 
3832 	while (vaddr < vma->vm_end && remainder) {
3833 		pte_t *pte;
3834 		spinlock_t *ptl = NULL;
3835 		int absent;
3836 		struct page *page;
3837 
3838 		/*
3839 		 * If we have a pending SIGKILL, don't keep faulting pages and
3840 		 * potentially allocating memory.
3841 		 */
3842 		if (unlikely(fatal_signal_pending(current))) {
3843 			remainder = 0;
3844 			break;
3845 		}
3846 
3847 		/*
3848 		 * Some archs (sparc64, sh*) have multiple pte_ts to
3849 		 * each hugepage.  We have to make sure we get the
3850 		 * first, for the page indexing below to work.
3851 		 *
3852 		 * Note that page table lock is not held when pte is null.
3853 		 */
3854 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3855 		if (pte)
3856 			ptl = huge_pte_lock(h, mm, pte);
3857 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
3858 
3859 		/*
3860 		 * When coredumping, it suits get_dump_page if we just return
3861 		 * an error where there's an empty slot with no huge pagecache
3862 		 * to back it.  This way, we avoid allocating a hugepage, and
3863 		 * the sparse dumpfile avoids allocating disk blocks, but its
3864 		 * huge holes still show up with zeroes where they need to be.
3865 		 */
3866 		if (absent && (flags & FOLL_DUMP) &&
3867 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3868 			if (pte)
3869 				spin_unlock(ptl);
3870 			remainder = 0;
3871 			break;
3872 		}
3873 
3874 		/*
3875 		 * We need call hugetlb_fault for both hugepages under migration
3876 		 * (in which case hugetlb_fault waits for the migration,) and
3877 		 * hwpoisoned hugepages (in which case we need to prevent the
3878 		 * caller from accessing to them.) In order to do this, we use
3879 		 * here is_swap_pte instead of is_hugetlb_entry_migration and
3880 		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3881 		 * both cases, and because we can't follow correct pages
3882 		 * directly from any kind of swap entries.
3883 		 */
3884 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3885 		    ((flags & FOLL_WRITE) &&
3886 		      !huge_pte_write(huge_ptep_get(pte)))) {
3887 			int ret;
3888 
3889 			if (pte)
3890 				spin_unlock(ptl);
3891 			ret = hugetlb_fault(mm, vma, vaddr,
3892 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3893 			if (!(ret & VM_FAULT_ERROR))
3894 				continue;
3895 
3896 			remainder = 0;
3897 			break;
3898 		}
3899 
3900 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3901 		page = pte_page(huge_ptep_get(pte));
3902 same_page:
3903 		if (pages) {
3904 			pages[i] = mem_map_offset(page, pfn_offset);
3905 			get_page_foll(pages[i]);
3906 		}
3907 
3908 		if (vmas)
3909 			vmas[i] = vma;
3910 
3911 		vaddr += PAGE_SIZE;
3912 		++pfn_offset;
3913 		--remainder;
3914 		++i;
3915 		if (vaddr < vma->vm_end && remainder &&
3916 				pfn_offset < pages_per_huge_page(h)) {
3917 			/*
3918 			 * We use pfn_offset to avoid touching the pageframes
3919 			 * of this compound page.
3920 			 */
3921 			goto same_page;
3922 		}
3923 		spin_unlock(ptl);
3924 	}
3925 	*nr_pages = remainder;
3926 	*position = vaddr;
3927 
3928 	return i ? i : -EFAULT;
3929 }
3930 
3931 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3932 		unsigned long address, unsigned long end, pgprot_t newprot)
3933 {
3934 	struct mm_struct *mm = vma->vm_mm;
3935 	unsigned long start = address;
3936 	pte_t *ptep;
3937 	pte_t pte;
3938 	struct hstate *h = hstate_vma(vma);
3939 	unsigned long pages = 0;
3940 
3941 	BUG_ON(address >= end);
3942 	flush_cache_range(vma, address, end);
3943 
3944 	mmu_notifier_invalidate_range_start(mm, start, end);
3945 	i_mmap_lock_write(vma->vm_file->f_mapping);
3946 	for (; address < end; address += huge_page_size(h)) {
3947 		spinlock_t *ptl;
3948 		ptep = huge_pte_offset(mm, address);
3949 		if (!ptep)
3950 			continue;
3951 		ptl = huge_pte_lock(h, mm, ptep);
3952 		if (huge_pmd_unshare(mm, &address, ptep)) {
3953 			pages++;
3954 			spin_unlock(ptl);
3955 			continue;
3956 		}
3957 		pte = huge_ptep_get(ptep);
3958 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3959 			spin_unlock(ptl);
3960 			continue;
3961 		}
3962 		if (unlikely(is_hugetlb_entry_migration(pte))) {
3963 			swp_entry_t entry = pte_to_swp_entry(pte);
3964 
3965 			if (is_write_migration_entry(entry)) {
3966 				pte_t newpte;
3967 
3968 				make_migration_entry_read(&entry);
3969 				newpte = swp_entry_to_pte(entry);
3970 				set_huge_pte_at(mm, address, ptep, newpte);
3971 				pages++;
3972 			}
3973 			spin_unlock(ptl);
3974 			continue;
3975 		}
3976 		if (!huge_pte_none(pte)) {
3977 			pte = huge_ptep_get_and_clear(mm, address, ptep);
3978 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3979 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
3980 			set_huge_pte_at(mm, address, ptep, pte);
3981 			pages++;
3982 		}
3983 		spin_unlock(ptl);
3984 	}
3985 	/*
3986 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3987 	 * may have cleared our pud entry and done put_page on the page table:
3988 	 * once we release i_mmap_rwsem, another task can do the final put_page
3989 	 * and that page table be reused and filled with junk.
3990 	 */
3991 	flush_tlb_range(vma, start, end);
3992 	mmu_notifier_invalidate_range(mm, start, end);
3993 	i_mmap_unlock_write(vma->vm_file->f_mapping);
3994 	mmu_notifier_invalidate_range_end(mm, start, end);
3995 
3996 	return pages << h->order;
3997 }
3998 
3999 int hugetlb_reserve_pages(struct inode *inode,
4000 					long from, long to,
4001 					struct vm_area_struct *vma,
4002 					vm_flags_t vm_flags)
4003 {
4004 	long ret, chg;
4005 	struct hstate *h = hstate_inode(inode);
4006 	struct hugepage_subpool *spool = subpool_inode(inode);
4007 	struct resv_map *resv_map;
4008 	long gbl_reserve;
4009 
4010 	/*
4011 	 * Only apply hugepage reservation if asked. At fault time, an
4012 	 * attempt will be made for VM_NORESERVE to allocate a page
4013 	 * without using reserves
4014 	 */
4015 	if (vm_flags & VM_NORESERVE)
4016 		return 0;
4017 
4018 	/*
4019 	 * Shared mappings base their reservation on the number of pages that
4020 	 * are already allocated on behalf of the file. Private mappings need
4021 	 * to reserve the full area even if read-only as mprotect() may be
4022 	 * called to make the mapping read-write. Assume !vma is a shm mapping
4023 	 */
4024 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4025 		resv_map = inode_resv_map(inode);
4026 
4027 		chg = region_chg(resv_map, from, to);
4028 
4029 	} else {
4030 		resv_map = resv_map_alloc();
4031 		if (!resv_map)
4032 			return -ENOMEM;
4033 
4034 		chg = to - from;
4035 
4036 		set_vma_resv_map(vma, resv_map);
4037 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4038 	}
4039 
4040 	if (chg < 0) {
4041 		ret = chg;
4042 		goto out_err;
4043 	}
4044 
4045 	/*
4046 	 * There must be enough pages in the subpool for the mapping. If
4047 	 * the subpool has a minimum size, there may be some global
4048 	 * reservations already in place (gbl_reserve).
4049 	 */
4050 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4051 	if (gbl_reserve < 0) {
4052 		ret = -ENOSPC;
4053 		goto out_err;
4054 	}
4055 
4056 	/*
4057 	 * Check enough hugepages are available for the reservation.
4058 	 * Hand the pages back to the subpool if there are not
4059 	 */
4060 	ret = hugetlb_acct_memory(h, gbl_reserve);
4061 	if (ret < 0) {
4062 		/* put back original number of pages, chg */
4063 		(void)hugepage_subpool_put_pages(spool, chg);
4064 		goto out_err;
4065 	}
4066 
4067 	/*
4068 	 * Account for the reservations made. Shared mappings record regions
4069 	 * that have reservations as they are shared by multiple VMAs.
4070 	 * When the last VMA disappears, the region map says how much
4071 	 * the reservation was and the page cache tells how much of
4072 	 * the reservation was consumed. Private mappings are per-VMA and
4073 	 * only the consumed reservations are tracked. When the VMA
4074 	 * disappears, the original reservation is the VMA size and the
4075 	 * consumed reservations are stored in the map. Hence, nothing
4076 	 * else has to be done for private mappings here
4077 	 */
4078 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4079 		long add = region_add(resv_map, from, to);
4080 
4081 		if (unlikely(chg > add)) {
4082 			/*
4083 			 * pages in this range were added to the reserve
4084 			 * map between region_chg and region_add.  This
4085 			 * indicates a race with alloc_huge_page.  Adjust
4086 			 * the subpool and reserve counts modified above
4087 			 * based on the difference.
4088 			 */
4089 			long rsv_adjust;
4090 
4091 			rsv_adjust = hugepage_subpool_put_pages(spool,
4092 								chg - add);
4093 			hugetlb_acct_memory(h, -rsv_adjust);
4094 		}
4095 	}
4096 	return 0;
4097 out_err:
4098 	if (!vma || vma->vm_flags & VM_MAYSHARE)
4099 		region_abort(resv_map, from, to);
4100 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4101 		kref_put(&resv_map->refs, resv_map_release);
4102 	return ret;
4103 }
4104 
4105 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4106 								long freed)
4107 {
4108 	struct hstate *h = hstate_inode(inode);
4109 	struct resv_map *resv_map = inode_resv_map(inode);
4110 	long chg = 0;
4111 	struct hugepage_subpool *spool = subpool_inode(inode);
4112 	long gbl_reserve;
4113 
4114 	if (resv_map) {
4115 		chg = region_del(resv_map, start, end);
4116 		/*
4117 		 * region_del() can fail in the rare case where a region
4118 		 * must be split and another region descriptor can not be
4119 		 * allocated.  If end == LONG_MAX, it will not fail.
4120 		 */
4121 		if (chg < 0)
4122 			return chg;
4123 	}
4124 
4125 	spin_lock(&inode->i_lock);
4126 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4127 	spin_unlock(&inode->i_lock);
4128 
4129 	/*
4130 	 * If the subpool has a minimum size, the number of global
4131 	 * reservations to be released may be adjusted.
4132 	 */
4133 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4134 	hugetlb_acct_memory(h, -gbl_reserve);
4135 
4136 	return 0;
4137 }
4138 
4139 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4140 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4141 				struct vm_area_struct *vma,
4142 				unsigned long addr, pgoff_t idx)
4143 {
4144 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4145 				svma->vm_start;
4146 	unsigned long sbase = saddr & PUD_MASK;
4147 	unsigned long s_end = sbase + PUD_SIZE;
4148 
4149 	/* Allow segments to share if only one is marked locked */
4150 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4151 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4152 
4153 	/*
4154 	 * match the virtual addresses, permission and the alignment of the
4155 	 * page table page.
4156 	 */
4157 	if (pmd_index(addr) != pmd_index(saddr) ||
4158 	    vm_flags != svm_flags ||
4159 	    sbase < svma->vm_start || svma->vm_end < s_end)
4160 		return 0;
4161 
4162 	return saddr;
4163 }
4164 
4165 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4166 {
4167 	unsigned long base = addr & PUD_MASK;
4168 	unsigned long end = base + PUD_SIZE;
4169 
4170 	/*
4171 	 * check on proper vm_flags and page table alignment
4172 	 */
4173 	if (vma->vm_flags & VM_MAYSHARE &&
4174 	    vma->vm_start <= base && end <= vma->vm_end)
4175 		return true;
4176 	return false;
4177 }
4178 
4179 /*
4180  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4181  * and returns the corresponding pte. While this is not necessary for the
4182  * !shared pmd case because we can allocate the pmd later as well, it makes the
4183  * code much cleaner. pmd allocation is essential for the shared case because
4184  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4185  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4186  * bad pmd for sharing.
4187  */
4188 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4189 {
4190 	struct vm_area_struct *vma = find_vma(mm, addr);
4191 	struct address_space *mapping = vma->vm_file->f_mapping;
4192 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4193 			vma->vm_pgoff;
4194 	struct vm_area_struct *svma;
4195 	unsigned long saddr;
4196 	pte_t *spte = NULL;
4197 	pte_t *pte;
4198 	spinlock_t *ptl;
4199 
4200 	if (!vma_shareable(vma, addr))
4201 		return (pte_t *)pmd_alloc(mm, pud, addr);
4202 
4203 	i_mmap_lock_write(mapping);
4204 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4205 		if (svma == vma)
4206 			continue;
4207 
4208 		saddr = page_table_shareable(svma, vma, addr, idx);
4209 		if (saddr) {
4210 			spte = huge_pte_offset(svma->vm_mm, saddr);
4211 			if (spte) {
4212 				mm_inc_nr_pmds(mm);
4213 				get_page(virt_to_page(spte));
4214 				break;
4215 			}
4216 		}
4217 	}
4218 
4219 	if (!spte)
4220 		goto out;
4221 
4222 	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4223 	spin_lock(ptl);
4224 	if (pud_none(*pud)) {
4225 		pud_populate(mm, pud,
4226 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
4227 	} else {
4228 		put_page(virt_to_page(spte));
4229 		mm_inc_nr_pmds(mm);
4230 	}
4231 	spin_unlock(ptl);
4232 out:
4233 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
4234 	i_mmap_unlock_write(mapping);
4235 	return pte;
4236 }
4237 
4238 /*
4239  * unmap huge page backed by shared pte.
4240  *
4241  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4242  * indicated by page_count > 1, unmap is achieved by clearing pud and
4243  * decrementing the ref count. If count == 1, the pte page is not shared.
4244  *
4245  * called with page table lock held.
4246  *
4247  * returns: 1 successfully unmapped a shared pte page
4248  *	    0 the underlying pte page is not shared, or it is the last user
4249  */
4250 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4251 {
4252 	pgd_t *pgd = pgd_offset(mm, *addr);
4253 	pud_t *pud = pud_offset(pgd, *addr);
4254 
4255 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
4256 	if (page_count(virt_to_page(ptep)) == 1)
4257 		return 0;
4258 
4259 	pud_clear(pud);
4260 	put_page(virt_to_page(ptep));
4261 	mm_dec_nr_pmds(mm);
4262 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4263 	return 1;
4264 }
4265 #define want_pmd_share()	(1)
4266 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4267 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4268 {
4269 	return NULL;
4270 }
4271 
4272 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4273 {
4274 	return 0;
4275 }
4276 #define want_pmd_share()	(0)
4277 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4278 
4279 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4280 pte_t *huge_pte_alloc(struct mm_struct *mm,
4281 			unsigned long addr, unsigned long sz)
4282 {
4283 	pgd_t *pgd;
4284 	pud_t *pud;
4285 	pte_t *pte = NULL;
4286 
4287 	pgd = pgd_offset(mm, addr);
4288 	pud = pud_alloc(mm, pgd, addr);
4289 	if (pud) {
4290 		if (sz == PUD_SIZE) {
4291 			pte = (pte_t *)pud;
4292 		} else {
4293 			BUG_ON(sz != PMD_SIZE);
4294 			if (want_pmd_share() && pud_none(*pud))
4295 				pte = huge_pmd_share(mm, addr, pud);
4296 			else
4297 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
4298 		}
4299 	}
4300 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4301 
4302 	return pte;
4303 }
4304 
4305 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4306 {
4307 	pgd_t *pgd;
4308 	pud_t *pud;
4309 	pmd_t *pmd = NULL;
4310 
4311 	pgd = pgd_offset(mm, addr);
4312 	if (pgd_present(*pgd)) {
4313 		pud = pud_offset(pgd, addr);
4314 		if (pud_present(*pud)) {
4315 			if (pud_huge(*pud))
4316 				return (pte_t *)pud;
4317 			pmd = pmd_offset(pud, addr);
4318 		}
4319 	}
4320 	return (pte_t *) pmd;
4321 }
4322 
4323 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4324 
4325 /*
4326  * These functions are overwritable if your architecture needs its own
4327  * behavior.
4328  */
4329 struct page * __weak
4330 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4331 			      int write)
4332 {
4333 	return ERR_PTR(-EINVAL);
4334 }
4335 
4336 struct page * __weak
4337 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4338 		pmd_t *pmd, int flags)
4339 {
4340 	struct page *page = NULL;
4341 	spinlock_t *ptl;
4342 retry:
4343 	ptl = pmd_lockptr(mm, pmd);
4344 	spin_lock(ptl);
4345 	/*
4346 	 * make sure that the address range covered by this pmd is not
4347 	 * unmapped from other threads.
4348 	 */
4349 	if (!pmd_huge(*pmd))
4350 		goto out;
4351 	if (pmd_present(*pmd)) {
4352 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4353 		if (flags & FOLL_GET)
4354 			get_page(page);
4355 	} else {
4356 		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4357 			spin_unlock(ptl);
4358 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
4359 			goto retry;
4360 		}
4361 		/*
4362 		 * hwpoisoned entry is treated as no_page_table in
4363 		 * follow_page_mask().
4364 		 */
4365 	}
4366 out:
4367 	spin_unlock(ptl);
4368 	return page;
4369 }
4370 
4371 struct page * __weak
4372 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4373 		pud_t *pud, int flags)
4374 {
4375 	if (flags & FOLL_GET)
4376 		return NULL;
4377 
4378 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4379 }
4380 
4381 #ifdef CONFIG_MEMORY_FAILURE
4382 
4383 /*
4384  * This function is called from memory failure code.
4385  * Assume the caller holds page lock of the head page.
4386  */
4387 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4388 {
4389 	struct hstate *h = page_hstate(hpage);
4390 	int nid = page_to_nid(hpage);
4391 	int ret = -EBUSY;
4392 
4393 	spin_lock(&hugetlb_lock);
4394 	/*
4395 	 * Just checking !page_huge_active is not enough, because that could be
4396 	 * an isolated/hwpoisoned hugepage (which have >0 refcount).
4397 	 */
4398 	if (!page_huge_active(hpage) && !page_count(hpage)) {
4399 		/*
4400 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4401 		 * but dangling hpage->lru can trigger list-debug warnings
4402 		 * (this happens when we call unpoison_memory() on it),
4403 		 * so let it point to itself with list_del_init().
4404 		 */
4405 		list_del_init(&hpage->lru);
4406 		set_page_refcounted(hpage);
4407 		h->free_huge_pages--;
4408 		h->free_huge_pages_node[nid]--;
4409 		ret = 0;
4410 	}
4411 	spin_unlock(&hugetlb_lock);
4412 	return ret;
4413 }
4414 #endif
4415 
4416 bool isolate_huge_page(struct page *page, struct list_head *list)
4417 {
4418 	bool ret = true;
4419 
4420 	VM_BUG_ON_PAGE(!PageHead(page), page);
4421 	spin_lock(&hugetlb_lock);
4422 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4423 		ret = false;
4424 		goto unlock;
4425 	}
4426 	clear_page_huge_active(page);
4427 	list_move_tail(&page->lru, list);
4428 unlock:
4429 	spin_unlock(&hugetlb_lock);
4430 	return ret;
4431 }
4432 
4433 void putback_active_hugepage(struct page *page)
4434 {
4435 	VM_BUG_ON_PAGE(!PageHead(page), page);
4436 	spin_lock(&hugetlb_lock);
4437 	set_page_huge_active(page);
4438 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4439 	spin_unlock(&hugetlb_lock);
4440 	put_page(page);
4441 }
4442