xref: /openbmc/linux/mm/hugetlb.c (revision d89775fc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/mmdebug.h>
23 #include <linux/sched/signal.h>
24 #include <linux/rmap.h>
25 #include <linux/string_helpers.h>
26 #include <linux/swap.h>
27 #include <linux/swapops.h>
28 #include <linux/jhash.h>
29 #include <linux/numa.h>
30 #include <linux/llist.h>
31 #include <linux/cma.h>
32 
33 #include <asm/page.h>
34 #include <asm/pgalloc.h>
35 #include <asm/tlb.h>
36 
37 #include <linux/io.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/node.h>
41 #include <linux/userfaultfd_k.h>
42 #include <linux/page_owner.h>
43 #include "internal.h"
44 
45 int hugetlb_max_hstate __read_mostly;
46 unsigned int default_hstate_idx;
47 struct hstate hstates[HUGE_MAX_HSTATE];
48 
49 #ifdef CONFIG_CMA
50 static struct cma *hugetlb_cma[MAX_NUMNODES];
51 #endif
52 static unsigned long hugetlb_cma_size __initdata;
53 
54 /*
55  * Minimum page order among possible hugepage sizes, set to a proper value
56  * at boot time.
57  */
58 static unsigned int minimum_order __read_mostly = UINT_MAX;
59 
60 __initdata LIST_HEAD(huge_boot_pages);
61 
62 /* for command line parsing */
63 static struct hstate * __initdata parsed_hstate;
64 static unsigned long __initdata default_hstate_max_huge_pages;
65 static bool __initdata parsed_valid_hugepagesz = true;
66 static bool __initdata parsed_default_hugepagesz;
67 
68 /*
69  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
70  * free_huge_pages, and surplus_huge_pages.
71  */
72 DEFINE_SPINLOCK(hugetlb_lock);
73 
74 /*
75  * Serializes faults on the same logical page.  This is used to
76  * prevent spurious OOMs when the hugepage pool is fully utilized.
77  */
78 static int num_fault_mutexes;
79 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
80 
81 /* Forward declaration */
82 static int hugetlb_acct_memory(struct hstate *h, long delta);
83 
84 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
85 {
86 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
87 
88 	spin_unlock(&spool->lock);
89 
90 	/* If no pages are used, and no other handles to the subpool
91 	 * remain, give up any reservations based on minimum size and
92 	 * free the subpool */
93 	if (free) {
94 		if (spool->min_hpages != -1)
95 			hugetlb_acct_memory(spool->hstate,
96 						-spool->min_hpages);
97 		kfree(spool);
98 	}
99 }
100 
101 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
102 						long min_hpages)
103 {
104 	struct hugepage_subpool *spool;
105 
106 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
107 	if (!spool)
108 		return NULL;
109 
110 	spin_lock_init(&spool->lock);
111 	spool->count = 1;
112 	spool->max_hpages = max_hpages;
113 	spool->hstate = h;
114 	spool->min_hpages = min_hpages;
115 
116 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
117 		kfree(spool);
118 		return NULL;
119 	}
120 	spool->rsv_hpages = min_hpages;
121 
122 	return spool;
123 }
124 
125 void hugepage_put_subpool(struct hugepage_subpool *spool)
126 {
127 	spin_lock(&spool->lock);
128 	BUG_ON(!spool->count);
129 	spool->count--;
130 	unlock_or_release_subpool(spool);
131 }
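/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a mount-time consumer such as hugetlbfs might drive the subpool
 * lifecycle roughly as follows; the hstate 'h' and the sizes here are
 * hypothetical.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, 1024, 16);  (max 1024, min 16 pages)
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);  (drops the reference taken at creation)
 */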
132 
133 /*
134  * Subpool accounting for allocating and reserving pages.
135  * Return -ENOMEM if there are not enough resources to satisfy the
136  * request.  Otherwise, return the number of pages by which the
137  * global pools must be adjusted (upward).  The returned value may
138  * only be different than the passed value (delta) in the case where
139  * a subpool minimum size must be maintained.
140  */
141 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
142 				      long delta)
143 {
144 	long ret = delta;
145 
146 	if (!spool)
147 		return ret;
148 
149 	spin_lock(&spool->lock);
150 
151 	if (spool->max_hpages != -1) {		/* maximum size accounting */
152 		if ((spool->used_hpages + delta) <= spool->max_hpages)
153 			spool->used_hpages += delta;
154 		else {
155 			ret = -ENOMEM;
156 			goto unlock_ret;
157 		}
158 	}
159 
160 	/* minimum size accounting */
161 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
162 		if (delta > spool->rsv_hpages) {
163 			/*
164 			 * Asking for more reserves than those already taken on
165 			 * behalf of subpool.  Return difference.
166 			 */
167 			ret = delta - spool->rsv_hpages;
168 			spool->rsv_hpages = 0;
169 		} else {
170 			ret = 0;	/* reserves already accounted for */
171 			spool->rsv_hpages -= delta;
172 		}
173 	}
174 
175 unlock_ret:
176 	spin_unlock(&spool->lock);
177 	return ret;
178 }
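/*
 * Worked example (editorial): with no maximum size limit, min_hpages == 8
 * and rsv_hpages == 8, a request for delta == 10 consumes all 8
 * outstanding reserves and returns 2, the number of pages the global pool
 * must still provide.  A further request for delta == 2 returns 2
 * unchanged, since no reserves remain.
 */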
179 
180 /*
181  * Subpool accounting for freeing and unreserving pages.
182  * Return the number of global page reservations that must be dropped.
183  * The return value may only be different than the passed value (delta)
184  * in the case where a subpool minimum size must be maintained.
185  */
186 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
187 				       long delta)
188 {
189 	long ret = delta;
190 
191 	if (!spool)
192 		return delta;
193 
194 	spin_lock(&spool->lock);
195 
196 	if (spool->max_hpages != -1)		/* maximum size accounting */
197 		spool->used_hpages -= delta;
198 
199 	/* minimum size accounting */
200 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
201 		if (spool->rsv_hpages + delta <= spool->min_hpages)
202 			ret = 0;
203 		else
204 			ret = spool->rsv_hpages + delta - spool->min_hpages;
205 
206 		spool->rsv_hpages += delta;
207 		if (spool->rsv_hpages > spool->min_hpages)
208 			spool->rsv_hpages = spool->min_hpages;
209 	}
210 
211 	/*
212 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
213 	 * quota reference, free it now.
214 	 */
215 	unlock_or_release_subpool(spool);
216 
217 	return ret;
218 }
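/*
 * Worked example (editorial): with min_hpages == 8, rsv_hpages == 6 and
 * used_hpages (after the decrement above) == 2, putting back delta == 4
 * gives rsv_hpages + delta == 10 > min_hpages, so 2 global reservations
 * are dropped (ret == 2) and rsv_hpages is clamped back to 8.
 */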
219 
220 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
221 {
222 	return HUGETLBFS_SB(inode->i_sb)->spool;
223 }
224 
225 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
226 {
227 	return subpool_inode(file_inode(vma->vm_file));
228 }
229 
230 /* Helper that removes a struct file_region from the resv_map cache and returns
231  * it for use.
232  */
233 static struct file_region *
234 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
235 {
236 	struct file_region *nrg = NULL;
237 
238 	VM_BUG_ON(resv->region_cache_count <= 0);
239 
240 	resv->region_cache_count--;
241 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
242 	VM_BUG_ON(!nrg);
243 	list_del(&nrg->link);
244 
245 	nrg->from = from;
246 	nrg->to = to;
247 
248 	return nrg;
249 }
250 
251 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
252 					      struct file_region *rg)
253 {
254 #ifdef CONFIG_CGROUP_HUGETLB
255 	nrg->reservation_counter = rg->reservation_counter;
256 	nrg->css = rg->css;
257 	if (rg->css)
258 		css_get(rg->css);
259 #endif
260 }
261 
262 /* Helper that records hugetlb_cgroup uncharge info. */
263 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
264 						struct hstate *h,
265 						struct resv_map *resv,
266 						struct file_region *nrg)
267 {
268 #ifdef CONFIG_CGROUP_HUGETLB
269 	if (h_cg) {
270 		nrg->reservation_counter =
271 			&h_cg->rsvd_hugepage[hstate_index(h)];
272 		nrg->css = &h_cg->css;
273 		if (!resv->pages_per_hpage)
274 			resv->pages_per_hpage = pages_per_huge_page(h);
275 		/* pages_per_hpage should be the same for all entries in
276 		 * a resv_map.
277 		 */
278 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
279 	} else {
280 		nrg->reservation_counter = NULL;
281 		nrg->css = NULL;
282 	}
283 #endif
284 }
285 
286 static bool has_same_uncharge_info(struct file_region *rg,
287 				   struct file_region *org)
288 {
289 #ifdef CONFIG_CGROUP_HUGETLB
290 	return rg && org &&
291 	       rg->reservation_counter == org->reservation_counter &&
292 	       rg->css == org->css;
293 
294 #else
295 	return true;
296 #endif
297 }
298 
299 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
300 {
301 	struct file_region *nrg = NULL, *prg = NULL;
302 
303 	prg = list_prev_entry(rg, link);
304 	if (&prg->link != &resv->regions && prg->to == rg->from &&
305 	    has_same_uncharge_info(prg, rg)) {
306 		prg->to = rg->to;
307 
308 		list_del(&rg->link);
309 		kfree(rg);
310 
311 		coalesce_file_region(resv, prg);
312 		return;
313 	}
314 
315 	nrg = list_next_entry(rg, link);
316 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
317 	    has_same_uncharge_info(nrg, rg)) {
318 		nrg->from = rg->from;
319 
320 		list_del(&rg->link);
321 		kfree(rg);
322 
323 		coalesce_file_region(resv, nrg);
324 		return;
325 	}
326 }
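/*
 * Editorial example: if the map holds [0, 2) and [3, 5), and a new region
 * [2, 3) with matching uncharge info is linked between them, the first
 * pass merges it with [0, 2) into [0, 3), and the recursive call then
 * absorbs [3, 5), leaving a single [0, 5) entry.
 */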
327 
328 /* Must be called with resv->lock held. Calling this with count_only == true
329  * will count the number of pages to be added but will not modify the linked
330  * list. If regions_needed != NULL and count_only == true, then regions_needed
331  * will indicate the number of file_regions needed in the cache to add
332  * the regions for this range.
333  */
334 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
335 				     struct hugetlb_cgroup *h_cg,
336 				     struct hstate *h, long *regions_needed,
337 				     bool count_only)
338 {
339 	long add = 0;
340 	struct list_head *head = &resv->regions;
341 	long last_accounted_offset = f;
342 	struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
343 
344 	if (regions_needed)
345 		*regions_needed = 0;
346 
347 	/* In this loop, we essentially handle an entry for the range
348 	 * [last_accounted_offset, rg->from), at every iteration, with some
349 	 * bounds checking.
350 	 */
351 	list_for_each_entry_safe(rg, trg, head, link) {
352 		/* Skip irrelevant regions that start before our range. */
353 		if (rg->from < f) {
354 			/* If this region ends after the last accounted offset,
355 			 * then we need to update last_accounted_offset.
356 			 */
357 			if (rg->to > last_accounted_offset)
358 				last_accounted_offset = rg->to;
359 			continue;
360 		}
361 
362 		/* When we find a region that starts beyond our range, we've
363 		 * finished.
364 		 */
365 		if (rg->from > t)
366 			break;
367 
368 		/* Add an entry for last_accounted_offset -> rg->from, and
369 		 * update last_accounted_offset.
370 		 */
371 		if (rg->from > last_accounted_offset) {
372 			add += rg->from - last_accounted_offset;
373 			if (!count_only) {
374 				nrg = get_file_region_entry_from_cache(
375 					resv, last_accounted_offset, rg->from);
376 				record_hugetlb_cgroup_uncharge_info(h_cg, h,
377 								    resv, nrg);
378 				list_add(&nrg->link, rg->link.prev);
379 				coalesce_file_region(resv, nrg);
380 			} else if (regions_needed)
381 				*regions_needed += 1;
382 		}
383 
384 		last_accounted_offset = rg->to;
385 	}
386 
387 	/* Handle the case where our range extends beyond
388 	 * last_accounted_offset.
389 	 */
390 	if (last_accounted_offset < t) {
391 		add += t - last_accounted_offset;
392 		if (!count_only) {
393 			nrg = get_file_region_entry_from_cache(
394 				resv, last_accounted_offset, t);
395 			record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
396 			list_add(&nrg->link, rg->link.prev);
397 			coalesce_file_region(resv, nrg);
398 		} else if (regions_needed)
399 			*regions_needed += 1;
400 	}
401 
402 	VM_BUG_ON(add < 0);
403 	return add;
404 }
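/*
 * Editorial example: with a single existing region [3, 5), a call for the
 * range [0, 10) accounts for the gaps [0, 3) and [5, 10) and returns
 * add == 8; with count_only == true the list is untouched and
 * *regions_needed is set to 2, one cache entry per gap.
 */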
405 
406 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
407  */
408 static int allocate_file_region_entries(struct resv_map *resv,
409 					int regions_needed)
410 	__must_hold(&resv->lock)
411 {
412 	struct list_head allocated_regions;
413 	int to_allocate = 0, i = 0;
414 	struct file_region *trg = NULL, *rg = NULL;
415 
416 	VM_BUG_ON(regions_needed < 0);
417 
418 	INIT_LIST_HEAD(&allocated_regions);
419 
420 	/*
421 	 * Check for sufficient descriptors in the cache to accommodate
422 	 * the number of in progress add operations plus regions_needed.
423 	 *
424 	 * This is a while loop because when we drop the lock, some other call
425 	 * to region_add or region_del may have consumed some region_entries,
426 	 * so we keep looping here until we finally have enough entries for
427 	 * (adds_in_progress + regions_needed).
428 	 */
429 	while (resv->region_cache_count <
430 	       (resv->adds_in_progress + regions_needed)) {
431 		to_allocate = resv->adds_in_progress + regions_needed -
432 			      resv->region_cache_count;
433 
434 		/* At this point, we should have enough entries in the cache
435 		 * for all the existing adds_in_progress. We should only need
436 		 * to allocate for regions_needed.
437 		 */
438 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
439 
440 		spin_unlock(&resv->lock);
441 		for (i = 0; i < to_allocate; i++) {
442 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
443 			if (!trg)
444 				goto out_of_memory;
445 			list_add(&trg->link, &allocated_regions);
446 		}
447 
448 		spin_lock(&resv->lock);
449 
450 		list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
451 			list_del(&rg->link);
452 			list_add(&rg->link, &resv->region_cache);
453 			resv->region_cache_count++;
454 		}
455 	}
456 
457 	return 0;
458 
459 out_of_memory:
460 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
461 		list_del(&rg->link);
462 		kfree(rg);
463 	}
464 	return -ENOMEM;
465 }
466 
467 /*
468  * Add the huge page range represented by [f, t) to the reserve
469  * map.  Regions will be taken from the cache to fill in this range.
470  * Sufficient regions should exist in the cache due to the previous
471  * call to region_chg with the same range, but in some cases the cache will not
472  * have sufficient entries due to races with other code doing region_add or
473  * region_del.  The extra needed entries will be allocated.
474  *
475  * regions_needed is the out value provided by a previous call to region_chg.
476  *
477  * Return the number of new huge pages added to the map.  This number is greater
478  * than or equal to zero.  If file_region entries needed to be allocated for
479  * this operation and we were not able to allocate, it returns -ENOMEM.
480  * region_add of regions of length 1 never allocates file_regions and cannot
481  * fail; region_chg will always allocate at least 1 entry and a region_add for
482  * 1 page will only require at most 1 entry.
483  */
484 static long region_add(struct resv_map *resv, long f, long t,
485 		       long in_regions_needed, struct hstate *h,
486 		       struct hugetlb_cgroup *h_cg)
487 {
488 	long add = 0, actual_regions_needed = 0;
489 
490 	spin_lock(&resv->lock);
491 retry:
492 
493 	/* Count how many regions are actually needed to execute this add. */
494 	add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed,
495 				 true);
496 
497 	/*
498 	 * Check for sufficient descriptors in the cache to accommodate
499 	 * this add operation. Note that actual_regions_needed may be greater
500 	 * than in_regions_needed, as the resv_map may have been modified since
501 	 * the region_chg call. In this case, we need to make sure that we
502 	 * allocate extra entries, such that we have enough for all the
503 	 * existing adds_in_progress, plus the excess needed for this
504 	 * operation.
505 	 */
506 	if (actual_regions_needed > in_regions_needed &&
507 	    resv->region_cache_count <
508 		    resv->adds_in_progress +
509 			    (actual_regions_needed - in_regions_needed)) {
510 		/* region_add operation of range 1 should never need to
511 		 * allocate file_region entries.
512 		 */
513 		VM_BUG_ON(t - f <= 1);
514 
515 		if (allocate_file_region_entries(
516 			    resv, actual_regions_needed - in_regions_needed)) {
517 			return -ENOMEM;
518 		}
519 
520 		goto retry;
521 	}
522 
523 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false);
524 
525 	resv->adds_in_progress -= in_regions_needed;
526 
527 	spin_unlock(&resv->lock);
528 	VM_BUG_ON(add < 0);
529 	return add;
530 }
531 
532 /*
533  * Examine the existing reserve map and determine how many
534  * huge pages in the specified range [f, t) are NOT currently
535  * represented.  This routine is called before a subsequent
536  * call to region_add that will actually modify the reserve
537  * map to add the specified range [f, t).  region_chg does
538  * not change the number of huge pages represented by the
539  * map.  A number of new file_region structures are added to the cache as
540  * placeholders, for the subsequent region_add call to use. At least 1
541  * file_region structure is added.
542  *
543  * out_regions_needed is the number of regions added to the
544  * resv->adds_in_progress.  This value needs to be provided to a follow up call
545  * to region_add or region_abort for proper accounting.
546  *
547  * Returns the number of huge pages that need to be added to the existing
548  * reservation map for the range [f, t).  This number is greater than or
549  * equal to zero.  -ENOMEM is returned if a new file_region structure or
550  * cache entry is needed and cannot be allocated.
551  */
552 static long region_chg(struct resv_map *resv, long f, long t,
553 		       long *out_regions_needed)
554 {
555 	long chg = 0;
556 
557 	spin_lock(&resv->lock);
558 
559 	/* Count how many hugepages in this range are NOT represented. */
560 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
561 				       out_regions_needed, true);
562 
563 	if (*out_regions_needed == 0)
564 		*out_regions_needed = 1;
565 
566 	if (allocate_file_region_entries(resv, *out_regions_needed))
567 		return -ENOMEM;
568 
569 	resv->adds_in_progress += *out_regions_needed;
570 
571 	spin_unlock(&resv->lock);
572 	return chg;
573 }
574 
575 /*
576  * Abort the in progress add operation.  The adds_in_progress field
577  * of the resv_map keeps track of the operations in progress between
578  * calls to region_chg and region_add.  Operations are sometimes
579  * aborted after the call to region_chg.  In such cases, region_abort
580  * is called to decrement the adds_in_progress counter. regions_needed
581  * is the value returned by the region_chg call, it is used to decrement
582  * is the value returned by the region_chg call; it is used to decrement
583  *
584  * NOTE: The range arguments [f, t) are not needed or used in this
585  * routine.  They are kept to make reading the calling code easier as
586  * arguments will match the associated region_chg call.
587  */
588 static void region_abort(struct resv_map *resv, long f, long t,
589 			 long regions_needed)
590 {
591 	spin_lock(&resv->lock);
592 	VM_BUG_ON(!resv->region_cache_count);
593 	resv->adds_in_progress -= regions_needed;
594 	spin_unlock(&resv->lock);
595 }
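/*
 * Editorial sketch of the calling pattern tying the three routines above
 * together (error handling elided; 'resv', 'h', 'h_cg' and the failure
 * test are whatever the caller has in hand, named hypothetically here):
 *
 *	long needed, chg;
 *
 *	chg = region_chg(resv, f, t, &needed);
 *	if (chg < 0)
 *		return chg;
 *	...
 *	if (some_later_step_failed)
 *		region_abort(resv, f, t, needed);
 *	else
 *		region_add(resv, f, t, needed, h, h_cg);
 */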
596 
597 /*
598  * Delete the specified range [f, t) from the reserve map.  If the
599  * t parameter is LONG_MAX, this indicates that ALL regions after f
600  * should be deleted.  Locate the regions which intersect [f, t)
601  * and either trim, delete or split the existing regions.
602  *
603  * Returns the number of huge pages deleted from the reserve map.
604  * In the normal case, the return value is zero or more.  In the
605  * case where a region must be split, a new region descriptor must
606  * be allocated.  If the allocation fails, -ENOMEM will be returned.
607  * NOTE: If the parameter t == LONG_MAX, then we will never split
608  * a region and possibly return -ENOMEM.  Callers specifying
609  * t == LONG_MAX do not need to check for -ENOMEM error.
610  */
611 static long region_del(struct resv_map *resv, long f, long t)
612 {
613 	struct list_head *head = &resv->regions;
614 	struct file_region *rg, *trg;
615 	struct file_region *nrg = NULL;
616 	long del = 0;
617 
618 retry:
619 	spin_lock(&resv->lock);
620 	list_for_each_entry_safe(rg, trg, head, link) {
621 		/*
622 		 * Skip regions before the range to be deleted.  file_region
623 		 * ranges are normally of the form [from, to).  However, there
624 		 * may be a "placeholder" entry in the map which is of the form
625 		 * (from, to) with from == to.  Check for placeholder entries
626 		 * at the beginning of the range to be deleted.
627 		 */
628 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
629 			continue;
630 
631 		if (rg->from >= t)
632 			break;
633 
634 		if (f > rg->from && t < rg->to) { /* Must split region */
635 			/*
636 			 * Check for an entry in the cache before dropping
637 			 * lock and attempting allocation.
638 			 */
639 			if (!nrg &&
640 			    resv->region_cache_count > resv->adds_in_progress) {
641 				nrg = list_first_entry(&resv->region_cache,
642 							struct file_region,
643 							link);
644 				list_del(&nrg->link);
645 				resv->region_cache_count--;
646 			}
647 
648 			if (!nrg) {
649 				spin_unlock(&resv->lock);
650 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
651 				if (!nrg)
652 					return -ENOMEM;
653 				goto retry;
654 			}
655 
656 			del += t - f;
657 
658 			/* New entry for end of split region */
659 			nrg->from = t;
660 			nrg->to = rg->to;
661 
662 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
663 
664 			INIT_LIST_HEAD(&nrg->link);
665 
666 			/* Original entry is trimmed */
667 			rg->to = f;
668 
669 			hugetlb_cgroup_uncharge_file_region(
670 				resv, rg, nrg->to - nrg->from);
671 
672 			list_add(&nrg->link, &rg->link);
673 			nrg = NULL;
674 			break;
675 		}
676 
677 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
678 			del += rg->to - rg->from;
679 			hugetlb_cgroup_uncharge_file_region(resv, rg,
680 							    rg->to - rg->from);
681 			list_del(&rg->link);
682 			kfree(rg);
683 			continue;
684 		}
685 
686 		if (f <= rg->from) {	/* Trim beginning of region */
687 			hugetlb_cgroup_uncharge_file_region(resv, rg,
688 							    t - rg->from);
689 
690 			del += t - rg->from;
691 			rg->from = t;
692 		} else {		/* Trim end of region */
693 			hugetlb_cgroup_uncharge_file_region(resv, rg,
694 							    rg->to - f);
695 
696 			del += rg->to - f;
697 			rg->to = f;
698 		}
699 	}
700 
701 	spin_unlock(&resv->lock);
702 	kfree(nrg);
703 	return del;
704 }
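/*
 * Editorial example: deleting [4, 6) from a map holding [0, 10) takes the
 * "must split" path above; the original entry is trimmed to [0, 4), a new
 * descriptor [6, 10) is inserted after it, and 2 is returned.
 */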
705 
706 /*
707  * A rare out of memory error was encountered which prevented removal of
708  * the reserve map region for a page.  The huge page itself was freed
709  * and removed from the page cache.  This routine will adjust the subpool
710  * usage count, and the global reserve count if needed.  By incrementing
711  * these counts, the reserve map entry which could not be deleted will
712  * appear as a "reserved" entry instead of simply dangling with incorrect
713  * counts.
714  */
715 void hugetlb_fix_reserve_counts(struct inode *inode)
716 {
717 	struct hugepage_subpool *spool = subpool_inode(inode);
718 	long rsv_adjust;
719 
720 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
721 	if (rsv_adjust) {
722 		struct hstate *h = hstate_inode(inode);
723 
724 		hugetlb_acct_memory(h, 1);
725 	}
726 }
727 
728 /*
729  * Count and return the number of huge pages in the reserve map
730  * that intersect with the range [f, t).
731  */
732 static long region_count(struct resv_map *resv, long f, long t)
733 {
734 	struct list_head *head = &resv->regions;
735 	struct file_region *rg;
736 	long chg = 0;
737 
738 	spin_lock(&resv->lock);
739 	/* Locate each segment we overlap with, and count that overlap. */
740 	list_for_each_entry(rg, head, link) {
741 		long seg_from;
742 		long seg_to;
743 
744 		if (rg->to <= f)
745 			continue;
746 		if (rg->from >= t)
747 			break;
748 
749 		seg_from = max(rg->from, f);
750 		seg_to = min(rg->to, t);
751 
752 		chg += seg_to - seg_from;
753 	}
754 	spin_unlock(&resv->lock);
755 
756 	return chg;
757 }
758 
759 /*
760  * Convert the address within this vma to the page offset within
761  * the mapping, in pagecache page units; huge pages here.
762  */
763 static pgoff_t vma_hugecache_offset(struct hstate *h,
764 			struct vm_area_struct *vma, unsigned long address)
765 {
766 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
767 			(vma->vm_pgoff >> huge_page_order(h));
768 }
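/*
 * Editorial example: for a 2MB hstate (huge_page_shift == 21), an address
 * 6MB past vma->vm_start yields offset 3, to which vm_pgoff (kept in 4KB
 * base-page units) shifted down by huge_page_order == 9 is added to reach
 * the huge-page-sized pagecache index.
 */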
769 
770 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
771 				     unsigned long address)
772 {
773 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
774 }
775 EXPORT_SYMBOL_GPL(linear_hugepage_index);
776 
777 /*
778  * Return the size of the pages allocated when backing a VMA. In the majority
779  * Return the size of the pages allocated when backing a VMA. In the majority
780  * of cases this will be the same size as that used by the page table entries.
781 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
782 {
783 	if (vma->vm_ops && vma->vm_ops->pagesize)
784 		return vma->vm_ops->pagesize(vma);
785 	return PAGE_SIZE;
786 }
787 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
788 
789 /*
790  * Return the page size being used by the MMU to back a VMA. In the majority
791  * of cases, the page size used by the kernel matches the MMU size. On
792  * architectures where it differs, an architecture-specific 'strong'
793  * version of this symbol is required.
794  */
795 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
796 {
797 	return vma_kernel_pagesize(vma);
798 }
799 
800 /*
801  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
802  * bits of the reservation map pointer, which are always clear due to
803  * alignment.
804  */
805 #define HPAGE_RESV_OWNER    (1UL << 0)
806 #define HPAGE_RESV_UNMAPPED (1UL << 1)
807 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
808 
809 /*
810  * These helpers are used to track how many pages are reserved for
811  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
812  * is guaranteed to have its future faults succeed.
813  *
814  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
815  * the reserve counters are updated with the hugetlb_lock held. It is safe
816  * to reset the VMA at fork() time as it is not in use yet and there is no
817  * chance of the global counters getting corrupted as a result.
818  *
819  * The private mapping reservation is represented in a subtly different
820  * manner to a shared mapping.  A shared mapping has a region map associated
821  * with the underlying file; this region map represents the backing file
822  * pages which have ever had a reservation assigned, and it persists even
823  * after the page is instantiated.  A private mapping has a region map
824  * associated with the original mmap which is attached to all VMAs that
825  * reference it; this region map represents those offsets which have consumed
826  * a reservation, i.e. where pages have been instantiated.
827  */
828 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
829 {
830 	return (unsigned long)vma->vm_private_data;
831 }
832 
833 static void set_vma_private_data(struct vm_area_struct *vma,
834 							unsigned long value)
835 {
836 	vma->vm_private_data = (void *)value;
837 }
838 
839 static void
840 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
841 					  struct hugetlb_cgroup *h_cg,
842 					  struct hstate *h)
843 {
844 #ifdef CONFIG_CGROUP_HUGETLB
845 	if (!h_cg || !h) {
846 		resv_map->reservation_counter = NULL;
847 		resv_map->pages_per_hpage = 0;
848 		resv_map->css = NULL;
849 	} else {
850 		resv_map->reservation_counter =
851 			&h_cg->rsvd_hugepage[hstate_index(h)];
852 		resv_map->pages_per_hpage = pages_per_huge_page(h);
853 		resv_map->css = &h_cg->css;
854 	}
855 #endif
856 }
857 
858 struct resv_map *resv_map_alloc(void)
859 {
860 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
861 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
862 
863 	if (!resv_map || !rg) {
864 		kfree(resv_map);
865 		kfree(rg);
866 		return NULL;
867 	}
868 
869 	kref_init(&resv_map->refs);
870 	spin_lock_init(&resv_map->lock);
871 	INIT_LIST_HEAD(&resv_map->regions);
872 
873 	resv_map->adds_in_progress = 0;
874 	/*
875 	 * Initialize these to 0. On shared mappings, 0's here indicate these
876 	 * fields don't do cgroup accounting. On private mappings, these will be
877 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
878 	 * reservations are to be un-charged from here.
879 	 */
880 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
881 
882 	INIT_LIST_HEAD(&resv_map->region_cache);
883 	list_add(&rg->link, &resv_map->region_cache);
884 	resv_map->region_cache_count = 1;
885 
886 	return resv_map;
887 }
888 
889 void resv_map_release(struct kref *ref)
890 {
891 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
892 	struct list_head *head = &resv_map->region_cache;
893 	struct file_region *rg, *trg;
894 
895 	/* Clear out any active regions before we release the map. */
896 	region_del(resv_map, 0, LONG_MAX);
897 
898 	/* ... and any entries left in the cache */
899 	list_for_each_entry_safe(rg, trg, head, link) {
900 		list_del(&rg->link);
901 		kfree(rg);
902 	}
903 
904 	VM_BUG_ON(resv_map->adds_in_progress);
905 
906 	kfree(resv_map);
907 }
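/*
 * Editorial note: a resv_map is reference counted, and callers drop their
 * reference with kref_put(), which invokes resv_map_release() on the
 * final put:
 *
 *	kref_put(&resv_map->refs, resv_map_release);
 */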
908 
909 static inline struct resv_map *inode_resv_map(struct inode *inode)
910 {
911 	/*
912 	 * At inode evict time, i_mapping may not point to the original
913 	 * address space within the inode.  This original address space
914 	 * contains the pointer to the resv_map.  So, always use the
915 	 * address space embedded within the inode.
916 	 * The VERY common case is inode->mapping == &inode->i_data, but
917 	 * this may not be true for device special inodes.
918 	 */
919 	return (struct resv_map *)(&inode->i_data)->private_data;
920 }
921 
922 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
923 {
924 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
925 	if (vma->vm_flags & VM_MAYSHARE) {
926 		struct address_space *mapping = vma->vm_file->f_mapping;
927 		struct inode *inode = mapping->host;
928 
929 		return inode_resv_map(inode);
930 
931 	} else {
932 		return (struct resv_map *)(get_vma_private_data(vma) &
933 							~HPAGE_RESV_MASK);
934 	}
935 }
936 
937 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
938 {
939 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
940 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
941 
942 	set_vma_private_data(vma, (get_vma_private_data(vma) &
943 				HPAGE_RESV_MASK) | (unsigned long)map);
944 }
945 
946 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
947 {
948 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
949 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
950 
951 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
952 }
953 
954 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
955 {
956 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
957 
958 	return (get_vma_private_data(vma) & flag) != 0;
959 }
960 
961 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
962 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
963 {
964 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
965 	if (!(vma->vm_flags & VM_MAYSHARE))
966 		vma->vm_private_data = (void *)0;
967 }
968 
969 /* Returns true if the VMA has associated reserve pages */
970 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
971 {
972 	if (vma->vm_flags & VM_NORESERVE) {
973 		/*
974 		 * This address is already reserved by another process (chg == 0),
975 		 * so we should decrement the reserved count. Without decrementing,
976 		 * the reserve count would remain after releasing the inode, because
977 		 * the allocated page will go into the page cache and be regarded as
978 		 * coming from the reserved pool in the releasing step.  Currently,
979 		 * we don't have any other solution to deal with this situation
980 		 * properly, so add this work-around here.
981 		 */
982 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
983 			return true;
984 		else
985 			return false;
986 	}
987 
988 	/* Shared mappings always use reserves */
989 	if (vma->vm_flags & VM_MAYSHARE) {
990 		/*
991 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
992 		 * be a region map for all pages.  The only situation where
993 		 * there is no region map is if a hole was punched via
994 		 * fallocate.  In this case, there really are no reserves to
995 		 * use.  This situation is indicated if chg != 0.
996 		 */
997 		if (chg)
998 			return false;
999 		else
1000 			return true;
1001 	}
1002 
1003 	/*
1004 	 * Only the process that called mmap() has reserves for
1005 	 * private mappings.
1006 	 */
1007 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1008 		/*
1009 		 * Like the shared case above, a hole punch or truncate
1010 		 * could have been performed on the private mapping.
1011 		 * Examine the value of chg to determine if reserves
1012 		 * actually exist or were previously consumed.
1013 		 * Very Subtle - The value of chg comes from a previous
1014 		 * call to vma_needs_reserves().  The reserve map for
1015 		 * private mappings has different (opposite) semantics
1016 		 * than that of shared mappings.  vma_needs_reserves()
1017 		 * has already taken this difference in semantics into
1018 		 * account.  Therefore, the meaning of chg is the same
1019 		 * as in the shared case above.  Code could easily be
1020 		 * combined, but keeping it separate draws attention to
1021 		 * subtle differences.
1022 		 */
1023 		if (chg)
1024 			return false;
1025 		else
1026 			return true;
1027 	}
1028 
1029 	return false;
1030 }
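/*
 * Editorial summary of the cases above: reserves are used only when
 *
 *	VM_NORESERVE:		shared mapping with chg == 0
 *	VM_MAYSHARE:		chg == 0 (a region map entry exists)
 *	HPAGE_RESV_OWNER:	chg == 0 (reserve map entry still present)
 *	anything else:		never (e.g. a child after fork())
 */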
1031 
1032 static void enqueue_huge_page(struct hstate *h, struct page *page)
1033 {
1034 	int nid = page_to_nid(page);
1035 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1036 	h->free_huge_pages++;
1037 	h->free_huge_pages_node[nid]++;
1038 }
1039 
1040 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1041 {
1042 	struct page *page;
1043 
1044 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
1045 		if (!PageHWPoison(page))
1046 			break;
1047 	/*
1048 	 * If no usable (non-poisoned) free hugepage is found on the list,
1049 	 * the allocation fails.
1050 	 */
1051 	if (&h->hugepage_freelists[nid] == &page->lru)
1052 		return NULL;
1053 	list_move(&page->lru, &h->hugepage_activelist);
1054 	set_page_refcounted(page);
1055 	h->free_huge_pages--;
1056 	h->free_huge_pages_node[nid]--;
1057 	return page;
1058 }
1059 
1060 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1061 		nodemask_t *nmask)
1062 {
1063 	unsigned int cpuset_mems_cookie;
1064 	struct zonelist *zonelist;
1065 	struct zone *zone;
1066 	struct zoneref *z;
1067 	int node = NUMA_NO_NODE;
1068 
1069 	zonelist = node_zonelist(nid, gfp_mask);
1070 
1071 retry_cpuset:
1072 	cpuset_mems_cookie = read_mems_allowed_begin();
1073 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1074 		struct page *page;
1075 
1076 		if (!cpuset_zone_allowed(zone, gfp_mask))
1077 			continue;
1078 		/*
1079 		 * no need to ask again on the same node. Pool is node rather than
1080 		 * zone aware
1081 		 */
1082 		if (zone_to_nid(zone) == node)
1083 			continue;
1084 		node = zone_to_nid(zone);
1085 
1086 		page = dequeue_huge_page_node_exact(h, node);
1087 		if (page)
1088 			return page;
1089 	}
1090 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1091 		goto retry_cpuset;
1092 
1093 	return NULL;
1094 }
1095 
1096 /* Movability of hugepages depends on migration support. */
1097 static inline gfp_t htlb_alloc_mask(struct hstate *h)
1098 {
1099 	if (hugepage_movable_supported(h))
1100 		return GFP_HIGHUSER_MOVABLE;
1101 	else
1102 		return GFP_HIGHUSER;
1103 }
1104 
1105 static struct page *dequeue_huge_page_vma(struct hstate *h,
1106 				struct vm_area_struct *vma,
1107 				unsigned long address, int avoid_reserve,
1108 				long chg)
1109 {
1110 	struct page *page;
1111 	struct mempolicy *mpol;
1112 	gfp_t gfp_mask;
1113 	nodemask_t *nodemask;
1114 	int nid;
1115 
1116 	/*
1117 	 * A child process with MAP_PRIVATE mappings created by its parent
1118 	 * has no page reserves. This check ensures that reservations are
1119 	 * not "stolen". The child may still get SIGKILLed.
1120 	 */
1121 	if (!vma_has_reserves(vma, chg) &&
1122 			h->free_huge_pages - h->resv_huge_pages == 0)
1123 		goto err;
1124 
1125 	/* If reserves cannot be used, ensure enough pages are in the pool */
1126 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1127 		goto err;
1128 
1129 	gfp_mask = htlb_alloc_mask(h);
1130 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1131 	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1132 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1133 		SetPagePrivate(page);
1134 		h->resv_huge_pages--;
1135 	}
1136 
1137 	mpol_cond_put(mpol);
1138 	return page;
1139 
1140 err:
1141 	return NULL;
1142 }
1143 
1144 /*
1145  * common helper functions for hstate_next_node_to_{alloc|free}.
1146  * We may have allocated or freed a huge page based on a different
1147  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1148  * be outside of *nodes_allowed.  Ensure that we use an allowed
1149  * node for alloc or free.
1150  */
1151 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1152 {
1153 	nid = next_node_in(nid, *nodes_allowed);
1154 	VM_BUG_ON(nid >= MAX_NUMNODES);
1155 
1156 	return nid;
1157 }
1158 
1159 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1160 {
1161 	if (!node_isset(nid, *nodes_allowed))
1162 		nid = next_node_allowed(nid, nodes_allowed);
1163 	return nid;
1164 }
1165 
1166 /*
1167  * returns the previously saved node ["this node"] from which to
1168  * allocate a persistent huge page for the pool and advances the
1169  * next node from which to allocate, handling wrap at end of node
1170  * mask.
1171  */
1172 static int hstate_next_node_to_alloc(struct hstate *h,
1173 					nodemask_t *nodes_allowed)
1174 {
1175 	int nid;
1176 
1177 	VM_BUG_ON(!nodes_allowed);
1178 
1179 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1180 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1181 
1182 	return nid;
1183 }
1184 
1185 /*
1186  * helper for free_pool_huge_page() - return the previously saved
1187  * node ["this node"] from which to free a huge page.  Advance the
1188  * next node id whether or not we find a free huge page to free so
1189  * that the next attempt to free addresses the next node.
1190  */
1191 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1192 {
1193 	int nid;
1194 
1195 	VM_BUG_ON(!nodes_allowed);
1196 
1197 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1198 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1199 
1200 	return nid;
1201 }
1202 
1203 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
1204 	for (nr_nodes = nodes_weight(*mask);				\
1205 		nr_nodes > 0 &&						\
1206 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
1207 		nr_nodes--)
1208 
1209 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1210 	for (nr_nodes = nodes_weight(*mask);				\
1211 		nr_nodes > 0 &&						\
1212 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1213 		nr_nodes--)
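/*
 * Editorial sketch: round-robin allocation over the allowed nodes with the
 * macro above, mirroring alloc_pool_huge_page() further down:
 *
 *	struct page *page = NULL;
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node,
 *					     nodes_allowed, NULL);
 *		if (page)
 *			break;
 *	}
 */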
1214 
1215 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1216 static void destroy_compound_gigantic_page(struct page *page,
1217 					unsigned int order)
1218 {
1219 	int i;
1220 	int nr_pages = 1 << order;
1221 	struct page *p = page + 1;
1222 
1223 	atomic_set(compound_mapcount_ptr(page), 0);
1224 	if (hpage_pincount_available(page))
1225 		atomic_set(compound_pincount_ptr(page), 0);
1226 
1227 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1228 		clear_compound_head(p);
1229 		set_page_refcounted(p);
1230 	}
1231 
1232 	set_compound_order(page, 0);
1233 	__ClearPageHead(page);
1234 }
1235 
1236 static void free_gigantic_page(struct page *page, unsigned int order)
1237 {
1238 	/*
1239 	 * If the page isn't allocated using the cma allocator,
1240 	 * cma_release() returns false.
1241 	 */
1242 #ifdef CONFIG_CMA
1243 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1244 		return;
1245 #endif
1246 
1247 	free_contig_range(page_to_pfn(page), 1 << order);
1248 }
1249 
1250 #ifdef CONFIG_CONTIG_ALLOC
1251 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1252 		int nid, nodemask_t *nodemask)
1253 {
1254 	unsigned long nr_pages = 1UL << huge_page_order(h);
1255 
1256 #ifdef CONFIG_CMA
1257 	{
1258 		struct page *page;
1259 		int node;
1260 
1261 		for_each_node_mask(node, *nodemask) {
1262 			if (!hugetlb_cma[node])
1263 				continue;
1264 
1265 			page = cma_alloc(hugetlb_cma[node], nr_pages,
1266 					 huge_page_order(h), true);
1267 			if (page)
1268 				return page;
1269 		}
1270 	}
1271 #endif
1272 
1273 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1274 }
1275 
1276 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1277 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1278 #else /* !CONFIG_CONTIG_ALLOC */
1279 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1280 					int nid, nodemask_t *nodemask)
1281 {
1282 	return NULL;
1283 }
1284 #endif /* CONFIG_CONTIG_ALLOC */
1285 
1286 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1287 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1288 					int nid, nodemask_t *nodemask)
1289 {
1290 	return NULL;
1291 }
1292 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1293 static inline void destroy_compound_gigantic_page(struct page *page,
1294 						unsigned int order) { }
1295 #endif
1296 
1297 static void update_and_free_page(struct hstate *h, struct page *page)
1298 {
1299 	int i;
1300 
1301 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1302 		return;
1303 
1304 	h->nr_huge_pages--;
1305 	h->nr_huge_pages_node[page_to_nid(page)]--;
1306 	for (i = 0; i < pages_per_huge_page(h); i++) {
1307 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1308 				1 << PG_referenced | 1 << PG_dirty |
1309 				1 << PG_active | 1 << PG_private |
1310 				1 << PG_writeback);
1311 	}
1312 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1313 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1314 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1315 	set_page_refcounted(page);
1316 	if (hstate_is_gigantic(h)) {
1317 		/*
1318 		 * Temporarily drop the hugetlb_lock, because
1319 		 * we might block in free_gigantic_page().
1320 		 */
1321 		spin_unlock(&hugetlb_lock);
1322 		destroy_compound_gigantic_page(page, huge_page_order(h));
1323 		free_gigantic_page(page, huge_page_order(h));
1324 		spin_lock(&hugetlb_lock);
1325 	} else {
1326 		__free_pages(page, huge_page_order(h));
1327 	}
1328 }
1329 
1330 struct hstate *size_to_hstate(unsigned long size)
1331 {
1332 	struct hstate *h;
1333 
1334 	for_each_hstate(h) {
1335 		if (huge_page_size(h) == size)
1336 			return h;
1337 	}
1338 	return NULL;
1339 }
1340 
1341 /*
1342  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1343  * to hstate->hugepage_activelist.)
1344  *
1345  * This function can be called for tail pages, but never returns true for them.
1346  */
1347 bool page_huge_active(struct page *page)
1348 {
1349 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1350 	return PageHead(page) && PagePrivate(&page[1]);
1351 }
1352 
1353 /* never called for tail page */
1354 static void set_page_huge_active(struct page *page)
1355 {
1356 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1357 	SetPagePrivate(&page[1]);
1358 }
1359 
1360 static void clear_page_huge_active(struct page *page)
1361 {
1362 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1363 	ClearPagePrivate(&page[1]);
1364 }
1365 
1366 /*
1367  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1368  * code
1369  */
1370 static inline bool PageHugeTemporary(struct page *page)
1371 {
1372 	if (!PageHuge(page))
1373 		return false;
1374 
1375 	return (unsigned long)page[2].mapping == -1U;
1376 }
1377 
1378 static inline void SetPageHugeTemporary(struct page *page)
1379 {
1380 	page[2].mapping = (void *)-1U;
1381 }
1382 
1383 static inline void ClearPageHugeTemporary(struct page *page)
1384 {
1385 	page[2].mapping = NULL;
1386 }
1387 
1388 static void __free_huge_page(struct page *page)
1389 {
1390 	/*
1391 	 * Can't pass hstate in here because it is called from the
1392 	 * compound page destructor.
1393 	 */
1394 	struct hstate *h = page_hstate(page);
1395 	int nid = page_to_nid(page);
1396 	struct hugepage_subpool *spool =
1397 		(struct hugepage_subpool *)page_private(page);
1398 	bool restore_reserve;
1399 
1400 	VM_BUG_ON_PAGE(page_count(page), page);
1401 	VM_BUG_ON_PAGE(page_mapcount(page), page);
1402 
1403 	set_page_private(page, 0);
1404 	page->mapping = NULL;
1405 	restore_reserve = PagePrivate(page);
1406 	ClearPagePrivate(page);
1407 
1408 	/*
1409 	 * If PagePrivate() was set on page, page allocation consumed a
1410 	 * reservation.  If the page was associated with a subpool, there
1411 	 * would have been a page reserved in the subpool before allocation
1412 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1413 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1414 	 * remove the reserved page from the subpool.
1415 	 */
1416 	if (!restore_reserve) {
1417 		/*
1418 		 * A return code of zero implies that the subpool will be
1419 		 * under its minimum size if the reservation is not restored
1420 		 * after the page is freed.  Therefore, force the restore_reserve
1421 		 * operation.
1422 		 */
1423 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1424 			restore_reserve = true;
1425 	}
1426 
1427 	spin_lock(&hugetlb_lock);
1428 	clear_page_huge_active(page);
1429 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1430 				     pages_per_huge_page(h), page);
1431 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1432 					  pages_per_huge_page(h), page);
1433 	if (restore_reserve)
1434 		h->resv_huge_pages++;
1435 
1436 	if (PageHugeTemporary(page)) {
1437 		list_del(&page->lru);
1438 		ClearPageHugeTemporary(page);
1439 		update_and_free_page(h, page);
1440 	} else if (h->surplus_huge_pages_node[nid]) {
1441 		/* remove the page from active list */
1442 		list_del(&page->lru);
1443 		update_and_free_page(h, page);
1444 		h->surplus_huge_pages--;
1445 		h->surplus_huge_pages_node[nid]--;
1446 	} else {
1447 		arch_clear_hugepage_flags(page);
1448 		enqueue_huge_page(h, page);
1449 	}
1450 	spin_unlock(&hugetlb_lock);
1451 }
1452 
1453 /*
1454  * As free_huge_page() can be called from a non-task context, we have
1455  * to defer the actual freeing in a workqueue to prevent potential
1456  * hugetlb_lock deadlock.
1457  *
1458  * free_hpage_workfn() locklessly retrieves the linked list of pages to
1459  * be freed and frees them one-by-one. As the page->mapping pointer is
1460  * going to be cleared in __free_huge_page() anyway, it is reused as the
1461  * llist_node structure of a lockless linked list of huge pages to be freed.
1462  */
1463 static LLIST_HEAD(hpage_freelist);
1464 
1465 static void free_hpage_workfn(struct work_struct *work)
1466 {
1467 	struct llist_node *node;
1468 	struct page *page;
1469 
1470 	node = llist_del_all(&hpage_freelist);
1471 
1472 	while (node) {
1473 		page = container_of((struct address_space **)node,
1474 				     struct page, mapping);
1475 		node = node->next;
1476 		__free_huge_page(page);
1477 	}
1478 }
1479 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1480 
1481 void free_huge_page(struct page *page)
1482 {
1483 	/*
1484 	 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
1485 	 */
1486 	if (!in_task()) {
1487 		/*
1488 		 * Only call schedule_work() if hpage_freelist is previously
1489 		 * empty. Otherwise, schedule_work() had been called but the
1490 		 * workfn hasn't retrieved the list yet.
1491 		 */
1492 		if (llist_add((struct llist_node *)&page->mapping,
1493 			      &hpage_freelist))
1494 			schedule_work(&free_hpage_work);
1495 		return;
1496 	}
1497 
1498 	__free_huge_page(page);
1499 }
1500 
1501 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1502 {
1503 	INIT_LIST_HEAD(&page->lru);
1504 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1505 	spin_lock(&hugetlb_lock);
1506 	set_hugetlb_cgroup(page, NULL);
1507 	set_hugetlb_cgroup_rsvd(page, NULL);
1508 	h->nr_huge_pages++;
1509 	h->nr_huge_pages_node[nid]++;
1510 	spin_unlock(&hugetlb_lock);
1511 }
1512 
1513 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1514 {
1515 	int i;
1516 	int nr_pages = 1 << order;
1517 	struct page *p = page + 1;
1518 
1519 	/* we rely on prep_new_huge_page to set the destructor */
1520 	set_compound_order(page, order);
1521 	__ClearPageReserved(page);
1522 	__SetPageHead(page);
1523 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1524 		/*
1525 		 * For gigantic hugepages allocated through bootmem at
1526 		 * boot, it's safer to be consistent with the not-gigantic
1527 		 * hugepages and clear the PG_reserved bit from all tail pages
1528 		 * too.  Otherwise drivers using get_user_pages() to access tail
1529 		 * pages may get the reference counting wrong if they see
1530 		 * PG_reserved set on a tail page (despite the head page not
1531 		 * having PG_reserved set).  Enforcing this consistency between
1532 		 * head and tail pages allows drivers to optimize away a check
1533 		 * on the head page when they need to know if put_page() is needed
1534 		 * after get_user_pages().
1535 		 */
1536 		__ClearPageReserved(p);
1537 		set_page_count(p, 0);
1538 		set_compound_head(p, page);
1539 	}
1540 	atomic_set(compound_mapcount_ptr(page), -1);
1541 
1542 	if (hpage_pincount_available(page))
1543 		atomic_set(compound_pincount_ptr(page), 0);
1544 }
1545 
1546 /*
1547  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1548  * transparent huge pages.  See the PageTransHuge() documentation for more
1549  * details.
1550  */
1551 int PageHuge(struct page *page)
1552 {
1553 	if (!PageCompound(page))
1554 		return 0;
1555 
1556 	page = compound_head(page);
1557 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1558 }
1559 EXPORT_SYMBOL_GPL(PageHuge);
1560 
1561 /*
1562  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1563  * normal or transparent huge pages.
1564  */
1565 int PageHeadHuge(struct page *page_head)
1566 {
1567 	if (!PageHead(page_head))
1568 		return 0;
1569 
1570 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1571 }
1572 
1573 /*
1574  * Find address_space associated with hugetlbfs page.
1575  * Upon entry the page is locked and 'was' mapped, although the mapped state
1576  * could change.  If necessary, use anon_vma to find the vma and associated
1577  * address space.  The returned mapping may be stale, but it cannot be
1578  * invalid as page lock (which is held) is required to destroy mapping.
1579  */
1580 static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
1581 {
1582 	struct anon_vma *anon_vma;
1583 	pgoff_t pgoff_start, pgoff_end;
1584 	struct anon_vma_chain *avc;
1585 	struct address_space *mapping = page_mapping(hpage);
1586 
1587 	/* Simple file based mapping */
1588 	if (mapping)
1589 		return mapping;
1590 
1591 	/*
1592 	 * Even anonymous hugetlbfs mappings are associated with an
1593 	 * underlying hugetlbfs file (see hugetlb_file_setup in mmap
1594 	 * code).  Find a vma associated with the anonymous vma, and
1595 	 * use the file pointer to get address_space.
1596 	 */
1597 	anon_vma = page_lock_anon_vma_read(hpage);
1598 	if (!anon_vma)
1599 		return mapping;  /* NULL */
1600 
1601 	/* Use first found vma */
1602 	pgoff_start = page_to_pgoff(hpage);
1603 	pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
1604 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1605 					pgoff_start, pgoff_end) {
1606 		struct vm_area_struct *vma = avc->vma;
1607 
1608 		mapping = vma->vm_file->f_mapping;
1609 		break;
1610 	}
1611 
1612 	anon_vma_unlock_read(anon_vma);
1613 	return mapping;
1614 }
1615 
1616 /*
1617  * Find and lock address space (mapping) in write mode.
1618  *
1619  * Upon entry, the page is locked which allows us to find the mapping
1620  * even in the case of an anon page.  However, locking order dictates
1621  * the i_mmap_rwsem be acquired BEFORE the page lock.  This is hugetlbfs
1622  * specific.  So, we first try to lock the sema while still holding the
1623  * page lock.  If this works, great!  If not, then we need to drop the
1624  * page lock and then acquire i_mmap_rwsem and reacquire page lock.  Of
1625  * course, need to revalidate state along the way.
1626  */
1627 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1628 {
1629 	struct address_space *mapping, *mapping2;
1630 
1631 	mapping = _get_hugetlb_page_mapping(hpage);
1632 retry:
1633 	if (!mapping)
1634 		return mapping;
1635 
1636 	/*
1637 	 * If no contention, take lock and return
1638 	 */
1639 	if (i_mmap_trylock_write(mapping))
1640 		return mapping;
1641 
1642 	/*
1643 	 * Must drop page lock and wait on mapping sema.
1644 	 * Note:  Once page lock is dropped, mapping could become invalid.
1645 	 * As a hack, increase map count until we lock page again.
1646 	 */
1647 	atomic_inc(&hpage->_mapcount);
1648 	unlock_page(hpage);
1649 	i_mmap_lock_write(mapping);
1650 	lock_page(hpage);
1651 	atomic_add_negative(-1, &hpage->_mapcount);
1652 
1653 	/* verify page is still mapped */
1654 	if (!page_mapped(hpage)) {
1655 		i_mmap_unlock_write(mapping);
1656 		return NULL;
1657 	}
1658 
1659 	/*
1660 	 * Get address space again and verify it is the same one
1661 	 * we locked.  If not, drop lock and retry.
1662 	 */
1663 	mapping2 = _get_hugetlb_page_mapping(hpage);
1664 	if (mapping2 != mapping) {
1665 		i_mmap_unlock_write(mapping);
1666 		mapping = mapping2;
1667 		goto retry;
1668 	}
1669 
1670 	return mapping;
1671 }
1672 
1673 pgoff_t __basepage_index(struct page *page)
1674 {
1675 	struct page *page_head = compound_head(page);
1676 	pgoff_t index = page_index(page_head);
1677 	unsigned long compound_idx;
1678 
1679 	if (!PageHuge(page_head))
1680 		return page_index(page);
1681 
1682 	if (compound_order(page_head) >= MAX_ORDER)
1683 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1684 	else
1685 		compound_idx = page - page_head;
1686 
1687 	return (index << compound_order(page_head)) + compound_idx;
1688 }
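/*
 * Editorial example: for a 2MB compound page (compound_order == 9) at
 * pagecache index 4, the tail page 5 pages past the head gets base-page
 * index (4 << 9) + 5 == 2053.
 */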
1689 
1690 static struct page *alloc_buddy_huge_page(struct hstate *h,
1691 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1692 		nodemask_t *node_alloc_noretry)
1693 {
1694 	int order = huge_page_order(h);
1695 	struct page *page;
1696 	bool alloc_try_hard = true;
1697 
1698 	/*
1699 	 * By default we always try hard to allocate the page with
1700 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1701 	 * a loop (to adjust global huge page counts) and previous allocation
1702 	 * failed, do not continue to try hard on the same node.  Use the
1703 	 * node_alloc_noretry bitmap to manage this state information.
1704 	 */
1705 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1706 		alloc_try_hard = false;
1707 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1708 	if (alloc_try_hard)
1709 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1710 	if (nid == NUMA_NO_NODE)
1711 		nid = numa_mem_id();
1712 	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1713 	if (page)
1714 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1715 	else
1716 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1717 
1718 	/*
1719 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1720 	 * indicates an overall state change.  Clear the bit so that we resume
1721 	 * normal 'try hard' allocations.
1722 	 */
1723 	if (node_alloc_noretry && page && !alloc_try_hard)
1724 		node_clear(nid, *node_alloc_noretry);
1725 
1726 	/*
1727 	 * If we tried hard to get a page but failed, set bit so that
1728 	 * subsequent attempts will not try as hard until there is an
1729 	 * overall state change.
1730 	 */
1731 	if (node_alloc_noretry && !page && alloc_try_hard)
1732 		node_set(nid, *node_alloc_noretry);
1733 
1734 	return page;
1735 }
1736 
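/*
 * A minimal sketch of the caller-side pattern assumed by
 * node_alloc_noretry (set_max_huge_pages() below does essentially
 * this; 'more_pages_needed' is a stand-in condition):
 *
 *	NODEMASK_ALLOC(nodemask_t, noretry, GFP_KERNEL);
 *
 *	if (noretry)
 *		nodes_clear(*noretry);
 *	while (more_pages_needed)
 *		if (!alloc_pool_huge_page(h, nodes_allowed, noretry))
 *			break;
 *	NODEMASK_FREE(noretry);
 *
 * A hard allocation failure sets the node's bit, so later passes fall
 * back to cheap, non-retrying allocations until one succeeds again.
 */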
1737 /*
1738  * Common helper to allocate a fresh hugetlb page. All specific allocators
1739  * should use this function to get new hugetlb pages
1740  */
1741 static struct page *alloc_fresh_huge_page(struct hstate *h,
1742 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1743 		nodemask_t *node_alloc_noretry)
1744 {
1745 	struct page *page;
1746 
1747 	if (hstate_is_gigantic(h))
1748 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1749 	else
1750 		page = alloc_buddy_huge_page(h, gfp_mask,
1751 				nid, nmask, node_alloc_noretry);
1752 	if (!page)
1753 		return NULL;
1754 
1755 	if (hstate_is_gigantic(h))
1756 		prep_compound_gigantic_page(page, huge_page_order(h));
1757 	prep_new_huge_page(h, page, page_to_nid(page));
1758 
1759 	return page;
1760 }
1761 
1762 /*
1763  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1764  * manner.
1765  */
1766 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1767 				nodemask_t *node_alloc_noretry)
1768 {
1769 	struct page *page;
1770 	int nr_nodes, node;
1771 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1772 
1773 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1774 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1775 						node_alloc_noretry);
1776 		if (page)
1777 			break;
1778 	}
1779 
1780 	if (!page)
1781 		return 0;
1782 
1783 	put_page(page); /* free it into the hugepage allocator */
1784 
1785 	return 1;
1786 }
1787 
1788 /*
1789  * Free a huge page from the pool, starting at the next node to free.
1790  * Attempt to keep persistent huge pages more or less
1791  * balanced over allowed nodes.
1792  * Called with hugetlb_lock locked.
1793  */
1794 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1795 							 bool acct_surplus)
1796 {
1797 	int nr_nodes, node;
1798 	int ret = 0;
1799 
1800 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1801 		/*
1802 		 * If we're returning unused surplus pages, only examine
1803 		 * nodes with surplus pages.
1804 		 */
1805 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1806 		    !list_empty(&h->hugepage_freelists[node])) {
1807 			struct page *page =
1808 				list_entry(h->hugepage_freelists[node].next,
1809 					  struct page, lru);
1810 			list_del(&page->lru);
1811 			h->free_huge_pages--;
1812 			h->free_huge_pages_node[node]--;
1813 			if (acct_surplus) {
1814 				h->surplus_huge_pages--;
1815 				h->surplus_huge_pages_node[node]--;
1816 			}
1817 			update_and_free_page(h, page);
1818 			ret = 1;
1819 			break;
1820 		}
1821 	}
1822 
1823 	return ret;
1824 }
1825 
1826 /*
1827  * Dissolve a given free hugepage into free buddy pages. This function does
1828  * nothing for in-use hugepages and non-hugepages.
1829  * This function returns values as follows:
1830  *
1831  *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
1832  *          (allocated or reserved.)
1833  *       0: successfully dissolved free hugepages or the page is not a
1834  *          hugepage (considered as already dissolved)
1835  */
1836 int dissolve_free_huge_page(struct page *page)
1837 {
1838 	int rc = -EBUSY;
1839 
1840 	/* Avoid disrupting the normal path by needlessly taking hugetlb_lock */
1841 	if (!PageHuge(page))
1842 		return 0;
1843 
1844 	spin_lock(&hugetlb_lock);
1845 	if (!PageHuge(page)) {
1846 		rc = 0;
1847 		goto out;
1848 	}
1849 
1850 	if (!page_count(page)) {
1851 		struct page *head = compound_head(page);
1852 		struct hstate *h = page_hstate(head);
1853 		int nid = page_to_nid(head);
1854 		if (h->free_huge_pages - h->resv_huge_pages == 0)
1855 			goto out;
1856 		/*
1857 		 * Move the PageHWPoison flag from the head page to the raw
1858 		 * error page, making all subpages except the error page reusable.
1859 		 */
1860 		if (PageHWPoison(head) && page != head) {
1861 			SetPageHWPoison(page);
1862 			ClearPageHWPoison(head);
1863 		}
1864 		list_del(&head->lru);
1865 		h->free_huge_pages--;
1866 		h->free_huge_pages_node[nid]--;
1867 		h->max_huge_pages--;
1868 		update_and_free_page(h, head);
1869 		rc = 0;
1870 	}
1871 out:
1872 	spin_unlock(&hugetlb_lock);
1873 	return rc;
1874 }
1875 
1876 /*
1877  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1878  * make specified memory blocks removable from the system.
1879  * Note that this will dissolve a free gigantic hugepage completely, if any
1880  * part of it lies within the given range.
1881  * Also note that if dissolve_free_huge_page() returns with an error, all
1882  * free hugepages that were dissolved before that error are lost.
1883  */
1884 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1885 {
1886 	unsigned long pfn;
1887 	struct page *page;
1888 	int rc = 0;
1889 
1890 	if (!hugepages_supported())
1891 		return rc;
1892 
1893 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1894 		page = pfn_to_page(pfn);
1895 		rc = dissolve_free_huge_page(page);
1896 		if (rc)
1897 			break;
1898 	}
1899 
1900 	return rc;
1901 }
1902 
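/*
 * Example (illustrative): with 2MB and 1GB hstates registered,
 * minimum_order is 9, so the loop above probes one pfn in every 512.
 * That is sufficient because huge pages are naturally aligned to at
 * least minimum_order, and dissolve_free_huge_page() resolves any pfn
 * inside a huge page to its compound head.
 */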
1903 /*
1904  * Allocates a fresh surplus page from the page allocator.
1905  */
1906 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1907 		int nid, nodemask_t *nmask)
1908 {
1909 	struct page *page = NULL;
1910 
1911 	if (hstate_is_gigantic(h))
1912 		return NULL;
1913 
1914 	spin_lock(&hugetlb_lock);
1915 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1916 		goto out_unlock;
1917 	spin_unlock(&hugetlb_lock);
1918 
1919 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1920 	if (!page)
1921 		return NULL;
1922 
1923 	spin_lock(&hugetlb_lock);
1924 	/*
1925 	 * We could have raced with a pool size change.
1926 	 * Double check that and simply deallocate the new page
1927 	 * if we would end up overcommitting the surplus. Abuse the
1928 	 * temporary page flag to work around the nasty free_huge_page
1929 	 * code flow.
1930 	 */
1931 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1932 		SetPageHugeTemporary(page);
1933 		spin_unlock(&hugetlb_lock);
1934 		put_page(page);
1935 		return NULL;
1936 	} else {
1937 		h->surplus_huge_pages++;
1938 		h->surplus_huge_pages_node[page_to_nid(page)]++;
1939 	}
1940 
1941 out_unlock:
1942 	spin_unlock(&hugetlb_lock);
1943 
1944 	return page;
1945 }
1946 
1947 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1948 				     int nid, nodemask_t *nmask)
1949 {
1950 	struct page *page;
1951 
1952 	if (hstate_is_gigantic(h))
1953 		return NULL;
1954 
1955 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1956 	if (!page)
1957 		return NULL;
1958 
1959 	/*
1960 	 * We do not account these pages as surplus because they are only
1961 	 * temporary and will be released properly on the last reference
1962 	 */
1963 	SetPageHugeTemporary(page);
1964 
1965 	return page;
1966 }
1967 
1968 /*
1969  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1970  */
1971 static
1972 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1973 		struct vm_area_struct *vma, unsigned long addr)
1974 {
1975 	struct page *page;
1976 	struct mempolicy *mpol;
1977 	gfp_t gfp_mask = htlb_alloc_mask(h);
1978 	int nid;
1979 	nodemask_t *nodemask;
1980 
1981 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1982 	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1983 	mpol_cond_put(mpol);
1984 
1985 	return page;
1986 }
1987 
1988 /* page migration callback function */
1989 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1990 {
1991 	gfp_t gfp_mask = htlb_alloc_mask(h);
1992 	struct page *page = NULL;
1993 
1994 	if (nid != NUMA_NO_NODE)
1995 		gfp_mask |= __GFP_THISNODE;
1996 
1997 	spin_lock(&hugetlb_lock);
1998 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1999 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
2000 	spin_unlock(&hugetlb_lock);
2001 
2002 	if (!page)
2003 		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
2004 
2005 	return page;
2006 }
2007 
2008 /* page migration callback function */
2009 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2010 		nodemask_t *nmask)
2011 {
2012 	gfp_t gfp_mask = htlb_alloc_mask(h);
2013 
2014 	spin_lock(&hugetlb_lock);
2015 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
2016 		struct page *page;
2017 
2018 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
2019 		if (page) {
2020 			spin_unlock(&hugetlb_lock);
2021 			return page;
2022 		}
2023 	}
2024 	spin_unlock(&hugetlb_lock);
2025 
2026 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
2027 }
2028 
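/*
 * A minimal usage sketch (assumed caller; new_hpage() is hypothetical
 * and stands in for a migration get_new_page callback):
 *
 *	static struct page *new_hpage(struct page *old, unsigned long private)
 *	{
 *		struct hstate *h = page_hstate(compound_head(old));
 *
 *		return alloc_huge_page_nodemask(h, page_to_nid(old), NULL);
 *	}
 *
 * Free pages above the reserve are preferred; otherwise a temporary
 * page is allocated and freed on the final put_page().
 */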
2029 /* mempolicy aware migration callback */
2030 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2031 		unsigned long address)
2032 {
2033 	struct mempolicy *mpol;
2034 	nodemask_t *nodemask;
2035 	struct page *page;
2036 	gfp_t gfp_mask;
2037 	int node;
2038 
2039 	gfp_mask = htlb_alloc_mask(h);
2040 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2041 	page = alloc_huge_page_nodemask(h, node, nodemask);
2042 	mpol_cond_put(mpol);
2043 
2044 	return page;
2045 }
2046 
2047 /*
2048  * Increase the hugetlb pool such that it can accommodate a reservation
2049  * of size 'delta'.
2050  */
2051 static int gather_surplus_pages(struct hstate *h, int delta)
2052 	__must_hold(&hugetlb_lock)
2053 {
2054 	struct list_head surplus_list;
2055 	struct page *page, *tmp;
2056 	int ret, i;
2057 	int needed, allocated;
2058 	bool alloc_ok = true;
2059 
2060 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2061 	if (needed <= 0) {
2062 		h->resv_huge_pages += delta;
2063 		return 0;
2064 	}
2065 
2066 	allocated = 0;
2067 	INIT_LIST_HEAD(&surplus_list);
2068 
2069 	ret = -ENOMEM;
2070 retry:
2071 	spin_unlock(&hugetlb_lock);
2072 	for (i = 0; i < needed; i++) {
2073 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2074 				NUMA_NO_NODE, NULL);
2075 		if (!page) {
2076 			alloc_ok = false;
2077 			break;
2078 		}
2079 		list_add(&page->lru, &surplus_list);
2080 		cond_resched();
2081 	}
2082 	allocated += i;
2083 
2084 	/*
2085 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2086 	 * because either resv_huge_pages or free_huge_pages may have changed.
2087 	 */
2088 	spin_lock(&hugetlb_lock);
2089 	needed = (h->resv_huge_pages + delta) -
2090 			(h->free_huge_pages + allocated);
2091 	if (needed > 0) {
2092 		if (alloc_ok)
2093 			goto retry;
2094 		/*
2095 		 * We were not able to allocate enough pages to
2096 		 * satisfy the entire reservation so we free what
2097 		 * we've allocated so far.
2098 		 */
2099 		goto free;
2100 	}
2101 	/*
2102 	 * The surplus_list now contains _at_least_ the number of extra pages
2103 	 * needed to accommodate the reservation.  Add the appropriate number
2104 	 * of pages to the hugetlb pool and free the extras back to the buddy
2105 	 * allocator.  Commit the entire reservation here to prevent another
2106 	 * process from stealing the pages as they are added to the pool but
2107 	 * before they are reserved.
2108 	 */
2109 	needed += allocated;
2110 	h->resv_huge_pages += delta;
2111 	ret = 0;
2112 
2113 	/* Free the needed pages to the hugetlb pool */
2114 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2115 		if ((--needed) < 0)
2116 			break;
2117 		/*
2118 		 * This page is now managed by the hugetlb allocator and has
2119 		 * no users -- drop the buddy allocator's reference.
2120 		 */
2121 		put_page_testzero(page);
2122 		VM_BUG_ON_PAGE(page_count(page), page);
2123 		enqueue_huge_page(h, page);
2124 	}
2125 free:
2126 	spin_unlock(&hugetlb_lock);
2127 
2128 	/* Free unnecessary surplus pages to the buddy allocator */
2129 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2130 		put_page(page);
2131 	spin_lock(&hugetlb_lock);
2132 
2133 	return ret;
2134 }
2135 
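/*
 * Worked example (illustrative): with resv_huge_pages == 10,
 * free_huge_pages == 8 and delta == 4, needed starts at 6 and six
 * surplus pages are allocated.  If two huge pages were freed while the
 * lock was dropped, the recount yields needed == -2, so
 * needed + allocated == 4 pages are enqueued into the pool and the
 * remaining two are handed back to the buddy allocator.
 */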
2136 /*
2137  * This routine has two main purposes:
2138  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2139  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2140  *    to the associated reservation map.
2141  * 2) Free any unused surplus pages that may have been allocated to satisfy
2142  *    the reservation.  As many as unused_resv_pages may be freed.
2143  *
2144  * Called with hugetlb_lock held.  However, the lock could be dropped (and
2145  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
2146  * we must make sure nobody else can claim pages we are in the process of
2147  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
2148  * number of huge pages we plan to free when dropping the lock.
2149  */
2150 static void return_unused_surplus_pages(struct hstate *h,
2151 					unsigned long unused_resv_pages)
2152 {
2153 	unsigned long nr_pages;
2154 
2155 	/* Cannot return gigantic pages currently */
2156 	if (hstate_is_gigantic(h))
2157 		goto out;
2158 
2159 	/*
2160 	 * Part (or even all) of the reservation could have been backed
2161 	 * by pre-allocated pages. Only free surplus pages.
2162 	 */
2163 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2164 
2165 	/*
2166 	 * We want to release as many surplus pages as possible, spread
2167 	 * evenly across all nodes with memory. Iterate across these nodes
2168 	 * until we can no longer free unreserved surplus pages. This occurs
2169 	 * when the nodes with surplus pages have no free pages.
2170 	 * free_pool_huge_page() will balance the freed pages across the
2171 	 * on-line nodes with memory and will handle the hstate accounting.
2172 	 *
2173 	 * Note that we decrement resv_huge_pages as we free the pages.  If
2174 	 * we drop the lock, resv_huge_pages will still be sufficiently large
2175 	 * to cover subsequent pages we may free.
2176 	 */
2177 	while (nr_pages--) {
2178 		h->resv_huge_pages--;
2179 		unused_resv_pages--;
2180 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
2181 			goto out;
2182 		cond_resched_lock(&hugetlb_lock);
2183 	}
2184 
2185 out:
2186 	/* Fully uncommit the reservation */
2187 	h->resv_huge_pages -= unused_resv_pages;
2188 }
2189 
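/*
 * Example (illustrative): with unused_resv_pages == 5 and
 * surplus_huge_pages == 3, the loop frees at most three pages while
 * decrementing resv_huge_pages one step at a time; the final
 * subtraction then uncommits the remaining two reservations.
 */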
2190 
2191 /*
2192  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2193  * are used by the huge page allocation routines to manage reservations.
2194  *
2195  * vma_needs_reservation is called to determine if the huge page at addr
2196  * within the vma has an associated reservation.  If a reservation is
2197  * needed, the value 1 is returned.  The caller is then responsible for
2198  * managing the global reservation and subpool usage counts.  After
2199  * the huge page has been allocated, vma_commit_reservation is called
2200  * to add the page to the reservation map.  If the page allocation fails,
2201  * the reservation must be ended instead of committed.  vma_end_reservation
2202  * is called in such cases.
2203  *
2204  * In the normal case, vma_commit_reservation returns the same value
2205  * as the preceding vma_needs_reservation call.  The only time this
2206  * is not the case is if a reserve map was changed between calls.  It
2207  * is the responsibility of the caller to notice the difference and
2208  * take appropriate action.
2209  *
2210  * vma_add_reservation is used in error paths where a reservation must
2211  * be restored when a newly allocated huge page must be freed.  It is
2212  * to be called after calling vma_needs_reservation to determine if a
2213  * reservation exists.
2214  */
2215 enum vma_resv_mode {
2216 	VMA_NEEDS_RESV,
2217 	VMA_COMMIT_RESV,
2218 	VMA_END_RESV,
2219 	VMA_ADD_RESV,
2220 };
2221 static long __vma_reservation_common(struct hstate *h,
2222 				struct vm_area_struct *vma, unsigned long addr,
2223 				enum vma_resv_mode mode)
2224 {
2225 	struct resv_map *resv;
2226 	pgoff_t idx;
2227 	long ret;
2228 	long dummy_out_regions_needed;
2229 
2230 	resv = vma_resv_map(vma);
2231 	if (!resv)
2232 		return 1;
2233 
2234 	idx = vma_hugecache_offset(h, vma, addr);
2235 	switch (mode) {
2236 	case VMA_NEEDS_RESV:
2237 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2238 		/* We assume that vma_reservation_* routines always operate on
2239 		 * 1 page, and that adding to resv map a 1 page entry can only
2240 		 * ever require 1 region.
2241 		 */
2242 		VM_BUG_ON(dummy_out_regions_needed != 1);
2243 		break;
2244 	case VMA_COMMIT_RESV:
2245 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2246 		/* region_add calls of range 1 should never fail. */
2247 		VM_BUG_ON(ret < 0);
2248 		break;
2249 	case VMA_END_RESV:
2250 		region_abort(resv, idx, idx + 1, 1);
2251 		ret = 0;
2252 		break;
2253 	case VMA_ADD_RESV:
2254 		if (vma->vm_flags & VM_MAYSHARE) {
2255 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2256 			/* region_add calls of range 1 should never fail. */
2257 			VM_BUG_ON(ret < 0);
2258 		} else {
2259 			region_abort(resv, idx, idx + 1, 1);
2260 			ret = region_del(resv, idx, idx + 1);
2261 		}
2262 		break;
2263 	default:
2264 		BUG();
2265 	}
2266 
2267 	if (vma->vm_flags & VM_MAYSHARE)
2268 		return ret;
2269 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2270 		/*
2271 		 * In most cases, reserves always exist for private mappings.
2272 		 * However, the file associated with the mapping could have
2273 		 * been hole punched or truncated after reserves were consumed.
2274 		 * A subsequent fault on such a range will then not use reserves.
2275 		 * Subtle - The reserve map for private mappings has the
2276 		 * opposite meaning than that of shared mappings.  If NO
2277 		 * entry is in the reserve map, it means a reservation exists.
2278 		 * If an entry exists in the reserve map, it means the
2279 		 * reservation has already been consumed.  As a result, the
2280 		 * return value of this routine is the opposite of the
2281 		 * value returned from reserve map manipulation routines above.
2282 		 */
2283 		if (ret)
2284 			return 0;
2285 		else
2286 			return 1;
2287 	} else {
2288 		return ret < 0 ? ret : 0;
2289 	}
2290 }
2291 
2292 static long vma_needs_reservation(struct hstate *h,
2293 			struct vm_area_struct *vma, unsigned long addr)
2294 {
2295 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2296 }
2297 
2298 static long vma_commit_reservation(struct hstate *h,
2299 			struct vm_area_struct *vma, unsigned long addr)
2300 {
2301 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2302 }
2303 
2304 static void vma_end_reservation(struct hstate *h,
2305 			struct vm_area_struct *vma, unsigned long addr)
2306 {
2307 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2308 }
2309 
2310 static long vma_add_reservation(struct hstate *h,
2311 			struct vm_area_struct *vma, unsigned long addr)
2312 {
2313 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2314 }
2315 
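/*
 * A minimal sketch of the protocol described above (mirrors what
 * alloc_huge_page() below actually does; 'chg' and 'commit' are local
 * stand-ins and error handling is elided):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	commit = vma_commit_reservation(h, vma, addr);
 *	if (unlikely(chg > commit))
 *		...race with hugetlb_reserve_pages(), adjust counts...
 */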
2316 /*
2317  * This routine is called to restore a reservation on error paths.  In the
2318  * specific error paths, a huge page was allocated (via alloc_huge_page)
2319  * and is about to be freed.  If a reservation for the page existed,
2320  * alloc_huge_page would have consumed the reservation and set PagePrivate
2321  * in the newly allocated page.  When the page is freed via free_huge_page,
2322  * the global reservation count will be incremented if PagePrivate is set.
2323  * However, free_huge_page cannot adjust the reserve map.  Adjust the
2324  * reserve map here to be consistent with global reserve count adjustments
2325  * to be made by free_huge_page.
2326  */
2327 static void restore_reserve_on_error(struct hstate *h,
2328 			struct vm_area_struct *vma, unsigned long address,
2329 			struct page *page)
2330 {
2331 	if (unlikely(PagePrivate(page))) {
2332 		long rc = vma_needs_reservation(h, vma, address);
2333 
2334 		if (unlikely(rc < 0)) {
2335 			/*
2336 			 * Rare out of memory condition in reserve map
2337 			 * manipulation.  Clear PagePrivate so that
2338 			 * global reserve count will not be incremented
2339 			 * by free_huge_page.  This will make it appear
2340 			 * as though the reservation for this page was
2341 			 * consumed.  This may prevent the task from
2342 			 * faulting in the page at a later time.  This
2343 			 * is better than inconsistent global huge page
2344 			 * accounting of reserve counts.
2345 			 */
2346 			ClearPagePrivate(page);
2347 		} else if (rc) {
2348 			rc = vma_add_reservation(h, vma, address);
2349 			if (unlikely(rc < 0))
2350 				/*
2351 				 * See above comment about rare out of
2352 				 * memory condition.
2353 				 */
2354 				ClearPagePrivate(page);
2355 		} else
2356 			vma_end_reservation(h, vma, address);
2357 	}
2358 }
2359 
2360 struct page *alloc_huge_page(struct vm_area_struct *vma,
2361 				    unsigned long addr, int avoid_reserve)
2362 {
2363 	struct hugepage_subpool *spool = subpool_vma(vma);
2364 	struct hstate *h = hstate_vma(vma);
2365 	struct page *page;
2366 	long map_chg, map_commit;
2367 	long gbl_chg;
2368 	int ret, idx;
2369 	struct hugetlb_cgroup *h_cg;
2370 	bool deferred_reserve;
2371 
2372 	idx = hstate_index(h);
2373 	/*
2374 	 * Examine the region/reserve map to determine if the process
2375 	 * has a reservation for the page to be allocated.  A return
2376 	 * code of zero indicates a reservation exists (no change).
2377 	 */
2378 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2379 	if (map_chg < 0)
2380 		return ERR_PTR(-ENOMEM);
2381 
2382 	/*
2383 	 * Processes that did not create the mapping will have no
2384 	 * reserves as indicated by the region/reserve map. Check
2385 	 * that the allocation will not exceed the subpool limit.
2386 	 * Allocations for MAP_NORESERVE mappings also need to be
2387 	 * checked against any subpool limit.
2388 	 */
2389 	if (map_chg || avoid_reserve) {
2390 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2391 		if (gbl_chg < 0) {
2392 			vma_end_reservation(h, vma, addr);
2393 			return ERR_PTR(-ENOSPC);
2394 		}
2395 
2396 		/*
2397 		 * Even though there was no reservation in the region/reserve
2398 		 * map, there could be reservations associated with the
2399 		 * subpool that can be used.  This would be indicated if the
2400 		 * return value of hugepage_subpool_get_pages() is zero.
2401 		 * However, if avoid_reserve is specified we still avoid even
2402 		 * the subpool reservations.
2403 		 */
2404 		if (avoid_reserve)
2405 			gbl_chg = 1;
2406 	}
2407 
2408 	/* If this allocation is not consuming a reservation, charge it now.
2409 	 */
2410 	deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2411 	if (deferred_reserve) {
2412 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2413 			idx, pages_per_huge_page(h), &h_cg);
2414 		if (ret)
2415 			goto out_subpool_put;
2416 	}
2417 
2418 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2419 	if (ret)
2420 		goto out_uncharge_cgroup_reservation;
2421 
2422 	spin_lock(&hugetlb_lock);
2423 	/*
2424 	 * gbl_chg is passed to indicate whether or not a page must be taken
2425 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2426 	 * a reservation exists for the allocation.
2427 	 */
2428 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2429 	if (!page) {
2430 		spin_unlock(&hugetlb_lock);
2431 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2432 		if (!page)
2433 			goto out_uncharge_cgroup;
2434 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2435 			SetPagePrivate(page);
2436 			h->resv_huge_pages--;
2437 		}
2438 		spin_lock(&hugetlb_lock);
2439 		list_move(&page->lru, &h->hugepage_activelist);
2440 		/* Fall through */
2441 	}
2442 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2443 	/* If allocation is not consuming a reservation, also store the
2444 	 * hugetlb_cgroup pointer on the page.
2445 	 */
2446 	if (deferred_reserve) {
2447 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2448 						  h_cg, page);
2449 	}
2450 
2451 	spin_unlock(&hugetlb_lock);
2452 
2453 	set_page_private(page, (unsigned long)spool);
2454 
2455 	map_commit = vma_commit_reservation(h, vma, addr);
2456 	if (unlikely(map_chg > map_commit)) {
2457 		/*
2458 		 * The page was added to the reservation map between
2459 		 * vma_needs_reservation and vma_commit_reservation.
2460 		 * This indicates a race with hugetlb_reserve_pages.
2461 		 * Adjust for the subpool count incremented above AND
2462 		 * in hugetlb_reserve_pages for the same page.  Also,
2463 		 * the reservation count added in hugetlb_reserve_pages
2464 		 * no longer applies.
2465 		 */
2466 		long rsv_adjust;
2467 
2468 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2469 		hugetlb_acct_memory(h, -rsv_adjust);
2470 	}
2471 	return page;
2472 
2473 out_uncharge_cgroup:
2474 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2475 out_uncharge_cgroup_reservation:
2476 	if (deferred_reserve)
2477 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2478 						    h_cg);
2479 out_subpool_put:
2480 	if (map_chg || avoid_reserve)
2481 		hugepage_subpool_put_pages(spool, 1);
2482 	vma_end_reservation(h, vma, addr);
2483 	return ERR_PTR(-ENOSPC);
2484 }
2485 
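/*
 * Usage sketch (illustrative of the fault/mmap callers, which live
 * elsewhere): -ENOMEM indicates a reserve map manipulation failure,
 * -ENOSPC an exhausted pool or subpool limit.
 *
 *	page = alloc_huge_page(vma, address, 0);
 *	if (IS_ERR(page))
 *		return vmf_error(PTR_ERR(page));
 *	...
 *	if (error_after_allocation) {
 *		restore_reserve_on_error(h, vma, address, page);
 *		put_page(page);
 *	}
 */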
2486 int alloc_bootmem_huge_page(struct hstate *h)
2487 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2488 int __alloc_bootmem_huge_page(struct hstate *h)
2489 {
2490 	struct huge_bootmem_page *m;
2491 	int nr_nodes, node;
2492 
2493 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2494 		void *addr;
2495 
2496 		addr = memblock_alloc_try_nid_raw(
2497 				huge_page_size(h), huge_page_size(h),
2498 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2499 		if (addr) {
2500 			/*
2501 			 * Use the beginning of the huge page to store the
2502 			 * huge_bootmem_page struct (until gather_bootmem
2503 			 * puts them into the mem_map).
2504 			 */
2505 			m = addr;
2506 			goto found;
2507 		}
2508 	}
2509 	return 0;
2510 
2511 found:
2512 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2513 	/* Put them into a private list first because mem_map is not up yet */
2514 	INIT_LIST_HEAD(&m->list);
2515 	list_add(&m->list, &huge_boot_pages);
2516 	m->hstate = h;
2517 	return 1;
2518 }
2519 
2520 static void __init prep_compound_huge_page(struct page *page,
2521 		unsigned int order)
2522 {
2523 	if (unlikely(order > (MAX_ORDER - 1)))
2524 		prep_compound_gigantic_page(page, order);
2525 	else
2526 		prep_compound_page(page, order);
2527 }
2528 
2529 /* Put bootmem huge pages into the standard lists after mem_map is up */
2530 static void __init gather_bootmem_prealloc(void)
2531 {
2532 	struct huge_bootmem_page *m;
2533 
2534 	list_for_each_entry(m, &huge_boot_pages, list) {
2535 		struct page *page = virt_to_page(m);
2536 		struct hstate *h = m->hstate;
2537 
2538 		WARN_ON(page_count(page) != 1);
2539 		prep_compound_huge_page(page, h->order);
2540 		WARN_ON(PageReserved(page));
2541 		prep_new_huge_page(h, page, page_to_nid(page));
2542 		put_page(page); /* free it into the hugepage allocator */
2543 
2544 		/*
2545 		 * If we had gigantic hugepages allocated at boot time, we need
2546 		 * to restore the 'stolen' pages to totalram_pages in order to
2547 		 * fix confusing memory reports from free(1) and other
2548 		 * side-effects, like CommitLimit going negative.
2549 		 */
2550 		if (hstate_is_gigantic(h))
2551 			adjust_managed_page_count(page, 1 << h->order);
2552 		cond_resched();
2553 	}
2554 }
2555 
2556 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2557 {
2558 	unsigned long i;
2559 	nodemask_t *node_alloc_noretry;
2560 
2561 	if (!hstate_is_gigantic(h)) {
2562 		/*
2563 		 * Bit mask controlling how hard we retry per-node allocations.
2564 		 * Ignore errors as lower level routines can deal with
2565 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
2566 		 * time, we are likely in bigger trouble.
2567 		 */
2568 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2569 						GFP_KERNEL);
2570 	} else {
2571 		/* allocations done at boot time */
2572 		node_alloc_noretry = NULL;
2573 	}
2574 
2575 	/* bit mask controlling how hard we retry per-node allocations */
2576 	if (node_alloc_noretry)
2577 		nodes_clear(*node_alloc_noretry);
2578 
2579 	for (i = 0; i < h->max_huge_pages; ++i) {
2580 		if (hstate_is_gigantic(h)) {
2581 			if (hugetlb_cma_size) {
2582 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
2583 				break;
2584 			}
2585 			if (!alloc_bootmem_huge_page(h))
2586 				break;
2587 		} else if (!alloc_pool_huge_page(h,
2588 					 &node_states[N_MEMORY],
2589 					 node_alloc_noretry))
2590 			break;
2591 		cond_resched();
2592 	}
2593 	if (i < h->max_huge_pages) {
2594 		char buf[32];
2595 
2596 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2597 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2598 			h->max_huge_pages, buf, i);
2599 		h->max_huge_pages = i;
2600 	}
2601 
2602 	kfree(node_alloc_noretry);
2603 }
2604 
2605 static void __init hugetlb_init_hstates(void)
2606 {
2607 	struct hstate *h;
2608 
2609 	for_each_hstate(h) {
2610 		if (minimum_order > huge_page_order(h))
2611 			minimum_order = huge_page_order(h);
2612 
2613 		/* oversize hugepages were init'ed in early boot */
2614 		if (!hstate_is_gigantic(h))
2615 			hugetlb_hstate_alloc_pages(h);
2616 	}
2617 	VM_BUG_ON(minimum_order == UINT_MAX);
2618 }
2619 
2620 static void __init report_hugepages(void)
2621 {
2622 	struct hstate *h;
2623 
2624 	for_each_hstate(h) {
2625 		char buf[32];
2626 
2627 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2628 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2629 			buf, h->free_huge_pages);
2630 	}
2631 }
2632 
2633 #ifdef CONFIG_HIGHMEM
2634 static void try_to_free_low(struct hstate *h, unsigned long count,
2635 						nodemask_t *nodes_allowed)
2636 {
2637 	int i;
2638 
2639 	if (hstate_is_gigantic(h))
2640 		return;
2641 
2642 	for_each_node_mask(i, *nodes_allowed) {
2643 		struct page *page, *next;
2644 		struct list_head *freel = &h->hugepage_freelists[i];
2645 		list_for_each_entry_safe(page, next, freel, lru) {
2646 			if (count >= h->nr_huge_pages)
2647 				return;
2648 			if (PageHighMem(page))
2649 				continue;
2650 			list_del(&page->lru);
2651 			update_and_free_page(h, page);
2652 			h->free_huge_pages--;
2653 			h->free_huge_pages_node[page_to_nid(page)]--;
2654 		}
2655 	}
2656 }
2657 #else
2658 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2659 						nodemask_t *nodes_allowed)
2660 {
2661 }
2662 #endif
2663 
2664 /*
2665  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2666  * balanced by operating on them in a round-robin fashion.
2667  * Returns 1 if an adjustment was made.
2668  */
2669 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2670 				int delta)
2671 {
2672 	int nr_nodes, node;
2673 
2674 	VM_BUG_ON(delta != -1 && delta != 1);
2675 
2676 	if (delta < 0) {
2677 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2678 			if (h->surplus_huge_pages_node[node])
2679 				goto found;
2680 		}
2681 	} else {
2682 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2683 			if (h->surplus_huge_pages_node[node] <
2684 					h->nr_huge_pages_node[node])
2685 				goto found;
2686 		}
2687 	}
2688 	return 0;
2689 
2690 found:
2691 	h->surplus_huge_pages += delta;
2692 	h->surplus_huge_pages_node[node] += delta;
2693 	return 1;
2694 }
2695 
2696 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2697 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2698 			      nodemask_t *nodes_allowed)
2699 {
2700 	unsigned long min_count, ret;
2701 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2702 
2703 	/*
2704 	 * Bit mask controlling how hard we retry per-node allocations.
2705 	 * If we can not allocate the bit mask, do not attempt to allocate
2706 	 * the requested huge pages.
2707 	 */
2708 	if (node_alloc_noretry)
2709 		nodes_clear(*node_alloc_noretry);
2710 	else
2711 		return -ENOMEM;
2712 
2713 	spin_lock(&hugetlb_lock);
2714 
2715 	/*
2716 	 * Check for a node specific request.
2717 	 * Changing node specific huge page count may require a corresponding
2718 	 * change to the global count.  In any case, the passed node mask
2719 	 * (nodes_allowed) will restrict alloc/free to the specified node.
2720 	 */
2721 	if (nid != NUMA_NO_NODE) {
2722 		unsigned long old_count = count;
2723 
2724 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2725 		/*
2726 		 * User may have specified a large count value which caused the
2727 		 * above calculation to overflow.  In this case, they wanted
2728 		 * to allocate as many huge pages as possible.  Set count to
2729 		 * largest possible value to align with their intention.
2730 		 */
2731 		if (count < old_count)
2732 			count = ULONG_MAX;
2733 	}
2734 
2735 	/*
2736 	 * Runtime allocation of gigantic pages depends on the capability
2737 	 * for large page range allocation.
2738 	 * If the system does not provide this feature, return an error when
2739 	 * the user tries to allocate gigantic pages but let the user free the
2740 	 * boottime allocated gigantic pages.
2741 	 */
2742 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2743 		if (count > persistent_huge_pages(h)) {
2744 			spin_unlock(&hugetlb_lock);
2745 			NODEMASK_FREE(node_alloc_noretry);
2746 			return -EINVAL;
2747 		}
2748 		/* Fall through to decrease pool */
2749 	}
2750 
2751 	/*
2752 	 * Increase the pool size
2753 	 * First take pages out of surplus state.  Then make up the
2754 	 * remaining difference by allocating fresh huge pages.
2755 	 *
2756 	 * We might race with alloc_surplus_huge_page() here and be unable
2757 	 * to convert a surplus huge page to a normal huge page. That is
2758 	 * not critical, though, it just means the overall size of the
2759 	 * pool might be one hugepage larger than it needs to be, but
2760 	 * within all the constraints specified by the sysctls.
2761 	 */
2762 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2763 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2764 			break;
2765 	}
2766 
2767 	while (count > persistent_huge_pages(h)) {
2768 		/*
2769 		 * If this allocation races such that we no longer need the
2770 		 * page, free_huge_page will handle it by freeing the page
2771 		 * and reducing the surplus.
2772 		 */
2773 		spin_unlock(&hugetlb_lock);
2774 
2775 		/* yield cpu to avoid soft lockup */
2776 		cond_resched();
2777 
2778 		ret = alloc_pool_huge_page(h, nodes_allowed,
2779 						node_alloc_noretry);
2780 		spin_lock(&hugetlb_lock);
2781 		if (!ret)
2782 			goto out;
2783 
2784 		/* Bail for signals. Probably ctrl-c from user */
2785 		if (signal_pending(current))
2786 			goto out;
2787 	}
2788 
2789 	/*
2790 	 * Decrease the pool size
2791 	 * First return free pages to the buddy allocator (being careful
2792 	 * to keep enough around to satisfy reservations).  Then place
2793 	 * pages into surplus state as needed so the pool will shrink
2794 	 * to the desired size as pages become free.
2795 	 *
2796 	 * By placing pages into the surplus state independent of the
2797 	 * overcommit value, we are allowing the surplus pool size to
2798 	 * exceed overcommit. There are few sane options here. Since
2799 	 * alloc_surplus_huge_page() is checking the global counter,
2800 	 * though, we'll note that we're not allowed to exceed surplus
2801 	 * and won't grow the pool anywhere else. Not until one of the
2802 	 * sysctls is changed, or the surplus pages go out of use.
2803 	 */
2804 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2805 	min_count = max(count, min_count);
2806 	try_to_free_low(h, min_count, nodes_allowed);
2807 	while (min_count < persistent_huge_pages(h)) {
2808 		if (!free_pool_huge_page(h, nodes_allowed, 0))
2809 			break;
2810 		cond_resched_lock(&hugetlb_lock);
2811 	}
2812 	while (count < persistent_huge_pages(h)) {
2813 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2814 			break;
2815 	}
2816 out:
2817 	h->max_huge_pages = persistent_huge_pages(h);
2818 	spin_unlock(&hugetlb_lock);
2819 
2820 	NODEMASK_FREE(node_alloc_noretry);
2821 
2822 	return 0;
2823 }
2824 
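/*
 * set_max_huge_pages() is reached from the sysctl and sysfs stores
 * below.  Example invocations (illustrative):
 *
 *	echo 1024 > /proc/sys/vm/nr_hugepages
 *	echo 16 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * The second form passes nid == 0, so nodes_allowed restricts all
 * allocation and freeing to node 0.
 */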
2825 #define HSTATE_ATTR_RO(_name) \
2826 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2827 
2828 #define HSTATE_ATTR(_name) \
2829 	static struct kobj_attribute _name##_attr = \
2830 		__ATTR(_name, 0644, _name##_show, _name##_store)
2831 
2832 static struct kobject *hugepages_kobj;
2833 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2834 
2835 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2836 
2837 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2838 {
2839 	int i;
2840 
2841 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2842 		if (hstate_kobjs[i] == kobj) {
2843 			if (nidp)
2844 				*nidp = NUMA_NO_NODE;
2845 			return &hstates[i];
2846 		}
2847 
2848 	return kobj_to_node_hstate(kobj, nidp);
2849 }
2850 
2851 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2852 					struct kobj_attribute *attr, char *buf)
2853 {
2854 	struct hstate *h;
2855 	unsigned long nr_huge_pages;
2856 	int nid;
2857 
2858 	h = kobj_to_hstate(kobj, &nid);
2859 	if (nid == NUMA_NO_NODE)
2860 		nr_huge_pages = h->nr_huge_pages;
2861 	else
2862 		nr_huge_pages = h->nr_huge_pages_node[nid];
2863 
2864 	return sprintf(buf, "%lu\n", nr_huge_pages);
2865 }
2866 
2867 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2868 					   struct hstate *h, int nid,
2869 					   unsigned long count, size_t len)
2870 {
2871 	int err;
2872 	nodemask_t nodes_allowed, *n_mask;
2873 
2874 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2875 		return -EINVAL;
2876 
2877 	if (nid == NUMA_NO_NODE) {
2878 		/*
2879 		 * global hstate attribute
2880 		 */
2881 		if (!(obey_mempolicy &&
2882 				init_nodemask_of_mempolicy(&nodes_allowed)))
2883 			n_mask = &node_states[N_MEMORY];
2884 		else
2885 			n_mask = &nodes_allowed;
2886 	} else {
2887 		/*
2888 		 * Node specific request.  count adjustment happens in
2889 		 * set_max_huge_pages() after acquiring hugetlb_lock.
2890 		 */
2891 		init_nodemask_of_node(&nodes_allowed, nid);
2892 		n_mask = &nodes_allowed;
2893 	}
2894 
2895 	err = set_max_huge_pages(h, count, nid, n_mask);
2896 
2897 	return err ? err : len;
2898 }
2899 
2900 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2901 					 struct kobject *kobj, const char *buf,
2902 					 size_t len)
2903 {
2904 	struct hstate *h;
2905 	unsigned long count;
2906 	int nid;
2907 	int err;
2908 
2909 	err = kstrtoul(buf, 10, &count);
2910 	if (err)
2911 		return err;
2912 
2913 	h = kobj_to_hstate(kobj, &nid);
2914 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2915 }
2916 
2917 static ssize_t nr_hugepages_show(struct kobject *kobj,
2918 				       struct kobj_attribute *attr, char *buf)
2919 {
2920 	return nr_hugepages_show_common(kobj, attr, buf);
2921 }
2922 
2923 static ssize_t nr_hugepages_store(struct kobject *kobj,
2924 	       struct kobj_attribute *attr, const char *buf, size_t len)
2925 {
2926 	return nr_hugepages_store_common(false, kobj, buf, len);
2927 }
2928 HSTATE_ATTR(nr_hugepages);
2929 
2930 #ifdef CONFIG_NUMA
2931 
2932 /*
2933  * hstate attribute for optionally mempolicy-based constraint on persistent
2934  * huge page alloc/free.
2935  */
2936 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2937 				       struct kobj_attribute *attr, char *buf)
2938 {
2939 	return nr_hugepages_show_common(kobj, attr, buf);
2940 }
2941 
2942 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2943 	       struct kobj_attribute *attr, const char *buf, size_t len)
2944 {
2945 	return nr_hugepages_store_common(true, kobj, buf, len);
2946 }
2947 HSTATE_ATTR(nr_hugepages_mempolicy);
2948 #endif
2949 
2950 
2951 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2952 					struct kobj_attribute *attr, char *buf)
2953 {
2954 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2955 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2956 }
2957 
2958 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2959 		struct kobj_attribute *attr, const char *buf, size_t count)
2960 {
2961 	int err;
2962 	unsigned long input;
2963 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2964 
2965 	if (hstate_is_gigantic(h))
2966 		return -EINVAL;
2967 
2968 	err = kstrtoul(buf, 10, &input);
2969 	if (err)
2970 		return err;
2971 
2972 	spin_lock(&hugetlb_lock);
2973 	h->nr_overcommit_huge_pages = input;
2974 	spin_unlock(&hugetlb_lock);
2975 
2976 	return count;
2977 }
2978 HSTATE_ATTR(nr_overcommit_hugepages);
2979 
2980 static ssize_t free_hugepages_show(struct kobject *kobj,
2981 					struct kobj_attribute *attr, char *buf)
2982 {
2983 	struct hstate *h;
2984 	unsigned long free_huge_pages;
2985 	int nid;
2986 
2987 	h = kobj_to_hstate(kobj, &nid);
2988 	if (nid == NUMA_NO_NODE)
2989 		free_huge_pages = h->free_huge_pages;
2990 	else
2991 		free_huge_pages = h->free_huge_pages_node[nid];
2992 
2993 	return sprintf(buf, "%lu\n", free_huge_pages);
2994 }
2995 HSTATE_ATTR_RO(free_hugepages);
2996 
2997 static ssize_t resv_hugepages_show(struct kobject *kobj,
2998 					struct kobj_attribute *attr, char *buf)
2999 {
3000 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3001 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
3002 }
3003 HSTATE_ATTR_RO(resv_hugepages);
3004 
3005 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3006 					struct kobj_attribute *attr, char *buf)
3007 {
3008 	struct hstate *h;
3009 	unsigned long surplus_huge_pages;
3010 	int nid;
3011 
3012 	h = kobj_to_hstate(kobj, &nid);
3013 	if (nid == NUMA_NO_NODE)
3014 		surplus_huge_pages = h->surplus_huge_pages;
3015 	else
3016 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
3017 
3018 	return sprintf(buf, "%lu\n", surplus_huge_pages);
3019 }
3020 HSTATE_ATTR_RO(surplus_hugepages);
3021 
3022 static struct attribute *hstate_attrs[] = {
3023 	&nr_hugepages_attr.attr,
3024 	&nr_overcommit_hugepages_attr.attr,
3025 	&free_hugepages_attr.attr,
3026 	&resv_hugepages_attr.attr,
3027 	&surplus_hugepages_attr.attr,
3028 #ifdef CONFIG_NUMA
3029 	&nr_hugepages_mempolicy_attr.attr,
3030 #endif
3031 	NULL,
3032 };
3033 
3034 static const struct attribute_group hstate_attr_group = {
3035 	.attrs = hstate_attrs,
3036 };
3037 
3038 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3039 				    struct kobject **hstate_kobjs,
3040 				    const struct attribute_group *hstate_attr_group)
3041 {
3042 	int retval;
3043 	int hi = hstate_index(h);
3044 
3045 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3046 	if (!hstate_kobjs[hi])
3047 		return -ENOMEM;
3048 
3049 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3050 	if (retval)
3051 		kobject_put(hstate_kobjs[hi]);
3052 
3053 	return retval;
3054 }
3055 
3056 static void __init hugetlb_sysfs_init(void)
3057 {
3058 	struct hstate *h;
3059 	int err;
3060 
3061 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3062 	if (!hugepages_kobj)
3063 		return;
3064 
3065 	for_each_hstate(h) {
3066 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3067 					 hstate_kobjs, &hstate_attr_group);
3068 		if (err)
3069 			pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
3070 	}
3071 }
3072 
3073 #ifdef CONFIG_NUMA
3074 
3075 /*
3076  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3077  * with node devices in node_devices[] using a parallel array.  The array
3078  * index of a node device or _hstate == node id.
3079  * This is here to avoid any static dependency of the node device driver, in
3080  * the base kernel, on the hugetlb module.
3081  */
3082 struct node_hstate {
3083 	struct kobject		*hugepages_kobj;
3084 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
3085 };
3086 static struct node_hstate node_hstates[MAX_NUMNODES];
3087 
3088 /*
3089  * A subset of global hstate attributes for node devices
3090  */
3091 static struct attribute *per_node_hstate_attrs[] = {
3092 	&nr_hugepages_attr.attr,
3093 	&free_hugepages_attr.attr,
3094 	&surplus_hugepages_attr.attr,
3095 	NULL,
3096 };
3097 
3098 static const struct attribute_group per_node_hstate_attr_group = {
3099 	.attrs = per_node_hstate_attrs,
3100 };
3101 
3102 /*
3103  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3104  * Returns node id via non-NULL nidp.
3105  */
3106 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3107 {
3108 	int nid;
3109 
3110 	for (nid = 0; nid < nr_node_ids; nid++) {
3111 		struct node_hstate *nhs = &node_hstates[nid];
3112 		int i;
3113 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
3114 			if (nhs->hstate_kobjs[i] == kobj) {
3115 				if (nidp)
3116 					*nidp = nid;
3117 				return &hstates[i];
3118 			}
3119 	}
3120 
3121 	BUG();
3122 	return NULL;
3123 }
3124 
3125 /*
3126  * Unregister hstate attributes from a single node device.
3127  * No-op if no hstate attributes attached.
3128  */
3129 static void hugetlb_unregister_node(struct node *node)
3130 {
3131 	struct hstate *h;
3132 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3133 
3134 	if (!nhs->hugepages_kobj)
3135 		return;		/* no hstate attributes */
3136 
3137 	for_each_hstate(h) {
3138 		int idx = hstate_index(h);
3139 		if (nhs->hstate_kobjs[idx]) {
3140 			kobject_put(nhs->hstate_kobjs[idx]);
3141 			nhs->hstate_kobjs[idx] = NULL;
3142 		}
3143 	}
3144 
3145 	kobject_put(nhs->hugepages_kobj);
3146 	nhs->hugepages_kobj = NULL;
3147 }
3148 
3149 
3150 /*
3151  * Register hstate attributes for a single node device.
3152  * No-op if attributes already registered.
3153  */
3154 static void hugetlb_register_node(struct node *node)
3155 {
3156 	struct hstate *h;
3157 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3158 	int err;
3159 
3160 	if (nhs->hugepages_kobj)
3161 		return;		/* already allocated */
3162 
3163 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3164 							&node->dev.kobj);
3165 	if (!nhs->hugepages_kobj)
3166 		return;
3167 
3168 	for_each_hstate(h) {
3169 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3170 						nhs->hstate_kobjs,
3171 						&per_node_hstate_attr_group);
3172 		if (err) {
3173 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3174 				h->name, node->dev.id);
3175 			hugetlb_unregister_node(node);
3176 			break;
3177 		}
3178 	}
3179 }
3180 
3181 /*
3182  * hugetlb init time:  register hstate attributes for all registered node
3183  * devices of nodes that have memory.  All on-line nodes should have
3184  * registered their associated device by this time.
3185  */
3186 static void __init hugetlb_register_all_nodes(void)
3187 {
3188 	int nid;
3189 
3190 	for_each_node_state(nid, N_MEMORY) {
3191 		struct node *node = node_devices[nid];
3192 		if (node->dev.id == nid)
3193 			hugetlb_register_node(node);
3194 	}
3195 
3196 	/*
3197 	 * Let the node device driver know we're here so it can
3198 	 * [un]register hstate attributes on node hotplug.
3199 	 */
3200 	register_hugetlbfs_with_node(hugetlb_register_node,
3201 				     hugetlb_unregister_node);
3202 }
3203 #else	/* !CONFIG_NUMA */
3204 
3205 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3206 {
3207 	BUG();
3208 	if (nidp)
3209 		*nidp = -1;
3210 	return NULL;
3211 }
3212 
3213 static void hugetlb_register_all_nodes(void) { }
3214 
3215 #endif
3216 
3217 static int __init hugetlb_init(void)
3218 {
3219 	int i;
3220 
3221 	if (!hugepages_supported()) {
3222 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3223 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
3224 		return 0;
3225 	}
3226 
3227 	/*
3228 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
3229 	 * architectures depend on setup being done here.
3230 	 */
3231 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3232 	if (!parsed_default_hugepagesz) {
3233 		/*
3234 		 * If we did not parse a default huge page size, set
3235 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3236 		 * number of huge pages for this default size was implicitly
3237 		 * specified, set that here as well.
3238 		 * Note that the implicit setting will overwrite an explicit
3239 		 * setting.  A warning will be printed in this case.
3240 		 */
3241 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3242 		if (default_hstate_max_huge_pages) {
3243 			if (default_hstate.max_huge_pages) {
3244 				char buf[32];
3245 
3246 				string_get_size(huge_page_size(&default_hstate),
3247 					1, STRING_UNITS_2, buf, 32);
3248 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3249 					default_hstate.max_huge_pages, buf);
3250 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3251 					default_hstate_max_huge_pages);
3252 			}
3253 			default_hstate.max_huge_pages =
3254 				default_hstate_max_huge_pages;
3255 		}
3256 	}
3257 
3258 	hugetlb_cma_check();
3259 	hugetlb_init_hstates();
3260 	gather_bootmem_prealloc();
3261 	report_hugepages();
3262 
3263 	hugetlb_sysfs_init();
3264 	hugetlb_register_all_nodes();
3265 	hugetlb_cgroup_file_init();
3266 
3267 #ifdef CONFIG_SMP
3268 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3269 #else
3270 	num_fault_mutexes = 1;
3271 #endif
3272 	hugetlb_fault_mutex_table =
3273 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3274 			      GFP_KERNEL);
3275 	BUG_ON(!hugetlb_fault_mutex_table);
3276 
3277 	for (i = 0; i < num_fault_mutexes; i++)
3278 		mutex_init(&hugetlb_fault_mutex_table[i]);
3279 	return 0;
3280 }
3281 subsys_initcall(hugetlb_init);
3282 
3283 /* Overwritten by architectures with more huge page sizes */
3284 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
3285 {
3286 	return size == HPAGE_SIZE;
3287 }
3288 
3289 void __init hugetlb_add_hstate(unsigned int order)
3290 {
3291 	struct hstate *h;
3292 	unsigned long i;
3293 
3294 	if (size_to_hstate(PAGE_SIZE << order)) {
3295 		return;
3296 	}
3297 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3298 	BUG_ON(order == 0);
3299 	h = &hstates[hugetlb_max_hstate++];
3300 	h->order = order;
3301 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
3302 	h->nr_huge_pages = 0;
3303 	h->free_huge_pages = 0;
3304 	for (i = 0; i < MAX_NUMNODES; ++i)
3305 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3306 	INIT_LIST_HEAD(&h->hugepage_activelist);
3307 	h->next_nid_to_alloc = first_memory_node;
3308 	h->next_nid_to_free = first_memory_node;
3309 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3310 					huge_page_size(h)/1024);
3311 
3312 	parsed_hstate = h;
3313 }
3314 
3315 /*
3316  * hugepages command line processing
3317  * hugepages normally follows a valid hugepagesz or default_hugepagesz
3318  * specification.  If not, ignore the hugepages value.  hugepages can also
3319  * be the first huge page command line option, in which case it implicitly
3320  * specifies the number of huge pages for the default size.
3321  */
3322 static int __init hugepages_setup(char *s)
3323 {
3324 	unsigned long *mhp;
3325 	static unsigned long *last_mhp;
3326 
3327 	if (!parsed_valid_hugepagesz) {
3328 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
3329 		parsed_valid_hugepagesz = true;
3330 		return 0;
3331 	}
3332 
3333 	/*
3334 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3335 	 * yet, so this hugepages= parameter goes to the "default hstate".
3336 	 * Otherwise, it goes with the previously parsed hugepagesz or
3337 	 * default_hugepagesz.
3338 	 */
3339 	else if (!hugetlb_max_hstate)
3340 		mhp = &default_hstate_max_huge_pages;
3341 	else
3342 		mhp = &parsed_hstate->max_huge_pages;
3343 
3344 	if (mhp == last_mhp) {
3345 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3346 		return 0;
3347 	}
3348 
3349 	if (sscanf(s, "%lu", mhp) <= 0)
3350 		*mhp = 0;
3351 
3352 	/*
3353 	 * Global state is always initialized later in hugetlb_init.
3354 	 * But we need to allocate >= MAX_ORDER hstates here early to still
3355 	 * use the bootmem allocator.
3356 	 */
3357 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3358 		hugetlb_hstate_alloc_pages(parsed_hstate);
3359 
3360 	last_mhp = mhp;
3361 
3362 	return 1;
3363 }
3364 __setup("hugepages=", hugepages_setup);
3365 
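/*
 * Example command lines (illustrative):
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *	hugepages=512		(implicitly sizes the default hstate)
 *
 * A hugepages= value that does not follow a valid hugepagesz= (other
 * than the leading implicit form above) is ignored with a warning.
 */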
3366 /*
3367  * hugepagesz command line processing
3368  * A specific huge page size can only be specified once with hugepagesz.
3369  * hugepagesz is followed by hugepages on the command line.  The global
3370  * variable 'parsed_valid_hugepagesz' is used to determine if prior
3371  * hugepagesz argument was valid.
3372  */
3373 static int __init hugepagesz_setup(char *s)
3374 {
3375 	unsigned long size;
3376 	struct hstate *h;
3377 
3378 	parsed_valid_hugepagesz = false;
3379 	size = (unsigned long)memparse(s, NULL);
3380 
3381 	if (!arch_hugetlb_valid_size(size)) {
3382 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
3383 		return 0;
3384 	}
3385 
3386 	h = size_to_hstate(size);
3387 	if (h) {
3388 		/*
3389 		 * hstate for this size already exists.  This is normally
3390 		 * an error, but is allowed if the existing hstate is the
3391 		 * default hstate.  More specifically, it is only allowed if
3392 		 * the number of huge pages for the default hstate was not
3393 		 * previously specified.
3394 		 */
3395 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
3396 		    default_hstate.max_huge_pages) {
3397 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3398 			return 0;
3399 		}
3400 
3401 		/*
3402 		 * No need to call hugetlb_add_hstate() as hstate already
3403 		 * exists.  But, do set parsed_hstate so that a following
3404 		 * hugepages= parameter will be applied to this hstate.
3405 		 */
3406 		parsed_hstate = h;
3407 		parsed_valid_hugepagesz = true;
3408 		return 1;
3409 	}
3410 
3411 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3412 	parsed_valid_hugepagesz = true;
3413 	return 1;
3414 }
3415 __setup("hugepagesz=", hugepagesz_setup);
3416 
3417 /*
3418  * default_hugepagesz command line input
3419  * Only one instance of default_hugepagesz allowed on command line.
3420  */
3421 static int __init default_hugepagesz_setup(char *s)
3422 {
3423 	unsigned long size;
3424 
3425 	parsed_valid_hugepagesz = false;
3426 	if (parsed_default_hugepagesz) {
3427 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3428 		return 0;
3429 	}
3430 
3431 	size = (unsigned long)memparse(s, NULL);
3432 
3433 	if (!arch_hugetlb_valid_size(size)) {
3434 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3435 		return 0;
3436 	}
3437 
3438 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3439 	parsed_valid_hugepagesz = true;
3440 	parsed_default_hugepagesz = true;
3441 	default_hstate_idx = hstate_index(size_to_hstate(size));
3442 
3443 	/*
3444 	 * The number of default huge pages (for this size) could have been
3445 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
3446 	 * then default_hstate_max_huge_pages is set.  If the default huge
3447 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
3448 	 * allocated here from bootmem allocator.
3449 	 */
3450 	if (default_hstate_max_huge_pages) {
3451 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3452 		if (hstate_is_gigantic(&default_hstate))
3453 			hugetlb_hstate_alloc_pages(&default_hstate);
3454 		default_hstate_max_huge_pages = 0;
3455 	}
3456 
3457 	return 1;
3458 }
3459 __setup("default_hugepagesz=", default_hugepagesz_setup);
3460 
3461 static unsigned int allowed_mems_nr(struct hstate *h)
3462 {
3463 	int node;
3464 	unsigned int nr = 0;
3465 	nodemask_t *mpol_allowed;
3466 	unsigned int *array = h->free_huge_pages_node;
3467 	gfp_t gfp_mask = htlb_alloc_mask(h);
3468 
3469 	mpol_allowed = policy_nodemask_current(gfp_mask);
3470 
3471 	for_each_node_mask(node, cpuset_current_mems_allowed) {
3472 		if (!mpol_allowed ||
3473 		    node_isset(node, *mpol_allowed))
3474 			nr += array[node];
3475 	}
3476 
3477 	return nr;
3478 }
3479 
3480 #ifdef CONFIG_SYSCTL
3481 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3482 			 struct ctl_table *table, int write,
3483 			 void *buffer, size_t *length, loff_t *ppos)
3484 {
3485 	struct hstate *h = &default_hstate;
3486 	unsigned long tmp = h->max_huge_pages;
3487 	int ret;
3488 
3489 	if (!hugepages_supported())
3490 		return -EOPNOTSUPP;
3491 
3492 	table->data = &tmp;
3493 	table->maxlen = sizeof(unsigned long);
3494 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3495 	if (ret)
3496 		goto out;
3497 
3498 	if (write)
3499 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
3500 						  NUMA_NO_NODE, tmp, *length);
3501 out:
3502 	return ret;
3503 }
3504 
3505 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3506 			  void *buffer, size_t *length, loff_t *ppos)
3507 {
3509 	return hugetlb_sysctl_handler_common(false, table, write,
3510 							buffer, length, ppos);
3511 }
3512 
3513 #ifdef CONFIG_NUMA
3514 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3515 			  void *buffer, size_t *length, loff_t *ppos)
3516 {
3517 	return hugetlb_sysctl_handler_common(true, table, write,
3518 							buffer, length, ppos);
3519 }
3520 #endif /* CONFIG_NUMA */
3521 
3522 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3523 		void *buffer, size_t *length, loff_t *ppos)
3524 {
3525 	struct hstate *h = &default_hstate;
3526 	unsigned long tmp;
3527 	int ret;
3528 
3529 	if (!hugepages_supported())
3530 		return -EOPNOTSUPP;
3531 
3532 	tmp = h->nr_overcommit_huge_pages;
3533 
3534 	if (write && hstate_is_gigantic(h))
3535 		return -EINVAL;
3536 
3537 	table->data = &tmp;
3538 	table->maxlen = sizeof(unsigned long);
3539 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3540 	if (ret)
3541 		goto out;
3542 
3543 	if (write) {
3544 		spin_lock(&hugetlb_lock);
3545 		h->nr_overcommit_huge_pages = tmp;
3546 		spin_unlock(&hugetlb_lock);
3547 	}
3548 out:
3549 	return ret;
3550 }
3551 
3552 #endif /* CONFIG_SYSCTL */
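
/*
 * Illustrative userspace sketch (not kernel code, never compiled): the
 * handlers above back /proc/sys/vm/nr_hugepages, nr_hugepages_mempolicy
 * and nr_overcommit_hugepages.  Resizing the default pool from userspace
 * is just a write to the corresponding proc file:
 */
#if 0
#include <stdio.h>

static int set_nr_hugepages(unsigned long nr)
{
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f)
		return -1;
	/* ends up in hugetlb_sysctl_handler() above */
	fprintf(f, "%lu\n", nr);
	return fclose(f);
}
#endif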
3553 
3554 void hugetlb_report_meminfo(struct seq_file *m)
3555 {
3556 	struct hstate *h;
3557 	unsigned long total = 0;
3558 
3559 	if (!hugepages_supported())
3560 		return;
3561 
3562 	for_each_hstate(h) {
3563 		unsigned long count = h->nr_huge_pages;
3564 
3565 		total += (PAGE_SIZE << huge_page_order(h)) * count;
3566 
3567 		if (h == &default_hstate)
3568 			seq_printf(m,
3569 				   "HugePages_Total:   %5lu\n"
3570 				   "HugePages_Free:    %5lu\n"
3571 				   "HugePages_Rsvd:    %5lu\n"
3572 				   "HugePages_Surp:    %5lu\n"
3573 				   "Hugepagesize:   %8lu kB\n",
3574 				   count,
3575 				   h->free_huge_pages,
3576 				   h->resv_huge_pages,
3577 				   h->surplus_huge_pages,
3578 				   (PAGE_SIZE << huge_page_order(h)) / 1024);
3579 	}
3580 
3581 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3582 }
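
/*
 * Example rendering in /proc/meminfo (values illustrative, single 2MB
 * hstate).  Note that the per-field lines cover only the default hstate,
 * while "Hugetlb:" sums memory across all hstates:
 *
 *	HugePages_Total:       8
 *	HugePages_Free:        6
 *	HugePages_Rsvd:        2
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:           16384 kB
 */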
3583 
3584 int hugetlb_report_node_meminfo(int nid, char *buf)
3585 {
3586 	struct hstate *h = &default_hstate;
3587 	struct hstate *h = &default_hstate;

3588 		return 0;
3589 	return sprintf(buf,
3590 		"Node %d HugePages_Total: %5u\n"
3591 		"Node %d HugePages_Free:  %5u\n"
3592 		"Node %d HugePages_Surp:  %5u\n",
3593 		nid, h->nr_huge_pages_node[nid],
3594 		nid, h->free_huge_pages_node[nid],
3595 		nid, h->surplus_huge_pages_node[nid]);
3596 }
3597 
3598 void hugetlb_show_meminfo(void)
3599 {
3600 	struct hstate *h;
3601 	int nid;
3602 
3603 	if (!hugepages_supported())
3604 		return;
3605 
3606 	for_each_node_state(nid, N_MEMORY)
3607 		for_each_hstate(h)
3608 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3609 				nid,
3610 				h->nr_huge_pages_node[nid],
3611 				h->free_huge_pages_node[nid],
3612 				h->surplus_huge_pages_node[nid],
3613 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3614 }
3615 
3616 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3617 {
3618 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3619 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3620 }
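
/*
 * Example rendering via /proc/<pid>/status (value illustrative): with a
 * single 2MB page mapped, hugetlb_usage holds 512 base pages and the
 * line reads:
 *
 *	HugetlbPages:	    2048 kB
 */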
3621 
3622 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3623 unsigned long hugetlb_total_pages(void)
3624 {
3625 	struct hstate *h;
3626 	unsigned long nr_total_pages = 0;
3627 
3628 	for_each_hstate(h)
3629 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3630 	return nr_total_pages;
3631 }
3632 
3633 static int hugetlb_acct_memory(struct hstate *h, long delta)
3634 {
3635 	int ret = -ENOMEM;
3636 
3637 	spin_lock(&hugetlb_lock);
3638 	/*
3639 	 * When cpusets are configured, strict hugetlb page reservation is
3640 	 * broken because the accounting is done on a global variable. Such
3641 	 * a reservation is essentially meaningless in the presence of
3642 	 * cpusets because it is never checked against page availability in
3643 	 * the current cpuset. An application can therefore still be OOM'ed
3644 	 * by the kernel when there are no free hugetlb pages in the cpuset
3645 	 * the task runs in. Enforcing strict accounting per cpuset is
3646 	 * nearly impossible (or too ugly) because cpusets are too fluid:
3647 	 * tasks and memory nodes can be moved between cpusets at any time.
3648 	 *
3649 	 * This change of semantics for shared hugetlb mappings with cpusets
3650 	 * is undesirable. However, in order to preserve some of the
3651 	 * semantics, we fall back to checking against current free page
3652 	 * availability as a best effort, hopefully minimizing the impact
3653 	 * of the semantic change that cpusets introduce.
3654 	 *
3655 	 * Apart from cpusets, the memory policy mechanism also determines
3656 	 * from which node the kernel will allocate memory on a NUMA
3657 	 * system. So, similar to cpusets, we must also take the memory
3658 	 * policy of the current task into account when checking free
3659 	 * page availability, per the description above.
3660 	 */
3661 	if (delta > 0) {
3662 		if (gather_surplus_pages(h, delta) < 0)
3663 			goto out;
3664 
3665 		if (delta > allowed_mems_nr(h)) {
3666 			return_unused_surplus_pages(h, delta);
3667 			goto out;
3668 		}
3669 	}
3670 
3671 	ret = 0;
3672 	if (delta < 0)
3673 		return_unused_surplus_pages(h, (unsigned long) -delta);
3674 
3675 out:
3676 	spin_unlock(&hugetlb_lock);
3677 	return ret;
3678 }
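
/*
 * Usage sketch for the routine above: a positive delta attempts to
 * reserve that many pages, growing the pool with surplus pages when
 * needed, and fails with -ENOMEM if the request cannot be satisfied
 * within the current cpuset/mempolicy limits; a negative delta releases
 * a previous reservation, as in hugetlb_vm_op_close() and
 * hugetlb_unreserve_pages() below.
 */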
3679 
3680 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3681 {
3682 	struct resv_map *resv = vma_resv_map(vma);
3683 
3684 	/*
3685 	 * This new VMA should share its sibling's reservation map if present.
3686 	 * The VMA will only ever have a valid reservation map pointer where
3687 	 * it is being copied for another still existing VMA.  As that VMA
3688 	 * has a reference to the reservation map it cannot disappear until
3689 	 * after this open call completes.  It is therefore safe to take a
3690 	 * new reference here without additional locking.
3691 	 */
3692 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3693 		kref_get(&resv->refs);
3694 }
3695 
3696 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3697 {
3698 	struct hstate *h = hstate_vma(vma);
3699 	struct resv_map *resv = vma_resv_map(vma);
3700 	struct hugepage_subpool *spool = subpool_vma(vma);
3701 	unsigned long reserve, start, end;
3702 	long gbl_reserve;
3703 
3704 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3705 		return;
3706 
3707 	start = vma_hugecache_offset(h, vma, vma->vm_start);
3708 	end = vma_hugecache_offset(h, vma, vma->vm_end);
3709 
3710 	reserve = (end - start) - region_count(resv, start, end);
3711 	hugetlb_cgroup_uncharge_counter(resv, start, end);
3712 	if (reserve) {
3713 		/*
3714 		 * Decrement reserve counts.  The global reserve count may be
3715 		 * adjusted if the subpool has a minimum size.
3716 		 */
3717 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3718 		hugetlb_acct_memory(h, -gbl_reserve);
3719 	}
3720 
3721 	kref_put(&resv->refs, resv_map_release);
3722 }
3723 
3724 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3725 {
3726 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
3727 		return -EINVAL;
3728 	return 0;
3729 }
3730 
3731 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3732 {
3733 	struct hstate *hstate = hstate_vma(vma);
3734 
3735 	return 1UL << huge_page_shift(hstate);
3736 }
3737 
3738 /*
3739  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3740  * handle_mm_fault() to try to instantiate regular-sized pages in the
3741  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3742  * this far.
3743  */
3744 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3745 {
3746 	BUG();
3747 	return 0;
3748 }
3749 
3750 /*
3751  * When a new function is introduced to vm_operations_struct and added
3752  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3753  * This is because, under the System V memory model, mappings created
3754  * via shmget/shmat with "huge page" specified are backed by hugetlbfs
3755  * files, and their original vm_ops are overwritten with shm_vm_ops.
3756  */
3757 const struct vm_operations_struct hugetlb_vm_ops = {
3758 	.fault = hugetlb_vm_op_fault,
3759 	.open = hugetlb_vm_op_open,
3760 	.close = hugetlb_vm_op_close,
3761 	.split = hugetlb_vm_op_split,
3762 	.pagesize = hugetlb_vm_op_pagesize,
3763 };
3764 
3765 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3766 				int writable)
3767 {
3768 	pte_t entry;
3769 
3770 	if (writable) {
3771 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3772 					 vma->vm_page_prot)));
3773 	} else {
3774 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3775 					   vma->vm_page_prot));
3776 	}
3777 	entry = pte_mkyoung(entry);
3778 	entry = pte_mkhuge(entry);
3779 	entry = arch_make_huge_pte(entry, vma, page, writable);
3780 
3781 	return entry;
3782 }
3783 
3784 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3785 				   unsigned long address, pte_t *ptep)
3786 {
3787 	pte_t entry;
3788 
3789 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3790 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3791 		update_mmu_cache(vma, address, ptep);
3792 }
3793 
3794 bool is_hugetlb_entry_migration(pte_t pte)
3795 {
3796 	swp_entry_t swp;
3797 
3798 	if (huge_pte_none(pte) || pte_present(pte))
3799 		return false;
3800 	swp = pte_to_swp_entry(pte);
3801 	if (non_swap_entry(swp) && is_migration_entry(swp))
3802 		return true;
3803 	else
3804 		return false;
3805 }
3806 
3807 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3808 {
3809 	swp_entry_t swp;
3810 
3811 	if (huge_pte_none(pte) || pte_present(pte))
3812 		return 0;
3813 	swp = pte_to_swp_entry(pte);
3814 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3815 		return 1;
3816 	else
3817 		return 0;
3818 }
3819 
3820 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3821 			    struct vm_area_struct *vma)
3822 {
3823 	pte_t *src_pte, *dst_pte, entry, dst_entry;
3824 	struct page *ptepage;
3825 	unsigned long addr;
3826 	int cow;
3827 	struct hstate *h = hstate_vma(vma);
3828 	unsigned long sz = huge_page_size(h);
3829 	struct address_space *mapping = vma->vm_file->f_mapping;
3830 	struct mmu_notifier_range range;
3831 	int ret = 0;
3832 
3833 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3834 
3835 	if (cow) {
3836 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3837 					vma->vm_start,
3838 					vma->vm_end);
3839 		mmu_notifier_invalidate_range_start(&range);
3840 	} else {
3841 		/*
3842 		 * For shared mappings, i_mmap_rwsem must be held to call
3843 		 * huge_pte_alloc; otherwise the returned ptep could go
3844 		 * away if it is part of a shared pmd and another thread
3845 		 * calls huge_pmd_unshare.
3846 		 */
3847 		i_mmap_lock_read(mapping);
3848 	}
3849 
3850 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3851 		spinlock_t *src_ptl, *dst_ptl;
3852 		src_pte = huge_pte_offset(src, addr, sz);
3853 		if (!src_pte)
3854 			continue;
3855 		dst_pte = huge_pte_alloc(dst, addr, sz);
3856 		if (!dst_pte) {
3857 			ret = -ENOMEM;
3858 			break;
3859 		}
3860 
3861 		/*
3862 		 * If the pagetables are shared, don't copy or take references.
3863 		 * dst_pte == src_pte is the common case of src/dest sharing.
3864 		 *
3865 		 * However, src could have 'unshared' and dst shares with
3866 		 * another vma.  If dst_pte !none, this implies sharing.
3867 		 * Check here before taking page table lock, and once again
3868 		 * after taking the lock below.
3869 		 */
3870 		dst_entry = huge_ptep_get(dst_pte);
3871 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3872 			continue;
3873 
3874 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3875 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3876 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3877 		entry = huge_ptep_get(src_pte);
3878 		dst_entry = huge_ptep_get(dst_pte);
3879 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3880 			/*
3881 			 * Skip if src entry none.  Also, skip in the
3882 			 * unlikely case dst entry !none as this implies
3883 			 * sharing with another vma.
3884 			 */
3885 			;
3886 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
3887 				    is_hugetlb_entry_hwpoisoned(entry))) {
3888 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
3889 
3890 			if (is_write_migration_entry(swp_entry) && cow) {
3891 				/*
3892 				 * COW mappings require pages in both
3893 				 * parent and child to be set to read.
3894 				 */
3895 				make_migration_entry_read(&swp_entry);
3896 				entry = swp_entry_to_pte(swp_entry);
3897 				set_huge_swap_pte_at(src, addr, src_pte,
3898 						     entry, sz);
3899 			}
3900 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3901 		} else {
3902 			if (cow) {
3903 				/*
3904 				 * No need to notify as we are downgrading page
3905 				 * table protection, not changing it to point
3906 				 * to a new page.
3907 				 *
3908 				 * See Documentation/vm/mmu_notifier.rst
3909 				 */
3910 				huge_ptep_set_wrprotect(src, addr, src_pte);
3911 			}
3912 			entry = huge_ptep_get(src_pte);
3913 			ptepage = pte_page(entry);
3914 			get_page(ptepage);
3915 			page_dup_rmap(ptepage, true);
3916 			set_huge_pte_at(dst, addr, dst_pte, entry);
3917 			hugetlb_count_add(pages_per_huge_page(h), dst);
3918 		}
3919 		spin_unlock(src_ptl);
3920 		spin_unlock(dst_ptl);
3921 	}
3922 
3923 	if (cow)
3924 		mmu_notifier_invalidate_range_end(&range);
3925 	else
3926 		i_mmap_unlock_read(mapping);
3927 
3928 	return ret;
3929 }
3930 
3931 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3932 			    unsigned long start, unsigned long end,
3933 			    struct page *ref_page)
3934 {
3935 	struct mm_struct *mm = vma->vm_mm;
3936 	unsigned long address;
3937 	pte_t *ptep;
3938 	pte_t pte;
3939 	spinlock_t *ptl;
3940 	struct page *page;
3941 	struct hstate *h = hstate_vma(vma);
3942 	unsigned long sz = huge_page_size(h);
3943 	struct mmu_notifier_range range;
3944 
3945 	WARN_ON(!is_vm_hugetlb_page(vma));
3946 	BUG_ON(start & ~huge_page_mask(h));
3947 	BUG_ON(end & ~huge_page_mask(h));
3948 
3949 	/*
3950 	 * This is a hugetlb vma; all the pte entries should point
3951 	 * to a huge page.
3952 	 */
3953 	tlb_change_page_size(tlb, sz);
3954 	tlb_start_vma(tlb, vma);
3955 
3956 	/*
3957 	 * If sharing is possible, alert mmu notifiers of the worst case.
3958 	 */
3959 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3960 				end);
3961 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3962 	mmu_notifier_invalidate_range_start(&range);
3963 	address = start;
3964 	for (; address < end; address += sz) {
3965 		ptep = huge_pte_offset(mm, address, sz);
3966 		if (!ptep)
3967 			continue;
3968 
3969 		ptl = huge_pte_lock(h, mm, ptep);
3970 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
3971 			spin_unlock(ptl);
3972 			/*
3973 			 * We just unmapped a page of PMDs by clearing a PUD.
3974 			 * The caller's TLB flush range should cover this area.
3975 			 */
3976 			continue;
3977 		}
3978 
3979 		pte = huge_ptep_get(ptep);
3980 		if (huge_pte_none(pte)) {
3981 			spin_unlock(ptl);
3982 			continue;
3983 		}
3984 
3985 		/*
3986 		 * A migrating or HWPoisoned hugepage is already
3987 		 * unmapped and its refcount is dropped, so just clear pte here.
3988 		 */
3989 		if (unlikely(!pte_present(pte))) {
3990 			huge_pte_clear(mm, address, ptep, sz);
3991 			spin_unlock(ptl);
3992 			continue;
3993 		}
3994 
3995 		page = pte_page(pte);
3996 		/*
3997 		 * If a reference page is supplied, it is because a specific
3998 		 * page is being unmapped, not a range. Ensure the page we
3999 		 * are about to unmap is the actual page of interest.
4000 		 */
4001 		if (ref_page) {
4002 			if (page != ref_page) {
4003 				spin_unlock(ptl);
4004 				continue;
4005 			}
4006 			/*
4007 			 * Mark the VMA as having unmapped its page so that
4008 			 * future faults in this VMA will fail rather than
4009 			 * look like data was lost.
4010 			 */
4011 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
4012 		}
4013 
4014 		pte = huge_ptep_get_and_clear(mm, address, ptep);
4015 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
4016 		if (huge_pte_dirty(pte))
4017 			set_page_dirty(page);
4018 
4019 		hugetlb_count_sub(pages_per_huge_page(h), mm);
4020 		page_remove_rmap(page, true);
4021 
4022 		spin_unlock(ptl);
4023 		tlb_remove_page_size(tlb, page, huge_page_size(h));
4024 		/*
4025 		 * Bail out after unmapping the reference page, if one was supplied.
4026 		 */
4027 		if (ref_page)
4028 			break;
4029 	}
4030 	mmu_notifier_invalidate_range_end(&range);
4031 	tlb_end_vma(tlb, vma);
4032 }
4033 
4034 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4035 			  struct vm_area_struct *vma, unsigned long start,
4036 			  unsigned long end, struct page *ref_page)
4037 {
4038 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
4039 
4040 	/*
4041 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
4042 	 * test will fail on a vma being torn down, and not grab a page table
4043 	 * on its way out.  We're lucky that the flag has such an appropriate
4044 	 * name, and can in fact be safely cleared here. We could clear it
4045 	 * before the __unmap_hugepage_range above, but all that's necessary
4046 	 * is to clear it before releasing the i_mmap_rwsem. This works
4047 	 * because in the context this is called, the VMA is about to be
4048 	 * destroyed and the i_mmap_rwsem is held.
4049 	 */
4050 	vma->vm_flags &= ~VM_MAYSHARE;
4051 }
4052 
4053 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4054 			  unsigned long end, struct page *ref_page)
4055 {
4056 	struct mm_struct *mm;
4057 	struct mmu_gather tlb;
4058 	unsigned long tlb_start = start;
4059 	unsigned long tlb_end = end;
4060 
4061 	/*
4062 	 * If shared PMDs were possibly used within this vma range, adjust
4063 	 * start/end for worst case tlb flushing.
4064 	 * Note that we cannot be sure if PMDs are shared until we try to
4065 	 * unmap pages.  However, we want to make sure TLB flushing covers
4066 	 * the largest possible range.
4067 	 */
4068 	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
4069 
4070 	mm = vma->vm_mm;
4071 
4072 	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
4073 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4074 	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
4075 }
4076 
4077 /*
4078  * This is called when the original mapper is failing to COW a MAP_PRIVATE
4079  * This is called when the original mapper fails to COW a MAP_PRIVATE
4080  * mapping it owns the reserve page for. The intention is to unmap the page
4081  * same region.
4082  */
4083 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4084 			      struct page *page, unsigned long address)
4085 {
4086 	struct hstate *h = hstate_vma(vma);
4087 	struct vm_area_struct *iter_vma;
4088 	struct address_space *mapping;
4089 	pgoff_t pgoff;
4090 
4091 	/*
4092 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4093 	 * from the page cache lookup, which is in HPAGE_SIZE units.
4094 	 */
4095 	address = address & huge_page_mask(h);
4096 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4097 			vma->vm_pgoff;
4098 	mapping = vma->vm_file->f_mapping;
4099 
4100 	/*
4101 	 * Take the mapping lock for the duration of the table walk. As
4102 	 * this mapping should be shared between all the VMAs,
4103 	 * __unmap_hugepage_range() is called with the lock already held.
4104 	 */
4105 	i_mmap_lock_write(mapping);
4106 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4107 		/* Do not unmap the current VMA */
4108 		if (iter_vma == vma)
4109 			continue;
4110 
4111 		/*
4112 		 * Shared VMAs have their own reserves and do not affect
4113 		 * MAP_PRIVATE accounting but it is possible that a shared
4114 		 * VMA is using the same page so check and skip such VMAs.
4115 		 */
4116 		if (iter_vma->vm_flags & VM_MAYSHARE)
4117 			continue;
4118 
4119 		/*
4120 		 * Unmap the page from other VMAs without their own reserves.
4121 		 * They get marked to be SIGKILLed if they fault in these
4122 		 * areas. This is because a future no-page fault on this VMA
4123 		 * could insert a zeroed page instead of the data existing
4124 		 * from the time of fork. This would look like data corruption.
4125 		 */
4126 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4127 			unmap_hugepage_range(iter_vma, address,
4128 					     address + huge_page_size(h), page);
4129 	}
4130 	i_mmap_unlock_write(mapping);
4131 }
4132 
4133 /*
4134  * hugetlb_cow() should be called with the page lock of the original hugepage held.
4135  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
4136  * cannot race with other handlers or page migration.
4137  * Keep the pte_same checks anyway to make transition from the mutex easier.
4138  */
4139 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4140 		       unsigned long address, pte_t *ptep,
4141 		       struct page *pagecache_page, spinlock_t *ptl)
4142 {
4143 	pte_t pte;
4144 	struct hstate *h = hstate_vma(vma);
4145 	struct page *old_page, *new_page;
4146 	int outside_reserve = 0;
4147 	vm_fault_t ret = 0;
4148 	unsigned long haddr = address & huge_page_mask(h);
4149 	struct mmu_notifier_range range;
4150 
4151 	pte = huge_ptep_get(ptep);
4152 	old_page = pte_page(pte);
4153 
4154 retry_avoidcopy:
4155 	/* If no-one else is actually using this page, avoid the copy
4156 	 * and just make the page writable */
4157 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4158 		page_move_anon_rmap(old_page, vma);
4159 		set_huge_ptep_writable(vma, haddr, ptep);
4160 		return 0;
4161 	}
4162 
4163 	/*
4164 	 * If the process that created a MAP_PRIVATE mapping is about to
4165 	 * perform a COW due to a shared page count, attempt to satisfy
4166 	 * the allocation without using the existing reserves. The pagecache
4167 	 * page is used to determine if the reserve at this address was
4168 	 * consumed or not. If reserves were used, a partial faulted mapping
4169 	 * at the time of fork() could consume its reserves on COW instead
4170 	 * of the full address range.
4171 	 */
4172 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4173 			old_page != pagecache_page)
4174 		outside_reserve = 1;
4175 
4176 	get_page(old_page);
4177 
4178 	/*
4179 	 * Drop page table lock as buddy allocator may be called. It will
4180 	 * be acquired again before returning to the caller, as expected.
4181 	 */
4182 	spin_unlock(ptl);
4183 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
4184 
4185 	if (IS_ERR(new_page)) {
4186 		/*
4187 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
4188 		 * it is due to references held by a child and an insufficient
4189 		 * huge page pool. To guarantee the original mapper's
4190 		 * reliability, unmap the page from child processes. The child
4191 		 * may get SIGKILLed if it later faults.
4192 		 */
4193 		if (outside_reserve) {
4194 			put_page(old_page);
4195 			BUG_ON(huge_pte_none(pte));
4196 			unmap_ref_private(mm, vma, old_page, haddr);
4197 			BUG_ON(huge_pte_none(pte));
4198 			spin_lock(ptl);
4199 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4200 			if (likely(ptep &&
4201 				   pte_same(huge_ptep_get(ptep), pte)))
4202 				goto retry_avoidcopy;
4203 			/*
4204 			 * A race occurred while re-acquiring the page
4205 			 * table lock, and our job is done.
4206 			 */
4207 			return 0;
4208 		}
4209 
4210 		ret = vmf_error(PTR_ERR(new_page));
4211 		goto out_release_old;
4212 	}
4213 
4214 	/*
4215 	 * When the original hugepage is a shared one, it does not have
4216 	 * an anon_vma prepared.
4217 	 */
4218 	if (unlikely(anon_vma_prepare(vma))) {
4219 		ret = VM_FAULT_OOM;
4220 		goto out_release_all;
4221 	}
4222 
4223 	copy_user_huge_page(new_page, old_page, address, vma,
4224 			    pages_per_huge_page(h));
4225 	__SetPageUptodate(new_page);
4226 
4227 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4228 				haddr + huge_page_size(h));
4229 	mmu_notifier_invalidate_range_start(&range);
4230 
4231 	/*
4232 	 * Retake the page table lock to check for racing updates
4233 	 * before the page tables are altered
4234 	 */
4235 	spin_lock(ptl);
4236 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4237 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4238 		ClearPagePrivate(new_page);
4239 
4240 		/* Break COW */
4241 		huge_ptep_clear_flush(vma, haddr, ptep);
4242 		mmu_notifier_invalidate_range(mm, range.start, range.end);
4243 		set_huge_pte_at(mm, haddr, ptep,
4244 				make_huge_pte(vma, new_page, 1));
4245 		page_remove_rmap(old_page, true);
4246 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
4247 		set_page_huge_active(new_page);
4248 		/* Make the old page be freed below */
4249 		new_page = old_page;
4250 	}
4251 	spin_unlock(ptl);
4252 	mmu_notifier_invalidate_range_end(&range);
4253 out_release_all:
4254 	restore_reserve_on_error(h, vma, haddr, new_page);
4255 	put_page(new_page);
4256 out_release_old:
4257 	put_page(old_page);
4258 
4259 	spin_lock(ptl); /* Caller expects lock to be held */
4260 	return ret;
4261 }
4262 
4263 /* Return the pagecache page at a given address within a VMA */
4264 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4265 			struct vm_area_struct *vma, unsigned long address)
4266 {
4267 	struct address_space *mapping;
4268 	pgoff_t idx;
4269 
4270 	mapping = vma->vm_file->f_mapping;
4271 	idx = vma_hugecache_offset(h, vma, address);
4272 
4273 	return find_lock_page(mapping, idx);
4274 }
4275 
4276 /*
4277  * Return whether there is a pagecache page to back the given address within the VMA.
4278  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
4279  */
4280 static bool hugetlbfs_pagecache_present(struct hstate *h,
4281 			struct vm_area_struct *vma, unsigned long address)
4282 {
4283 	struct address_space *mapping;
4284 	pgoff_t idx;
4285 	struct page *page;
4286 
4287 	mapping = vma->vm_file->f_mapping;
4288 	idx = vma_hugecache_offset(h, vma, address);
4289 
4290 	page = find_get_page(mapping, idx);
4291 	if (page)
4292 		put_page(page);
4293 	return page != NULL;
4294 }
4295 
4296 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4297 			   pgoff_t idx)
4298 {
4299 	struct inode *inode = mapping->host;
4300 	struct hstate *h = hstate_inode(inode);
4301 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4302 
4303 	if (err)
4304 		return err;
4305 	ClearPagePrivate(page);
4306 
4307 	/*
4308 	 * set page dirty so that it will not be removed from cache/file
4309 	 * by non-hugetlbfs specific code paths.
4310 	 */
4311 	set_page_dirty(page);
4312 
4313 	spin_lock(&inode->i_lock);
4314 	inode->i_blocks += blocks_per_huge_page(h);
4315 	spin_unlock(&inode->i_lock);
4316 	return 0;
4317 }
4318 
4319 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4320 			struct vm_area_struct *vma,
4321 			struct address_space *mapping, pgoff_t idx,
4322 			unsigned long address, pte_t *ptep, unsigned int flags)
4323 {
4324 	struct hstate *h = hstate_vma(vma);
4325 	vm_fault_t ret = VM_FAULT_SIGBUS;
4326 	int anon_rmap = 0;
4327 	unsigned long size;
4328 	struct page *page;
4329 	pte_t new_pte;
4330 	spinlock_t *ptl;
4331 	unsigned long haddr = address & huge_page_mask(h);
4332 	bool new_page = false;
4333 
4334 	/*
4335 	 * Currently, we are forced to kill the process in the event the
4336 	 * original mapper has unmapped pages from the child due to a failed
4337 	 * COW. Warn that such a situation has occurred as it may not be obvious
4338 	 */
4339 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4340 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4341 			   current->pid);
4342 		return ret;
4343 	}
4344 
4345 	/*
4346 	 * We cannot race with truncation due to holding i_mmap_rwsem.
4347 	 * i_size is modified when holding i_mmap_rwsem, so check here
4348 	 * once for faults beyond end of file.
4349 	 */
4350 	size = i_size_read(mapping->host) >> huge_page_shift(h);
4351 	if (idx >= size)
4352 		goto out;
4353 
4354 retry:
4355 	page = find_lock_page(mapping, idx);
4356 	if (!page) {
4357 		/*
4358 		 * Check for page in userfault range
4359 		 */
4360 		if (userfaultfd_missing(vma)) {
4361 			u32 hash;
4362 			struct vm_fault vmf = {
4363 				.vma = vma,
4364 				.address = haddr,
4365 				.flags = flags,
4366 				/*
4367 				 * Hard to debug if it ends up being
4368 				 * used by a callee that assumes
4369 				 * something about the other
4370 				 * uninitialized fields... same as in
4371 				 * memory.c
4372 				 */
4373 			};
4374 
4375 			/*
4376 			 * hugetlb_fault_mutex and i_mmap_rwsem must be
4377 			 * dropped before handling userfault.  Reacquire
4378 			 * after handling fault to make calling code simpler.
4379 			 */
4380 			hash = hugetlb_fault_mutex_hash(mapping, idx);
4381 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4382 			i_mmap_unlock_read(mapping);
4383 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
4384 			i_mmap_lock_read(mapping);
4385 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
4386 			goto out;
4387 		}
4388 
4389 		page = alloc_huge_page(vma, haddr, 0);
4390 		if (IS_ERR(page)) {
4391 			/*
4392 			 * Returning error will result in faulting task being
4393 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
4394 			 * tasks from racing to fault in the same page, which
4395 			 * could result in spurious allocation failures.
4396 			 * Page migration does not take the fault mutex, but
4397 			 * does a clear then write of pte's under page table
4398 			 * lock.  Page fault code could race with migration,
4399 			 * notice the clear pte and try to allocate a page
4400 			 * here.  Before returning error, get ptl and make
4401 			 * sure there really is no pte entry.
4402 			 */
4403 			ptl = huge_pte_lock(h, mm, ptep);
4404 			if (!huge_pte_none(huge_ptep_get(ptep))) {
4405 				ret = 0;
4406 				spin_unlock(ptl);
4407 				goto out;
4408 			}
4409 			spin_unlock(ptl);
4410 			ret = vmf_error(PTR_ERR(page));
4411 			goto out;
4412 		}
4413 		clear_huge_page(page, address, pages_per_huge_page(h));
4414 		__SetPageUptodate(page);
4415 		new_page = true;
4416 
4417 		if (vma->vm_flags & VM_MAYSHARE) {
4418 			int err = huge_add_to_page_cache(page, mapping, idx);
4419 			if (err) {
4420 				put_page(page);
4421 				if (err == -EEXIST)
4422 					goto retry;
4423 				goto out;
4424 			}
4425 		} else {
4426 			lock_page(page);
4427 			if (unlikely(anon_vma_prepare(vma))) {
4428 				ret = VM_FAULT_OOM;
4429 				goto backout_unlocked;
4430 			}
4431 			anon_rmap = 1;
4432 		}
4433 	} else {
4434 		/*
4435 		 * If a memory error occurs between mmap() and fault, a process
4436 		 * may not have a hwpoisoned swap entry for the errored virtual
4437 		 * address, so block the hugepage fault via a PG_hwpoison check.
4438 		 */
4439 		if (unlikely(PageHWPoison(page))) {
4440 			ret = VM_FAULT_HWPOISON |
4441 				VM_FAULT_SET_HINDEX(hstate_index(h));
4442 			goto backout_unlocked;
4443 		}
4444 	}
4445 
4446 	/*
4447 	 * If we are going to COW a private mapping later, we examine the
4448 	 * pending reservations for this page now. This will ensure that
4449 	 * any allocations necessary to record that reservation occur outside
4450 	 * the spinlock.
4451 	 */
4452 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4453 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4454 			ret = VM_FAULT_OOM;
4455 			goto backout_unlocked;
4456 		}
4457 		/* Just decrements count, does not deallocate */
4458 		vma_end_reservation(h, vma, haddr);
4459 	}
4460 
4461 	ptl = huge_pte_lock(h, mm, ptep);
4462 	ret = 0;
4463 	if (!huge_pte_none(huge_ptep_get(ptep)))
4464 		goto backout;
4465 
4466 	if (anon_rmap) {
4467 		ClearPagePrivate(page);
4468 		hugepage_add_new_anon_rmap(page, vma, haddr);
4469 	} else
4470 		page_dup_rmap(page, true);
4471 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4472 				&& (vma->vm_flags & VM_SHARED)));
4473 	set_huge_pte_at(mm, haddr, ptep, new_pte);
4474 
4475 	hugetlb_count_add(pages_per_huge_page(h), mm);
4476 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4477 		/* Optimization, do the COW without a second fault */
4478 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4479 	}
4480 
4481 	spin_unlock(ptl);
4482 
4483 	/*
4484 	 * Only make newly allocated pages active.  Existing pages found
4485 	 * in the pagecache could be !page_huge_active() if they have been
4486 	 * isolated for migration.
4487 	 */
4488 	if (new_page)
4489 		set_page_huge_active(page);
4490 
4491 	unlock_page(page);
4492 out:
4493 	return ret;
4494 
4495 backout:
4496 	spin_unlock(ptl);
4497 backout_unlocked:
4498 	unlock_page(page);
4499 	restore_reserve_on_error(h, vma, haddr, page);
4500 	put_page(page);
4501 	goto out;
4502 }
4503 
4504 #ifdef CONFIG_SMP
4505 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4506 {
4507 	unsigned long key[2];
4508 	u32 hash;
4509 
4510 	key[0] = (unsigned long) mapping;
4511 	key[1] = idx;
4512 
4513 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4514 
4515 	return hash & (num_fault_mutexes - 1);
4516 }
4517 #else
4518 /*
4519  * For uniprocessor systems we always use a single mutex, so just
4520  * return 0 and avoid the hashing overhead.
4521  */
4522 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4523 {
4524 	return 0;
4525 }
4526 #endif
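
/*
 * A minimal usage sketch (never compiled), mirroring hugetlb_fault()
 * below.  The mask in the SMP variant above assumes num_fault_mutexes
 * is a power of two, which is how the table is sized at init time:
 */
#if 0
static void fault_mutex_example(struct address_space *mapping, pgoff_t idx)
{
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... fault in or tear down the page at (mapping, idx) ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
#endif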
4527 
4528 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4529 			unsigned long address, unsigned int flags)
4530 {
4531 	pte_t *ptep, entry;
4532 	spinlock_t *ptl;
4533 	vm_fault_t ret;
4534 	u32 hash;
4535 	pgoff_t idx;
4536 	struct page *page = NULL;
4537 	struct page *pagecache_page = NULL;
4538 	struct hstate *h = hstate_vma(vma);
4539 	struct address_space *mapping;
4540 	int need_wait_lock = 0;
4541 	unsigned long haddr = address & huge_page_mask(h);
4542 
4543 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4544 	if (ptep) {
4545 		/*
4546 		 * Since we hold no locks, ptep could be stale.  That is
4547 		 * OK as we are only making decisions based on content and
4548 		 * not actually modifying content here.
4549 		 */
4550 		entry = huge_ptep_get(ptep);
4551 		if (unlikely(is_hugetlb_entry_migration(entry))) {
4552 			migration_entry_wait_huge(vma, mm, ptep);
4553 			return 0;
4554 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4555 			return VM_FAULT_HWPOISON_LARGE |
4556 				VM_FAULT_SET_HINDEX(hstate_index(h));
4557 	}
4558 
4559 	/*
4560 	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
4561 	 * until finished with ptep.  This serves two purposes:
4562 	 * 1) It prevents huge_pmd_unshare from being called elsewhere
4563 	 *    and making the ptep no longer valid.
4564 	 * 2) It synchronizes us with i_size modifications during truncation.
4565 	 *
4566 	 * ptep could have already been assigned via huge_pte_offset.  That
4567 	 * is OK, as huge_pte_alloc will return the same value unless
4568 	 * something has changed.
4569 	 */
4570 	mapping = vma->vm_file->f_mapping;
4571 	i_mmap_lock_read(mapping);
4572 	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4573 	if (!ptep) {
4574 		i_mmap_unlock_read(mapping);
4575 		return VM_FAULT_OOM;
4576 	}
4577 
4578 	/*
4579 	 * Serialize hugepage allocation and instantiation, so that we don't
4580 	 * get spurious allocation failures if two CPUs race to instantiate
4581 	 * the same page in the page cache.
4582 	 */
4583 	idx = vma_hugecache_offset(h, vma, haddr);
4584 	hash = hugetlb_fault_mutex_hash(mapping, idx);
4585 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
4586 
4587 	entry = huge_ptep_get(ptep);
4588 	if (huge_pte_none(entry)) {
4589 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4590 		goto out_mutex;
4591 	}
4592 
4593 	ret = 0;
4594 
4595 	/*
4596 	 * entry could be a migration/hwpoison entry at this point, so this
4597 	 * check prevents the kernel from going below assuming that we have
4598 	 * an active hugepage in the pagecache. The goto expects a second
4599 	 * page fault, which the is_hugetlb_entry_(migration|hwpoisoned)
4600 	 * checks will handle properly.
4601 	 */
4602 	if (!pte_present(entry))
4603 		goto out_mutex;
4604 
4605 	/*
4606 	 * If we are going to COW the mapping later, we examine the pending
4607 	 * reservations for this page now. This will ensure that any
4608 	 * allocations necessary to record that reservation occur outside the
4609 	 * spinlock. For private mappings, we also lookup the pagecache
4610 	 * page now as it is used to determine if a reservation has been
4611 	 * consumed.
4612 	 */
4613 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4614 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4615 			ret = VM_FAULT_OOM;
4616 			goto out_mutex;
4617 		}
4618 		/* Just decrements count, does not deallocate */
4619 		vma_end_reservation(h, vma, haddr);
4620 
4621 		if (!(vma->vm_flags & VM_MAYSHARE))
4622 			pagecache_page = hugetlbfs_pagecache_page(h,
4623 								vma, haddr);
4624 	}
4625 
4626 	ptl = huge_pte_lock(h, mm, ptep);
4627 
4628 	/* Check for a racing update before calling hugetlb_cow */
4629 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4630 		goto out_ptl;
4631 
4632 	/*
4633 	 * hugetlb_cow() requires page locks of pte_page(entry) and
4634 	 * pagecache_page, so here we need to take the former one
4635 	 * when page != pagecache_page or !pagecache_page.
4636 	 */
4637 	page = pte_page(entry);
4638 	if (page != pagecache_page)
4639 		if (!trylock_page(page)) {
4640 			need_wait_lock = 1;
4641 			goto out_ptl;
4642 		}
4643 
4644 	get_page(page);
4645 
4646 	if (flags & FAULT_FLAG_WRITE) {
4647 		if (!huge_pte_write(entry)) {
4648 			ret = hugetlb_cow(mm, vma, address, ptep,
4649 					  pagecache_page, ptl);
4650 			goto out_put_page;
4651 		}
4652 		entry = huge_pte_mkdirty(entry);
4653 	}
4654 	entry = pte_mkyoung(entry);
4655 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4656 						flags & FAULT_FLAG_WRITE))
4657 		update_mmu_cache(vma, haddr, ptep);
4658 out_put_page:
4659 	if (page != pagecache_page)
4660 		unlock_page(page);
4661 	put_page(page);
4662 out_ptl:
4663 	spin_unlock(ptl);
4664 
4665 	if (pagecache_page) {
4666 		unlock_page(pagecache_page);
4667 		put_page(pagecache_page);
4668 	}
4669 out_mutex:
4670 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4671 	i_mmap_unlock_read(mapping);
4672 	/*
4673 	 * Generally it's safe to hold a refcount while waiting on a page
4674 	 * lock. But here we only wait to defer the next page fault and avoid
4675 	 * a busy loop; the page is not used after being unlocked and before
4676 	 * the current page fault returns. So we are safe from accessing a
4677 	 * freed page, even if we wait here without taking a refcount.
4678 	 */
4679 	if (need_wait_lock)
4680 		wait_on_page_locked(page);
4681 	return ret;
4682 }
4683 
4684 /*
4685  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4686  * modifications for huge pages.
4687  */
4688 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4689 			    pte_t *dst_pte,
4690 			    struct vm_area_struct *dst_vma,
4691 			    unsigned long dst_addr,
4692 			    unsigned long src_addr,
4693 			    struct page **pagep)
4694 {
4695 	struct address_space *mapping;
4696 	pgoff_t idx;
4697 	unsigned long size;
4698 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
4699 	struct hstate *h = hstate_vma(dst_vma);
4700 	pte_t _dst_pte;
4701 	spinlock_t *ptl;
4702 	int ret;
4703 	struct page *page;
4704 
4705 	if (!*pagep) {
4706 		ret = -ENOMEM;
4707 		page = alloc_huge_page(dst_vma, dst_addr, 0);
4708 		if (IS_ERR(page))
4709 			goto out;
4710 
4711 		ret = copy_huge_page_from_user(page,
4712 						(const void __user *) src_addr,
4713 						pages_per_huge_page(h), false);
4714 
4715 		/* fallback to copy_from_user outside mmap_lock */
4716 		if (unlikely(ret)) {
4717 			ret = -ENOENT;
4718 			*pagep = page;
4719 			/* don't free the page */
4720 			goto out;
4721 		}
4722 	} else {
4723 		page = *pagep;
4724 		*pagep = NULL;
4725 	}
4726 
4727 	/*
4728 	 * The memory barrier inside __SetPageUptodate makes sure that
4729 	 * preceding stores to the page contents become visible before
4730 	 * the set_pte_at() write.
4731 	 */
4732 	__SetPageUptodate(page);
4733 
4734 	mapping = dst_vma->vm_file->f_mapping;
4735 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4736 
4737 	/*
4738 	 * If shared, add to page cache
4739 	 */
4740 	if (vm_shared) {
4741 		size = i_size_read(mapping->host) >> huge_page_shift(h);
4742 		ret = -EFAULT;
4743 		if (idx >= size)
4744 			goto out_release_nounlock;
4745 
4746 		/*
4747 		 * Serialization between remove_inode_hugepages() and
4748 		 * huge_add_to_page_cache() below happens through the
4749 		 * hugetlb_fault_mutex_table, which here must be held by
4750 		 * the caller.
4751 		 */
4752 		ret = huge_add_to_page_cache(page, mapping, idx);
4753 		if (ret)
4754 			goto out_release_nounlock;
4755 	}
4756 
4757 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4758 	spin_lock(ptl);
4759 
4760 	/*
4761 	 * Recheck the i_size after holding PT lock to make sure not
4762 	 * to leave any page mapped (as page_mapped()) beyond the end
4763 	 * of the i_size (remove_inode_hugepages() is strict about
4764 	 * enforcing that). If we bail out here, we'll also leave a
4765 	 * page in the radix tree in the vm_shared case beyond the end
4766 	 * of the i_size, but remove_inode_hugepages() will take care
4767 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
4768 	 */
4769 	size = i_size_read(mapping->host) >> huge_page_shift(h);
4770 	ret = -EFAULT;
4771 	if (idx >= size)
4772 		goto out_release_unlock;
4773 
4774 	ret = -EEXIST;
4775 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
4776 		goto out_release_unlock;
4777 
4778 	if (vm_shared) {
4779 		page_dup_rmap(page, true);
4780 	} else {
4781 		ClearPagePrivate(page);
4782 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4783 	}
4784 
4785 	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4786 	if (dst_vma->vm_flags & VM_WRITE)
4787 		_dst_pte = huge_pte_mkdirty(_dst_pte);
4788 	_dst_pte = pte_mkyoung(_dst_pte);
4789 
4790 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4791 
4792 	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4793 					dst_vma->vm_flags & VM_WRITE);
4794 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4795 
4796 	/* No need to invalidate - it was non-present before */
4797 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
4798 
4799 	spin_unlock(ptl);
4800 	set_page_huge_active(page);
4801 	if (vm_shared)
4802 		unlock_page(page);
4803 	ret = 0;
4804 out:
4805 	return ret;
4806 out_release_unlock:
4807 	spin_unlock(ptl);
4808 	if (vm_shared)
4809 		unlock_page(page);
4810 out_release_nounlock:
4811 	put_page(page);
4812 	goto out;
4813 }
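
/*
 * Illustrative userspace sketch (not kernel code, never compiled) of the
 * UFFDIO_COPY ioctl that ends up in the routine above for hugetlb VMAs.
 * 'uffd' is assumed to be a userfaultfd registered over a hugetlb
 * mapping; dst and len must be multiples of the huge page size:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static long uffd_copy_huge(int uffd, unsigned long dst, unsigned long src,
			   unsigned long len)
{
	struct uffdio_copy copy = {
		.dst	= dst,
		.src	= src,
		.len	= len,
		.mode	= 0,
	};

	return ioctl(uffd, UFFDIO_COPY, &copy);
}
#endif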
4814 
4815 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4816 			 struct page **pages, struct vm_area_struct **vmas,
4817 			 unsigned long *position, unsigned long *nr_pages,
4818 			 long i, unsigned int flags, int *locked)
4819 {
4820 	unsigned long pfn_offset;
4821 	unsigned long vaddr = *position;
4822 	unsigned long remainder = *nr_pages;
4823 	struct hstate *h = hstate_vma(vma);
4824 	int err = -EFAULT;
4825 
4826 	while (vaddr < vma->vm_end && remainder) {
4827 		pte_t *pte;
4828 		spinlock_t *ptl = NULL;
4829 		int absent;
4830 		struct page *page;
4831 
4832 		/*
4833 		 * If we have a pending SIGKILL, don't keep faulting pages and
4834 		 * potentially allocating memory.
4835 		 */
4836 		if (fatal_signal_pending(current)) {
4837 			remainder = 0;
4838 			break;
4839 		}
4840 
4841 		/*
4842 		 * Some archs (sparc64, sh*) have multiple pte_ts to
4843 		 * each hugepage.  We have to make sure we get the
4844 		 * first, for the page indexing below to work.
4845 		 *
4846 		 * Note that page table lock is not held when pte is null.
4847 		 */
4848 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4849 				      huge_page_size(h));
4850 		if (pte)
4851 			ptl = huge_pte_lock(h, mm, pte);
4852 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
4853 
4854 		/*
4855 		 * When coredumping, it suits get_dump_page if we just return
4856 		 * an error where there's an empty slot with no huge pagecache
4857 		 * to back it.  This way, we avoid allocating a hugepage, and
4858 		 * the sparse dumpfile avoids allocating disk blocks, but its
4859 		 * huge holes still show up with zeroes where they need to be.
4860 		 */
4861 		if (absent && (flags & FOLL_DUMP) &&
4862 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4863 			if (pte)
4864 				spin_unlock(ptl);
4865 			remainder = 0;
4866 			break;
4867 		}
4868 
4869 		/*
4870 		 * We need to call hugetlb_fault for both hugepages under
4871 		 * migration (in which case hugetlb_fault waits for the
4872 		 * migration) and hwpoisoned hugepages (in which case we need
4873 		 * to prevent the caller from accessing them). To do this, we
4874 		 * use is_swap_pte here instead of is_hugetlb_entry_migration
4875 		 * and is_hugetlb_entry_hwpoisoned, because it simply covers
4876 		 * both cases, and because we can't follow correct pages
4877 		 * directly from any kind of swap entry.
4878 		 */
4879 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4880 		    ((flags & FOLL_WRITE) &&
4881 		      !huge_pte_write(huge_ptep_get(pte)))) {
4882 			vm_fault_t ret;
4883 			unsigned int fault_flags = 0;
4884 
4885 			if (pte)
4886 				spin_unlock(ptl);
4887 			if (flags & FOLL_WRITE)
4888 				fault_flags |= FAULT_FLAG_WRITE;
4889 			if (locked)
4890 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4891 					FAULT_FLAG_KILLABLE;
4892 			if (flags & FOLL_NOWAIT)
4893 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4894 					FAULT_FLAG_RETRY_NOWAIT;
4895 			if (flags & FOLL_TRIED) {
4896 				/*
4897 				 * Note: FAULT_FLAG_ALLOW_RETRY and
4898 				 * FAULT_FLAG_TRIED can co-exist
4899 				 */
4900 				fault_flags |= FAULT_FLAG_TRIED;
4901 			}
4902 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4903 			if (ret & VM_FAULT_ERROR) {
4904 				err = vm_fault_to_errno(ret, flags);
4905 				remainder = 0;
4906 				break;
4907 			}
4908 			if (ret & VM_FAULT_RETRY) {
4909 				if (locked &&
4910 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4911 					*locked = 0;
4912 				*nr_pages = 0;
4913 				/*
4914 				 * VM_FAULT_RETRY must not return an
4915 				 * error; it will return zero
4916 				 * instead.
4917 				 *
4918 				 * No need to update "position" as the
4919 				 * caller will not check it after
4920 				 * *nr_pages is set to 0.
4921 				 */
4922 				return i;
4923 			}
4924 			continue;
4925 		}
4926 
4927 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4928 		page = pte_page(huge_ptep_get(pte));
4929 
4930 		/*
4931 		 * If subpage information is not requested, update counters
4932 		 * and skip the same_page loop below.
4933 		 */
4934 		if (!pages && !vmas && !pfn_offset &&
4935 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
4936 		    (remainder >= pages_per_huge_page(h))) {
4937 			vaddr += huge_page_size(h);
4938 			remainder -= pages_per_huge_page(h);
4939 			i += pages_per_huge_page(h);
4940 			spin_unlock(ptl);
4941 			continue;
4942 		}
4943 
4944 same_page:
4945 		if (pages) {
4946 			pages[i] = mem_map_offset(page, pfn_offset);
4947 			/*
4948 			 * try_grab_page() should always succeed here, because:
4949 			 * a) we hold the ptl lock, and b) we've just checked
4950 			 * that the huge page is present in the page tables. If
4951 			 * the huge page is present, then the tail pages must
4952 			 * also be present. The ptl prevents the head page and
4953 			 * tail pages from being rearranged in any way. So this
4954 			 * page must be available at this point, unless the page
4955 			 * refcount overflowed:
4956 			 */
4957 			if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
4958 				spin_unlock(ptl);
4959 				remainder = 0;
4960 				err = -ENOMEM;
4961 				break;
4962 			}
4963 		}
4964 
4965 		if (vmas)
4966 			vmas[i] = vma;
4967 
4968 		vaddr += PAGE_SIZE;
4969 		++pfn_offset;
4970 		--remainder;
4971 		++i;
4972 		if (vaddr < vma->vm_end && remainder &&
4973 				pfn_offset < pages_per_huge_page(h)) {
4974 			/*
4975 			 * We use pfn_offset to avoid touching the pageframes
4976 			 * of this compound page.
4977 			 */
4978 			goto same_page;
4979 		}
4980 		spin_unlock(ptl);
4981 	}
4982 	*nr_pages = remainder;
4983 	/*
4984 	 * Setting position is actually required only if remainder is
4985 	 * not zero, but it's faster not to add an "if (remainder)"
4986 	 * branch.
4987 	 */
4988 	*position = vaddr;
4989 
4990 	return i ? i : err;
4991 }
4992 
4993 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4994 /*
4995  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4996  * implement this.
4997  */
4998 #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
4999 #endif
5000 
5001 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
5002 		unsigned long address, unsigned long end, pgprot_t newprot)
5003 {
5004 	struct mm_struct *mm = vma->vm_mm;
5005 	unsigned long start = address;
5006 	pte_t *ptep;
5007 	pte_t pte;
5008 	struct hstate *h = hstate_vma(vma);
5009 	unsigned long pages = 0;
5010 	bool shared_pmd = false;
5011 	struct mmu_notifier_range range;
5012 
5013 	/*
5014 	 * In the case of shared PMDs, the area to flush could be beyond
5015 	 * start/end.  Set range.start/range.end to cover the maximum possible
5016 	 * range if PMD sharing is possible.
5017 	 */
5018 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5019 				0, vma, mm, start, end);
5020 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5021 
5022 	BUG_ON(address >= end);
5023 	flush_cache_range(vma, range.start, range.end);
5024 
5025 	mmu_notifier_invalidate_range_start(&range);
5026 	i_mmap_lock_write(vma->vm_file->f_mapping);
5027 	for (; address < end; address += huge_page_size(h)) {
5028 		spinlock_t *ptl;
5029 		ptep = huge_pte_offset(mm, address, huge_page_size(h));
5030 		if (!ptep)
5031 			continue;
5032 		ptl = huge_pte_lock(h, mm, ptep);
5033 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5034 			pages++;
5035 			spin_unlock(ptl);
5036 			shared_pmd = true;
5037 			continue;
5038 		}
5039 		pte = huge_ptep_get(ptep);
5040 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5041 			spin_unlock(ptl);
5042 			continue;
5043 		}
5044 		if (unlikely(is_hugetlb_entry_migration(pte))) {
5045 			swp_entry_t entry = pte_to_swp_entry(pte);
5046 
5047 			if (is_write_migration_entry(entry)) {
5048 				pte_t newpte;
5049 
5050 				make_migration_entry_read(&entry);
5051 				newpte = swp_entry_to_pte(entry);
5052 				set_huge_swap_pte_at(mm, address, ptep,
5053 						     newpte, huge_page_size(h));
5054 				pages++;
5055 			}
5056 			spin_unlock(ptl);
5057 			continue;
5058 		}
5059 		if (!huge_pte_none(pte)) {
5060 			pte_t old_pte;
5061 
5062 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5063 			pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
5064 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
5065 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
5066 			pages++;
5067 		}
5068 		spin_unlock(ptl);
5069 	}
5070 	/*
5071 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
5072 	 * may have cleared our pud entry and done put_page on the page table:
5073 	 * once we release i_mmap_rwsem, another task can do the final put_page
5074 	 * and that page table be reused and filled with junk.  If we actually
5075 	 * did unshare a page of pmds, flush the range corresponding to the pud.
5076 	 */
5077 	if (shared_pmd)
5078 		flush_hugetlb_tlb_range(vma, range.start, range.end);
5079 	else
5080 		flush_hugetlb_tlb_range(vma, start, end);
5081 	/*
5082 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
5083 	 * page table protection, not changing it to point to a new page.
5084 	 *
5085 	 * See Documentation/vm/mmu_notifier.rst
5086 	 */
5087 	i_mmap_unlock_write(vma->vm_file->f_mapping);
5088 	mmu_notifier_invalidate_range_end(&range);
5089 
5090 	return pages << h->order;
5091 }
5092 
5093 int hugetlb_reserve_pages(struct inode *inode,
5094 					long from, long to,
5095 					struct vm_area_struct *vma,
5096 					vm_flags_t vm_flags)
5097 {
5098 	long ret, chg, add = -1;
5099 	struct hstate *h = hstate_inode(inode);
5100 	struct hugepage_subpool *spool = subpool_inode(inode);
5101 	struct resv_map *resv_map;
5102 	struct hugetlb_cgroup *h_cg = NULL;
5103 	long gbl_reserve, regions_needed = 0;
5104 
5105 	/* This should never happen */
5106 	if (from > to) {
5107 		VM_WARN(1, "%s called with a negative range\n", __func__);
5108 		return -EINVAL;
5109 	}
5110 
5111 	/*
5112 	 * Only apply hugepage reservation if asked. At fault time, an
5113 	 * attempt will be made for VM_NORESERVE to allocate a page
5114 	 * without using reserves.
5115 	 */
5116 	if (vm_flags & VM_NORESERVE)
5117 		return 0;
5118 
5119 	/*
5120 	 * Shared mappings base their reservation on the number of pages that
5121 	 * are already allocated on behalf of the file. Private mappings need
5122 	 * to reserve the full area even if read-only as mprotect() may be
5123 	 * called to make the mapping read-write. Assume !vma is a shm mapping.
5124 	 */
5125 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
5126 		/*
5127 		 * resv_map cannot be NULL as hugetlb_reserve_pages is only
5128 		 * called for inodes for which resv_maps were created (see
5129 		 * hugetlbfs_get_inode).
5130 		 */
5131 		resv_map = inode_resv_map(inode);
5132 
5133 		chg = region_chg(resv_map, from, to, &regions_needed);
5134 
5135 	} else {
5136 		/* Private mapping. */
5137 		resv_map = resv_map_alloc();
5138 		if (!resv_map)
5139 			return -ENOMEM;
5140 
5141 		chg = to - from;
5142 
5143 		set_vma_resv_map(vma, resv_map);
5144 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5145 	}
5146 
5147 	if (chg < 0) {
5148 		ret = chg;
5149 		goto out_err;
5150 	}
5151 
5152 	ret = hugetlb_cgroup_charge_cgroup_rsvd(
5153 		hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
5154 
5155 	if (ret < 0) {
5156 		ret = -ENOMEM;
5157 		goto out_err;
5158 	}
5159 
5160 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5161 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
5162 	 * off the resv_map.
5163 		 */
5164 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5165 	}
5166 
5167 	/*
5168 	 * There must be enough pages in the subpool for the mapping. If
5169 	 * the subpool has a minimum size, there may be some global
5170 	 * reservations already in place (gbl_reserve).
5171 	 */
5172 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5173 	if (gbl_reserve < 0) {
5174 		ret = -ENOSPC;
5175 		goto out_uncharge_cgroup;
5176 	}
5177 
5178 	/*
5179 	 * Check that enough hugepages are available for the reservation.
5180 	 * Hand the pages back to the subpool if there are not.
5181 	 */
5182 	ret = hugetlb_acct_memory(h, gbl_reserve);
5183 	if (ret < 0)
5184 		goto out_put_pages;
5186 
5187 	/*
5188 	 * Account for the reservations made. Shared mappings record regions
5189 	 * that have reservations as they are shared by multiple VMAs.
5190 	 * When the last VMA disappears, the region map says how much
5191 	 * the reservation was and the page cache tells how much of
5192 	 * the reservation was consumed. Private mappings are per-VMA and
5193 	 * only the consumed reservations are tracked. When the VMA
5194 	 * disappears, the original reservation is the VMA size and the
5195 	 * consumed reservations are stored in the map. Hence, nothing
5196 	 * else has to be done for private mappings here
5197 	 */
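	/*
	 * Worked example (illustrative numbers): region_chg() above may
	 * report chg = 10 huge pages needed for [from, to).  If racing
	 * faults add entries to the reserve map before region_add() runs,
	 * region_add() may return add = 8; the chg - add = 2 pages already
	 * charged to the subpool and cgroup are then handed back in the
	 * chg > add branch below.
	 */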
5198 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
5199 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5200 
5201 		if (unlikely(add < 0)) {
5202 			hugetlb_acct_memory(h, -gbl_reserve);
5203 			goto out_put_pages;
5204 		} else if (unlikely(chg > add)) {
5205 			/*
5206 			 * pages in this range were added to the reserve
5207 			 * map between region_chg and region_add.  This
5208 			 * indicates a race with alloc_huge_page.  Adjust
5209 			 * the subpool and reserve counts modified above
5210 			 * based on the difference.
5211 			 */
5212 			long rsv_adjust;
5213 
5214 			hugetlb_cgroup_uncharge_cgroup_rsvd(
5215 				hstate_index(h),
5216 				(chg - add) * pages_per_huge_page(h), h_cg);
5217 
5218 			rsv_adjust = hugepage_subpool_put_pages(spool,
5219 								chg - add);
5220 			hugetlb_acct_memory(h, -rsv_adjust);
5221 		}
5222 	}
5223 	return 0;
5224 out_put_pages:
5225 	/* put back original number of pages, chg */
5226 	(void)hugepage_subpool_put_pages(spool, chg);
5227 out_uncharge_cgroup:
5228 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5229 					    chg * pages_per_huge_page(h), h_cg);
5230 out_err:
5231 	/* Only call region_abort if the region_chg succeeded but the
5232 	 * region_add failed or didn't run.
5233 	 */
5234 	if ((!vma || vma->vm_flags & VM_MAYSHARE) &&
5235 	    chg >= 0 && add < 0)
5236 		region_abort(resv_map, from, to, regions_needed);
5237 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5238 		kref_put(&resv_map->refs, resv_map_release);
5239 	return ret;
5240 }
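
/*
 * Illustrative caller (a sketch modeled on the hugetlbfs mmap path in
 * fs/hugetlbfs/inode.c): note that from/to are expressed in huge pages,
 * so byte lengths and base-page offsets must be scaled by the hstate's
 * order first:
 *
 *	if (hugetlb_reserve_pages(inode,
 *				  vma->vm_pgoff >> huge_page_order(h),
 *				  len >> huge_page_shift(h),
 *				  vma, vma->vm_flags))
 *		goto out;
 */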
5241 
5242 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5243 								long freed)
5244 {
5245 	struct hstate *h = hstate_inode(inode);
5246 	struct resv_map *resv_map = inode_resv_map(inode);
5247 	long chg = 0;
5248 	struct hugepage_subpool *spool = subpool_inode(inode);
5249 	long gbl_reserve;
5250 
5251 	/*
5252 	 * Since this routine can be called in the evict inode path for all
5253 	 * hugetlbfs inodes, resv_map could be NULL.
5254 	 */
5255 	if (resv_map) {
5256 		chg = region_del(resv_map, start, end);
5257 		/*
5258 		 * region_del() can fail in the rare case where a region
5259 		 * must be split and another region descriptor can not be
5260 		 * allocated.  If end == LONG_MAX, it will not fail.
5261 		 */
5262 		if (chg < 0)
5263 			return chg;
5264 	}
5265 
5266 	spin_lock(&inode->i_lock);
5267 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
5268 	spin_unlock(&inode->i_lock);
5269 
5270 	/*
5271 	 * If the subpool has a minimum size, the number of global
5272 	 * reservations to be released may be adjusted.
5273 	 */
5274 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5275 	hugetlb_acct_memory(h, -gbl_reserve);
5276 
5277 	return 0;
5278 }
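
/*
 * Worked example (illustrative numbers): truncating a hole for which
 * region_del() reports chg = 5 reserved huge pages, of which freed = 3
 * were actually present in the page cache, leaves chg - freed = 2
 * unconsumed reservations to return to the subpool;
 * hugepage_subpool_put_pages() then reports how many of those
 * (gbl_reserve) must also be released from the global pool.
 */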
5279 
5280 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5281 static unsigned long page_table_shareable(struct vm_area_struct *svma,
5282 				struct vm_area_struct *vma,
5283 				unsigned long addr, pgoff_t idx)
5284 {
5285 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5286 				svma->vm_start;
5287 	unsigned long sbase = saddr & PUD_MASK;
5288 	unsigned long s_end = sbase + PUD_SIZE;
5289 
5290 	/* Allow segments to share even if only one is marked locked */
5291 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5292 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
5293 
5294 	/*
5295 	 * Match the virtual addresses, permissions and the alignment of the
5296 	 * page table page.
5297 	 */
5298 	if (pmd_index(addr) != pmd_index(saddr) ||
5299 	    vm_flags != svm_flags ||
5300 	    sbase < svma->vm_start || svma->vm_end < s_end)
5301 		return 0;
5302 
5303 	return saddr;
5304 }
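
/*
 * Worked example (illustrative addresses; x86-64 with PUD_SIZE == 1 GiB
 * assumed): if vma maps the file from vm_start == 0x40000000 and svma
 * maps the same file from svma->vm_start == 0x80000000 (vm_pgoff == 0
 * for both), then for addr == 0x40200000 the file index is idx == 512
 * base pages and saddr == 0x80200000.  Sharing is possible because
 * saddr occupies the same position within its 1 GiB region
 * [0x80000000, 0xC0000000) and svma covers that whole region.
 */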
5305 
5306 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
5307 {
5308 	unsigned long base = addr & PUD_MASK;
5309 	unsigned long end = base + PUD_SIZE;
5310 
5311 	/*
5312 	 * check on proper vm_flags and page table alignment
5313 	 */
5314 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5315 		return true;
5316 	return false;
5317 }
5318 
5319 /*
5320  * Determine if start,end range within vma could be mapped by shared pmd.
5321  * If yes, adjust start and end to cover range associated with possible
5322  * shared pmd mappings.
5323  */
5324 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5325 				unsigned long *start, unsigned long *end)
5326 {
5327 	unsigned long a_start, a_end;
5328 
5329 	if (!(vma->vm_flags & VM_MAYSHARE))
5330 		return;
5331 
5332 	/* Extend the range to be PUD aligned for a worst case scenario */
5333 	a_start = ALIGN_DOWN(*start, PUD_SIZE);
5334 	a_end = ALIGN(*end, PUD_SIZE);
5335 
5336 	/*
5337 	 * Intersect the range with the vma range, since pmd sharing can't
5338 	 * happen across vma boundaries anyway.
5339 	 */
5340 	*start = max(vma->vm_start, a_start);
5341 	*end = min(vma->vm_end, a_end);
5342 }
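
/*
 * Worked example (x86-64 values assumed, PUD_SIZE == 1 GiB): for a range
 * [0x40200000, 0x40400000) the aligned candidates become
 * [0x40000000, 0x80000000), which is then clamped back to the vma bounds,
 * so an unmap sees the whole region a shared pmd page could span.
 */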
5343 
5344 /*
5345  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
5346  * and returns the corresponding pte. While this is not necessary for the
5347  * !shared pmd case because we can allocate the pmd later as well, it makes the
5348  * code much cleaner.
5349  *
5350  * This routine must be called with i_mmap_rwsem held in at least read mode.
5351  * For hugetlbfs, this prevents removal of any page table entries associated
5352  * with the address space.  This is important as we are setting up sharing
5353  * based on existing page table entries (mappings).
5354  */
5355 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5356 {
5357 	struct vm_area_struct *vma = find_vma(mm, addr);
5358 	struct address_space *mapping = vma->vm_file->f_mapping;
5359 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5360 			vma->vm_pgoff;
5361 	struct vm_area_struct *svma;
5362 	unsigned long saddr;
5363 	pte_t *spte = NULL;
5364 	pte_t *pte;
5365 	spinlock_t *ptl;
5366 
5367 	if (!vma_shareable(vma, addr))
5368 		return (pte_t *)pmd_alloc(mm, pud, addr);
5369 
5370 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5371 		if (svma == vma)
5372 			continue;
5373 
5374 		saddr = page_table_shareable(svma, vma, addr, idx);
5375 		if (saddr) {
5376 			spte = huge_pte_offset(svma->vm_mm, saddr,
5377 					       vma_mmu_pagesize(svma));
5378 			if (spte) {
5379 				get_page(virt_to_page(spte));
5380 				break;
5381 			}
5382 		}
5383 	}
5384 
5385 	if (!spte)
5386 		goto out;
5387 
5388 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
5389 	if (pud_none(*pud)) {
5390 		pud_populate(mm, pud,
5391 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
5392 		mm_inc_nr_pmds(mm);
5393 	} else {
5394 		put_page(virt_to_page(spte));
5395 	}
5396 	spin_unlock(ptl);
5397 out:
5398 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
5399 	return pte;
5400 }
5401 
5402 /*
5403  * unmap huge page backed by shared pte.
5404  *
5405  * The hugetlb pte page is ref counted at the time of mapping.  If the pte
5406  * is shared, as indicated by page_count > 1, unmap is achieved by clearing
5407  * the pud and decrementing the ref count. If count == 1, it is not shared.
5408  *
5409  * Called with page table lock held and i_mmap_rwsem held in write mode.
5410  *
5411  * returns: 1 successfully unmapped a shared pte page
5412  *	    0 the underlying pte page is not shared, or it is the last user
5413  */
5414 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5415 					unsigned long *addr, pte_t *ptep)
5416 {
5417 	pgd_t *pgd = pgd_offset(mm, *addr);
5418 	p4d_t *p4d = p4d_offset(pgd, *addr);
5419 	pud_t *pud = pud_offset(p4d, *addr);
5420 
5421 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5422 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
5423 	if (page_count(virt_to_page(ptep)) == 1)
5424 		return 0;
5425 
5426 	pud_clear(pud);
5427 	put_page(virt_to_page(ptep));
5428 	mm_dec_nr_pmds(mm);
5429 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5430 	return 1;
5431 }
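
/*
 * Worked example for the *addr rewind above (x86-64, 2 MiB huge pages, so
 * HPAGE_SIZE * PTRS_PER_PTE == PUD_SIZE == 1 GiB, values assumed): for
 * *addr == 0x40200000, ALIGN() rounds up to 0x80000000 and subtracting
 * HPAGE_SIZE yields 0x7fe00000; when the calling loop then advances by
 * HPAGE_SIZE it lands on 0x80000000, skipping the remainder of the range
 * covered by the previously shared pmd page.
 */
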
5432 #define want_pmd_share()	(1)
5433 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5434 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5435 {
5436 	return NULL;
5437 }
5438 
5439 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5440 				unsigned long *addr, pte_t *ptep)
5441 {
5442 	return 0;
5443 }
5444 
5445 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5446 				unsigned long *start, unsigned long *end)
5447 {
5448 }
5449 #define want_pmd_share()	(0)
5450 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5451 
5452 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5453 pte_t *huge_pte_alloc(struct mm_struct *mm,
5454 			unsigned long addr, unsigned long sz)
5455 {
5456 	pgd_t *pgd;
5457 	p4d_t *p4d;
5458 	pud_t *pud;
5459 	pte_t *pte = NULL;
5460 
5461 	pgd = pgd_offset(mm, addr);
5462 	p4d = p4d_alloc(mm, pgd, addr);
5463 	if (!p4d)
5464 		return NULL;
5465 	pud = pud_alloc(mm, p4d, addr);
5466 	if (pud) {
5467 		if (sz == PUD_SIZE) {
5468 			pte = (pte_t *)pud;
5469 		} else {
5470 			BUG_ON(sz != PMD_SIZE);
5471 			if (want_pmd_share() && pud_none(*pud))
5472 				pte = huge_pmd_share(mm, addr, pud);
5473 			else
5474 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
5475 		}
5476 	}
5477 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
5478 
5479 	return pte;
5480 }
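
/*
 * Illustrative sizes (x86-64 values assumed): huge_pte_alloc() above is
 * called with sz == PMD_SIZE (2 MiB) or sz == PUD_SIZE (1 GiB).  For 1 GiB
 * pages the pud entry itself serves as the "pte"; for 2 MiB pages a pmd is
 * either shared via huge_pmd_share() or allocated with pmd_alloc().
 */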
5481 
5482 /*
5483  * huge_pte_offset() - Walk the page table to resolve the hugepage
5484  * entry at address @addr
5485  *
5486  * Return: Pointer to page table entry (PUD or PMD) for
5487  * address @addr, or NULL if a !p*d_present() entry is encountered and the
5488  * size @sz doesn't match the hugepage size at this level of the page
5489  * table.
5490  */
5491 pte_t *huge_pte_offset(struct mm_struct *mm,
5492 		       unsigned long addr, unsigned long sz)
5493 {
5494 	pgd_t *pgd;
5495 	p4d_t *p4d;
5496 	pud_t *pud;
5497 	pmd_t *pmd;
5498 
5499 	pgd = pgd_offset(mm, addr);
5500 	if (!pgd_present(*pgd))
5501 		return NULL;
5502 	p4d = p4d_offset(pgd, addr);
5503 	if (!p4d_present(*p4d))
5504 		return NULL;
5505 
5506 	pud = pud_offset(p4d, addr);
5507 	if (sz == PUD_SIZE)
5508 		/* must be pud huge, non-present or none */
5509 		return (pte_t *)pud;
5510 	if (!pud_present(*pud))
5511 		return NULL;
5512 	/* must have a valid entry and size to go further */
5513 
5514 	pmd = pmd_offset(pud, addr);
5515 	/* must be pmd huge, non-present or none */
5516 	return (pte_t *)pmd;
5517 }
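
/*
 * Illustrative use (a sketch, not a specific call site): callers pass the
 * VMA's huge page size and must tolerate both a NULL return and a
 * non-present entry behind the returned pointer:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *
 *	if (ptep && !huge_pte_none(huge_ptep_get(ptep)))
 *		... present, migration or hwpoison entry ...
 */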
5518 
5519 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5520 
5521 /*
5522  * These functions can be overridden if your architecture needs its own
5523  * behavior.
5524  */
5525 struct page * __weak
5526 follow_huge_addr(struct mm_struct *mm, unsigned long address,
5527 			      int write)
5528 {
5529 	return ERR_PTR(-EINVAL);
5530 }
5531 
5532 struct page * __weak
5533 follow_huge_pd(struct vm_area_struct *vma,
5534 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
5535 {
5536 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5537 	return NULL;
5538 }
5539 
5540 struct page * __weak
5541 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
5542 		pmd_t *pmd, int flags)
5543 {
5544 	struct page *page = NULL;
5545 	spinlock_t *ptl;
5546 	pte_t pte;
5547 
5548 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
5549 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5550 			 (FOLL_PIN | FOLL_GET)))
5551 		return NULL;
5552 
5553 retry:
5554 	ptl = pmd_lockptr(mm, pmd);
5555 	spin_lock(ptl);
5556 	/*
5557 	 * Make sure that the address range covered by this pmd is not
5558 	 * unmapped by other threads while we hold the ptl.
5559 	 */
5560 	if (!pmd_huge(*pmd))
5561 		goto out;
5562 	pte = huge_ptep_get((pte_t *)pmd);
5563 	if (pte_present(pte)) {
5564 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
5565 		/*
5566 		 * try_grab_page() should always succeed here, because: a) we
5567 		 * hold the pmd (ptl) lock, and b) we've just checked that the
5568 		 * huge pmd (head) page is present in the page tables. The ptl
5569 		 * prevents the head page and tail pages from being rearranged
5570 		 * in any way. So this page must be available at this point,
5571 		 * unless the page refcount overflowed:
5572 		 */
5573 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5574 			page = NULL;
5575 			goto out;
5576 		}
5577 	} else {
5578 		if (is_hugetlb_entry_migration(pte)) {
5579 			spin_unlock(ptl);
5580 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
5581 			goto retry;
5582 		}
5583 		/*
5584 		 * hwpoisoned entry is treated as no_page_table in
5585 		 * follow_page_mask().
5586 		 */
5587 	}
5588 out:
5589 	spin_unlock(ptl);
5590 	return page;
5591 }
5592 
5593 struct page * __weak
5594 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5595 		pud_t *pud, int flags)
5596 {
5597 	if (flags & (FOLL_GET | FOLL_PIN))
5598 		return NULL;
5599 
5600 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5601 }
5602 
5603 struct page * __weak
5604 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5605 {
5606 	if (flags & (FOLL_GET | FOLL_PIN))
5607 		return NULL;
5608 
5609 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5610 }
5611 
5612 bool isolate_huge_page(struct page *page, struct list_head *list)
5613 {
5614 	bool ret = true;
5615 
5616 	VM_BUG_ON_PAGE(!PageHead(page), page);
5617 	spin_lock(&hugetlb_lock);
5618 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
5619 		ret = false;
5620 		goto unlock;
5621 	}
5622 	clear_page_huge_active(page);
5623 	list_move_tail(&page->lru, list);
5624 unlock:
5625 	spin_unlock(&hugetlb_lock);
5626 	return ret;
5627 }
5628 
5629 void putback_active_hugepage(struct page *page)
5630 {
5631 	VM_BUG_ON_PAGE(!PageHead(page), page);
5632 	spin_lock(&hugetlb_lock);
5633 	set_page_huge_active(page);
5634 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5635 	spin_unlock(&hugetlb_lock);
5636 	put_page(page);
5637 }
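
/*
 * Illustrative use of the two helpers above (a sketch; the real callers
 * are the page migration paths): a huge page is isolated onto a private
 * list and put back if migration is abandoned:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist)) {
 *		... try to migrate the pages on pagelist ...
 *		putback_active_hugepage(page);	// if migration is abandoned
 *	}
 */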
5638 
5639 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5640 {
5641 	struct hstate *h = page_hstate(oldpage);
5642 
5643 	hugetlb_cgroup_migrate(oldpage, newpage);
5644 	set_page_owner_migrate_reason(newpage, reason);
5645 
5646 	/*
5647 	 * Transfer the temporary state of the new huge page. This is
5648 	 * the reverse of other transitions because the newpage is going
5649 	 * to be final while the old one will be freed, so the old page
5650 	 * takes over the temporary status.
5651 	 *
5652 	 * Also note that we have to transfer the per-node surplus state
5653 	 * here as well, otherwise the global surplus count will not match
5654 	 * the per-node counts.
5655 	 */
5656 	if (PageHugeTemporary(newpage)) {
5657 		int old_nid = page_to_nid(oldpage);
5658 		int new_nid = page_to_nid(newpage);
5659 
5660 		SetPageHugeTemporary(oldpage);
5661 		ClearPageHugeTemporary(newpage);
5662 
5663 		spin_lock(&hugetlb_lock);
5664 		if (h->surplus_huge_pages_node[old_nid]) {
5665 			h->surplus_huge_pages_node[old_nid]--;
5666 			h->surplus_huge_pages_node[new_nid]++;
5667 		}
5668 		spin_unlock(&hugetlb_lock);
5669 	}
5670 }
5671 
5672 #ifdef CONFIG_CMA
5673 static bool cma_reserve_called __initdata;
5674 
5675 static int __init cmdline_parse_hugetlb_cma(char *p)
5676 {
5677 	hugetlb_cma_size = memparse(p, &p);
5678 	return 0;
5679 }
5680 
5681 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
5682 
5683 void __init hugetlb_cma_reserve(int order)
5684 {
5685 	unsigned long size, reserved, per_node;
5686 	int nid;
5687 
5688 	cma_reserve_called = true;
5689 
5690 	if (!hugetlb_cma_size)
5691 		return;
5692 
5693 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
5694 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
5695 			(PAGE_SIZE << order) / SZ_1M);
5696 		return;
5697 	}
5698 
5699 	/*
5700 	 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
5701 	 * allocate 1 GB on each of the first three nodes and ignore the last one.
5702 	 */
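	/*
	 * Worked example for the comment above: hugetlb_cma=3G with 4 online
	 * nodes gives per_node = DIV_ROUND_UP(3G, 4) = 768M.  Assuming 1 GiB
	 * gigantic pages (PAGE_SIZE << order == 1G), the round_up() in the
	 * loop below turns each 768M slice into 1G, so nodes 0-2 reserve 1G
	 * each and the loop breaks before reaching node 3.
	 */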
5703 	per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
5704 	pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
5705 		hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
5706 
5707 	reserved = 0;
5708 	for_each_node_state(nid, N_ONLINE) {
5709 		int res;
5710 		char name[20];
5711 
5712 		size = min(per_node, hugetlb_cma_size - reserved);
5713 		size = round_up(size, PAGE_SIZE << order);
5714 
5715 		snprintf(name, sizeof(name), "hugetlb%d", nid);
5716 		res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
5717 						 0, false, name,
5718 						 &hugetlb_cma[nid], nid);
5719 		if (res) {
5720 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
5721 				res, nid);
5722 			continue;
5723 		}
5724 
5725 		reserved += size;
5726 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
5727 			size / SZ_1M, nid);
5728 
5729 		if (reserved >= hugetlb_cma_size)
5730 			break;
5731 	}
5732 }
5733 
5734 void __init hugetlb_cma_check(void)
5735 {
5736 	if (!hugetlb_cma_size || cma_reserve_called)
5737 		return;
5738 
5739 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
5740 }
5741 
5742 #endif /* CONFIG_CMA */
5743