xref: /openbmc/linux/mm/hugetlb.c (revision 7507f099)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35 #include <linux/delayacct.h>
36 
37 #include <asm/page.h>
38 #include <asm/pgalloc.h>
39 #include <asm/tlb.h>
40 
41 #include <linux/io.h>
42 #include <linux/hugetlb.h>
43 #include <linux/hugetlb_cgroup.h>
44 #include <linux/node.h>
45 #include <linux/page_owner.h>
46 #include "internal.h"
47 #include "hugetlb_vmemmap.h"
48 
49 int hugetlb_max_hstate __read_mostly;
50 unsigned int default_hstate_idx;
51 struct hstate hstates[HUGE_MAX_HSTATE];
52 
53 #ifdef CONFIG_CMA
54 static struct cma *hugetlb_cma[MAX_NUMNODES];
55 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
56 static bool hugetlb_cma_page(struct page *page, unsigned int order)
57 {
58 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
59 				1 << order);
60 }
61 #else
62 static bool hugetlb_cma_page(struct page *page, unsigned int order)
63 {
64 	return false;
65 }
66 #endif
67 static unsigned long hugetlb_cma_size __initdata;
68 
69 /*
70  * Minimum page order among possible hugepage sizes, set to a proper value
71  * at boot time.
72  */
73 static unsigned int minimum_order __read_mostly = UINT_MAX;
74 
75 __initdata LIST_HEAD(huge_boot_pages);
76 
77 /* for command line parsing */
78 static struct hstate * __initdata parsed_hstate;
79 static unsigned long __initdata default_hstate_max_huge_pages;
80 static bool __initdata parsed_valid_hugepagesz = true;
81 static bool __initdata parsed_default_hugepagesz;
82 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
83 
84 /*
85  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
86  * free_huge_pages, and surplus_huge_pages.
87  */
88 DEFINE_SPINLOCK(hugetlb_lock);
89 
90 /*
91  * Serializes faults on the same logical page.  This is used to
92  * prevent spurious OOMs when the hugepage pool is fully utilized.
93  */
94 static int num_fault_mutexes;
95 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
96 
97 /* Forward declaration */
98 static int hugetlb_acct_memory(struct hstate *h, long delta);
99 
100 static inline bool subpool_is_free(struct hugepage_subpool *spool)
101 {
102 	if (spool->count)
103 		return false;
104 	if (spool->max_hpages != -1)
105 		return spool->used_hpages == 0;
106 	if (spool->min_hpages != -1)
107 		return spool->rsv_hpages == spool->min_hpages;
108 
109 	return true;
110 }
111 
112 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
113 						unsigned long irq_flags)
114 {
115 	spin_unlock_irqrestore(&spool->lock, irq_flags);
116 
117 	/* If no pages are used, and no other handles to the subpool
118 	 * remain, give up any reservations based on minimum size and
119 	 * free the subpool */
120 	if (subpool_is_free(spool)) {
121 		if (spool->min_hpages != -1)
122 			hugetlb_acct_memory(spool->hstate,
123 						-spool->min_hpages);
124 		kfree(spool);
125 	}
126 }
127 
128 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
129 						long min_hpages)
130 {
131 	struct hugepage_subpool *spool;
132 
133 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
134 	if (!spool)
135 		return NULL;
136 
137 	spin_lock_init(&spool->lock);
138 	spool->count = 1;
139 	spool->max_hpages = max_hpages;
140 	spool->hstate = h;
141 	spool->min_hpages = min_hpages;
142 
143 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
144 		kfree(spool);
145 		return NULL;
146 	}
147 	spool->rsv_hpages = min_hpages;
148 
149 	return spool;
150 }
151 
152 void hugepage_put_subpool(struct hugepage_subpool *spool)
153 {
154 	unsigned long flags;
155 
156 	spin_lock_irqsave(&spool->lock, flags);
157 	BUG_ON(!spool->count);
158 	spool->count--;
159 	unlock_or_release_subpool(spool, flags);
160 }
161 
162 /*
163  * Subpool accounting for allocating and reserving pages.
164  * Return -ENOMEM if there are not enough resources to satisfy the
165  * request.  Otherwise, return the number of pages by which the
166  * global pools must be adjusted (upward).  The returned value may
167  * only be different from the passed value (delta) in the case where
168  * a subpool minimum size must be maintained.
169  */
170 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
171 				      long delta)
172 {
173 	long ret = delta;
174 
175 	if (!spool)
176 		return ret;
177 
178 	spin_lock_irq(&spool->lock);
179 
180 	if (spool->max_hpages != -1) {		/* maximum size accounting */
181 		if ((spool->used_hpages + delta) <= spool->max_hpages)
182 			spool->used_hpages += delta;
183 		else {
184 			ret = -ENOMEM;
185 			goto unlock_ret;
186 		}
187 	}
188 
189 	/* minimum size accounting */
190 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
191 		if (delta > spool->rsv_hpages) {
192 			/*
193 			 * Asking for more reserves than those already taken on
194 			 * behalf of subpool.  Return difference.
195 			 */
196 			ret = delta - spool->rsv_hpages;
197 			spool->rsv_hpages = 0;
198 		} else {
199 			ret = 0;	/* reserves already accounted for */
200 			spool->rsv_hpages -= delta;
201 		}
202 	}
203 
204 unlock_ret:
205 	spin_unlock_irq(&spool->lock);
206 	return ret;
207 }
208 
209 /*
210  * Subpool accounting for freeing and unreserving pages.
211  * Return the number of global page reservations that must be dropped.
212  * The return value may only be different from the passed value (delta)
213  * in the case where a subpool minimum size must be maintained.
214  */
215 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
216 				       long delta)
217 {
218 	long ret = delta;
219 	unsigned long flags;
220 
221 	if (!spool)
222 		return delta;
223 
224 	spin_lock_irqsave(&spool->lock, flags);
225 
226 	if (spool->max_hpages != -1)		/* maximum size accounting */
227 		spool->used_hpages -= delta;
228 
229 	 /* minimum size accounting */
230 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
231 		if (spool->rsv_hpages + delta <= spool->min_hpages)
232 			ret = 0;
233 		else
234 			ret = spool->rsv_hpages + delta - spool->min_hpages;
235 
236 		spool->rsv_hpages += delta;
237 		if (spool->rsv_hpages > spool->min_hpages)
238 			spool->rsv_hpages = spool->min_hpages;
239 	}
240 
241 	/*
242 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
243 	 * quota reference, free it now.
244 	 */
245 	unlock_or_release_subpool(spool, flags);
246 
247 	return ret;
248 }
249 
250 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
251 {
252 	return HUGETLBFS_SB(inode->i_sb)->spool;
253 }
254 
255 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
256 {
257 	return subpool_inode(file_inode(vma->vm_file));
258 }
259 
260 /* Helper that removes a struct file_region from the resv_map cache and returns
261  * it for use.
262  */
263 static struct file_region *
264 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
265 {
266 	struct file_region *nrg = NULL;
267 
268 	VM_BUG_ON(resv->region_cache_count <= 0);
269 
270 	resv->region_cache_count--;
271 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
272 	list_del(&nrg->link);
273 
274 	nrg->from = from;
275 	nrg->to = to;
276 
277 	return nrg;
278 }
279 
280 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
281 					      struct file_region *rg)
282 {
283 #ifdef CONFIG_CGROUP_HUGETLB
284 	nrg->reservation_counter = rg->reservation_counter;
285 	nrg->css = rg->css;
286 	if (rg->css)
287 		css_get(rg->css);
288 #endif
289 }
290 
291 /* Helper that records hugetlb_cgroup uncharge info. */
292 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
293 						struct hstate *h,
294 						struct resv_map *resv,
295 						struct file_region *nrg)
296 {
297 #ifdef CONFIG_CGROUP_HUGETLB
298 	if (h_cg) {
299 		nrg->reservation_counter =
300 			&h_cg->rsvd_hugepage[hstate_index(h)];
301 		nrg->css = &h_cg->css;
302 		/*
303 		 * The caller will hold exactly one h_cg->css reference for the
304 		 * whole contiguous reservation region. But this area might be
305 		 * scattered when some file_regions already reside in
306 		 * it. As a result, many file_regions may share only one css
307 		 * reference. In order to ensure that one file_region must hold
308 		 * exactly one h_cg->css reference, we should do css_get for
309 		 * each file_region and leave the reference held by caller
310 		 * untouched.
311 		 */
312 		css_get(&h_cg->css);
313 		if (!resv->pages_per_hpage)
314 			resv->pages_per_hpage = pages_per_huge_page(h);
315 		/* pages_per_hpage should be the same for all entries in
316 		 * a resv_map.
317 		 */
318 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
319 	} else {
320 		nrg->reservation_counter = NULL;
321 		nrg->css = NULL;
322 	}
323 #endif
324 }
325 
326 static void put_uncharge_info(struct file_region *rg)
327 {
328 #ifdef CONFIG_CGROUP_HUGETLB
329 	if (rg->css)
330 		css_put(rg->css);
331 #endif
332 }
333 
334 static bool has_same_uncharge_info(struct file_region *rg,
335 				   struct file_region *org)
336 {
337 #ifdef CONFIG_CGROUP_HUGETLB
338 	return rg->reservation_counter == org->reservation_counter &&
339 	       rg->css == org->css;
340 
341 #else
342 	return true;
343 #endif
344 }
345 
346 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
347 {
348 	struct file_region *nrg = NULL, *prg = NULL;
349 
350 	prg = list_prev_entry(rg, link);
351 	if (&prg->link != &resv->regions && prg->to == rg->from &&
352 	    has_same_uncharge_info(prg, rg)) {
353 		prg->to = rg->to;
354 
355 		list_del(&rg->link);
356 		put_uncharge_info(rg);
357 		kfree(rg);
358 
359 		rg = prg;
360 	}
361 
362 	nrg = list_next_entry(rg, link);
363 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
364 	    has_same_uncharge_info(nrg, rg)) {
365 		nrg->from = rg->from;
366 
367 		list_del(&rg->link);
368 		put_uncharge_info(rg);
369 		kfree(rg);
370 	}
371 }
372 
373 static inline long
374 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
375 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
376 		     long *regions_needed)
377 {
378 	struct file_region *nrg;
379 
380 	if (!regions_needed) {
381 		nrg = get_file_region_entry_from_cache(map, from, to);
382 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
383 		list_add(&nrg->link, rg);
384 		coalesce_file_region(map, nrg);
385 	} else
386 		*regions_needed += 1;
387 
388 	return to - from;
389 }
390 
391 /*
392  * Must be called with resv->lock held.
393  *
394  * Calling this with regions_needed != NULL will count the number of pages
395  * to be added but will not modify the linked list. And regions_needed will
396  * indicate the number of file_regions needed in the cache to carry out the
397  * addition of the regions for this range.
398  */
399 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
400 				     struct hugetlb_cgroup *h_cg,
401 				     struct hstate *h, long *regions_needed)
402 {
403 	long add = 0;
404 	struct list_head *head = &resv->regions;
405 	long last_accounted_offset = f;
406 	struct file_region *iter, *trg = NULL;
407 	struct list_head *rg = NULL;
408 
409 	if (regions_needed)
410 		*regions_needed = 0;
411 
412 	/* In this loop, we essentially handle an entry for the range
413 	 * [last_accounted_offset, iter->from), at every iteration, with some
414 	 * bounds checking.
415 	 */
416 	list_for_each_entry_safe(iter, trg, head, link) {
417 		/* Skip irrelevant regions that start before our range. */
418 		if (iter->from < f) {
419 			/* If this region ends after the last accounted offset,
420 			 * then we need to update last_accounted_offset.
421 			 */
422 			if (iter->to > last_accounted_offset)
423 				last_accounted_offset = iter->to;
424 			continue;
425 		}
426 
427 		/* When we find a region that starts beyond our range, we've
428 		 * finished.
429 		 */
430 		if (iter->from >= t) {
431 			rg = iter->link.prev;
432 			break;
433 		}
434 
435 		/* Add an entry for last_accounted_offset -> iter->from, and
436 		 * update last_accounted_offset.
437 		 */
438 		if (iter->from > last_accounted_offset)
439 			add += hugetlb_resv_map_add(resv, iter->link.prev,
440 						    last_accounted_offset,
441 						    iter->from, h, h_cg,
442 						    regions_needed);
443 
444 		last_accounted_offset = iter->to;
445 	}
446 
447 	/* Handle the case where our range extends beyond
448 	 * last_accounted_offset.
449 	 */
450 	if (!rg)
451 		rg = head->prev;
452 	if (last_accounted_offset < t)
453 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
454 					    t, h, h_cg, regions_needed);
455 
456 	return add;
457 }
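/*
 * Worked example: if the reserve map already holds [2, 4) and [6, 7), then
 * add_reservation_in_range(resv, 0, 10, ...) accounts for the gaps [0, 2),
 * [4, 6) and [7, 10) and returns 7, the number of pages not yet
 * represented.  With regions_needed != NULL nothing is inserted and
 * *regions_needed is set to 3; otherwise the three gap entries are added
 * and, when their cgroup uncharge info matches the neighbouring entries,
 * coalesced into a single region [0, 10).
 */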
458 
459 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
460  */
461 static int allocate_file_region_entries(struct resv_map *resv,
462 					int regions_needed)
463 	__must_hold(&resv->lock)
464 {
465 	struct list_head allocated_regions;
466 	int to_allocate = 0, i = 0;
467 	struct file_region *trg = NULL, *rg = NULL;
468 
469 	VM_BUG_ON(regions_needed < 0);
470 
471 	INIT_LIST_HEAD(&allocated_regions);
472 
473 	/*
474 	 * Check for sufficient descriptors in the cache to accommodate
475 	 * the number of in progress add operations plus regions_needed.
476 	 *
477 	 * This is a while loop because when we drop the lock, some other call
478 	 * to region_add or region_del may have consumed some region_entries,
479 	 * so we keep looping here until we finally have enough entries for
480 	 * (adds_in_progress + regions_needed).
481 	 */
482 	while (resv->region_cache_count <
483 	       (resv->adds_in_progress + regions_needed)) {
484 		to_allocate = resv->adds_in_progress + regions_needed -
485 			      resv->region_cache_count;
486 
487 		/* At this point, we should have enough entries in the cache
488 		 * for all the existing adds_in_progress. We should only be
489 		 * needing to allocate for regions_needed.
490 		 */
491 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
492 
493 		spin_unlock(&resv->lock);
494 		for (i = 0; i < to_allocate; i++) {
495 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
496 			if (!trg)
497 				goto out_of_memory;
498 			list_add(&trg->link, &allocated_regions);
499 		}
500 
501 		spin_lock(&resv->lock);
502 
503 		list_splice(&allocated_regions, &resv->region_cache);
504 		resv->region_cache_count += to_allocate;
505 	}
506 
507 	return 0;
508 
509 out_of_memory:
510 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
511 		list_del(&rg->link);
512 		kfree(rg);
513 	}
514 	return -ENOMEM;
515 }
516 
517 /*
518  * Add the huge page range represented by [f, t) to the reserve
519  * map.  Regions will be taken from the cache to fill in this range.
520  * Sufficient regions should exist in the cache due to the previous
521  * call to region_chg with the same range, but in some cases the cache will not
522  * have sufficient entries due to races with other code doing region_add or
523  * region_del.  The extra needed entries will be allocated.
524  *
525  * regions_needed is the out value provided by a previous call to region_chg.
526  *
527  * Return the number of new huge pages added to the map.  This number is greater
528  * than or equal to zero.  If file_region entries needed to be allocated for
529  * this operation and we were not able to allocate, it returns -ENOMEM.
530  * region_add of regions of length 1 never allocate file_regions and cannot
531  * fail; region_chg will always allocate at least 1 entry and a region_add for
532  * 1 page will only require at most 1 entry.
533  */
534 static long region_add(struct resv_map *resv, long f, long t,
535 		       long in_regions_needed, struct hstate *h,
536 		       struct hugetlb_cgroup *h_cg)
537 {
538 	long add = 0, actual_regions_needed = 0;
539 
540 	spin_lock(&resv->lock);
541 retry:
542 
543 	/* Count how many regions are actually needed to execute this add. */
544 	add_reservation_in_range(resv, f, t, NULL, NULL,
545 				 &actual_regions_needed);
546 
547 	/*
548 	 * Check for sufficient descriptors in the cache to accommodate
549 	 * this add operation. Note that actual_regions_needed may be greater
550 	 * than in_regions_needed, as the resv_map may have been modified since
551 	 * the region_chg call. In this case, we need to make sure that we
552 	 * allocate extra entries, such that we have enough for all the
553 	 * existing adds_in_progress, plus the excess needed for this
554 	 * operation.
555 	 */
556 	if (actual_regions_needed > in_regions_needed &&
557 	    resv->region_cache_count <
558 		    resv->adds_in_progress +
559 			    (actual_regions_needed - in_regions_needed)) {
560 		/* region_add operation of range 1 should never need to
561 		 * allocate file_region entries.
562 		 */
563 		VM_BUG_ON(t - f <= 1);
564 
565 		if (allocate_file_region_entries(
566 			    resv, actual_regions_needed - in_regions_needed)) {
567 			return -ENOMEM;
568 		}
569 
570 		goto retry;
571 	}
572 
573 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
574 
575 	resv->adds_in_progress -= in_regions_needed;
576 
577 	spin_unlock(&resv->lock);
578 	return add;
579 }
580 
581 /*
582  * Examine the existing reserve map and determine how many
583  * huge pages in the specified range [f, t) are NOT currently
584  * represented.  This routine is called before a subsequent
585  * call to region_add that will actually modify the reserve
586  * map to add the specified range [f, t).  region_chg does
587  * not change the number of huge pages represented by the
588  * map.  A number of new file_region structures are added to the cache as
589  * placeholders, for the subsequent region_add call to use. At least 1
590  * file_region structure is added.
591  *
592  * out_regions_needed is the number of regions added to the
593  * resv->adds_in_progress.  This value needs to be provided to a follow up call
594  * to region_add or region_abort for proper accounting.
595  *
596  * Returns the number of huge pages that need to be added to the existing
597  * reservation map for the range [f, t).  This number is greater than or equal
598  * to zero.  -ENOMEM is returned if a new file_region structure or cache entry
599  * is needed and cannot be allocated.
600  */
601 static long region_chg(struct resv_map *resv, long f, long t,
602 		       long *out_regions_needed)
603 {
604 	long chg = 0;
605 
606 	spin_lock(&resv->lock);
607 
608 	/* Count how many hugepages in this range are NOT represented. */
609 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
610 				       out_regions_needed);
611 
612 	if (*out_regions_needed == 0)
613 		*out_regions_needed = 1;
614 
615 	if (allocate_file_region_entries(resv, *out_regions_needed))
616 		return -ENOMEM;
617 
618 	resv->adds_in_progress += *out_regions_needed;
619 
620 	spin_unlock(&resv->lock);
621 	return chg;
622 }
623 
624 /*
625  * Abort the in progress add operation.  The adds_in_progress field
626  * of the resv_map keeps track of the operations in progress between
627  * calls to region_chg and region_add.  Operations are sometimes
628  * aborted after the call to region_chg.  In such cases, region_abort
629  * is called to decrement the adds_in_progress counter. regions_needed
630  * is the value returned by the matching region_chg call; it is used to
631  * decrement the adds_in_progress counter.
632  *
633  * NOTE: The range arguments [f, t) are not needed or used in this
634  * routine.  They are kept to make reading the calling code easier as
635  * arguments will match the associated region_chg call.
636  */
637 static void region_abort(struct resv_map *resv, long f, long t,
638 			 long regions_needed)
639 {
640 	spin_lock(&resv->lock);
641 	VM_BUG_ON(!resv->region_cache_count);
642 	resv->adds_in_progress -= regions_needed;
643 	spin_unlock(&resv->lock);
644 }
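/*
 * A typical reservation sequence therefore looks like this (sketch only;
 * see hugetlb_reserve_pages() later in this file for the real thing):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	... charge cgroups, adjust subpool and global counters ...
 *	if (everything succeeded)
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *	else
 *		region_abort(resv, f, t, regions_needed);
 */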
645 
646 /*
647  * Delete the specified range [f, t) from the reserve map.  If the
648  * t parameter is LONG_MAX, this indicates that ALL regions after f
649  * should be deleted.  Locate the regions which intersect [f, t)
650  * and either trim, delete or split the existing regions.
651  *
652  * Returns the number of huge pages deleted from the reserve map.
653  * In the normal case, the return value is zero or more.  In the
654  * case where a region must be split, a new region descriptor must
655  * be allocated.  If the allocation fails, -ENOMEM will be returned.
656  * NOTE: If the parameter t == LONG_MAX, then we will never split
657  * a region, and thus will never return -ENOMEM.  Callers specifying
658  * t == LONG_MAX do not need to check for -ENOMEM error.
659  */
660 static long region_del(struct resv_map *resv, long f, long t)
661 {
662 	struct list_head *head = &resv->regions;
663 	struct file_region *rg, *trg;
664 	struct file_region *nrg = NULL;
665 	long del = 0;
666 
667 retry:
668 	spin_lock(&resv->lock);
669 	list_for_each_entry_safe(rg, trg, head, link) {
670 		/*
671 		 * Skip regions before the range to be deleted.  file_region
672 		 * ranges are normally of the form [from, to).  However, there
673 		 * may be a "placeholder" entry in the map which is of the form
674 		 * (from, to) with from == to.  Check for placeholder entries
675 		 * at the beginning of the range to be deleted.
676 		 */
677 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
678 			continue;
679 
680 		if (rg->from >= t)
681 			break;
682 
683 		if (f > rg->from && t < rg->to) { /* Must split region */
684 			/*
685 			 * Check for an entry in the cache before dropping
686 			 * lock and attempting allocation.
687 			 */
688 			if (!nrg &&
689 			    resv->region_cache_count > resv->adds_in_progress) {
690 				nrg = list_first_entry(&resv->region_cache,
691 							struct file_region,
692 							link);
693 				list_del(&nrg->link);
694 				resv->region_cache_count--;
695 			}
696 
697 			if (!nrg) {
698 				spin_unlock(&resv->lock);
699 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
700 				if (!nrg)
701 					return -ENOMEM;
702 				goto retry;
703 			}
704 
705 			del += t - f;
706 			hugetlb_cgroup_uncharge_file_region(
707 				resv, rg, t - f, false);
708 
709 			/* New entry for end of split region */
710 			nrg->from = t;
711 			nrg->to = rg->to;
712 
713 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
714 
715 			INIT_LIST_HEAD(&nrg->link);
716 
717 			/* Original entry is trimmed */
718 			rg->to = f;
719 
720 			list_add(&nrg->link, &rg->link);
721 			nrg = NULL;
722 			break;
723 		}
724 
725 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
726 			del += rg->to - rg->from;
727 			hugetlb_cgroup_uncharge_file_region(resv, rg,
728 							    rg->to - rg->from, true);
729 			list_del(&rg->link);
730 			kfree(rg);
731 			continue;
732 		}
733 
734 		if (f <= rg->from) {	/* Trim beginning of region */
735 			hugetlb_cgroup_uncharge_file_region(resv, rg,
736 							    t - rg->from, false);
737 
738 			del += t - rg->from;
739 			rg->from = t;
740 		} else {		/* Trim end of region */
741 			hugetlb_cgroup_uncharge_file_region(resv, rg,
742 							    rg->to - f, false);
743 
744 			del += rg->to - f;
745 			rg->to = f;
746 		}
747 	}
748 
749 	spin_unlock(&resv->lock);
750 	kfree(nrg);
751 	return del;
752 }
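/*
 * Example of the split case above: with a single region [0, 10) in the
 * map, region_del(resv, 4, 6) trims the existing entry to [0, 4), inserts
 * a new entry [6, 10), and returns 2.  Callers passing t == LONG_MAX
 * (truncation) can only ever trim or remove whole regions, which is why
 * they never have to deal with -ENOMEM.
 */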
753 
754 /*
755  * A rare out of memory error was encountered which prevented removal of
756  * the reserve map region for a page.  The huge page itself was freed
757  * and removed from the page cache.  This routine will adjust the subpool
758  * usage count, and the global reserve count if needed.  By incrementing
759  * these counts, the reserve map entry which could not be deleted will
760  * appear as a "reserved" entry instead of simply dangling with incorrect
761  * counts.
762  */
763 void hugetlb_fix_reserve_counts(struct inode *inode)
764 {
765 	struct hugepage_subpool *spool = subpool_inode(inode);
766 	long rsv_adjust;
767 	bool reserved = false;
768 
769 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
770 	if (rsv_adjust > 0) {
771 		struct hstate *h = hstate_inode(inode);
772 
773 		if (!hugetlb_acct_memory(h, 1))
774 			reserved = true;
775 	} else if (!rsv_adjust) {
776 		reserved = true;
777 	}
778 
779 	if (!reserved)
780 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
781 }
782 
783 /*
784  * Count and return the number of huge pages in the reserve map
785  * that intersect with the range [f, t).
786  */
787 static long region_count(struct resv_map *resv, long f, long t)
788 {
789 	struct list_head *head = &resv->regions;
790 	struct file_region *rg;
791 	long chg = 0;
792 
793 	spin_lock(&resv->lock);
794 	/* Locate each segment we overlap with, and count that overlap. */
795 	list_for_each_entry(rg, head, link) {
796 		long seg_from;
797 		long seg_to;
798 
799 		if (rg->to <= f)
800 			continue;
801 		if (rg->from >= t)
802 			break;
803 
804 		seg_from = max(rg->from, f);
805 		seg_to = min(rg->to, t);
806 
807 		chg += seg_to - seg_from;
808 	}
809 	spin_unlock(&resv->lock);
810 
811 	return chg;
812 }
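/*
 * For example, with regions [0, 4) and [6, 10) in the map,
 * region_count(resv, 2, 8) returns 4: two pages of overlap with each
 * region.
 */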
813 
814 /*
815  * Convert the address within this vma to the page offset within
816  * the mapping, in pagecache page units; huge pages here.
817  */
818 static pgoff_t vma_hugecache_offset(struct hstate *h,
819 			struct vm_area_struct *vma, unsigned long address)
820 {
821 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
822 			(vma->vm_pgoff >> huge_page_order(h));
823 }
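/*
 * For example, with 2MB huge pages on a 4KB base page architecture
 * (huge_page_shift == 21, huge_page_order == 9), a vma with
 * vm_pgoff == 1024 (4MB into the file) faulting at vm_start + 6MB yields
 * ((6MB) >> 21) + (1024 >> 9) == 3 + 2 == 5, i.e. the sixth huge page of
 * the file.
 */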
824 
825 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
826 				     unsigned long address)
827 {
828 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
829 }
830 EXPORT_SYMBOL_GPL(linear_hugepage_index);
831 
832 /*
833  * Return the size of the pages allocated when backing a VMA. In the majority
834  * of cases this will be the same size as that used by the page table entries.
835  */
836 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
837 {
838 	if (vma->vm_ops && vma->vm_ops->pagesize)
839 		return vma->vm_ops->pagesize(vma);
840 	return PAGE_SIZE;
841 }
842 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
843 
844 /*
845  * Return the page size being used by the MMU to back a VMA. In the majority
846  * of cases, the page size used by the kernel matches the MMU size. On
847  * architectures where it differs, an architecture-specific 'strong'
848  * version of this symbol is required.
849  */
850 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
851 {
852 	return vma_kernel_pagesize(vma);
853 }
854 
855 /*
856  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
857  * bits of the reservation map pointer, which are always clear due to
858  * alignment.
859  */
860 #define HPAGE_RESV_OWNER    (1UL << 0)
861 #define HPAGE_RESV_UNMAPPED (1UL << 1)
862 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
863 
864 /*
865  * These helpers are used to track how many pages are reserved for
866  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
867  * is guaranteed to have its future faults succeed.
868  *
869  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
870  * the reserve counters are updated with the hugetlb_lock held. It is safe
871  * to reset the VMA at fork() time as it is not in use yet and there is no
872  * chance of the global counters getting corrupted as a result of the reset.
873  *
874  * The private mapping reservation is represented in a subtly different
875  * manner to a shared mapping.  A shared mapping has a region map associated
876  * with the underlying file, this region map represents the backing file
877  * pages which have ever had a reservation assigned; this persists even
878  * after the page is instantiated.  A private mapping has a region map
879  * associated with the original mmap which is attached to all VMAs which
880  * reference it, this region map represents those offsets which have consumed
881  * reservation, i.e. where pages have been instantiated.
882  */
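/*
 * Concretely: in a shared mapping's resv_map, an entry [3, 4) means a
 * reserve exists for page index 3 (it is created at reservation time,
 * whether or not the page has been faulted in).  In a private mapping's
 * resv_map, the same entry means the owner has already faulted index 3
 * and the corresponding reserve has been consumed.
 */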
883 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
884 {
885 	return (unsigned long)vma->vm_private_data;
886 }
887 
888 static void set_vma_private_data(struct vm_area_struct *vma,
889 							unsigned long value)
890 {
891 	vma->vm_private_data = (void *)value;
892 }
893 
894 static void
895 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
896 					  struct hugetlb_cgroup *h_cg,
897 					  struct hstate *h)
898 {
899 #ifdef CONFIG_CGROUP_HUGETLB
900 	if (!h_cg || !h) {
901 		resv_map->reservation_counter = NULL;
902 		resv_map->pages_per_hpage = 0;
903 		resv_map->css = NULL;
904 	} else {
905 		resv_map->reservation_counter =
906 			&h_cg->rsvd_hugepage[hstate_index(h)];
907 		resv_map->pages_per_hpage = pages_per_huge_page(h);
908 		resv_map->css = &h_cg->css;
909 	}
910 #endif
911 }
912 
913 struct resv_map *resv_map_alloc(void)
914 {
915 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
916 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
917 
918 	if (!resv_map || !rg) {
919 		kfree(resv_map);
920 		kfree(rg);
921 		return NULL;
922 	}
923 
924 	kref_init(&resv_map->refs);
925 	spin_lock_init(&resv_map->lock);
926 	INIT_LIST_HEAD(&resv_map->regions);
927 
928 	resv_map->adds_in_progress = 0;
929 	/*
930 	 * Initialize these to 0. On shared mappings, 0's here indicate these
931 	 * fields don't do cgroup accounting. On private mappings, these will be
932 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
933 	 * reservations are to be un-charged from here.
934 	 */
935 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
936 
937 	INIT_LIST_HEAD(&resv_map->region_cache);
938 	list_add(&rg->link, &resv_map->region_cache);
939 	resv_map->region_cache_count = 1;
940 
941 	return resv_map;
942 }
943 
944 void resv_map_release(struct kref *ref)
945 {
946 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
947 	struct list_head *head = &resv_map->region_cache;
948 	struct file_region *rg, *trg;
949 
950 	/* Clear out any active regions before we release the map. */
951 	region_del(resv_map, 0, LONG_MAX);
952 
953 	/* ... and any entries left in the cache */
954 	list_for_each_entry_safe(rg, trg, head, link) {
955 		list_del(&rg->link);
956 		kfree(rg);
957 	}
958 
959 	VM_BUG_ON(resv_map->adds_in_progress);
960 
961 	kfree(resv_map);
962 }
963 
964 static inline struct resv_map *inode_resv_map(struct inode *inode)
965 {
966 	/*
967 	 * At inode evict time, i_mapping may not point to the original
968 	 * address space within the inode.  This original address space
969 	 * contains the pointer to the resv_map.  So, always use the
970 	 * address space embedded within the inode.
971  * The VERY common case is inode->mapping == &inode->i_data but
972 	 * this may not be true for device special inodes.
973 	 */
974 	return (struct resv_map *)(&inode->i_data)->private_data;
975 }
976 
977 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
978 {
979 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
980 	if (vma->vm_flags & VM_MAYSHARE) {
981 		struct address_space *mapping = vma->vm_file->f_mapping;
982 		struct inode *inode = mapping->host;
983 
984 		return inode_resv_map(inode);
985 
986 	} else {
987 		return (struct resv_map *)(get_vma_private_data(vma) &
988 							~HPAGE_RESV_MASK);
989 	}
990 }
991 
992 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
993 {
994 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
995 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
996 
997 	set_vma_private_data(vma, (get_vma_private_data(vma) &
998 				HPAGE_RESV_MASK) | (unsigned long)map);
999 }
1000 
1001 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1002 {
1003 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1004 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1005 
1006 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1007 }
1008 
1009 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1010 {
1011 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1012 
1013 	return (get_vma_private_data(vma) & flag) != 0;
1014 }
1015 
1016 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
1017 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1018 {
1019 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1020 	if (!(vma->vm_flags & VM_MAYSHARE))
1021 		vma->vm_private_data = (void *)0;
1022 }
1023 
1024 /*
1025  * Reset and decrement one ref on hugepage private reservation.
1026  * Called with mm->mmap_sem writer semaphore held.
1027  * This function should only be used by move_vma() and operates on a
1028  * vma of the same size. It should never be called with the last ref on
1029  * the reservation.
1030  */
1031 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1032 {
1033 	/*
1034 	 * Clear the old hugetlb private page reservation.
1035 	 * It has already been transferred to new_vma.
1036 	 *
1037 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1038 	 * which copies vma into new_vma and unmaps vma. After the copy
1039 	 * operation both new_vma and vma share a reference to the resv_map
1040 	 * struct, and at that point vma is about to be unmapped. We don't
1041 	 * want to return the reservation to the pool at unmap of vma because
1042 	 * the reservation still lives on in new_vma, so simply decrement the
1043 	 * ref here and remove the resv_map reference from this vma.
1044 	 */
1045 	struct resv_map *reservations = vma_resv_map(vma);
1046 
1047 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1048 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1049 		kref_put(&reservations->refs, resv_map_release);
1050 	}
1051 
1052 	reset_vma_resv_huge_pages(vma);
1053 }
1054 
1055 /* Returns true if the VMA has associated reserve pages */
1056 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1057 {
1058 	if (vma->vm_flags & VM_NORESERVE) {
1059 		/*
1060 		 * This address is already reserved by another process (chg == 0),
1061 		 * so we should decrement the reserved count. Without decrementing,
1062 		 * the reserve count remains after releasing the inode, because the
1063 		 * allocated page will go into the page cache and be regarded as
1064 		 * coming from the reserved pool in the release step.  Currently we
1065 		 * don't have any other solution to deal with this situation
1066 		 * properly, so add a work-around here.
1067 		 */
1068 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1069 			return true;
1070 		else
1071 			return false;
1072 	}
1073 
1074 	/* Shared mappings always use reserves */
1075 	if (vma->vm_flags & VM_MAYSHARE) {
1076 		/*
1077 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1078 		 * be a region map for all pages.  The only situation where
1079 		 * there is no region map is if a hole was punched via
1080 		 * fallocate.  In this case, there really are no reserves to
1081 		 * use.  This situation is indicated if chg != 0.
1082 		 */
1083 		if (chg)
1084 			return false;
1085 		else
1086 			return true;
1087 	}
1088 
1089 	/*
1090 	 * Only the process that called mmap() has reserves for
1091 	 * private mappings.
1092 	 */
1093 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1094 		/*
1095 		 * Like the shared case above, a hole punch or truncate
1096 		 * could have been performed on the private mapping.
1097 		 * Examine the value of chg to determine if reserves
1098 		 * actually exist or were previously consumed.
1099 		 * Very Subtle - The value of chg comes from a previous
1100 		 * call to vma_needs_reserves().  The reserve map for
1101 		 * private mappings has different (opposite) semantics
1102 		 * than that of shared mappings.  vma_needs_reserves()
1103 		 * has already taken this difference in semantics into
1104 		 * account.  Therefore, the meaning of chg is the same
1105 		 * as in the shared case above.  Code could easily be
1106 		 * combined, but keeping it separate draws attention to
1107 		 * subtle differences.
1108 		 */
1109 		if (chg)
1110 			return false;
1111 		else
1112 			return true;
1113 	}
1114 
1115 	return false;
1116 }
1117 
1118 static void enqueue_huge_page(struct hstate *h, struct page *page)
1119 {
1120 	int nid = page_to_nid(page);
1121 
1122 	lockdep_assert_held(&hugetlb_lock);
1123 	VM_BUG_ON_PAGE(page_count(page), page);
1124 
1125 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1126 	h->free_huge_pages++;
1127 	h->free_huge_pages_node[nid]++;
1128 	SetHPageFreed(page);
1129 }
1130 
1131 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1132 {
1133 	struct page *page;
1134 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1135 
1136 	lockdep_assert_held(&hugetlb_lock);
1137 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1138 		if (pin && !is_pinnable_page(page))
1139 			continue;
1140 
1141 		if (PageHWPoison(page))
1142 			continue;
1143 
1144 		list_move(&page->lru, &h->hugepage_activelist);
1145 		set_page_refcounted(page);
1146 		ClearHPageFreed(page);
1147 		h->free_huge_pages--;
1148 		h->free_huge_pages_node[nid]--;
1149 		return page;
1150 	}
1151 
1152 	return NULL;
1153 }
1154 
1155 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1156 		nodemask_t *nmask)
1157 {
1158 	unsigned int cpuset_mems_cookie;
1159 	struct zonelist *zonelist;
1160 	struct zone *zone;
1161 	struct zoneref *z;
1162 	int node = NUMA_NO_NODE;
1163 
1164 	zonelist = node_zonelist(nid, gfp_mask);
1165 
1166 retry_cpuset:
1167 	cpuset_mems_cookie = read_mems_allowed_begin();
1168 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1169 		struct page *page;
1170 
1171 		if (!cpuset_zone_allowed(zone, gfp_mask))
1172 			continue;
1173 		/*
1174 		 * no need to ask again on the same node. Pool is node rather than
1175 		 * zone aware
1176 		 */
1177 		if (zone_to_nid(zone) == node)
1178 			continue;
1179 		node = zone_to_nid(zone);
1180 
1181 		page = dequeue_huge_page_node_exact(h, node);
1182 		if (page)
1183 			return page;
1184 	}
1185 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1186 		goto retry_cpuset;
1187 
1188 	return NULL;
1189 }
1190 
1191 static struct page *dequeue_huge_page_vma(struct hstate *h,
1192 				struct vm_area_struct *vma,
1193 				unsigned long address, int avoid_reserve,
1194 				long chg)
1195 {
1196 	struct page *page = NULL;
1197 	struct mempolicy *mpol;
1198 	gfp_t gfp_mask;
1199 	nodemask_t *nodemask;
1200 	int nid;
1201 
1202 	/*
1203 	 * A child process with MAP_PRIVATE mappings created by its parent
1204 	 * has no page reserves. This check ensures that reservations are
1205 	 * not "stolen". The child may still get SIGKILLed.
1206 	 */
1207 	if (!vma_has_reserves(vma, chg) &&
1208 			h->free_huge_pages - h->resv_huge_pages == 0)
1209 		goto err;
1210 
1211 	/* If reserves cannot be used, ensure enough pages are in the pool */
1212 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1213 		goto err;
1214 
1215 	gfp_mask = htlb_alloc_mask(h);
1216 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1217 
1218 	if (mpol_is_preferred_many(mpol)) {
1219 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1220 
1221 		/* Fallback to all nodes if page==NULL */
1222 		nodemask = NULL;
1223 	}
1224 
1225 	if (!page)
1226 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1227 
1228 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1229 		SetHPageRestoreReserve(page);
1230 		h->resv_huge_pages--;
1231 	}
1232 
1233 	mpol_cond_put(mpol);
1234 	return page;
1235 
1236 err:
1237 	return NULL;
1238 }
1239 
1240 /*
1241  * common helper functions for hstate_next_node_to_{alloc|free}.
1242  * We may have allocated or freed a huge page based on a different
1243  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1244  * be outside of *nodes_allowed.  Ensure that we use an allowed
1245  * node for alloc or free.
1246  */
1247 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1248 {
1249 	nid = next_node_in(nid, *nodes_allowed);
1250 	VM_BUG_ON(nid >= MAX_NUMNODES);
1251 
1252 	return nid;
1253 }
1254 
1255 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1256 {
1257 	if (!node_isset(nid, *nodes_allowed))
1258 		nid = next_node_allowed(nid, nodes_allowed);
1259 	return nid;
1260 }
1261 
1262 /*
1263  * returns the previously saved node ["this node"] from which to
1264  * allocate a persistent huge page for the pool and advance the
1265  * next node from which to allocate, handling wrap at end of node
1266  * mask.
1267  */
1268 static int hstate_next_node_to_alloc(struct hstate *h,
1269 					nodemask_t *nodes_allowed)
1270 {
1271 	int nid;
1272 
1273 	VM_BUG_ON(!nodes_allowed);
1274 
1275 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1276 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1277 
1278 	return nid;
1279 }
1280 
1281 /*
1282  * helper for remove_pool_huge_page() - return the previously saved
1283  * node ["this node"] from which to free a huge page.  Advance the
1284  * next node id whether or not we find a free huge page to free so
1285  * that the next attempt to free addresses the next node.
1286  */
1287 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1288 {
1289 	int nid;
1290 
1291 	VM_BUG_ON(!nodes_allowed);
1292 
1293 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1294 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1295 
1296 	return nid;
1297 }
1298 
1299 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
1300 	for (nr_nodes = nodes_weight(*mask);				\
1301 		nr_nodes > 0 &&						\
1302 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
1303 		nr_nodes--)
1304 
1305 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1306 	for (nr_nodes = nodes_weight(*mask);				\
1307 		nr_nodes > 0 &&						\
1308 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1309 		nr_nodes--)
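/*
 * Illustrative (simplified) use of the allocation iterator above, in the
 * style of alloc_pool_huge_page() later in this file: try each allowed
 * node once, starting from the saved round-robin position, and stop at
 * the first node that yields a page.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node,
 *					     nodes_allowed, NULL);
 *		if (page)
 *			break;
 *	}
 */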
1310 
1311 /* used to demote non-gigantic huge pages as well */
1312 static void __destroy_compound_gigantic_page(struct page *page,
1313 					unsigned int order, bool demote)
1314 {
1315 	int i;
1316 	int nr_pages = 1 << order;
1317 	struct page *p = page + 1;
1318 
1319 	atomic_set(compound_mapcount_ptr(page), 0);
1320 	atomic_set(compound_pincount_ptr(page), 0);
1321 
1322 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1323 		p->mapping = NULL;
1324 		clear_compound_head(p);
1325 		if (!demote)
1326 			set_page_refcounted(p);
1327 	}
1328 
1329 	set_compound_order(page, 0);
1330 #ifdef CONFIG_64BIT
1331 	page[1].compound_nr = 0;
1332 #endif
1333 	__ClearPageHead(page);
1334 }
1335 
1336 static void destroy_compound_hugetlb_page_for_demote(struct page *page,
1337 					unsigned int order)
1338 {
1339 	__destroy_compound_gigantic_page(page, order, true);
1340 }
1341 
1342 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1343 static void destroy_compound_gigantic_page(struct page *page,
1344 					unsigned int order)
1345 {
1346 	__destroy_compound_gigantic_page(page, order, false);
1347 }
1348 
1349 static void free_gigantic_page(struct page *page, unsigned int order)
1350 {
1351 	/*
1352 	 * If the page isn't allocated using the cma allocator,
1353 	 * cma_release() returns false.
1354 	 */
1355 #ifdef CONFIG_CMA
1356 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1357 		return;
1358 #endif
1359 
1360 	free_contig_range(page_to_pfn(page), 1 << order);
1361 }
1362 
1363 #ifdef CONFIG_CONTIG_ALLOC
1364 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1365 		int nid, nodemask_t *nodemask)
1366 {
1367 	unsigned long nr_pages = pages_per_huge_page(h);
1368 	if (nid == NUMA_NO_NODE)
1369 		nid = numa_mem_id();
1370 
1371 #ifdef CONFIG_CMA
1372 	{
1373 		struct page *page;
1374 		int node;
1375 
1376 		if (hugetlb_cma[nid]) {
1377 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1378 					huge_page_order(h), true);
1379 			if (page)
1380 				return page;
1381 		}
1382 
1383 		if (!(gfp_mask & __GFP_THISNODE)) {
1384 			for_each_node_mask(node, *nodemask) {
1385 				if (node == nid || !hugetlb_cma[node])
1386 					continue;
1387 
1388 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1389 						huge_page_order(h), true);
1390 				if (page)
1391 					return page;
1392 			}
1393 		}
1394 	}
1395 #endif
1396 
1397 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1398 }
1399 
1400 #else /* !CONFIG_CONTIG_ALLOC */
1401 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1402 					int nid, nodemask_t *nodemask)
1403 {
1404 	return NULL;
1405 }
1406 #endif /* CONFIG_CONTIG_ALLOC */
1407 
1408 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1409 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1410 					int nid, nodemask_t *nodemask)
1411 {
1412 	return NULL;
1413 }
1414 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1415 static inline void destroy_compound_gigantic_page(struct page *page,
1416 						unsigned int order) { }
1417 #endif
1418 
1419 /*
1420  * Remove hugetlb page from lists, and update dtor so that page appears
1421  * as just a compound page.
1422  *
1423  * A reference is held on the page, except in the case of demote.
1424  *
1425  * Must be called with hugetlb lock held.
1426  */
1427 static void __remove_hugetlb_page(struct hstate *h, struct page *page,
1428 							bool adjust_surplus,
1429 							bool demote)
1430 {
1431 	int nid = page_to_nid(page);
1432 
1433 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1434 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1435 
1436 	lockdep_assert_held(&hugetlb_lock);
1437 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1438 		return;
1439 
1440 	list_del(&page->lru);
1441 
1442 	if (HPageFreed(page)) {
1443 		h->free_huge_pages--;
1444 		h->free_huge_pages_node[nid]--;
1445 	}
1446 	if (adjust_surplus) {
1447 		h->surplus_huge_pages--;
1448 		h->surplus_huge_pages_node[nid]--;
1449 	}
1450 
1451 	/*
1452 	 * Very subtle
1453 	 *
1454 	 * For non-gigantic pages set the destructor to the normal compound
1455 	 * page dtor.  This is needed in case someone takes an additional
1456 	 * temporary ref to the page, and freeing is delayed until they drop
1457 	 * their reference.
1458 	 *
1459 	 * For gigantic pages set the destructor to the null dtor.  This
1460 	 * destructor will never be called.  Before freeing the gigantic
1461 	 * page destroy_compound_gigantic_page will turn the compound page
1462 	 * into a simple group of pages.  After this the destructor does not
1463 	 * apply.
1464 	 *
1465 	 * This handles the case where more than one ref is held when and
1466 	 * after update_and_free_page is called.
1467 	 *
1468 	 * In the case of demote we do not ref count the page as it will soon
1469 	 * be turned into a page of smaller size.
1470 	 */
1471 	if (!demote)
1472 		set_page_refcounted(page);
1473 	if (hstate_is_gigantic(h))
1474 		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1475 	else
1476 		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
1477 
1478 	h->nr_huge_pages--;
1479 	h->nr_huge_pages_node[nid]--;
1480 }
1481 
1482 static void remove_hugetlb_page(struct hstate *h, struct page *page,
1483 							bool adjust_surplus)
1484 {
1485 	__remove_hugetlb_page(h, page, adjust_surplus, false);
1486 }
1487 
1488 static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
1489 							bool adjust_surplus)
1490 {
1491 	__remove_hugetlb_page(h, page, adjust_surplus, true);
1492 }
1493 
1494 static void add_hugetlb_page(struct hstate *h, struct page *page,
1495 			     bool adjust_surplus)
1496 {
1497 	int zeroed;
1498 	int nid = page_to_nid(page);
1499 
1500 	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1501 
1502 	lockdep_assert_held(&hugetlb_lock);
1503 
1504 	INIT_LIST_HEAD(&page->lru);
1505 	h->nr_huge_pages++;
1506 	h->nr_huge_pages_node[nid]++;
1507 
1508 	if (adjust_surplus) {
1509 		h->surplus_huge_pages++;
1510 		h->surplus_huge_pages_node[nid]++;
1511 	}
1512 
1513 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1514 	set_page_private(page, 0);
1515 	SetHPageVmemmapOptimized(page);
1516 
1517 	/*
1518 	 * This page is about to be managed by the hugetlb allocator and
1519 	 * should have no users.  Drop our reference, and check for others
1520 	 * just in case.
1521 	 */
1522 	zeroed = put_page_testzero(page);
1523 	if (!zeroed)
1524 		/*
1525 		 * It is VERY unlikely soneone else has taken a ref on
1526 		 * the page.  In this case, we simply return as the
1527 		 * hugetlb destructor (free_huge_page) will be called
1528 		 * when this other ref is dropped.
1529 		 */
1530 		return;
1531 
1532 	arch_clear_hugepage_flags(page);
1533 	enqueue_huge_page(h, page);
1534 }
1535 
1536 static void __update_and_free_page(struct hstate *h, struct page *page)
1537 {
1538 	int i;
1539 	struct page *subpage = page;
1540 
1541 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1542 		return;
1543 
1544 	if (hugetlb_vmemmap_alloc(h, page)) {
1545 		spin_lock_irq(&hugetlb_lock);
1546 		/*
1547 		 * If we cannot allocate vmemmap pages, just refuse to free the
1548 		 * page and put the page back on the hugetlb free list and treat
1549 		 * as a surplus page.
1550 		 */
1551 		add_hugetlb_page(h, page, true);
1552 		spin_unlock_irq(&hugetlb_lock);
1553 		return;
1554 	}
1555 
1556 	for (i = 0; i < pages_per_huge_page(h);
1557 	     i++, subpage = mem_map_next(subpage, page, i)) {
1558 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1559 				1 << PG_referenced | 1 << PG_dirty |
1560 				1 << PG_active | 1 << PG_private |
1561 				1 << PG_writeback);
1562 	}
1563 
1564 	/*
1565 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
1566 	 * need to be given back to CMA in free_gigantic_page.
1567 	 */
1568 	if (hstate_is_gigantic(h) ||
1569 	    hugetlb_cma_page(page, huge_page_order(h))) {
1570 		destroy_compound_gigantic_page(page, huge_page_order(h));
1571 		free_gigantic_page(page, huge_page_order(h));
1572 	} else {
1573 		__free_pages(page, huge_page_order(h));
1574 	}
1575 }
1576 
1577 /*
1578  * Since update_and_free_page() can be called under any context, we cannot
1579  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1580  * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
1581  * the vmemmap pages.
1582  *
1583  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1584  * freed and frees them one-by-one. As the page->mapping pointer is going
1585  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1586  * structure of a lockless linked list of huge pages to be freed.
1587  */
1588 static LLIST_HEAD(hpage_freelist);
1589 
1590 static void free_hpage_workfn(struct work_struct *work)
1591 {
1592 	struct llist_node *node;
1593 
1594 	node = llist_del_all(&hpage_freelist);
1595 
1596 	while (node) {
1597 		struct page *page;
1598 		struct hstate *h;
1599 
1600 		page = container_of((struct address_space **)node,
1601 				     struct page, mapping);
1602 		node = node->next;
1603 		page->mapping = NULL;
1604 		/*
1605 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1606 		 * is going to trigger because a previous call to
1607 		 * remove_hugetlb_page() will set_compound_page_dtor(page,
1608 		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1609 		 */
1610 		h = size_to_hstate(page_size(page));
1611 
1612 		__update_and_free_page(h, page);
1613 
1614 		cond_resched();
1615 	}
1616 }
1617 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
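/*
 * The deferred path thus works as follows: update_and_free_page(h, page,
 * true) pushes the page onto hpage_freelist by reusing page->mapping as an
 * llist_node and, if the list was previously empty, schedules
 * free_hpage_work; free_hpage_workfn() later drains the list in process
 * context, where allocating vmemmap pages with GFP_KERNEL is allowed.
 */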
1618 
1619 static inline void flush_free_hpage_work(struct hstate *h)
1620 {
1621 	if (hugetlb_optimize_vmemmap_pages(h))
1622 		flush_work(&free_hpage_work);
1623 }
1624 
1625 static void update_and_free_page(struct hstate *h, struct page *page,
1626 				 bool atomic)
1627 {
1628 	if (!HPageVmemmapOptimized(page) || !atomic) {
1629 		__update_and_free_page(h, page);
1630 		return;
1631 	}
1632 
1633 	/*
1634 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1635 	 *
1636 	 * Only call schedule_work() if hpage_freelist was previously
1637 	 * empty. Otherwise, schedule_work() has already been called but the
1638 	 * workfn hasn't retrieved the list yet.
1639 	 */
1640 	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1641 		schedule_work(&free_hpage_work);
1642 }
1643 
1644 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1645 {
1646 	struct page *page, *t_page;
1647 
1648 	list_for_each_entry_safe(page, t_page, list, lru) {
1649 		update_and_free_page(h, page, false);
1650 		cond_resched();
1651 	}
1652 }
1653 
1654 struct hstate *size_to_hstate(unsigned long size)
1655 {
1656 	struct hstate *h;
1657 
1658 	for_each_hstate(h) {
1659 		if (huge_page_size(h) == size)
1660 			return h;
1661 	}
1662 	return NULL;
1663 }
1664 
1665 void free_huge_page(struct page *page)
1666 {
1667 	/*
1668 	 * Can't pass hstate in here because it is called from the
1669 	 * compound page destructor.
1670 	 */
1671 	struct hstate *h = page_hstate(page);
1672 	int nid = page_to_nid(page);
1673 	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
1674 	bool restore_reserve;
1675 	unsigned long flags;
1676 
1677 	VM_BUG_ON_PAGE(page_count(page), page);
1678 	VM_BUG_ON_PAGE(page_mapcount(page), page);
1679 
1680 	hugetlb_set_page_subpool(page, NULL);
1681 	if (PageAnon(page))
1682 		__ClearPageAnonExclusive(page);
1683 	page->mapping = NULL;
1684 	restore_reserve = HPageRestoreReserve(page);
1685 	ClearHPageRestoreReserve(page);
1686 
1687 	/*
1688 	 * If HPageRestoreReserve was set on page, page allocation consumed a
1689 	 * reservation.  If the page was associated with a subpool, there
1690 	 * would have been a page reserved in the subpool before allocation
1691 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1692 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1693 	 * remove the reserved page from the subpool.
1694 	 */
1695 	if (!restore_reserve) {
1696 		/*
1697 		 * A return code of zero implies that the subpool will be
1698 		 * under its minimum size if the reservation is not restored
1699 		 * after the page is freed.  Therefore, force the restore_reserve
1700 		 * operation.
1701 		 */
1702 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1703 			restore_reserve = true;
1704 	}
1705 
1706 	spin_lock_irqsave(&hugetlb_lock, flags);
1707 	ClearHPageMigratable(page);
1708 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1709 				     pages_per_huge_page(h), page);
1710 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1711 					  pages_per_huge_page(h), page);
1712 	if (restore_reserve)
1713 		h->resv_huge_pages++;
1714 
1715 	if (HPageTemporary(page)) {
1716 		remove_hugetlb_page(h, page, false);
1717 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1718 		update_and_free_page(h, page, true);
1719 	} else if (h->surplus_huge_pages_node[nid]) {
1720 		/* remove the page from active list */
1721 		remove_hugetlb_page(h, page, true);
1722 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1723 		update_and_free_page(h, page, true);
1724 	} else {
1725 		arch_clear_hugepage_flags(page);
1726 		enqueue_huge_page(h, page);
1727 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1728 	}
1729 }
1730 
1731 /*
1732  * Must be called with the hugetlb lock held
1733  */
1734 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1735 {
1736 	lockdep_assert_held(&hugetlb_lock);
1737 	h->nr_huge_pages++;
1738 	h->nr_huge_pages_node[nid]++;
1739 }
1740 
1741 static void __prep_new_huge_page(struct hstate *h, struct page *page)
1742 {
1743 	hugetlb_vmemmap_free(h, page);
1744 	INIT_LIST_HEAD(&page->lru);
1745 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1746 	hugetlb_set_page_subpool(page, NULL);
1747 	set_hugetlb_cgroup(page, NULL);
1748 	set_hugetlb_cgroup_rsvd(page, NULL);
1749 }
1750 
1751 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1752 {
1753 	__prep_new_huge_page(h, page);
1754 	spin_lock_irq(&hugetlb_lock);
1755 	__prep_account_new_huge_page(h, nid);
1756 	spin_unlock_irq(&hugetlb_lock);
1757 }
1758 
1759 static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
1760 								bool demote)
1761 {
1762 	int i, j;
1763 	int nr_pages = 1 << order;
1764 	struct page *p = page + 1;
1765 
1766 	/* we rely on prep_new_huge_page to set the destructor */
1767 	set_compound_order(page, order);
1768 	__ClearPageReserved(page);
1769 	__SetPageHead(page);
1770 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1771 		/*
1772 		 * For gigantic hugepages allocated through bootmem at
1773 		 * boot, it's safer to be consistent with the not-gigantic
1774 		 * hugepages and clear the PG_reserved bit from all tail pages
1775 		 * too.  Otherwise drivers using get_user_pages() to access tail
1776 		 * pages may get the reference counting wrong if they see
1777 		 * PG_reserved set on a tail page (despite the head page not
1778 		 * having PG_reserved set).  Enforcing this consistency between
1779 		 * head and tail pages allows drivers to optimize away a check
1780 		 * on the head page when they need to know if put_page() is needed
1781 		 * after get_user_pages().
1782 		 */
1783 		__ClearPageReserved(p);
1784 		/*
1785 		 * Subtle and very unlikely
1786 		 *
1787 		 * Gigantic 'page allocators' such as memblock or cma will
1788 		 * return a set of pages with each page ref counted.  We need
1789 		 * to turn this set of pages into a compound page with tail
1790 		 * page ref counts set to zero.  Code such as speculative page
1791 		 * cache adding could take a ref on a 'to be' tail page.
1792 		 * We need to respect any increased ref count, and only set
1793 		 * the ref count to zero if count is currently 1.  If count
1794 		 * is not 1, we return an error.  An error return indicates
1795 		 * the set of pages can not be converted to a gigantic page.
1796 		 * The caller who allocated the pages should then discard the
1797 		 * pages using the appropriate free interface.
1798 		 *
1799 		 * In the case of demote, the ref count will be zero.
1800 		 */
1801 		if (!demote) {
1802 			if (!page_ref_freeze(p, 1)) {
1803 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1804 				goto out_error;
1805 			}
1806 		} else {
1807 			VM_BUG_ON_PAGE(page_count(p), p);
1808 		}
1809 		set_compound_head(p, page);
1810 	}
1811 	atomic_set(compound_mapcount_ptr(page), -1);
1812 	atomic_set(compound_pincount_ptr(page), 0);
1813 	return true;
1814 
1815 out_error:
1816 	/* undo tail page modifications made above */
1817 	p = page + 1;
1818 	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
1819 		clear_compound_head(p);
1820 		set_page_refcounted(p);
1821 	}
1822 	/* need to clear PG_reserved on remaining tail pages  */
1823 	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
1824 		__ClearPageReserved(p);
1825 	set_compound_order(page, 0);
1826 #ifdef CONFIG_64BIT
1827 	page[1].compound_nr = 0;
1828 #endif
1829 	__ClearPageHead(page);
1830 	return false;
1831 }
1832 
1833 static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
1834 {
1835 	return __prep_compound_gigantic_page(page, order, false);
1836 }
1837 
1838 static bool prep_compound_gigantic_page_for_demote(struct page *page,
1839 							unsigned int order)
1840 {
1841 	return __prep_compound_gigantic_page(page, order, true);
1842 }
1843 
1844 /*
1845  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1846  * transparent huge pages.  See the PageTransHuge() documentation for more
1847  * details.
1848  */
1849 int PageHuge(struct page *page)
1850 {
1851 	if (!PageCompound(page))
1852 		return 0;
1853 
1854 	page = compound_head(page);
1855 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1856 }
1857 EXPORT_SYMBOL_GPL(PageHuge);
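
/*
 * Illustrative sketch, not part of this file: PageHuge() is the check to
 * make before using hugetlb-specific helpers such as page_hstate(), since
 * a compound page on its own could also be a transparent huge page.
 */
static inline unsigned long example_hugetlb_page_size(struct page *page)
{
	if (!PageHuge(page))
		return 0;

	return huge_page_size(page_hstate(compound_head(page)));
}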
1858 
1859 /*
1860  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1861  * normal or transparent huge pages.
1862  */
1863 int PageHeadHuge(struct page *page_head)
1864 {
1865 	if (!PageHead(page_head))
1866 		return 0;
1867 
1868 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1869 }
1870 EXPORT_SYMBOL_GPL(PageHeadHuge);
1871 
1872 /*
1873  * Find and lock address space (mapping) in write mode.
1874  *
1875  * Upon entry, the page is locked which means that page_mapping() is
1876  * stable.  Due to locking order, we can only trylock_write.  If we can
1877  * not get the lock, simply return NULL to the caller.
1878  */
1879 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1880 {
1881 	struct address_space *mapping = page_mapping(hpage);
1882 
1883 	if (!mapping)
1884 		return mapping;
1885 
1886 	if (i_mmap_trylock_write(mapping))
1887 		return mapping;
1888 
1889 	return NULL;
1890 }
1891 
1892 pgoff_t hugetlb_basepage_index(struct page *page)
1893 {
1894 	struct page *page_head = compound_head(page);
1895 	pgoff_t index = page_index(page_head);
1896 	unsigned long compound_idx;
1897 
1898 	if (compound_order(page_head) >= MAX_ORDER)
1899 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1900 	else
1901 		compound_idx = page - page_head;
1902 
1903 	return (index << compound_order(page_head)) + compound_idx;
1904 }
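
/*
 * Illustrative example, not part of this file: for a 2MB hugetlb page
 * (compound order 9) at file index 3, a base page located 5 small pages
 * past the head yields a basepage index of (3 << 9) + 5 = 1541.
 */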
1905 
1906 static struct page *alloc_buddy_huge_page(struct hstate *h,
1907 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1908 		nodemask_t *node_alloc_noretry)
1909 {
1910 	int order = huge_page_order(h);
1911 	struct page *page;
1912 	bool alloc_try_hard = true;
1913 
1914 	/*
1915 	 * By default we always try hard to allocate the page with
1916 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1917 	 * a loop (to adjust global huge page counts) and previous allocation
1918 	 * failed, do not continue to try hard on the same node.  Use the
1919 	 * node_alloc_noretry bitmap to manage this state information.
1920 	 */
1921 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1922 		alloc_try_hard = false;
1923 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1924 	if (alloc_try_hard)
1925 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1926 	if (nid == NUMA_NO_NODE)
1927 		nid = numa_mem_id();
1928 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1929 	if (page)
1930 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1931 	else
1932 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1933 
1934 	/*
1935 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1936 	 * indicates an overall state change.  Clear bit so that we resume
1937 	 * normal 'try hard' allocations.
1938 	 */
1939 	if (node_alloc_noretry && page && !alloc_try_hard)
1940 		node_clear(nid, *node_alloc_noretry);
1941 
1942 	/*
1943 	 * If we tried hard to get a page but failed, set bit so that
1944 	 * subsequent attempts will not try as hard until there is an
1945 	 * overall state change.
1946 	 */
1947 	if (node_alloc_noretry && !page && alloc_try_hard)
1948 		node_set(nid, *node_alloc_noretry);
1949 
1950 	return page;
1951 }
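
/*
 * Illustrative sketch, not part of this file: callers allocating many pages
 * in a loop pass a persistent nodemask as the noretry state, so a node that
 * just failed a "try hard" allocation is treated gently on later iterations
 * (see alloc_pool_huge_page() and set_max_huge_pages()).  The wrapper name
 * is hypothetical.
 */
static inline struct page *example_alloc_one_huge_page(struct hstate *h,
				int nid, nodemask_t *noretry_state)
{
	return alloc_buddy_huge_page(h, htlb_alloc_mask(h) | __GFP_THISNODE,
				     nid, NULL, noretry_state);
}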
1952 
1953 /*
1954  * Common helper to allocate a fresh hugetlb page. All specific allocators
1955  * should use this function to get new hugetlb pages
1956  */
1957 static struct page *alloc_fresh_huge_page(struct hstate *h,
1958 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1959 		nodemask_t *node_alloc_noretry)
1960 {
1961 	struct page *page;
1962 	bool retry = false;
1963 
1964 retry:
1965 	if (hstate_is_gigantic(h))
1966 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1967 	else
1968 		page = alloc_buddy_huge_page(h, gfp_mask,
1969 				nid, nmask, node_alloc_noretry);
1970 	if (!page)
1971 		return NULL;
1972 
1973 	if (hstate_is_gigantic(h)) {
1974 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
1975 			/*
1976 			 * Rare failure to convert pages to compound page.
1977 			 * Free pages and try again - ONCE!
1978 			 */
1979 			free_gigantic_page(page, huge_page_order(h));
1980 			if (!retry) {
1981 				retry = true;
1982 				goto retry;
1983 			}
1984 			return NULL;
1985 		}
1986 	}
1987 	prep_new_huge_page(h, page, page_to_nid(page));
1988 
1989 	return page;
1990 }
1991 
1992 /*
1993  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1994  * manner.
1995  */
1996 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1997 				nodemask_t *node_alloc_noretry)
1998 {
1999 	struct page *page;
2000 	int nr_nodes, node;
2001 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2002 
2003 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2004 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
2005 						node_alloc_noretry);
2006 		if (page)
2007 			break;
2008 	}
2009 
2010 	if (!page)
2011 		return 0;
2012 
2013 	put_page(page); /* free it into the hugepage allocator */
2014 
2015 	return 1;
2016 }
2017 
2018 /*
2019  * Remove a huge page from the pool, from the next node to free.  Attempt to keep
2020  * persistent huge pages more or less balanced over allowed nodes.
2021  * This routine only 'removes' the hugetlb page.  The caller must make
2022  * an additional call to free the page to low level allocators.
2023  * Called with hugetlb_lock locked.
2024  */
2025 static struct page *remove_pool_huge_page(struct hstate *h,
2026 						nodemask_t *nodes_allowed,
2027 						 bool acct_surplus)
2028 {
2029 	int nr_nodes, node;
2030 	struct page *page = NULL;
2031 
2032 	lockdep_assert_held(&hugetlb_lock);
2033 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2034 		/*
2035 		 * If we're returning unused surplus pages, only examine
2036 		 * nodes with surplus pages.
2037 		 */
2038 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2039 		    !list_empty(&h->hugepage_freelists[node])) {
2040 			page = list_entry(h->hugepage_freelists[node].next,
2041 					  struct page, lru);
2042 			remove_hugetlb_page(h, page, acct_surplus);
2043 			break;
2044 		}
2045 	}
2046 
2047 	return page;
2048 }
2049 
2050 /*
2051  * Dissolve a given free hugepage into free buddy pages. This function does
2052  * nothing for in-use hugepages and non-hugepages.
2053  * This function returns values like below:
2054  *
2055  *  -ENOMEM: failed to allocate vmemmap pages needed to free the hugepage
2056  *           when the system is under memory pressure and the feature of
2057  *           freeing unused vmemmap pages associated with each hugetlb page
2058  *           is enabled.
2059  *  -EBUSY:  failed to dissolve the free hugepage, or the hugepage is in-use
2060  *           (allocated or reserved.)
2061  *       0:  successfully dissolved free hugepages or the page is not a
2062  *           hugepage (considered as already dissolved)
2063  */
2064 int dissolve_free_huge_page(struct page *page)
2065 {
2066 	int rc = -EBUSY;
2067 
2068 retry:
2069 	/* Avoid disrupting the normal path by needlessly taking hugetlb_lock */
2070 	if (!PageHuge(page))
2071 		return 0;
2072 
2073 	spin_lock_irq(&hugetlb_lock);
2074 	if (!PageHuge(page)) {
2075 		rc = 0;
2076 		goto out;
2077 	}
2078 
2079 	if (!page_count(page)) {
2080 		struct page *head = compound_head(page);
2081 		struct hstate *h = page_hstate(head);
2082 		if (h->free_huge_pages - h->resv_huge_pages == 0)
2083 			goto out;
2084 
2085 		/*
2086 		 * We should make sure that the page is already on the free list
2087 		 * when it is dissolved.
2088 		 */
2089 		if (unlikely(!HPageFreed(head))) {
2090 			spin_unlock_irq(&hugetlb_lock);
2091 			cond_resched();
2092 
2093 			/*
2094 			 * Theoretically, we should return -EBUSY when we
2095 			 * encounter this race.  In fact, we have a chance to
2096 			 * successfully dissolve the page if we retry, because
2097 			 * the race window is quite small.  Seizing this
2098 			 * opportunity is an optimization that increases the
2099 			 * success rate of dissolving the page.
2100 			 */
2101 			goto retry;
2102 		}
2103 
2104 		remove_hugetlb_page(h, head, false);
2105 		h->max_huge_pages--;
2106 		spin_unlock_irq(&hugetlb_lock);
2107 
2108 		/*
2109 		 * Normally update_and_free_page will allocate required vmemmap
2110 		 * before freeing the page.  update_and_free_page will fail to
2111 		 * free the page if it can not allocate required vmemmap.  We
2112 		 * need to adjust max_huge_pages if the page is not freed.
2113 		 * Attempt to allocate vmemmap here so that we can take
2114 		 * appropriate action on failure.
2115 		 */
2116 		rc = hugetlb_vmemmap_alloc(h, head);
2117 		if (!rc) {
2118 			/*
2119 			 * Move PageHWPoison flag from head page to the raw
2120 			 * error page, which makes any subpages other than
2121 			 * the error page reusable.
2122 			 */
2123 			if (PageHWPoison(head) && page != head) {
2124 				SetPageHWPoison(page);
2125 				ClearPageHWPoison(head);
2126 			}
2127 			update_and_free_page(h, head, false);
2128 		} else {
2129 			spin_lock_irq(&hugetlb_lock);
2130 			add_hugetlb_page(h, head, false);
2131 			h->max_huge_pages++;
2132 			spin_unlock_irq(&hugetlb_lock);
2133 		}
2134 
2135 		return rc;
2136 	}
2137 out:
2138 	spin_unlock_irq(&hugetlb_lock);
2139 	return rc;
2140 }
2141 
2142 /*
2143  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2144  * make specified memory blocks removable from the system.
2145  * Note that this will dissolve a free gigantic hugepage completely, if any
2146  * part of it lies within the given range.
2147  * Also note that if dissolve_free_huge_page() returns with an error, all
2148  * free hugepages that were dissolved before that error are lost.
2149  */
2150 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2151 {
2152 	unsigned long pfn;
2153 	struct page *page;
2154 	int rc = 0;
2155 
2156 	if (!hugepages_supported())
2157 		return rc;
2158 
2159 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
2160 		page = pfn_to_page(pfn);
2161 		rc = dissolve_free_huge_page(page);
2162 		if (rc)
2163 			break;
2164 	}
2165 
2166 	return rc;
2167 }
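
/*
 * Illustrative sketch, not part of this file: memory offlining would call
 * this over the pfn range being removed, e.g. for a hypothetical block of
 * nr_pages base pages starting at start_pfn.
 */
static inline int example_dissolve_range(unsigned long start_pfn,
					 unsigned long nr_pages)
{
	/* free hugepages overlapping the range are returned to the buddy */
	return dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
}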
2168 
2169 /*
2170  * Allocates a fresh surplus page from the page allocator.
2171  */
2172 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2173 		int nid, nodemask_t *nmask, bool zero_ref)
2174 {
2175 	struct page *page = NULL;
2176 	bool retry = false;
2177 
2178 	if (hstate_is_gigantic(h))
2179 		return NULL;
2180 
2181 	spin_lock_irq(&hugetlb_lock);
2182 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2183 		goto out_unlock;
2184 	spin_unlock_irq(&hugetlb_lock);
2185 
2186 retry:
2187 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2188 	if (!page)
2189 		return NULL;
2190 
2191 	spin_lock_irq(&hugetlb_lock);
2192 	/*
2193 	 * We could have raced with the pool size change.
2194 	 * Double check that and simply deallocate the new page
2195 	 * if we would end up overcommitting the surpluses. Abuse the
2196 	 * temporary page flag to work around the nasty free_huge_page
2197 	 * code flow.
2198 	 */
2199 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2200 		SetHPageTemporary(page);
2201 		spin_unlock_irq(&hugetlb_lock);
2202 		put_page(page);
2203 		return NULL;
2204 	}
2205 
2206 	if (zero_ref) {
2207 		/*
2208 		 * Caller requires a page with zero ref count.
2209 		 * We will drop ref count here.  If someone else is holding
2210 		 * a ref, the page will be freed when they drop it.  Abuse
2211 		 * temporary page flag to accomplish this.
2212 		 */
2213 		SetHPageTemporary(page);
2214 		if (!put_page_testzero(page)) {
2215 			/*
2216 			 * Unexpected inflated ref count on freshly allocated
2217 			 * huge page.  Retry once.
2218 			 */
2219 			pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
2220 			spin_unlock_irq(&hugetlb_lock);
2221 			if (retry)
2222 				return NULL;
2223 
2224 			retry = true;
2225 			goto retry;
2226 		}
2227 		ClearHPageTemporary(page);
2228 	}
2229 
2230 	h->surplus_huge_pages++;
2231 	h->surplus_huge_pages_node[page_to_nid(page)]++;
2232 
2233 out_unlock:
2234 	spin_unlock_irq(&hugetlb_lock);
2235 
2236 	return page;
2237 }
2238 
2239 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2240 				     int nid, nodemask_t *nmask)
2241 {
2242 	struct page *page;
2243 
2244 	if (hstate_is_gigantic(h))
2245 		return NULL;
2246 
2247 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2248 	if (!page)
2249 		return NULL;
2250 
2251 	/*
2252 	 * We do not account these pages as surplus because they are only
2253 	 * temporary and will be released properly on the last reference
2254 	 */
2255 	SetHPageTemporary(page);
2256 
2257 	return page;
2258 }
2259 
2260 /*
2261  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2262  */
2263 static
2264 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2265 		struct vm_area_struct *vma, unsigned long addr)
2266 {
2267 	struct page *page = NULL;
2268 	struct mempolicy *mpol;
2269 	gfp_t gfp_mask = htlb_alloc_mask(h);
2270 	int nid;
2271 	nodemask_t *nodemask;
2272 
2273 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2274 	if (mpol_is_preferred_many(mpol)) {
2275 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2276 
2277 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2278 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
2279 
2280 		/* Fallback to all nodes if page==NULL */
2281 		nodemask = NULL;
2282 	}
2283 
2284 	if (!page)
2285 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2286 	mpol_cond_put(mpol);
2287 	return page;
2288 }
2289 
2290 /* page migration callback function */
2291 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2292 		nodemask_t *nmask, gfp_t gfp_mask)
2293 {
2294 	spin_lock_irq(&hugetlb_lock);
2295 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
2296 		struct page *page;
2297 
2298 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
2299 		if (page) {
2300 			spin_unlock_irq(&hugetlb_lock);
2301 			return page;
2302 		}
2303 	}
2304 	spin_unlock_irq(&hugetlb_lock);
2305 
2306 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
2307 }
2308 
2309 /* mempolicy aware migration callback */
2310 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2311 		unsigned long address)
2312 {
2313 	struct mempolicy *mpol;
2314 	nodemask_t *nodemask;
2315 	struct page *page;
2316 	gfp_t gfp_mask;
2317 	int node;
2318 
2319 	gfp_mask = htlb_alloc_mask(h);
2320 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2321 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2322 	mpol_cond_put(mpol);
2323 
2324 	return page;
2325 }
2326 
2327 /*
2328  * Increase the hugetlb pool such that it can accommodate a reservation
2329  * of size 'delta'.
2330  */
2331 static int gather_surplus_pages(struct hstate *h, long delta)
2332 	__must_hold(&hugetlb_lock)
2333 {
2334 	struct list_head surplus_list;
2335 	struct page *page, *tmp;
2336 	int ret;
2337 	long i;
2338 	long needed, allocated;
2339 	bool alloc_ok = true;
2340 
2341 	lockdep_assert_held(&hugetlb_lock);
2342 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2343 	if (needed <= 0) {
2344 		h->resv_huge_pages += delta;
2345 		return 0;
2346 	}
2347 
2348 	allocated = 0;
2349 	INIT_LIST_HEAD(&surplus_list);
2350 
2351 	ret = -ENOMEM;
2352 retry:
2353 	spin_unlock_irq(&hugetlb_lock);
2354 	for (i = 0; i < needed; i++) {
2355 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2356 				NUMA_NO_NODE, NULL, true);
2357 		if (!page) {
2358 			alloc_ok = false;
2359 			break;
2360 		}
2361 		list_add(&page->lru, &surplus_list);
2362 		cond_resched();
2363 	}
2364 	allocated += i;
2365 
2366 	/*
2367 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2368 	 * because either resv_huge_pages or free_huge_pages may have changed.
2369 	 */
2370 	spin_lock_irq(&hugetlb_lock);
2371 	needed = (h->resv_huge_pages + delta) -
2372 			(h->free_huge_pages + allocated);
2373 	if (needed > 0) {
2374 		if (alloc_ok)
2375 			goto retry;
2376 		/*
2377 		 * We were not able to allocate enough pages to
2378 		 * satisfy the entire reservation so we free what
2379 		 * we've allocated so far.
2380 		 */
2381 		goto free;
2382 	}
2383 	/*
2384 	 * The surplus_list now contains _at_least_ the number of extra pages
2385 	 * needed to accommodate the reservation.  Add the appropriate number
2386 	 * of pages to the hugetlb pool and free the extras back to the buddy
2387 	 * allocator.  Commit the entire reservation here to prevent another
2388 	 * process from stealing the pages as they are added to the pool but
2389 	 * before they are reserved.
2390 	 */
2391 	needed += allocated;
2392 	h->resv_huge_pages += delta;
2393 	ret = 0;
2394 
2395 	/* Free the needed pages to the hugetlb pool */
2396 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2397 		if ((--needed) < 0)
2398 			break;
2399 		/* Add the page to the hugetlb allocator */
2400 		enqueue_huge_page(h, page);
2401 	}
2402 free:
2403 	spin_unlock_irq(&hugetlb_lock);
2404 
2405 	/*
2406 	 * Free unnecessary surplus pages to the buddy allocator.
2407 	 * Pages have no ref count, call free_huge_page directly.
2408 	 */
2409 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2410 		free_huge_page(page);
2411 	spin_lock_irq(&hugetlb_lock);
2412 
2413 	return ret;
2414 }
2415 
2416 /*
2417  * This routine has two main purposes:
2418  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2419  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2420  *    to the associated reservation map.
2421  * 2) Free any unused surplus pages that may have been allocated to satisfy
2422  *    the reservation.  As many as unused_resv_pages may be freed.
2423  */
2424 static void return_unused_surplus_pages(struct hstate *h,
2425 					unsigned long unused_resv_pages)
2426 {
2427 	unsigned long nr_pages;
2428 	struct page *page;
2429 	LIST_HEAD(page_list);
2430 
2431 	lockdep_assert_held(&hugetlb_lock);
2432 	/* Uncommit the reservation */
2433 	h->resv_huge_pages -= unused_resv_pages;
2434 
2435 	/* Cannot return gigantic pages currently */
2436 	if (hstate_is_gigantic(h))
2437 		goto out;
2438 
2439 	/*
2440 	 * Part (or even all) of the reservation could have been backed
2441 	 * by pre-allocated pages. Only free surplus pages.
2442 	 */
2443 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2444 
2445 	/*
2446 	 * We want to release as many surplus pages as possible, spread
2447 	 * evenly across all nodes with memory. Iterate across these nodes
2448 	 * until we can no longer free unreserved surplus pages. This occurs
2449 	 * when the nodes with surplus pages have no free pages.
2450 	 * remove_pool_huge_page() will balance the freed pages across the
2451 	 * on-line nodes with memory and will handle the hstate accounting.
2452 	 */
2453 	while (nr_pages--) {
2454 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2455 		if (!page)
2456 			goto out;
2457 
2458 		list_add(&page->lru, &page_list);
2459 	}
2460 
2461 out:
2462 	spin_unlock_irq(&hugetlb_lock);
2463 	update_and_free_pages_bulk(h, &page_list);
2464 	spin_lock_irq(&hugetlb_lock);
2465 }
2466 
2467 
2468 /*
2469  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2470  * are used by the huge page allocation routines to manage reservations.
2471  *
2472  * vma_needs_reservation is called to determine if the huge page at addr
2473  * within the vma has an associated reservation.  If a reservation is
2474  * needed, the value 1 is returned.  The caller is then responsible for
2475  * managing the global reservation and subpool usage counts.  After
2476  * the huge page has been allocated, vma_commit_reservation is called
2477  * to add the page to the reservation map.  If the page allocation fails,
2478  * the reservation must be ended instead of committed.  vma_end_reservation
2479  * is called in such cases.
2480  *
2481  * In the normal case, vma_commit_reservation returns the same value
2482  * as the preceding vma_needs_reservation call.  The only time this
2483  * is not the case is if a reserve map was changed between calls.  It
2484  * is the responsibility of the caller to notice the difference and
2485  * take appropriate action.
2486  *
2487  * vma_add_reservation is used in error paths where a reservation must
2488  * be restored when a newly allocated huge page must be freed.  It is
2489  * to be called after calling vma_needs_reservation to determine if a
2490  * reservation exists.
2491  *
2492  * vma_del_reservation is used in error paths where an entry in the reserve
2493  * map was created during huge page allocation and must be removed.  It is to
2494  * be called after calling vma_needs_reservation to determine if a reservation
2495  * exists.
2496  */
2497 enum vma_resv_mode {
2498 	VMA_NEEDS_RESV,
2499 	VMA_COMMIT_RESV,
2500 	VMA_END_RESV,
2501 	VMA_ADD_RESV,
2502 	VMA_DEL_RESV,
2503 };
2504 static long __vma_reservation_common(struct hstate *h,
2505 				struct vm_area_struct *vma, unsigned long addr,
2506 				enum vma_resv_mode mode)
2507 {
2508 	struct resv_map *resv;
2509 	pgoff_t idx;
2510 	long ret;
2511 	long dummy_out_regions_needed;
2512 
2513 	resv = vma_resv_map(vma);
2514 	if (!resv)
2515 		return 1;
2516 
2517 	idx = vma_hugecache_offset(h, vma, addr);
2518 	switch (mode) {
2519 	case VMA_NEEDS_RESV:
2520 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2521 		/* We assume that vma_reservation_* routines always operate on
2522 		 * 1 page, and that adding a 1 page entry to the resv map can only
2523 		 * ever require 1 region.
2524 		 */
2525 		VM_BUG_ON(dummy_out_regions_needed != 1);
2526 		break;
2527 	case VMA_COMMIT_RESV:
2528 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2529 		/* region_add calls of range 1 should never fail. */
2530 		VM_BUG_ON(ret < 0);
2531 		break;
2532 	case VMA_END_RESV:
2533 		region_abort(resv, idx, idx + 1, 1);
2534 		ret = 0;
2535 		break;
2536 	case VMA_ADD_RESV:
2537 		if (vma->vm_flags & VM_MAYSHARE) {
2538 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2539 			/* region_add calls of range 1 should never fail. */
2540 			VM_BUG_ON(ret < 0);
2541 		} else {
2542 			region_abort(resv, idx, idx + 1, 1);
2543 			ret = region_del(resv, idx, idx + 1);
2544 		}
2545 		break;
2546 	case VMA_DEL_RESV:
2547 		if (vma->vm_flags & VM_MAYSHARE) {
2548 			region_abort(resv, idx, idx + 1, 1);
2549 			ret = region_del(resv, idx, idx + 1);
2550 		} else {
2551 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2552 			/* region_add calls of range 1 should never fail. */
2553 			VM_BUG_ON(ret < 0);
2554 		}
2555 		break;
2556 	default:
2557 		BUG();
2558 	}
2559 
2560 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2561 		return ret;
2562 	/*
2563 	 * We know private mapping must have HPAGE_RESV_OWNER set.
2564 	 *
2565 	 * In most cases, reserves always exist for private mappings.
2566 	 * However, a file associated with mapping could have been
2567 	 * hole punched or truncated after reserves were consumed.  A
2568 	 * subsequent fault on such a range will then not use reserves.
2569 	 * Subtle - The reserve map for private mappings has the
2570 	 * opposite meaning than that of shared mappings.  If NO
2571 	 * entry is in the reserve map, it means a reservation exists.
2572 	 * If an entry exists in the reserve map, it means the
2573 	 * reservation has already been consumed.  As a result, the
2574 	 * return value of this routine is the opposite of the
2575 	 * value returned from reserve map manipulation routines above.
2576 	 */
2577 	if (ret > 0)
2578 		return 0;
2579 	if (ret == 0)
2580 		return 1;
2581 	return ret;
2582 }
2583 
2584 static long vma_needs_reservation(struct hstate *h,
2585 			struct vm_area_struct *vma, unsigned long addr)
2586 {
2587 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2588 }
2589 
2590 static long vma_commit_reservation(struct hstate *h,
2591 			struct vm_area_struct *vma, unsigned long addr)
2592 {
2593 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2594 }
2595 
2596 static void vma_end_reservation(struct hstate *h,
2597 			struct vm_area_struct *vma, unsigned long addr)
2598 {
2599 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2600 }
2601 
2602 static long vma_add_reservation(struct hstate *h,
2603 			struct vm_area_struct *vma, unsigned long addr)
2604 {
2605 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2606 }
2607 
2608 static long vma_del_reservation(struct hstate *h,
2609 			struct vm_area_struct *vma, unsigned long addr)
2610 {
2611 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2612 }
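
/*
 * Illustrative sketch, not part of this file, of the calling convention
 * described in the comment above: check whether a reservation is needed,
 * allocate the huge page, then commit or end the reservation depending on
 * whether the allocation succeeded ('allocated' is a stand-in for that
 * outcome).  See alloc_huge_page() below for the real error handling.
 */
static inline void example_reservation_flow(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr, bool allocated)
{
	long needs = vma_needs_reservation(h, vma, addr);

	if (needs < 0)
		return;		/* rare reserve map allocation failure */

	if (allocated)
		(void)vma_commit_reservation(h, vma, addr);
	else
		vma_end_reservation(h, vma, addr);
}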
2613 
2614 /*
2615  * This routine is called to restore reservation information on error paths.
2616  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2617  * the hugetlb mutex should remain held when calling this routine.
2618  *
2619  * It handles two specific cases:
2620  * 1) A reservation was in place and the page consumed the reservation.
2621  *    HPageRestoreReserve is set in the page.
2622  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2623  *    not set.  However, alloc_huge_page always updates the reserve map.
2624  *
2625  * In case 1, free_huge_page later in the error path will increment the
2626  * global reserve count.  But, free_huge_page does not have enough context
2627  * to adjust the reservation map.  This case deals primarily with private
2628  * mappings.  Adjust the reserve map here to be consistent with global
2629  * reserve count adjustments to be made by free_huge_page.  Make sure the
2630  * reserve map indicates there is a reservation present.
2631  *
2632  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2633  */
2634 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2635 			unsigned long address, struct page *page)
2636 {
2637 	long rc = vma_needs_reservation(h, vma, address);
2638 
2639 	if (HPageRestoreReserve(page)) {
2640 		if (unlikely(rc < 0))
2641 			/*
2642 			 * Rare out of memory condition in reserve map
2643 			 * manipulation.  Clear HPageRestoreReserve so that
2644 			 * global reserve count will not be incremented
2645 			 * by free_huge_page.  This will make it appear
2646 			 * as though the reservation for this page was
2647 			 * consumed.  This may prevent the task from
2648 			 * faulting in the page at a later time.  This
2649 			 * is better than inconsistent global huge page
2650 			 * accounting of reserve counts.
2651 			 */
2652 			ClearHPageRestoreReserve(page);
2653 		else if (rc)
2654 			(void)vma_add_reservation(h, vma, address);
2655 		else
2656 			vma_end_reservation(h, vma, address);
2657 	} else {
2658 		if (!rc) {
2659 			/*
2660 			 * This indicates there is an entry in the reserve map
2661 			 * not added by alloc_huge_page.  We know it was added
2662 			 * before the alloc_huge_page call, otherwise
2663 			 * HPageRestoreReserve would be set on the page.
2664 			 * Remove the entry so that a subsequent allocation
2665 			 * does not consume a reservation.
2666 			 */
2667 			rc = vma_del_reservation(h, vma, address);
2668 			if (rc < 0)
2669 				/*
2670 				 * VERY rare out of memory condition.  Since
2671 				 * we can not delete the entry, set
2672 				 * HPageRestoreReserve so that the reserve
2673 				 * count will be incremented when the page
2674 				 * is freed.  This reserve will be consumed
2675 				 * on a subsequent allocation.
2676 				 */
2677 				SetHPageRestoreReserve(page);
2678 		} else if (rc < 0) {
2679 			/*
2680 			 * Rare out of memory condition from
2681 			 * vma_needs_reservation call.  Memory allocation is
2682 			 * only attempted if a new entry is needed.  Therefore,
2683 			 * this implies there is not an entry in the
2684 			 * reserve map.
2685 			 *
2686 			 * For shared mappings, no entry in the map indicates
2687 			 * no reservation.  We are done.
2688 			 */
2689 			if (!(vma->vm_flags & VM_MAYSHARE))
2690 				/*
2691 				 * For private mappings, no entry indicates
2692 				 * a reservation is present.  Since we can
2693 				 * not add an entry, set HPageRestoreReserve
2694 				 * on the page so reserve count will be
2695 				 * incremented when freed.  This reserve will
2696 				 * be consumed on a subsequent allocation.
2697 				 */
2698 				SetHPageRestoreReserve(page);
2699 		} else
2700 			/*
2701 			 * No reservation present, do nothing
2702 			 */
2703 			 vma_end_reservation(h, vma, address);
2704 	}
2705 }
2706 
2707 /*
2708  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2709  * @h: struct hstate old page belongs to
2710  * @old_page: Old page to dissolve
2711  * @list: List to isolate the page in case we need to
2712  * Returns 0 on success, otherwise a negated error.
2713  */
2714 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2715 					struct list_head *list)
2716 {
2717 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2718 	int nid = page_to_nid(old_page);
2719 	bool alloc_retry = false;
2720 	struct page *new_page;
2721 	int ret = 0;
2722 
2723 	/*
2724 	 * Before dissolving the page, we need to allocate a new one for the
2725 	 * pool to remain stable.  Here, we allocate the page and 'prep' it
2726 	 * by doing everything but actually updating counters and adding to
2727 	 * the pool.  This simplifies and let us do most of the processing
2728 	 * the pool.  This simplifies things and lets us do most of the processing
2729 	 */
2730 alloc_retry:
2731 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2732 	if (!new_page)
2733 		return -ENOMEM;
2734 	/*
2735 	 * If all goes well, this page will be directly added to the free
2736 	 * list in the pool.  For this the ref count needs to be zero.
2737 	 * Attempt to drop now, and retry once if needed.  It is VERY
2738 	 * unlikely there is another ref on the page.
2739 	 *
2740 	 * If someone else has a reference to the page, it will be freed
2741 	 * when they drop their ref.  Abuse temporary page flag to accomplish
2742 	 * this.  Retry once if there is an inflated ref count.
2743 	 */
2744 	SetHPageTemporary(new_page);
2745 	if (!put_page_testzero(new_page)) {
2746 		if (alloc_retry)
2747 			return -EBUSY;
2748 
2749 		alloc_retry = true;
2750 		goto alloc_retry;
2751 	}
2752 	ClearHPageTemporary(new_page);
2753 
2754 	__prep_new_huge_page(h, new_page);
2755 
2756 retry:
2757 	spin_lock_irq(&hugetlb_lock);
2758 	if (!PageHuge(old_page)) {
2759 		/*
2760 		 * Freed from under us. Drop new_page too.
2761 		 */
2762 		goto free_new;
2763 	} else if (page_count(old_page)) {
2764 		/*
2765 		 * Someone has grabbed the page, try to isolate it here.
2766 		 * Fail with -EBUSY if not possible.
2767 		 */
2768 		spin_unlock_irq(&hugetlb_lock);
2769 		ret = isolate_hugetlb(old_page, list);
2770 		spin_lock_irq(&hugetlb_lock);
2771 		goto free_new;
2772 	} else if (!HPageFreed(old_page)) {
2773 		/*
2774 		 * Page's refcount is 0 but it has not been enqueued in the
2775 		 * freelist yet. Race window is small, so we can succeed here if
2776 		 * we retry.
2777 		 */
2778 		spin_unlock_irq(&hugetlb_lock);
2779 		cond_resched();
2780 		goto retry;
2781 	} else {
2782 		/*
2783 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2784 		 * the freelist and decrease the counters. These will be
2785 		 * incremented again when calling __prep_account_new_huge_page()
2786 		 * and enqueue_huge_page() for new_page. The counters will remain
2787 		 * stable since this happens under the lock.
2788 		 */
2789 		remove_hugetlb_page(h, old_page, false);
2790 
2791 		/*
2792 		 * Ref count on new page is already zero as it was dropped
2793 		 * earlier.  It can be directly added to the pool free list.
2794 		 */
2795 		__prep_account_new_huge_page(h, nid);
2796 		enqueue_huge_page(h, new_page);
2797 
2798 		/*
2799 		 * Pages have been replaced, we can safely free the old one.
2800 		 */
2801 		spin_unlock_irq(&hugetlb_lock);
2802 		update_and_free_page(h, old_page, false);
2803 	}
2804 
2805 	return ret;
2806 
2807 free_new:
2808 	spin_unlock_irq(&hugetlb_lock);
2809 	/* Page has a zero ref count, but needs a ref to be freed */
2810 	set_page_refcounted(new_page);
2811 	update_and_free_page(h, new_page, false);
2812 
2813 	return ret;
2814 }
2815 
2816 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2817 {
2818 	struct hstate *h;
2819 	struct page *head;
2820 	int ret = -EBUSY;
2821 
2822 	/*
2823 	 * The page might have been dissolved from under our feet, so make sure
2824 	 * to carefully check the state under the lock.
2825 	 * Return success when racing as if we dissolved the page ourselves.
2826 	 */
2827 	spin_lock_irq(&hugetlb_lock);
2828 	if (PageHuge(page)) {
2829 		head = compound_head(page);
2830 		h = page_hstate(head);
2831 	} else {
2832 		spin_unlock_irq(&hugetlb_lock);
2833 		return 0;
2834 	}
2835 	spin_unlock_irq(&hugetlb_lock);
2836 
2837 	/*
2838 	 * Fence off gigantic pages as there is a cyclic dependency between
2839 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2840 	 * of bailing out right away without further retrying.
2841 	 */
2842 	if (hstate_is_gigantic(h))
2843 		return -ENOMEM;
2844 
2845 	if (page_count(head) && !isolate_hugetlb(head, list))
2846 		ret = 0;
2847 	else if (!page_count(head))
2848 		ret = alloc_and_dissolve_huge_page(h, head, list);
2849 
2850 	return ret;
2851 }
2852 
2853 struct page *alloc_huge_page(struct vm_area_struct *vma,
2854 				    unsigned long addr, int avoid_reserve)
2855 {
2856 	struct hugepage_subpool *spool = subpool_vma(vma);
2857 	struct hstate *h = hstate_vma(vma);
2858 	struct page *page;
2859 	long map_chg, map_commit;
2860 	long gbl_chg;
2861 	int ret, idx;
2862 	struct hugetlb_cgroup *h_cg;
2863 	bool deferred_reserve;
2864 
2865 	idx = hstate_index(h);
2866 	/*
2867 	 * Examine the region/reserve map to determine if the process
2868 	 * has a reservation for the page to be allocated.  A return
2869 	 * code of zero indicates a reservation exists (no change).
2870 	 */
2871 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2872 	if (map_chg < 0)
2873 		return ERR_PTR(-ENOMEM);
2874 
2875 	/*
2876 	 * Processes that did not create the mapping will have no
2877 	 * reserves as indicated by the region/reserve map. Check
2878 	 * that the allocation will not exceed the subpool limit.
2879 	 * Allocations for MAP_NORESERVE mappings also need to be
2880 	 * checked against any subpool limit.
2881 	 */
2882 	if (map_chg || avoid_reserve) {
2883 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2884 		if (gbl_chg < 0) {
2885 			vma_end_reservation(h, vma, addr);
2886 			return ERR_PTR(-ENOSPC);
2887 		}
2888 
2889 		/*
2890 		 * Even though there was no reservation in the region/reserve
2891 		 * map, there could be reservations associated with the
2892 		 * subpool that can be used.  This would be indicated if the
2893 		 * return value of hugepage_subpool_get_pages() is zero.
2894 		 * However, if avoid_reserve is specified we still avoid even
2895 		 * the subpool reservations.
2896 		 */
2897 		if (avoid_reserve)
2898 			gbl_chg = 1;
2899 	}
2900 
2901 	/* If this allocation is not consuming a reservation, charge it now.
2902 	 */
2903 	deferred_reserve = map_chg || avoid_reserve;
2904 	if (deferred_reserve) {
2905 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2906 			idx, pages_per_huge_page(h), &h_cg);
2907 		if (ret)
2908 			goto out_subpool_put;
2909 	}
2910 
2911 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2912 	if (ret)
2913 		goto out_uncharge_cgroup_reservation;
2914 
2915 	spin_lock_irq(&hugetlb_lock);
2916 	/*
2917 	 * gbl_chg is passed to indicate whether or not a page must be taken
2918 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2919 	 * a reservation exists for the allocation.
2920 	 */
2921 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2922 	if (!page) {
2923 		spin_unlock_irq(&hugetlb_lock);
2924 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2925 		if (!page)
2926 			goto out_uncharge_cgroup;
2927 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2928 			SetHPageRestoreReserve(page);
2929 			h->resv_huge_pages--;
2930 		}
2931 		spin_lock_irq(&hugetlb_lock);
2932 		list_add(&page->lru, &h->hugepage_activelist);
2933 		/* Fall through */
2934 	}
2935 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2936 	/* If allocation is not consuming a reservation, also store the
2937 	 * hugetlb_cgroup pointer on the page.
2938 	 */
2939 	if (deferred_reserve) {
2940 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2941 						  h_cg, page);
2942 	}
2943 
2944 	spin_unlock_irq(&hugetlb_lock);
2945 
2946 	hugetlb_set_page_subpool(page, spool);
2947 
2948 	map_commit = vma_commit_reservation(h, vma, addr);
2949 	if (unlikely(map_chg > map_commit)) {
2950 		/*
2951 		 * The page was added to the reservation map between
2952 		 * vma_needs_reservation and vma_commit_reservation.
2953 		 * This indicates a race with hugetlb_reserve_pages.
2954 		 * Adjust for the subpool count incremented above AND
2955 		 * in hugetlb_reserve_pages for the same page.  Also,
2956 		 * the reservation count added in hugetlb_reserve_pages
2957 		 * no longer applies.
2958 		 */
2959 		long rsv_adjust;
2960 
2961 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2962 		hugetlb_acct_memory(h, -rsv_adjust);
2963 		if (deferred_reserve)
2964 			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2965 					pages_per_huge_page(h), page);
2966 	}
2967 	return page;
2968 
2969 out_uncharge_cgroup:
2970 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2971 out_uncharge_cgroup_reservation:
2972 	if (deferred_reserve)
2973 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2974 						    h_cg);
2975 out_subpool_put:
2976 	if (map_chg || avoid_reserve)
2977 		hugepage_subpool_put_pages(spool, 1);
2978 	vma_end_reservation(h, vma, addr);
2979 	return ERR_PTR(-ENOSPC);
2980 }
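
/*
 * Illustrative sketch, not part of this file: callers pair alloc_huge_page()
 * with restore_reserve_on_error() when a later step fails, keeping the
 * reserve map consistent with the global reserve count adjustments made by
 * free_huge_page() (see the comment above restore_reserve_on_error()).
 * 'later_step_failed' stands in for whatever follow-up work might fail.
 */
static inline struct page *example_alloc_for_fault(struct vm_area_struct *vma,
		unsigned long addr, bool later_step_failed)
{
	struct page *page = alloc_huge_page(vma, addr, 0);

	if (IS_ERR(page))
		return page;

	if (later_step_failed) {
		restore_reserve_on_error(hstate_vma(vma), vma, addr, page);
		put_page(page);
		return ERR_PTR(-ENOMEM);
	}

	return page;
}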
2981 
2982 int alloc_bootmem_huge_page(struct hstate *h, int nid)
2983 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2984 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
2985 {
2986 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
2987 	int nr_nodes, node;
2988 
2989 	/* do node specific alloc */
2990 	if (nid != NUMA_NO_NODE) {
2991 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
2992 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
2993 		if (!m)
2994 			return 0;
2995 		goto found;
2996 	}
2997 	/* allocate from next node when distributing huge pages */
2998 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2999 		m = memblock_alloc_try_nid_raw(
3000 				huge_page_size(h), huge_page_size(h),
3001 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3002 		/*
3003 		 * Use the beginning of the huge page to store the
3004 		 * huge_bootmem_page struct (until gather_bootmem
3005 		 * puts them into the mem_map).
3006 		 */
3007 		if (!m)
3008 			return 0;
3009 		goto found;
3010 	}
3011 
3012 found:
3013 	/* Put them into a private list first because mem_map is not up yet */
3014 	INIT_LIST_HEAD(&m->list);
3015 	list_add(&m->list, &huge_boot_pages);
3016 	m->hstate = h;
3017 	return 1;
3018 }
3019 
3020 /*
3021  * Put bootmem huge pages into the standard lists after mem_map is up.
3022  * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
3023  */
3024 static void __init gather_bootmem_prealloc(void)
3025 {
3026 	struct huge_bootmem_page *m;
3027 
3028 	list_for_each_entry(m, &huge_boot_pages, list) {
3029 		struct page *page = virt_to_page(m);
3030 		struct hstate *h = m->hstate;
3031 
3032 		VM_BUG_ON(!hstate_is_gigantic(h));
3033 		WARN_ON(page_count(page) != 1);
3034 		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3035 			WARN_ON(PageReserved(page));
3036 			prep_new_huge_page(h, page, page_to_nid(page));
3037 			put_page(page); /* add to the hugepage allocator */
3038 		} else {
3039 			/* VERY unlikely inflated ref count on a tail page */
3040 			free_gigantic_page(page, huge_page_order(h));
3041 		}
3042 
3043 		/*
3044 		 * We need to restore the 'stolen' pages to totalram_pages
3045 		 * in order to fix confusing memory reports from free(1) and
3046 		 * other side-effects, like CommitLimit going negative.
3047 		 */
3048 		adjust_managed_page_count(page, pages_per_huge_page(h));
3049 		cond_resched();
3050 	}
3051 }
3052 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3053 {
3054 	unsigned long i;
3055 	char buf[32];
3056 
3057 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3058 		if (hstate_is_gigantic(h)) {
3059 			if (!alloc_bootmem_huge_page(h, nid))
3060 				break;
3061 		} else {
3062 			struct page *page;
3063 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3064 
3065 			page = alloc_fresh_huge_page(h, gfp_mask, nid,
3066 					&node_states[N_MEMORY], NULL);
3067 			if (!page)
3068 				break;
3069 			put_page(page); /* free it into the hugepage allocator */
3070 		}
3071 		cond_resched();
3072 	}
3073 	if (i == h->max_huge_pages_node[nid])
3074 		return;
3075 
3076 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3077 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3078 		h->max_huge_pages_node[nid], buf, nid, i);
3079 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3080 	h->max_huge_pages_node[nid] = i;
3081 }
3082 
3083 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3084 {
3085 	unsigned long i;
3086 	nodemask_t *node_alloc_noretry;
3087 	bool node_specific_alloc = false;
3088 
3089 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3090 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3091 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3092 		return;
3093 	}
3094 
3095 	/* do node specific alloc */
3096 	for_each_online_node(i) {
3097 		if (h->max_huge_pages_node[i] > 0) {
3098 			hugetlb_hstate_alloc_pages_onenode(h, i);
3099 			node_specific_alloc = true;
3100 		}
3101 	}
3102 
3103 	if (node_specific_alloc)
3104 		return;
3105 
3106 	/* below will do all node balanced alloc */
3107 	if (!hstate_is_gigantic(h)) {
3108 		/*
3109 		 * Bit mask controlling how hard we retry per-node allocations.
3110 		 * Ignore errors as lower level routines can deal with
3111 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3112 		 * time, we are likely in bigger trouble.
3113 		 */
3114 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3115 						GFP_KERNEL);
3116 	} else {
3117 		/* allocations done at boot time */
3118 		node_alloc_noretry = NULL;
3119 	}
3120 
3121 	/* bit mask controlling how hard we retry per-node allocations */
3122 	if (node_alloc_noretry)
3123 		nodes_clear(*node_alloc_noretry);
3124 
3125 	for (i = 0; i < h->max_huge_pages; ++i) {
3126 		if (hstate_is_gigantic(h)) {
3127 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3128 				break;
3129 		} else if (!alloc_pool_huge_page(h,
3130 					 &node_states[N_MEMORY],
3131 					 node_alloc_noretry))
3132 			break;
3133 		cond_resched();
3134 	}
3135 	if (i < h->max_huge_pages) {
3136 		char buf[32];
3137 
3138 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3139 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3140 			h->max_huge_pages, buf, i);
3141 		h->max_huge_pages = i;
3142 	}
3143 	kfree(node_alloc_noretry);
3144 }
3145 
3146 static void __init hugetlb_init_hstates(void)
3147 {
3148 	struct hstate *h, *h2;
3149 
3150 	for_each_hstate(h) {
3151 		if (minimum_order > huge_page_order(h))
3152 			minimum_order = huge_page_order(h);
3153 
3154 		/* oversize hugepages were init'ed in early boot */
3155 		if (!hstate_is_gigantic(h))
3156 			hugetlb_hstate_alloc_pages(h);
3157 
3158 		/*
3159 		 * Set demote order for each hstate.  Note that
3160 		 * h->demote_order is initially 0.
3161 		 * - We can not demote gigantic pages if runtime freeing
3162 		 *   is not supported, so skip this.
3163 		 * - If CMA allocation is possible, we can not demote
3164 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3165 		 */
3166 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3167 			continue;
3168 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3169 			continue;
3170 		for_each_hstate(h2) {
3171 			if (h2 == h)
3172 				continue;
3173 			if (h2->order < h->order &&
3174 			    h2->order > h->demote_order)
3175 				h->demote_order = h2->order;
3176 		}
3177 	}
3178 	VM_BUG_ON(minimum_order == UINT_MAX);
3179 }
3180 
3181 static void __init report_hugepages(void)
3182 {
3183 	struct hstate *h;
3184 
3185 	for_each_hstate(h) {
3186 		char buf[32];
3187 
3188 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3189 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
3190 			buf, h->free_huge_pages);
3191 	}
3192 }
3193 
3194 #ifdef CONFIG_HIGHMEM
3195 static void try_to_free_low(struct hstate *h, unsigned long count,
3196 						nodemask_t *nodes_allowed)
3197 {
3198 	int i;
3199 	LIST_HEAD(page_list);
3200 
3201 	lockdep_assert_held(&hugetlb_lock);
3202 	if (hstate_is_gigantic(h))
3203 		return;
3204 
3205 	/*
3206 	 * Collect pages to be freed on a list, and free after dropping lock
3207 	 */
3208 	for_each_node_mask(i, *nodes_allowed) {
3209 		struct page *page, *next;
3210 		struct list_head *freel = &h->hugepage_freelists[i];
3211 		list_for_each_entry_safe(page, next, freel, lru) {
3212 			if (count >= h->nr_huge_pages)
3213 				goto out;
3214 			if (PageHighMem(page))
3215 				continue;
3216 			remove_hugetlb_page(h, page, false);
3217 			list_add(&page->lru, &page_list);
3218 		}
3219 	}
3220 
3221 out:
3222 	spin_unlock_irq(&hugetlb_lock);
3223 	update_and_free_pages_bulk(h, &page_list);
3224 	spin_lock_irq(&hugetlb_lock);
3225 }
3226 #else
3227 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3228 						nodemask_t *nodes_allowed)
3229 {
3230 }
3231 #endif
3232 
3233 /*
3234  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3235  * balanced by operating on them in a round-robin fashion.
3236  * Returns 1 if an adjustment was made.
3237  */
3238 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3239 				int delta)
3240 {
3241 	int nr_nodes, node;
3242 
3243 	lockdep_assert_held(&hugetlb_lock);
3244 	VM_BUG_ON(delta != -1 && delta != 1);
3245 
3246 	if (delta < 0) {
3247 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3248 			if (h->surplus_huge_pages_node[node])
3249 				goto found;
3250 		}
3251 	} else {
3252 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3253 			if (h->surplus_huge_pages_node[node] <
3254 					h->nr_huge_pages_node[node])
3255 				goto found;
3256 		}
3257 	}
3258 	return 0;
3259 
3260 found:
3261 	h->surplus_huge_pages += delta;
3262 	h->surplus_huge_pages_node[node] += delta;
3263 	return 1;
3264 }
3265 
3266 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3267 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3268 			      nodemask_t *nodes_allowed)
3269 {
3270 	unsigned long min_count, ret;
3271 	struct page *page;
3272 	LIST_HEAD(page_list);
3273 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3274 
3275 	/*
3276 	 * Bit mask controlling how hard we retry per-node allocations.
3277 	 * If we can not allocate the bit mask, do not attempt to allocate
3278 	 * the requested huge pages.
3279 	 */
3280 	if (node_alloc_noretry)
3281 		nodes_clear(*node_alloc_noretry);
3282 	else
3283 		return -ENOMEM;
3284 
3285 	/*
3286 	 * resize_lock mutex prevents concurrent adjustments to number of
3287 	 * pages in hstate via the proc/sysfs interfaces.
3288 	 */
3289 	mutex_lock(&h->resize_lock);
3290 	flush_free_hpage_work(h);
3291 	spin_lock_irq(&hugetlb_lock);
3292 
3293 	/*
3294 	 * Check for a node specific request.
3295 	 * Changing node specific huge page count may require a corresponding
3296 	 * change to the global count.  In any case, the passed node mask
3297 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3298 	 */
3299 	if (nid != NUMA_NO_NODE) {
3300 		unsigned long old_count = count;
3301 
3302 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3303 		/*
3304 		 * User may have specified a large count value which caused the
3305 		 * above calculation to overflow.  In this case, they wanted
3306 		 * to allocate as many huge pages as possible.  Set count to
3307 		 * largest possible value to align with their intention.
3308 		 */
3309 		if (count < old_count)
3310 			count = ULONG_MAX;
3311 	}
3312 
3313 	/*
3314 	 * Runtime allocation of gigantic pages depends on the capability for
3315 	 * large page range allocation.
3316 	 * If the system does not provide this feature, return an error when
3317 	 * the user tries to allocate gigantic pages but let the user free the
3318 	 * boottime allocated gigantic pages.
3319 	 */
3320 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3321 		if (count > persistent_huge_pages(h)) {
3322 			spin_unlock_irq(&hugetlb_lock);
3323 			mutex_unlock(&h->resize_lock);
3324 			NODEMASK_FREE(node_alloc_noretry);
3325 			return -EINVAL;
3326 		}
3327 		/* Fall through to decrease pool */
3328 	}
3329 
3330 	/*
3331 	 * Increase the pool size
3332 	 * First take pages out of surplus state.  Then make up the
3333 	 * remaining difference by allocating fresh huge pages.
3334 	 *
3335 	 * We might race with alloc_surplus_huge_page() here and be unable
3336 	 * to convert a surplus huge page to a normal huge page. That is
3337 	 * not critical, though, it just means the overall size of the
3338 	 * pool might be one hugepage larger than it needs to be, but
3339 	 * within all the constraints specified by the sysctls.
3340 	 */
3341 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3342 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3343 			break;
3344 	}
3345 
3346 	while (count > persistent_huge_pages(h)) {
3347 		/*
3348 		 * If this allocation races such that we no longer need the
3349 		 * page, free_huge_page will handle it by freeing the page
3350 		 * and reducing the surplus.
3351 		 */
3352 		spin_unlock_irq(&hugetlb_lock);
3353 
3354 		/* yield cpu to avoid soft lockup */
3355 		cond_resched();
3356 
3357 		ret = alloc_pool_huge_page(h, nodes_allowed,
3358 						node_alloc_noretry);
3359 		spin_lock_irq(&hugetlb_lock);
3360 		if (!ret)
3361 			goto out;
3362 
3363 		/* Bail for signals. Probably ctrl-c from user */
3364 		if (signal_pending(current))
3365 			goto out;
3366 	}
3367 
3368 	/*
3369 	 * Decrease the pool size
3370 	 * First return free pages to the buddy allocator (being careful
3371 	 * to keep enough around to satisfy reservations).  Then place
3372 	 * pages into surplus state as needed so the pool will shrink
3373 	 * to the desired size as pages become free.
3374 	 *
3375 	 * By placing pages into the surplus state independent of the
3376 	 * overcommit value, we are allowing the surplus pool size to
3377 	 * exceed overcommit. There are few sane options here. Since
3378 	 * alloc_surplus_huge_page() is checking the global counter,
3379 	 * though, we'll note that we're not allowed to exceed surplus
3380 	 * and won't grow the pool anywhere else. Not until one of the
3381 	 * sysctls are changed, or the surplus pages go out of use.
3382 	 */
3383 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3384 	min_count = max(count, min_count);
3385 	try_to_free_low(h, min_count, nodes_allowed);
3386 
3387 	/*
3388 	 * Collect pages to be removed on list without dropping lock
3389 	 */
3390 	while (min_count < persistent_huge_pages(h)) {
3391 		page = remove_pool_huge_page(h, nodes_allowed, 0);
3392 		if (!page)
3393 			break;
3394 
3395 		list_add(&page->lru, &page_list);
3396 	}
3397 	/* free the pages after dropping lock */
3398 	spin_unlock_irq(&hugetlb_lock);
3399 	update_and_free_pages_bulk(h, &page_list);
3400 	flush_free_hpage_work(h);
3401 	spin_lock_irq(&hugetlb_lock);
3402 
3403 	while (count < persistent_huge_pages(h)) {
3404 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3405 			break;
3406 	}
3407 out:
3408 	h->max_huge_pages = persistent_huge_pages(h);
3409 	spin_unlock_irq(&hugetlb_lock);
3410 	mutex_unlock(&h->resize_lock);
3411 
3412 	NODEMASK_FREE(node_alloc_noretry);
3413 
3414 	return 0;
3415 }
3416 
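/*
 * Demote one free huge page from hstate @h into huge pages of
 * h->demote_order.  Called with hugetlb_lock held; the lock is dropped
 * while vmemmap pages are restored and the compound page is split, and
 * re-taken before returning.  The resulting smaller huge pages are fed
 * to the target hstate's pool via prep_new_huge_page()/put_page().
 */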
3417 static int demote_free_huge_page(struct hstate *h, struct page *page)
3418 {
3419 	int i, nid = page_to_nid(page);
3420 	struct hstate *target_hstate;
3421 	int rc = 0;
3422 
3423 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3424 
3425 	remove_hugetlb_page_for_demote(h, page, false);
3426 	spin_unlock_irq(&hugetlb_lock);
3427 
3428 	rc = hugetlb_vmemmap_alloc(h, page);
3429 	if (rc) {
3430 		/* Allocation of vmemmap failed, we cannot demote the page */
3431 		spin_lock_irq(&hugetlb_lock);
3432 		set_page_refcounted(page);
3433 		add_hugetlb_page(h, page, false);
3434 		return rc;
3435 	}
3436 
3437 	/*
3438 	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
3439 	 * sizes as it will not ref count pages.
3440 	 */
3441 	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
3442 
3443 	/*
3444 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3445 	 * Without the mutex, pages added to target hstate could be marked
3446 	 * as surplus.
3447 	 *
3448 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
3449 	 * use the convention of always taking larger size hstate mutex first.
3450 	 */
3451 	mutex_lock(&target_hstate->resize_lock);
3452 	for (i = 0; i < pages_per_huge_page(h);
3453 				i += pages_per_huge_page(target_hstate)) {
3454 		if (hstate_is_gigantic(target_hstate))
3455 			prep_compound_gigantic_page_for_demote(page + i,
3456 							target_hstate->order);
3457 		else
3458 			prep_compound_page(page + i, target_hstate->order);
3459 		set_page_private(page + i, 0);
3460 		set_page_refcounted(page + i);
3461 		prep_new_huge_page(target_hstate, page + i, nid);
3462 		put_page(page + i);
3463 	}
3464 	mutex_unlock(&target_hstate->resize_lock);
3465 
3466 	spin_lock_irq(&hugetlb_lock);
3467 
3468 	/*
3469 	 * Not absolutely necessary, but for consistency update max_huge_pages
3470 	 * based on pool changes for the demoted page.
3471 	 */
3472 	h->max_huge_pages--;
3473 	target_hstate->max_huge_pages += pages_per_huge_page(h);
3474 
3475 	return rc;
3476 }
3477 
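/*
 * Demote the first suitable free huge page found on the allowed nodes.
 * Returns the result of demote_free_huge_page(), -EINVAL if no demote
 * order is configured, or -EBUSY if every free page is HWPoisoned (so
 * the caller will not retry).
 */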
3478 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3479 	__must_hold(&hugetlb_lock)
3480 {
3481 	int nr_nodes, node;
3482 	struct page *page;
3483 
3484 	lockdep_assert_held(&hugetlb_lock);
3485 
3486 	/* We should never get here if no demote order */
3487 	if (!h->demote_order) {
3488 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3489 		return -EINVAL;		/* internal error */
3490 	}
3491 
3492 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3493 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
3494 			if (PageHWPoison(page))
3495 				continue;
3496 
3497 			return demote_free_huge_page(h, page);
3498 		}
3499 	}
3500 
3501 	/*
3502 	 * The only way to get here is if all pages on the free lists are poisoned.
3503 	 * Return -EBUSY so that caller will not retry.
3504 	 */
3505 	return -EBUSY;
3506 }
3507 
3508 #define HSTATE_ATTR_RO(_name) \
3509 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3510 
3511 #define HSTATE_ATTR_WO(_name) \
3512 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3513 
3514 #define HSTATE_ATTR(_name) \
3515 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
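/*
 * For example, HSTATE_ATTR(nr_hugepages) declares nr_hugepages_attr and,
 * via __ATTR_RW(), wires it to nr_hugepages_show()/nr_hugepages_store().
 */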
3516 
3517 static struct kobject *hugepages_kobj;
3518 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3519 
3520 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3521 
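/*
 * Map a hugepage sysfs kobject back to its hstate.  Global attribute
 * kobjects are found in hstate_kobjs[] and report NUMA_NO_NODE via
 * @nidp; anything else is resolved as a per-node kobject by
 * kobj_to_node_hstate().
 */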
3522 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3523 {
3524 	int i;
3525 
3526 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
3527 		if (hstate_kobjs[i] == kobj) {
3528 			if (nidp)
3529 				*nidp = NUMA_NO_NODE;
3530 			return &hstates[i];
3531 		}
3532 
3533 	return kobj_to_node_hstate(kobj, nidp);
3534 }
3535 
3536 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3537 					struct kobj_attribute *attr, char *buf)
3538 {
3539 	struct hstate *h;
3540 	unsigned long nr_huge_pages;
3541 	int nid;
3542 
3543 	h = kobj_to_hstate(kobj, &nid);
3544 	if (nid == NUMA_NO_NODE)
3545 		nr_huge_pages = h->nr_huge_pages;
3546 	else
3547 		nr_huge_pages = h->nr_huge_pages_node[nid];
3548 
3549 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3550 }
3551 
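/*
 * Common handler for nr_hugepages writes.  A write to the global
 * attribute resizes the pool across all memory nodes (optionally
 * constrained by the writer's mempolicy); a write to a node attribute
 * resizes only that node.  Typical usage (sizes and counts are only
 * examples):
 *
 *   echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   echo 256 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */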
3552 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3553 					   struct hstate *h, int nid,
3554 					   unsigned long count, size_t len)
3555 {
3556 	int err;
3557 	nodemask_t nodes_allowed, *n_mask;
3558 
3559 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3560 		return -EINVAL;
3561 
3562 	if (nid == NUMA_NO_NODE) {
3563 		/*
3564 		 * global hstate attribute
3565 		 */
3566 		if (!(obey_mempolicy &&
3567 				init_nodemask_of_mempolicy(&nodes_allowed)))
3568 			n_mask = &node_states[N_MEMORY];
3569 		else
3570 			n_mask = &nodes_allowed;
3571 	} else {
3572 		/*
3573 		 * Node specific request.  count adjustment happens in
3574 		 * set_max_huge_pages() after acquiring hugetlb_lock.
3575 		 */
3576 		init_nodemask_of_node(&nodes_allowed, nid);
3577 		n_mask = &nodes_allowed;
3578 	}
3579 
3580 	err = set_max_huge_pages(h, count, nid, n_mask);
3581 
3582 	return err ? err : len;
3583 }
3584 
3585 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3586 					 struct kobject *kobj, const char *buf,
3587 					 size_t len)
3588 {
3589 	struct hstate *h;
3590 	unsigned long count;
3591 	int nid;
3592 	int err;
3593 
3594 	err = kstrtoul(buf, 10, &count);
3595 	if (err)
3596 		return err;
3597 
3598 	h = kobj_to_hstate(kobj, &nid);
3599 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3600 }
3601 
3602 static ssize_t nr_hugepages_show(struct kobject *kobj,
3603 				       struct kobj_attribute *attr, char *buf)
3604 {
3605 	return nr_hugepages_show_common(kobj, attr, buf);
3606 }
3607 
3608 static ssize_t nr_hugepages_store(struct kobject *kobj,
3609 	       struct kobj_attribute *attr, const char *buf, size_t len)
3610 {
3611 	return nr_hugepages_store_common(false, kobj, buf, len);
3612 }
3613 HSTATE_ATTR(nr_hugepages);
3614 
3615 #ifdef CONFIG_NUMA
3616 
3617 /*
3618  * hstate attribute for optionally mempolicy-based constraint on persistent
3619  * huge page alloc/free.
3620  */
3621 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3622 					   struct kobj_attribute *attr,
3623 					   char *buf)
3624 {
3625 	return nr_hugepages_show_common(kobj, attr, buf);
3626 }
3627 
3628 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3629 	       struct kobj_attribute *attr, const char *buf, size_t len)
3630 {
3631 	return nr_hugepages_store_common(true, kobj, buf, len);
3632 }
3633 HSTATE_ATTR(nr_hugepages_mempolicy);
3634 #endif
3635 
3636 
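/*
 * nr_overcommit_hugepages limits how many surplus huge pages may be
 * allocated from the buddy allocator once the persistent pool is
 * exhausted.  Not supported for gigantic page sizes.  For example:
 *
 *   echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 */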
3637 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3638 					struct kobj_attribute *attr, char *buf)
3639 {
3640 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3641 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3642 }
3643 
3644 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3645 		struct kobj_attribute *attr, const char *buf, size_t count)
3646 {
3647 	int err;
3648 	unsigned long input;
3649 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3650 
3651 	if (hstate_is_gigantic(h))
3652 		return -EINVAL;
3653 
3654 	err = kstrtoul(buf, 10, &input);
3655 	if (err)
3656 		return err;
3657 
3658 	spin_lock_irq(&hugetlb_lock);
3659 	h->nr_overcommit_huge_pages = input;
3660 	spin_unlock_irq(&hugetlb_lock);
3661 
3662 	return count;
3663 }
3664 HSTATE_ATTR(nr_overcommit_hugepages);
3665 
3666 static ssize_t free_hugepages_show(struct kobject *kobj,
3667 					struct kobj_attribute *attr, char *buf)
3668 {
3669 	struct hstate *h;
3670 	unsigned long free_huge_pages;
3671 	int nid;
3672 
3673 	h = kobj_to_hstate(kobj, &nid);
3674 	if (nid == NUMA_NO_NODE)
3675 		free_huge_pages = h->free_huge_pages;
3676 	else
3677 		free_huge_pages = h->free_huge_pages_node[nid];
3678 
3679 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3680 }
3681 HSTATE_ATTR_RO(free_hugepages);
3682 
3683 static ssize_t resv_hugepages_show(struct kobject *kobj,
3684 					struct kobj_attribute *attr, char *buf)
3685 {
3686 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3687 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3688 }
3689 HSTATE_ATTR_RO(resv_hugepages);
3690 
3691 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3692 					struct kobj_attribute *attr, char *buf)
3693 {
3694 	struct hstate *h;
3695 	unsigned long surplus_huge_pages;
3696 	int nid;
3697 
3698 	h = kobj_to_hstate(kobj, &nid);
3699 	if (nid == NUMA_NO_NODE)
3700 		surplus_huge_pages = h->surplus_huge_pages;
3701 	else
3702 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
3703 
3704 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3705 }
3706 HSTATE_ATTR_RO(surplus_hugepages);
3707 
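/*
 * Writing N to the demote attribute demotes up to N free huge pages of
 * this size into pages of demote_size, for example:
 *
 *   echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 *
 * Reserved pages are not demoted, and the loop stops early on error or
 * when no demotable free pages remain.
 */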
3708 static ssize_t demote_store(struct kobject *kobj,
3709 	       struct kobj_attribute *attr, const char *buf, size_t len)
3710 {
3711 	unsigned long nr_demote;
3712 	unsigned long nr_available;
3713 	nodemask_t nodes_allowed, *n_mask;
3714 	struct hstate *h;
3715 	int err = 0;
3716 	int nid;
3717 
3718 	err = kstrtoul(buf, 10, &nr_demote);
3719 	if (err)
3720 		return err;
3721 	h = kobj_to_hstate(kobj, &nid);
3722 
3723 	if (nid != NUMA_NO_NODE) {
3724 		init_nodemask_of_node(&nodes_allowed, nid);
3725 		n_mask = &nodes_allowed;
3726 	} else {
3727 		n_mask = &node_states[N_MEMORY];
3728 	}
3729 
3730 	/* Synchronize with other sysfs operations modifying huge pages */
3731 	mutex_lock(&h->resize_lock);
3732 	spin_lock_irq(&hugetlb_lock);
3733 
3734 	while (nr_demote) {
3735 		/*
3736 		 * Check for available pages to demote each time through the
3737 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
3738 		 */
3739 		if (nid != NUMA_NO_NODE)
3740 			nr_available = h->free_huge_pages_node[nid];
3741 		else
3742 			nr_available = h->free_huge_pages;
3743 		nr_available -= h->resv_huge_pages;
3744 		if (!nr_available)
3745 			break;
3746 
3747 		err = demote_pool_huge_page(h, n_mask);
3748 		if (err)
3749 			break;
3750 
3751 		nr_demote--;
3752 	}
3753 
3754 	spin_unlock_irq(&hugetlb_lock);
3755 	mutex_unlock(&h->resize_lock);
3756 
3757 	if (err)
3758 		return err;
3759 	return len;
3760 }
3761 HSTATE_ATTR_WO(demote);
3762 
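/*
 * demote_size selects the target size used by the demote attribute and
 * must name an existing hstate smaller than this one, e.g.:
 *
 *   echo 2048kB > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 */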
3763 static ssize_t demote_size_show(struct kobject *kobj,
3764 					struct kobj_attribute *attr, char *buf)
3765 {
3766 	int nid;
3767 	struct hstate *h = kobj_to_hstate(kobj, &nid);
3768 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3769 
3770 	return sysfs_emit(buf, "%lukB\n", demote_size);
3771 }
3772 
3773 static ssize_t demote_size_store(struct kobject *kobj,
3774 					struct kobj_attribute *attr,
3775 					const char *buf, size_t count)
3776 {
3777 	struct hstate *h, *demote_hstate;
3778 	unsigned long demote_size;
3779 	unsigned int demote_order;
3780 	int nid;
3781 
3782 	demote_size = (unsigned long)memparse(buf, NULL);
3783 
3784 	demote_hstate = size_to_hstate(demote_size);
3785 	if (!demote_hstate)
3786 		return -EINVAL;
3787 	demote_order = demote_hstate->order;
3788 	if (demote_order < HUGETLB_PAGE_ORDER)
3789 		return -EINVAL;
3790 
3791 	/* demote order must be smaller than hstate order */
3792 	h = kobj_to_hstate(kobj, &nid);
3793 	if (demote_order >= h->order)
3794 		return -EINVAL;
3795 
3796 	/* resize_lock synchronizes access to demote size and writes */
3797 	mutex_lock(&h->resize_lock);
3798 	h->demote_order = demote_order;
3799 	mutex_unlock(&h->resize_lock);
3800 
3801 	return count;
3802 }
3803 HSTATE_ATTR(demote_size);
3804 
3805 static struct attribute *hstate_attrs[] = {
3806 	&nr_hugepages_attr.attr,
3807 	&nr_overcommit_hugepages_attr.attr,
3808 	&free_hugepages_attr.attr,
3809 	&resv_hugepages_attr.attr,
3810 	&surplus_hugepages_attr.attr,
3811 #ifdef CONFIG_NUMA
3812 	&nr_hugepages_mempolicy_attr.attr,
3813 #endif
3814 	NULL,
3815 };
3816 
3817 static const struct attribute_group hstate_attr_group = {
3818 	.attrs = hstate_attrs,
3819 };
3820 
3821 static struct attribute *hstate_demote_attrs[] = {
3822 	&demote_size_attr.attr,
3823 	&demote_attr.attr,
3824 	NULL,
3825 };
3826 
3827 static const struct attribute_group hstate_demote_attr_group = {
3828 	.attrs = hstate_demote_attrs,
3829 };
3830 
3831 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3832 				    struct kobject **hstate_kobjs,
3833 				    const struct attribute_group *hstate_attr_group)
3834 {
3835 	int retval;
3836 	int hi = hstate_index(h);
3837 
3838 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3839 	if (!hstate_kobjs[hi])
3840 		return -ENOMEM;
3841 
3842 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3843 	if (retval) {
3844 		kobject_put(hstate_kobjs[hi]);
3845 		hstate_kobjs[hi] = NULL;
3846 	}
3847 
3848 	if (h->demote_order) {
3849 		if (sysfs_create_group(hstate_kobjs[hi],
3850 					&hstate_demote_attr_group))
3851 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
3852 	}
3853 
3854 	return retval;
3855 }
3856 
3857 static void __init hugetlb_sysfs_init(void)
3858 {
3859 	struct hstate *h;
3860 	int err;
3861 
3862 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3863 	if (!hugepages_kobj)
3864 		return;
3865 
3866 	for_each_hstate(h) {
3867 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3868 					 hstate_kobjs, &hstate_attr_group);
3869 		if (err)
3870 			pr_err("HugeTLB: Unable to add hstate %s", h->name);
3871 	}
3872 }
3873 
3874 #ifdef CONFIG_NUMA
3875 
3876 /*
3877  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3878  * with node devices in node_devices[] using a parallel array.  The array
3879  * index of a node device or _hstate == node id.
3880  * This is here to avoid any static dependency of the node device driver, in
3881  * the base kernel, on the hugetlb module.
3882  */
3883 struct node_hstate {
3884 	struct kobject		*hugepages_kobj;
3885 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
3886 };
3887 static struct node_hstate node_hstates[MAX_NUMNODES];
3888 
3889 /*
3890  * A subset of global hstate attributes for node devices
3891  */
3892 static struct attribute *per_node_hstate_attrs[] = {
3893 	&nr_hugepages_attr.attr,
3894 	&free_hugepages_attr.attr,
3895 	&surplus_hugepages_attr.attr,
3896 	NULL,
3897 };
3898 
3899 static const struct attribute_group per_node_hstate_attr_group = {
3900 	.attrs = per_node_hstate_attrs,
3901 };
3902 
3903 /*
3904  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3905  * Returns node id via non-NULL nidp.
3906  */
3907 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3908 {
3909 	int nid;
3910 
3911 	for (nid = 0; nid < nr_node_ids; nid++) {
3912 		struct node_hstate *nhs = &node_hstates[nid];
3913 		int i;
3914 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
3915 			if (nhs->hstate_kobjs[i] == kobj) {
3916 				if (nidp)
3917 					*nidp = nid;
3918 				return &hstates[i];
3919 			}
3920 	}
3921 
3922 	BUG();
3923 	return NULL;
3924 }
3925 
3926 /*
3927  * Unregister hstate attributes from a single node device.
3928  * No-op if no hstate attributes attached.
3929  */
3930 static void hugetlb_unregister_node(struct node *node)
3931 {
3932 	struct hstate *h;
3933 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3934 
3935 	if (!nhs->hugepages_kobj)
3936 		return;		/* no hstate attributes */
3937 
3938 	for_each_hstate(h) {
3939 		int idx = hstate_index(h);
3940 		if (nhs->hstate_kobjs[idx]) {
3941 			kobject_put(nhs->hstate_kobjs[idx]);
3942 			nhs->hstate_kobjs[idx] = NULL;
3943 		}
3944 	}
3945 
3946 	kobject_put(nhs->hugepages_kobj);
3947 	nhs->hugepages_kobj = NULL;
3948 }
3949 
3950 
3951 /*
3952  * Register hstate attributes for a single node device.
3953  * No-op if attributes already registered.
3954  */
3955 static void hugetlb_register_node(struct node *node)
3956 {
3957 	struct hstate *h;
3958 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3959 	int err;
3960 
3961 	if (nhs->hugepages_kobj)
3962 		return;		/* already allocated */
3963 
3964 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3965 							&node->dev.kobj);
3966 	if (!nhs->hugepages_kobj)
3967 		return;
3968 
3969 	for_each_hstate(h) {
3970 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3971 						nhs->hstate_kobjs,
3972 						&per_node_hstate_attr_group);
3973 		if (err) {
3974 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3975 				h->name, node->dev.id);
3976 			hugetlb_unregister_node(node);
3977 			break;
3978 		}
3979 	}
3980 }
3981 
3982 /*
3983  * hugetlb init time:  register hstate attributes for all registered node
3984  * devices of nodes that have memory.  All on-line nodes should have
3985  * registered their associated device by this time.
3986  */
3987 static void __init hugetlb_register_all_nodes(void)
3988 {
3989 	int nid;
3990 
3991 	for_each_node_state(nid, N_MEMORY) {
3992 		struct node *node = node_devices[nid];
3993 		if (node->dev.id == nid)
3994 			hugetlb_register_node(node);
3995 	}
3996 
3997 	/*
3998 	 * Let the node device driver know we're here so it can
3999 	 * [un]register hstate attributes on node hotplug.
4000 	 */
4001 	register_hugetlbfs_with_node(hugetlb_register_node,
4002 				     hugetlb_unregister_node);
4003 }
4004 #else	/* !CONFIG_NUMA */
4005 
4006 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4007 {
4008 	BUG();
4009 	if (nidp)
4010 		*nidp = -1;
4011 	return NULL;
4012 }
4013 
4014 static void hugetlb_register_all_nodes(void) { }
4015 
4016 #endif
4017 
4018 static int __init hugetlb_init(void)
4019 {
4020 	int i;
4021 
4022 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4023 			__NR_HPAGEFLAGS);
4024 
4025 	if (!hugepages_supported()) {
4026 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4027 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4028 		return 0;
4029 	}
4030 
4031 	/*
4032 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4033 	 * architectures depend on setup being done here.
4034 	 */
4035 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4036 	if (!parsed_default_hugepagesz) {
4037 		/*
4038 		 * If we did not parse a default huge page size, set
4039 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4040 		 * number of huge pages for this default size was implicitly
4041 		 * specified, set that here as well.
4042 		 * Note that the implicit setting will overwrite an explicit
4043 		 * setting.  A warning will be printed in this case.
4044 		 */
4045 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4046 		if (default_hstate_max_huge_pages) {
4047 			if (default_hstate.max_huge_pages) {
4048 				char buf[32];
4049 
4050 				string_get_size(huge_page_size(&default_hstate),
4051 					1, STRING_UNITS_2, buf, 32);
4052 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4053 					default_hstate.max_huge_pages, buf);
4054 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4055 					default_hstate_max_huge_pages);
4056 			}
4057 			default_hstate.max_huge_pages =
4058 				default_hstate_max_huge_pages;
4059 
4060 			for_each_online_node(i)
4061 				default_hstate.max_huge_pages_node[i] =
4062 					default_hugepages_in_node[i];
4063 		}
4064 	}
4065 
4066 	hugetlb_cma_check();
4067 	hugetlb_init_hstates();
4068 	gather_bootmem_prealloc();
4069 	report_hugepages();
4070 
4071 	hugetlb_sysfs_init();
4072 	hugetlb_register_all_nodes();
4073 	hugetlb_cgroup_file_init();
4074 
4075 #ifdef CONFIG_SMP
4076 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4077 #else
4078 	num_fault_mutexes = 1;
4079 #endif
4080 	hugetlb_fault_mutex_table =
4081 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4082 			      GFP_KERNEL);
4083 	BUG_ON(!hugetlb_fault_mutex_table);
4084 
4085 	for (i = 0; i < num_fault_mutexes; i++)
4086 		mutex_init(&hugetlb_fault_mutex_table[i]);
4087 	return 0;
4088 }
4089 subsys_initcall(hugetlb_init);
4090 
4091 /* Overwritten by architectures with more huge page sizes */
4092 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4093 {
4094 	return size == HPAGE_SIZE;
4095 }
4096 
4097 void __init hugetlb_add_hstate(unsigned int order)
4098 {
4099 	struct hstate *h;
4100 	unsigned long i;
4101 
4102 	if (size_to_hstate(PAGE_SIZE << order)) {
4103 		return;
4104 	}
4105 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4106 	BUG_ON(order == 0);
4107 	h = &hstates[hugetlb_max_hstate++];
4108 	mutex_init(&h->resize_lock);
4109 	h->order = order;
4110 	h->mask = ~(huge_page_size(h) - 1);
4111 	for (i = 0; i < MAX_NUMNODES; ++i)
4112 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4113 	INIT_LIST_HEAD(&h->hugepage_activelist);
4114 	h->next_nid_to_alloc = first_memory_node;
4115 	h->next_nid_to_free = first_memory_node;
4116 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4117 					huge_page_size(h)/1024);
4118 	hugetlb_vmemmap_init(h);
4119 
4120 	parsed_hstate = h;
4121 }
4122 
4123 bool __init __weak hugetlb_node_alloc_supported(void)
4124 {
4125 	return true;
4126 }
4127 
4128 static void __init hugepages_clear_pages_in_node(void)
4129 {
4130 	if (!hugetlb_max_hstate) {
4131 		default_hstate_max_huge_pages = 0;
4132 		memset(default_hugepages_in_node, 0,
4133 			MAX_NUMNODES * sizeof(unsigned int));
4134 	} else {
4135 		parsed_hstate->max_huge_pages = 0;
4136 		memset(parsed_hstate->max_huge_pages_node, 0,
4137 			MAX_NUMNODES * sizeof(unsigned int));
4138 	}
4139 }
4140 
4141 /*
4142  * hugepages command line processing
4143  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4144  * specification.  If not, ignore the hugepages value.  hugepages can also
4145  * be the first huge page command line option, in which case it implicitly
4146  * specifies the number of huge pages for the default size.
4147  */
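/*
 * Typical command lines (counts and sizes are only examples):
 *
 *   hugepages=1024                   1024 huge pages of the default size
 *   hugepagesz=1G hugepages=16       16 huge pages of 1G each
 *   hugepages=0:512,1:512            node format: 512 pages on node 0
 *                                    and 512 pages on node 1
 */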
4148 static int __init hugepages_setup(char *s)
4149 {
4150 	unsigned long *mhp;
4151 	static unsigned long *last_mhp;
4152 	int node = NUMA_NO_NODE;
4153 	int count;
4154 	unsigned long tmp;
4155 	char *p = s;
4156 
4157 	if (!parsed_valid_hugepagesz) {
4158 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4159 		parsed_valid_hugepagesz = true;
4160 		return 1;
4161 	}
4162 
4163 	/*
4164 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4165 	 * yet, so this hugepages= parameter goes to the "default hstate".
4166 	 * Otherwise, it goes with the previously parsed hugepagesz or
4167 	 * default_hugepagesz.
4168 	 */
4169 	else if (!hugetlb_max_hstate)
4170 		mhp = &default_hstate_max_huge_pages;
4171 	else
4172 		mhp = &parsed_hstate->max_huge_pages;
4173 
4174 	if (mhp == last_mhp) {
4175 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4176 		return 1;
4177 	}
4178 
4179 	while (*p) {
4180 		count = 0;
4181 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4182 			goto invalid;
4183 		/* Parameter is node format */
4184 		if (p[count] == ':') {
4185 			if (!hugetlb_node_alloc_supported()) {
4186 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4187 				return 1;
4188 			}
4189 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4190 				goto invalid;
4191 			node = array_index_nospec(tmp, MAX_NUMNODES);
4192 			p += count + 1;
4193 			/* Parse hugepages */
4194 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4195 				goto invalid;
4196 			if (!hugetlb_max_hstate)
4197 				default_hugepages_in_node[node] = tmp;
4198 			else
4199 				parsed_hstate->max_huge_pages_node[node] = tmp;
4200 			*mhp += tmp;
4201 			/* Go to parse next node */
4202 			if (p[count] == ',')
4203 				p += count + 1;
4204 			else
4205 				break;
4206 		} else {
4207 			if (p != s)
4208 				goto invalid;
4209 			*mhp = tmp;
4210 			break;
4211 		}
4212 	}
4213 
4214 	/*
4215 	 * Global state is always initialized later in hugetlb_init.
4216 	 * But we need to allocate gigantic hstates here early to still
4217 	 * use the bootmem allocator.
4218 	 */
4219 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4220 		hugetlb_hstate_alloc_pages(parsed_hstate);
4221 
4222 	last_mhp = mhp;
4223 
4224 	return 1;
4225 
4226 invalid:
4227 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4228 	hugepages_clear_pages_in_node();
4229 	return 1;
4230 }
4231 __setup("hugepages=", hugepages_setup);
4232 
4233 /*
4234  * hugepagesz command line processing
4235  * A specific huge page size can only be specified once with hugepagesz.
4236  * hugepagesz is followed by hugepages on the command line.  The global
4237  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4238  * hugepagesz argument was valid.
4239  */
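/*
 * For example, "hugepagesz=1G hugepages=16" requests 16 1G pages,
 * provided arch_hugetlb_valid_size() accepts 1G on this architecture.
 */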
4240 static int __init hugepagesz_setup(char *s)
4241 {
4242 	unsigned long size;
4243 	struct hstate *h;
4244 
4245 	parsed_valid_hugepagesz = false;
4246 	size = (unsigned long)memparse(s, NULL);
4247 
4248 	if (!arch_hugetlb_valid_size(size)) {
4249 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4250 		return 1;
4251 	}
4252 
4253 	h = size_to_hstate(size);
4254 	if (h) {
4255 		/*
4256 		 * hstate for this size already exists.  This is normally
4257 		 * an error, but is allowed if the existing hstate is the
4258 		 * default hstate.  More specifically, it is only allowed if
4259 		 * the number of huge pages for the default hstate was not
4260 		 * previously specified.
4261 		 */
4262 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4263 		    default_hstate.max_huge_pages) {
4264 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4265 			return 1;
4266 		}
4267 
4268 		/*
4269 		 * No need to call hugetlb_add_hstate() as hstate already
4270 		 * exists.  But, do set parsed_hstate so that a following
4271 		 * hugepages= parameter will be applied to this hstate.
4272 		 */
4273 		parsed_hstate = h;
4274 		parsed_valid_hugepagesz = true;
4275 		return 1;
4276 	}
4277 
4278 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4279 	parsed_valid_hugepagesz = true;
4280 	return 1;
4281 }
4282 __setup("hugepagesz=", hugepagesz_setup);
4283 
4284 /*
4285  * default_hugepagesz command line input
4286  * Only one instance of default_hugepagesz allowed on command line.
4287  */
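/*
 * For example, "default_hugepagesz=1G hugepages=8" makes 1G the default
 * huge page size and requests 8 such pages at boot.  The default size is
 * the one used when no size is otherwise specified (e.g. by the
 * nr_hugepages sysctl or a hugetlbfs mount without a pagesize option).
 */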
4288 static int __init default_hugepagesz_setup(char *s)
4289 {
4290 	unsigned long size;
4291 	int i;
4292 
4293 	parsed_valid_hugepagesz = false;
4294 	if (parsed_default_hugepagesz) {
4295 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4296 		return 1;
4297 	}
4298 
4299 	size = (unsigned long)memparse(s, NULL);
4300 
4301 	if (!arch_hugetlb_valid_size(size)) {
4302 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4303 		return 1;
4304 	}
4305 
4306 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4307 	parsed_valid_hugepagesz = true;
4308 	parsed_default_hugepagesz = true;
4309 	default_hstate_idx = hstate_index(size_to_hstate(size));
4310 
4311 	/*
4312 	 * The number of default huge pages (for this size) could have been
4313 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4314 	 * then default_hstate_max_huge_pages is set.  If the default huge
4315 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
4316 	 * allocated here from bootmem allocator.
4317 	 */
4318 	if (default_hstate_max_huge_pages) {
4319 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4320 		for_each_online_node(i)
4321 			default_hstate.max_huge_pages_node[i] =
4322 				default_hugepages_in_node[i];
4323 		if (hstate_is_gigantic(&default_hstate))
4324 			hugetlb_hstate_alloc_pages(&default_hstate);
4325 		default_hstate_max_huge_pages = 0;
4326 	}
4327 
4328 	return 1;
4329 }
4330 __setup("default_hugepagesz=", default_hugepagesz_setup);
4331 
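/*
 * Count the free huge pages usable by the current task, i.e. those on
 * nodes permitted by both its cpuset and its memory policy.  Used as a
 * best-effort check when growing hugetlb reservations.
 */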
4332 static unsigned int allowed_mems_nr(struct hstate *h)
4333 {
4334 	int node;
4335 	unsigned int nr = 0;
4336 	nodemask_t *mpol_allowed;
4337 	unsigned int *array = h->free_huge_pages_node;
4338 	gfp_t gfp_mask = htlb_alloc_mask(h);
4339 
4340 	mpol_allowed = policy_nodemask_current(gfp_mask);
4341 
4342 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4343 		if (!mpol_allowed || node_isset(node, *mpol_allowed))
4344 			nr += array[node];
4345 	}
4346 
4347 	return nr;
4348 }
4349 
4350 #ifdef CONFIG_SYSCTL
4351 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4352 					  void *buffer, size_t *length,
4353 					  loff_t *ppos, unsigned long *out)
4354 {
4355 	struct ctl_table dup_table;
4356 
4357 	/*
4358 	 * To avoid races with __do_proc_doulongvec_minmax(), duplicate @table
4359 	 * and alter the duplicate rather than the original.
4360 	 */
4361 	dup_table = *table;
4362 	dup_table.data = out;
4363 
4364 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4365 }
4366 
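/*
 * Backing for the vm.nr_hugepages and vm.nr_hugepages_mempolicy sysctls,
 * which resize the default-sized pool, e.g.:
 *
 *   sysctl vm.nr_hugepages=1024
 *   echo 1024 > /proc/sys/vm/nr_hugepages
 */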
4367 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4368 			 struct ctl_table *table, int write,
4369 			 void *buffer, size_t *length, loff_t *ppos)
4370 {
4371 	struct hstate *h = &default_hstate;
4372 	unsigned long tmp = h->max_huge_pages;
4373 	int ret;
4374 
4375 	if (!hugepages_supported())
4376 		return -EOPNOTSUPP;
4377 
4378 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4379 					     &tmp);
4380 	if (ret)
4381 		goto out;
4382 
4383 	if (write)
4384 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4385 						  NUMA_NO_NODE, tmp, *length);
4386 out:
4387 	return ret;
4388 }
4389 
4390 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4391 			  void *buffer, size_t *length, loff_t *ppos)
4392 {
4393 
4394 	return hugetlb_sysctl_handler_common(false, table, write,
4395 							buffer, length, ppos);
4396 }
4397 
4398 #ifdef CONFIG_NUMA
4399 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4400 			  void *buffer, size_t *length, loff_t *ppos)
4401 {
4402 	return hugetlb_sysctl_handler_common(true, table, write,
4403 							buffer, length, ppos);
4404 }
4405 #endif /* CONFIG_NUMA */
4406 
4407 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4408 		void *buffer, size_t *length, loff_t *ppos)
4409 {
4410 	struct hstate *h = &default_hstate;
4411 	unsigned long tmp;
4412 	int ret;
4413 
4414 	if (!hugepages_supported())
4415 		return -EOPNOTSUPP;
4416 
4417 	tmp = h->nr_overcommit_huge_pages;
4418 
4419 	if (write && hstate_is_gigantic(h))
4420 		return -EINVAL;
4421 
4422 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4423 					     &tmp);
4424 	if (ret)
4425 		goto out;
4426 
4427 	if (write) {
4428 		spin_lock_irq(&hugetlb_lock);
4429 		h->nr_overcommit_huge_pages = tmp;
4430 		spin_unlock_irq(&hugetlb_lock);
4431 	}
4432 out:
4433 	return ret;
4434 }
4435 
4436 #endif /* CONFIG_SYSCTL */
4437 
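/*
 * Emit the HugePages_* lines of /proc/meminfo.  Only the default hstate
 * gets the detailed counters; the final "Hugetlb:" line accounts for
 * memory in huge pages of every size.
 */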
4438 void hugetlb_report_meminfo(struct seq_file *m)
4439 {
4440 	struct hstate *h;
4441 	unsigned long total = 0;
4442 
4443 	if (!hugepages_supported())
4444 		return;
4445 
4446 	for_each_hstate(h) {
4447 		unsigned long count = h->nr_huge_pages;
4448 
4449 		total += huge_page_size(h) * count;
4450 
4451 		if (h == &default_hstate)
4452 			seq_printf(m,
4453 				   "HugePages_Total:   %5lu\n"
4454 				   "HugePages_Free:    %5lu\n"
4455 				   "HugePages_Rsvd:    %5lu\n"
4456 				   "HugePages_Surp:    %5lu\n"
4457 				   "Hugepagesize:   %8lu kB\n",
4458 				   count,
4459 				   h->free_huge_pages,
4460 				   h->resv_huge_pages,
4461 				   h->surplus_huge_pages,
4462 				   huge_page_size(h) / SZ_1K);
4463 	}
4464 
4465 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4466 }
4467 
4468 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4469 {
4470 	struct hstate *h = &default_hstate;
4471 
4472 	if (!hugepages_supported())
4473 		return 0;
4474 
4475 	return sysfs_emit_at(buf, len,
4476 			     "Node %d HugePages_Total: %5u\n"
4477 			     "Node %d HugePages_Free:  %5u\n"
4478 			     "Node %d HugePages_Surp:  %5u\n",
4479 			     nid, h->nr_huge_pages_node[nid],
4480 			     nid, h->free_huge_pages_node[nid],
4481 			     nid, h->surplus_huge_pages_node[nid]);
4482 }
4483 
4484 void hugetlb_show_meminfo(void)
4485 {
4486 	struct hstate *h;
4487 	int nid;
4488 
4489 	if (!hugepages_supported())
4490 		return;
4491 
4492 	for_each_node_state(nid, N_MEMORY)
4493 		for_each_hstate(h)
4494 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4495 				nid,
4496 				h->nr_huge_pages_node[nid],
4497 				h->free_huge_pages_node[nid],
4498 				h->surplus_huge_pages_node[nid],
4499 				huge_page_size(h) / SZ_1K);
4500 }
4501 
4502 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4503 {
4504 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4505 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4506 }
4507 
4508 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4509 unsigned long hugetlb_total_pages(void)
4510 {
4511 	struct hstate *h;
4512 	unsigned long nr_total_pages = 0;
4513 
4514 	for_each_hstate(h)
4515 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4516 	return nr_total_pages;
4517 }
4518 
4519 static int hugetlb_acct_memory(struct hstate *h, long delta)
4520 {
4521 	int ret = -ENOMEM;
4522 
4523 	if (!delta)
4524 		return 0;
4525 
4526 	spin_lock_irq(&hugetlb_lock);
4527 	/*
4528 	 * When cpuset is configured, it breaks the strict hugetlb page
4529 	 * reservation as the accounting is done on a global variable. Such
4530 	 * reservation is completely rubbish in the presence of cpuset because
4531 	 * the reservation is not checked against page availability for the
4532 	 * current cpuset. An application can still be OOM'ed by the kernel due
4533 	 * to a lack of free hugetlb pages in the cpuset that the task is in.
4534 	 * Attempting to enforce strict accounting with cpuset is almost
4535 	 * impossible (or too ugly) because cpusets are too fluid: tasks and
4536 	 * memory nodes can be dynamically moved between cpusets.
4537 	 *
4538 	 * The change of semantics for shared hugetlb mapping with cpuset is
4539 	 * undesirable. However, in order to preserve some of the semantics,
4540 	 * we fall back to checking against current free page availability as
4541 	 * a best attempt and hopefully to minimize the impact of changing
4542 	 * semantics that cpuset has.
4543 	 *
4544 	 * Apart from cpuset, the memory policy mechanism also determines from
4545 	 * which node the kernel will allocate memory in a NUMA system.  So,
4546 	 * similar to cpuset, the memory policy of the current task should be
4547 	 * considered in the same way.
4549 	 */
4550 	if (delta > 0) {
4551 		if (gather_surplus_pages(h, delta) < 0)
4552 			goto out;
4553 
4554 		if (delta > allowed_mems_nr(h)) {
4555 			return_unused_surplus_pages(h, delta);
4556 			goto out;
4557 		}
4558 	}
4559 
4560 	ret = 0;
4561 	if (delta < 0)
4562 		return_unused_surplus_pages(h, (unsigned long) -delta);
4563 
4564 out:
4565 	spin_unlock_irq(&hugetlb_lock);
4566 	return ret;
4567 }
4568 
4569 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4570 {
4571 	struct resv_map *resv = vma_resv_map(vma);
4572 
4573 	/*
4574 	 * This new VMA should share its siblings reservation map if present.
4575 	 * The VMA will only ever have a valid reservation map pointer where
4576 	 * it is being copied for another still existing VMA.  As that VMA
4577 	 * has a reference to the reservation map it cannot disappear until
4578 	 * after this open call completes.  It is therefore safe to take a
4579 	 * new reference here without additional locking.
4580 	 */
4581 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4582 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4583 		kref_get(&resv->refs);
4584 	}
4585 }
4586 
4587 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4588 {
4589 	struct hstate *h = hstate_vma(vma);
4590 	struct resv_map *resv = vma_resv_map(vma);
4591 	struct hugepage_subpool *spool = subpool_vma(vma);
4592 	unsigned long reserve, start, end;
4593 	long gbl_reserve;
4594 
4595 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4596 		return;
4597 
4598 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4599 	end = vma_hugecache_offset(h, vma, vma->vm_end);
4600 
4601 	reserve = (end - start) - region_count(resv, start, end);
4602 	hugetlb_cgroup_uncharge_counter(resv, start, end);
4603 	if (reserve) {
4604 		/*
4605 		 * Decrement reserve counts.  The global reserve count may be
4606 		 * adjusted if the subpool has a minimum size.
4607 		 */
4608 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4609 		hugetlb_acct_memory(h, -gbl_reserve);
4610 	}
4611 
4612 	kref_put(&resv->refs, resv_map_release);
4613 }
4614 
4615 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4616 {
4617 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4618 		return -EINVAL;
4619 	return 0;
4620 }
4621 
4622 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4623 {
4624 	return huge_page_size(hstate_vma(vma));
4625 }
4626 
4627 /*
4628  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4629  * handle_mm_fault() to try to instantiate regular-sized pages in the
4630  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4631  * this far.
4632  */
4633 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4634 {
4635 	BUG();
4636 	return 0;
4637 }
4638 
4639 /*
4640  * When a new function is introduced to vm_operations_struct and added
4641  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4642  * This is because under System V memory model, mappings created via
4643  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4644  * their original vm_ops are overwritten with shm_vm_ops.
4645  */
4646 const struct vm_operations_struct hugetlb_vm_ops = {
4647 	.fault = hugetlb_vm_op_fault,
4648 	.open = hugetlb_vm_op_open,
4649 	.close = hugetlb_vm_op_close,
4650 	.may_split = hugetlb_vm_op_split,
4651 	.pagesize = hugetlb_vm_op_pagesize,
4652 };
4653 
4654 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
4655 				int writable)
4656 {
4657 	pte_t entry;
4658 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4659 
4660 	if (writable) {
4661 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4662 					 vma->vm_page_prot)));
4663 	} else {
4664 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4665 					   vma->vm_page_prot));
4666 	}
4667 	entry = pte_mkyoung(entry);
4668 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4669 
4670 	return entry;
4671 }
4672 
4673 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4674 				   unsigned long address, pte_t *ptep)
4675 {
4676 	pte_t entry;
4677 
4678 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
4679 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4680 		update_mmu_cache(vma, address, ptep);
4681 }
4682 
4683 bool is_hugetlb_entry_migration(pte_t pte)
4684 {
4685 	swp_entry_t swp;
4686 
4687 	if (huge_pte_none(pte) || pte_present(pte))
4688 		return false;
4689 	swp = pte_to_swp_entry(pte);
4690 	if (is_migration_entry(swp))
4691 		return true;
4692 	else
4693 		return false;
4694 }
4695 
4696 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4697 {
4698 	swp_entry_t swp;
4699 
4700 	if (huge_pte_none(pte) || pte_present(pte))
4701 		return false;
4702 	swp = pte_to_swp_entry(pte);
4703 	if (is_hwpoison_entry(swp))
4704 		return true;
4705 	else
4706 		return false;
4707 }
4708 
4709 static void
4710 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4711 		     struct page *new_page)
4712 {
4713 	__SetPageUptodate(new_page);
4714 	hugepage_add_new_anon_rmap(new_page, vma, addr);
4715 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
4716 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4717 	ClearHPageRestoreReserve(new_page);
4718 	SetHPageMigratable(new_page);
4719 }
4720 
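/*
 * Copy the hugetlb page table entries of @src_vma into @dst_vma at
 * fork() time.  Shared mappings may end up sharing page tables outright;
 * for private (COW) mappings, present entries are write-protected in
 * both mms, and pages whose anon rmap cannot be duplicated (e.g. pinned
 * pages) are copied into freshly allocated huge pages for the child.
 */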
4721 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4722 			    struct vm_area_struct *dst_vma,
4723 			    struct vm_area_struct *src_vma)
4724 {
4725 	pte_t *src_pte, *dst_pte, entry, dst_entry;
4726 	struct page *ptepage;
4727 	unsigned long addr;
4728 	bool cow = is_cow_mapping(src_vma->vm_flags);
4729 	struct hstate *h = hstate_vma(src_vma);
4730 	unsigned long sz = huge_page_size(h);
4731 	unsigned long npages = pages_per_huge_page(h);
4732 	struct address_space *mapping = src_vma->vm_file->f_mapping;
4733 	struct mmu_notifier_range range;
4734 	int ret = 0;
4735 
4736 	if (cow) {
4737 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
4738 					src_vma->vm_start,
4739 					src_vma->vm_end);
4740 		mmu_notifier_invalidate_range_start(&range);
4741 		mmap_assert_write_locked(src);
4742 		raw_write_seqcount_begin(&src->write_protect_seq);
4743 	} else {
4744 		/*
4745 		 * For shared mappings i_mmap_rwsem must be held to call
4746 		 * huge_pte_alloc, otherwise the returned ptep could go
4747 		 * away if part of a shared pmd and another thread calls
4748 		 * huge_pmd_unshare.
4749 		 */
4750 		i_mmap_lock_read(mapping);
4751 	}
4752 
4753 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4754 		spinlock_t *src_ptl, *dst_ptl;
4755 		src_pte = huge_pte_offset(src, addr, sz);
4756 		if (!src_pte)
4757 			continue;
4758 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4759 		if (!dst_pte) {
4760 			ret = -ENOMEM;
4761 			break;
4762 		}
4763 
4764 		/*
4765 		 * If the pagetables are shared don't copy or take references.
4766 		 * dst_pte == src_pte is the common case of src/dest sharing.
4767 		 *
4768 		 * However, src could have 'unshared' and dst shares with
4769 		 * another vma.  If dst_pte !none, this implies sharing.
4770 		 * Check here before taking page table lock, and once again
4771 		 * after taking the lock below.
4772 		 */
4773 		dst_entry = huge_ptep_get(dst_pte);
4774 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4775 			continue;
4776 
4777 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4778 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4779 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4780 		entry = huge_ptep_get(src_pte);
4781 		dst_entry = huge_ptep_get(dst_pte);
4782 again:
4783 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
4784 			/*
4785 			 * Skip if src entry none.  Also, skip in the
4786 			 * unlikely case dst entry !none as this implies
4787 			 * sharing with another vma.
4788 			 */
4789 			;
4790 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
4791 				    is_hugetlb_entry_hwpoisoned(entry))) {
4792 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4793 			bool uffd_wp = huge_pte_uffd_wp(entry);
4794 
4795 			if (!is_readable_migration_entry(swp_entry) && cow) {
4796 				/*
4797 				 * COW mappings require pages in both
4798 				 * parent and child to be set to read.
4799 				 */
4800 				swp_entry = make_readable_migration_entry(
4801 							swp_offset(swp_entry));
4802 				entry = swp_entry_to_pte(swp_entry);
4803 				if (userfaultfd_wp(src_vma) && uffd_wp)
4804 					entry = huge_pte_mkuffd_wp(entry);
4805 				set_huge_swap_pte_at(src, addr, src_pte,
4806 						     entry, sz);
4807 			}
4808 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4809 				entry = huge_pte_clear_uffd_wp(entry);
4810 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4811 		} else if (unlikely(is_pte_marker(entry))) {
4812 			/*
4813 			 * We copy the pte marker only if the dst vma has
4814 			 * uffd-wp enabled.
4815 			 */
4816 			if (userfaultfd_wp(dst_vma))
4817 				set_huge_pte_at(dst, addr, dst_pte, entry);
4818 		} else {
4819 			entry = huge_ptep_get(src_pte);
4820 			ptepage = pte_page(entry);
4821 			get_page(ptepage);
4822 
4823 			/*
4824 			 * Failing to duplicate the anon rmap is a rare case
4825 			 * where we see pinned hugetlb pages while they're
4826 			 * prone to COW. We need to do the COW earlier during
4827 			 * fork.
4828 			 *
4829 			 * When pre-allocating the page or copying data, we
4830 			 * need to be without the pgtable locks since we could
4831 			 * sleep during the process.
4832 			 */
4833 			if (!PageAnon(ptepage)) {
4834 				page_dup_file_rmap(ptepage, true);
4835 			} else if (page_try_dup_anon_rmap(ptepage, true,
4836 							  src_vma)) {
4837 				pte_t src_pte_old = entry;
4838 				struct page *new;
4839 
4840 				spin_unlock(src_ptl);
4841 				spin_unlock(dst_ptl);
4842 				/* Do not use the reserve as it is privately owned */
4843 				new = alloc_huge_page(dst_vma, addr, 1);
4844 				if (IS_ERR(new)) {
4845 					put_page(ptepage);
4846 					ret = PTR_ERR(new);
4847 					break;
4848 				}
4849 				copy_user_huge_page(new, ptepage, addr, dst_vma,
4850 						    npages);
4851 				put_page(ptepage);
4852 
4853 				/* Install the new huge page if src pte stable */
4854 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
4855 				src_ptl = huge_pte_lockptr(h, src, src_pte);
4856 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4857 				entry = huge_ptep_get(src_pte);
4858 				if (!pte_same(src_pte_old, entry)) {
4859 					restore_reserve_on_error(h, dst_vma, addr,
4860 								new);
4861 					put_page(new);
4862 					/* dst_entry won't change, as it is in the child */
4863 					goto again;
4864 				}
4865 				hugetlb_install_page(dst_vma, dst_pte, addr, new);
4866 				spin_unlock(src_ptl);
4867 				spin_unlock(dst_ptl);
4868 				continue;
4869 			}
4870 
4871 			if (cow) {
4872 				/*
4873 				 * No need to notify as we are downgrading page
4874 				 * table protection, not changing it to point
4875 				 * to a new page.
4876 				 *
4877 				 * See Documentation/mm/mmu_notifier.rst
4878 				 */
4879 				huge_ptep_set_wrprotect(src, addr, src_pte);
4880 				entry = huge_pte_wrprotect(entry);
4881 			}
4882 
4883 			set_huge_pte_at(dst, addr, dst_pte, entry);
4884 			hugetlb_count_add(npages, dst);
4885 		}
4886 		spin_unlock(src_ptl);
4887 		spin_unlock(dst_ptl);
4888 	}
4889 
4890 	if (cow) {
4891 		raw_write_seqcount_end(&src->write_protect_seq);
4892 		mmu_notifier_invalidate_range_end(&range);
4893 	} else {
4894 		i_mmap_unlock_read(mapping);
4895 	}
4896 
4897 	return ret;
4898 }
4899 
4900 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4901 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4902 {
4903 	struct hstate *h = hstate_vma(vma);
4904 	struct mm_struct *mm = vma->vm_mm;
4905 	spinlock_t *src_ptl, *dst_ptl;
4906 	pte_t pte;
4907 
4908 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4909 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4910 
4911 	/*
4912 	 * We don't have to worry about the ordering of src and dst ptlocks
4913 	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4914 	 */
4915 	if (src_ptl != dst_ptl)
4916 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4917 
4918 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4919 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4920 
4921 	if (src_ptl != dst_ptl)
4922 		spin_unlock(src_ptl);
4923 	spin_unlock(dst_ptl);
4924 }
4925 
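/*
 * Move hugetlb page table entries from the old mapping to the new one
 * for mremap().  PMDs shared with other mappings are unshared rather
 * than moved.  Returns the number of bytes successfully relocated (len
 * on full success).
 */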
4926 int move_hugetlb_page_tables(struct vm_area_struct *vma,
4927 			     struct vm_area_struct *new_vma,
4928 			     unsigned long old_addr, unsigned long new_addr,
4929 			     unsigned long len)
4930 {
4931 	struct hstate *h = hstate_vma(vma);
4932 	struct address_space *mapping = vma->vm_file->f_mapping;
4933 	unsigned long sz = huge_page_size(h);
4934 	struct mm_struct *mm = vma->vm_mm;
4935 	unsigned long old_end = old_addr + len;
4936 	unsigned long old_addr_copy;
4937 	pte_t *src_pte, *dst_pte;
4938 	struct mmu_notifier_range range;
4939 	bool shared_pmd = false;
4940 
4941 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
4942 				old_end);
4943 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4944 	/*
4945 	 * In case of shared PMDs, we should cover the maximum possible
4946 	 * range.
4947 	 */
4948 	flush_cache_range(vma, range.start, range.end);
4949 
4950 	mmu_notifier_invalidate_range_start(&range);
4951 	/* Prevent race with file truncation */
4952 	i_mmap_lock_write(mapping);
4953 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
4954 		src_pte = huge_pte_offset(mm, old_addr, sz);
4955 		if (!src_pte)
4956 			continue;
4957 		if (huge_pte_none(huge_ptep_get(src_pte)))
4958 			continue;
4959 
4960 		/* old_addr arg to huge_pmd_unshare() is a pointer and so the
4961 		 * arg may be modified. Pass a copy instead to preserve the
4962 		 * value in old_addr.
4963 		 */
4964 		old_addr_copy = old_addr;
4965 
4966 		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
4967 			shared_pmd = true;
4968 			continue;
4969 		}
4970 
4971 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
4972 		if (!dst_pte)
4973 			break;
4974 
4975 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
4976 	}
4977 
4978 	if (shared_pmd)
4979 		flush_tlb_range(vma, range.start, range.end);
4980 	else
4981 		flush_tlb_range(vma, old_end - len, old_end);
4982 	mmu_notifier_invalidate_range_end(&range);
4983 	i_mmap_unlock_write(mapping);
4984 
4985 	return len + old_addr - old_end;
4986 }
4987 
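/*
 * Unmap the huge pages in [start, end) of @vma and queue them on @tlb
 * for freeing.  If @ref_page is supplied, only the mapping of that one
 * page is torn down.  Unsharing of shared PMDs forces an immediate TLB
 * flush before returning.
 */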
4988 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
4989 				   unsigned long start, unsigned long end,
4990 				   struct page *ref_page, zap_flags_t zap_flags)
4991 {
4992 	struct mm_struct *mm = vma->vm_mm;
4993 	unsigned long address;
4994 	pte_t *ptep;
4995 	pte_t pte;
4996 	spinlock_t *ptl;
4997 	struct page *page;
4998 	struct hstate *h = hstate_vma(vma);
4999 	unsigned long sz = huge_page_size(h);
5000 	struct mmu_notifier_range range;
5001 	bool force_flush = false;
5002 
5003 	WARN_ON(!is_vm_hugetlb_page(vma));
5004 	BUG_ON(start & ~huge_page_mask(h));
5005 	BUG_ON(end & ~huge_page_mask(h));
5006 
5007 	/*
5008 	 * This is a hugetlb vma, all the pte entries should point
5009 	 * to huge page.
5010 	 */
5011 	tlb_change_page_size(tlb, sz);
5012 	tlb_start_vma(tlb, vma);
5013 
5014 	/*
5015 	 * If sharing possible, alert mmu notifiers of worst case.
5016 	 */
5017 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
5018 				end);
5019 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5020 	mmu_notifier_invalidate_range_start(&range);
5021 	address = start;
5022 	for (; address < end; address += sz) {
5023 		ptep = huge_pte_offset(mm, address, sz);
5024 		if (!ptep)
5025 			continue;
5026 
5027 		ptl = huge_pte_lock(h, mm, ptep);
5028 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5029 			spin_unlock(ptl);
5030 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5031 			force_flush = true;
5032 			continue;
5033 		}
5034 
5035 		pte = huge_ptep_get(ptep);
5036 		if (huge_pte_none(pte)) {
5037 			spin_unlock(ptl);
5038 			continue;
5039 		}
5040 
5041 		/*
5042 		 * A migrating or HWPoisoned hugepage is already unmapped and
5043 		 * its refcount has been dropped, so just clear the pte here.
5044 		 */
5045 		if (unlikely(!pte_present(pte))) {
5046 			/*
5047 			 * If the pte was wr-protected by uffd-wp in any of the
5048 			 * swap forms, and the caller does not want to
5049 			 * drop the uffd-wp bit in this zap, then replace the
5050 			 * pte with a marker.
5051 			 */
5052 			if (pte_swp_uffd_wp_any(pte) &&
5053 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5054 				set_huge_pte_at(mm, address, ptep,
5055 						make_pte_marker(PTE_MARKER_UFFD_WP));
5056 			else
5057 				huge_pte_clear(mm, address, ptep, sz);
5058 			spin_unlock(ptl);
5059 			continue;
5060 		}
5061 
5062 		page = pte_page(pte);
5063 		/*
5064 		 * If a reference page is supplied, it is because a specific
5065 		 * page is being unmapped, not a range. Ensure the page we
5066 		 * are about to unmap is the actual page of interest.
5067 		 */
5068 		if (ref_page) {
5069 			if (page != ref_page) {
5070 				spin_unlock(ptl);
5071 				continue;
5072 			}
5073 			/*
5074 			 * Mark the VMA as having unmapped its page so that
5075 			 * future faults in this VMA will fail rather than
5076 			 * looking like data was lost
5077 			 */
5078 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5079 		}
5080 
5081 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5082 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5083 		if (huge_pte_dirty(pte))
5084 			set_page_dirty(page);
5085 		/* Leave a uffd-wp pte marker if needed */
5086 		if (huge_pte_uffd_wp(pte) &&
5087 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5088 			set_huge_pte_at(mm, address, ptep,
5089 					make_pte_marker(PTE_MARKER_UFFD_WP));
5090 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5091 		page_remove_rmap(page, vma, true);
5092 
5093 		spin_unlock(ptl);
5094 		tlb_remove_page_size(tlb, page, huge_page_size(h));
5095 		/*
5096 		 * Bail out after unmapping reference page if supplied
5097 		 */
5098 		if (ref_page)
5099 			break;
5100 	}
5101 	mmu_notifier_invalidate_range_end(&range);
5102 	tlb_end_vma(tlb, vma);
5103 
5104 	/*
5105 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5106 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5107 	 * guaranteed that the last reference would not be dropped. But we must
5108 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5109 	 * dropped and the last reference to the shared PMDs page might be
5110 	 * dropped as well.
5111 	 *
5112 	 * In theory we could defer the freeing of the PMD pages as well, but
5113 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5114 	 * detect sharing, so we cannot defer the release of the page either.
5115 	 * Instead, do flush now.
5116 	 */
5117 	if (force_flush)
5118 		tlb_flush_mmu_tlbonly(tlb);
5119 }
5120 
5121 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5122 			  struct vm_area_struct *vma, unsigned long start,
5123 			  unsigned long end, struct page *ref_page,
5124 			  zap_flags_t zap_flags)
5125 {
5126 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5127 
5128 	/*
5129 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
5130 	 * test will fail on a vma being torn down, and not grab a page table
5131 	 * on its way out.  We're lucky that the flag has such an appropriate
5132 	 * name, and can in fact be safely cleared here. We could clear it
5133 	 * before the __unmap_hugepage_range above, but all that's necessary
5134 	 * is to clear it before releasing the i_mmap_rwsem. This works
5135 	 * because in the context this is called, the VMA is about to be
5136 	 * destroyed and the i_mmap_rwsem is held.
5137 	 */
5138 	vma->vm_flags &= ~VM_MAYSHARE;
5139 }
5140 
5141 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5142 			  unsigned long end, struct page *ref_page,
5143 			  zap_flags_t zap_flags)
5144 {
5145 	struct mmu_gather tlb;
5146 
5147 	tlb_gather_mmu(&tlb, vma->vm_mm);
5148 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5149 	tlb_finish_mmu(&tlb);
5150 }
5151 
5152 /*
5153  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5154  * mapping it owns the reserve page for. The intention is to unmap the page
5155  * from other VMAs and let the children be SIGKILLed if they are faulting the
5156  * same region.
5157  */
5158 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5159 			      struct page *page, unsigned long address)
5160 {
5161 	struct hstate *h = hstate_vma(vma);
5162 	struct vm_area_struct *iter_vma;
5163 	struct address_space *mapping;
5164 	pgoff_t pgoff;
5165 
5166 	/*
5167 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5168 	 * from page cache lookup which is in HPAGE_SIZE units.
5169 	 */
5170 	address = address & huge_page_mask(h);
5171 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5172 			vma->vm_pgoff;
5173 	mapping = vma->vm_file->f_mapping;
5174 
5175 	/*
5176 	 * Take the mapping lock for the duration of the table walk. As
5177 	 * this mapping should be shared between all the VMAs,
5178 	 * __unmap_hugepage_range() is called as the lock is already held
5179 	 */
5180 	i_mmap_lock_write(mapping);
5181 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5182 		/* Do not unmap the current VMA */
5183 		if (iter_vma == vma)
5184 			continue;
5185 
5186 		/*
5187 		 * Shared VMAs have their own reserves and do not affect
5188 		 * MAP_PRIVATE accounting but it is possible that a shared
5189 		 * VMA is using the same page so check and skip such VMAs.
5190 		 */
5191 		if (iter_vma->vm_flags & VM_MAYSHARE)
5192 			continue;
5193 
5194 		/*
5195 		 * Unmap the page from other VMAs without their own reserves.
5196 		 * They get marked to be SIGKILLed if they fault in these
5197 		 * areas. This is because a future no-page fault on this VMA
5198 		 * could insert a zeroed page instead of the data existing
5199 		 * from the time of fork. This would look like data corruption.
5200 		 */
5201 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5202 			unmap_hugepage_range(iter_vma, address,
5203 					     address + huge_page_size(h), page, 0);
5204 	}
5205 	i_mmap_unlock_write(mapping);
5206 }
5207 
5208 /*
5209  * hugetlb_wp() should be called with page lock of the original hugepage held.
5210  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5211  * cannot race with other handlers or page migration.
5212  * Keep the pte_same checks anyway to make transition from the mutex easier.
5213  */
5214 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5215 		       unsigned long address, pte_t *ptep, unsigned int flags,
5216 		       struct page *pagecache_page, spinlock_t *ptl)
5217 {
5218 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
5219 	pte_t pte;
5220 	struct hstate *h = hstate_vma(vma);
5221 	struct page *old_page, *new_page;
5222 	int outside_reserve = 0;
5223 	vm_fault_t ret = 0;
5224 	unsigned long haddr = address & huge_page_mask(h);
5225 	struct mmu_notifier_range range;
5226 
5227 	VM_BUG_ON(unshare && (flags & FOLL_WRITE));
5228 	VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
5229 
5230 	pte = huge_ptep_get(ptep);
5231 	old_page = pte_page(pte);
5232 
5233 	delayacct_wpcopy_start();
5234 
5235 retry_avoidcopy:
5236 	/*
5237 	 * If no-one else is actually using this page, we're the exclusive
5238 	 * owner and can reuse this page.
5239 	 */
5240 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5241 		if (!PageAnonExclusive(old_page))
5242 			page_move_anon_rmap(old_page, vma);
5243 		if (likely(!unshare))
5244 			set_huge_ptep_writable(vma, haddr, ptep);
5245 
5246 		delayacct_wpcopy_end();
5247 		return 0;
5248 	}
5249 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
5250 		       old_page);
5251 
5252 	/*
5253 	 * If the process that created a MAP_PRIVATE mapping is about to
5254 	 * perform a COW due to a shared page count, attempt to satisfy
5255 	 * the allocation without using the existing reserves. The pagecache
5256 	 * page is used to determine if the reserve at this address was
5257 	 * consumed or not. If reserves were used, a partial faulted mapping
5258 	 * at the time of fork() could consume its reserves on COW instead
5259 	 * of the full address range.
5260 	 */
5261 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5262 			old_page != pagecache_page)
5263 		outside_reserve = 1;
5264 
5265 	get_page(old_page);
5266 
5267 	/*
5268 	 * Drop page table lock as buddy allocator may be called. It will
5269 	 * be acquired again before returning to the caller, as expected.
5270 	 */
5271 	spin_unlock(ptl);
5272 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
5273 
5274 	if (IS_ERR(new_page)) {
5275 		/*
5276 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5277 		 * it is due to references held by a child and an insufficient
5278 		 * huge page pool. To guarantee the original mapper's
5279 		 * reliability, unmap the page from child processes. The child
5280 		 * may get SIGKILLed if it later faults.
5281 		 */
5282 		if (outside_reserve) {
5283 			struct address_space *mapping = vma->vm_file->f_mapping;
5284 			pgoff_t idx;
5285 			u32 hash;
5286 
5287 			put_page(old_page);
5288 			BUG_ON(huge_pte_none(pte));
5289 			/*
5290 			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
5291 			 * unmapping.  Unmapping needs to hold i_mmap_rwsem
5292 			 * in write mode.  Dropping i_mmap_rwsem in read mode
5293 			 * here is OK as COW mappings do not interact with
5294 			 * PMD sharing.
5295 			 *
5296 			 * Reacquire both after unmap operation.
5297 			 */
5298 			idx = vma_hugecache_offset(h, vma, haddr);
5299 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5300 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5301 			i_mmap_unlock_read(mapping);
5302 
5303 			unmap_ref_private(mm, vma, old_page, haddr);
5304 
5305 			i_mmap_lock_read(mapping);
5306 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5307 			spin_lock(ptl);
5308 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5309 			if (likely(ptep &&
5310 				   pte_same(huge_ptep_get(ptep), pte)))
5311 				goto retry_avoidcopy;
5312 			/*
5313 			 * A race occurred while re-acquiring the page table
5314 			 * lock, and our job is done.
5315 			 */
5316 			delayacct_wpcopy_end();
5317 			return 0;
5318 		}
5319 
5320 		ret = vmf_error(PTR_ERR(new_page));
5321 		goto out_release_old;
5322 	}
5323 
5324 	/*
5325 	 * When the original hugepage is a shared one, it does not have
5326 	 * anon_vma prepared.
5327 	 */
5328 	if (unlikely(anon_vma_prepare(vma))) {
5329 		ret = VM_FAULT_OOM;
5330 		goto out_release_all;
5331 	}
5332 
5333 	copy_user_huge_page(new_page, old_page, address, vma,
5334 			    pages_per_huge_page(h));
5335 	__SetPageUptodate(new_page);
5336 
5337 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
5338 				haddr + huge_page_size(h));
5339 	mmu_notifier_invalidate_range_start(&range);
5340 
5341 	/*
5342 	 * Retake the page table lock to check for racing updates
5343 	 * before the page tables are altered
5344 	 */
5345 	spin_lock(ptl);
5346 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5347 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5348 		ClearHPageRestoreReserve(new_page);
5349 
5350 		/* Break COW or unshare */
5351 		huge_ptep_clear_flush(vma, haddr, ptep);
5352 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5353 		page_remove_rmap(old_page, vma, true);
5354 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
5355 		set_huge_pte_at(mm, haddr, ptep,
5356 				make_huge_pte(vma, new_page, !unshare));
5357 		SetHPageMigratable(new_page);
5358 		/* Make the old page be freed below */
5359 		new_page = old_page;
5360 	}
5361 	spin_unlock(ptl);
5362 	mmu_notifier_invalidate_range_end(&range);
5363 out_release_all:
5364 	/*
5365 	 * No restore in case of successful pagetable update (Break COW or
5366 	 * unshare)
5367 	 */
5368 	if (new_page != old_page)
5369 		restore_reserve_on_error(h, vma, haddr, new_page);
5370 	put_page(new_page);
5371 out_release_old:
5372 	put_page(old_page);
5373 
5374 	spin_lock(ptl); /* Caller expects lock to be held */
5375 
5376 	delayacct_wpcopy_end();
5377 	return ret;
5378 }
5379 
5380 /* Return the pagecache page at a given address within a VMA */
5381 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
5382 			struct vm_area_struct *vma, unsigned long address)
5383 {
5384 	struct address_space *mapping;
5385 	pgoff_t idx;
5386 
5387 	mapping = vma->vm_file->f_mapping;
5388 	idx = vma_hugecache_offset(h, vma, address);
5389 
5390 	return find_lock_page(mapping, idx);
5391 }
5392 
5393 /*
5394  * Return whether there is a pagecache page to back given address within VMA.
5395  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
5396  */
5397 static bool hugetlbfs_pagecache_present(struct hstate *h,
5398 			struct vm_area_struct *vma, unsigned long address)
5399 {
5400 	struct address_space *mapping;
5401 	pgoff_t idx;
5402 	struct page *page;
5403 
5404 	mapping = vma->vm_file->f_mapping;
5405 	idx = vma_hugecache_offset(h, vma, address);
5406 
5407 	page = find_get_page(mapping, idx);
5408 	if (page)
5409 		put_page(page);
5410 	return page != NULL;
5411 }
5412 
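/*
 * Add a freshly allocated hugetlb page to the inode's page cache at index
 * @idx and charge the inode's block count for it.  The page is marked dirty
 * so that generic, non-hugetlbfs code paths will not drop it.
 */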
5413 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
5414 			   pgoff_t idx)
5415 {
5416 	struct inode *inode = mapping->host;
5417 	struct hstate *h = hstate_inode(inode);
5418 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
5419 
5420 	if (err)
5421 		return err;
5422 	ClearHPageRestoreReserve(page);
5423 
5424 	/*
5425 	 * set page dirty so that it will not be removed from cache/file
5426 	 * by non-hugetlbfs specific code paths.
5427 	 */
5428 	set_page_dirty(page);
5429 
5430 	spin_lock(&inode->i_lock);
5431 	inode->i_blocks += blocks_per_huge_page(h);
5432 	spin_unlock(&inode->i_lock);
5433 	return 0;
5434 }
5435 
5436 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
5437 						  struct address_space *mapping,
5438 						  pgoff_t idx,
5439 						  unsigned int flags,
5440 						  unsigned long haddr,
5441 						  unsigned long addr,
5442 						  unsigned long reason)
5443 {
5444 	vm_fault_t ret;
5445 	u32 hash;
5446 	struct vm_fault vmf = {
5447 		.vma = vma,
5448 		.address = haddr,
5449 		.real_address = addr,
5450 		.flags = flags,
5451 
5452 		/*
5453 		 * Hard to debug if it ends up being
5454 		 * used by a callee that assumes
5455 		 * something about the other
5456 		 * uninitialized fields... same as in
5457 		 * memory.c
5458 		 */
5459 	};
5460 
5461 	/*
5462 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
5463 	 * dropped before handling userfault.  Reacquire
5464 	 * after handling fault to make calling code simpler.
5465 	 */
5466 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5467 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5468 	i_mmap_unlock_read(mapping);
5469 	ret = handle_userfault(&vmf, reason);
5470 	i_mmap_lock_read(mapping);
5471 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
5472 
5473 	return ret;
5474 }
5475 
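/*
 * Handle a fault on a hugetlb pte that is none or a pte marker: look the
 * page up in the page cache, allocate and zero a new one if it is absent
 * (or hand the fault to userfaultfd for MISSING/MINOR registrations), and
 * finally install the new pte under the page table lock, re-checking for
 * races against @old_pte.
 */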
5476 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
5477 			struct vm_area_struct *vma,
5478 			struct address_space *mapping, pgoff_t idx,
5479 			unsigned long address, pte_t *ptep,
5480 			pte_t old_pte, unsigned int flags)
5481 {
5482 	struct hstate *h = hstate_vma(vma);
5483 	vm_fault_t ret = VM_FAULT_SIGBUS;
5484 	int anon_rmap = 0;
5485 	unsigned long size;
5486 	struct page *page;
5487 	pte_t new_pte;
5488 	spinlock_t *ptl;
5489 	unsigned long haddr = address & huge_page_mask(h);
5490 	bool new_page, new_pagecache_page = false;
5491 
5492 	/*
5493 	 * Currently, we are forced to kill the process in the event the
5494 	 * original mapper has unmapped pages from the child due to a failed
5495 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5496 	 * be obvious.
5497 	 */
5498 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5499 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5500 			   current->pid);
5501 		return ret;
5502 	}
5503 
5504 	/*
5505 	 * We can not race with truncation due to holding i_mmap_rwsem.
5506 	 * i_size is modified when holding i_mmap_rwsem, so check here
5507 	 * once for faults beyond end of file.
5508 	 */
5509 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5510 	if (idx >= size)
5511 		goto out;
5512 
5513 retry:
5514 	new_page = false;
5515 	page = find_lock_page(mapping, idx);
5516 	if (!page) {
5517 		/* Check for page in userfault range */
5518 		if (userfaultfd_missing(vma)) {
5519 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5520 						       flags, haddr, address,
5521 						       VM_UFFD_MISSING);
5522 			goto out;
5523 		}
5524 
5525 		page = alloc_huge_page(vma, haddr, 0);
5526 		if (IS_ERR(page)) {
5527 			/*
5528 			 * Returning an error will result in the faulting task
5529 			 * being sent SIGBUS.  The hugetlb fault mutex prevents
5530 			 * two tasks from racing to fault in the same page, which
5531 			 * could result in spurious "unable to allocate" errors.
5532 			 * Page migration does not take the fault mutex, but
5533 			 * does a clear then write of pte's under page table
5534 			 * lock.  Page fault code could race with migration,
5535 			 * notice the clear pte and try to allocate a page
5536 			 * here.  Before returning error, get ptl and make
5537 			 * sure there really is no pte entry.
5538 			 */
5539 			ptl = huge_pte_lock(h, mm, ptep);
5540 			ret = 0;
5541 			if (huge_pte_none(huge_ptep_get(ptep)))
5542 				ret = vmf_error(PTR_ERR(page));
5543 			spin_unlock(ptl);
5544 			goto out;
5545 		}
5546 		clear_huge_page(page, address, pages_per_huge_page(h));
5547 		__SetPageUptodate(page);
5548 		new_page = true;
5549 
5550 		if (vma->vm_flags & VM_MAYSHARE) {
5551 			int err = huge_add_to_page_cache(page, mapping, idx);
5552 			if (err) {
5553 				put_page(page);
5554 				if (err == -EEXIST)
5555 					goto retry;
5556 				goto out;
5557 			}
5558 			new_pagecache_page = true;
5559 		} else {
5560 			lock_page(page);
5561 			if (unlikely(anon_vma_prepare(vma))) {
5562 				ret = VM_FAULT_OOM;
5563 				goto backout_unlocked;
5564 			}
5565 			anon_rmap = 1;
5566 		}
5567 	} else {
5568 		/*
5569 		 * If a memory error occurs between mmap() and fault, some processes
5570 		 * don't have a hwpoisoned swap entry for the errored virtual address.
5571 		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
5572 		 */
5573 		if (unlikely(PageHWPoison(page))) {
5574 			ret = VM_FAULT_HWPOISON_LARGE |
5575 				VM_FAULT_SET_HINDEX(hstate_index(h));
5576 			goto backout_unlocked;
5577 		}
5578 
5579 		/* Check for page in userfault range. */
5580 		if (userfaultfd_minor(vma)) {
5581 			unlock_page(page);
5582 			put_page(page);
5583 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5584 						       flags, haddr, address,
5585 						       VM_UFFD_MINOR);
5586 			goto out;
5587 		}
5588 	}
5589 
5590 	/*
5591 	 * If we are going to COW a private mapping later, we examine the
5592 	 * pending reservations for this page now. This will ensure that
5593 	 * any allocations necessary to record that reservation occur outside
5594 	 * the spinlock.
5595 	 */
5596 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5597 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5598 			ret = VM_FAULT_OOM;
5599 			goto backout_unlocked;
5600 		}
5601 		/* Just decrements count, does not deallocate */
5602 		vma_end_reservation(h, vma, haddr);
5603 	}
5604 
5605 	ptl = huge_pte_lock(h, mm, ptep);
5606 	ret = 0;
5607 	/* If pte changed from under us, retry */
5608 	if (!pte_same(huge_ptep_get(ptep), old_pte))
5609 		goto backout;
5610 
5611 	if (anon_rmap) {
5612 		ClearHPageRestoreReserve(page);
5613 		hugepage_add_new_anon_rmap(page, vma, haddr);
5614 	} else
5615 		page_dup_file_rmap(page, true);
5616 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
5617 				&& (vma->vm_flags & VM_SHARED)));
5618 	/*
5619 	 * If this pte was previously wr-protected, keep it wr-protected even
5620 	 * if populated.
5621 	 */
5622 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5623 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5624 	set_huge_pte_at(mm, haddr, ptep, new_pte);
5625 
5626 	hugetlb_count_add(pages_per_huge_page(h), mm);
5627 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5628 		/* Optimization, do the COW without a second fault */
5629 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
5630 	}
5631 
5632 	spin_unlock(ptl);
5633 
5634 	/*
5635 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
5636 	 * found in the pagecache may not have HPageMigratable set if they have
5637 	 * been isolated for migration.
5638 	 */
5639 	if (new_page)
5640 		SetHPageMigratable(page);
5641 
5642 	unlock_page(page);
5643 out:
5644 	return ret;
5645 
5646 backout:
5647 	spin_unlock(ptl);
5648 backout_unlocked:
5649 	unlock_page(page);
5650 	/* restore reserve for newly allocated pages not in page cache */
5651 	if (new_page && !new_pagecache_page)
5652 		restore_reserve_on_error(h, vma, haddr, page);
5653 	put_page(page);
5654 	goto out;
5655 }
5656 
5657 #ifdef CONFIG_SMP
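/*
 * Hash (mapping, index) to pick one of the fault mutexes; masking the jhash
 * result assumes num_fault_mutexes is a power of two.
 */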
5658 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5659 {
5660 	unsigned long key[2];
5661 	u32 hash;
5662 
5663 	key[0] = (unsigned long) mapping;
5664 	key[1] = idx;
5665 
5666 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5667 
5668 	return hash & (num_fault_mutexes - 1);
5669 }
5670 #else
5671 /*
5672  * For uniprocessor systems we always use a single mutex, so just
5673  * return 0 and avoid the hashing overhead.
5674  */
5675 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5676 {
5677 	return 0;
5678 }
5679 #endif
5680 
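/*
 * Top-level hugetlb page fault handler.  Serializes against truncation and
 * PMD unsharing via i_mmap_rwsem, against concurrent faults on the same page
 * via the fault mutex table, and then dispatches to hugetlb_no_page() or
 * hugetlb_wp() as appropriate.
 */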
5681 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5682 			unsigned long address, unsigned int flags)
5683 {
5684 	pte_t *ptep, entry;
5685 	spinlock_t *ptl;
5686 	vm_fault_t ret;
5687 	u32 hash;
5688 	pgoff_t idx;
5689 	struct page *page = NULL;
5690 	struct page *pagecache_page = NULL;
5691 	struct hstate *h = hstate_vma(vma);
5692 	struct address_space *mapping;
5693 	int need_wait_lock = 0;
5694 	unsigned long haddr = address & huge_page_mask(h);
5695 
5696 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5697 	if (ptep) {
5698 		/*
5699 		 * Since we hold no locks, ptep could be stale.  That is
5700 		 * OK as we are only making decisions based on content and
5701 		 * not actually modifying content here.
5702 		 */
5703 		entry = huge_ptep_get(ptep);
5704 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5705 			migration_entry_wait_huge(vma, ptep);
5706 			return 0;
5707 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5708 			return VM_FAULT_HWPOISON_LARGE |
5709 				VM_FAULT_SET_HINDEX(hstate_index(h));
5710 	}
5711 
5712 	/*
5713 	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
5714 	 * until finished with ptep.  This serves two purposes:
5715 	 * 1) It prevents huge_pmd_unshare from being called elsewhere
5716 	 *    and making the ptep no longer valid.
5717 	 * 2) It synchronizes us with i_size modifications during truncation.
5718 	 *
5719 	 * ptep could have already be assigned via huge_pte_offset.  That
5720 	 * is OK, as huge_pte_alloc will return the same value unless
5721 	 * something has changed.
5722 	 */
5723 	mapping = vma->vm_file->f_mapping;
5724 	i_mmap_lock_read(mapping);
5725 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5726 	if (!ptep) {
5727 		i_mmap_unlock_read(mapping);
5728 		return VM_FAULT_OOM;
5729 	}
5730 
5731 	/*
5732 	 * Serialize hugepage allocation and instantiation, so that we don't
5733 	 * get spurious allocation failures if two CPUs race to instantiate
5734 	 * the same page in the page cache.
5735 	 */
5736 	idx = vma_hugecache_offset(h, vma, haddr);
5737 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5738 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
5739 
5740 	entry = huge_ptep_get(ptep);
5741 	/* PTE markers should be handled the same way as none pte */
5742 	if (huge_pte_none_mostly(entry)) {
5743 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5744 				      entry, flags);
5745 		goto out_mutex;
5746 	}
5747 
5748 	ret = 0;
5749 
5750 	/*
5751 	 * entry could be a migration/hwpoison entry at this point, so this
5752 	 * check prevents the code below from assuming that we have an
5753 	 * active hugepage in the pagecache. The goto expects a second page
5754 	 * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check
5755 	 * will handle it properly.
5756 	 */
5757 	if (!pte_present(entry))
5758 		goto out_mutex;
5759 
5760 	/*
5761 	 * If we are going to COW/unshare the mapping later, we examine the
5762 	 * pending reservations for this page now. This will ensure that any
5763 	 * allocations necessary to record that reservation occur outside the
5764 	 * spinlock. For private mappings, we also lookup the pagecache
5765 	 * page now as it is used to determine if a reservation has been
5766 	 * consumed.
5767 	 */
5768 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5769 	    !huge_pte_write(entry)) {
5770 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5771 			ret = VM_FAULT_OOM;
5772 			goto out_mutex;
5773 		}
5774 		/* Just decrements count, does not deallocate */
5775 		vma_end_reservation(h, vma, haddr);
5776 
5777 		if (!(vma->vm_flags & VM_MAYSHARE))
5778 			pagecache_page = hugetlbfs_pagecache_page(h,
5779 								vma, haddr);
5780 	}
5781 
5782 	ptl = huge_pte_lock(h, mm, ptep);
5783 
5784 	/* Check for a racing update before calling hugetlb_wp() */
5785 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5786 		goto out_ptl;
5787 
5788 	/* Handle userfault-wp first, before trying to lock more pages */
5789 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5790 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5791 		struct vm_fault vmf = {
5792 			.vma = vma,
5793 			.address = haddr,
5794 			.real_address = address,
5795 			.flags = flags,
5796 		};
5797 
5798 		spin_unlock(ptl);
5799 		if (pagecache_page) {
5800 			unlock_page(pagecache_page);
5801 			put_page(pagecache_page);
5802 		}
5803 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5804 		i_mmap_unlock_read(mapping);
5805 		return handle_userfault(&vmf, VM_UFFD_WP);
5806 	}
5807 
5808 	/*
5809 	 * hugetlb_wp() requires page locks of pte_page(entry) and
5810 	 * pagecache_page, so here we need to take the former one
5811 	 * when page != pagecache_page or !pagecache_page.
5812 	 */
5813 	page = pte_page(entry);
5814 	if (page != pagecache_page)
5815 		if (!trylock_page(page)) {
5816 			need_wait_lock = 1;
5817 			goto out_ptl;
5818 		}
5819 
5820 	get_page(page);
5821 
5822 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5823 		if (!huge_pte_write(entry)) {
5824 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5825 					 pagecache_page, ptl);
5826 			goto out_put_page;
5827 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5828 			entry = huge_pte_mkdirty(entry);
5829 		}
5830 	}
5831 	entry = pte_mkyoung(entry);
5832 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5833 						flags & FAULT_FLAG_WRITE))
5834 		update_mmu_cache(vma, haddr, ptep);
5835 out_put_page:
5836 	if (page != pagecache_page)
5837 		unlock_page(page);
5838 	put_page(page);
5839 out_ptl:
5840 	spin_unlock(ptl);
5841 
5842 	if (pagecache_page) {
5843 		unlock_page(pagecache_page);
5844 		put_page(pagecache_page);
5845 	}
5846 out_mutex:
5847 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5848 	i_mmap_unlock_read(mapping);
5849 	/*
5850 	 * Generally it's safe to hold a refcount while waiting for the page
5851 	 * lock. But here we just wait to defer the next page fault and avoid
5852 	 * a busy loop; the page is not used after being unlocked before we
5853 	 * return from the current page fault. So we are safe from accessing
5854 	 * a freed page, even if we wait here without taking a refcount.
5855 	 */
5856 	if (need_wait_lock)
5857 		wait_on_page_locked(page);
5858 	return ret;
5859 }
5860 
5861 #ifdef CONFIG_USERFAULTFD
5862 /*
5863  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
5864  * modifications for huge pages.
5865  */
5866 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
5867 			    pte_t *dst_pte,
5868 			    struct vm_area_struct *dst_vma,
5869 			    unsigned long dst_addr,
5870 			    unsigned long src_addr,
5871 			    enum mcopy_atomic_mode mode,
5872 			    struct page **pagep,
5873 			    bool wp_copy)
5874 {
5875 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
5876 	struct hstate *h = hstate_vma(dst_vma);
5877 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
5878 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
5879 	unsigned long size;
5880 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
5881 	pte_t _dst_pte;
5882 	spinlock_t *ptl;
5883 	int ret = -ENOMEM;
5884 	struct page *page;
5885 	int writable;
5886 	bool page_in_pagecache = false;
5887 
5888 	if (is_continue) {
5889 		ret = -EFAULT;
5890 		page = find_lock_page(mapping, idx);
5891 		if (!page)
5892 			goto out;
5893 		page_in_pagecache = true;
5894 	} else if (!*pagep) {
5895 		/* If a page already exists, then it's UFFDIO_COPY for
5896 		 * a non-missing case. Return -EEXIST.
5897 		 */
5898 		if (vm_shared &&
5899 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5900 			ret = -EEXIST;
5901 			goto out;
5902 		}
5903 
5904 		page = alloc_huge_page(dst_vma, dst_addr, 0);
5905 		if (IS_ERR(page)) {
5906 			ret = -ENOMEM;
5907 			goto out;
5908 		}
5909 
5910 		ret = copy_huge_page_from_user(page,
5911 						(const void __user *) src_addr,
5912 						pages_per_huge_page(h), false);
5913 
5914 		/* fallback to copy_from_user outside mmap_lock */
5915 		if (unlikely(ret)) {
5916 			ret = -ENOENT;
5917 			/* Free the allocated page which may have
5918 			 * consumed a reservation.
5919 			 */
5920 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
5921 			put_page(page);
5922 
5923 			/* Allocate a temporary page to hold the copied
5924 			 * contents.
5925 			 */
5926 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
5927 			if (!page) {
5928 				ret = -ENOMEM;
5929 				goto out;
5930 			}
5931 			*pagep = page;
5932 			/* Set the outparam pagep and return to the caller to
5933 			 * copy the contents outside the lock. Don't free the
5934 			 * page.
5935 			 */
5936 			goto out;
5937 		}
5938 	} else {
5939 		if (vm_shared &&
5940 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5941 			put_page(*pagep);
5942 			ret = -EEXIST;
5943 			*pagep = NULL;
5944 			goto out;
5945 		}
5946 
5947 		page = alloc_huge_page(dst_vma, dst_addr, 0);
5948 		if (IS_ERR(page)) {
5949 			ret = -ENOMEM;
5950 			*pagep = NULL;
5951 			goto out;
5952 		}
5953 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
5954 				    pages_per_huge_page(h));
5955 		put_page(*pagep);
5956 		*pagep = NULL;
5957 	}
5958 
5959 	/*
5960 	 * The memory barrier inside __SetPageUptodate makes sure that
5961 	 * preceding stores to the page contents become visible before
5962 	 * the set_pte_at() write.
5963 	 */
5964 	__SetPageUptodate(page);
5965 
5966 	/* Add shared, newly allocated pages to the page cache. */
5967 	if (vm_shared && !is_continue) {
5968 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5969 		ret = -EFAULT;
5970 		if (idx >= size)
5971 			goto out_release_nounlock;
5972 
5973 		/*
5974 		 * Serialization between remove_inode_hugepages() and
5975 		 * huge_add_to_page_cache() below happens through the
5976 		 * hugetlb_fault_mutex_table, which here must be held by
5977 		 * the caller.
5978 		 */
5979 		ret = huge_add_to_page_cache(page, mapping, idx);
5980 		if (ret)
5981 			goto out_release_nounlock;
5982 		page_in_pagecache = true;
5983 	}
5984 
5985 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
5986 	spin_lock(ptl);
5987 
5988 	/*
5989 	 * Recheck the i_size after holding PT lock to make sure not
5990 	 * to leave any page mapped (as page_mapped()) beyond the end
5991 	 * of the i_size (remove_inode_hugepages() is strict about
5992 	 * enforcing that). If we bail out here, we'll also leave a
5993 	 * page in the radix tree in the vm_shared case beyond the end
5994 	 * of the i_size, but remove_inode_hugepages() will take care
5995 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
5996 	 */
5997 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5998 	ret = -EFAULT;
5999 	if (idx >= size)
6000 		goto out_release_unlock;
6001 
6002 	ret = -EEXIST;
6003 	/*
6004 	 * We allow overwriting a pte marker: consider the case where both
6005 	 * MISSING|WP are registered; we first wr-protect a none pte which has
6006 	 * no page cache page backing it, then access the page.
6007 	 */
6008 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
6009 		goto out_release_unlock;
6010 
6011 	if (vm_shared) {
6012 		page_dup_file_rmap(page, true);
6013 	} else {
6014 		ClearHPageRestoreReserve(page);
6015 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
6016 	}
6017 
6018 	/*
6019 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6020 	 * with wp flag set, don't set pte write bit.
6021 	 */
6022 	if (wp_copy || (is_continue && !vm_shared))
6023 		writable = 0;
6024 	else
6025 		writable = dst_vma->vm_flags & VM_WRITE;
6026 
6027 	_dst_pte = make_huge_pte(dst_vma, page, writable);
6028 	/*
6029 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6030 	 * extremely important for hugetlbfs for now since swapping is not
6031 	 * supported, but we should still be clear that this page cannot be
6032 	 * thrown away at will, even if the write bit is not set.
6033 	 */
6034 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6035 	_dst_pte = pte_mkyoung(_dst_pte);
6036 
6037 	if (wp_copy)
6038 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6039 
6040 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
6041 
6042 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6043 
6044 	/* No need to invalidate - it was non-present before */
6045 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6046 
6047 	spin_unlock(ptl);
6048 	if (!is_continue)
6049 		SetHPageMigratable(page);
6050 	if (vm_shared || is_continue)
6051 		unlock_page(page);
6052 	ret = 0;
6053 out:
6054 	return ret;
6055 out_release_unlock:
6056 	spin_unlock(ptl);
6057 	if (vm_shared || is_continue)
6058 		unlock_page(page);
6059 out_release_nounlock:
6060 	if (!page_in_pagecache)
6061 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
6062 	put_page(page);
6063 	goto out;
6064 }
6065 #endif /* CONFIG_USERFAULTFD */
6066 
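/*
 * Fill the caller-supplied pages[]/vmas[] arrays with @refs consecutive
 * subpages of @page and the corresponding VMA pointers.
 */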
6067 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
6068 				 int refs, struct page **pages,
6069 				 struct vm_area_struct **vmas)
6070 {
6071 	int nr;
6072 
6073 	for (nr = 0; nr < refs; nr++) {
6074 		if (likely(pages))
6075 			pages[nr] = mem_map_offset(page, nr);
6076 		if (vmas)
6077 			vmas[nr] = vma;
6078 	}
6079 }
6080 
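/*
 * Decide whether follow_hugetlb_page() can use the existing pte or must call
 * hugetlb_fault() first: swap/migration entries, write access to a read-only
 * pte, and GUP unsharing of a shared anonymous page all require a fault.
 */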
6081 static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
6082 					       bool *unshare)
6083 {
6084 	pte_t pteval = huge_ptep_get(pte);
6085 
6086 	*unshare = false;
6087 	if (is_swap_pte(pteval))
6088 		return true;
6089 	if (huge_pte_write(pteval))
6090 		return false;
6091 	if (flags & FOLL_WRITE)
6092 		return true;
6093 	if (gup_must_unshare(flags, pte_page(pteval))) {
6094 		*unshare = true;
6095 		return true;
6096 	}
6097 	return false;
6098 }
6099 
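/*
 * hugetlb counterpart of GUP: walk *nr_pages pages starting at *position in
 * @vma, faulting pages in as needed, and optionally record the struct pages
 * and VMA pointers for the caller.  Returns the number of pages processed,
 * or an error if none were.
 */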
6100 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
6101 			 struct page **pages, struct vm_area_struct **vmas,
6102 			 unsigned long *position, unsigned long *nr_pages,
6103 			 long i, unsigned int flags, int *locked)
6104 {
6105 	unsigned long pfn_offset;
6106 	unsigned long vaddr = *position;
6107 	unsigned long remainder = *nr_pages;
6108 	struct hstate *h = hstate_vma(vma);
6109 	int err = -EFAULT, refs;
6110 
6111 	while (vaddr < vma->vm_end && remainder) {
6112 		pte_t *pte;
6113 		spinlock_t *ptl = NULL;
6114 		bool unshare = false;
6115 		int absent;
6116 		struct page *page;
6117 
6118 		/*
6119 		 * If we have a pending SIGKILL, don't keep faulting pages and
6120 		 * potentially allocating memory.
6121 		 */
6122 		if (fatal_signal_pending(current)) {
6123 			remainder = 0;
6124 			break;
6125 		}
6126 
6127 		/*
6128 		 * Some archs (sparc64, sh*) have multiple pte_t entries for
6129 		 * each hugepage.  We have to make sure we get the
6130 		 * first, for the page indexing below to work.
6131 		 *
6132 		 * Note that page table lock is not held when pte is null.
6133 		 */
6134 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
6135 				      huge_page_size(h));
6136 		if (pte)
6137 			ptl = huge_pte_lock(h, mm, pte);
6138 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
6139 
6140 		/*
6141 		 * When coredumping, it suits get_dump_page if we just return
6142 		 * an error where there's an empty slot with no huge pagecache
6143 		 * to back it.  This way, we avoid allocating a hugepage, and
6144 		 * the sparse dumpfile avoids allocating disk blocks, but its
6145 		 * huge holes still show up with zeroes where they need to be.
6146 		 */
6147 		if (absent && (flags & FOLL_DUMP) &&
6148 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6149 			if (pte)
6150 				spin_unlock(ptl);
6151 			remainder = 0;
6152 			break;
6153 		}
6154 
6155 		/*
6156 		 * We need to call hugetlb_fault for both hugepages under migration
6157 		 * (in which case hugetlb_fault waits for the migration) and
6158 		 * hwpoisoned hugepages (in which case we need to prevent the
6159 		 * caller from accessing them). To do this, we use is_swap_pte
6160 		 * here instead of is_hugetlb_entry_migration and
6161 		 * is_hugetlb_entry_hwpoisoned, because it simply covers both
6162 		 * cases, and because we can't follow correct pages directly
6163 		 * from any kind of swap entry.
6164 		 */
6165 		if (absent ||
6166 		    __follow_hugetlb_must_fault(flags, pte, &unshare)) {
6167 			vm_fault_t ret;
6168 			unsigned int fault_flags = 0;
6169 
6170 			if (pte)
6171 				spin_unlock(ptl);
6172 			if (flags & FOLL_WRITE)
6173 				fault_flags |= FAULT_FLAG_WRITE;
6174 			else if (unshare)
6175 				fault_flags |= FAULT_FLAG_UNSHARE;
6176 			if (locked)
6177 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6178 					FAULT_FLAG_KILLABLE;
6179 			if (flags & FOLL_NOWAIT)
6180 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6181 					FAULT_FLAG_RETRY_NOWAIT;
6182 			if (flags & FOLL_TRIED) {
6183 				/*
6184 				 * Note: FAULT_FLAG_ALLOW_RETRY and
6185 				 * FAULT_FLAG_TRIED can co-exist
6186 				 */
6187 				fault_flags |= FAULT_FLAG_TRIED;
6188 			}
6189 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
6190 			if (ret & VM_FAULT_ERROR) {
6191 				err = vm_fault_to_errno(ret, flags);
6192 				remainder = 0;
6193 				break;
6194 			}
6195 			if (ret & VM_FAULT_RETRY) {
6196 				if (locked &&
6197 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
6198 					*locked = 0;
6199 				*nr_pages = 0;
6200 				/*
6201 				 * VM_FAULT_RETRY must not return an
6202 				 * error, it will return zero
6203 				 * instead.
6204 				 *
6205 				 * No need to update "position" as the
6206 				 * caller will not check it after
6207 				 * *nr_pages is set to 0.
6208 				 */
6209 				return i;
6210 			}
6211 			continue;
6212 		}
6213 
6214 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
6215 		page = pte_page(huge_ptep_get(pte));
6216 
6217 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6218 			       !PageAnonExclusive(page), page);
6219 
6220 		/*
6221 		 * If subpage information is not requested, update counters
6222 		 * and skip the same_page loop below.
6223 		 */
6224 		if (!pages && !vmas && !pfn_offset &&
6225 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6226 		    (remainder >= pages_per_huge_page(h))) {
6227 			vaddr += huge_page_size(h);
6228 			remainder -= pages_per_huge_page(h);
6229 			i += pages_per_huge_page(h);
6230 			spin_unlock(ptl);
6231 			continue;
6232 		}
6233 
6234 		/* vaddr may not be aligned to PAGE_SIZE */
6235 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6236 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
6237 
6238 		if (pages || vmas)
6239 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
6240 					     vma, refs,
6241 					     likely(pages) ? pages + i : NULL,
6242 					     vmas ? vmas + i : NULL);
6243 
6244 		if (pages) {
6245 			/*
6246 			 * try_grab_folio() should always succeed here,
6247 			 * because: a) we hold the ptl lock, and b) we've just
6248 			 * checked that the huge page is present in the page
6249 			 * tables. If the huge page is present, then the tail
6250 			 * pages must also be present. The ptl prevents the
6251 			 * head page and tail pages from being rearranged in
6252 			 * any way. So this page must be available at this
6253 			 * point, unless the page refcount overflowed:
6254 			 */
6255 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
6256 							 flags))) {
6257 				spin_unlock(ptl);
6258 				remainder = 0;
6259 				err = -ENOMEM;
6260 				break;
6261 			}
6262 		}
6263 
6264 		vaddr += (refs << PAGE_SHIFT);
6265 		remainder -= refs;
6266 		i += refs;
6267 
6268 		spin_unlock(ptl);
6269 	}
6270 	*nr_pages = remainder;
6271 	/*
6272 	 * Setting position is actually required only if remainder is
6273 	 * not zero, but it's faster not to add an "if (remainder)"
6274 	 * branch.
6275 	 */
6276 	*position = vaddr;
6277 
6278 	return i ? i : err;
6279 }
6280 
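/*
 * hugetlb counterpart of change_protection(): apply @newprot (and any
 * userfaultfd-wp changes requested in @cp_flags) to every huge pte in
 * [address, end).  Returns the number of base (small) pages whose
 * protection was changed.
 */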
6281 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
6282 		unsigned long address, unsigned long end,
6283 		pgprot_t newprot, unsigned long cp_flags)
6284 {
6285 	struct mm_struct *mm = vma->vm_mm;
6286 	unsigned long start = address;
6287 	pte_t *ptep;
6288 	pte_t pte;
6289 	struct hstate *h = hstate_vma(vma);
6290 	unsigned long pages = 0, psize = huge_page_size(h);
6291 	bool shared_pmd = false;
6292 	struct mmu_notifier_range range;
6293 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6294 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6295 
6296 	/*
6297 	 * In the case of shared PMDs, the area to flush could be beyond
6298 	 * start/end.  Set range.start/range.end to cover the maximum possible
6299 	 * range if PMD sharing is possible.
6300 	 */
6301 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6302 				0, vma, mm, start, end);
6303 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6304 
6305 	BUG_ON(address >= end);
6306 	flush_cache_range(vma, range.start, range.end);
6307 
6308 	mmu_notifier_invalidate_range_start(&range);
6309 	i_mmap_lock_write(vma->vm_file->f_mapping);
6310 	for (; address < end; address += psize) {
6311 		spinlock_t *ptl;
6312 		ptep = huge_pte_offset(mm, address, psize);
6313 		if (!ptep)
6314 			continue;
6315 		ptl = huge_pte_lock(h, mm, ptep);
6316 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
6317 			/*
6318 			 * When uffd-wp is enabled on the vma, unshare
6319 			 * shouldn't happen at all.  Warn if it happens
6320 			 * for any reason.
6321 			 */
6322 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6323 			pages++;
6324 			spin_unlock(ptl);
6325 			shared_pmd = true;
6326 			continue;
6327 		}
6328 		pte = huge_ptep_get(ptep);
6329 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6330 			spin_unlock(ptl);
6331 			continue;
6332 		}
6333 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6334 			swp_entry_t entry = pte_to_swp_entry(pte);
6335 			struct page *page = pfn_swap_entry_to_page(entry);
6336 
6337 			if (!is_readable_migration_entry(entry)) {
6338 				pte_t newpte;
6339 
6340 				if (PageAnon(page))
6341 					entry = make_readable_exclusive_migration_entry(
6342 								swp_offset(entry));
6343 				else
6344 					entry = make_readable_migration_entry(
6345 								swp_offset(entry));
6346 				newpte = swp_entry_to_pte(entry);
6347 				if (uffd_wp)
6348 					newpte = pte_swp_mkuffd_wp(newpte);
6349 				else if (uffd_wp_resolve)
6350 					newpte = pte_swp_clear_uffd_wp(newpte);
6351 				set_huge_swap_pte_at(mm, address, ptep,
6352 						     newpte, psize);
6353 				pages++;
6354 			}
6355 			spin_unlock(ptl);
6356 			continue;
6357 		}
6358 		if (unlikely(pte_marker_uffd_wp(pte))) {
6359 			/*
6360 			 * This is changing a non-present pte into a none pte,
6361 			 * no need for huge_ptep_modify_prot_start/commit().
6362 			 */
6363 			if (uffd_wp_resolve)
6364 				huge_pte_clear(mm, address, ptep, psize);
6365 		}
6366 		if (!huge_pte_none(pte)) {
6367 			pte_t old_pte;
6368 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6369 
6370 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6371 			pte = huge_pte_modify(old_pte, newprot);
6372 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6373 			if (uffd_wp)
6374 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
6375 			else if (uffd_wp_resolve)
6376 				pte = huge_pte_clear_uffd_wp(pte);
6377 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6378 			pages++;
6379 		} else {
6380 			/* None pte */
6381 			if (unlikely(uffd_wp))
6382 				/* Safe to modify directly (none->non-present). */
6383 				set_huge_pte_at(mm, address, ptep,
6384 						make_pte_marker(PTE_MARKER_UFFD_WP));
6385 		}
6386 		spin_unlock(ptl);
6387 	}
6388 	/*
6389 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6390 	 * may have cleared our pud entry and done put_page on the page table:
6391 	 * once we release i_mmap_rwsem, another task can do the final put_page
6392 	 * and that page table be reused and filled with junk.  If we actually
6393 	 * did unshare a page of pmds, flush the range corresponding to the pud.
6394 	 */
6395 	if (shared_pmd)
6396 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6397 	else
6398 		flush_hugetlb_tlb_range(vma, start, end);
6399 	/*
6400 	 * No need to call mmu_notifier_invalidate_range() we are downgrading
6401 	 * page table protection not changing it to point to a new page.
6402 	 *
6403 	 * See Documentation/mm/mmu_notifier.rst
6404 	 */
6405 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6406 	mmu_notifier_invalidate_range_end(&range);
6407 
6408 	return pages << h->order;
6409 }
6410 
6411 /* Return true if reservation was successful, false otherwise.  */
6412 bool hugetlb_reserve_pages(struct inode *inode,
6413 					long from, long to,
6414 					struct vm_area_struct *vma,
6415 					vm_flags_t vm_flags)
6416 {
6417 	long chg, add = -1;
6418 	struct hstate *h = hstate_inode(inode);
6419 	struct hugepage_subpool *spool = subpool_inode(inode);
6420 	struct resv_map *resv_map;
6421 	struct hugetlb_cgroup *h_cg = NULL;
6422 	long gbl_reserve, regions_needed = 0;
6423 
6424 	/* This should never happen */
6425 	if (from > to) {
6426 		VM_WARN(1, "%s called with a negative range\n", __func__);
6427 		return false;
6428 	}
6429 
6430 	/*
6431 	 * Only apply hugepage reservation if asked. At fault time, an
6432 	 * attempt will be made for VM_NORESERVE to allocate a page
6433 	 * without using reserves
6434 	 */
6435 	if (vm_flags & VM_NORESERVE)
6436 		return true;
6437 
6438 	/*
6439 	 * Shared mappings base their reservation on the number of pages that
6440 	 * are already allocated on behalf of the file. Private mappings need
6441 	 * to reserve the full area even if read-only as mprotect() may be
6442 	 * called to make the mapping read-write. Assume !vma is a shm mapping
6443 	 */
6444 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6445 		/*
6446 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6447 		 * called for inodes for which resv_maps were created (see
6448 		 * hugetlbfs_get_inode).
6449 		 */
6450 		resv_map = inode_resv_map(inode);
6451 
6452 		chg = region_chg(resv_map, from, to, &regions_needed);
6453 
6454 	} else {
6455 		/* Private mapping. */
6456 		resv_map = resv_map_alloc();
6457 		if (!resv_map)
6458 			return false;
6459 
6460 		chg = to - from;
6461 
6462 		set_vma_resv_map(vma, resv_map);
6463 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6464 	}
6465 
6466 	if (chg < 0)
6467 		goto out_err;
6468 
6469 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6470 				chg * pages_per_huge_page(h), &h_cg) < 0)
6471 		goto out_err;
6472 
6473 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6474 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6475 		 * of the resv_map.
6476 		 */
6477 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6478 	}
6479 
6480 	/*
6481 	 * There must be enough pages in the subpool for the mapping. If
6482 	 * the subpool has a minimum size, there may be some global
6483 	 * reservations already in place (gbl_reserve).
6484 	 */
6485 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6486 	if (gbl_reserve < 0)
6487 		goto out_uncharge_cgroup;
6488 
6489 	/*
6490 	 * Check enough hugepages are available for the reservation.
6491 	 * Hand the pages back to the subpool if there are not
6492 	 */
6493 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6494 		goto out_put_pages;
6495 
6496 	/*
6497 	 * Account for the reservations made. Shared mappings record regions
6498 	 * that have reservations as they are shared by multiple VMAs.
6499 	 * When the last VMA disappears, the region map says how much
6500 	 * the reservation was and the page cache tells how much of
6501 	 * the reservation was consumed. Private mappings are per-VMA and
6502 	 * only the consumed reservations are tracked. When the VMA
6503 	 * disappears, the original reservation is the VMA size and the
6504 	 * consumed reservations are stored in the map. Hence, nothing
6505 	 * else has to be done for private mappings here
6506 	 */
6507 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6508 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6509 
6510 		if (unlikely(add < 0)) {
6511 			hugetlb_acct_memory(h, -gbl_reserve);
6512 			goto out_put_pages;
6513 		} else if (unlikely(chg > add)) {
6514 			/*
6515 			 * pages in this range were added to the reserve
6516 			 * map between region_chg and region_add.  This
6517 			 * indicates a race with alloc_huge_page.  Adjust
6518 			 * the subpool and reserve counts modified above
6519 			 * based on the difference.
6520 			 */
6521 			long rsv_adjust;
6522 
6523 			/*
6524 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6525 			 * reference to h_cg->css. See comment below for detail.
6526 			 */
6527 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6528 				hstate_index(h),
6529 				(chg - add) * pages_per_huge_page(h), h_cg);
6530 
6531 			rsv_adjust = hugepage_subpool_put_pages(spool,
6532 								chg - add);
6533 			hugetlb_acct_memory(h, -rsv_adjust);
6534 		} else if (h_cg) {
6535 			/*
6536 			 * The file_regions will hold their own reference to
6537 			 * h_cg->css. So we should release the reference held
6538 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6539 			 * done.
6540 			 */
6541 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6542 		}
6543 	}
6544 	return true;
6545 
6546 out_put_pages:
6547 	/* put back original number of pages, chg */
6548 	(void)hugepage_subpool_put_pages(spool, chg);
6549 out_uncharge_cgroup:
6550 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6551 					    chg * pages_per_huge_page(h), h_cg);
6552 out_err:
6553 	if (!vma || vma->vm_flags & VM_MAYSHARE)
6554 		/* Only call region_abort if the region_chg succeeded but the
6555 		 * region_add failed or didn't run.
6556 		 */
6557 		if (chg >= 0 && add < 0)
6558 			region_abort(resv_map, from, to, regions_needed);
6559 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6560 		kref_put(&resv_map->refs, resv_map_release);
6561 	return false;
6562 }
6563 
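/*
 * Undo reservations for [start, end) when pages are removed from an inode
 * (truncate, hole punch or evict): drop the region map entries, adjust
 * i_blocks by @freed, and return unused reservations to the subpool and the
 * global pool.
 */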
6564 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6565 								long freed)
6566 {
6567 	struct hstate *h = hstate_inode(inode);
6568 	struct resv_map *resv_map = inode_resv_map(inode);
6569 	long chg = 0;
6570 	struct hugepage_subpool *spool = subpool_inode(inode);
6571 	long gbl_reserve;
6572 
6573 	/*
6574 	 * Since this routine can be called in the evict inode path for all
6575 	 * hugetlbfs inodes, resv_map could be NULL.
6576 	 */
6577 	if (resv_map) {
6578 		chg = region_del(resv_map, start, end);
6579 		/*
6580 		 * region_del() can fail in the rare case where a region
6581 		 * must be split and another region descriptor can not be
6582 		 * allocated.  If end == LONG_MAX, it will not fail.
6583 		 */
6584 		if (chg < 0)
6585 			return chg;
6586 	}
6587 
6588 	spin_lock(&inode->i_lock);
6589 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6590 	spin_unlock(&inode->i_lock);
6591 
6592 	/*
6593 	 * If the subpool has a minimum size, the number of global
6594 	 * reservations to be released may be adjusted.
6595 	 *
6596 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6597 	 * won't go negative.
6598 	 */
6599 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6600 	hugetlb_acct_memory(h, -gbl_reserve);
6601 
6602 	return 0;
6603 }
6604 
6605 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
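/*
 * If @svma maps the same file range with compatible flags and alignment,
 * return the address in @svma whose PMD page table could be shared with
 * @vma at @addr; return 0 if sharing is not possible.
 */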
6606 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6607 				struct vm_area_struct *vma,
6608 				unsigned long addr, pgoff_t idx)
6609 {
6610 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6611 				svma->vm_start;
6612 	unsigned long sbase = saddr & PUD_MASK;
6613 	unsigned long s_end = sbase + PUD_SIZE;
6614 
6615 	/* Allow segments to share if only one is marked locked */
6616 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6617 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
6618 
6619 	/*
6620 	 * Match the virtual addresses, permissions and the alignment of the
6621 	 * page table page.
6622 	 */
6623 	if (pmd_index(addr) != pmd_index(saddr) ||
6624 	    vm_flags != svm_flags ||
6625 	    !range_in_vma(svma, sbase, s_end))
6626 		return 0;
6627 
6628 	return saddr;
6629 }
6630 
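/* Can the PUD-sized region covering @addr in @vma take part in PMD sharing? */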
6631 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
6632 {
6633 	unsigned long base = addr & PUD_MASK;
6634 	unsigned long end = base + PUD_SIZE;
6635 
6636 	/*
6637 	 * check on proper vm_flags and page table alignment
6638 	 */
6639 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
6640 		return true;
6641 	return false;
6642 }
6643 
6644 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6645 {
6646 #ifdef CONFIG_USERFAULTFD
6647 	if (uffd_disable_huge_pmd_share(vma))
6648 		return false;
6649 #endif
6650 	return vma_shareable(vma, addr);
6651 }
6652 
6653 /*
6654  * Determine if start,end range within vma could be mapped by shared pmd.
6655  * If yes, adjust start and end to cover range associated with possible
6656  * shared pmd mappings.
6657  */
6658 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6659 				unsigned long *start, unsigned long *end)
6660 {
6661 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6662 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6663 
6664 	/*
6665 	 * vma needs to span at least one aligned PUD size, and the range
6666 	 * must be at least partially within it.
6667 	 */
6668 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6669 		(*end <= v_start) || (*start >= v_end))
6670 		return;
6671 
6672 	/* Extend the range to be PUD aligned for a worst case scenario */
6673 	if (*start > v_start)
6674 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6675 
6676 	if (*end < v_end)
6677 		*end = ALIGN(*end, PUD_SIZE);
6678 }
6679 
6680 /*
6681  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
6682  * and returns the corresponding pte. While this is not necessary for the
6683  * !shared pmd case because we can allocate the pmd later as well, it makes the
6684  * code much cleaner.
6685  *
6686  * This routine must be called with i_mmap_rwsem held in at least read mode if
6687  * sharing is possible.  For hugetlbfs, this prevents removal of any page
6688  * table entries associated with the address space.  This is important as we
6689  * are setting up sharing based on existing page table entries (mappings).
6690  */
6691 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6692 		      unsigned long addr, pud_t *pud)
6693 {
6694 	struct address_space *mapping = vma->vm_file->f_mapping;
6695 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6696 			vma->vm_pgoff;
6697 	struct vm_area_struct *svma;
6698 	unsigned long saddr;
6699 	pte_t *spte = NULL;
6700 	pte_t *pte;
6701 	spinlock_t *ptl;
6702 
6703 	i_mmap_assert_locked(mapping);
6704 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6705 		if (svma == vma)
6706 			continue;
6707 
6708 		saddr = page_table_shareable(svma, vma, addr, idx);
6709 		if (saddr) {
6710 			spte = huge_pte_offset(svma->vm_mm, saddr,
6711 					       vma_mmu_pagesize(svma));
6712 			if (spte) {
6713 				get_page(virt_to_page(spte));
6714 				break;
6715 			}
6716 		}
6717 	}
6718 
6719 	if (!spte)
6720 		goto out;
6721 
6722 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
6723 	if (pud_none(*pud)) {
6724 		pud_populate(mm, pud,
6725 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6726 		mm_inc_nr_pmds(mm);
6727 	} else {
6728 		put_page(virt_to_page(spte));
6729 	}
6730 	spin_unlock(ptl);
6731 out:
6732 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
6733 	return pte;
6734 }
6735 
6736 /*
6737  * unmap huge page backed by shared pte.
6738  *
6739  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
6740  * indicated by page_count > 1, unmap is achieved by clearing pud and
6741  * decrementing the ref count. If count == 1, the pte page is not shared.
6742  *
6743  * Called with page table lock held and i_mmap_rwsem held in write mode.
6744  *
6745  * returns: 1 successfully unmapped a shared pte page
6746  *	    0 the underlying pte page is not shared, or it is the last user
6747  */
6748 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6749 					unsigned long *addr, pte_t *ptep)
6750 {
6751 	pgd_t *pgd = pgd_offset(mm, *addr);
6752 	p4d_t *p4d = p4d_offset(pgd, *addr);
6753 	pud_t *pud = pud_offset(p4d, *addr);
6754 
6755 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6756 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
6757 	if (page_count(virt_to_page(ptep)) == 1)
6758 		return 0;
6759 
6760 	pud_clear(pud);
6761 	put_page(virt_to_page(ptep));
6762 	mm_dec_nr_pmds(mm);
6763 	/*
6764 	 * This update of the passed address optimizes loops that sequentially
6765 	 * process addresses in increments of huge page size (PMD_SIZE
6766 	 * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
6767 	 * Update address to the 'last page' in the cleared area so that
6768 	 * calling loop can move to first page past this area.
6769 	 */
6770 	*addr |= PUD_SIZE - PMD_SIZE;
6771 	return 1;
6772 }
6773 
6774 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6775 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6776 		      unsigned long addr, pud_t *pud)
6777 {
6778 	return NULL;
6779 }
6780 
6781 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6782 				unsigned long *addr, pte_t *ptep)
6783 {
6784 	return 0;
6785 }
6786 
6787 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6788 				unsigned long *start, unsigned long *end)
6789 {
6790 }
6791 
6792 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6793 {
6794 	return false;
6795 }
6796 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6797 
6798 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
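/*
 * Generic implementation: allocate page table levels down to the one
 * matching @sz (PUD or PMD), attempting to share an existing PMD page table
 * with other mappings of the same file where possible.
 */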
6799 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
6800 			unsigned long addr, unsigned long sz)
6801 {
6802 	pgd_t *pgd;
6803 	p4d_t *p4d;
6804 	pud_t *pud;
6805 	pte_t *pte = NULL;
6806 
6807 	pgd = pgd_offset(mm, addr);
6808 	p4d = p4d_alloc(mm, pgd, addr);
6809 	if (!p4d)
6810 		return NULL;
6811 	pud = pud_alloc(mm, p4d, addr);
6812 	if (pud) {
6813 		if (sz == PUD_SIZE) {
6814 			pte = (pte_t *)pud;
6815 		} else {
6816 			BUG_ON(sz != PMD_SIZE);
6817 			if (want_pmd_share(vma, addr) && pud_none(*pud))
6818 				pte = huge_pmd_share(mm, vma, addr, pud);
6819 			else
6820 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
6821 		}
6822 	}
6823 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
6824 
6825 	return pte;
6826 }
6827 
6828 /*
6829  * huge_pte_offset() - Walk the page table to resolve the hugepage
6830  * entry at address @addr
6831  *
6832  * Return: Pointer to page table entry (PUD or PMD) for
6833  * address @addr, or NULL if a !p*d_present() entry is encountered and the
6834  * size @sz doesn't match the hugepage size at this level of the page
6835  * table.
6836  */
6837 pte_t *huge_pte_offset(struct mm_struct *mm,
6838 		       unsigned long addr, unsigned long sz)
6839 {
6840 	pgd_t *pgd;
6841 	p4d_t *p4d;
6842 	pud_t *pud;
6843 	pmd_t *pmd;
6844 
6845 	pgd = pgd_offset(mm, addr);
6846 	if (!pgd_present(*pgd))
6847 		return NULL;
6848 	p4d = p4d_offset(pgd, addr);
6849 	if (!p4d_present(*p4d))
6850 		return NULL;
6851 
6852 	pud = pud_offset(p4d, addr);
6853 	if (sz == PUD_SIZE)
6854 		/* must be pud huge, non-present or none */
6855 		return (pte_t *)pud;
6856 	if (!pud_present(*pud))
6857 		return NULL;
6858 	/* must have a valid entry and size to go further */
6859 
6860 	pmd = pmd_offset(pud, addr);
6861 	/* must be pmd huge, non-present or none */
6862 	return (pte_t *)pmd;
6863 }
6864 
6865 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
6866 
6867 /*
6868  * These functions can be overridden if your architecture needs its own
6869  * behavior.
6870  */
6871 struct page * __weak
6872 follow_huge_addr(struct mm_struct *mm, unsigned long address,
6873 			      int write)
6874 {
6875 	return ERR_PTR(-EINVAL);
6876 }
6877 
6878 struct page * __weak
6879 follow_huge_pd(struct vm_area_struct *vma,
6880 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
6881 {
6882 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
6883 	return NULL;
6884 }
6885 
6886 struct page * __weak
6887 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
6888 		pmd_t *pmd, int flags)
6889 {
6890 	struct page *page = NULL;
6891 	spinlock_t *ptl;
6892 	pte_t pte;
6893 
6894 	/*
6895 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
6896 	 * follow_hugetlb_page().
6897 	 */
6898 	if (WARN_ON_ONCE(flags & FOLL_PIN))
6899 		return NULL;
6900 
6901 retry:
6902 	ptl = pmd_lockptr(mm, pmd);
6903 	spin_lock(ptl);
6904 	/*
6905 	 * Make sure that the address range covered by this pmd is not
6906 	 * unmapped by other threads.
6907 	 */
6908 	if (!pmd_huge(*pmd))
6909 		goto out;
6910 	pte = huge_ptep_get((pte_t *)pmd);
6911 	if (pte_present(pte)) {
6912 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
6913 		/*
6914 		 * try_grab_page() should always succeed here, because: a) we
6915 		 * hold the pmd (ptl) lock, and b) we've just checked that the
6916 		 * huge pmd (head) page is present in the page tables. The ptl
6917 		 * prevents the head page and tail pages from being rearranged
6918 		 * in any way. So this page must be available at this point,
6919 		 * unless the page refcount overflowed:
6920 		 */
6921 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
6922 			page = NULL;
6923 			goto out;
6924 		}
6925 	} else {
6926 		if (is_hugetlb_entry_migration(pte)) {
6927 			spin_unlock(ptl);
6928 			__migration_entry_wait_huge((pte_t *)pmd, ptl);
6929 			goto retry;
6930 		}
6931 		/*
6932 		 * A hwpoisoned entry is treated like no_page_table() in
6933 		 * follow_page_mask().
6934 		 */
6935 	}
6936 out:
6937 	spin_unlock(ptl);
6938 	return page;
6939 }
6940 
6941 struct page * __weak
6942 follow_huge_pud(struct mm_struct *mm, unsigned long address,
6943 		pud_t *pud, int flags)
6944 {
6945 	if (flags & (FOLL_GET | FOLL_PIN))
6946 		return NULL;
6947 
6948 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
6949 }
6950 
6951 struct page * __weak
6952 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
6953 {
6954 	if (flags & (FOLL_GET | FOLL_PIN))
6955 		return NULL;
6956 
6957 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
6958 }
6959 
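/*
 * isolate_hugetlb - try to take a hugetlb page off its hstate's active list
 *
 * A summary of the checks below: returns 0 and moves the page to @list on
 * success (a reference is taken and HPageMigratable is cleared), or -EBUSY
 * if the page is not an in-use, migratable hugetlb head page.
 */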
6960 int isolate_hugetlb(struct page *page, struct list_head *list)
6961 {
6962 	int ret = 0;
6963 
6964 	spin_lock_irq(&hugetlb_lock);
6965 	if (!PageHeadHuge(page) ||
6966 	    !HPageMigratable(page) ||
6967 	    !get_page_unless_zero(page)) {
6968 		ret = -EBUSY;
6969 		goto unlock;
6970 	}
6971 	ClearHPageMigratable(page);
6972 	list_move_tail(&page->lru, list);
6973 unlock:
6974 	spin_unlock_irq(&hugetlb_lock);
6975 	return ret;
6976 }
6977 
6978 int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
6979 {
6980 	int ret = 0;
6981 
6982 	*hugetlb = false;
6983 	spin_lock_irq(&hugetlb_lock);
6984 	if (PageHeadHuge(page)) {
6985 		*hugetlb = true;
6986 		if (HPageFreed(page))
6987 			ret = 0;
6988 		else if (HPageMigratable(page))
6989 			ret = get_page_unless_zero(page);
6990 		else
6991 			ret = -EBUSY;
6992 	}
6993 	spin_unlock_irq(&hugetlb_lock);
6994 	return ret;
6995 }
6996 
6997 int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
6998 {
6999 	int ret;
7000 
7001 	spin_lock_irq(&hugetlb_lock);
7002 	ret = __get_huge_page_for_hwpoison(pfn, flags);
7003 	spin_unlock_irq(&hugetlb_lock);
7004 	return ret;
7005 }
7006 
7007 void putback_active_hugepage(struct page *page)
7008 {
7009 	spin_lock_irq(&hugetlb_lock);
7010 	SetHPageMigratable(page);
7011 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
7012 	spin_unlock_irq(&hugetlb_lock);
7013 	put_page(page);
7014 }
7015 
7016 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
7017 {
7018 	struct hstate *h = page_hstate(oldpage);
7019 
7020 	hugetlb_cgroup_migrate(oldpage, newpage);
7021 	set_page_owner_migrate_reason(newpage, reason);
7022 
7023 	/*
7024 	 * Transfer the temporary state of the new huge page.  This goes in
7025 	 * the reverse direction compared to other transitions because the
7026 	 * new page is the one that is going to stick around while the old
7027 	 * one will be freed, so the old page takes over the temporary status.
7028 	 *
7029 	 * Also note that we have to transfer the per-node surplus state
7030 	 * here as well, otherwise the global surplus count will not match
7031 	 * the per-node counts.
7032 	 */
7033 	if (HPageTemporary(newpage)) {
7034 		int old_nid = page_to_nid(oldpage);
7035 		int new_nid = page_to_nid(newpage);
7036 
7037 		SetHPageTemporary(oldpage);
7038 		ClearHPageTemporary(newpage);
7039 
7040 		/*
7041 		 * There is no need to transfer the per-node surplus state
7042 		 * when we stay on the same node.
7043 		 */
7044 		if (new_nid == old_nid)
7045 			return;
7046 		spin_lock_irq(&hugetlb_lock);
7047 		if (h->surplus_huge_pages_node[old_nid]) {
7048 			h->surplus_huge_pages_node[old_nid]--;
7049 			h->surplus_huge_pages_node[new_nid]++;
7050 		}
7051 		spin_unlock_irq(&hugetlb_lock);
7052 	}
7053 }
7054 
7055 /*
7056  * This function unconditionally removes all the shared pmd pgtable entries
7057  * within the given vma's hugetlbfs memory range.
7058  */
7059 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7060 {
7061 	struct hstate *h = hstate_vma(vma);
7062 	unsigned long sz = huge_page_size(h);
7063 	struct mm_struct *mm = vma->vm_mm;
7064 	struct mmu_notifier_range range;
7065 	unsigned long address, start, end;
7066 	spinlock_t *ptl;
7067 	pte_t *ptep;
7068 
7069 	if (!(vma->vm_flags & VM_MAYSHARE))
7070 		return;
7071 
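	/*
	 * PMD page table sharing only ever covers PUD_SIZE-aligned chunks,
	 * so clamp the range we walk to the PUD-aligned portion of the vma.
	 */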
7072 	start = ALIGN(vma->vm_start, PUD_SIZE);
7073 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7074 
7075 	if (start >= end)
7076 		return;
7077 
7078 	flush_cache_range(vma, start, end);
7079 	/*
7080 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
7081 	 * we have already done the PUD_SIZE alignment.
7082 	 */
7083 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
7084 				start, end);
7085 	mmu_notifier_invalidate_range_start(&range);
7086 	i_mmap_lock_write(vma->vm_file->f_mapping);
7087 	for (address = start; address < end; address += PUD_SIZE) {
7088 		unsigned long tmp = address;
7089 
7090 		ptep = huge_pte_offset(mm, address, sz);
7091 		if (!ptep)
7092 			continue;
7093 		ptl = huge_pte_lock(h, mm, ptep);
7094 		/* We don't want 'address' to be changed */
7095 		huge_pmd_unshare(mm, vma, &tmp, ptep);
7096 		spin_unlock(ptl);
7097 	}
7098 	flush_hugetlb_tlb_range(vma, start, end);
7099 	i_mmap_unlock_write(vma->vm_file->f_mapping);
7100 	/*
7101 	 * No need to call mmu_notifier_invalidate_range(), see
7102 	 * Documentation/mm/mmu_notifier.rst.
7103 	 */
7104 	mmu_notifier_invalidate_range_end(&range);
7105 }
7106 
7107 #ifdef CONFIG_CMA
7108 static bool cma_reserve_called __initdata;
7109 
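/*
 * A summary of the formats accepted by the parser below (see also
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	hugetlb_cma=<size>                          one global area, e.g. 4G
 *	hugetlb_cma=<node>:<size>[,<node>:<size>]   per-node areas, e.g. 0:2G,1:2G
 */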
7110 static int __init cmdline_parse_hugetlb_cma(char *p)
7111 {
7112 	int nid, count = 0;
7113 	unsigned long tmp;
7114 	char *s = p;
7115 
7116 	while (*s) {
7117 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7118 			break;
7119 
7120 		if (s[count] == ':') {
7121 			if (tmp >= MAX_NUMNODES)
7122 				break;
7123 			nid = array_index_nospec(tmp, MAX_NUMNODES);
7124 
7125 			s += count + 1;
7126 			tmp = memparse(s, &s);
7127 			hugetlb_cma_size_in_node[nid] = tmp;
7128 			hugetlb_cma_size += tmp;
7129 
7130 			/*
7131 			 * Skip the separator if we have one, otherwise
7132 			 * stop parsing.
7133 			 */
7134 			if (*s == ',')
7135 				s++;
7136 			else
7137 				break;
7138 		} else {
7139 			hugetlb_cma_size = memparse(p, &p);
7140 			break;
7141 		}
7142 	}
7143 
7144 	return 0;
7145 }
7146 
7147 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7148 
7149 void __init hugetlb_cma_reserve(int order)
7150 {
7151 	unsigned long size, reserved, per_node;
7152 	bool node_specific_cma_alloc = false;
7153 	int nid;
7154 
7155 	cma_reserve_called = true;
7156 
7157 	if (!hugetlb_cma_size)
7158 		return;
7159 
7160 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
7161 		if (hugetlb_cma_size_in_node[nid] == 0)
7162 			continue;
7163 
7164 		if (!node_online(nid)) {
7165 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7166 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7167 			hugetlb_cma_size_in_node[nid] = 0;
7168 			continue;
7169 		}
7170 
7171 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7172 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7173 				nid, (PAGE_SIZE << order) / SZ_1M);
7174 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7175 			hugetlb_cma_size_in_node[nid] = 0;
7176 		} else {
7177 			node_specific_cma_alloc = true;
7178 		}
7179 	}
7180 
7181 	/* Validate the CMA size again in case some invalid nodes were specified. */
7182 	if (!hugetlb_cma_size)
7183 		return;
7184 
7185 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7186 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7187 			(PAGE_SIZE << order) / SZ_1M);
7188 		hugetlb_cma_size = 0;
7189 		return;
7190 	}
7191 
7192 	if (!node_specific_cma_alloc) {
7193 		/*
7194 		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7195 		 * allocate 1 GB on each of the first three nodes and ignore the last one.
7196 		 */
7197 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7198 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7199 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7200 	}
7201 
7202 	reserved = 0;
7203 	for_each_online_node(nid) {
7204 		int res;
7205 		char name[CMA_MAX_NAME];
7206 
7207 		if (node_specific_cma_alloc) {
7208 			if (hugetlb_cma_size_in_node[nid] == 0)
7209 				continue;
7210 
7211 			size = hugetlb_cma_size_in_node[nid];
7212 		} else {
7213 			size = min(per_node, hugetlb_cma_size - reserved);
7214 		}
7215 
7216 		size = round_up(size, PAGE_SIZE << order);
7217 
7218 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7219 		/*
7220 		 * Note that 'order per bit' is based on the smallest size that
7221 		 * may be returned to the CMA allocator in the case of
7222 		 * huge page demotion.
7223 		 */
7224 		res = cma_declare_contiguous_nid(0, size, 0,
7225 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
7226 						 0, false, name,
7227 						 &hugetlb_cma[nid], nid);
7228 		if (res) {
7229 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7230 				res, nid);
7231 			continue;
7232 		}
7233 
7234 		reserved += size;
7235 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7236 			size / SZ_1M, nid);
7237 
7238 		if (reserved >= hugetlb_cma_size)
7239 			break;
7240 	}
7241 
7242 	if (!reserved)
7243 		/*
7244 		 * hugetlb_cma_size is used to determine if allocations from
7245 		 * cma are possible.  Set to zero if no cma regions are set up.
7246 		 */
7247 		hugetlb_cma_size = 0;
7248 }
7249 
7250 void __init hugetlb_cma_check(void)
7251 {
7252 	if (!hugetlb_cma_size || cma_reserve_called)
7253 		return;
7254 
7255 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7256 }
7257 
7258 #endif /* CONFIG_CMA */
7259