// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/*
	 * If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

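/*
 * Illustrative sketch (not part of the kernel sources): how a filesystem
 * such as hugetlbfs might drive the subpool lifecycle at mount/unmount
 * time.  The "sb_info" name is hypothetical:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	sb_info->spool = spool;
 *	...
 *	hugepage_put_subpool(sb_info->spool);	// last ref frees the subpool
 */
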
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * differ from the passed value (delta) only in the case where a
 * subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may differ from the passed value (delta) only in
 * the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

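/*
 * Worked example of the minimum size accounting above (illustrative
 * numbers only).  Assume min_hpages = 10, no maximum, and all 10
 * reserves still held (rsv_hpages = 10):
 *
 *	hugepage_subpool_get_pages(spool, 3)
 *		-> rsv_hpages becomes 7, returns 0 (the 3 pages were
 *		   already reserved globally, no global adjustment)
 *	hugepage_subpool_get_pages(spool, 9)
 *		-> rsv_hpages becomes 0, returns 2 (only 7 reserves were
 *		   left, so 2 extra global pages must be obtained)
 *
 * Freeing reverses this: hugepage_subpool_put_pages() returns how many
 * global reservations may be dropped, keeping rsv_hpages capped at
 * min_hpages.
 */
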
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Helper that removes a struct file_region from the resv_map cache and
 * returns it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by the caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/*
		 * pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case,
 * regions_needed will indicate the number of file_regions needed in the
 * cache to carry out the addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/*
	 * In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/*
			 * If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/*
		 * When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/*
		 * Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/*
	 * Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

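/*
 * Worked example (illustrative): if the map already contains the region
 * [2, 5) and add_reservation_in_range() is called for [0, 8) with
 * regions_needed == NULL, entries for [0, 2) and [5, 8) are pulled from
 * the cache and inserted (and then coalesced with their neighbours),
 * and the function returns 5 -- the number of pages that were not yet
 * represented.  With a non-NULL regions_needed, the same walk would
 * just set *regions_needed = 2 and leave the list untouched.
 */
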
/*
 * Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/*
		 * At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/*
		 * region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

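/*
 * Illustrative sketch of the region_chg/region_add/region_abort
 * protocol (not a real caller; error handling elided and the
 * "charge_succeeded" condition is hypothetical):
 *
 *	long chg, regions_needed;
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (charge_succeeded)
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *	else
 *		region_abort(resv, f, t, regions_needed);
 */
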
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and thus will never return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

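/*
 * Worked example (illustrative): with a single region [0, 10) in the
 * map, region_del(resv, 3, 7) must split it.  The original entry is
 * trimmed to [0, 3), a new descriptor [7, 10) is inserted after it, and
 * the function returns 4 -- the number of pages removed.
 */
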
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

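/*
 * Worked example (illustrative): with regions [0, 3) and [5, 9) in the
 * map, region_count(resv, 2, 7) sums the overlaps [2, 3) and [5, 7)
 * and returns 3.
 */
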
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

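/*
 * Worked example (illustrative): for a 2MB hstate (huge_page_shift ==
 * 21, huge_page_order == 9), an address at vm_start + 4MB in a vma with
 * vm_pgoff == 512 (a 2MB file offset in 4KB page units) yields
 * (4MB >> 21) + (512 >> 9) == 2 + 1 == 3 huge pages into the mapping.
 */
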
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

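/*
 * Illustrative sketch (not a real caller): the HPAGE_RESV_* flags live
 * in the low bits of vm_private_data, which are free because a
 * resv_map pointer is at least word-aligned.  Packing and unpacking
 * look roughly like:
 *
 *	set_vma_private_data(vma,
 *			(unsigned long)map | HPAGE_RESV_OWNER);
 *	map = (struct resv_map *)(get_vma_private_data(vma) &
 *			~HPAGE_RESV_MASK);
 */
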
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on a
 * vma of the same size. It should never come here with the last ref on
 * the reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

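/*
 * Summary of the cases above (illustrative, mirrors the code):
 *
 *	VM_NORESERVE set:	reserves only if VM_MAYSHARE && chg == 0
 *	else VM_MAYSHARE:	reserves iff chg == 0
 *	else private mapping:	reserves iff HPAGE_RESV_OWNER && chg == 0
 *	otherwise:		no reserves
 */
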
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_longterm_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node. The pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

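/*
 * Illustrative sketch (not a real caller): a typical round-robin
 * allocation over the allowed nodes using the macro above.  The
 * "some_alloc_function" name is hypothetical:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		folio = some_alloc_function(h, gfp_mask, node);
 *		if (folio)
 *			break;
 *	}
 *
 * Each iteration tries the saved "next" node and advances it, so
 * successive calls naturally interleave allocations across nodes.
 */
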
/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(folio_mapcount_ptr(folio), 0);
	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
	atomic_set(folio_pincount_ptr(folio), 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	folio_set_compound_order(folio, 0);
	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb folio from lists, and update dtor so that the folio appears
 * as just a compound page.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_folio will turn the folio into a
	 * simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when and
	 * after update_and_free_hugetlb_folio is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);
	if (hstate_is_gigantic(h))
		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
	else
		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct folio *folio = page_folio(page);
	struct page *subpage;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list,
		 * treating it as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		hugetlb_clear_page_hwpoison(&folio->page);

	for (i = 0; i < pages_per_huge_page(h); i++) {
		subpage = folio_page(folio, i);
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

/*
 * update_and_free_hugetlb_folio() can be called under any context, so we
 * cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer
 * the actual freeing in a workqueue to avoid using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_folio() will call folio_set_compound_dtor
		 * (folio, NULL_COMPOUND_DTOR), so do not use page_hstate()
		 * directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_page(h, page);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_page(h, &folio->page);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() has already been called but the
	 * workfn hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;
	struct folio *folio;

	list_for_each_entry_safe(page, t_page, list, lru) {
		folio = page_folio(page);
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

1706 void free_huge_page(struct page *page)
1707 {
1708 	/*
1709 	 * Can't pass hstate in here because it is called from the
1710 	 * compound page destructor.
1711 	 */
1712 	struct folio *folio = page_folio(page);
1713 	struct hstate *h = folio_hstate(folio);
1714 	int nid = folio_nid(folio);
1715 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1716 	bool restore_reserve;
1717 	unsigned long flags;
1718 
1719 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1720 	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1721 
1722 	hugetlb_set_folio_subpool(folio, NULL);
1723 	if (folio_test_anon(folio))
1724 		__ClearPageAnonExclusive(&folio->page);
1725 	folio->mapping = NULL;
1726 	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1727 	folio_clear_hugetlb_restore_reserve(folio);
1728 
1729 	/*
1730 	 * If HPageRestoreReserve was set on page, page allocation consumed a
1731 	 * reservation.  If the page was associated with a subpool, there
1732 	 * would have been a page reserved in the subpool before allocation
1733 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1734 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1735 	 * remove the reserved page from the subpool.
1736 	 */
1737 	if (!restore_reserve) {
1738 		/*
1739 		 * A return code of zero implies that the subpool will be
1740 		 * under its minimum size if the reservation is not restored
1741 		 * after the page is freed.  Therefore, force the
1742 		 * restore_reserve operation.
1743 		 */
1744 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1745 			restore_reserve = true;
1746 	}
1747 
1748 	spin_lock_irqsave(&hugetlb_lock, flags);
1749 	folio_clear_hugetlb_migratable(folio);
1750 	hugetlb_cgroup_uncharge_folio(hstate_index(h),
1751 				     pages_per_huge_page(h), folio);
1752 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1753 					  pages_per_huge_page(h), folio);
1754 	if (restore_reserve)
1755 		h->resv_huge_pages++;
1756 
1757 	if (folio_test_hugetlb_temporary(folio)) {
1758 		remove_hugetlb_folio(h, folio, false);
1759 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1760 		update_and_free_hugetlb_folio(h, folio, true);
1761 	} else if (h->surplus_huge_pages_node[nid]) {
1762 		/* remove the page from active list */
1763 		remove_hugetlb_folio(h, folio, true);
1764 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1765 		update_and_free_hugetlb_folio(h, folio, true);
1766 	} else {
1767 		arch_clear_hugepage_flags(page);
1768 		enqueue_hugetlb_folio(h, folio);
1769 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1770 	}
1771 }
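
/*
 * Worked example of the forced restore_reserve above (illustrative):
 * a subpool created with min_hpages == 2 keeps two pages reserved for
 * itself.  When freeing a page would otherwise drop the subpool below
 * that floor, hugepage_subpool_put_pages(spool, 1) retains the page
 * for the subpool and returns 0; free_huge_page() then increments the
 * global resv_huge_pages instead, keeping the subpool and global
 * reserve counts consistent.
 */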
1772 
1773 /*
1774  * Must be called with the hugetlb lock held
1775  */
1776 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1777 {
1778 	lockdep_assert_held(&hugetlb_lock);
1779 	h->nr_huge_pages++;
1780 	h->nr_huge_pages_node[nid]++;
1781 }
1782 
1783 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1784 {
1785 	hugetlb_vmemmap_optimize(h, &folio->page);
1786 	INIT_LIST_HEAD(&folio->lru);
1787 	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
1788 	hugetlb_set_folio_subpool(folio, NULL);
1789 	set_hugetlb_cgroup(folio, NULL);
1790 	set_hugetlb_cgroup_rsvd(folio, NULL);
1791 }
1792 
1793 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1794 {
1795 	__prep_new_hugetlb_folio(h, folio);
1796 	spin_lock_irq(&hugetlb_lock);
1797 	__prep_account_new_huge_page(h, nid);
1798 	spin_unlock_irq(&hugetlb_lock);
1799 }
1800 
1801 static bool __prep_compound_gigantic_folio(struct folio *folio,
1802 					unsigned int order, bool demote)
1803 {
1804 	int i, j;
1805 	int nr_pages = 1 << order;
1806 	struct page *p;
1807 
1808 	/* we rely on prep_new_hugetlb_folio to set the destructor */
1809 	folio_set_compound_order(folio, order);
1810 	__folio_clear_reserved(folio);
1811 	__folio_set_head(folio);
1812 	for (i = 0; i < nr_pages; i++) {
1813 		p = folio_page(folio, i);
1814 
1815 		/*
1816 		 * For gigantic hugepages allocated through bootmem at
1817 		 * boot, it's safer to be consistent with the not-gigantic
1818 		 * hugepages and clear the PG_reserved bit from all tail pages
1819 		 * too.  Otherwise drivers using get_user_pages() to access tail
1820 		 * pages may get the reference counting wrong if they see
1821 		 * PG_reserved set on a tail page (despite the head page not
1822 		 * having PG_reserved set).  Enforcing this consistency between
1823 		 * head and tail pages allows drivers to optimize away a check
1824 		 * on the head page when they need to know whether put_page()
1825 		 * is needed after get_user_pages().
1826 		 */
1827 		if (i != 0)	/* head page cleared above */
1828 			__ClearPageReserved(p);
1829 		/*
1830 		 * Subtle and very unlikely
1831 		 *
1832 		 * Gigantic 'page allocators' such as memblock or cma will
1833 		 * return a set of pages with each page ref counted.  We need
1834 		 * to turn this set of pages into a compound page with tail
1835 		 * page ref counts set to zero.  Code such as speculative page
1836 		 * cache adding could take a ref on a 'to be' tail page.
1837 		 * We need to respect any increased ref count, and only set
1838 		 * the ref count to zero if count is currently 1.  If count
1839 		 * is not 1, we return an error.  An error return indicates
1840 		 * the set of pages can not be converted to a gigantic page.
1841 		 * The caller who allocated the pages should then discard the
1842 		 * pages using the appropriate free interface.
1843 		 *
1844 		 * In the case of demote, the ref count will be zero.
1845 		 */
1846 		if (!demote) {
1847 			if (!page_ref_freeze(p, 1)) {
1848 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1849 				goto out_error;
1850 			}
1851 		} else {
1852 			VM_BUG_ON_PAGE(page_count(p), p);
1853 		}
1854 		if (i != 0)
1855 			set_compound_head(p, &folio->page);
1856 	}
1857 	atomic_set(folio_mapcount_ptr(folio), -1);
1858 	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
1859 	atomic_set(folio_pincount_ptr(folio), 0);
1860 	return true;
1861 
1862 out_error:
1863 	/* undo page modifications made above */
1864 	for (j = 0; j < i; j++) {
1865 		p = folio_page(folio, j);
1866 		if (j != 0)
1867 			clear_compound_head(p);
1868 		set_page_refcounted(p);
1869 	}
1870 	/* need to clear PG_reserved on remaining tail pages  */
1871 	for (; j < nr_pages; j++) {
1872 		p = folio_page(folio, j);
1873 		__ClearPageReserved(p);
1874 	}
1875 	folio_set_compound_order(folio, 0);
1876 	__folio_clear_head(folio);
1877 	return false;
1878 }
1879 
1880 static bool prep_compound_gigantic_folio(struct folio *folio,
1881 							unsigned int order)
1882 {
1883 	return __prep_compound_gigantic_folio(folio, order, false);
1884 }
1885 
1886 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
1887 							unsigned int order)
1888 {
1889 	return __prep_compound_gigantic_folio(folio, order, true);
1890 }
1891 
1892 /*
1893  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1894  * transparent huge pages.  See the PageTransHuge() documentation for more
1895  * details.
1896  */
1897 int PageHuge(struct page *page)
1898 {
1899 	if (!PageCompound(page))
1900 		return 0;
1901 
1902 	page = compound_head(page);
1903 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1904 }
1905 EXPORT_SYMBOL_GPL(PageHuge);
1906 
1907 /*
1908  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1909  * normal or transparent huge pages.
1910  */
1911 int PageHeadHuge(struct page *page_head)
1912 {
1913 	if (!PageHead(page_head))
1914 		return 0;
1915 
1916 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1917 }
1918 EXPORT_SYMBOL_GPL(PageHeadHuge);
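
/*
 * Illustrative contrast between the two helpers above: for a tail page
 * of a hugetlb page, PageHuge() is nonzero (it resolves compound_head()
 * first) while PageHeadHuge() is 0:
 *
 *	struct page *tail = head + 1;	// hypothetical in-bounds tail page
 *
 *	PageHuge(tail);			// 1
 *	PageHeadHuge(tail);		// 0, head page only
 */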
1919 
1920 /*
1921  * Find and lock address space (mapping) in write mode.
1922  *
1923  * Upon entry, the page is locked, which means that page_mapping() is
1924  * stable.  Due to locking order, we can only trylock_write.  If we
1925  * cannot get the lock, simply return NULL to the caller.
1926  */
1927 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1928 {
1929 	struct address_space *mapping = page_mapping(hpage);
1930 
1931 	if (!mapping)
1932 		return mapping;
1933 
1934 	if (i_mmap_trylock_write(mapping))
1935 		return mapping;
1936 
1937 	return NULL;
1938 }
1939 
1940 pgoff_t hugetlb_basepage_index(struct page *page)
1941 {
1942 	struct page *page_head = compound_head(page);
1943 	pgoff_t index = page_index(page_head);
1944 	unsigned long compound_idx;
1945 
1946 	if (compound_order(page_head) >= MAX_ORDER)
1947 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1948 	else
1949 		compound_idx = page - page_head;
1950 
1951 	return (index << compound_order(page_head)) + compound_idx;
1952 }
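
/*
 * Worked example for hugetlb_basepage_index() (illustrative): for a 2MB
 * hugetlb page on a 4KB base page system (compound_order == 9), a head
 * page at file index 3 and a subpage 5 base pages into the compound
 * page give:
 *
 *	(3 << 9) + 5 == 1541
 *
 * i.e. the index in units of base pages.  The pfn-based arithmetic is
 * only needed for gigantic (order >= MAX_ORDER) pages, where the
 * 'page - page_head' pointer difference is not reliable because the
 * memmap is only guaranteed contiguous within a MAX_ORDER block.
 */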
1953 
1954 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
1955 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1956 		nodemask_t *node_alloc_noretry)
1957 {
1958 	int order = huge_page_order(h);
1959 	struct page *page;
1960 	bool alloc_try_hard = true;
1961 	bool retry = true;
1962 
1963 	/*
1964 	 * By default we always try hard to allocate the page with
1965 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1966 	 * a loop (to adjust global huge page counts) and the previous
1967 	 * allocation failed, do not continue to try hard on the same node.
1968 	 * Use the node_alloc_noretry bitmap to manage this state information.
1969 	 */
1970 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1971 		alloc_try_hard = false;
1972 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1973 	if (alloc_try_hard)
1974 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1975 	if (nid == NUMA_NO_NODE)
1976 		nid = numa_mem_id();
1977 retry:
1978 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1979 
1980 	/* Freeze head page */
1981 	if (page && !page_ref_freeze(page, 1)) {
1982 		__free_pages(page, order);
1983 		if (retry) {	/* retry once */
1984 			retry = false;
1985 			goto retry;
1986 		}
1987 		/* WOW!  twice in a row. */
1988 		pr_warn("HugeTLB head page unexpected inflated ref count\n");
1989 		page = NULL;
1990 	}
1991 
1992 	/*
1993 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
1994 	 * this indicates an overall state change.  Clear the bit so that we
1995 	 * resume normal 'try hard' allocations.
1996 	 */
1997 	if (node_alloc_noretry && page && !alloc_try_hard)
1998 		node_clear(nid, *node_alloc_noretry);
1999 
2000 	/*
2001 	 * If we tried hard to get a page but failed, set the bit so that
2002 	 * subsequent attempts will not try as hard until there is an
2003 	 * overall state change.
2004 	 */
2005 	if (node_alloc_noretry && !page && alloc_try_hard)
2006 		node_set(nid, *node_alloc_noretry);
2007 
2008 	if (!page) {
2009 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2010 		return NULL;
2011 	}
2012 
2013 	__count_vm_event(HTLB_BUDDY_PGALLOC);
2014 	return page_folio(page);
2015 }
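
/*
 * Illustrative use of the node_alloc_noretry bitmap described above
 * (hypothetical loop; 'h' and 'target' are assumed, and the real users
 * are the pool resize paths below):
 *
 *	NODEMASK_ALLOC(nodemask_t, noretry, GFP_KERNEL);
 *	int i;
 *
 *	if (noretry)
 *		nodes_clear(*noretry);
 *	for (i = 0; i < target; i++)
 *		if (!alloc_pool_huge_page(h, &node_states[N_MEMORY], noretry))
 *			break;	// nodes that failed are not retried hard
 *	NODEMASK_FREE(noretry);
 */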
2016 
2017 /*
2018  * Common helper to allocate a fresh hugetlb page. All specific allocators
2019  * should use this function to get new hugetlb pages.
2020  *
2021  * Note that the returned page is 'frozen': the ref count of the head page
2022  * and all tail pages is zero.
2023  */
2024 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2025 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
2026 		nodemask_t *node_alloc_noretry)
2027 {
2028 	struct folio *folio;
2029 	bool retry = false;
2030 
2031 retry:
2032 	if (hstate_is_gigantic(h))
2033 		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2034 	else
2035 		folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2036 				nid, nmask, node_alloc_noretry);
2037 	if (!folio)
2038 		return NULL;
2039 	if (hstate_is_gigantic(h)) {
2040 		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2041 			/*
2042 			 * Rare failure to convert pages to compound page.
2043 			 * Free pages and try again - ONCE!
2044 			 */
2045 			free_gigantic_folio(folio, huge_page_order(h));
2046 			if (!retry) {
2047 				retry = true;
2048 				goto retry;
2049 			}
2050 			return NULL;
2051 		}
2052 	}
2053 	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2054 
2055 	return folio;
2056 }
2057 
2058 /*
2059  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
2060  * manner.
2061  */
2062 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2063 				nodemask_t *node_alloc_noretry)
2064 {
2065 	struct folio *folio;
2066 	int nr_nodes, node;
2067 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2068 
2069 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2070 		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2071 					nodes_allowed, node_alloc_noretry);
2072 		if (folio) {
2073 			free_huge_page(&folio->page); /* free it into the hugepage allocator */
2074 			return 1;
2075 		}
2076 	}
2077 
2078 	return 0;
2079 }
2080 
2081 /*
2082  * Remove a huge page from the pool, starting from the next node to free.
2083  * Attempt to keep persistent huge pages more or less balanced over the
2084  * allowed nodes.  This routine only 'removes' the hugetlb page; the caller
2085  * must make an additional call to free the page to the low level allocators.
2086  * Called with hugetlb_lock locked.
2087  */
2088 static struct page *remove_pool_huge_page(struct hstate *h,
2089 						nodemask_t *nodes_allowed,
2090 						 bool acct_surplus)
2091 {
2092 	int nr_nodes, node;
2093 	struct page *page = NULL;
2094 	struct folio *folio;
2095 
2096 	lockdep_assert_held(&hugetlb_lock);
2097 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2098 		/*
2099 		 * If we're returning unused surplus pages, only examine
2100 		 * nodes with surplus pages.
2101 		 */
2102 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2103 		    !list_empty(&h->hugepage_freelists[node])) {
2104 			page = list_entry(h->hugepage_freelists[node].next,
2105 					  struct page, lru);
2106 			folio = page_folio(page);
2107 			remove_hugetlb_folio(h, folio, acct_surplus);
2108 			break;
2109 		}
2110 	}
2111 
2112 	return page;
2113 }
2114 
2115 /*
2116  * Dissolve a given free hugepage into free buddy pages. This function does
2117  * nothing for in-use hugepages and non-hugepages.
2118  * This function returns one of the following values:
2119  *
2120  *  -ENOMEM: failed to allocate vmemmap pages needed to free the hugepage
2121  *           when the system is under memory pressure and the feature of
2122  *           freeing unused vmemmap pages associated with each hugetlb page
2123  *           is enabled.
2124  *  -EBUSY:  failed to dissolve a free hugepage or the hugepage is in use
2125  *           (allocated or reserved).
2126  *       0:  successfully dissolved free hugepages or the page is not a
2127  *           hugepage (considered as already dissolved)
2128  */
2129 int dissolve_free_huge_page(struct page *page)
2130 {
2131 	int rc = -EBUSY;
2132 	struct folio *folio = page_folio(page);
2133 
2134 retry:
2135 	/* Do not disrupt the normal path by needlessly holding hugetlb_lock */
2136 	if (!folio_test_hugetlb(folio))
2137 		return 0;
2138 
2139 	spin_lock_irq(&hugetlb_lock);
2140 	if (!folio_test_hugetlb(folio)) {
2141 		rc = 0;
2142 		goto out;
2143 	}
2144 
2145 	if (!folio_ref_count(folio)) {
2146 		struct hstate *h = folio_hstate(folio);
2147 		if (!available_huge_pages(h))
2148 			goto out;
2149 
2150 		/*
2151 		 * We should make sure that the page is already on the free list
2152 		 * when it is dissolved.
2153 		 */
2154 		if (unlikely(!folio_test_hugetlb_freed(folio))) {
2155 			spin_unlock_irq(&hugetlb_lock);
2156 			cond_resched();
2157 
2158 			/*
2159 			 * Theoretically, we should return -EBUSY when we
2160 			 * encounter this race.  In practice, we have a good
2161 			 * chance of successfully dissolving the page if we
2162 			 * retry, because the race window is quite small.
2163 			 * Seizing this opportunity increases the success
2164 			 * rate of dissolving the page.
2165 			 */
2166 			goto retry;
2167 		}
2168 
2169 		remove_hugetlb_folio(h, folio, false);
2170 		h->max_huge_pages--;
2171 		spin_unlock_irq(&hugetlb_lock);
2172 
2173 		/*
2174 		 * Normally update_and_free_hugetlb_folio will allocate the required
2175 		 * vmemmap before freeing the page.  update_and_free_hugetlb_folio
2176 		 * will fail to free the page if it cannot allocate the required
2177 		 * vmemmap.  We need to adjust max_huge_pages if the page is not
2178 		 * freed.  Attempt to allocate vmemmap here so that we can take
2179 		 * appropriate action on failure.
2180 		 */
2181 		rc = hugetlb_vmemmap_restore(h, &folio->page);
2182 		if (!rc) {
2183 			update_and_free_hugetlb_folio(h, folio, false);
2184 		} else {
2185 			spin_lock_irq(&hugetlb_lock);
2186 			add_hugetlb_folio(h, folio, false);
2187 			h->max_huge_pages++;
2188 			spin_unlock_irq(&hugetlb_lock);
2189 		}
2190 
2191 		return rc;
2192 	}
2193 out:
2194 	spin_unlock_irq(&hugetlb_lock);
2195 	return rc;
2196 }
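
/*
 * Sketch of a caller consuming dissolve_free_huge_page() results
 * (illustrative; the real callers are the memory offline paths):
 *
 *	switch (dissolve_free_huge_page(page)) {
 *	case 0:		// dissolved, or not a hugetlb page at all
 *		break;
 *	case -EBUSY:	// in use (allocated/reserved); may retry later
 *		break;
 *	case -ENOMEM:	// vmemmap could not be restored; give up
 *		break;
 *	}
 */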
2197 
2198 /*
2199  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2200  * make specified memory blocks removable from the system.
2201  * Note that this will dissolve a free gigantic hugepage completely, if any
2202  * part of it lies within the given range.
2203  * Also note that if dissolve_free_huge_page() returns with an error, all
2204  * free hugepages that were dissolved before that error are lost.
2205  */
2206 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2207 {
2208 	unsigned long pfn;
2209 	struct page *page;
2210 	int rc = 0;
2211 	unsigned int order;
2212 	struct hstate *h;
2213 
2214 	if (!hugepages_supported())
2215 		return rc;
2216 
2217 	order = huge_page_order(&default_hstate);
2218 	for_each_hstate(h)
2219 		order = min(order, huge_page_order(h));
2220 
2221 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2222 		page = pfn_to_page(pfn);
2223 		rc = dissolve_free_huge_page(page);
2224 		if (rc)
2225 			break;
2226 	}
2227 
2228 	return rc;
2229 }
2230 
2231 /*
2232  * Allocates a fresh surplus page from the page allocator.
2233  */
2234 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2235 						int nid, nodemask_t *nmask)
2236 {
2237 	struct folio *folio = NULL;
2238 
2239 	if (hstate_is_gigantic(h))
2240 		return NULL;
2241 
2242 	spin_lock_irq(&hugetlb_lock);
2243 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2244 		goto out_unlock;
2245 	spin_unlock_irq(&hugetlb_lock);
2246 
2247 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2248 	if (!folio)
2249 		return NULL;
2250 
2251 	spin_lock_irq(&hugetlb_lock);
2252 	/*
2253 	 * We could have raced with the pool size change.
2254 	 * Double check that and simply deallocate the new page
2255 	 * if we would end up overcommitting the surpluses.  Abuse the
2256 	 * temporary page flag to work around the nasty free_huge_page
2257 	 * code flow.
2258 	 */
2259 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2260 		folio_set_hugetlb_temporary(folio);
2261 		spin_unlock_irq(&hugetlb_lock);
2262 		free_huge_page(&folio->page);
2263 		return NULL;
2264 	}
2265 
2266 	h->surplus_huge_pages++;
2267 	h->surplus_huge_pages_node[folio_nid(folio)]++;
2268 
2269 out_unlock:
2270 	spin_unlock_irq(&hugetlb_lock);
2271 
2272 	return &folio->page;
2273 }
2274 
2275 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2276 				     int nid, nodemask_t *nmask)
2277 {
2278 	struct folio *folio;
2279 
2280 	if (hstate_is_gigantic(h))
2281 		return NULL;
2282 
2283 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2284 	if (!folio)
2285 		return NULL;
2286 
2287 	/* fresh huge pages are frozen */
2288 	folio_ref_unfreeze(folio, 1);
2289 	/*
2290 	 * We do not account these pages as surplus because they are only
2291 	 * temporary and will be released properly on the last reference
2292 	 */
2293 	folio_set_hugetlb_temporary(folio);
2294 
2295 	return &folio->page;
2296 }
2297 
2298 /*
2299  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2300  */
2301 static
2302 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2303 		struct vm_area_struct *vma, unsigned long addr)
2304 {
2305 	struct page *page = NULL;
2306 	struct mempolicy *mpol;
2307 	gfp_t gfp_mask = htlb_alloc_mask(h);
2308 	int nid;
2309 	nodemask_t *nodemask;
2310 
2311 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2312 	if (mpol_is_preferred_many(mpol)) {
2313 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2314 
2315 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2316 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
2317 
2318 		/* Fall back to all nodes if page == NULL */
2319 		nodemask = NULL;
2320 	}
2321 
2322 	if (!page)
2323 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
2324 	mpol_cond_put(mpol);
2325 	return page;
2326 }
2327 
2328 /* page migration callback function */
2329 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2330 		nodemask_t *nmask, gfp_t gfp_mask)
2331 {
2332 	spin_lock_irq(&hugetlb_lock);
2333 	if (available_huge_pages(h)) {
2334 		struct page *page;
2335 
2336 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
2337 		if (page) {
2338 			spin_unlock_irq(&hugetlb_lock);
2339 			return page;
2340 		}
2341 	}
2342 	spin_unlock_irq(&hugetlb_lock);
2343 
2344 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
2345 }
2346 
2347 /* mempolicy aware migration callback */
2348 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2349 		unsigned long address)
2350 {
2351 	struct mempolicy *mpol;
2352 	nodemask_t *nodemask;
2353 	struct page *page;
2354 	gfp_t gfp_mask;
2355 	int node;
2356 
2357 	gfp_mask = htlb_alloc_mask(h);
2358 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2359 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2360 	mpol_cond_put(mpol);
2361 
2362 	return page;
2363 }
2364 
2365 /*
2366  * Increase the hugetlb pool such that it can accommodate a reservation
2367  * of size 'delta'.
2368  */
2369 static int gather_surplus_pages(struct hstate *h, long delta)
2370 	__must_hold(&hugetlb_lock)
2371 {
2372 	LIST_HEAD(surplus_list);
2373 	struct page *page, *tmp;
2374 	int ret;
2375 	long i;
2376 	long needed, allocated;
2377 	bool alloc_ok = true;
2378 
2379 	lockdep_assert_held(&hugetlb_lock);
2380 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2381 	if (needed <= 0) {
2382 		h->resv_huge_pages += delta;
2383 		return 0;
2384 	}
2385 
2386 	allocated = 0;
2387 
2388 	ret = -ENOMEM;
2389 retry:
2390 	spin_unlock_irq(&hugetlb_lock);
2391 	for (i = 0; i < needed; i++) {
2392 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2393 				NUMA_NO_NODE, NULL);
2394 		if (!page) {
2395 			alloc_ok = false;
2396 			break;
2397 		}
2398 		list_add(&page->lru, &surplus_list);
2399 		cond_resched();
2400 	}
2401 	allocated += i;
2402 
2403 	/*
2404 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2405 	 * because either resv_huge_pages or free_huge_pages may have changed.
2406 	 */
2407 	spin_lock_irq(&hugetlb_lock);
2408 	needed = (h->resv_huge_pages + delta) -
2409 			(h->free_huge_pages + allocated);
2410 	if (needed > 0) {
2411 		if (alloc_ok)
2412 			goto retry;
2413 		/*
2414 		 * We were not able to allocate enough pages to
2415 		 * satisfy the entire reservation so we free what
2416 		 * we've allocated so far.
2417 		 */
2418 		goto free;
2419 	}
2420 	/*
2421 	 * The surplus_list now contains _at_least_ the number of extra pages
2422 	 * needed to accommodate the reservation.  Add the appropriate number
2423 	 * of pages to the hugetlb pool and free the extras back to the buddy
2424 	 * allocator.  Commit the entire reservation here to prevent another
2425 	 * process from stealing the pages as they are added to the pool but
2426 	 * before they are reserved.
2427 	 */
2428 	needed += allocated;
2429 	h->resv_huge_pages += delta;
2430 	ret = 0;
2431 
2432 	/* Free the needed pages to the hugetlb pool */
2433 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2434 		if ((--needed) < 0)
2435 			break;
2436 		/* Add the page to the hugetlb allocator */
2437 		enqueue_hugetlb_folio(h, page_folio(page));
2438 	}
2439 free:
2440 	spin_unlock_irq(&hugetlb_lock);
2441 
2442 	/*
2443 	 * Free unnecessary surplus pages to the buddy allocator.
2444 	 * Pages have no ref count; call free_huge_page directly.
2445 	 */
2446 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2447 		free_huge_page(page);
2448 	spin_lock_irq(&hugetlb_lock);
2449 
2450 	return ret;
2451 }
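
/*
 * Worked example of the 'needed' arithmetic above (illustrative): with
 * resv_huge_pages == 10, free_huge_pages == 12 and delta == 5,
 *
 *	needed = (10 + 5) - 12 = 3
 *
 * surplus pages must be allocated.  If, while the lock was dropped for
 * the allocation loop, another thread consumed two free pages, the
 * recalculation under the lock yields (10 + 5) - (10 + 3) = 2 > 0 and
 * the loop retries rather than committing an under-provisioned
 * reservation.
 */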
2452 
2453 /*
2454  * This routine has two main purposes:
2455  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2456  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2457  *    to the associated reservation map.
2458  * 2) Free any unused surplus pages that may have been allocated to satisfy
2459  *    the reservation.  As many as unused_resv_pages may be freed.
2460  */
2461 static void return_unused_surplus_pages(struct hstate *h,
2462 					unsigned long unused_resv_pages)
2463 {
2464 	unsigned long nr_pages;
2465 	struct page *page;
2466 	LIST_HEAD(page_list);
2467 
2468 	lockdep_assert_held(&hugetlb_lock);
2469 	/* Uncommit the reservation */
2470 	h->resv_huge_pages -= unused_resv_pages;
2471 
2472 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2473 		goto out;
2474 
2475 	/*
2476 	 * Part (or even all) of the reservation could have been backed
2477 	 * by pre-allocated pages. Only free surplus pages.
2478 	 */
2479 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2480 
2481 	/*
2482 	 * We want to release as many surplus pages as possible, spread
2483 	 * evenly across all nodes with memory. Iterate across these nodes
2484 	 * until we can no longer free unreserved surplus pages. This occurs
2485 	 * when the nodes with surplus pages have no free pages.
2486 	 * remove_pool_huge_page() will balance the freed pages across the
2487 	 * on-line nodes with memory and will handle the hstate accounting.
2488 	 */
2489 	while (nr_pages--) {
2490 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2491 		if (!page)
2492 			goto out;
2493 
2494 		list_add(&page->lru, &page_list);
2495 	}
2496 
2497 out:
2498 	spin_unlock_irq(&hugetlb_lock);
2499 	update_and_free_pages_bulk(h, &page_list);
2500 	spin_lock_irq(&hugetlb_lock);
2501 }
2502 
2503 
2504 /*
2505  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2506  * are used by the huge page allocation routines to manage reservations.
2507  *
2508  * vma_needs_reservation is called to determine if the huge page at addr
2509  * within the vma has an associated reservation.  If a reservation is
2510  * needed, the value 1 is returned.  The caller is then responsible for
2511  * managing the global reservation and subpool usage counts.  After
2512  * the huge page has been allocated, vma_commit_reservation is called
2513  * to add the page to the reservation map.  If the page allocation fails,
2514  * the reservation must be ended instead of committed.  vma_end_reservation
2515  * is called in such cases.
2516  *
2517  * In the normal case, vma_commit_reservation returns the same value
2518  * as the preceding vma_needs_reservation call.  The only time this
2519  * is not the case is if a reserve map was changed between calls.  It
2520  * is the responsibility of the caller to notice the difference and
2521  * take appropriate action.
2522  *
2523  * vma_add_reservation is used in error paths where a reservation must
2524  * be restored when a newly allocated huge page must be freed.  It is
2525  * to be called after calling vma_needs_reservation to determine if a
2526  * reservation exists.
2527  *
2528  * vma_del_reservation is used in error paths where an entry in the reserve
2529  * map was created during huge page allocation and must be removed.  It is to
2530  * be called after calling vma_needs_reservation to determine if a reservation
2531  * exists.
2532  */
2533 enum vma_resv_mode {
2534 	VMA_NEEDS_RESV,
2535 	VMA_COMMIT_RESV,
2536 	VMA_END_RESV,
2537 	VMA_ADD_RESV,
2538 	VMA_DEL_RESV,
2539 };
2540 static long __vma_reservation_common(struct hstate *h,
2541 				struct vm_area_struct *vma, unsigned long addr,
2542 				enum vma_resv_mode mode)
2543 {
2544 	struct resv_map *resv;
2545 	pgoff_t idx;
2546 	long ret;
2547 	long dummy_out_regions_needed;
2548 
2549 	resv = vma_resv_map(vma);
2550 	if (!resv)
2551 		return 1;
2552 
2553 	idx = vma_hugecache_offset(h, vma, addr);
2554 	switch (mode) {
2555 	case VMA_NEEDS_RESV:
2556 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2557 		/* We assume that vma_reservation_* routines always operate on
2558 		 * 1 page, and that adding a 1 page entry to the resv map can
2559 		 * only ever require 1 region.
2560 		 */
2561 		VM_BUG_ON(dummy_out_regions_needed != 1);
2562 		break;
2563 	case VMA_COMMIT_RESV:
2564 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2565 		/* region_add calls of range 1 should never fail. */
2566 		VM_BUG_ON(ret < 0);
2567 		break;
2568 	case VMA_END_RESV:
2569 		region_abort(resv, idx, idx + 1, 1);
2570 		ret = 0;
2571 		break;
2572 	case VMA_ADD_RESV:
2573 		if (vma->vm_flags & VM_MAYSHARE) {
2574 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2575 			/* region_add calls of range 1 should never fail. */
2576 			VM_BUG_ON(ret < 0);
2577 		} else {
2578 			region_abort(resv, idx, idx + 1, 1);
2579 			ret = region_del(resv, idx, idx + 1);
2580 		}
2581 		break;
2582 	case VMA_DEL_RESV:
2583 		if (vma->vm_flags & VM_MAYSHARE) {
2584 			region_abort(resv, idx, idx + 1, 1);
2585 			ret = region_del(resv, idx, idx + 1);
2586 		} else {
2587 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2588 			/* region_add calls of range 1 should never fail. */
2589 			VM_BUG_ON(ret < 0);
2590 		}
2591 		break;
2592 	default:
2593 		BUG();
2594 	}
2595 
2596 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2597 		return ret;
2598 	/*
2599 	 * We know a private mapping must have HPAGE_RESV_OWNER set.
2600 	 *
2601 	 * In most cases, reserves always exist for private mappings.
2602 	 * However, a file associated with the mapping could have been hole
2603 	 * punched or truncated after reserves were consumed, in which case
2604 	 * a subsequent fault on such a range will not use reserves.
2605 	 * Subtle - The reserve map for private mappings has the
2606 	 * opposite meaning than that of shared mappings.  If NO
2607 	 * entry is in the reserve map, it means a reservation exists.
2608 	 * If an entry exists in the reserve map, it means the
2609 	 * reservation has already been consumed.  As a result, the
2610 	 * return value of this routine is the opposite of the
2611 	 * value returned from reserve map manipulation routines above.
2612 	 */
2613 	if (ret > 0)
2614 		return 0;
2615 	if (ret == 0)
2616 		return 1;
2617 	return ret;
2618 }
2619 
2620 static long vma_needs_reservation(struct hstate *h,
2621 			struct vm_area_struct *vma, unsigned long addr)
2622 {
2623 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2624 }
2625 
2626 static long vma_commit_reservation(struct hstate *h,
2627 			struct vm_area_struct *vma, unsigned long addr)
2628 {
2629 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2630 }
2631 
2632 static void vma_end_reservation(struct hstate *h,
2633 			struct vm_area_struct *vma, unsigned long addr)
2634 {
2635 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2636 }
2637 
2638 static long vma_add_reservation(struct hstate *h,
2639 			struct vm_area_struct *vma, unsigned long addr)
2640 {
2641 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2642 }
2643 
2644 static long vma_del_reservation(struct hstate *h,
2645 			struct vm_area_struct *vma, unsigned long addr)
2646 {
2647 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2648 }
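
/*
 * Illustrative sketch of the reservation protocol described above
 * (hypothetical caller; the real one is alloc_huge_page()):
 *
 *	struct page *page;
 *	long chg = vma_needs_reservation(h, vma, addr);
 *
 *	if (chg < 0)
 *		return -ENOMEM;
 *	page = ...allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);	// abort, not commit
 *		return -ENOSPC;
 *	}
 *	if (vma_commit_reservation(h, vma, addr) != chg)
 *		...handle a racing reserve map change...;
 */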
2649 
2650 /*
2651  * This routine is called to restore reservation information on error paths.
2652  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2653  * the hugetlb mutex should remain held when calling this routine.
2654  *
2655  * It handles two specific cases:
2656  * 1) A reservation was in place and the page consumed the reservation.
2657  *    HPageRestoreReserve is set in the page.
2658  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2659  *    not set.  However, alloc_huge_page always updates the reserve map.
2660  *
2661  * In case 1, free_huge_page later in the error path will increment the
2662  * global reserve count.  But, free_huge_page does not have enough context
2663  * to adjust the reservation map.  This case deals primarily with private
2664  * mappings.  Adjust the reserve map here to be consistent with global
2665  * reserve count adjustments to be made by free_huge_page.  Make sure the
2666  * reserve map indicates there is a reservation present.
2667  *
2668  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2669  */
2670 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2671 			unsigned long address, struct page *page)
2672 {
2673 	long rc = vma_needs_reservation(h, vma, address);
2674 
2675 	if (HPageRestoreReserve(page)) {
2676 		if (unlikely(rc < 0))
2677 			/*
2678 			 * Rare out of memory condition in reserve map
2679 			 * manipulation.  Clear HPageRestoreReserve so that
2680 			 * global reserve count will not be incremented
2681 			 * by free_huge_page.  This will make it appear
2682 			 * as though the reservation for this page was
2683 			 * consumed.  This may prevent the task from
2684 			 * faulting in the page at a later time.  This
2685 			 * is better than inconsistent global huge page
2686 			 * accounting of reserve counts.
2687 			 */
2688 			ClearHPageRestoreReserve(page);
2689 		else if (rc)
2690 			(void)vma_add_reservation(h, vma, address);
2691 		else
2692 			vma_end_reservation(h, vma, address);
2693 	} else {
2694 		if (!rc) {
2695 			/*
2696 			 * This indicates there is an entry in the reserve map
2697 			 * not added by alloc_huge_page.  We know it was added
2698 			 * before the alloc_huge_page call, otherwise
2699 			 * HPageRestoreReserve would be set on the page.
2700 			 * Remove the entry so that a subsequent allocation
2701 			 * does not consume a reservation.
2702 			 */
2703 			rc = vma_del_reservation(h, vma, address);
2704 			if (rc < 0)
2705 				/*
2706 				 * VERY rare out of memory condition.  Since
2707 				 * we can not delete the entry, set
2708 				 * HPageRestoreReserve so that the reserve
2709 				 * count will be incremented when the page
2710 				 * is freed.  This reserve will be consumed
2711 				 * on a subsequent allocation.
2712 				 */
2713 				SetHPageRestoreReserve(page);
2714 		} else if (rc < 0) {
2715 			/*
2716 			 * Rare out of memory condition from
2717 			 * vma_needs_reservation call.  Memory allocation is
2718 			 * only attempted if a new entry is needed.  Therefore,
2719 			 * this implies there is not an entry in the
2720 			 * reserve map.
2721 			 *
2722 			 * For shared mappings, no entry in the map indicates
2723 			 * no reservation.  We are done.
2724 			 */
2725 			if (!(vma->vm_flags & VM_MAYSHARE))
2726 				/*
2727 				 * For private mappings, no entry indicates
2728 				 * a reservation is present.  Since we cannot
2729 				 * add an entry, set HPageRestoreReserve on
2730 				 * the page so the reserve count will be
2731 				 * incremented when freed.  This reserve will
2732 				 * be consumed on a subsequent allocation.
2733 				 */
2734 				SetHPageRestoreReserve(page);
2735 		} else
2736 			/*
2737 			 * No reservation present, do nothing
2738 			 */
2739 			 vma_end_reservation(h, vma, address);
2740 	}
2741 }
2742 
2743 /*
2744  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2745  * the old one
2746  * @h: struct hstate old page belongs to
2747  * @old_folio: Old folio to dissolve
2748  * @list: List to isolate the page in case we need to
2749  * Returns 0 on success, otherwise a negated error.
2750  */
2751 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
2752 			struct folio *old_folio, struct list_head *list)
2753 {
2754 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2755 	int nid = folio_nid(old_folio);
2756 	struct folio *new_folio;
2757 	int ret = 0;
2758 
2759 	/*
2760 	 * Before dissolving the folio, we need to allocate a new one for the
2761 	 * pool to remain stable.  Here, we allocate the folio and 'prep' it
2762 	 * by doing everything but actually updating counters and adding to
2763 	 * the pool.  This simplifies things and lets us do most of the
2764 	 * processing under the lock.
2765 	 */
2766 	new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
2767 	if (!new_folio)
2768 		return -ENOMEM;
2769 	__prep_new_hugetlb_folio(h, new_folio);
2770 
2771 retry:
2772 	spin_lock_irq(&hugetlb_lock);
2773 	if (!folio_test_hugetlb(old_folio)) {
2774 		/*
2775 		 * Freed from under us. Drop new_folio too.
2776 		 */
2777 		goto free_new;
2778 	} else if (folio_ref_count(old_folio)) {
2779 		/*
2780 		 * Someone has grabbed the folio, try to isolate it here.
2781 		 * Fail with -EBUSY if not possible.
2782 		 */
2783 		spin_unlock_irq(&hugetlb_lock);
2784 		ret = isolate_hugetlb(&old_folio->page, list);
2785 		spin_lock_irq(&hugetlb_lock);
2786 		goto free_new;
2787 	} else if (!folio_test_hugetlb_freed(old_folio)) {
2788 		/*
2789 		 * Folio's refcount is 0 but it has not been enqueued in the
2790 		 * freelist yet. The race window is small, so we can succeed here if
2791 		 * we retry.
2792 		 */
2793 		spin_unlock_irq(&hugetlb_lock);
2794 		cond_resched();
2795 		goto retry;
2796 	} else {
2797 		/*
2798 		 * Ok, old_folio is still a genuine free hugepage. Remove it from
2799 		 * the freelist and decrease the counters. These will be
2800 		 * incremented again when calling __prep_account_new_huge_page()
2801 		 * and enqueue_hugetlb_folio() for new_folio. The counters will
2802 		 * remain stable since this happens under the lock.
2803 		 */
2804 		remove_hugetlb_folio(h, old_folio, false);
2805 
2806 		/*
2807 		 * Ref count on new_folio is already zero as it was dropped
2808 		 * earlier.  It can be directly added to the pool free list.
2809 		 */
2810 		__prep_account_new_huge_page(h, nid);
2811 		enqueue_hugetlb_folio(h, new_folio);
2812 
2813 		/*
2814 		 * Folio has been replaced, we can safely free the old one.
2815 		 */
2816 		spin_unlock_irq(&hugetlb_lock);
2817 		update_and_free_hugetlb_folio(h, old_folio, false);
2818 	}
2819 
2820 	return ret;
2821 
2822 free_new:
2823 	spin_unlock_irq(&hugetlb_lock);
2824 	/* Folio has a zero ref count, but needs a ref to be freed */
2825 	folio_ref_unfreeze(new_folio, 1);
2826 	update_and_free_hugetlb_folio(h, new_folio, false);
2827 
2828 	return ret;
2829 }
2830 
2831 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2832 {
2833 	struct hstate *h;
2834 	struct folio *folio = page_folio(page);
2835 	int ret = -EBUSY;
2836 
2837 	/*
2838 	 * The page might have been dissolved from under our feet, so make sure
2839 	 * to carefully check the state under the lock.
2840 	 * Return success when racing as if we dissolved the page ourselves.
2841 	 */
2842 	spin_lock_irq(&hugetlb_lock);
2843 	if (folio_test_hugetlb(folio)) {
2844 		h = folio_hstate(folio);
2845 	} else {
2846 		spin_unlock_irq(&hugetlb_lock);
2847 		return 0;
2848 	}
2849 	spin_unlock_irq(&hugetlb_lock);
2850 
2851 	/*
2852 	 * Fence off gigantic pages as there is a cyclic dependency between
2853 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2854 	 * of bailing out right away without further retrying.
2855 	 */
2856 	if (hstate_is_gigantic(h))
2857 		return -ENOMEM;
2858 
2859 	if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
2860 		ret = 0;
2861 	else if (!folio_ref_count(folio))
2862 		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
2863 
2864 	return ret;
2865 }
2866 
2867 struct page *alloc_huge_page(struct vm_area_struct *vma,
2868 				    unsigned long addr, int avoid_reserve)
2869 {
2870 	struct hugepage_subpool *spool = subpool_vma(vma);
2871 	struct hstate *h = hstate_vma(vma);
2872 	struct page *page;
2873 	struct folio *folio;
2874 	long map_chg, map_commit;
2875 	long gbl_chg;
2876 	int ret, idx;
2877 	struct hugetlb_cgroup *h_cg;
2878 	bool deferred_reserve;
2879 
2880 	idx = hstate_index(h);
2881 	/*
2882 	 * Examine the region/reserve map to determine if the process
2883 	 * has a reservation for the page to be allocated.  A return
2884 	 * code of zero indicates a reservation exists (no change).
2885 	 */
2886 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2887 	if (map_chg < 0)
2888 		return ERR_PTR(-ENOMEM);
2889 
2890 	/*
2891 	 * Processes that did not create the mapping will have no
2892 	 * reserves as indicated by the region/reserve map. Check
2893 	 * that the allocation will not exceed the subpool limit.
2894 	 * Allocations for MAP_NORESERVE mappings also need to be
2895 	 * checked against any subpool limit.
2896 	 */
2897 	if (map_chg || avoid_reserve) {
2898 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2899 		if (gbl_chg < 0) {
2900 			vma_end_reservation(h, vma, addr);
2901 			return ERR_PTR(-ENOSPC);
2902 		}
2903 
2904 		/*
2905 		 * Even though there was no reservation in the region/reserve
2906 		 * map, there could be reservations associated with the
2907 		 * subpool that can be used.  This would be indicated if the
2908 		 * return value of hugepage_subpool_get_pages() is zero.
2909 		 * However, if avoid_reserve is specified we still avoid even
2910 		 * the subpool reservations.
2911 		 */
2912 		if (avoid_reserve)
2913 			gbl_chg = 1;
2914 	}
2915 
2916 	/* If this allocation is not consuming a reservation, charge it now.
2917 	 */
2918 	deferred_reserve = map_chg || avoid_reserve;
2919 	if (deferred_reserve) {
2920 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2921 			idx, pages_per_huge_page(h), &h_cg);
2922 		if (ret)
2923 			goto out_subpool_put;
2924 	}
2925 
2926 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2927 	if (ret)
2928 		goto out_uncharge_cgroup_reservation;
2929 
2930 	spin_lock_irq(&hugetlb_lock);
2931 	/*
2932 	 * gbl_chg is passed to indicate whether or not a page must be taken
2933 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2934 	 * a reservation exists for the allocation.
2935 	 */
2936 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2937 	if (!page) {
2938 		spin_unlock_irq(&hugetlb_lock);
2939 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2940 		if (!page)
2941 			goto out_uncharge_cgroup;
2942 		spin_lock_irq(&hugetlb_lock);
2943 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2944 			SetHPageRestoreReserve(page);
2945 			h->resv_huge_pages--;
2946 		}
2947 		list_add(&page->lru, &h->hugepage_activelist);
2948 		set_page_refcounted(page);
2949 		/* Fall through */
2950 	}
2951 	folio = page_folio(page);
2952 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2953 	/* If allocation is not consuming a reservation, also store the
2954 	 * hugetlb_cgroup pointer on the page.
2955 	 */
2956 	if (deferred_reserve) {
2957 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2958 						  h_cg, page);
2959 	}
2960 
2961 	spin_unlock_irq(&hugetlb_lock);
2962 
2963 	hugetlb_set_page_subpool(page, spool);
2964 
2965 	map_commit = vma_commit_reservation(h, vma, addr);
2966 	if (unlikely(map_chg > map_commit)) {
2967 		/*
2968 		 * The page was added to the reservation map between
2969 		 * vma_needs_reservation and vma_commit_reservation.
2970 		 * This indicates a race with hugetlb_reserve_pages.
2971 		 * Adjust for the subpool count incremented above AND
2972 		 * in hugetlb_reserve_pages for the same page.  Also,
2973 		 * the reservation count added in hugetlb_reserve_pages
2974 		 * no longer applies.
2975 		 */
2976 		long rsv_adjust;
2977 
2978 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2979 		hugetlb_acct_memory(h, -rsv_adjust);
2980 		if (deferred_reserve)
2981 			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
2982 					pages_per_huge_page(h), folio);
2983 	}
2984 	return page;
2985 
2986 out_uncharge_cgroup:
2987 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2988 out_uncharge_cgroup_reservation:
2989 	if (deferred_reserve)
2990 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2991 						    h_cg);
2992 out_subpool_put:
2993 	if (map_chg || avoid_reserve)
2994 		hugepage_subpool_put_pages(spool, 1);
2995 	vma_end_reservation(h, vma, addr);
2996 	return ERR_PTR(-ENOSPC);
2997 }
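
/*
 * Example of consuming alloc_huge_page() (illustrative snippet modelled
 * on the fault path).  Failures are returned as ERR_PTR values, so the
 * result must be tested with IS_ERR() rather than compared to NULL:
 *
 *	struct page *page = alloc_huge_page(vma, haddr, 0);
 *
 *	if (IS_ERR(page))
 *		return vmf_error(PTR_ERR(page));
 */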
2998 
2999 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3000 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3001 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3002 {
3003 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
3004 	int nr_nodes, node;
3005 
3006 	/* do node specific alloc */
3007 	if (nid != NUMA_NO_NODE) {
3008 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3009 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3010 		if (!m)
3011 			return 0;
3012 		goto found;
3013 	}
3014 	/* allocate from next node when distributing huge pages */
3015 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3016 		m = memblock_alloc_try_nid_raw(
3017 				huge_page_size(h), huge_page_size(h),
3018 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3019 		/*
3020 		 * Use the beginning of the huge page to store the
3021 		 * huge_bootmem_page struct (until gather_bootmem
3022 		 * puts them into the mem_map).
3023 		 */
3024 		if (!m)
3025 			return 0;
3026 		goto found;
3027 	}
3028 
3029 found:
3030 	/* Put them into a private list first because mem_map is not up yet */
3031 	INIT_LIST_HEAD(&m->list);
3032 	list_add(&m->list, &huge_boot_pages);
3033 	m->hstate = h;
3034 	return 1;
3035 }
3036 
3037 /*
3038  * Put bootmem huge pages into the standard lists after mem_map is up.
3039  * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
3040  */
3041 static void __init gather_bootmem_prealloc(void)
3042 {
3043 	struct huge_bootmem_page *m;
3044 
3045 	list_for_each_entry(m, &huge_boot_pages, list) {
3046 		struct page *page = virt_to_page(m);
3047 		struct folio *folio = page_folio(page);
3048 		struct hstate *h = m->hstate;
3049 
3050 		VM_BUG_ON(!hstate_is_gigantic(h));
3051 		WARN_ON(folio_ref_count(folio) != 1);
3052 		if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3053 			WARN_ON(folio_test_reserved(folio));
3054 			prep_new_hugetlb_folio(h, folio, folio_nid(folio));
3055 			free_huge_page(page); /* add to the hugepage allocator */
3056 		} else {
3057 			/* VERY unlikely inflated ref count on a tail page */
3058 			free_gigantic_folio(folio, huge_page_order(h));
3059 		}
3060 
3061 		/*
3062 		 * We need to restore the 'stolen' pages to totalram_pages
3063 		 * in order to fix confusing memory reports from free(1) and
3064 		 * other side-effects, like CommitLimit going negative.
3065 		 */
3066 		adjust_managed_page_count(page, pages_per_huge_page(h));
3067 		cond_resched();
3068 	}
3069 }
3070 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3071 {
3072 	unsigned long i;
3073 	char buf[32];
3074 
3075 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3076 		if (hstate_is_gigantic(h)) {
3077 			if (!alloc_bootmem_huge_page(h, nid))
3078 				break;
3079 		} else {
3080 			struct folio *folio;
3081 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3082 
3083 			folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3084 					&node_states[N_MEMORY], NULL);
3085 			if (!folio)
3086 				break;
3087 			free_huge_page(&folio->page); /* free it into the hugepage allocator */
3088 		}
3089 		cond_resched();
3090 	}
3091 	if (i == h->max_huge_pages_node[nid])
3092 		return;
3093 
3094 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3095 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3096 		h->max_huge_pages_node[nid], buf, nid, i);
3097 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3098 	h->max_huge_pages_node[nid] = i;
3099 }
3100 
3101 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3102 {
3103 	unsigned long i;
3104 	nodemask_t *node_alloc_noretry;
3105 	bool node_specific_alloc = false;
3106 
3107 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3108 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3109 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3110 		return;
3111 	}
3112 
3113 	/* do node specific alloc */
3114 	for_each_online_node(i) {
3115 		if (h->max_huge_pages_node[i] > 0) {
3116 			hugetlb_hstate_alloc_pages_onenode(h, i);
3117 			node_specific_alloc = true;
3118 		}
3119 	}
3120 
3121 	if (node_specific_alloc)
3122 		return;
3123 
3124 	/* below will do all node balanced alloc */
3125 	if (!hstate_is_gigantic(h)) {
3126 		/*
3127 		 * Bit mask controlling how hard we retry per-node allocations.
3128 		 * Ignore errors as lower level routines can deal with
3129 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3130 		 * time, we are likely in bigger trouble.
3131 		 */
3132 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3133 						GFP_KERNEL);
3134 	} else {
3135 		/* allocations done at boot time */
3136 		node_alloc_noretry = NULL;
3137 	}
3138 
3139 	/* bit mask controlling how hard we retry per-node allocations */
3140 	if (node_alloc_noretry)
3141 		nodes_clear(*node_alloc_noretry);
3142 
3143 	for (i = 0; i < h->max_huge_pages; ++i) {
3144 		if (hstate_is_gigantic(h)) {
3145 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3146 				break;
3147 		} else if (!alloc_pool_huge_page(h,
3148 					 &node_states[N_MEMORY],
3149 					 node_alloc_noretry))
3150 			break;
3151 		cond_resched();
3152 	}
3153 	if (i < h->max_huge_pages) {
3154 		char buf[32];
3155 
3156 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3157 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3158 			h->max_huge_pages, buf, i);
3159 		h->max_huge_pages = i;
3160 	}
3161 	kfree(node_alloc_noretry);
3162 }
3163 
3164 static void __init hugetlb_init_hstates(void)
3165 {
3166 	struct hstate *h, *h2;
3167 
3168 	for_each_hstate(h) {
3169 		/* oversize hugepages were init'ed in early boot */
3170 		if (!hstate_is_gigantic(h))
3171 			hugetlb_hstate_alloc_pages(h);
3172 
3173 		/*
3174 		 * Set demote order for each hstate.  Note that
3175 		 * h->demote_order is initially 0.
3176 		 * - We can not demote gigantic pages if runtime freeing
3177 		 *   is not supported, so skip this.
3178 		 * - If CMA allocation is possible, we can not demote
3179 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3180 		 */
3181 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3182 			continue;
3183 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3184 			continue;
3185 		for_each_hstate(h2) {
3186 			if (h2 == h)
3187 				continue;
3188 			if (h2->order < h->order &&
3189 			    h2->order > h->demote_order)
3190 				h->demote_order = h2->order;
3191 		}
3192 	}
3193 }
3194 
3195 static void __init report_hugepages(void)
3196 {
3197 	struct hstate *h;
3198 
3199 	for_each_hstate(h) {
3200 		char buf[32];
3201 
3202 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3203 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3204 			buf, h->free_huge_pages);
3205 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3206 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3207 	}
3208 }
3209 
3210 #ifdef CONFIG_HIGHMEM
3211 static void try_to_free_low(struct hstate *h, unsigned long count,
3212 						nodemask_t *nodes_allowed)
3213 {
3214 	int i;
3215 	LIST_HEAD(page_list);
3216 
3217 	lockdep_assert_held(&hugetlb_lock);
3218 	if (hstate_is_gigantic(h))
3219 		return;
3220 
3221 	/*
3222 	 * Collect pages to be freed on a list, and free after dropping lock
3223 	 */
3224 	for_each_node_mask(i, *nodes_allowed) {
3225 		struct page *page, *next;
3226 		struct list_head *freel = &h->hugepage_freelists[i];
3227 		list_for_each_entry_safe(page, next, freel, lru) {
3228 			if (count >= h->nr_huge_pages)
3229 				goto out;
3230 			if (PageHighMem(page))
3231 				continue;
3232 			remove_hugetlb_folio(h, page_folio(page), false);
3233 			list_add(&page->lru, &page_list);
3234 		}
3235 	}
3236 
3237 out:
3238 	spin_unlock_irq(&hugetlb_lock);
3239 	update_and_free_pages_bulk(h, &page_list);
3240 	spin_lock_irq(&hugetlb_lock);
3241 }
3242 #else
3243 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3244 						nodemask_t *nodes_allowed)
3245 {
3246 }
3247 #endif
3248 
3249 /*
3250  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3251  * balanced by operating on them in a round-robin fashion.
3252  * Returns 1 if an adjustment was made.
3253  */
3254 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3255 				int delta)
3256 {
3257 	int nr_nodes, node;
3258 
3259 	lockdep_assert_held(&hugetlb_lock);
3260 	VM_BUG_ON(delta != -1 && delta != 1);
3261 
3262 	if (delta < 0) {
3263 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3264 			if (h->surplus_huge_pages_node[node])
3265 				goto found;
3266 		}
3267 	} else {
3268 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3269 			if (h->surplus_huge_pages_node[node] <
3270 					h->nr_huge_pages_node[node])
3271 				goto found;
3272 		}
3273 	}
3274 	return 0;
3275 
3276 found:
3277 	h->surplus_huge_pages += delta;
3278 	h->surplus_huge_pages_node[node] += delta;
3279 	return 1;
3280 }
3281 
3282 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
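
/*
 * Example (illustrative): with nr_huge_pages == 10, of which
 * surplus_huge_pages == 2 were allocated for overcommit,
 * persistent_huge_pages(h) == 8; only the persistent pages count
 * against the max_huge_pages target adjusted below.
 */
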
3283 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3284 			      nodemask_t *nodes_allowed)
3285 {
3286 	unsigned long min_count, ret;
3287 	struct page *page;
3288 	LIST_HEAD(page_list);
3289 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3290 
3291 	/*
3292 	 * Bit mask controlling how hard we retry per-node allocations.
3293 	 * If we can not allocate the bit mask, do not attempt to allocate
3294 	 * the requested huge pages.
3295 	 */
3296 	if (node_alloc_noretry)
3297 		nodes_clear(*node_alloc_noretry);
3298 	else
3299 		return -ENOMEM;
3300 
3301 	/*
3302 	 * resize_lock mutex prevents concurrent adjustments to number of
3303 	 * pages in hstate via the proc/sysfs interfaces.
3304 	 */
3305 	mutex_lock(&h->resize_lock);
3306 	flush_free_hpage_work(h);
3307 	spin_lock_irq(&hugetlb_lock);
3308 
3309 	/*
3310 	 * Check for a node specific request.
3311 	 * Changing node specific huge page count may require a corresponding
3312 	 * change to the global count.  In any case, the passed node mask
3313 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3314 	 */
3315 	if (nid != NUMA_NO_NODE) {
3316 		unsigned long old_count = count;
3317 
3318 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3319 		/*
3320 		 * User may have specified a large count value which caused the
3321 		 * above calculation to overflow.  In this case, they wanted
3322 		 * to allocate as many huge pages as possible.  Set count to
3323 		 * largest possible value to align with their intention.
3324 		 */
3325 		if (count < old_count)
3326 			count = ULONG_MAX;
3327 	}
3328 
3329 	/*
3330 	 * Runtime allocation of gigantic pages depends on the capability to
3331 	 * allocate large page ranges.
3332 	 * If the system does not provide this feature, return an error when
3333 	 * the user tries to allocate gigantic pages, but let the user free
3334 	 * the boot-time allocated gigantic pages.
3335 	 */
3336 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3337 		if (count > persistent_huge_pages(h)) {
3338 			spin_unlock_irq(&hugetlb_lock);
3339 			mutex_unlock(&h->resize_lock);
3340 			NODEMASK_FREE(node_alloc_noretry);
3341 			return -EINVAL;
3342 		}
3343 		/* Fall through to decrease pool */
3344 	}
3345 
3346 	/*
3347 	 * Increase the pool size
3348 	 * First take pages out of surplus state.  Then make up the
3349 	 * remaining difference by allocating fresh huge pages.
3350 	 *
3351 	 * We might race with alloc_surplus_huge_page() here and be unable
3352 	 * to convert a surplus huge page to a normal huge page. That is
3353 	 * not critical, though; it just means the overall size of the
3354 	 * pool might be one hugepage larger than it needs to be, but
3355 	 * within all the constraints specified by the sysctls.
3356 	 */
3357 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3358 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3359 			break;
3360 	}
3361 
3362 	while (count > persistent_huge_pages(h)) {
3363 		/*
3364 		 * If this allocation races such that we no longer need the
3365 		 * page, free_huge_page will handle it by freeing the page
3366 		 * and reducing the surplus.
3367 		 */
3368 		spin_unlock_irq(&hugetlb_lock);
3369 
3370 		/* yield cpu to avoid soft lockup */
3371 		cond_resched();
3372 
3373 		ret = alloc_pool_huge_page(h, nodes_allowed,
3374 						node_alloc_noretry);
3375 		spin_lock_irq(&hugetlb_lock);
3376 		if (!ret)
3377 			goto out;
3378 
3379 		/* Bail for signals. Probably ctrl-c from user */
3380 		if (signal_pending(current))
3381 			goto out;
3382 	}
3383 
3384 	/*
3385 	 * Decrease the pool size
3386 	 * First return free pages to the buddy allocator (being careful
3387 	 * to keep enough around to satisfy reservations).  Then place
3388 	 * pages into surplus state as needed so the pool will shrink
3389 	 * to the desired size as pages become free.
3390 	 *
3391 	 * By placing pages into the surplus state independent of the
3392 	 * overcommit value, we are allowing the surplus pool size to
3393 	 * exceed overcommit. There are few sane options here. Since
3394 	 * alloc_surplus_huge_page() is checking the global counter,
3395 	 * though, we'll note that we're not allowed to exceed surplus
3396 	 * and won't grow the pool anywhere else. Not until one of the
3397 	 * sysctls are changed, or the surplus pages go out of use.
3398 	 */
3399 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3400 	min_count = max(count, min_count);
3401 	try_to_free_low(h, min_count, nodes_allowed);
3402 
3403 	/*
3404 	 * Collect pages to be removed on list without dropping lock
3405 	 */
3406 	while (min_count < persistent_huge_pages(h)) {
3407 		page = remove_pool_huge_page(h, nodes_allowed, 0);
3408 		if (!page)
3409 			break;
3410 
3411 		list_add(&page->lru, &page_list);
3412 	}
3413 	/* free the pages after dropping lock */
3414 	spin_unlock_irq(&hugetlb_lock);
3415 	update_and_free_pages_bulk(h, &page_list);
3416 	flush_free_hpage_work(h);
3417 	spin_lock_irq(&hugetlb_lock);
3418 
3419 	while (count < persistent_huge_pages(h)) {
3420 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3421 			break;
3422 	}
3423 out:
3424 	h->max_huge_pages = persistent_huge_pages(h);
3425 	spin_unlock_irq(&hugetlb_lock);
3426 	mutex_unlock(&h->resize_lock);
3427 
3428 	NODEMASK_FREE(node_alloc_noretry);
3429 
3430 	return 0;
3431 }
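/*
 * Illustrative sketch, not part of the kernel source: set_max_huge_pages()
 * is normally reached from userspace via sysfs (or the vm.nr_hugepages
 * sysctl).  A minimal userspace resizer might look like the following;
 * the sysfs path assumes an x86-64 system with 2048kB huge pages.
 */
#if 0	/* example only, never built with the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_pool_size(unsigned long count)
{
	const char *path =
		"/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages";
	char buf[32];
	int fd, len, ret = 0;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%lu", count);
	/* The kernel side of this write lands in set_max_huge_pages(). */
	if (write(fd, buf, len) != len)
		ret = -1;
	close(fd);
	return ret;
}
#endif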
3432 
3433 static int demote_free_huge_page(struct hstate *h, struct page *page)
3434 {
3435 	int i, nid = page_to_nid(page);
3436 	struct hstate *target_hstate;
3437 	struct folio *folio = page_folio(page);
3438 	struct page *subpage;
3439 	int rc = 0;
3440 
3441 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3442 
3443 	remove_hugetlb_folio_for_demote(h, folio, false);
3444 	spin_unlock_irq(&hugetlb_lock);
3445 
3446 	rc = hugetlb_vmemmap_restore(h, page);
3447 	if (rc) {
3448 		/* Allocation of vmemmap failed, we cannot demote the page */
3449 		spin_lock_irq(&hugetlb_lock);
3450 		set_page_refcounted(page);
3451 		add_hugetlb_folio(h, page_folio(page), false);
3452 		return rc;
3453 	}
3454 
3455 	/*
3456 	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3457 	 * sizes as it will not ref count pages.
3458 	 */
3459 	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3460 
3461 	/*
3462 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3463 	 * Without the mutex, pages added to target hstate could be marked
3464 	 * as surplus.
3465 	 *
3466 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
3467 	 * use the convention of always taking larger size hstate mutex first.
3468 	 */
3469 	mutex_lock(&target_hstate->resize_lock);
3470 	for (i = 0; i < pages_per_huge_page(h);
3471 				i += pages_per_huge_page(target_hstate)) {
3472 		subpage = nth_page(page, i);
3473 		folio = page_folio(subpage);
3474 		if (hstate_is_gigantic(target_hstate))
3475 			prep_compound_gigantic_folio_for_demote(folio,
3476 							target_hstate->order);
3477 		else
3478 			prep_compound_page(subpage, target_hstate->order);
3479 		set_page_private(subpage, 0);
3480 		prep_new_hugetlb_folio(target_hstate, folio, nid);
3481 		free_huge_page(subpage);
3482 	}
3483 	mutex_unlock(&target_hstate->resize_lock);
3484 
3485 	spin_lock_irq(&hugetlb_lock);
3486 
3487 	/*
3488 	 * Not absolutely necessary, but for consistency update max_huge_pages
3489 	 * based on pool changes for the demoted page.
3490 	 */
3491 	h->max_huge_pages--;
3492 	target_hstate->max_huge_pages +=
3493 		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3494 
3495 	return rc;
3496 }
3497 
3498 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3499 	__must_hold(&hugetlb_lock)
3500 {
3501 	int nr_nodes, node;
3502 	struct page *page;
3503 
3504 	lockdep_assert_held(&hugetlb_lock);
3505 
3506 	/* We should never get here if no demote order */
3507 	if (!h->demote_order) {
3508 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3509 		return -EINVAL;		/* internal error */
3510 	}
3511 
3512 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3513 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
3514 			if (PageHWPoison(page))
3515 				continue;
3516 
3517 			return demote_free_huge_page(h, page);
3518 		}
3519 	}
3520 
3521 	/*
3522 	 * Only way to get here is if all pages on free lists are poisoned.
3523 	 * Return -EBUSY so that caller will not retry.
3524 	 */
3525 	return -EBUSY;
3526 }
3527 
3528 #define HSTATE_ATTR_RO(_name) \
3529 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3530 
3531 #define HSTATE_ATTR_WO(_name) \
3532 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3533 
3534 #define HSTATE_ATTR(_name) \
3535 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3536 
3537 static struct kobject *hugepages_kobj;
3538 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3539 
3540 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3541 
3542 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3543 {
3544 	int i;
3545 
3546 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
3547 		if (hstate_kobjs[i] == kobj) {
3548 			if (nidp)
3549 				*nidp = NUMA_NO_NODE;
3550 			return &hstates[i];
3551 		}
3552 
3553 	return kobj_to_node_hstate(kobj, nidp);
3554 }
3555 
3556 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3557 					struct kobj_attribute *attr, char *buf)
3558 {
3559 	struct hstate *h;
3560 	unsigned long nr_huge_pages;
3561 	int nid;
3562 
3563 	h = kobj_to_hstate(kobj, &nid);
3564 	if (nid == NUMA_NO_NODE)
3565 		nr_huge_pages = h->nr_huge_pages;
3566 	else
3567 		nr_huge_pages = h->nr_huge_pages_node[nid];
3568 
3569 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3570 }
3571 
3572 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3573 					   struct hstate *h, int nid,
3574 					   unsigned long count, size_t len)
3575 {
3576 	int err;
3577 	nodemask_t nodes_allowed, *n_mask;
3578 
3579 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3580 		return -EINVAL;
3581 
3582 	if (nid == NUMA_NO_NODE) {
3583 		/*
3584 		 * global hstate attribute
3585 		 */
3586 		if (!(obey_mempolicy &&
3587 				init_nodemask_of_mempolicy(&nodes_allowed)))
3588 			n_mask = &node_states[N_MEMORY];
3589 		else
3590 			n_mask = &nodes_allowed;
3591 	} else {
3592 		/*
3593 		 * Node specific request.  count adjustment happens in
3594 		 * set_max_huge_pages() after acquiring hugetlb_lock.
3595 		 */
3596 		init_nodemask_of_node(&nodes_allowed, nid);
3597 		n_mask = &nodes_allowed;
3598 	}
3599 
3600 	err = set_max_huge_pages(h, count, nid, n_mask);
3601 
3602 	return err ? err : len;
3603 }
3604 
3605 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3606 					 struct kobject *kobj, const char *buf,
3607 					 size_t len)
3608 {
3609 	struct hstate *h;
3610 	unsigned long count;
3611 	int nid;
3612 	int err;
3613 
3614 	err = kstrtoul(buf, 10, &count);
3615 	if (err)
3616 		return err;
3617 
3618 	h = kobj_to_hstate(kobj, &nid);
3619 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3620 }
3621 
3622 static ssize_t nr_hugepages_show(struct kobject *kobj,
3623 				       struct kobj_attribute *attr, char *buf)
3624 {
3625 	return nr_hugepages_show_common(kobj, attr, buf);
3626 }
3627 
3628 static ssize_t nr_hugepages_store(struct kobject *kobj,
3629 	       struct kobj_attribute *attr, const char *buf, size_t len)
3630 {
3631 	return nr_hugepages_store_common(false, kobj, buf, len);
3632 }
3633 HSTATE_ATTR(nr_hugepages);
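/*
 * For reference, a sketch of what HSTATE_ATTR(nr_hugepages) above expands
 * to, assuming the usual __ATTR_RW() definition from <linux/sysfs.h>:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */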
3634 
3635 #ifdef CONFIG_NUMA
3636 
3637 /*
3638  * hstate attribute for optionally mempolicy-based constraint on persistent
3639  * huge page alloc/free.
3640  */
3641 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3642 					   struct kobj_attribute *attr,
3643 					   char *buf)
3644 {
3645 	return nr_hugepages_show_common(kobj, attr, buf);
3646 }
3647 
3648 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3649 	       struct kobj_attribute *attr, const char *buf, size_t len)
3650 {
3651 	return nr_hugepages_store_common(true, kobj, buf, len);
3652 }
3653 HSTATE_ATTR(nr_hugepages_mempolicy);
3654 #endif
3655 
3656 
3657 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3658 					struct kobj_attribute *attr, char *buf)
3659 {
3660 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3661 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3662 }
3663 
3664 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3665 		struct kobj_attribute *attr, const char *buf, size_t count)
3666 {
3667 	int err;
3668 	unsigned long input;
3669 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3670 
3671 	if (hstate_is_gigantic(h))
3672 		return -EINVAL;
3673 
3674 	err = kstrtoul(buf, 10, &input);
3675 	if (err)
3676 		return err;
3677 
3678 	spin_lock_irq(&hugetlb_lock);
3679 	h->nr_overcommit_huge_pages = input;
3680 	spin_unlock_irq(&hugetlb_lock);
3681 
3682 	return count;
3683 }
3684 HSTATE_ATTR(nr_overcommit_hugepages);
3685 
3686 static ssize_t free_hugepages_show(struct kobject *kobj,
3687 					struct kobj_attribute *attr, char *buf)
3688 {
3689 	struct hstate *h;
3690 	unsigned long free_huge_pages;
3691 	int nid;
3692 
3693 	h = kobj_to_hstate(kobj, &nid);
3694 	if (nid == NUMA_NO_NODE)
3695 		free_huge_pages = h->free_huge_pages;
3696 	else
3697 		free_huge_pages = h->free_huge_pages_node[nid];
3698 
3699 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3700 }
3701 HSTATE_ATTR_RO(free_hugepages);
3702 
3703 static ssize_t resv_hugepages_show(struct kobject *kobj,
3704 					struct kobj_attribute *attr, char *buf)
3705 {
3706 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3707 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3708 }
3709 HSTATE_ATTR_RO(resv_hugepages);
3710 
3711 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3712 					struct kobj_attribute *attr, char *buf)
3713 {
3714 	struct hstate *h;
3715 	unsigned long surplus_huge_pages;
3716 	int nid;
3717 
3718 	h = kobj_to_hstate(kobj, &nid);
3719 	if (nid == NUMA_NO_NODE)
3720 		surplus_huge_pages = h->surplus_huge_pages;
3721 	else
3722 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
3723 
3724 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3725 }
3726 HSTATE_ATTR_RO(surplus_hugepages);
3727 
3728 static ssize_t demote_store(struct kobject *kobj,
3729 	       struct kobj_attribute *attr, const char *buf, size_t len)
3730 {
3731 	unsigned long nr_demote;
3732 	unsigned long nr_available;
3733 	nodemask_t nodes_allowed, *n_mask;
3734 	struct hstate *h;
3735 	int err;
3736 	int nid;
3737 
3738 	err = kstrtoul(buf, 10, &nr_demote);
3739 	if (err)
3740 		return err;
3741 	h = kobj_to_hstate(kobj, &nid);
3742 
3743 	if (nid != NUMA_NO_NODE) {
3744 		init_nodemask_of_node(&nodes_allowed, nid);
3745 		n_mask = &nodes_allowed;
3746 	} else {
3747 		n_mask = &node_states[N_MEMORY];
3748 	}
3749 
3750 	/* Synchronize with other sysfs operations modifying huge pages */
3751 	mutex_lock(&h->resize_lock);
3752 	spin_lock_irq(&hugetlb_lock);
3753 
3754 	while (nr_demote) {
3755 		/*
3756 		 * Check for available pages to demote each time through the
3757 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
3758 		 */
3759 		if (nid != NUMA_NO_NODE)
3760 			nr_available = h->free_huge_pages_node[nid];
3761 		else
3762 			nr_available = h->free_huge_pages;
3763 		nr_available -= h->resv_huge_pages;
3764 		if (!nr_available)
3765 			break;
3766 
3767 		err = demote_pool_huge_page(h, n_mask);
3768 		if (err)
3769 			break;
3770 
3771 		nr_demote--;
3772 	}
3773 
3774 	spin_unlock_irq(&hugetlb_lock);
3775 	mutex_unlock(&h->resize_lock);
3776 
3777 	if (err)
3778 		return err;
3779 	return len;
3780 }
3781 HSTATE_ATTR_WO(demote);
3782 
3783 static ssize_t demote_size_show(struct kobject *kobj,
3784 					struct kobj_attribute *attr, char *buf)
3785 {
3786 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3787 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3788 
3789 	return sysfs_emit(buf, "%lukB\n", demote_size);
3790 }
3791 
3792 static ssize_t demote_size_store(struct kobject *kobj,
3793 					struct kobj_attribute *attr,
3794 					const char *buf, size_t count)
3795 {
3796 	struct hstate *h, *demote_hstate;
3797 	unsigned long demote_size;
3798 	unsigned int demote_order;
3799 
3800 	demote_size = (unsigned long)memparse(buf, NULL);
3801 
3802 	demote_hstate = size_to_hstate(demote_size);
3803 	if (!demote_hstate)
3804 		return -EINVAL;
3805 	demote_order = demote_hstate->order;
3806 	if (demote_order < HUGETLB_PAGE_ORDER)
3807 		return -EINVAL;
3808 
3809 	/* demote order must be smaller than hstate order */
3810 	h = kobj_to_hstate(kobj, NULL);
3811 	if (demote_order >= h->order)
3812 		return -EINVAL;
3813 
3814 	/* resize_lock serializes demote_order updates with resize operations */
3815 	mutex_lock(&h->resize_lock);
3816 	h->demote_order = demote_order;
3817 	mutex_unlock(&h->resize_lock);
3818 
3819 	return count;
3820 }
3821 HSTATE_ATTR(demote_size);
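/*
 * Illustrative sketch, not part of the kernel source: demotion is driven
 * by two sysfs writes -- pick the target size via demote_size, then write
 * the number of pages to demote.  Paths assume a 1GB hstate being demoted
 * to 2MB pages.
 */
#if 0	/* example only, never built with the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define HP_DIR "/sys/kernel/mm/hugepages/hugepages-1048576kB/"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t len = strlen(val);
	int ret;

	if (fd < 0)
		return -1;
	ret = (write(fd, val, len) == len) ? 0 : -1;
	close(fd);
	return ret;
}

static int demote_one_gig_page(void)
{
	/* demote_size_store() above validates the target order. */
	if (write_str(HP_DIR "demote_size", "2048kB"))
		return -1;
	/* demote_store() loops calling demote_pool_huge_page(). */
	return write_str(HP_DIR "demote", "1");
}
#endif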
3822 
3823 static struct attribute *hstate_attrs[] = {
3824 	&nr_hugepages_attr.attr,
3825 	&nr_overcommit_hugepages_attr.attr,
3826 	&free_hugepages_attr.attr,
3827 	&resv_hugepages_attr.attr,
3828 	&surplus_hugepages_attr.attr,
3829 #ifdef CONFIG_NUMA
3830 	&nr_hugepages_mempolicy_attr.attr,
3831 #endif
3832 	NULL,
3833 };
3834 
3835 static const struct attribute_group hstate_attr_group = {
3836 	.attrs = hstate_attrs,
3837 };
3838 
3839 static struct attribute *hstate_demote_attrs[] = {
3840 	&demote_size_attr.attr,
3841 	&demote_attr.attr,
3842 	NULL,
3843 };
3844 
3845 static const struct attribute_group hstate_demote_attr_group = {
3846 	.attrs = hstate_demote_attrs,
3847 };
3848 
3849 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3850 				    struct kobject **hstate_kobjs,
3851 				    const struct attribute_group *hstate_attr_group)
3852 {
3853 	int retval;
3854 	int hi = hstate_index(h);
3855 
3856 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3857 	if (!hstate_kobjs[hi])
3858 		return -ENOMEM;
3859 
3860 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3861 	if (retval) {
3862 		kobject_put(hstate_kobjs[hi]);
3863 		hstate_kobjs[hi] = NULL;
3864 		return retval;
3865 	}
3866 
3867 	if (h->demote_order) {
3868 		retval = sysfs_create_group(hstate_kobjs[hi],
3869 					    &hstate_demote_attr_group);
3870 		if (retval) {
3871 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
3872 			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
3873 			kobject_put(hstate_kobjs[hi]);
3874 			hstate_kobjs[hi] = NULL;
3875 			return retval;
3876 		}
3877 	}
3878 
3879 	return 0;
3880 }
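/*
 * Resulting layout under /sys/kernel/mm/hugepages/ (illustrative, for a
 * 1GB hstate; the demote files appear only if demote_order is non-zero):
 *
 *	hugepages-1048576kB/nr_hugepages
 *	hugepages-1048576kB/nr_overcommit_hugepages
 *	hugepages-1048576kB/free_hugepages
 *	hugepages-1048576kB/resv_hugepages
 *	hugepages-1048576kB/surplus_hugepages
 *	hugepages-1048576kB/nr_hugepages_mempolicy	(CONFIG_NUMA only)
 *	hugepages-1048576kB/demote
 *	hugepages-1048576kB/demote_size
 */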
3881 
3882 #ifdef CONFIG_NUMA
3883 static bool hugetlb_sysfs_initialized __ro_after_init;
3884 
3885 /*
3886  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3887  * with node devices in node_devices[] using a parallel array.  The array
3888  * index of a node device or node_hstate equals the node id.
3889  * This is here to avoid any static dependency of the node device driver, in
3890  * the base kernel, on the hugetlb module.
3891  */
3892 struct node_hstate {
3893 	struct kobject		*hugepages_kobj;
3894 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
3895 };
3896 static struct node_hstate node_hstates[MAX_NUMNODES];
3897 
3898 /*
3899  * A subset of global hstate attributes for node devices
3900  */
3901 static struct attribute *per_node_hstate_attrs[] = {
3902 	&nr_hugepages_attr.attr,
3903 	&free_hugepages_attr.attr,
3904 	&surplus_hugepages_attr.attr,
3905 	NULL,
3906 };
3907 
3908 static const struct attribute_group per_node_hstate_attr_group = {
3909 	.attrs = per_node_hstate_attrs,
3910 };
3911 
3912 /*
3913  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3914  * Returns node id via non-NULL nidp.
3915  */
3916 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3917 {
3918 	int nid;
3919 
3920 	for (nid = 0; nid < nr_node_ids; nid++) {
3921 		struct node_hstate *nhs = &node_hstates[nid];
3922 		int i;
3923 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
3924 			if (nhs->hstate_kobjs[i] == kobj) {
3925 				if (nidp)
3926 					*nidp = nid;
3927 				return &hstates[i];
3928 			}
3929 	}
3930 
3931 	BUG();
3932 	return NULL;
3933 }
3934 
3935 /*
3936  * Unregister hstate attributes from a single node device.
3937  * No-op if no hstate attributes attached.
3938  */
3939 void hugetlb_unregister_node(struct node *node)
3940 {
3941 	struct hstate *h;
3942 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3943 
3944 	if (!nhs->hugepages_kobj)
3945 		return;		/* no hstate attributes */
3946 
3947 	for_each_hstate(h) {
3948 		int idx = hstate_index(h);
3949 		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
3950 
3951 		if (!hstate_kobj)
3952 			continue;
3953 		if (h->demote_order)
3954 			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
3955 		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
3956 		kobject_put(hstate_kobj);
3957 		nhs->hstate_kobjs[idx] = NULL;
3958 	}
3959 
3960 	kobject_put(nhs->hugepages_kobj);
3961 	nhs->hugepages_kobj = NULL;
3962 }
3963 
3964 
3965 /*
3966  * Register hstate attributes for a single node device.
3967  * No-op if attributes already registered.
3968  */
3969 void hugetlb_register_node(struct node *node)
3970 {
3971 	struct hstate *h;
3972 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3973 	int err;
3974 
3975 	if (!hugetlb_sysfs_initialized)
3976 		return;
3977 
3978 	if (nhs->hugepages_kobj)
3979 		return;		/* already allocated */
3980 
3981 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3982 							&node->dev.kobj);
3983 	if (!nhs->hugepages_kobj)
3984 		return;
3985 
3986 	for_each_hstate(h) {
3987 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3988 						nhs->hstate_kobjs,
3989 						&per_node_hstate_attr_group);
3990 		if (err) {
3991 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3992 				h->name, node->dev.id);
3993 			hugetlb_unregister_node(node);
3994 			break;
3995 		}
3996 	}
3997 }
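/*
 * Illustrative result: for node 0, the function above creates the
 * per-node subset of attributes, e.g.
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */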
3998 
3999 /*
4000  * hugetlb init time:  register hstate attributes for all registered node
4001  * devices of nodes that have memory.  All on-line nodes should have
4002  * registered their associated device by this time.
4003  */
4004 static void __init hugetlb_register_all_nodes(void)
4005 {
4006 	int nid;
4007 
4008 	for_each_online_node(nid)
4009 		hugetlb_register_node(node_devices[nid]);
4010 }
4011 #else	/* !CONFIG_NUMA */
4012 
4013 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4014 {
4015 	BUG();
4016 	if (nidp)
4017 		*nidp = -1;
4018 	return NULL;
4019 }
4020 
4021 static void hugetlb_register_all_nodes(void) { }
4022 
4023 #endif
4024 
4025 #ifdef CONFIG_CMA
4026 static void __init hugetlb_cma_check(void);
4027 #else
4028 static inline __init void hugetlb_cma_check(void)
4029 {
4030 }
4031 #endif
4032 
4033 static void __init hugetlb_sysfs_init(void)
4034 {
4035 	struct hstate *h;
4036 	int err;
4037 
4038 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4039 	if (!hugepages_kobj)
4040 		return;
4041 
4042 	for_each_hstate(h) {
4043 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4044 					 hstate_kobjs, &hstate_attr_group);
4045 		if (err)
4046 			pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
4047 	}
4048 
4049 #ifdef CONFIG_NUMA
4050 	hugetlb_sysfs_initialized = true;
4051 #endif
4052 	hugetlb_register_all_nodes();
4053 }
4054 
4055 static int __init hugetlb_init(void)
4056 {
4057 	int i;
4058 
4059 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4060 			__NR_HPAGEFLAGS);
4061 
4062 	if (!hugepages_supported()) {
4063 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4064 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4065 		return 0;
4066 	}
4067 
4068 	/*
4069 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4070 	 * architectures depend on setup being done here.
4071 	 */
4072 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4073 	if (!parsed_default_hugepagesz) {
4074 		/*
4075 		 * If we did not parse a default huge page size, set
4076 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4077 		 * number of huge pages for this default size was implicitly
4078 		 * specified, set that here as well.
4079 		 * Note that the implicit setting will overwrite an explicit
4080 		 * setting.  A warning will be printed in this case.
4081 		 */
4082 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4083 		if (default_hstate_max_huge_pages) {
4084 			if (default_hstate.max_huge_pages) {
4085 				char buf[32];
4086 
4087 				string_get_size(huge_page_size(&default_hstate),
4088 					1, STRING_UNITS_2, buf, 32);
4089 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4090 					default_hstate.max_huge_pages, buf);
4091 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4092 					default_hstate_max_huge_pages);
4093 			}
4094 			default_hstate.max_huge_pages =
4095 				default_hstate_max_huge_pages;
4096 
4097 			for_each_online_node(i)
4098 				default_hstate.max_huge_pages_node[i] =
4099 					default_hugepages_in_node[i];
4100 		}
4101 	}
4102 
4103 	hugetlb_cma_check();
4104 	hugetlb_init_hstates();
4105 	gather_bootmem_prealloc();
4106 	report_hugepages();
4107 
4108 	hugetlb_sysfs_init();
4109 	hugetlb_cgroup_file_init();
4110 
4111 #ifdef CONFIG_SMP
4112 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4113 #else
4114 	num_fault_mutexes = 1;
4115 #endif
4116 	hugetlb_fault_mutex_table =
4117 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4118 			      GFP_KERNEL);
4119 	BUG_ON(!hugetlb_fault_mutex_table);
4120 
4121 	for (i = 0; i < num_fault_mutexes; i++)
4122 		mutex_init(&hugetlb_fault_mutex_table[i]);
4123 	return 0;
4124 }
4125 subsys_initcall(hugetlb_init);
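/*
 * Worked example for the fault mutex table sizing above (illustrative):
 * with CONFIG_SMP and 24 possible CPUs, 8 * 24 = 192 rounds up to 256
 * mutexes; hugetlb_fault_mutex_hash() later selects one by hashing the
 * (mapping, index) pair of the faulting page.
 */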
4126 
4127 /* Overridden by architectures that support more huge page sizes */
4128 bool __init __weak arch_hugetlb_valid_size(unsigned long size)
4129 {
4130 	return size == HPAGE_SIZE;
4131 }
4132 
4133 void __init hugetlb_add_hstate(unsigned int order)
4134 {
4135 	struct hstate *h;
4136 	unsigned long i;
4137 
4138 	if (size_to_hstate(PAGE_SIZE << order)) {
4139 		return;
4140 	}
4141 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4142 	BUG_ON(order == 0);
4143 	h = &hstates[hugetlb_max_hstate++];
4144 	mutex_init(&h->resize_lock);
4145 	h->order = order;
4146 	h->mask = ~(huge_page_size(h) - 1);
4147 	for (i = 0; i < MAX_NUMNODES; ++i)
4148 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4149 	INIT_LIST_HEAD(&h->hugepage_activelist);
4150 	h->next_nid_to_alloc = first_memory_node;
4151 	h->next_nid_to_free = first_memory_node;
4152 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4153 					huge_page_size(h)/SZ_1K);
4154 
4155 	parsed_hstate = h;
4156 }
4157 
4158 bool __init __weak hugetlb_node_alloc_supported(void)
4159 {
4160 	return true;
4161 }
4162 
4163 static void __init hugepages_clear_pages_in_node(void)
4164 {
4165 	if (!hugetlb_max_hstate) {
4166 		default_hstate_max_huge_pages = 0;
4167 		memset(default_hugepages_in_node, 0,
4168 			sizeof(default_hugepages_in_node));
4169 	} else {
4170 		parsed_hstate->max_huge_pages = 0;
4171 		memset(parsed_hstate->max_huge_pages_node, 0,
4172 			sizeof(parsed_hstate->max_huge_pages_node));
4173 	}
4174 }
4175 
4176 /*
4177  * hugepages command line processing
4178  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4179  * specification.  If not, ignore the hugepages value.  hugepages can also
4180  * be the first huge page command line option, in which case it implicitly
4181  * specifies the number of huge pages for the default size.
4182  */
4183 static int __init hugepages_setup(char *s)
4184 {
4185 	unsigned long *mhp;
4186 	static unsigned long *last_mhp;
4187 	int node = NUMA_NO_NODE;
4188 	int count;
4189 	unsigned long tmp;
4190 	char *p = s;
4191 
4192 	if (!parsed_valid_hugepagesz) {
4193 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4194 		parsed_valid_hugepagesz = true;
4195 		return 1;
4196 	}
4197 
4198 	/*
4199 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4200 	 * yet, so this hugepages= parameter goes to the "default hstate".
4201 	 * Otherwise, it goes with the previously parsed hugepagesz or
4202 	 * default_hugepagesz.
4203 	 */
4204 	else if (!hugetlb_max_hstate)
4205 		mhp = &default_hstate_max_huge_pages;
4206 	else
4207 		mhp = &parsed_hstate->max_huge_pages;
4208 
4209 	if (mhp == last_mhp) {
4210 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4211 		return 1;
4212 	}
4213 
4214 	while (*p) {
4215 		count = 0;
4216 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4217 			goto invalid;
4218 		/* Parameter is in node:count format */
4219 		if (p[count] == ':') {
4220 			if (!hugetlb_node_alloc_supported()) {
4221 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4222 				return 1;
4223 			}
4224 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4225 				goto invalid;
4226 			node = array_index_nospec(tmp, MAX_NUMNODES);
4227 			p += count + 1;
4228 			/* Parse hugepages */
4229 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4230 				goto invalid;
4231 			if (!hugetlb_max_hstate)
4232 				default_hugepages_in_node[node] = tmp;
4233 			else
4234 				parsed_hstate->max_huge_pages_node[node] = tmp;
4235 			*mhp += tmp;
4236 			/* Go on to parse the next node */
4237 			if (p[count] == ',')
4238 				p += count + 1;
4239 			else
4240 				break;
4241 		} else {
4242 			if (p != s)
4243 				goto invalid;
4244 			*mhp = tmp;
4245 			break;
4246 		}
4247 	}
4248 
4249 	/*
4250 	 * Global state is always initialized later in hugetlb_init.
4251 	 * But we need to allocate gigantic hstates here early to still
4252 	 * use the bootmem allocator.
4253 	 */
4254 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4255 		hugetlb_hstate_alloc_pages(parsed_hstate);
4256 
4257 	last_mhp = mhp;
4258 
4259 	return 1;
4260 
4261 invalid:
4262 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4263 	hugepages_clear_pages_in_node();
4264 	return 1;
4265 }
4266 __setup("hugepages=", hugepages_setup);
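/*
 * Examples accepted by hugepages_setup() above (illustrative):
 *
 *	hugepages=512			512 pages of the default size
 *	hugepagesz=2M hugepages=512	512 pages of 2MB
 *	hugepages=0:256,1:256		node:count format; 256 pages each
 *					on nodes 0 and 1
 */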
4267 
4268 /*
4269  * hugepagesz command line processing
4270  * A specific huge page size can only be specified once with hugepagesz.
4271  * hugepagesz is followed by hugepages on the command line.  The global
4272  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4273  * hugepagesz argument was valid.
4274  */
4275 static int __init hugepagesz_setup(char *s)
4276 {
4277 	unsigned long size;
4278 	struct hstate *h;
4279 
4280 	parsed_valid_hugepagesz = false;
4281 	size = (unsigned long)memparse(s, NULL);
4282 
4283 	if (!arch_hugetlb_valid_size(size)) {
4284 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4285 		return 1;
4286 	}
4287 
4288 	h = size_to_hstate(size);
4289 	if (h) {
4290 		/*
4291 		 * hstate for this size already exists.  This is normally
4292 		 * an error, but is allowed if the existing hstate is the
4293 		 * default hstate.  More specifically, it is only allowed if
4294 		 * the number of huge pages for the default hstate was not
4295 		 * previously specified.
4296 		 */
4297 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4298 		    default_hstate.max_huge_pages) {
4299 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4300 			return 1;
4301 		}
4302 
4303 		/*
4304 		 * No need to call hugetlb_add_hstate() as hstate already
4305 		 * exists.  But, do set parsed_hstate so that a following
4306 		 * hugepages= parameter will be applied to this hstate.
4307 		 */
4308 		parsed_hstate = h;
4309 		parsed_valid_hugepagesz = true;
4310 		return 1;
4311 	}
4312 
4313 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4314 	parsed_valid_hugepagesz = true;
4315 	return 1;
4316 }
4317 __setup("hugepagesz=", hugepagesz_setup);
4318 
4319 /*
4320  * default_hugepagesz command line input
4321  * Only one instance of default_hugepagesz allowed on command line.
4322  */
4323 static int __init default_hugepagesz_setup(char *s)
4324 {
4325 	unsigned long size;
4326 	int i;
4327 
4328 	parsed_valid_hugepagesz = false;
4329 	if (parsed_default_hugepagesz) {
4330 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4331 		return 1;
4332 	}
4333 
4334 	size = (unsigned long)memparse(s, NULL);
4335 
4336 	if (!arch_hugetlb_valid_size(size)) {
4337 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4338 		return 1;
4339 	}
4340 
4341 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4342 	parsed_valid_hugepagesz = true;
4343 	parsed_default_hugepagesz = true;
4344 	default_hstate_idx = hstate_index(size_to_hstate(size));
4345 
4346 	/*
4347 	 * The number of default huge pages (for this size) could have been
4348 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4349 	 * then default_hstate_max_huge_pages is set.  If the default huge
4350 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
4351 	 * allocated here from the bootmem allocator.
4352 	 */
4353 	if (default_hstate_max_huge_pages) {
4354 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4355 		for_each_online_node(i)
4356 			default_hstate.max_huge_pages_node[i] =
4357 				default_hugepages_in_node[i];
4358 		if (hstate_is_gigantic(&default_hstate))
4359 			hugetlb_hstate_alloc_pages(&default_hstate);
4360 		default_hstate_max_huge_pages = 0;
4361 	}
4362 
4363 	return 1;
4364 }
4365 __setup("default_hugepagesz=", default_hugepagesz_setup);
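/*
 * Example (illustrative): because the block above re-applies a bare
 * hugepages= count that preceded default_hugepagesz=, both orderings
 * below allocate sixteen 1GB pages at boot:
 *
 *	default_hugepagesz=1G hugepages=16
 *	hugepages=16 default_hugepagesz=1G
 */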
4366 
4367 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4368 {
4369 #ifdef CONFIG_NUMA
4370 	struct mempolicy *mpol = get_task_policy(current);
4371 
4372 	/*
4373 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4374 	 * (from policy_nodemask) specifically for hugetlb case
4375 	 */
4376 	if (mpol->mode == MPOL_BIND &&
4377 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
4378 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4379 		return &mpol->nodes;
4380 #endif
4381 	return NULL;
4382 }
4383 
4384 static unsigned int allowed_mems_nr(struct hstate *h)
4385 {
4386 	int node;
4387 	unsigned int nr = 0;
4388 	nodemask_t *mbind_nodemask;
4389 	unsigned int *array = h->free_huge_pages_node;
4390 	gfp_t gfp_mask = htlb_alloc_mask(h);
4391 
4392 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4393 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4394 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4395 			nr += array[node];
4396 	}
4397 
4398 	return nr;
4399 }
4400 
4401 #ifdef CONFIG_SYSCTL
4402 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4403 					  void *buffer, size_t *length,
4404 					  loff_t *ppos, unsigned long *out)
4405 {
4406 	struct ctl_table dup_table;
4407 
4408 	/*
4409 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
4410 	 * duplicate @table and only modify the copy.
4411 	 */
4412 	dup_table = *table;
4413 	dup_table.data = out;
4414 
4415 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4416 }
4417 
4418 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4419 			 struct ctl_table *table, int write,
4420 			 void *buffer, size_t *length, loff_t *ppos)
4421 {
4422 	struct hstate *h = &default_hstate;
4423 	unsigned long tmp = h->max_huge_pages;
4424 	int ret;
4425 
4426 	if (!hugepages_supported())
4427 		return -EOPNOTSUPP;
4428 
4429 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4430 					     &tmp);
4431 	if (ret)
4432 		goto out;
4433 
4434 	if (write)
4435 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4436 						  NUMA_NO_NODE, tmp, *length);
4437 out:
4438 	return ret;
4439 }
4440 
4441 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4442 			  void *buffer, size_t *length, loff_t *ppos)
4443 {
4444 
4445 	return hugetlb_sysctl_handler_common(false, table, write,
4446 							buffer, length, ppos);
4447 }
4448 
4449 #ifdef CONFIG_NUMA
4450 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4451 			  void *buffer, size_t *length, loff_t *ppos)
4452 {
4453 	return hugetlb_sysctl_handler_common(true, table, write,
4454 							buffer, length, ppos);
4455 }
4456 #endif /* CONFIG_NUMA */
4457 
4458 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4459 		void *buffer, size_t *length, loff_t *ppos)
4460 {
4461 	struct hstate *h = &default_hstate;
4462 	unsigned long tmp;
4463 	int ret;
4464 
4465 	if (!hugepages_supported())
4466 		return -EOPNOTSUPP;
4467 
4468 	tmp = h->nr_overcommit_huge_pages;
4469 
4470 	if (write && hstate_is_gigantic(h))
4471 		return -EINVAL;
4472 
4473 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4474 					     &tmp);
4475 	if (ret)
4476 		goto out;
4477 
4478 	if (write) {
4479 		spin_lock_irq(&hugetlb_lock);
4480 		h->nr_overcommit_huge_pages = tmp;
4481 		spin_unlock_irq(&hugetlb_lock);
4482 	}
4483 out:
4484 	return ret;
4485 }
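/*
 * Illustrative sketch, not part of the kernel source: the handlers above
 * back the vm.nr_hugepages and vm.nr_overcommit_hugepages sysctls, so the
 * default-size pool can also be resized without sysfs:
 */
#if 0	/* example only, never built with the kernel */
#include <fcntl.h>
#include <unistd.h>

static void grow_default_pool(void)
{
	int fd = open("/proc/sys/vm/nr_hugepages", O_WRONLY);

	if (fd >= 0) {
		/* Routed through hugetlb_sysctl_handler() above. */
		write(fd, "64", 2);
		close(fd);
	}
}
#endif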
4486 
4487 #endif /* CONFIG_SYSCTL */
4488 
4489 void hugetlb_report_meminfo(struct seq_file *m)
4490 {
4491 	struct hstate *h;
4492 	unsigned long total = 0;
4493 
4494 	if (!hugepages_supported())
4495 		return;
4496 
4497 	for_each_hstate(h) {
4498 		unsigned long count = h->nr_huge_pages;
4499 
4500 		total += huge_page_size(h) * count;
4501 
4502 		if (h == &default_hstate)
4503 			seq_printf(m,
4504 				   "HugePages_Total:   %5lu\n"
4505 				   "HugePages_Free:    %5lu\n"
4506 				   "HugePages_Rsvd:    %5lu\n"
4507 				   "HugePages_Surp:    %5lu\n"
4508 				   "Hugepagesize:   %8lu kB\n",
4509 				   count,
4510 				   h->free_huge_pages,
4511 				   h->resv_huge_pages,
4512 				   h->surplus_huge_pages,
4513 				   huge_page_size(h) / SZ_1K);
4514 	}
4515 
4516 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4517 }
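/*
 * Sample /proc/meminfo lines produced by the function above (values
 * illustrative, for a 2MB default size with 64 pages in the pool):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:          131072 kB
 */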
4518 
4519 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4520 {
4521 	struct hstate *h = &default_hstate;
4522 
4523 	if (!hugepages_supported())
4524 		return 0;
4525 
4526 	return sysfs_emit_at(buf, len,
4527 			     "Node %d HugePages_Total: %5u\n"
4528 			     "Node %d HugePages_Free:  %5u\n"
4529 			     "Node %d HugePages_Surp:  %5u\n",
4530 			     nid, h->nr_huge_pages_node[nid],
4531 			     nid, h->free_huge_pages_node[nid],
4532 			     nid, h->surplus_huge_pages_node[nid]);
4533 }
4534 
4535 void hugetlb_show_meminfo_node(int nid)
4536 {
4537 	struct hstate *h;
4538 
4539 	if (!hugepages_supported())
4540 		return;
4541 
4542 	for_each_hstate(h)
4543 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4544 			nid,
4545 			h->nr_huge_pages_node[nid],
4546 			h->free_huge_pages_node[nid],
4547 			h->surplus_huge_pages_node[nid],
4548 			huge_page_size(h) / SZ_1K);
4549 }
4550 
4551 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4552 {
4553 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4554 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4555 }
4556 
4557 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4558 unsigned long hugetlb_total_pages(void)
4559 {
4560 	struct hstate *h;
4561 	unsigned long nr_total_pages = 0;
4562 
4563 	for_each_hstate(h)
4564 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4565 	return nr_total_pages;
4566 }
4567 
4568 static int hugetlb_acct_memory(struct hstate *h, long delta)
4569 {
4570 	int ret = -ENOMEM;
4571 
4572 	if (!delta)
4573 		return 0;
4574 
4575 	spin_lock_irq(&hugetlb_lock);
4576 	/*
4577 	 * When cpuset is configured, it breaks the strict hugetlb page
4578 	 * reservation as the accounting is done on a global variable. Such
4579 	 * reservation is completely rubbish in the presence of cpuset because
4580 	 * the reservation is not checked against page availability for the
4581 	 * current cpuset. An application can still potentially be OOM'ed by the
4582 	 * kernel for lack of free hugetlb pages in the cpuset that the task
4583 	 * is in. Attempting to enforce strict accounting with cpuset is almost
4584 	 * impossible (or too ugly) because cpusets are so fluid that a task or
4585 	 * memory node can be dynamically moved between them.
4586 	 *
4587 	 * The change of semantics for shared hugetlb mapping with cpuset is
4588 	 * undesirable. However, in order to preserve some of the semantics,
4589 	 * we fall back to check against current free page availability as
4590 	 * a best attempt and hopefully to minimize the impact of changing
4591 	 * semantics that cpuset has.
4592 	 *
4593 	 * Apart from cpuset, there is also the memory policy mechanism, which
4594 	 * determines from which node the kernel will allocate memory
4595 	 * in a NUMA system. So, similar to cpuset, we should also consider
4596 	 * the memory policy of the current task, for the same reasons
4597 	 * described above.
4598 	 */
4599 	if (delta > 0) {
4600 		if (gather_surplus_pages(h, delta) < 0)
4601 			goto out;
4602 
4603 		if (delta > allowed_mems_nr(h)) {
4604 			return_unused_surplus_pages(h, delta);
4605 			goto out;
4606 		}
4607 	}
4608 
4609 	ret = 0;
4610 	if (delta < 0)
4611 		return_unused_surplus_pages(h, (unsigned long) -delta);
4612 
4613 out:
4614 	spin_unlock_irq(&hugetlb_lock);
4615 	return ret;
4616 }
4617 
4618 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4619 {
4620 	struct resv_map *resv = vma_resv_map(vma);
4621 
4622 	/*
4623 	 * HPAGE_RESV_OWNER indicates a private mapping.
4624 	 * This new VMA should share its siblings reservation map if present.
4625 	 * The VMA will only ever have a valid reservation map pointer where
4626 	 * it is being copied for another still existing VMA.  As that VMA
4627 	 * has a reference to the reservation map it cannot disappear until
4628 	 * after this open call completes.  It is therefore safe to take a
4629 	 * new reference here without additional locking.
4630 	 */
4631 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4632 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4633 		kref_get(&resv->refs);
4634 	}
4635 
4636 	/*
4637 	 * vma_lock structure for sharable mappings is vma specific.
4638 	 * Clear old pointer (if copied via vm_area_dup) and allocate
4639 	 * new structure.  Before clearing, make sure vma_lock is not
4640 	 * for this vma.
4641 	 */
4642 	if (vma->vm_flags & VM_MAYSHARE) {
4643 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4644 
4645 		if (vma_lock) {
4646 			if (vma_lock->vma != vma) {
4647 				vma->vm_private_data = NULL;
4648 				hugetlb_vma_lock_alloc(vma);
4649 			} else
4650 				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4651 		} else
4652 			hugetlb_vma_lock_alloc(vma);
4653 	}
4654 }
4655 
4656 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4657 {
4658 	struct hstate *h = hstate_vma(vma);
4659 	struct resv_map *resv;
4660 	struct hugepage_subpool *spool = subpool_vma(vma);
4661 	unsigned long reserve, start, end;
4662 	long gbl_reserve;
4663 
4664 	hugetlb_vma_lock_free(vma);
4665 
4666 	resv = vma_resv_map(vma);
4667 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4668 		return;
4669 
4670 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4671 	end = vma_hugecache_offset(h, vma, vma->vm_end);
4672 
4673 	reserve = (end - start) - region_count(resv, start, end);
4674 	hugetlb_cgroup_uncharge_counter(resv, start, end);
4675 	if (reserve) {
4676 		/*
4677 		 * Decrement reserve counts.  The global reserve count may be
4678 		 * adjusted if the subpool has a minimum size.
4679 		 */
4680 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4681 		hugetlb_acct_memory(h, -gbl_reserve);
4682 	}
4683 
4684 	kref_put(&resv->refs, resv_map_release);
4685 }
4686 
4687 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4688 {
4689 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4690 		return -EINVAL;
4691 	return 0;
4692 }
4693 
4694 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4695 {
4696 	return huge_page_size(hstate_vma(vma));
4697 }
4698 
4699 /*
4700  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4701  * handle_mm_fault() to try to instantiate regular-sized pages in the
4702  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4703  * this far.
4704  */
4705 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4706 {
4707 	BUG();
4708 	return 0;
4709 }
4710 
4711 /*
4712  * When a new function is introduced to vm_operations_struct and added
4713  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4714  * This is because under System V memory model, mappings created via
4715  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4716  * their original vm_ops are overwritten with shm_vm_ops.
4717  */
4718 const struct vm_operations_struct hugetlb_vm_ops = {
4719 	.fault = hugetlb_vm_op_fault,
4720 	.open = hugetlb_vm_op_open,
4721 	.close = hugetlb_vm_op_close,
4722 	.may_split = hugetlb_vm_op_split,
4723 	.pagesize = hugetlb_vm_op_pagesize,
4724 };
4725 
4726 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
4727 				int writable)
4728 {
4729 	pte_t entry;
4730 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4731 
4732 	if (writable) {
4733 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4734 					 vma->vm_page_prot)));
4735 	} else {
4736 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4737 					   vma->vm_page_prot));
4738 	}
4739 	entry = pte_mkyoung(entry);
4740 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4741 
4742 	return entry;
4743 }
4744 
4745 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4746 				   unsigned long address, pte_t *ptep)
4747 {
4748 	pte_t entry;
4749 
4750 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
4751 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4752 		update_mmu_cache(vma, address, ptep);
4753 }
4754 
4755 bool is_hugetlb_entry_migration(pte_t pte)
4756 {
4757 	swp_entry_t swp;
4758 
4759 	if (huge_pte_none(pte) || pte_present(pte))
4760 		return false;
4761 	swp = pte_to_swp_entry(pte);
4762 	if (is_migration_entry(swp))
4763 		return true;
4764 	else
4765 		return false;
4766 }
4767 
4768 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4769 {
4770 	swp_entry_t swp;
4771 
4772 	if (huge_pte_none(pte) || pte_present(pte))
4773 		return false;
4774 	swp = pte_to_swp_entry(pte);
4775 	if (is_hwpoison_entry(swp))
4776 		return true;
4777 	else
4778 		return false;
4779 }
4780 
4781 static void
4782 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4783 		     struct page *new_page)
4784 {
4785 	__SetPageUptodate(new_page);
4786 	hugepage_add_new_anon_rmap(new_page, vma, addr);
4787 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
4788 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4789 	SetHPageMigratable(new_page);
4790 }
4791 
4792 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4793 			    struct vm_area_struct *dst_vma,
4794 			    struct vm_area_struct *src_vma)
4795 {
4796 	pte_t *src_pte, *dst_pte, entry;
4797 	struct page *ptepage;
4798 	unsigned long addr;
4799 	bool cow = is_cow_mapping(src_vma->vm_flags);
4800 	struct hstate *h = hstate_vma(src_vma);
4801 	unsigned long sz = huge_page_size(h);
4802 	unsigned long npages = pages_per_huge_page(h);
4803 	struct mmu_notifier_range range;
4804 	unsigned long last_addr_mask;
4805 	int ret = 0;
4806 
4807 	if (cow) {
4808 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
4809 					src_vma->vm_start,
4810 					src_vma->vm_end);
4811 		mmu_notifier_invalidate_range_start(&range);
4812 		mmap_assert_write_locked(src);
4813 		raw_write_seqcount_begin(&src->write_protect_seq);
4814 	} else {
4815 		/*
4816 		 * For shared mappings the vma lock must be held before
4817 		 * calling huge_pte_offset in the src vma. Otherwise, the
4818 		 * returned ptep could go away if part of a shared pmd and
4819 		 * another thread calls huge_pmd_unshare.
4820 		 */
4821 		hugetlb_vma_lock_read(src_vma);
4822 	}
4823 
4824 	last_addr_mask = hugetlb_mask_last_page(h);
4825 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4826 		spinlock_t *src_ptl, *dst_ptl;
4827 		src_pte = huge_pte_offset(src, addr, sz);
4828 		if (!src_pte) {
4829 			addr |= last_addr_mask;
4830 			continue;
4831 		}
4832 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4833 		if (!dst_pte) {
4834 			ret = -ENOMEM;
4835 			break;
4836 		}
4837 
4838 		/*
4839 		 * If the pagetables are shared don't copy or take references.
4840 		 *
4841 		 * dst_pte == src_pte is the common case of src/dest sharing.
4842 		 * However, src could have 'unshared' and dst shares with
4843 		 * another vma. So page_count of ptep page is checked instead
4844 		 * to reliably determine whether pte is shared.
4845 		 */
4846 		if (page_count(virt_to_page(dst_pte)) > 1) {
4847 			addr |= last_addr_mask;
4848 			continue;
4849 		}
4850 
4851 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4852 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4853 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4854 		entry = huge_ptep_get(src_pte);
4855 again:
4856 		if (huge_pte_none(entry)) {
4857 			/*
4858 			 * Skip if src entry none.
4859 			 */
4860 			;
4861 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
4862 			bool uffd_wp = huge_pte_uffd_wp(entry);
4863 
4864 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4865 				entry = huge_pte_clear_uffd_wp(entry);
4866 			set_huge_pte_at(dst, addr, dst_pte, entry);
4867 		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
4868 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4869 			bool uffd_wp = huge_pte_uffd_wp(entry);
4870 
4871 			if (!is_readable_migration_entry(swp_entry) && cow) {
4872 				/*
4873 				 * COW mappings require pages in both
4874 				 * parent and child to be marked read-only.
4875 				 */
4876 				swp_entry = make_readable_migration_entry(
4877 							swp_offset(swp_entry));
4878 				entry = swp_entry_to_pte(swp_entry);
4879 				if (userfaultfd_wp(src_vma) && uffd_wp)
4880 					entry = huge_pte_mkuffd_wp(entry);
4881 				set_huge_pte_at(src, addr, src_pte, entry);
4882 			}
4883 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4884 				entry = huge_pte_clear_uffd_wp(entry);
4885 			set_huge_pte_at(dst, addr, dst_pte, entry);
4886 		} else if (unlikely(is_pte_marker(entry))) {
4887 			/*
4888 			 * We copy the pte marker only if the dst vma has
4889 			 * uffd-wp enabled.
4890 			 */
4891 			if (userfaultfd_wp(dst_vma))
4892 				set_huge_pte_at(dst, addr, dst_pte, entry);
4893 		} else {
4894 			entry = huge_ptep_get(src_pte);
4895 			ptepage = pte_page(entry);
4896 			get_page(ptepage);
4897 
4898 			/*
4899 			 * Failing to duplicate the anon rmap is a rare case
4900 			 * where we see pinned hugetlb pages while they're
4901 			 * prone to COW. We need to do the COW earlier during
4902 			 * fork.
4903 			 *
4904 			 * When pre-allocating the page or copying data, we
4905 			 * need to be without the pgtable locks since we could
4906 			 * sleep during the process.
4907 			 */
4908 			if (!PageAnon(ptepage)) {
4909 				page_dup_file_rmap(ptepage, true);
4910 			} else if (page_try_dup_anon_rmap(ptepage, true,
4911 							  src_vma)) {
4912 				pte_t src_pte_old = entry;
4913 				struct page *new;
4914 
4915 				spin_unlock(src_ptl);
4916 				spin_unlock(dst_ptl);
4917 				/* Do not use the reserve as it's privately owned */
4918 				new = alloc_huge_page(dst_vma, addr, 1);
4919 				if (IS_ERR(new)) {
4920 					put_page(ptepage);
4921 					ret = PTR_ERR(new);
4922 					break;
4923 				}
4924 				copy_user_huge_page(new, ptepage, addr, dst_vma,
4925 						    npages);
4926 				put_page(ptepage);
4927 
4928 				/* Install the new huge page if src pte stable */
4929 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
4930 				src_ptl = huge_pte_lockptr(h, src, src_pte);
4931 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4932 				entry = huge_ptep_get(src_pte);
4933 				if (!pte_same(src_pte_old, entry)) {
4934 					restore_reserve_on_error(h, dst_vma, addr,
4935 								new);
4936 					put_page(new);
4937 					/* huge_ptep of dst_pte won't change as in child */
4938 					goto again;
4939 				}
4940 				hugetlb_install_page(dst_vma, dst_pte, addr, new);
4941 				spin_unlock(src_ptl);
4942 				spin_unlock(dst_ptl);
4943 				continue;
4944 			}
4945 
4946 			if (cow) {
4947 				/*
4948 				 * No need to notify as we are downgrading page
4949 				 * table protection, not changing it to point
4950 				 * to a new page.
4951 				 *
4952 				 * See Documentation/mm/mmu_notifier.rst
4953 				 */
4954 				huge_ptep_set_wrprotect(src, addr, src_pte);
4955 				entry = huge_pte_wrprotect(entry);
4956 			}
4957 
4958 			set_huge_pte_at(dst, addr, dst_pte, entry);
4959 			hugetlb_count_add(npages, dst);
4960 		}
4961 		spin_unlock(src_ptl);
4962 		spin_unlock(dst_ptl);
4963 	}
4964 
4965 	if (cow) {
4966 		raw_write_seqcount_end(&src->write_protect_seq);
4967 		mmu_notifier_invalidate_range_end(&range);
4968 	} else {
4969 		hugetlb_vma_unlock_read(src_vma);
4970 	}
4971 
4972 	return ret;
4973 }
4974 
4975 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4976 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4977 {
4978 	struct hstate *h = hstate_vma(vma);
4979 	struct mm_struct *mm = vma->vm_mm;
4980 	spinlock_t *src_ptl, *dst_ptl;
4981 	pte_t pte;
4982 
4983 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4984 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4985 
4986 	/*
4987 	 * We don't have to worry about the ordering of src and dst ptlocks
4988 	 * because the exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
4989 	 */
4990 	if (src_ptl != dst_ptl)
4991 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4992 
4993 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4994 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4995 
4996 	if (src_ptl != dst_ptl)
4997 		spin_unlock(src_ptl);
4998 	spin_unlock(dst_ptl);
4999 }
5000 
5001 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5002 			     struct vm_area_struct *new_vma,
5003 			     unsigned long old_addr, unsigned long new_addr,
5004 			     unsigned long len)
5005 {
5006 	struct hstate *h = hstate_vma(vma);
5007 	struct address_space *mapping = vma->vm_file->f_mapping;
5008 	unsigned long sz = huge_page_size(h);
5009 	struct mm_struct *mm = vma->vm_mm;
5010 	unsigned long old_end = old_addr + len;
5011 	unsigned long last_addr_mask;
5012 	pte_t *src_pte, *dst_pte;
5013 	struct mmu_notifier_range range;
5014 	bool shared_pmd = false;
5015 
5016 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
5017 				old_end);
5018 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5019 	/*
5020 	 * In case of shared PMDs, we should cover the maximum possible
5021 	 * range.
5022 	 */
5023 	flush_cache_range(vma, range.start, range.end);
5024 
5025 	mmu_notifier_invalidate_range_start(&range);
5026 	last_addr_mask = hugetlb_mask_last_page(h);
5027 	/* Prevent race with file truncation */
5028 	hugetlb_vma_lock_write(vma);
5029 	i_mmap_lock_write(mapping);
5030 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5031 		src_pte = huge_pte_offset(mm, old_addr, sz);
5032 		if (!src_pte) {
5033 			old_addr |= last_addr_mask;
5034 			new_addr |= last_addr_mask;
5035 			continue;
5036 		}
5037 		if (huge_pte_none(huge_ptep_get(src_pte)))
5038 			continue;
5039 
5040 		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5041 			shared_pmd = true;
5042 			old_addr |= last_addr_mask;
5043 			new_addr |= last_addr_mask;
5044 			continue;
5045 		}
5046 
5047 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5048 		if (!dst_pte)
5049 			break;
5050 
5051 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
5052 	}
5053 
5054 	if (shared_pmd)
5055 		flush_tlb_range(vma, range.start, range.end);
5056 	else
5057 		flush_tlb_range(vma, old_end - len, old_end);
5058 	mmu_notifier_invalidate_range_end(&range);
5059 	i_mmap_unlock_write(mapping);
5060 	hugetlb_vma_unlock_write(vma);
5061 
5062 	return len + old_addr - old_end;
5063 }
5064 
5065 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5066 				   unsigned long start, unsigned long end,
5067 				   struct page *ref_page, zap_flags_t zap_flags)
5068 {
5069 	struct mm_struct *mm = vma->vm_mm;
5070 	unsigned long address;
5071 	pte_t *ptep;
5072 	pte_t pte;
5073 	spinlock_t *ptl;
5074 	struct page *page;
5075 	struct hstate *h = hstate_vma(vma);
5076 	unsigned long sz = huge_page_size(h);
5077 	unsigned long last_addr_mask;
5078 	bool force_flush = false;
5079 
5080 	WARN_ON(!is_vm_hugetlb_page(vma));
5081 	BUG_ON(start & ~huge_page_mask(h));
5082 	BUG_ON(end & ~huge_page_mask(h));
5083 
5084 	/*
5085 	 * This is a hugetlb vma, all the pte entries should point
5086 	 * to huge page.
5087 	 * to huge pages.
5088 	tlb_change_page_size(tlb, sz);
5089 	tlb_start_vma(tlb, vma);
5090 
5091 	last_addr_mask = hugetlb_mask_last_page(h);
5092 	address = start;
5093 	for (; address < end; address += sz) {
5094 		ptep = huge_pte_offset(mm, address, sz);
5095 		if (!ptep) {
5096 			address |= last_addr_mask;
5097 			continue;
5098 		}
5099 
5100 		ptl = huge_pte_lock(h, mm, ptep);
5101 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
5102 			spin_unlock(ptl);
5103 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5104 			force_flush = true;
5105 			address |= last_addr_mask;
5106 			continue;
5107 		}
5108 
5109 		pte = huge_ptep_get(ptep);
5110 		if (huge_pte_none(pte)) {
5111 			spin_unlock(ptl);
5112 			continue;
5113 		}
5114 
5115 		/*
5116 		 * A migrating or HWPoisoned hugepage is already
5117 		 * unmapped and its refcount is dropped, so just clear pte here.
5118 		 */
5119 		if (unlikely(!pte_present(pte))) {
5120 			/*
5121 			 * If the pte was wr-protected by uffd-wp in any of the
5122 			 * swap forms, meanwhile the caller does not want to
5123 			 * drop the uffd-wp bit in this zap, then replace the
5124 			 * pte with a marker.
5125 			 */
5126 			if (pte_swp_uffd_wp_any(pte) &&
5127 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5128 				set_huge_pte_at(mm, address, ptep,
5129 						make_pte_marker(PTE_MARKER_UFFD_WP));
5130 			else
5131 				huge_pte_clear(mm, address, ptep, sz);
5132 			spin_unlock(ptl);
5133 			continue;
5134 		}
5135 
5136 		page = pte_page(pte);
5137 		/*
5138 		 * If a reference page is supplied, it is because a specific
5139 		 * page is being unmapped, not a range. Ensure the page we
5140 		 * are about to unmap is the actual page of interest.
5141 		 */
5142 		if (ref_page) {
5143 			if (page != ref_page) {
5144 				spin_unlock(ptl);
5145 				continue;
5146 			}
5147 			/*
5148 			 * Mark the VMA as having unmapped its page so that
5149 			 * future faults in this VMA will fail rather than
5150 			 * looking like data was lost
5151 			 */
5152 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5153 		}
5154 
5155 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5156 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5157 		if (huge_pte_dirty(pte))
5158 			set_page_dirty(page);
5159 		/* Leave a uffd-wp pte marker if needed */
5160 		if (huge_pte_uffd_wp(pte) &&
5161 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5162 			set_huge_pte_at(mm, address, ptep,
5163 					make_pte_marker(PTE_MARKER_UFFD_WP));
5164 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5165 		page_remove_rmap(page, vma, true);
5166 
5167 		spin_unlock(ptl);
5168 		tlb_remove_page_size(tlb, page, huge_page_size(h));
5169 		/*
5170 		 * Bail out after unmapping reference page if supplied
5171 		 */
5172 		if (ref_page)
5173 			break;
5174 	}
5175 	tlb_end_vma(tlb, vma);
5176 
5177 	/*
5178 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5179 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5180 	 * guaranteed that the last reference would not be dropped. But we must
5181 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5182 	 * dropped and the last reference to the shared PMDs page might be
5183 	 * dropped as well.
5184 	 *
5185 	 * In theory we could defer the freeing of the PMD pages as well, but
5186 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5187 	 * detect sharing, so we cannot defer the release of the page either.
5188 	 * Instead, do flush now.
5189 	 */
5190 	if (force_flush)
5191 		tlb_flush_mmu_tlbonly(tlb);
5192 }
5193 
5194 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5195 			  struct vm_area_struct *vma, unsigned long start,
5196 			  unsigned long end, struct page *ref_page,
5197 			  zap_flags_t zap_flags)
5198 {
5199 	hugetlb_vma_lock_write(vma);
5200 	i_mmap_lock_write(vma->vm_file->f_mapping);
5201 
5202 	/* mmu notification performed in caller */
5203 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5204 
5205 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
5206 		/*
5207 		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5208 		 * When the vma_lock is freed, this makes the vma ineligible
5209 		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
5210 		 * pmd sharing.  This is important as page tables for this
5211 		 * unmapped range will be asynchronously deleted.  If the page
5212 		 * tables are shared, there will be issues when accessed by
5213 		 * someone else.
5214 		 */
5215 		__hugetlb_vma_unlock_write_free(vma);
5216 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5217 	} else {
5218 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5219 		hugetlb_vma_unlock_write(vma);
5220 	}
5221 }
5222 
5223 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5224 			  unsigned long end, struct page *ref_page,
5225 			  zap_flags_t zap_flags)
5226 {
5227 	struct mmu_notifier_range range;
5228 	struct mmu_gather tlb;
5229 
5230 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
5231 				start, end);
5232 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5233 	mmu_notifier_invalidate_range_start(&range);
5234 	tlb_gather_mmu(&tlb, vma->vm_mm);
5235 
5236 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5237 
5238 	mmu_notifier_invalidate_range_end(&range);
5239 	tlb_finish_mmu(&tlb);
5240 }
5241 
5242 /*
5243  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5244  * mapping it owns the reserve page for. The intention is to unmap the page
5245  * from other VMAs and let the children be SIGKILLed if they are faulting the
5246  * same region.
5247  */
5248 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5249 			      struct page *page, unsigned long address)
5250 {
5251 	struct hstate *h = hstate_vma(vma);
5252 	struct vm_area_struct *iter_vma;
5253 	struct address_space *mapping;
5254 	pgoff_t pgoff;
5255 
5256 	/*
5257 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5258 	 * from page cache lookup which is in HPAGE_SIZE units.
5259 	 */
5260 	address = address & huge_page_mask(h);
5261 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5262 			vma->vm_pgoff;
5263 	mapping = vma->vm_file->f_mapping;
5264 
5265 	/*
5266 	 * Take the mapping lock for the duration of the table walk. As
5267 	 * this mapping should be shared between all the VMAs,
5268 	 * __unmap_hugepage_range() is called directly as the lock is already held.
5269 	 */
5270 	i_mmap_lock_write(mapping);
5271 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5272 		/* Do not unmap the current VMA */
5273 		if (iter_vma == vma)
5274 			continue;
5275 
5276 		/*
5277 		 * Shared VMAs have their own reserves and do not affect
5278 		 * MAP_PRIVATE accounting but it is possible that a shared
5279 		 * VMA is using the same page so check and skip such VMAs.
5280 		 */
5281 		if (iter_vma->vm_flags & VM_MAYSHARE)
5282 			continue;
5283 
5284 		/*
5285 		 * Unmap the page from other VMAs without their own reserves.
5286 		 * They get marked to be SIGKILLed if they fault in these
5287 		 * areas. This is because a future no-page fault on this VMA
5288 		 * could insert a zeroed page instead of the data existing
5289 		 * from the time of fork. This would look like data corruption
5290 		 */
5291 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5292 			unmap_hugepage_range(iter_vma, address,
5293 					     address + huge_page_size(h), page, 0);
5294 	}
5295 	i_mmap_unlock_write(mapping);
5296 }
5297 
5298 /*
5299  * hugetlb_wp() should be called with page lock of the original hugepage held.
5300  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5301  * cannot race with other handlers or page migration.
5302  * Keep the pte_same checks anyway to make transition from the mutex easier.
5303  */
5304 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5305 		       unsigned long address, pte_t *ptep, unsigned int flags,
5306 		       struct page *pagecache_page, spinlock_t *ptl)
5307 {
5308 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
5309 	pte_t pte;
5310 	struct hstate *h = hstate_vma(vma);
5311 	struct page *old_page, *new_page;
5312 	int outside_reserve = 0;
5313 	vm_fault_t ret = 0;
5314 	unsigned long haddr = address & huge_page_mask(h);
5315 	struct mmu_notifier_range range;
5316 
5317 	/*
5318 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
5319 	 * PTE mapped R/O such as maybe_mkwrite() would do.
5320 	 */
5321 	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5322 		return VM_FAULT_SIGSEGV;
5323 
5324 	/* Let's take out MAP_SHARED mappings first. */
5325 	if (vma->vm_flags & VM_MAYSHARE) {
5326 		set_huge_ptep_writable(vma, haddr, ptep);
5327 		return 0;
5328 	}
5329 
5330 	pte = huge_ptep_get(ptep);
5331 	old_page = pte_page(pte);
5332 
5333 	delayacct_wpcopy_start();
5334 
5335 retry_avoidcopy:
5336 	/*
5337 	 * If no-one else is actually using this page, we're the exclusive
5338 	 * owner and can reuse this page.
5339 	 */
5340 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5341 		if (!PageAnonExclusive(old_page))
5342 			page_move_anon_rmap(old_page, vma);
5343 		if (likely(!unshare))
5344 			set_huge_ptep_writable(vma, haddr, ptep);
5345 
5346 		delayacct_wpcopy_end();
5347 		return 0;
5348 	}
5349 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
5350 		       old_page);
5351 
5352 	/*
5353 	 * If the process that created a MAP_PRIVATE mapping is about to
5354 	 * perform a COW due to a shared page count, attempt to satisfy
5355 	 * the allocation without using the existing reserves. The pagecache
5356 	 * page is used to determine if the reserve at this address was
5357 	 * consumed or not. If reserves were used, a partial faulted mapping
5358 	 * at the time of fork() could consume its reserves on COW instead
5359 	 * of the full address range.
5360 	 */
5361 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5362 			old_page != pagecache_page)
5363 		outside_reserve = 1;
5364 
5365 	get_page(old_page);
5366 
5367 	/*
5368 	 * Drop page table lock as buddy allocator may be called. It will
5369 	 * be acquired again before returning to the caller, as expected.
5370 	 */
5371 	spin_unlock(ptl);
5372 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
5373 
5374 	if (IS_ERR(new_page)) {
5375 		/*
5376 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5377 		 * it is due to references held by a child and an insufficient
5378 		 * huge page pool. To guarantee the original mapper's
5379 		 * reliability, unmap the page from child processes. The child
5380 		 * may get SIGKILLed if it later faults.
5381 		 */
5382 		if (outside_reserve) {
5383 			struct address_space *mapping = vma->vm_file->f_mapping;
5384 			pgoff_t idx;
5385 			u32 hash;
5386 
5387 			put_page(old_page);
5388 			/*
5389 			 * Drop hugetlb_fault_mutex and vma_lock before
5390 			 * unmapping.  Unmapping needs to hold vma_lock
5391 			 * in write mode.  Dropping vma_lock in read mode
5392 			 * here is OK as COW mappings do not interact with
5393 			 * PMD sharing.
5394 			 *
5395 			 * Reacquire both after unmap operation.
5396 			 */
5397 			idx = vma_hugecache_offset(h, vma, haddr);
5398 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5399 			hugetlb_vma_unlock_read(vma);
5400 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5401 
5402 			unmap_ref_private(mm, vma, old_page, haddr);
5403 
5404 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5405 			hugetlb_vma_lock_read(vma);
5406 			spin_lock(ptl);
5407 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5408 			if (likely(ptep &&
5409 				   pte_same(huge_ptep_get(ptep), pte)))
5410 				goto retry_avoidcopy;
5411 			/*
5412 			 * A race occurred while re-acquiring the page
5413 			 * table lock, and our job is done.
5414 			 */
5415 			delayacct_wpcopy_end();
5416 			return 0;
5417 		}
5418 
5419 		ret = vmf_error(PTR_ERR(new_page));
5420 		goto out_release_old;
5421 	}
5422 
5423 	/*
5424 	 * When the original hugepage is a shared one, it does not have
5425 	 * an anon_vma prepared.
5426 	 */
5427 	if (unlikely(anon_vma_prepare(vma))) {
5428 		ret = VM_FAULT_OOM;
5429 		goto out_release_all;
5430 	}
5431 
5432 	copy_user_huge_page(new_page, old_page, address, vma,
5433 			    pages_per_huge_page(h));
5434 	__SetPageUptodate(new_page);
5435 
5436 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
5437 				haddr + huge_page_size(h));
5438 	mmu_notifier_invalidate_range_start(&range);
5439 
5440 	/*
5441 	 * Retake the page table lock to check for racing updates
5442 	 * before the page tables are altered
5443 	 */
5444 	spin_lock(ptl);
5445 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5446 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5447 		/* Break COW or unshare */
5448 		huge_ptep_clear_flush(vma, haddr, ptep);
5449 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5450 		page_remove_rmap(old_page, vma, true);
5451 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
5452 		set_huge_pte_at(mm, haddr, ptep,
5453 				make_huge_pte(vma, new_page, !unshare));
5454 		SetHPageMigratable(new_page);
5455 		/* Make the old page be freed below */
5456 		new_page = old_page;
5457 	}
5458 	spin_unlock(ptl);
5459 	mmu_notifier_invalidate_range_end(&range);
5460 out_release_all:
5461 	/*
5462 	 * No restore in case of successful pagetable update (Break COW or
5463 	 * unshare)
5464 	 */
5465 	if (new_page != old_page)
5466 		restore_reserve_on_error(h, vma, haddr, new_page);
5467 	put_page(new_page);
5468 out_release_old:
5469 	put_page(old_page);
5470 
5471 	spin_lock(ptl); /* Caller expects lock to be held */
5472 
5473 	delayacct_wpcopy_end();
5474 	return ret;
5475 }
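/*
 * In summary, hugetlb_wp() resolves a write/unshare fault in one of three
 * ways (a recap of the code above, not additional behavior): reuse the old
 * page in place when we are the sole mapper of an anonymous page; on
 * allocation failure for a reserve owner, unmap the page from children via
 * unmap_ref_private() and retry; otherwise allocate a fresh huge page, copy
 * into it, and switch the pte over under the page table lock.
 */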
5476 
5477 /*
5478  * Return whether there is a pagecache page to back given address within VMA.
5479  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
5480  */
5481 static bool hugetlbfs_pagecache_present(struct hstate *h,
5482 			struct vm_area_struct *vma, unsigned long address)
5483 {
5484 	struct address_space *mapping;
5485 	pgoff_t idx;
5486 	struct page *page;
5487 
5488 	mapping = vma->vm_file->f_mapping;
5489 	idx = vma_hugecache_offset(h, vma, address);
5490 
5491 	page = find_get_page(mapping, idx);
5492 	if (page)
5493 		put_page(page);
5494 	return page != NULL;
5495 }
5496 
5497 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
5498 			   pgoff_t idx)
5499 {
5500 	struct folio *folio = page_folio(page);
5501 	struct inode *inode = mapping->host;
5502 	struct hstate *h = hstate_inode(inode);
5503 	int err;
5504 
5505 	__folio_set_locked(folio);
5506 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5507 
5508 	if (unlikely(err)) {
5509 		__folio_clear_locked(folio);
5510 		return err;
5511 	}
5512 	ClearHPageRestoreReserve(page);
5513 
5514 	/*
5515 	 * Mark the folio dirty so that it will not be removed from cache/file
5516 	 * by non-hugetlbfs specific code paths.
5517 	 */
5518 	folio_mark_dirty(folio);
5519 
5520 	spin_lock(&inode->i_lock);
5521 	inode->i_blocks += blocks_per_huge_page(h);
5522 	spin_unlock(&inode->i_lock);
5523 	return 0;
5524 }
5525 
5526 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
5527 						  struct address_space *mapping,
5528 						  pgoff_t idx,
5529 						  unsigned int flags,
5530 						  unsigned long haddr,
5531 						  unsigned long addr,
5532 						  unsigned long reason)
5533 {
5534 	u32 hash;
5535 	struct vm_fault vmf = {
5536 		.vma = vma,
5537 		.address = haddr,
5538 		.real_address = addr,
5539 		.flags = flags,
5540 
5541 		/*
5542 		 * Hard to debug if it ends up being
5543 		 * used by a callee that assumes
5544 		 * something about the other
5545 		 * uninitialized fields... same as in
5546 		 * memory.c
5547 		 */
5548 	};
5549 
5550 	/*
5551 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5552 	 * userfault. Also, mmap_lock could be dropped while handling
5553 	 * userfault, so any vma operation should be careful from here on.
5554 	 */
5555 	hugetlb_vma_unlock_read(vma);
5556 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5557 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5558 	return handle_userfault(&vmf, reason);
5559 }
5560 
5561 /*
5562  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5563  * false if pte changed or is changing.
5564  */
5565 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
5566 			       pte_t *ptep, pte_t old_pte)
5567 {
5568 	spinlock_t *ptl;
5569 	bool same;
5570 
5571 	ptl = huge_pte_lock(h, mm, ptep);
5572 	same = pte_same(huge_ptep_get(ptep), old_pte);
5573 	spin_unlock(ptl);
5574 
5575 	return same;
5576 }
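/*
 * Sketch of the pattern used by the callers below (not a new API): sample
 * the pte locklessly, do work that may sleep or drop locks, then
 * re-validate before acting on the possibly stale value:
 *
 *	pte_t old_pte = huge_ptep_get(ptep);	lockless snapshot
 *	... allocate, copy, or handle userfault ...
 *	if (!hugetlb_pte_stable(h, mm, ptep, old_pte))
 *		return 0;	pte changed under us, let the fault retry
 */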
5577 
5578 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
5579 			struct vm_area_struct *vma,
5580 			struct address_space *mapping, pgoff_t idx,
5581 			unsigned long address, pte_t *ptep,
5582 			pte_t old_pte, unsigned int flags)
5583 {
5584 	struct hstate *h = hstate_vma(vma);
5585 	vm_fault_t ret = VM_FAULT_SIGBUS;
5586 	int anon_rmap = 0;
5587 	unsigned long size;
5588 	struct page *page;
5589 	pte_t new_pte;
5590 	spinlock_t *ptl;
5591 	unsigned long haddr = address & huge_page_mask(h);
5592 	bool new_page, new_pagecache_page = false;
5593 	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
5594 
5595 	/*
5596 	 * Currently, we are forced to kill the process in the event the
5597 	 * original mapper has unmapped pages from the child due to a failed
5598 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5599 	 * be obvious.
5600 	 */
5601 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5602 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5603 			   current->pid);
5604 		goto out;
5605 	}
5606 
5607 	/*
5608 	 * Use page lock to guard against racing truncation
5609 	 * before we get page_table_lock.
5610 	 */
5611 	new_page = false;
5612 	page = find_lock_page(mapping, idx);
5613 	if (!page) {
5614 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5615 		if (idx >= size)
5616 			goto out;
5617 		/* Check for page in userfault range */
5618 		if (userfaultfd_missing(vma)) {
5619 			/*
5620 			 * Since hugetlb_no_page() was examining the pte
5621 			 * without the pgtable lock, we need to re-test under
5622 			 * lock because the pte may not be stable and could
5623 			 * have changed from under us.  Try to detect ptes
5624 			 * that have changed, or are in the middle of
5625 			 * changing, and retry properly when needed.
5626 			 *
5627 			 * Note that userfaultfd is actually fine with
5628 			 * false positives (e.g. caused by a changed pte),
5629 			 * but not with wrong logical events (e.g. caused by
5630 			 * reading a pte mid-change).  The latter can
5631 			 * confuse userspace, so the strictness is very
5632 			 * much preferred.  E.g., a MISSING event should
5633 			 * never happen on a page after UFFDIO_COPY has
5634 			 * correctly installed the page and returned.
5635 			 */
5636 			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5637 				ret = 0;
5638 				goto out;
5639 			}
5640 
5641 			return hugetlb_handle_userfault(vma, mapping, idx, flags,
5642 							haddr, address,
5643 							VM_UFFD_MISSING);
5644 		}
5645 
5646 		page = alloc_huge_page(vma, haddr, 0);
5647 		if (IS_ERR(page)) {
5648 			/*
5649 			 * Returning error will result in the faulting task
5650 			 * being sent SIGBUS.  The hugetlb fault mutex prevents
5651 			 * two tasks from racing to fault in the same page,
5652 			 * which could result in spurious allocation failures.
5653 			 * Page migration does not take the fault mutex, but
5654 			 * does a clear then write of ptes under the page table
5655 			 * lock.  Page fault code could race with migration,
5656 			 * notice the cleared pte and try to allocate a page
5657 			 * here.  Before returning error, get the ptl and make
5658 			 * sure there really is no pte entry.
5659 			 */
5660 			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5661 				ret = vmf_error(PTR_ERR(page));
5662 			else
5663 				ret = 0;
5664 			goto out;
5665 		}
5666 		clear_huge_page(page, address, pages_per_huge_page(h));
5667 		__SetPageUptodate(page);
5668 		new_page = true;
5669 
5670 		if (vma->vm_flags & VM_MAYSHARE) {
5671 			int err = hugetlb_add_to_page_cache(page, mapping, idx);
5672 			if (err) {
5673 				/*
5674 				 * err can't be -EEXIST which implies someone
5675 				 * else consumed the reservation since hugetlb
5676 				 * fault mutex is held when adding a hugetlb page
5677 				 * to the page cache. So it's safe to call
5678 				 * restore_reserve_on_error() here.
5679 				 */
5680 				restore_reserve_on_error(h, vma, haddr, page);
5681 				put_page(page);
5682 				goto out;
5683 			}
5684 			new_pagecache_page = true;
5685 		} else {
5686 			lock_page(page);
5687 			if (unlikely(anon_vma_prepare(vma))) {
5688 				ret = VM_FAULT_OOM;
5689 				goto backout_unlocked;
5690 			}
5691 			anon_rmap = 1;
5692 		}
5693 	} else {
5694 		/*
5695 		 * If a memory error occurs between mmap() and fault, some
5696 		 * processes don't have a hwpoisoned swap entry for the errored
5697 		 * virtual address, so block the hugepage fault on PG_hwpoison.
5698 		 */
5699 		if (unlikely(PageHWPoison(page))) {
5700 			ret = VM_FAULT_HWPOISON_LARGE |
5701 				VM_FAULT_SET_HINDEX(hstate_index(h));
5702 			goto backout_unlocked;
5703 		}
5704 
5705 		/* Check for page in userfault range. */
5706 		if (userfaultfd_minor(vma)) {
5707 			unlock_page(page);
5708 			put_page(page);
5709 			/* See comment in userfaultfd_missing() block above */
5710 			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5711 				ret = 0;
5712 				goto out;
5713 			}
5714 			return hugetlb_handle_userfault(vma, mapping, idx, flags,
5715 							haddr, address,
5716 							VM_UFFD_MINOR);
5717 		}
5718 	}
5719 
5720 	/*
5721 	 * If we are going to COW a private mapping later, we examine the
5722 	 * pending reservations for this page now. This will ensure that
5723 	 * any allocations necessary to record that reservation occur outside
5724 	 * the spinlock.
5725 	 */
5726 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5727 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5728 			ret = VM_FAULT_OOM;
5729 			goto backout_unlocked;
5730 		}
5731 		/* Just decrements count, does not deallocate */
5732 		vma_end_reservation(h, vma, haddr);
5733 	}
5734 
5735 	ptl = huge_pte_lock(h, mm, ptep);
5736 	ret = 0;
5737 	/* If pte changed from under us, retry */
5738 	if (!pte_same(huge_ptep_get(ptep), old_pte))
5739 		goto backout;
5740 
5741 	if (anon_rmap)
5742 		hugepage_add_new_anon_rmap(page, vma, haddr);
5743 	else
5744 		page_dup_file_rmap(page, true);
5745 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
5746 				&& (vma->vm_flags & VM_SHARED)));
5747 	/*
5748 	 * If this pte was previously wr-protected, keep it wr-protected even
5749 	 * if populated.
5750 	 */
5751 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5752 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5753 	set_huge_pte_at(mm, haddr, ptep, new_pte);
5754 
5755 	hugetlb_count_add(pages_per_huge_page(h), mm);
5756 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5757 		/* Optimization, do the COW without a second fault */
5758 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
5759 	}
5760 
5761 	spin_unlock(ptl);
5762 
5763 	/*
5764 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
5765 	 * found in the pagecache may not have HPageMigratable set if they have
5766 	 * been isolated for migration.
5767 	 */
5768 	if (new_page)
5769 		SetHPageMigratable(page);
5770 
5771 	unlock_page(page);
5772 out:
5773 	hugetlb_vma_unlock_read(vma);
5774 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5775 	return ret;
5776 
5777 backout:
5778 	spin_unlock(ptl);
5779 backout_unlocked:
5780 	if (new_page && !new_pagecache_page)
5781 		restore_reserve_on_error(h, vma, haddr, page);
5782 
5783 	unlock_page(page);
5784 	put_page(page);
5785 	goto out;
5786 }
5787 
5788 #ifdef CONFIG_SMP
5789 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5790 {
5791 	unsigned long key[2];
5792 	u32 hash;
5793 
5794 	key[0] = (unsigned long) mapping;
5795 	key[1] = idx;
5796 
5797 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5798 
5799 	return hash & (num_fault_mutexes - 1);
5800 }
5801 #else
5802 /*
5803  * For uniprocessor systems we always use a single mutex, so just
5804  * return 0 and avoid the hashing overhead.
5805  */
5806 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5807 {
5808 	return 0;
5809 }
5810 #endif
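/*
 * Sketch of the serialization idiom built on this hash (the real users are
 * hugetlb_fault() and the userfaultfd paths in this file):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... look up or instantiate the page for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * num_fault_mutexes is sized to a power of two, so the final
 * "& (num_fault_mutexes - 1)" in the SMP version reduces the jhash2 value
 * to a valid table index.
 */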
5811 
5812 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5813 			unsigned long address, unsigned int flags)
5814 {
5815 	pte_t *ptep, entry;
5816 	spinlock_t *ptl;
5817 	vm_fault_t ret;
5818 	u32 hash;
5819 	pgoff_t idx;
5820 	struct page *page = NULL;
5821 	struct page *pagecache_page = NULL;
5822 	struct hstate *h = hstate_vma(vma);
5823 	struct address_space *mapping;
5824 	int need_wait_lock = 0;
5825 	unsigned long haddr = address & huge_page_mask(h);
5826 
5827 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5828 	if (ptep) {
5829 		/*
5830 		 * Since we hold no locks, ptep could be stale.  That is
5831 		 * OK as we are only making decisions based on content and
5832 		 * not actually modifying content here.
5833 		 */
5834 		entry = huge_ptep_get(ptep);
5835 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5836 			migration_entry_wait_huge(vma, ptep);
5837 			return 0;
5838 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5839 			return VM_FAULT_HWPOISON_LARGE |
5840 				VM_FAULT_SET_HINDEX(hstate_index(h));
5841 	}
5842 
5843 	/*
5844 	 * Serialize hugepage allocation and instantiation, so that we don't
5845 	 * get spurious allocation failures if two CPUs race to instantiate
5846 	 * the same page in the page cache.
5847 	 */
5848 	mapping = vma->vm_file->f_mapping;
5849 	idx = vma_hugecache_offset(h, vma, haddr);
5850 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5851 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
5852 
5853 	/*
5854 	 * Acquire vma lock before calling huge_pte_alloc and hold
5855 	 * until finished with ptep.  This prevents huge_pmd_unshare from
5856 	 * being called elsewhere and making the ptep no longer valid.
5857 	 *
5858 	 * ptep could have already been assigned via huge_pte_offset.  That
5859 	 * is OK, as huge_pte_alloc will return the same value unless
5860 	 * something has changed.
5861 	 */
5862 	hugetlb_vma_lock_read(vma);
5863 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5864 	if (!ptep) {
5865 		hugetlb_vma_unlock_read(vma);
5866 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5867 		return VM_FAULT_OOM;
5868 	}
5869 
5870 	entry = huge_ptep_get(ptep);
5871 	/* PTE markers should be handled the same way as none pte */
5872 	if (huge_pte_none_mostly(entry))
5873 		/*
5874 		 * hugetlb_no_page will drop vma lock and hugetlb fault
5875 		 * mutex internally, which makes us return immediately.
5876 		 */
5877 		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5878 				      entry, flags);
5879 
5880 	ret = 0;
5881 
5882 	/*
5883 	 * entry could be a migration/hwpoison entry at this point, so this
5884 	 * check prevents the code below from assuming that we have an
5885 	 * active hugepage in the pagecache. This goto defers handling to a
5886 	 * 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
5887 	 * checks will properly handle it.
5888 	 */
5889 	if (!pte_present(entry))
5890 		goto out_mutex;
5891 
5892 	/*
5893 	 * If we are going to COW/unshare the mapping later, we examine the
5894 	 * pending reservations for this page now. This will ensure that any
5895 	 * allocations necessary to record that reservation occur outside the
5896 	 * spinlock. Also lookup the pagecache page now as it is used to
5897 	 * determine if a reservation has been consumed.
5898 	 */
5899 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5900 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
5901 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5902 			ret = VM_FAULT_OOM;
5903 			goto out_mutex;
5904 		}
5905 		/* Just decrements count, does not deallocate */
5906 		vma_end_reservation(h, vma, haddr);
5907 
5908 		pagecache_page = find_lock_page(mapping, idx);
5909 	}
5910 
5911 	ptl = huge_pte_lock(h, mm, ptep);
5912 
5913 	/* Check for a racing update before calling hugetlb_wp() */
5914 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5915 		goto out_ptl;
5916 
5917 	/* Handle userfault-wp first, before trying to lock more pages */
5918 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5919 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5920 		struct vm_fault vmf = {
5921 			.vma = vma,
5922 			.address = haddr,
5923 			.real_address = address,
5924 			.flags = flags,
5925 		};
5926 
5927 		spin_unlock(ptl);
5928 		if (pagecache_page) {
5929 			unlock_page(pagecache_page);
5930 			put_page(pagecache_page);
5931 		}
5932 		hugetlb_vma_unlock_read(vma);
5933 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5934 		return handle_userfault(&vmf, VM_UFFD_WP);
5935 	}
5936 
5937 	/*
5938 	 * hugetlb_wp() requires page locks of pte_page(entry) and
5939 	 * pagecache_page, so here we need to take the former one
5940 	 * when page != pagecache_page or !pagecache_page.
5941 	 */
5942 	page = pte_page(entry);
5943 	if (page != pagecache_page)
5944 		if (!trylock_page(page)) {
5945 			need_wait_lock = 1;
5946 			goto out_ptl;
5947 		}
5948 
5949 	get_page(page);
5950 
5951 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5952 		if (!huge_pte_write(entry)) {
5953 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5954 					 pagecache_page, ptl);
5955 			goto out_put_page;
5956 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5957 			entry = huge_pte_mkdirty(entry);
5958 		}
5959 	}
5960 	entry = pte_mkyoung(entry);
5961 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5962 						flags & FAULT_FLAG_WRITE))
5963 		update_mmu_cache(vma, haddr, ptep);
5964 out_put_page:
5965 	if (page != pagecache_page)
5966 		unlock_page(page);
5967 	put_page(page);
5968 out_ptl:
5969 	spin_unlock(ptl);
5970 
5971 	if (pagecache_page) {
5972 		unlock_page(pagecache_page);
5973 		put_page(pagecache_page);
5974 	}
5975 out_mutex:
5976 	hugetlb_vma_unlock_read(vma);
5977 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5978 	/*
5979 	 * Generally it's safe to hold a refcount while waiting for a page lock.
5980 	 * But here we only wait to defer the next page fault and avoid a busy
5981 	 * loop; the page is not used after being unlocked before we return from
5982 	 * the current page fault. So we are safe from accessing a freed page,
5983 	 * even if we wait here without taking a refcount.
5984 	 */
5985 	if (need_wait_lock)
5986 		wait_on_page_locked(page);
5987 	return ret;
5988 }
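/*
 * Lock ordering in the fault path above, outermost first (a summary of the
 * code, not an additional rule):
 *
 *	hugetlb_fault_mutex_table[hash]
 *	  -> hugetlb vma lock (read)
 *	    -> pagecache_page lock (find_lock_page)
 *	      -> huge_pte_lock (page table lock)
 *
 * The pte_page lock is only trylocked under the ptl; on contention the
 * fault backs out and waits for it with no locks held.
 */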
5989 
5990 #ifdef CONFIG_USERFAULTFD
5991 /*
5992  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
5993  * modifications for huge pages.
5994  */
5995 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
5996 			    pte_t *dst_pte,
5997 			    struct vm_area_struct *dst_vma,
5998 			    unsigned long dst_addr,
5999 			    unsigned long src_addr,
6000 			    enum mcopy_atomic_mode mode,
6001 			    struct page **pagep,
6002 			    bool wp_copy)
6003 {
6004 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
6005 	struct hstate *h = hstate_vma(dst_vma);
6006 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
6007 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6008 	unsigned long size;
6009 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
6010 	pte_t _dst_pte;
6011 	spinlock_t *ptl;
6012 	int ret = -ENOMEM;
6013 	struct page *page;
6014 	int writable;
6015 	bool page_in_pagecache = false;
6016 
6017 	if (is_continue) {
6018 		ret = -EFAULT;
6019 		page = find_lock_page(mapping, idx);
6020 		if (!page)
6021 			goto out;
6022 		page_in_pagecache = true;
6023 	} else if (!*pagep) {
6024 		/* If a page already exists, then it's UFFDIO_COPY for
6025 		 * a non-missing case. Return -EEXIST.
6026 		 */
6027 		if (vm_shared &&
6028 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6029 			ret = -EEXIST;
6030 			goto out;
6031 		}
6032 
6033 		page = alloc_huge_page(dst_vma, dst_addr, 0);
6034 		if (IS_ERR(page)) {
6035 			ret = -ENOMEM;
6036 			goto out;
6037 		}
6038 
6039 		ret = copy_huge_page_from_user(page,
6040 						(const void __user *) src_addr,
6041 						pages_per_huge_page(h), false);
6042 
6043 		/* fallback to copy_from_user outside mmap_lock */
6044 		if (unlikely(ret)) {
6045 			ret = -ENOENT;
6046 			/* Free the allocated page which may have
6047 			 * consumed a reservation.
6048 			 */
6049 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
6050 			put_page(page);
6051 
6052 			/* Allocate a temporary page to hold the copied
6053 			 * contents.
6054 			 */
6055 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
6056 			if (!page) {
6057 				ret = -ENOMEM;
6058 				goto out;
6059 			}
6060 			*pagep = page;
6061 			/* Set the outparam pagep and return to the caller to
6062 			 * copy the contents outside the lock. Don't free the
6063 			 * page.
6064 			 */
6065 			goto out;
6066 		}
6067 	} else {
6068 		if (vm_shared &&
6069 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6070 			put_page(*pagep);
6071 			ret = -EEXIST;
6072 			*pagep = NULL;
6073 			goto out;
6074 		}
6075 
6076 		page = alloc_huge_page(dst_vma, dst_addr, 0);
6077 		if (IS_ERR(page)) {
6078 			put_page(*pagep);
6079 			ret = -ENOMEM;
6080 			*pagep = NULL;
6081 			goto out;
6082 		}
6083 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
6084 				    pages_per_huge_page(h));
6085 		put_page(*pagep);
6086 		*pagep = NULL;
6087 	}
6088 
6089 	/*
6090 	 * The memory barrier inside __SetPageUptodate makes sure that
6091 	 * preceding stores to the page contents become visible before
6092 	 * the set_pte_at() write.
6093 	 */
6094 	__SetPageUptodate(page);
6095 
6096 	/* Add shared, newly allocated pages to the page cache. */
6097 	if (vm_shared && !is_continue) {
6098 		size = i_size_read(mapping->host) >> huge_page_shift(h);
6099 		ret = -EFAULT;
6100 		if (idx >= size)
6101 			goto out_release_nounlock;
6102 
6103 		/*
6104 		 * Serialization between remove_inode_hugepages() and
6105 		 * hugetlb_add_to_page_cache() below happens through the
6106 		 * hugetlb_fault_mutex_table that here must be held by
6107 		 * the caller.
6108 		 */
6109 		ret = hugetlb_add_to_page_cache(page, mapping, idx);
6110 		if (ret)
6111 			goto out_release_nounlock;
6112 		page_in_pagecache = true;
6113 	}
6114 
6115 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
6116 
6117 	ret = -EIO;
6118 	if (PageHWPoison(page))
6119 		goto out_release_unlock;
6120 
6121 	/*
6122 	 * We allow a pte marker to be overwritten: consider when both
6123 	 * MISSING|WP are registered, we first wr-protect a none pte which has
6124 	 * no page cache page backing it, then access the page.
6125 	 */
6126 	ret = -EEXIST;
6127 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
6128 		goto out_release_unlock;
6129 
6130 	if (page_in_pagecache)
6131 		page_dup_file_rmap(page, true);
6132 	else
6133 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
6134 
6135 	/*
6136 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6137 	 * with wp flag set, don't set pte write bit.
6138 	 */
6139 	if (wp_copy || (is_continue && !vm_shared))
6140 		writable = 0;
6141 	else
6142 		writable = dst_vma->vm_flags & VM_WRITE;
6143 
6144 	_dst_pte = make_huge_pte(dst_vma, page, writable);
6145 	/*
6146 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6147 	 * extremely important for hugetlbfs for now since swapping is not
6148 	 * supported, but we should still be clear that this page cannot be
6149 	 * thrown away at will, even if the write bit is not set.
6150 	 */
6151 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6152 	_dst_pte = pte_mkyoung(_dst_pte);
6153 
6154 	if (wp_copy)
6155 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6156 
6157 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
6158 
6159 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6160 
6161 	/* No need to invalidate - it was non-present before */
6162 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6163 
6164 	spin_unlock(ptl);
6165 	if (!is_continue)
6166 		SetHPageMigratable(page);
6167 	if (vm_shared || is_continue)
6168 		unlock_page(page);
6169 	ret = 0;
6170 out:
6171 	return ret;
6172 out_release_unlock:
6173 	spin_unlock(ptl);
6174 	if (vm_shared || is_continue)
6175 		unlock_page(page);
6176 out_release_nounlock:
6177 	if (!page_in_pagecache)
6178 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
6179 	put_page(page);
6180 	goto out;
6181 }
6182 #endif /* CONFIG_USERFAULTFD */
6183 
6184 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
6185 				 int refs, struct page **pages,
6186 				 struct vm_area_struct **vmas)
6187 {
6188 	int nr;
6189 
6190 	for (nr = 0; nr < refs; nr++) {
6191 		if (likely(pages))
6192 			pages[nr] = nth_page(page, nr);
6193 		if (vmas)
6194 			vmas[nr] = vma;
6195 	}
6196 }
6197 
6198 static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
6199 					       unsigned int flags, pte_t *pte,
6200 					       bool *unshare)
6201 {
6202 	pte_t pteval = huge_ptep_get(pte);
6203 
6204 	*unshare = false;
6205 	if (is_swap_pte(pteval))
6206 		return true;
6207 	if (huge_pte_write(pteval))
6208 		return false;
6209 	if (flags & FOLL_WRITE)
6210 		return true;
6211 	if (gup_must_unshare(vma, flags, pte_page(pteval))) {
6212 		*unshare = true;
6213 		return true;
6214 	}
6215 	return false;
6216 }
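/*
 * Restating the checks above in order: a swap (migration/hwpoison) entry
 * always requires a fault; a writable pte never does; a FOLL_WRITE request
 * against a read-only pte does; otherwise fault only when GUP must first
 * unshare the page, in which case *unshare is set so the caller can pass
 * FAULT_FLAG_UNSHARE.
 */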
6217 
6218 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
6219 				unsigned long address, unsigned int flags)
6220 {
6221 	struct hstate *h = hstate_vma(vma);
6222 	struct mm_struct *mm = vma->vm_mm;
6223 	unsigned long haddr = address & huge_page_mask(h);
6224 	struct page *page = NULL;
6225 	spinlock_t *ptl;
6226 	pte_t *pte, entry;
6227 
6228 	/*
6229 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
6230 	 * follow_hugetlb_page().
6231 	 */
6232 	if (WARN_ON_ONCE(flags & FOLL_PIN))
6233 		return NULL;
6234 
6235 retry:
6236 	pte = huge_pte_offset(mm, haddr, huge_page_size(h));
6237 	if (!pte)
6238 		return NULL;
6239 
6240 	ptl = huge_pte_lock(h, mm, pte);
6241 	entry = huge_ptep_get(pte);
6242 	if (pte_present(entry)) {
6243 		page = pte_page(entry) +
6244 				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
6245 		/*
6246 		 * Note that page may be a sub-page, and with vmemmap
6247 		 * optimizations the page struct may be read only.
6248 		 * try_grab_page() will increase the ref count on the
6249 		 * head page, so this will be OK.
6250 		 *
6251 		 * try_grab_page() should always succeed here, because we hold
6252 		 * the ptl lock and have verified pte_present().
6253 		 */
6254 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
6255 			page = NULL;
6256 			goto out;
6257 		}
6258 	} else {
6259 		if (is_hugetlb_entry_migration(entry)) {
6260 			spin_unlock(ptl);
6261 			__migration_entry_wait_huge(pte, ptl);
6262 			goto retry;
6263 		}
6264 		/*
6265 		 * hwpoisoned entry is treated as no_page_table in
6266 		 * follow_page_mask().
6267 		 */
6268 	}
6269 out:
6270 	spin_unlock(ptl);
6271 	return page;
6272 }
6273 
6274 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
6275 			 struct page **pages, struct vm_area_struct **vmas,
6276 			 unsigned long *position, unsigned long *nr_pages,
6277 			 long i, unsigned int flags, int *locked)
6278 {
6279 	unsigned long pfn_offset;
6280 	unsigned long vaddr = *position;
6281 	unsigned long remainder = *nr_pages;
6282 	struct hstate *h = hstate_vma(vma);
6283 	int err = -EFAULT, refs;
6284 
6285 	while (vaddr < vma->vm_end && remainder) {
6286 		pte_t *pte;
6287 		spinlock_t *ptl = NULL;
6288 		bool unshare = false;
6289 		int absent;
6290 		struct page *page;
6291 
6292 		/*
6293 		 * If we have a pending SIGKILL, don't keep faulting pages and
6294 		 * potentially allocating memory.
6295 		 */
6296 		if (fatal_signal_pending(current)) {
6297 			remainder = 0;
6298 			break;
6299 		}
6300 
6301 		/*
6302 		 * Some archs (sparc64, sh*) have multiple pte_t entries for
6303 		 * each hugepage.  We have to make sure we get the
6304 		 * first, for the page indexing below to work.
6305 		 *
6306 		 * Note that page table lock is not held when pte is null.
6307 		 */
6308 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
6309 				      huge_page_size(h));
6310 		if (pte)
6311 			ptl = huge_pte_lock(h, mm, pte);
6312 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
6313 
6314 		/*
6315 		 * When coredumping, it suits get_dump_page if we just return
6316 		 * an error where there's an empty slot with no huge pagecache
6317 		 * to back it.  This way, we avoid allocating a hugepage, and
6318 		 * the sparse dumpfile avoids allocating disk blocks, but its
6319 		 * huge holes still show up with zeroes where they need to be.
6320 		 */
6321 		if (absent && (flags & FOLL_DUMP) &&
6322 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6323 			if (pte)
6324 				spin_unlock(ptl);
6325 			remainder = 0;
6326 			break;
6327 		}
6328 
6329 		/*
6330 		 * We need to call hugetlb_fault for both hugepages under
6331 		 * migration (in which case hugetlb_fault waits for the
6332 		 * migration) and hwpoisoned hugepages (in which case we need
6333 		 * to prevent the caller from accessing them). To do this, we
6334 		 * use is_swap_pte here instead of is_hugetlb_entry_migration
6335 		 * and is_hugetlb_entry_hwpoisoned, because it simply covers
6336 		 * both cases, and because we can't follow correct pages
6337 		 * directly from any kind of swap entries.
6338 		 */
6339 		if (absent ||
6340 		    __follow_hugetlb_must_fault(vma, flags, pte, &unshare)) {
6341 			vm_fault_t ret;
6342 			unsigned int fault_flags = 0;
6343 
6344 			if (pte)
6345 				spin_unlock(ptl);
6346 			if (flags & FOLL_WRITE)
6347 				fault_flags |= FAULT_FLAG_WRITE;
6348 			else if (unshare)
6349 				fault_flags |= FAULT_FLAG_UNSHARE;
6350 			if (locked)
6351 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6352 					FAULT_FLAG_KILLABLE;
6353 			if (flags & FOLL_NOWAIT)
6354 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6355 					FAULT_FLAG_RETRY_NOWAIT;
6356 			if (flags & FOLL_TRIED) {
6357 				/*
6358 				 * Note: FAULT_FLAG_ALLOW_RETRY and
6359 				 * FAULT_FLAG_TRIED can co-exist
6360 				 */
6361 				fault_flags |= FAULT_FLAG_TRIED;
6362 			}
6363 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
6364 			if (ret & VM_FAULT_ERROR) {
6365 				err = vm_fault_to_errno(ret, flags);
6366 				remainder = 0;
6367 				break;
6368 			}
6369 			if (ret & VM_FAULT_RETRY) {
6370 				if (locked &&
6371 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
6372 					*locked = 0;
6373 				*nr_pages = 0;
6374 				/*
6375 				 * VM_FAULT_RETRY must not return an
6376 				 * error, it will return zero
6377 				 * instead.
6378 				 *
6379 				 * No need to update "position" as the
6380 				 * caller will not check it after
6381 				 * *nr_pages is set to 0.
6382 				 */
6383 				return i;
6384 			}
6385 			continue;
6386 		}
6387 
6388 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
6389 		page = pte_page(huge_ptep_get(pte));
6390 
6391 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6392 			       !PageAnonExclusive(page), page);
6393 
6394 		/*
6395 		 * If subpage information is not requested, update counters
6396 		 * and skip the per-subpage handling below.
6397 		 */
6398 		if (!pages && !vmas && !pfn_offset &&
6399 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6400 		    (remainder >= pages_per_huge_page(h))) {
6401 			vaddr += huge_page_size(h);
6402 			remainder -= pages_per_huge_page(h);
6403 			i += pages_per_huge_page(h);
6404 			spin_unlock(ptl);
6405 			continue;
6406 		}
6407 
6408 		/* vaddr may not be aligned to PAGE_SIZE */
6409 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6410 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
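		/*
		 * Worked example (illustrative numbers, 2MB pages with 4K
		 * base pages, i.e. 512 subpages): with pfn_offset == 100,
		 * remainder == 1000 and 300 base pages left before
		 * vma->vm_end, refs = min3(412, 1000, 300) == 300, so we
		 * never record past the huge page, the request, or the vma.
		 */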
6411 
6412 		if (pages || vmas)
6413 			record_subpages_vmas(nth_page(page, pfn_offset),
6414 					     vma, refs,
6415 					     likely(pages) ? pages + i : NULL,
6416 					     vmas ? vmas + i : NULL);
6417 
6418 		if (pages) {
6419 			/*
6420 			 * try_grab_folio() should always succeed here,
6421 			 * because: a) we hold the ptl lock, and b) we've just
6422 			 * checked that the huge page is present in the page
6423 			 * tables. If the huge page is present, then the tail
6424 			 * pages must also be present. The ptl prevents the
6425 			 * head page and tail pages from being rearranged in
6426 			 * any way. So this page must be available at this
6427 			 * point, unless the page refcount overflowed:
6428 			 */
6429 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
6430 							 flags))) {
6431 				spin_unlock(ptl);
6432 				remainder = 0;
6433 				err = -ENOMEM;
6434 				break;
6435 			}
6436 		}
6437 
6438 		vaddr += (refs << PAGE_SHIFT);
6439 		remainder -= refs;
6440 		i += refs;
6441 
6442 		spin_unlock(ptl);
6443 	}
6444 	*nr_pages = remainder;
6445 	/*
6446 	 * setting position is actually required only if remainder is
6447 	 * not zero, but it's faster not to add an "if (remainder)"
6448 	 * branch.
6449 	 */
6450 	*position = vaddr;
6451 
6452 	return i ? i : err;
6453 }
6454 
6455 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
6456 		unsigned long address, unsigned long end,
6457 		pgprot_t newprot, unsigned long cp_flags)
6458 {
6459 	struct mm_struct *mm = vma->vm_mm;
6460 	unsigned long start = address;
6461 	pte_t *ptep;
6462 	pte_t pte;
6463 	struct hstate *h = hstate_vma(vma);
6464 	unsigned long pages = 0, psize = huge_page_size(h);
6465 	bool shared_pmd = false;
6466 	struct mmu_notifier_range range;
6467 	unsigned long last_addr_mask;
6468 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6469 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6470 
6471 	/*
6472 	 * In the case of shared PMDs, the area to flush could be beyond
6473 	 * start/end.  Set range.start/range.end to cover the maximum possible
6474 	 * range if PMD sharing is possible.
6475 	 */
6476 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6477 				0, vma, mm, start, end);
6478 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6479 
6480 	BUG_ON(address >= end);
6481 	flush_cache_range(vma, range.start, range.end);
6482 
6483 	mmu_notifier_invalidate_range_start(&range);
6484 	hugetlb_vma_lock_write(vma);
6485 	i_mmap_lock_write(vma->vm_file->f_mapping);
6486 	last_addr_mask = hugetlb_mask_last_page(h);
6487 	for (; address < end; address += psize) {
6488 		spinlock_t *ptl;
6489 		ptep = huge_pte_offset(mm, address, psize);
6490 		if (!ptep) {
6491 			address |= last_addr_mask;
6492 			continue;
6493 		}
6494 		ptl = huge_pte_lock(h, mm, ptep);
6495 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
6496 			/*
6497 			 * When uffd-wp is enabled on the vma, unshare
6498 			 * shouldn't happen at all.  Warn about it if it
6499 			 * happened due to some reason.
6500 			 * happens for some reason.
6501 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6502 			pages++;
6503 			spin_unlock(ptl);
6504 			shared_pmd = true;
6505 			address |= last_addr_mask;
6506 			continue;
6507 		}
6508 		pte = huge_ptep_get(ptep);
6509 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6510 			spin_unlock(ptl);
6511 			continue;
6512 		}
6513 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6514 			swp_entry_t entry = pte_to_swp_entry(pte);
6515 			struct page *page = pfn_swap_entry_to_page(entry);
6516 
6517 			if (!is_readable_migration_entry(entry)) {
6518 				pte_t newpte;
6519 
6520 				if (PageAnon(page))
6521 					entry = make_readable_exclusive_migration_entry(
6522 								swp_offset(entry));
6523 				else
6524 					entry = make_readable_migration_entry(
6525 								swp_offset(entry));
6526 				newpte = swp_entry_to_pte(entry);
6527 				if (uffd_wp)
6528 					newpte = pte_swp_mkuffd_wp(newpte);
6529 				else if (uffd_wp_resolve)
6530 					newpte = pte_swp_clear_uffd_wp(newpte);
6531 				set_huge_pte_at(mm, address, ptep, newpte);
6532 				pages++;
6533 			}
6534 			spin_unlock(ptl);
6535 			continue;
6536 		}
6537 		if (unlikely(pte_marker_uffd_wp(pte))) {
6538 			/*
6539 			 * This is changing a non-present pte into a none pte,
6540 			 * no need for huge_ptep_modify_prot_start/commit().
6541 			 */
6542 			if (uffd_wp_resolve)
6543 				huge_pte_clear(mm, address, ptep, psize);
6544 		}
6545 		if (!huge_pte_none(pte)) {
6546 			pte_t old_pte;
6547 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6548 
6549 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6550 			pte = huge_pte_modify(old_pte, newprot);
6551 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6552 			if (uffd_wp)
6553 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
6554 			else if (uffd_wp_resolve)
6555 				pte = huge_pte_clear_uffd_wp(pte);
6556 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6557 			pages++;
6558 		} else {
6559 			/* None pte */
6560 			if (unlikely(uffd_wp))
6561 				/* Safe to modify directly (none->non-present). */
6562 				set_huge_pte_at(mm, address, ptep,
6563 						make_pte_marker(PTE_MARKER_UFFD_WP));
6564 		}
6565 		spin_unlock(ptl);
6566 	}
6567 	/*
6568 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6569 	 * may have cleared our pud entry and done put_page on the page table:
6570 	 * once we release i_mmap_rwsem, another task can do the final put_page
6571 	 * and that page table may be reused and filled with junk.  If we actually
6572 	 * did unshare a page of pmds, flush the range corresponding to the pud.
6573 	 */
6574 	if (shared_pmd)
6575 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6576 	else
6577 		flush_hugetlb_tlb_range(vma, start, end);
6578 	/*
6579 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
6580 	 * page table protection, not changing it to point to a new page.
6581 	 *
6582 	 * See Documentation/mm/mmu_notifier.rst
6583 	 */
6584 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6585 	hugetlb_vma_unlock_write(vma);
6586 	mmu_notifier_invalidate_range_end(&range);
6587 
6588 	return pages << h->order;
6589 }
6590 
6591 /* Return true if reservation was successful, false otherwise.  */
6592 bool hugetlb_reserve_pages(struct inode *inode,
6593 					long from, long to,
6594 					struct vm_area_struct *vma,
6595 					vm_flags_t vm_flags)
6596 {
6597 	long chg, add = -1;
6598 	struct hstate *h = hstate_inode(inode);
6599 	struct hugepage_subpool *spool = subpool_inode(inode);
6600 	struct resv_map *resv_map;
6601 	struct hugetlb_cgroup *h_cg = NULL;
6602 	long gbl_reserve, regions_needed = 0;
6603 
6604 	/* This should never happen */
6605 	if (from > to) {
6606 		VM_WARN(1, "%s called with a negative range\n", __func__);
6607 		return false;
6608 	}
6609 
6610 	/*
6611 	 * vma specific semaphore used for pmd sharing synchronization
6612 	 */
6613 	hugetlb_vma_lock_alloc(vma);
6614 
6615 	/*
6616 	 * Only apply hugepage reservation if asked. At fault time, an
6617 	 * attempt will be made for VM_NORESERVE to allocate a page
6618 	 * without using reserves
6619 	 */
6620 	if (vm_flags & VM_NORESERVE)
6621 		return true;
6622 
6623 	/*
6624 	 * Shared mappings base their reservation on the number of pages that
6625 	 * are already allocated on behalf of the file. Private mappings need
6626 	 * to reserve the full area even if read-only as mprotect() may be
6627 	 * called to make the mapping read-write. Assume !vma is a shm mapping
6628 	 */
6629 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6630 		/*
6631 		 * resv_map cannot be NULL as hugetlb_reserve_pages is only
6632 		 * called for inodes for which resv_maps were created (see
6633 		 * hugetlbfs_get_inode).
6634 		 */
6635 		resv_map = inode_resv_map(inode);
6636 
6637 		chg = region_chg(resv_map, from, to, &regions_needed);
6638 	} else {
6639 		/* Private mapping. */
6640 		resv_map = resv_map_alloc();
6641 		if (!resv_map)
6642 			goto out_err;
6643 
6644 		chg = to - from;
6645 
6646 		set_vma_resv_map(vma, resv_map);
6647 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6648 	}
6649 
6650 	if (chg < 0)
6651 		goto out_err;
6652 
6653 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6654 				chg * pages_per_huge_page(h), &h_cg) < 0)
6655 		goto out_err;
6656 
6657 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6658 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6659 		 * of the resv_map.
6660 		 * off the resv_map.
6661 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6662 	}
6663 
6664 	/*
6665 	 * There must be enough pages in the subpool for the mapping. If
6666 	 * the subpool has a minimum size, there may be some global
6667 	 * reservations already in place (gbl_reserve).
6668 	 */
6669 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6670 	if (gbl_reserve < 0)
6671 		goto out_uncharge_cgroup;
6672 
6673 	/*
6674 	 * Check enough hugepages are available for the reservation.
6675 	 * Hand the pages back to the subpool if there are not enough.
6676 	 */
6677 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6678 		goto out_put_pages;
6679 
6680 	/*
6681 	 * Account for the reservations made. Shared mappings record regions
6682 	 * that have reservations as they are shared by multiple VMAs.
6683 	 * When the last VMA disappears, the region map says how much
6684 	 * the reservation was and the page cache tells how much of
6685 	 * the reservation was consumed. Private mappings are per-VMA and
6686 	 * only the consumed reservations are tracked. When the VMA
6687 	 * disappears, the original reservation is the VMA size and the
6688 	 * consumed reservations are stored in the map. Hence, nothing
6689 	 * else has to be done for private mappings here
6690 	 */
6691 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6692 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6693 
6694 		if (unlikely(add < 0)) {
6695 			hugetlb_acct_memory(h, -gbl_reserve);
6696 			goto out_put_pages;
6697 		} else if (unlikely(chg > add)) {
6698 			/*
6699 			 * pages in this range were added to the reserve
6700 			 * map between region_chg and region_add.  This
6701 			 * indicates a race with alloc_huge_page.  Adjust
6702 			 * the subpool and reserve counts modified above
6703 			 * based on the difference.
6704 			 */
6705 			long rsv_adjust;
6706 
6707 			/*
6708 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6709 			 * reference to h_cg->css. See comment below for detail.
6710 			 */
6711 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6712 				hstate_index(h),
6713 				(chg - add) * pages_per_huge_page(h), h_cg);
6714 
6715 			rsv_adjust = hugepage_subpool_put_pages(spool,
6716 								chg - add);
6717 			hugetlb_acct_memory(h, -rsv_adjust);
6718 		} else if (h_cg) {
6719 			/*
6720 			 * The file_regions will hold their own reference to
6721 			 * h_cg->css. So we should release the reference held
6722 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6723 			 * done.
6724 			 */
6725 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6726 		}
6727 	}
6728 	return true;
6729 
6730 out_put_pages:
6731 	/* put back original number of pages, chg */
6732 	(void)hugepage_subpool_put_pages(spool, chg);
6733 out_uncharge_cgroup:
6734 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6735 					    chg * pages_per_huge_page(h), h_cg);
6736 out_err:
6737 	hugetlb_vma_lock_free(vma);
6738 	if (!vma || vma->vm_flags & VM_MAYSHARE)
6739 		/* Only call region_abort if the region_chg succeeded but the
6740 		 * region_add failed or didn't run.
6741 		 */
6742 		if (chg >= 0 && add < 0)
6743 			region_abort(resv_map, from, to, regions_needed);
6744 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6745 		kref_put(&resv_map->refs, resv_map_release);
6746 	return false;
6747 }
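/*
 * Illustrative accounting walk-through for the shared-mapping path above
 * (hypothetical numbers): reserving [0, 8) against a resv_map that already
 * covers [0, 2) makes region_chg() return chg == 6; the subpool and
 * hugetlb_acct_memory() are then charged for those 6 pages, and
 * region_add() normally also returns add == 6.  If a racing allocation
 * extended the map in between, add < chg and the excess (chg - add) is
 * handed back to the cgroup, the subpool and the global reserve.
 */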
6748 
6749 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6750 								long freed)
6751 {
6752 	struct hstate *h = hstate_inode(inode);
6753 	struct resv_map *resv_map = inode_resv_map(inode);
6754 	long chg = 0;
6755 	struct hugepage_subpool *spool = subpool_inode(inode);
6756 	long gbl_reserve;
6757 
6758 	/*
6759 	 * Since this routine can be called in the evict inode path for all
6760 	 * hugetlbfs inodes, resv_map could be NULL.
6761 	 */
6762 	if (resv_map) {
6763 		chg = region_del(resv_map, start, end);
6764 		/*
6765 		 * region_del() can fail in the rare case where a region
6766 		 * must be split and another region descriptor cannot be
6767 		 * allocated.  If end == LONG_MAX, it will not fail.
6768 		 */
6769 		if (chg < 0)
6770 			return chg;
6771 	}
6772 
6773 	spin_lock(&inode->i_lock);
6774 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6775 	spin_unlock(&inode->i_lock);
6776 
6777 	/*
6778 	 * If the subpool has a minimum size, the number of global
6779 	 * reservations to be released may be adjusted.
6780 	 *
6781 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6782 	 * won't go negative.
6783 	 */
6784 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6785 	hugetlb_acct_memory(h, -gbl_reserve);
6786 
6787 	return 0;
6788 }
6789 
6790 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
6791 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6792 				struct vm_area_struct *vma,
6793 				unsigned long addr, pgoff_t idx)
6794 {
6795 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6796 				svma->vm_start;
6797 	unsigned long sbase = saddr & PUD_MASK;
6798 	unsigned long s_end = sbase + PUD_SIZE;
6799 
6800 	/* Allow segments to share if only one is marked locked */
6801 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6802 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
6803 
6804 	/*
6805 	 * Match the virtual addresses, permissions and the alignment of the
6806 	 * page table page.
6807 	 *
6808 	 * Also, vma_lock (vm_private_data) is required for sharing.
6809 	 */
6810 	if (pmd_index(addr) != pmd_index(saddr) ||
6811 	    vm_flags != svm_flags ||
6812 	    !range_in_vma(svma, sbase, s_end) ||
6813 	    !svma->vm_private_data)
6814 		return 0;
6815 
6816 	return saddr;
6817 }
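/*
 * Worked example (hypothetical addresses, x86-64 with PUD_SIZE == 1GB):
 * two MAP_SHARED vmas map the same file from pgoff 0, vma at 0x40000000
 * and svma at 0x80000000, both spanning a full 1GB-aligned PUD.  For
 * addr == 0x40400000, idx == 0x400 (with 4K base pages), so
 * saddr == (0x400 << PAGE_SHIFT) + 0x80000000 == 0x80400000; sbase ==
 * 0x80000000 and s_end == 0xc0000000 lie within svma, so if the flags
 * match and svma has a vma_lock, saddr is returned and the pmd page
 * covering it may be shared.
 */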
6818 
6819 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6820 {
6821 	unsigned long start = addr & PUD_MASK;
6822 	unsigned long end = start + PUD_SIZE;
6823 
6824 #ifdef CONFIG_USERFAULTFD
6825 	if (uffd_disable_huge_pmd_share(vma))
6826 		return false;
6827 #endif
6828 	/*
6829 	 * check on proper vm_flags and page table alignment
6830 	 */
6831 	if (!(vma->vm_flags & VM_MAYSHARE))
6832 		return false;
6833 	if (!vma->vm_private_data)	/* vma lock required for sharing */
6834 		return false;
6835 	if (!range_in_vma(vma, start, end))
6836 		return false;
6837 	return true;
6838 }
6839 
6840 /*
6841  * Determine if start,end range within vma could be mapped by shared pmd.
6842  * If yes, adjust start and end to cover range associated with possible
6843  * shared pmd mappings.
6844  */
6845 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6846 				unsigned long *start, unsigned long *end)
6847 {
6848 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6849 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6850 
6851 	/*
6852 	 * vma needs to span at least one aligned PUD size, and the range
6853 	 * must be at least partially within it.
6854 	 */
6855 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6856 		(*end <= v_start) || (*start >= v_end))
6857 		return;
6858 
6859 	/* Extend the range to be PUD aligned for a worst case scenario */
6860 	if (*start > v_start)
6861 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6862 
6863 	if (*end < v_end)
6864 		*end = ALIGN(*end, PUD_SIZE);
6865 }
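
/*
 * Illustrative sketch, not kernel code: the worst-case widening above,
 * assuming 1 GiB PUD regions.  An unmap range that only partially covers
 * a PUD region the vma spans is extended to the region boundaries, so a
 * shared PMD page covering the range can be unshared in full.
 */
#include <stdio.h>

#define DEMO_PUD_SIZE		(1UL << 30)
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define DEMO_ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* Hypothetical range strictly inside one PUD region. */
	unsigned long start = 0x40600000UL, end = 0x40a00000UL;

	start = DEMO_ALIGN_DOWN(start, DEMO_PUD_SIZE);	/* 0x40000000 */
	end   = DEMO_ALIGN(end, DEMO_PUD_SIZE);		/* 0x80000000 */
	printf("widened range: [%#lx, %#lx)\n", start, end);
	return 0;
}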
6866 
6867 static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
6868 {
6869 	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
6870 		vma->vm_private_data;
6871 }
6872 
6873 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
6874 {
6875 	if (__vma_shareable_flags_pmd(vma)) {
6876 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6877 
6878 		down_read(&vma_lock->rw_sema);
6879 	}
6880 }
6881 
6882 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
6883 {
6884 	if (__vma_shareable_flags_pmd(vma)) {
6885 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6886 
6887 		up_read(&vma_lock->rw_sema);
6888 	}
6889 }
6890 
6891 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
6892 {
6893 	if (__vma_shareable_flags_pmd(vma)) {
6894 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6895 
6896 		down_write(&vma_lock->rw_sema);
6897 	}
6898 }
6899 
6900 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
6901 {
6902 	if (__vma_shareable_flags_pmd(vma)) {
6903 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6904 
6905 		up_write(&vma_lock->rw_sema);
6906 	}
6907 }
6908 
6909 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
6910 {
6911 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6912 
6913 	if (!__vma_shareable_flags_pmd(vma))
6914 		return 1;
6915 
6916 	return down_write_trylock(&vma_lock->rw_sema);
6917 }
6918 
6919 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
6920 {
6921 	if (__vma_shareable_flags_pmd(vma)) {
6922 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6923 
6924 		lockdep_assert_held(&vma_lock->rw_sema);
6925 	}
6926 }
6927 
6928 void hugetlb_vma_lock_release(struct kref *kref)
6929 {
6930 	struct hugetlb_vma_lock *vma_lock = container_of(kref,
6931 			struct hugetlb_vma_lock, refs);
6932 
6933 	kfree(vma_lock);
6934 }
6935 
6936 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
6937 {
6938 	struct vm_area_struct *vma = vma_lock->vma;
6939 
6940 	/*
6941 	 * The vma_lock structure may or may not be released by the put, but
6942 	 * it will certainly no longer be attached to the vma, so clear the
6943 	 * pointer.  The semaphore synchronizes access to vma_lock->vma.
6944 	 */
6945 	vma_lock->vma = NULL;
6946 	vma->vm_private_data = NULL;
6947 	up_write(&vma_lock->rw_sema);
6948 	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
6949 }
6950 
6951 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
6952 {
6953 	if (__vma_shareable_flags_pmd(vma)) {
6954 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6955 
6956 		__hugetlb_vma_unlock_write_put(vma_lock);
6957 	}
6958 }
6959 
6960 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
6961 {
6962 	/*
6963 	 * Only present in sharable vmas.
6964 	 */
6965 	if (!vma || !__vma_shareable_flags_pmd(vma))
6966 		return;
6967 
6968 	if (vma->vm_private_data) {
6969 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6970 
6971 		down_write(&vma_lock->rw_sema);
6972 		__hugetlb_vma_unlock_write_put(vma_lock);
6973 	}
6974 }
6975 
6976 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
6977 {
6978 	struct hugetlb_vma_lock *vma_lock;
6979 
6980 	/* Only establish in sharable vmas, as judged by vm_flags */
6981 	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
6982 		return;
6983 
6984 	/* Should never get here with non-NULL vm_private_data */
6985 	if (vma->vm_private_data)
6986 		return;
6987 
6988 	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
6989 	if (!vma_lock) {
6990 		/*
6991 		 * If we cannot allocate the structure, the vma cannot
6992 		 * participate in pmd sharing; that only forgoes a possible
6993 		 * performance enhancement and memory saving.
6994 		 * However, the lock is also used to synchronize page
6995 		 * faults with truncation.  If the lock is not present,
6996 		 * unlikely races could leave pages in a file past i_size
6997 		 * until the file is removed.  Warn in the unlikely case of
6998 		 * allocation failure.
6999 		 */
7000 		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
7001 		return;
7002 	}
7003 
7004 	kref_init(&vma_lock->refs);
7005 	init_rwsem(&vma_lock->rw_sema);
7006 	vma_lock->vma = vma;
7007 	vma->vm_private_data = vma_lock;
7008 }
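
/*
 * Illustrative sketch, not kernel code: a userspace analog (pthreads and
 * C11 atomics, all "demo_" names hypothetical) of the vma_lock lifecycle
 * above.  The object couples a refcount with an rwsem; detaching clears
 * the backpointer while the write lock is held, then drops the owner's
 * reference, mirroring __hugetlb_vma_unlock_write_put().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_vma_lock {
	atomic_int refs;
	pthread_rwlock_t rw_sema;
	void *vma;			/* backpointer, cleared on detach */
};

static struct demo_vma_lock *demo_lock_alloc(void *vma)
{
	struct demo_vma_lock *l = malloc(sizeof(*l));

	if (!l)
		return NULL;
	atomic_init(&l->refs, 1);
	pthread_rwlock_init(&l->rw_sema, NULL);
	l->vma = vma;
	return l;
}

static void demo_lock_put(struct demo_vma_lock *l)
{
	if (atomic_fetch_sub(&l->refs, 1) == 1) {
		pthread_rwlock_destroy(&l->rw_sema);
		free(l);
	}
}

static void demo_unlock_write_put(struct demo_vma_lock *l)
{
	l->vma = NULL;			/* detach while write locked */
	pthread_rwlock_unlock(&l->rw_sema);
	demo_lock_put(l);		/* may drop the final reference */
}

int main(void)
{
	int fake_vma;
	struct demo_vma_lock *l = demo_lock_alloc(&fake_vma);

	if (!l)
		return 1;
	pthread_rwlock_wrlock(&l->rw_sema);
	demo_unlock_write_put(l);
	puts("vma lock detached and freed");
	return 0;
}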
7009 
7010 /*
7011  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
7012  * and returns the corresponding pte. While this is not necessary for the
7013  * !shared pmd case because we can allocate the pmd later as well, it makes the
7014  * code much cleaner. pmd allocation is essential for the shared case because
7015  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7016  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7017  * bad pmd for sharing.
7018  */
7019 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7020 		      unsigned long addr, pud_t *pud)
7021 {
7022 	struct address_space *mapping = vma->vm_file->f_mapping;
7023 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7024 			vma->vm_pgoff;
7025 	struct vm_area_struct *svma;
7026 	unsigned long saddr;
7027 	pte_t *spte = NULL;
7028 	pte_t *pte;
7029 	spinlock_t *ptl;
7030 
7031 	i_mmap_lock_read(mapping);
7032 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7033 		if (svma == vma)
7034 			continue;
7035 
7036 		saddr = page_table_shareable(svma, vma, addr, idx);
7037 		if (saddr) {
7038 			spte = huge_pte_offset(svma->vm_mm, saddr,
7039 					       vma_mmu_pagesize(svma));
7040 			if (spte) {
7041 				get_page(virt_to_page(spte));
7042 				break;
7043 			}
7044 		}
7045 	}
7046 
7047 	if (!spte)
7048 		goto out;
7049 
7050 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
7051 	if (pud_none(*pud)) {
7052 		pud_populate(mm, pud,
7053 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
7054 		mm_inc_nr_pmds(mm);
7055 	} else {
7056 		put_page(virt_to_page(spte));
7057 	}
7058 	spin_unlock(ptl);
7059 out:
7060 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
7061 	i_mmap_unlock_read(mapping);
7062 	return pte;
7063 }
7064 
7065 /*
7066  * unmap huge page backed by shared pte.
7067  *
7068  * A hugetlb pte page is ref counted at the time of mapping.  If the pte is
7069  * shared (page_count > 1), unmapping is achieved by clearing the pud and
7070  * decrementing the ref count.  If count == 1, the pte page is not shared.
7071  *
7072  * Called with page table lock held.
7073  *
7074  * returns: 1 successfully unmapped a shared pte page
7075  *	    0 the underlying pte page is not shared, or it is the last user
7076  */
7077 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7078 					unsigned long addr, pte_t *ptep)
7079 {
7080 	pgd_t *pgd = pgd_offset(mm, addr);
7081 	p4d_t *p4d = p4d_offset(pgd, addr);
7082 	pud_t *pud = pud_offset(p4d, addr);
7083 
7084 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7085 	hugetlb_vma_assert_locked(vma);
7086 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
7087 	if (page_count(virt_to_page(ptep)) == 1)
7088 		return 0;
7089 
7090 	pud_clear(pud);
7091 	put_page(virt_to_page(ptep));
7092 	mm_dec_nr_pmds(mm);
7093 	return 1;
7094 }
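
/*
 * Illustrative sketch, not kernel code: a toy model of the refcount
 * convention documented above, with hypothetical "demo_" names.  The pte
 * page's reference count doubles as its share count, so unsharing is
 * simply "decrement unless this is the last user".
 */
#include <assert.h>
#include <stdio.h>

struct demo_pmd_page { int refcount; };

static int demo_pmd_unshare(struct demo_pmd_page *p)
{
	assert(p->refcount > 0);
	if (p->refcount == 1)
		return 0;	/* last user: the page is not shared */
	p->refcount--;		/* drop this mm's reference */
	return 1;		/* shared pte page detached */
}

int main(void)
{
	struct demo_pmd_page pmd = { .refcount = 3 };	/* 3 mms share it */
	int r1 = demo_pmd_unshare(&pmd);
	int r2 = demo_pmd_unshare(&pmd);
	int r3 = demo_pmd_unshare(&pmd);

	printf("%d %d %d\n", r1, r2, r3);	/* prints "1 1 0" */
	return 0;
}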
7095 
7096 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7097 
7098 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
7099 {
7100 }
7101 
7102 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
7103 {
7104 }
7105 
7106 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
7107 {
7108 }
7109 
7110 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
7111 {
7112 }
7113 
7114 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
7115 {
7116 	return 1;
7117 }
7118 
7119 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
7120 {
7121 }
7122 
7123 void hugetlb_vma_lock_release(struct kref *kref)
7124 {
7125 }
7126 
7127 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
7128 {
7129 }
7130 
7131 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
7132 {
7133 }
7134 
7135 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
7136 {
7137 }
7138 
7139 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7140 		      unsigned long addr, pud_t *pud)
7141 {
7142 	return NULL;
7143 }
7144 
7145 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7146 				unsigned long addr, pte_t *ptep)
7147 {
7148 	return 0;
7149 }
7150 
7151 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7152 				unsigned long *start, unsigned long *end)
7153 {
7154 }
7155 
7156 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7157 {
7158 	return false;
7159 }
7160 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7161 
7162 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7163 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7164 			unsigned long addr, unsigned long sz)
7165 {
7166 	pgd_t *pgd;
7167 	p4d_t *p4d;
7168 	pud_t *pud;
7169 	pte_t *pte = NULL;
7170 
7171 	pgd = pgd_offset(mm, addr);
7172 	p4d = p4d_alloc(mm, pgd, addr);
7173 	if (!p4d)
7174 		return NULL;
7175 	pud = pud_alloc(mm, p4d, addr);
7176 	if (pud) {
7177 		if (sz == PUD_SIZE) {
7178 			pte = (pte_t *)pud;
7179 		} else {
7180 			BUG_ON(sz != PMD_SIZE);
7181 			if (want_pmd_share(vma, addr) && pud_none(*pud))
7182 				pte = huge_pmd_share(mm, vma, addr, pud);
7183 			else
7184 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
7185 		}
7186 	}
7187 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
7188 
7189 	return pte;
7190 }
7191 
7192 /*
7193  * huge_pte_offset() - Walk the page table to resolve the hugepage
7194  * entry at address @addr
7195  *
7196  * Return: Pointer to the page table entry (PUD or PMD) mapping
7197  * address @addr, or NULL if a !p*d_present() entry is encountered
7198  * before reaching the page table level that corresponds to the
7199  * hugepage size @sz.
7200  */
7201 pte_t *huge_pte_offset(struct mm_struct *mm,
7202 		       unsigned long addr, unsigned long sz)
7203 {
7204 	pgd_t *pgd;
7205 	p4d_t *p4d;
7206 	pud_t *pud;
7207 	pmd_t *pmd;
7208 
7209 	pgd = pgd_offset(mm, addr);
7210 	if (!pgd_present(*pgd))
7211 		return NULL;
7212 	p4d = p4d_offset(pgd, addr);
7213 	if (!p4d_present(*p4d))
7214 		return NULL;
7215 
7216 	pud = pud_offset(p4d, addr);
7217 	if (sz == PUD_SIZE)
7218 		/* must be pud huge, non-present or none */
7219 		return (pte_t *)pud;
7220 	if (!pud_present(*pud))
7221 		return NULL;
7222 	/* must have a valid entry and size to go further */
7223 
7224 	pmd = pmd_offset(pud, addr);
7225 	/* must be pmd huge, non-present or none */
7226 	return (pte_t *)pmd;
7227 }
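
/*
 * Illustrative sketch, not kernel code: a toy two-level walk in the
 * spirit of huge_pte_offset(), with hypothetical "demo_" types.  For a
 * PUD-sized page the walk stops at the PUD slot; otherwise it descends,
 * returning NULL when the upper-level entry is not present.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PMD_SHIFT	21
#define DEMO_PUD_SHIFT	30
#define DEMO_PTRS	512
#define DEMO_IDX(a, s)	(((a) >> (s)) & (DEMO_PTRS - 1))

struct demo_pmd_tbl { uint64_t entry[DEMO_PTRS]; };
struct demo_pud_tbl { struct demo_pmd_tbl *pmd[DEMO_PTRS]; };

static uint64_t *demo_huge_pte_offset(struct demo_pud_tbl *pud,
				      unsigned long addr, unsigned long sz)
{
	unsigned long i = DEMO_IDX(addr, DEMO_PUD_SHIFT);

	if (sz == 1UL << DEMO_PUD_SHIFT)	/* PUD-sized huge page */
		return (uint64_t *)&pud->pmd[i];
	if (!pud->pmd[i])			/* !pud_present() analog */
		return NULL;
	return &pud->pmd[i]->entry[DEMO_IDX(addr, DEMO_PMD_SHIFT)];
}

int main(void)
{
	static struct demo_pud_tbl pud;
	unsigned long addr = 0x40600000UL;

	printf("2M walk, empty pud: %p\n",
	       (void *)demo_huge_pte_offset(&pud, addr, 1UL << DEMO_PMD_SHIFT));
	pud.pmd[DEMO_IDX(addr, DEMO_PUD_SHIFT)] =
		calloc(1, sizeof(struct demo_pmd_tbl));
	printf("2M walk, populated: %p\n",
	       (void *)demo_huge_pte_offset(&pud, addr, 1UL << DEMO_PMD_SHIFT));
	return 0;
}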
7228 
7229 /*
7230  * Return a mask that can be used to advance an address to the last huge
7231  * page covered by the same page table page.  Used to skip non-present
7232  * page table entries when linearly scanning address ranges.  Architectures
7233  * with unique huge page to page table relationships can define their own
7234  * version of this routine.
7235  */
7236 unsigned long hugetlb_mask_last_page(struct hstate *h)
7237 {
7238 	unsigned long hp_size = huge_page_size(h);
7239 
7240 	if (hp_size == PUD_SIZE)
7241 		return P4D_SIZE - PUD_SIZE;
7242 	else if (hp_size == PMD_SIZE)
7243 		return PUD_SIZE - PMD_SIZE;
7244 	else
7245 		return 0UL;
7246 }
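
/*
 * Illustrative sketch, not kernel code: how scan loops use this mask,
 * assuming 2 MiB huge pages under 1 GiB PUD regions.  On a non-present
 * PUD a caller does "addr |= mask", so the loop's normal "addr += sz"
 * step lands on the next PUD boundary instead of probing all 512 PMD
 * slots one by one.
 */
#include <stdio.h>

#define DEMO_PMD_SIZE	(1UL << 21)
#define DEMO_PUD_SIZE	(1UL << 30)

int main(void)
{
	unsigned long mask = DEMO_PUD_SIZE - DEMO_PMD_SIZE;
	unsigned long addr = 0x40200000UL;	/* hypothetical scan point */

	addr |= mask;		/* jump to the region's last huge page */
	addr += DEMO_PMD_SIZE;	/* the loop step: next PUD boundary */
	printf("resume scan at %#lx\n", addr);	/* 0x80000000 */
	return 0;
}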
7247 
7248 #else
7249 
7250 /* See description above.  Architectures can provide their own version. */
7251 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7252 {
7253 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7254 	if (huge_page_size(h) == PMD_SIZE)
7255 		return PUD_SIZE - PMD_SIZE;
7256 #endif
7257 	return 0UL;
7258 }
7259 
7260 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7261 
7262 /*
7263  * These functions can be overridden if your architecture needs its own
7264  * behavior.
7265  */
7266 int isolate_hugetlb(struct page *page, struct list_head *list)
7267 {
7268 	int ret = 0;
7269 
7270 	spin_lock_irq(&hugetlb_lock);
7271 	if (!PageHeadHuge(page) ||
7272 	    !HPageMigratable(page) ||
7273 	    !get_page_unless_zero(page)) {
7274 		ret = -EBUSY;
7275 		goto unlock;
7276 	}
7277 	ClearHPageMigratable(page);
7278 	list_move_tail(&page->lru, list);
7279 unlock:
7280 	spin_unlock_irq(&hugetlb_lock);
7281 	return ret;
7282 }
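
/*
 * Illustrative sketch, not kernel code: the "take a reference only if
 * one already exists" pattern that get_page_unless_zero() provides
 * above, re-created with C11 atomics and hypothetical names.  A page
 * whose count has already dropped to zero is being freed and must not
 * be isolated.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool demo_get_unless_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0)
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live: %d, dying: %d\n",
	       demo_get_unless_zero(&live), demo_get_unless_zero(&dying));
	return 0;
}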
7283 
7284 int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
7285 {
7286 	int ret = 0;
7287 
7288 	*hugetlb = false;
7289 	spin_lock_irq(&hugetlb_lock);
7290 	if (PageHeadHuge(page)) {
7291 		*hugetlb = true;
7292 		if (HPageFreed(page))
7293 			ret = 0;
7294 		else if (HPageMigratable(page) || unpoison)
7295 			ret = get_page_unless_zero(page);
7296 		else
7297 			ret = -EBUSY;
7298 	}
7299 	spin_unlock_irq(&hugetlb_lock);
7300 	return ret;
7301 }
7302 
7303 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7304 				bool *migratable_cleared)
7305 {
7306 	int ret;
7307 
7308 	spin_lock_irq(&hugetlb_lock);
7309 	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7310 	spin_unlock_irq(&hugetlb_lock);
7311 	return ret;
7312 }
7313 
7314 void putback_active_hugepage(struct page *page)
7315 {
7316 	spin_lock_irq(&hugetlb_lock);
7317 	SetHPageMigratable(page);
7318 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
7319 	spin_unlock_irq(&hugetlb_lock);
7320 	put_page(page);
7321 }
7322 
7323 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7324 {
7325 	struct hstate *h = folio_hstate(old_folio);
7326 
7327 	hugetlb_cgroup_migrate(old_folio, new_folio);
7328 	set_page_owner_migrate_reason(&new_folio->page, reason);
7329 
7330 	/*
7331 	 * Transfer the temporary status of the new hugetlb folio.  This is
7332 	 * the reverse of other transitions because the new folio is going
7333 	 * to be final while the old one will be freed, so it takes over
7334 	 * the temporary status.
7335 	 *
7336 	 * Also note that we have to transfer the per-node surplus state
7337 	 * here as well, otherwise the global surplus count will not match
7338 	 * the per-node counts.
7339 	 */
7340 	if (folio_test_hugetlb_temporary(new_folio)) {
7341 		int old_nid = folio_nid(old_folio);
7342 		int new_nid = folio_nid(new_folio);
7343 
7344 		folio_set_hugetlb_temporary(old_folio);
7345 		folio_clear_hugetlb_temporary(new_folio);
7346 
7347 
7349 		 * There is no need to transfer the per-node surplus state
7350 		 * when we do not cross the node.
7351 		 */
7352 		if (new_nid == old_nid)
7353 			return;
7354 		spin_lock_irq(&hugetlb_lock);
7355 		if (h->surplus_huge_pages_node[old_nid]) {
7356 			h->surplus_huge_pages_node[old_nid]--;
7357 			h->surplus_huge_pages_node[new_nid]++;
7358 		}
7359 		spin_unlock_irq(&hugetlb_lock);
7360 	}
7361 }
7362 
7363 /*
7364  * This function will unconditionally remove all the shared pmd pgtable entries
7365  * entries within the given vma for a hugetlbfs memory range.
7366  */
7367 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7368 {
7369 	struct hstate *h = hstate_vma(vma);
7370 	unsigned long sz = huge_page_size(h);
7371 	struct mm_struct *mm = vma->vm_mm;
7372 	struct mmu_notifier_range range;
7373 	unsigned long address, start, end;
7374 	spinlock_t *ptl;
7375 	pte_t *ptep;
7376 
7377 	if (!(vma->vm_flags & VM_MAYSHARE))
7378 		return;
7379 
7380 	start = ALIGN(vma->vm_start, PUD_SIZE);
7381 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7382 
7383 	if (start >= end)
7384 		return;
7385 
7386 	flush_cache_range(vma, start, end);
7387 	/*
7388 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
7389 	 * we have already done the PUD_SIZE alignment.
7390 	 */
7391 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
7392 				start, end);
7393 	mmu_notifier_invalidate_range_start(&range);
7394 	hugetlb_vma_lock_write(vma);
7395 	i_mmap_lock_write(vma->vm_file->f_mapping);
7396 	for (address = start; address < end; address += PUD_SIZE) {
7397 		ptep = huge_pte_offset(mm, address, sz);
7398 		if (!ptep)
7399 			continue;
7400 		ptl = huge_pte_lock(h, mm, ptep);
7401 		huge_pmd_unshare(mm, vma, address, ptep);
7402 		spin_unlock(ptl);
7403 	}
7404 	flush_hugetlb_tlb_range(vma, start, end);
7405 	i_mmap_unlock_write(vma->vm_file->f_mapping);
7406 	hugetlb_vma_unlock_write(vma);
7407 	/*
7408 	 * No need to call mmu_notifier_invalidate_range(), see
7409 	 * Documentation/mm/mmu_notifier.rst.
7410 	 */
7411 	mmu_notifier_invalidate_range_end(&range);
7412 }
7413 
7414 #ifdef CONFIG_CMA
7415 static bool cma_reserve_called __initdata;
7416 
7417 static int __init cmdline_parse_hugetlb_cma(char *p)
7418 {
7419 	int nid, count = 0;
7420 	unsigned long tmp;
7421 	char *s = p;
7422 
7423 	while (*s) {
7424 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7425 			break;
7426 
7427 		if (s[count] == ':') {
7428 			if (tmp >= MAX_NUMNODES)
7429 				break;
7430 			nid = array_index_nospec(tmp, MAX_NUMNODES);
7431 
7432 			s += count + 1;
7433 			tmp = memparse(s, &s);
7434 			hugetlb_cma_size_in_node[nid] = tmp;
7435 			hugetlb_cma_size += tmp;
7436 
7437 			/*
7438 			 * Skip the separator if we have one; otherwise
7439 			 * stop parsing.
7440 			 */
7441 			if (*s == ',')
7442 				s++;
7443 			else
7444 				break;
7445 		} else {
7446 			hugetlb_cma_size = memparse(p, &p);
7447 			break;
7448 		}
7449 	}
7450 
7451 	return 0;
7452 }
7453 
7454 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
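
/*
 * Illustrative sketch, not kernel code: the per-node branch of the
 * "hugetlb_cma=<nid>:<size>[,<nid>:<size>...]" parsing above, rebuilt
 * as a userspace program with a minimal stand-in for the kernel's
 * memparse() suffix handling (all "demo_" names hypothetical).
 */
#include <stdio.h>
#include <stdlib.h>

/* Accepts a number with an optional K/M/G suffix, like memparse(). */
static unsigned long demo_memparse(const char *s, char **end)
{
	unsigned long v = strtoul(s, end, 0);

	switch (**end) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10;
		  (*end)++;
		  break;
	}
	return v;
}

int main(void)
{
	char *s = "0:1G,1:512M";	/* hypothetical boot argument */
	unsigned long nid, total = 0;
	int count;

	while (sscanf(s, "%lu%n", &nid, &count) == 1 && s[count] == ':') {
		unsigned long size;

		s += count + 1;
		size = demo_memparse(s, &s);
		total += size;
		printf("node %lu: %lu bytes\n", nid, size);
		if (*s != ',')	/* no separator: stop parsing */
			break;
		s++;
	}
	printf("total: %lu bytes\n", total);
	return 0;
}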
7455 
7456 void __init hugetlb_cma_reserve(int order)
7457 {
7458 	unsigned long size, reserved, per_node;
7459 	bool node_specific_cma_alloc = false;
7460 	int nid;
7461 
7462 	cma_reserve_called = true;
7463 
7464 	if (!hugetlb_cma_size)
7465 		return;
7466 
7467 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
7468 		if (hugetlb_cma_size_in_node[nid] == 0)
7469 			continue;
7470 
7471 		if (!node_online(nid)) {
7472 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7473 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7474 			hugetlb_cma_size_in_node[nid] = 0;
7475 			continue;
7476 		}
7477 
7478 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7479 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7480 				nid, (PAGE_SIZE << order) / SZ_1M);
7481 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7482 			hugetlb_cma_size_in_node[nid] = 0;
7483 		} else {
7484 			node_specific_cma_alloc = true;
7485 		}
7486 	}
7487 
7488 	/* Validate the CMA size again in case invalid nodes were specified. */
7489 	if (!hugetlb_cma_size)
7490 		return;
7491 
7492 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7493 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7494 			(PAGE_SIZE << order) / SZ_1M);
7495 		hugetlb_cma_size = 0;
7496 		return;
7497 	}
7498 
7499 	if (!node_specific_cma_alloc) {
7500 		/*
7501 		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7502 		 * allocate 1 GB on the first three nodes and ignore the last one.
7503 		 */
7504 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7505 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7506 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7507 	}
7508 
7509 	reserved = 0;
7510 	for_each_online_node(nid) {
7511 		int res;
7512 		char name[CMA_MAX_NAME];
7513 
7514 		if (node_specific_cma_alloc) {
7515 			if (hugetlb_cma_size_in_node[nid] == 0)
7516 				continue;
7517 
7518 			size = hugetlb_cma_size_in_node[nid];
7519 		} else {
7520 			size = min(per_node, hugetlb_cma_size - reserved);
7521 		}
7522 
7523 		size = round_up(size, PAGE_SIZE << order);
7524 
7525 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7526 		/*
7527 		 * Note that the 'order per bit' is based on the smallest size
7528 		 * that may be returned to the CMA allocator in the case of
7529 		 * huge page demotion.
7530 		 */
7531 		res = cma_declare_contiguous_nid(0, size, 0,
7532 						 PAGE_SIZE << HUGETLB_PAGE_ORDER,
7533 						 0, false, name,
7534 						 &hugetlb_cma[nid], nid);
7535 		if (res) {
7536 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
7537 				res, nid);
7538 			continue;
7539 		}
7540 
7541 		reserved += size;
7542 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7543 			size / SZ_1M, nid);
7544 
7545 		if (reserved >= hugetlb_cma_size)
7546 			break;
7547 	}
7548 
7549 	if (!reserved)
7550 		/*
7551 		 * hugetlb_cma_size is used to determine if allocations from
7552 		 * cma are possible.  Set to zero if no cma regions are set up.
7553 		 */
7554 		hugetlb_cma_size = 0;
7555 }
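
/*
 * Illustrative sketch, not kernel code: the per-node split arithmetic
 * above, reproduced standalone for the comment's example (a 3 GiB
 * request across 4 nodes with 1 GiB gigantic pages).  Rounding each
 * slice up to a gigantic page gives the first three nodes 1 GiB each
 * and leaves nothing for the last.
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DEMO_ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long cma_size = 3UL << 30;	/* 3 GiB requested */
	unsigned long gpage = 1UL << 30;	/* gigantic page size */
	unsigned long per_node = DEMO_DIV_ROUND_UP(cma_size, 4UL);
	unsigned long reserved = 0;
	int nid;

	for (nid = 0; nid < 4; nid++) {
		unsigned long size = per_node < cma_size - reserved ?
				     per_node : cma_size - reserved;

		size = DEMO_ROUND_UP(size, gpage);
		reserved += size;
		printf("node %d: %lu MiB\n", nid, size >> 20);
		if (reserved >= cma_size)
			break;	/* later nodes get nothing */
	}
	return 0;
}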
7556 
7557 static void __init hugetlb_cma_check(void)
7558 {
7559 	if (!hugetlb_cma_size || cma_reserve_called)
7560 		return;
7561 
7562 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7563 }
7564 
7565 #endif /* CONFIG_CMA */
7566