xref: /openbmc/linux/mm/hugetlb.c (revision 85716a80)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35 #include <linux/delayacct.h>
36 #include <linux/memory.h>
37 
38 #include <asm/page.h>
39 #include <asm/pgalloc.h>
40 #include <asm/tlb.h>
41 
42 #include <linux/io.h>
43 #include <linux/hugetlb.h>
44 #include <linux/hugetlb_cgroup.h>
45 #include <linux/node.h>
46 #include <linux/page_owner.h>
47 #include "internal.h"
48 #include "hugetlb_vmemmap.h"
49 
50 int hugetlb_max_hstate __read_mostly;
51 unsigned int default_hstate_idx;
52 struct hstate hstates[HUGE_MAX_HSTATE];
53 
54 #ifdef CONFIG_CMA
55 static struct cma *hugetlb_cma[MAX_NUMNODES];
56 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
57 static bool hugetlb_cma_page(struct page *page, unsigned int order)
58 {
59 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
60 				1 << order);
61 }
62 #else
63 static bool hugetlb_cma_page(struct page *page, unsigned int order)
64 {
65 	return false;
66 }
67 #endif
68 static unsigned long hugetlb_cma_size __initdata;
69 
70 __initdata LIST_HEAD(huge_boot_pages);
71 
72 /* for command line parsing */
73 static struct hstate * __initdata parsed_hstate;
74 static unsigned long __initdata default_hstate_max_huge_pages;
75 static bool __initdata parsed_valid_hugepagesz = true;
76 static bool __initdata parsed_default_hugepagesz;
77 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
78 
79 /*
80  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
81  * free_huge_pages, and surplus_huge_pages.
82  */
83 DEFINE_SPINLOCK(hugetlb_lock);
84 
85 /*
86  * Serializes faults on the same logical page.  This is used to
87  * prevent spurious OOMs when the hugepage pool is fully utilized.
88  */
89 static int num_fault_mutexes;
90 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
91 
92 /* Forward declaration */
93 static int hugetlb_acct_memory(struct hstate *h, long delta);
94 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
95 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
96 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
97 
98 static inline bool subpool_is_free(struct hugepage_subpool *spool)
99 {
100 	if (spool->count)
101 		return false;
102 	if (spool->max_hpages != -1)
103 		return spool->used_hpages == 0;
104 	if (spool->min_hpages != -1)
105 		return spool->rsv_hpages == spool->min_hpages;
106 
107 	return true;
108 }
109 
110 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
111 						unsigned long irq_flags)
112 {
113 	spin_unlock_irqrestore(&spool->lock, irq_flags);
114 
115 	/* If no pages are used, and no other handles to the subpool
116 	 * remain, give up any reservations based on minimum size and
117 	 * free the subpool */
118 	if (subpool_is_free(spool)) {
119 		if (spool->min_hpages != -1)
120 			hugetlb_acct_memory(spool->hstate,
121 						-spool->min_hpages);
122 		kfree(spool);
123 	}
124 }
125 
126 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
127 						long min_hpages)
128 {
129 	struct hugepage_subpool *spool;
130 
131 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
132 	if (!spool)
133 		return NULL;
134 
135 	spin_lock_init(&spool->lock);
136 	spool->count = 1;
137 	spool->max_hpages = max_hpages;
138 	spool->hstate = h;
139 	spool->min_hpages = min_hpages;
140 
141 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
142 		kfree(spool);
143 		return NULL;
144 	}
145 	spool->rsv_hpages = min_hpages;
146 
147 	return spool;
148 }
149 
150 void hugepage_put_subpool(struct hugepage_subpool *spool)
151 {
152 	unsigned long flags;
153 
154 	spin_lock_irqsave(&spool->lock, flags);
155 	BUG_ON(!spool->count);
156 	spool->count--;
157 	unlock_or_release_subpool(spool, flags);
158 }
159 
160 /*
161  * Subpool accounting for allocating and reserving pages.
162  * Return -ENOMEM if there are not enough resources to satisfy the
163  * request.  Otherwise, return the number of pages by which the
164  * global pools must be adjusted (upward).  The returned value may
165  * only be different than the passed value (delta) in the case where
166  * a subpool minimum size must be maintained.
167  */
168 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
169 				      long delta)
170 {
171 	long ret = delta;
172 
173 	if (!spool)
174 		return ret;
175 
176 	spin_lock_irq(&spool->lock);
177 
178 	if (spool->max_hpages != -1) {		/* maximum size accounting */
179 		if ((spool->used_hpages + delta) <= spool->max_hpages)
180 			spool->used_hpages += delta;
181 		else {
182 			ret = -ENOMEM;
183 			goto unlock_ret;
184 		}
185 	}
186 
187 	/* minimum size accounting */
188 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
189 		if (delta > spool->rsv_hpages) {
190 			/*
191 			 * Asking for more reserves than those already taken on
192 			 * behalf of subpool.  Return difference.
193 			 */
194 			ret = delta - spool->rsv_hpages;
195 			spool->rsv_hpages = 0;
196 		} else {
197 			ret = 0;	/* reserves already accounted for */
198 			spool->rsv_hpages -= delta;
199 		}
200 	}
201 
202 unlock_ret:
203 	spin_unlock_irq(&spool->lock);
204 	return ret;
205 }
206 
207 /*
208  * Subpool accounting for freeing and unreserving pages.
209  * Return the number of global page reservations that must be dropped.
210  * The return value may only be different than the passed value (delta)
211  * in the case where a subpool minimum size must be maintained.
212  */
213 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
214 				       long delta)
215 {
216 	long ret = delta;
217 	unsigned long flags;
218 
219 	if (!spool)
220 		return delta;
221 
222 	spin_lock_irqsave(&spool->lock, flags);
223 
224 	if (spool->max_hpages != -1)		/* maximum size accounting */
225 		spool->used_hpages -= delta;
226 
227 	 /* minimum size accounting */
228 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
229 		if (spool->rsv_hpages + delta <= spool->min_hpages)
230 			ret = 0;
231 		else
232 			ret = spool->rsv_hpages + delta - spool->min_hpages;
233 
234 		spool->rsv_hpages += delta;
235 		if (spool->rsv_hpages > spool->min_hpages)
236 			spool->rsv_hpages = spool->min_hpages;
237 	}
238 
239 	/*
240 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
241 	 * quota reference, free it now.
242 	 */
243 	unlock_or_release_subpool(spool, flags);
244 
245 	return ret;
246 }
247 
248 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
249 {
250 	return HUGETLBFS_SB(inode->i_sb)->spool;
251 }
252 
253 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
254 {
255 	return subpool_inode(file_inode(vma->vm_file));
256 }
257 
258 /* Helper that removes a struct file_region from the resv_map cache and returns
259  * it for use.
260  */
261 static struct file_region *
262 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
263 {
264 	struct file_region *nrg;
265 
266 	VM_BUG_ON(resv->region_cache_count <= 0);
267 
268 	resv->region_cache_count--;
269 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
270 	list_del(&nrg->link);
271 
272 	nrg->from = from;
273 	nrg->to = to;
274 
275 	return nrg;
276 }
277 
278 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
279 					      struct file_region *rg)
280 {
281 #ifdef CONFIG_CGROUP_HUGETLB
282 	nrg->reservation_counter = rg->reservation_counter;
283 	nrg->css = rg->css;
284 	if (rg->css)
285 		css_get(rg->css);
286 #endif
287 }
288 
289 /* Helper that records hugetlb_cgroup uncharge info. */
290 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
291 						struct hstate *h,
292 						struct resv_map *resv,
293 						struct file_region *nrg)
294 {
295 #ifdef CONFIG_CGROUP_HUGETLB
296 	if (h_cg) {
297 		nrg->reservation_counter =
298 			&h_cg->rsvd_hugepage[hstate_index(h)];
299 		nrg->css = &h_cg->css;
300 		/*
301 		 * The caller will hold exactly one h_cg->css reference for the
302 		 * whole contiguous reservation region. But this area might be
303 		 * scattered when there are already some file_regions reside in
304 		 * it. As a result, many file_regions may share only one css
305 		 * reference. In order to ensure that one file_region must hold
306 		 * exactly one h_cg->css reference, we should do css_get for
307 		 * each file_region and leave the reference held by caller
308 		 * untouched.
309 		 */
310 		css_get(&h_cg->css);
311 		if (!resv->pages_per_hpage)
312 			resv->pages_per_hpage = pages_per_huge_page(h);
313 		/* pages_per_hpage should be the same for all entries in
314 		 * a resv_map.
315 		 */
316 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
317 	} else {
318 		nrg->reservation_counter = NULL;
319 		nrg->css = NULL;
320 	}
321 #endif
322 }
323 
324 static void put_uncharge_info(struct file_region *rg)
325 {
326 #ifdef CONFIG_CGROUP_HUGETLB
327 	if (rg->css)
328 		css_put(rg->css);
329 #endif
330 }
331 
332 static bool has_same_uncharge_info(struct file_region *rg,
333 				   struct file_region *org)
334 {
335 #ifdef CONFIG_CGROUP_HUGETLB
336 	return rg->reservation_counter == org->reservation_counter &&
337 	       rg->css == org->css;
338 
339 #else
340 	return true;
341 #endif
342 }
343 
344 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
345 {
346 	struct file_region *nrg, *prg;
347 
348 	prg = list_prev_entry(rg, link);
349 	if (&prg->link != &resv->regions && prg->to == rg->from &&
350 	    has_same_uncharge_info(prg, rg)) {
351 		prg->to = rg->to;
352 
353 		list_del(&rg->link);
354 		put_uncharge_info(rg);
355 		kfree(rg);
356 
357 		rg = prg;
358 	}
359 
360 	nrg = list_next_entry(rg, link);
361 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
362 	    has_same_uncharge_info(nrg, rg)) {
363 		nrg->from = rg->from;
364 
365 		list_del(&rg->link);
366 		put_uncharge_info(rg);
367 		kfree(rg);
368 	}
369 }
370 
371 static inline long
372 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
373 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
374 		     long *regions_needed)
375 {
376 	struct file_region *nrg;
377 
378 	if (!regions_needed) {
379 		nrg = get_file_region_entry_from_cache(map, from, to);
380 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
381 		list_add(&nrg->link, rg);
382 		coalesce_file_region(map, nrg);
383 	} else
384 		*regions_needed += 1;
385 
386 	return to - from;
387 }
388 
389 /*
390  * Must be called with resv->lock held.
391  *
392  * Calling this with regions_needed != NULL will count the number of pages
393  * to be added but will not modify the linked list. And regions_needed will
394  * indicate the number of file_regions needed in the cache to carry out to add
395  * the regions for this range.
396  */
397 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
398 				     struct hugetlb_cgroup *h_cg,
399 				     struct hstate *h, long *regions_needed)
400 {
401 	long add = 0;
402 	struct list_head *head = &resv->regions;
403 	long last_accounted_offset = f;
404 	struct file_region *iter, *trg = NULL;
405 	struct list_head *rg = NULL;
406 
407 	if (regions_needed)
408 		*regions_needed = 0;
409 
410 	/* In this loop, we essentially handle an entry for the range
411 	 * [last_accounted_offset, iter->from), at every iteration, with some
412 	 * bounds checking.
413 	 */
414 	list_for_each_entry_safe(iter, trg, head, link) {
415 		/* Skip irrelevant regions that start before our range. */
416 		if (iter->from < f) {
417 			/* If this region ends after the last accounted offset,
418 			 * then we need to update last_accounted_offset.
419 			 */
420 			if (iter->to > last_accounted_offset)
421 				last_accounted_offset = iter->to;
422 			continue;
423 		}
424 
425 		/* When we find a region that starts beyond our range, we've
426 		 * finished.
427 		 */
428 		if (iter->from >= t) {
429 			rg = iter->link.prev;
430 			break;
431 		}
432 
433 		/* Add an entry for last_accounted_offset -> iter->from, and
434 		 * update last_accounted_offset.
435 		 */
436 		if (iter->from > last_accounted_offset)
437 			add += hugetlb_resv_map_add(resv, iter->link.prev,
438 						    last_accounted_offset,
439 						    iter->from, h, h_cg,
440 						    regions_needed);
441 
442 		last_accounted_offset = iter->to;
443 	}
444 
445 	/* Handle the case where our range extends beyond
446 	 * last_accounted_offset.
447 	 */
448 	if (!rg)
449 		rg = head->prev;
450 	if (last_accounted_offset < t)
451 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
452 					    t, h, h_cg, regions_needed);
453 
454 	return add;
455 }
456 
457 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
458  */
459 static int allocate_file_region_entries(struct resv_map *resv,
460 					int regions_needed)
461 	__must_hold(&resv->lock)
462 {
463 	LIST_HEAD(allocated_regions);
464 	int to_allocate = 0, i = 0;
465 	struct file_region *trg = NULL, *rg = NULL;
466 
467 	VM_BUG_ON(regions_needed < 0);
468 
469 	/*
470 	 * Check for sufficient descriptors in the cache to accommodate
471 	 * the number of in progress add operations plus regions_needed.
472 	 *
473 	 * This is a while loop because when we drop the lock, some other call
474 	 * to region_add or region_del may have consumed some region_entries,
475 	 * so we keep looping here until we finally have enough entries for
476 	 * (adds_in_progress + regions_needed).
477 	 */
478 	while (resv->region_cache_count <
479 	       (resv->adds_in_progress + regions_needed)) {
480 		to_allocate = resv->adds_in_progress + regions_needed -
481 			      resv->region_cache_count;
482 
483 		/* At this point, we should have enough entries in the cache
484 		 * for all the existing adds_in_progress. We should only be
485 		 * needing to allocate for regions_needed.
486 		 */
487 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
488 
489 		spin_unlock(&resv->lock);
490 		for (i = 0; i < to_allocate; i++) {
491 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
492 			if (!trg)
493 				goto out_of_memory;
494 			list_add(&trg->link, &allocated_regions);
495 		}
496 
497 		spin_lock(&resv->lock);
498 
499 		list_splice(&allocated_regions, &resv->region_cache);
500 		resv->region_cache_count += to_allocate;
501 	}
502 
503 	return 0;
504 
505 out_of_memory:
506 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
507 		list_del(&rg->link);
508 		kfree(rg);
509 	}
510 	return -ENOMEM;
511 }
512 
513 /*
514  * Add the huge page range represented by [f, t) to the reserve
515  * map.  Regions will be taken from the cache to fill in this range.
516  * Sufficient regions should exist in the cache due to the previous
517  * call to region_chg with the same range, but in some cases the cache will not
518  * have sufficient entries due to races with other code doing region_add or
519  * region_del.  The extra needed entries will be allocated.
520  *
521  * regions_needed is the out value provided by a previous call to region_chg.
522  *
523  * Return the number of new huge pages added to the map.  This number is greater
524  * than or equal to zero.  If file_region entries needed to be allocated for
525  * this operation and we were not able to allocate, it returns -ENOMEM.
526  * region_add of regions of length 1 never allocate file_regions and cannot
527  * fail; region_chg will always allocate at least 1 entry and a region_add for
528  * 1 page will only require at most 1 entry.
529  */
530 static long region_add(struct resv_map *resv, long f, long t,
531 		       long in_regions_needed, struct hstate *h,
532 		       struct hugetlb_cgroup *h_cg)
533 {
534 	long add = 0, actual_regions_needed = 0;
535 
536 	spin_lock(&resv->lock);
537 retry:
538 
539 	/* Count how many regions are actually needed to execute this add. */
540 	add_reservation_in_range(resv, f, t, NULL, NULL,
541 				 &actual_regions_needed);
542 
543 	/*
544 	 * Check for sufficient descriptors in the cache to accommodate
545 	 * this add operation. Note that actual_regions_needed may be greater
546 	 * than in_regions_needed, as the resv_map may have been modified since
547 	 * the region_chg call. In this case, we need to make sure that we
548 	 * allocate extra entries, such that we have enough for all the
549 	 * existing adds_in_progress, plus the excess needed for this
550 	 * operation.
551 	 */
552 	if (actual_regions_needed > in_regions_needed &&
553 	    resv->region_cache_count <
554 		    resv->adds_in_progress +
555 			    (actual_regions_needed - in_regions_needed)) {
556 		/* region_add operation of range 1 should never need to
557 		 * allocate file_region entries.
558 		 */
559 		VM_BUG_ON(t - f <= 1);
560 
561 		if (allocate_file_region_entries(
562 			    resv, actual_regions_needed - in_regions_needed)) {
563 			return -ENOMEM;
564 		}
565 
566 		goto retry;
567 	}
568 
569 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
570 
571 	resv->adds_in_progress -= in_regions_needed;
572 
573 	spin_unlock(&resv->lock);
574 	return add;
575 }
576 
577 /*
578  * Examine the existing reserve map and determine how many
579  * huge pages in the specified range [f, t) are NOT currently
580  * represented.  This routine is called before a subsequent
581  * call to region_add that will actually modify the reserve
582  * map to add the specified range [f, t).  region_chg does
583  * not change the number of huge pages represented by the
584  * map.  A number of new file_region structures is added to the cache as a
585  * placeholder, for the subsequent region_add call to use. At least 1
586  * file_region structure is added.
587  *
588  * out_regions_needed is the number of regions added to the
589  * resv->adds_in_progress.  This value needs to be provided to a follow up call
590  * to region_add or region_abort for proper accounting.
591  *
592  * Returns the number of huge pages that need to be added to the existing
593  * reservation map for the range [f, t).  This number is greater or equal to
594  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
595  * is needed and can not be allocated.
596  */
597 static long region_chg(struct resv_map *resv, long f, long t,
598 		       long *out_regions_needed)
599 {
600 	long chg = 0;
601 
602 	spin_lock(&resv->lock);
603 
604 	/* Count how many hugepages in this range are NOT represented. */
605 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
606 				       out_regions_needed);
607 
608 	if (*out_regions_needed == 0)
609 		*out_regions_needed = 1;
610 
611 	if (allocate_file_region_entries(resv, *out_regions_needed))
612 		return -ENOMEM;
613 
614 	resv->adds_in_progress += *out_regions_needed;
615 
616 	spin_unlock(&resv->lock);
617 	return chg;
618 }
619 
620 /*
621  * Abort the in progress add operation.  The adds_in_progress field
622  * of the resv_map keeps track of the operations in progress between
623  * calls to region_chg and region_add.  Operations are sometimes
624  * aborted after the call to region_chg.  In such cases, region_abort
625  * is called to decrement the adds_in_progress counter. regions_needed
626  * is the value returned by the region_chg call, it is used to decrement
627  * the adds_in_progress counter.
628  *
629  * NOTE: The range arguments [f, t) are not needed or used in this
630  * routine.  They are kept to make reading the calling code easier as
631  * arguments will match the associated region_chg call.
632  */
633 static void region_abort(struct resv_map *resv, long f, long t,
634 			 long regions_needed)
635 {
636 	spin_lock(&resv->lock);
637 	VM_BUG_ON(!resv->region_cache_count);
638 	resv->adds_in_progress -= regions_needed;
639 	spin_unlock(&resv->lock);
640 }
641 
642 /*
643  * Delete the specified range [f, t) from the reserve map.  If the
644  * t parameter is LONG_MAX, this indicates that ALL regions after f
645  * should be deleted.  Locate the regions which intersect [f, t)
646  * and either trim, delete or split the existing regions.
647  *
648  * Returns the number of huge pages deleted from the reserve map.
649  * In the normal case, the return value is zero or more.  In the
650  * case where a region must be split, a new region descriptor must
651  * be allocated.  If the allocation fails, -ENOMEM will be returned.
652  * NOTE: If the parameter t == LONG_MAX, then we will never split
653  * a region and possibly return -ENOMEM.  Callers specifying
654  * t == LONG_MAX do not need to check for -ENOMEM error.
655  */
656 static long region_del(struct resv_map *resv, long f, long t)
657 {
658 	struct list_head *head = &resv->regions;
659 	struct file_region *rg, *trg;
660 	struct file_region *nrg = NULL;
661 	long del = 0;
662 
663 retry:
664 	spin_lock(&resv->lock);
665 	list_for_each_entry_safe(rg, trg, head, link) {
666 		/*
667 		 * Skip regions before the range to be deleted.  file_region
668 		 * ranges are normally of the form [from, to).  However, there
669 		 * may be a "placeholder" entry in the map which is of the form
670 		 * (from, to) with from == to.  Check for placeholder entries
671 		 * at the beginning of the range to be deleted.
672 		 */
673 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
674 			continue;
675 
676 		if (rg->from >= t)
677 			break;
678 
679 		if (f > rg->from && t < rg->to) { /* Must split region */
680 			/*
681 			 * Check for an entry in the cache before dropping
682 			 * lock and attempting allocation.
683 			 */
684 			if (!nrg &&
685 			    resv->region_cache_count > resv->adds_in_progress) {
686 				nrg = list_first_entry(&resv->region_cache,
687 							struct file_region,
688 							link);
689 				list_del(&nrg->link);
690 				resv->region_cache_count--;
691 			}
692 
693 			if (!nrg) {
694 				spin_unlock(&resv->lock);
695 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
696 				if (!nrg)
697 					return -ENOMEM;
698 				goto retry;
699 			}
700 
701 			del += t - f;
702 			hugetlb_cgroup_uncharge_file_region(
703 				resv, rg, t - f, false);
704 
705 			/* New entry for end of split region */
706 			nrg->from = t;
707 			nrg->to = rg->to;
708 
709 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
710 
711 			INIT_LIST_HEAD(&nrg->link);
712 
713 			/* Original entry is trimmed */
714 			rg->to = f;
715 
716 			list_add(&nrg->link, &rg->link);
717 			nrg = NULL;
718 			break;
719 		}
720 
721 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
722 			del += rg->to - rg->from;
723 			hugetlb_cgroup_uncharge_file_region(resv, rg,
724 							    rg->to - rg->from, true);
725 			list_del(&rg->link);
726 			kfree(rg);
727 			continue;
728 		}
729 
730 		if (f <= rg->from) {	/* Trim beginning of region */
731 			hugetlb_cgroup_uncharge_file_region(resv, rg,
732 							    t - rg->from, false);
733 
734 			del += t - rg->from;
735 			rg->from = t;
736 		} else {		/* Trim end of region */
737 			hugetlb_cgroup_uncharge_file_region(resv, rg,
738 							    rg->to - f, false);
739 
740 			del += rg->to - f;
741 			rg->to = f;
742 		}
743 	}
744 
745 	spin_unlock(&resv->lock);
746 	kfree(nrg);
747 	return del;
748 }
749 
750 /*
751  * A rare out of memory error was encountered which prevented removal of
752  * the reserve map region for a page.  The huge page itself was free'ed
753  * and removed from the page cache.  This routine will adjust the subpool
754  * usage count, and the global reserve count if needed.  By incrementing
755  * these counts, the reserve map entry which could not be deleted will
756  * appear as a "reserved" entry instead of simply dangling with incorrect
757  * counts.
758  */
759 void hugetlb_fix_reserve_counts(struct inode *inode)
760 {
761 	struct hugepage_subpool *spool = subpool_inode(inode);
762 	long rsv_adjust;
763 	bool reserved = false;
764 
765 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
766 	if (rsv_adjust > 0) {
767 		struct hstate *h = hstate_inode(inode);
768 
769 		if (!hugetlb_acct_memory(h, 1))
770 			reserved = true;
771 	} else if (!rsv_adjust) {
772 		reserved = true;
773 	}
774 
775 	if (!reserved)
776 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
777 }
778 
779 /*
780  * Count and return the number of huge pages in the reserve map
781  * that intersect with the range [f, t).
782  */
783 static long region_count(struct resv_map *resv, long f, long t)
784 {
785 	struct list_head *head = &resv->regions;
786 	struct file_region *rg;
787 	long chg = 0;
788 
789 	spin_lock(&resv->lock);
790 	/* Locate each segment we overlap with, and count that overlap. */
791 	list_for_each_entry(rg, head, link) {
792 		long seg_from;
793 		long seg_to;
794 
795 		if (rg->to <= f)
796 			continue;
797 		if (rg->from >= t)
798 			break;
799 
800 		seg_from = max(rg->from, f);
801 		seg_to = min(rg->to, t);
802 
803 		chg += seg_to - seg_from;
804 	}
805 	spin_unlock(&resv->lock);
806 
807 	return chg;
808 }
809 
810 /*
811  * Convert the address within this vma to the page offset within
812  * the mapping, in pagecache page units; huge pages here.
813  */
814 static pgoff_t vma_hugecache_offset(struct hstate *h,
815 			struct vm_area_struct *vma, unsigned long address)
816 {
817 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
818 			(vma->vm_pgoff >> huge_page_order(h));
819 }
820 
821 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
822 				     unsigned long address)
823 {
824 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
825 }
826 EXPORT_SYMBOL_GPL(linear_hugepage_index);
827 
828 /*
829  * Return the size of the pages allocated when backing a VMA. In the majority
830  * cases this will be same size as used by the page table entries.
831  */
832 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
833 {
834 	if (vma->vm_ops && vma->vm_ops->pagesize)
835 		return vma->vm_ops->pagesize(vma);
836 	return PAGE_SIZE;
837 }
838 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
839 
840 /*
841  * Return the page size being used by the MMU to back a VMA. In the majority
842  * of cases, the page size used by the kernel matches the MMU size. On
843  * architectures where it differs, an architecture-specific 'strong'
844  * version of this symbol is required.
845  */
846 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
847 {
848 	return vma_kernel_pagesize(vma);
849 }
850 
851 /*
852  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
853  * bits of the reservation map pointer, which are always clear due to
854  * alignment.
855  */
856 #define HPAGE_RESV_OWNER    (1UL << 0)
857 #define HPAGE_RESV_UNMAPPED (1UL << 1)
858 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
859 
860 /*
861  * These helpers are used to track how many pages are reserved for
862  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
863  * is guaranteed to have their future faults succeed.
864  *
865  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
866  * the reserve counters are updated with the hugetlb_lock held. It is safe
867  * to reset the VMA at fork() time as it is not in use yet and there is no
868  * chance of the global counters getting corrupted as a result of the values.
869  *
870  * The private mapping reservation is represented in a subtly different
871  * manner to a shared mapping.  A shared mapping has a region map associated
872  * with the underlying file, this region map represents the backing file
873  * pages which have ever had a reservation assigned which this persists even
874  * after the page is instantiated.  A private mapping has a region map
875  * associated with the original mmap which is attached to all VMAs which
876  * reference it, this region map represents those offsets which have consumed
877  * reservation ie. where pages have been instantiated.
878  */
879 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
880 {
881 	return (unsigned long)vma->vm_private_data;
882 }
883 
884 static void set_vma_private_data(struct vm_area_struct *vma,
885 							unsigned long value)
886 {
887 	vma->vm_private_data = (void *)value;
888 }
889 
890 static void
891 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
892 					  struct hugetlb_cgroup *h_cg,
893 					  struct hstate *h)
894 {
895 #ifdef CONFIG_CGROUP_HUGETLB
896 	if (!h_cg || !h) {
897 		resv_map->reservation_counter = NULL;
898 		resv_map->pages_per_hpage = 0;
899 		resv_map->css = NULL;
900 	} else {
901 		resv_map->reservation_counter =
902 			&h_cg->rsvd_hugepage[hstate_index(h)];
903 		resv_map->pages_per_hpage = pages_per_huge_page(h);
904 		resv_map->css = &h_cg->css;
905 	}
906 #endif
907 }
908 
909 struct resv_map *resv_map_alloc(void)
910 {
911 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
912 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
913 
914 	if (!resv_map || !rg) {
915 		kfree(resv_map);
916 		kfree(rg);
917 		return NULL;
918 	}
919 
920 	kref_init(&resv_map->refs);
921 	spin_lock_init(&resv_map->lock);
922 	INIT_LIST_HEAD(&resv_map->regions);
923 
924 	resv_map->adds_in_progress = 0;
925 	/*
926 	 * Initialize these to 0. On shared mappings, 0's here indicate these
927 	 * fields don't do cgroup accounting. On private mappings, these will be
928 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
929 	 * reservations are to be un-charged from here.
930 	 */
931 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
932 
933 	INIT_LIST_HEAD(&resv_map->region_cache);
934 	list_add(&rg->link, &resv_map->region_cache);
935 	resv_map->region_cache_count = 1;
936 
937 	return resv_map;
938 }
939 
940 void resv_map_release(struct kref *ref)
941 {
942 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
943 	struct list_head *head = &resv_map->region_cache;
944 	struct file_region *rg, *trg;
945 
946 	/* Clear out any active regions before we release the map. */
947 	region_del(resv_map, 0, LONG_MAX);
948 
949 	/* ... and any entries left in the cache */
950 	list_for_each_entry_safe(rg, trg, head, link) {
951 		list_del(&rg->link);
952 		kfree(rg);
953 	}
954 
955 	VM_BUG_ON(resv_map->adds_in_progress);
956 
957 	kfree(resv_map);
958 }
959 
960 static inline struct resv_map *inode_resv_map(struct inode *inode)
961 {
962 	/*
963 	 * At inode evict time, i_mapping may not point to the original
964 	 * address space within the inode.  This original address space
965 	 * contains the pointer to the resv_map.  So, always use the
966 	 * address space embedded within the inode.
967 	 * The VERY common case is inode->mapping == &inode->i_data but,
968 	 * this may not be true for device special inodes.
969 	 */
970 	return (struct resv_map *)(&inode->i_data)->private_data;
971 }
972 
973 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
974 {
975 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
976 	if (vma->vm_flags & VM_MAYSHARE) {
977 		struct address_space *mapping = vma->vm_file->f_mapping;
978 		struct inode *inode = mapping->host;
979 
980 		return inode_resv_map(inode);
981 
982 	} else {
983 		return (struct resv_map *)(get_vma_private_data(vma) &
984 							~HPAGE_RESV_MASK);
985 	}
986 }
987 
988 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
989 {
990 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
991 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
992 
993 	set_vma_private_data(vma, (get_vma_private_data(vma) &
994 				HPAGE_RESV_MASK) | (unsigned long)map);
995 }
996 
997 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
998 {
999 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1000 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1001 
1002 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1003 }
1004 
1005 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1006 {
1007 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1008 
1009 	return (get_vma_private_data(vma) & flag) != 0;
1010 }
1011 
1012 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1013 {
1014 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1015 	/*
1016 	 * Clear vm_private_data
1017 	 * - For shared mappings this is a per-vma semaphore that may be
1018 	 *   allocated in a subsequent call to hugetlb_vm_op_open.
1019 	 *   Before clearing, make sure pointer is not associated with vma
1020 	 *   as this will leak the structure.  This is the case when called
1021 	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1022 	 *   been called to allocate a new structure.
1023 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
1024 	 *   not apply to children.  Faults generated by the children are
1025 	 *   not guaranteed to succeed, even if read-only.
1026 	 */
1027 	if (vma->vm_flags & VM_MAYSHARE) {
1028 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1029 
1030 		if (vma_lock && vma_lock->vma != vma)
1031 			vma->vm_private_data = NULL;
1032 	} else
1033 		vma->vm_private_data = NULL;
1034 }
1035 
1036 /*
1037  * Reset and decrement one ref on hugepage private reservation.
1038  * Called with mm->mmap_sem writer semaphore held.
1039  * This function should be only used by move_vma() and operate on
1040  * same sized vma. It should never come here with last ref on the
1041  * reservation.
1042  */
1043 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1044 {
1045 	/*
1046 	 * Clear the old hugetlb private page reservation.
1047 	 * It has already been transferred to new_vma.
1048 	 *
1049 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1050 	 * which copies vma into new_vma and unmaps vma. After the copy
1051 	 * operation both new_vma and vma share a reference to the resv_map
1052 	 * struct, and at that point vma is about to be unmapped. We don't
1053 	 * want to return the reservation to the pool at unmap of vma because
1054 	 * the reservation still lives on in new_vma, so simply decrement the
1055 	 * ref here and remove the resv_map reference from this vma.
1056 	 */
1057 	struct resv_map *reservations = vma_resv_map(vma);
1058 
1059 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1060 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1061 		kref_put(&reservations->refs, resv_map_release);
1062 	}
1063 
1064 	hugetlb_dup_vma_private(vma);
1065 }
1066 
1067 /* Returns true if the VMA has associated reserve pages */
1068 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1069 {
1070 	if (vma->vm_flags & VM_NORESERVE) {
1071 		/*
1072 		 * This address is already reserved by other process(chg == 0),
1073 		 * so, we should decrement reserved count. Without decrementing,
1074 		 * reserve count remains after releasing inode, because this
1075 		 * allocated page will go into page cache and is regarded as
1076 		 * coming from reserved pool in releasing step.  Currently, we
1077 		 * don't have any other solution to deal with this situation
1078 		 * properly, so add work-around here.
1079 		 */
1080 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1081 			return true;
1082 		else
1083 			return false;
1084 	}
1085 
1086 	/* Shared mappings always use reserves */
1087 	if (vma->vm_flags & VM_MAYSHARE) {
1088 		/*
1089 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1090 		 * be a region map for all pages.  The only situation where
1091 		 * there is no region map is if a hole was punched via
1092 		 * fallocate.  In this case, there really are no reserves to
1093 		 * use.  This situation is indicated if chg != 0.
1094 		 */
1095 		if (chg)
1096 			return false;
1097 		else
1098 			return true;
1099 	}
1100 
1101 	/*
1102 	 * Only the process that called mmap() has reserves for
1103 	 * private mappings.
1104 	 */
1105 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1106 		/*
1107 		 * Like the shared case above, a hole punch or truncate
1108 		 * could have been performed on the private mapping.
1109 		 * Examine the value of chg to determine if reserves
1110 		 * actually exist or were previously consumed.
1111 		 * Very Subtle - The value of chg comes from a previous
1112 		 * call to vma_needs_reserves().  The reserve map for
1113 		 * private mappings has different (opposite) semantics
1114 		 * than that of shared mappings.  vma_needs_reserves()
1115 		 * has already taken this difference in semantics into
1116 		 * account.  Therefore, the meaning of chg is the same
1117 		 * as in the shared case above.  Code could easily be
1118 		 * combined, but keeping it separate draws attention to
1119 		 * subtle differences.
1120 		 */
1121 		if (chg)
1122 			return false;
1123 		else
1124 			return true;
1125 	}
1126 
1127 	return false;
1128 }
1129 
1130 static void enqueue_huge_page(struct hstate *h, struct page *page)
1131 {
1132 	int nid = page_to_nid(page);
1133 
1134 	lockdep_assert_held(&hugetlb_lock);
1135 	VM_BUG_ON_PAGE(page_count(page), page);
1136 
1137 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1138 	h->free_huge_pages++;
1139 	h->free_huge_pages_node[nid]++;
1140 	SetHPageFreed(page);
1141 }
1142 
1143 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1144 {
1145 	struct page *page;
1146 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1147 
1148 	lockdep_assert_held(&hugetlb_lock);
1149 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1150 		if (pin && !is_longterm_pinnable_page(page))
1151 			continue;
1152 
1153 		if (PageHWPoison(page))
1154 			continue;
1155 
1156 		list_move(&page->lru, &h->hugepage_activelist);
1157 		set_page_refcounted(page);
1158 		ClearHPageFreed(page);
1159 		h->free_huge_pages--;
1160 		h->free_huge_pages_node[nid]--;
1161 		return page;
1162 	}
1163 
1164 	return NULL;
1165 }
1166 
1167 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1168 		nodemask_t *nmask)
1169 {
1170 	unsigned int cpuset_mems_cookie;
1171 	struct zonelist *zonelist;
1172 	struct zone *zone;
1173 	struct zoneref *z;
1174 	int node = NUMA_NO_NODE;
1175 
1176 	zonelist = node_zonelist(nid, gfp_mask);
1177 
1178 retry_cpuset:
1179 	cpuset_mems_cookie = read_mems_allowed_begin();
1180 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1181 		struct page *page;
1182 
1183 		if (!cpuset_zone_allowed(zone, gfp_mask))
1184 			continue;
1185 		/*
1186 		 * no need to ask again on the same node. Pool is node rather than
1187 		 * zone aware
1188 		 */
1189 		if (zone_to_nid(zone) == node)
1190 			continue;
1191 		node = zone_to_nid(zone);
1192 
1193 		page = dequeue_huge_page_node_exact(h, node);
1194 		if (page)
1195 			return page;
1196 	}
1197 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1198 		goto retry_cpuset;
1199 
1200 	return NULL;
1201 }
1202 
1203 static unsigned long available_huge_pages(struct hstate *h)
1204 {
1205 	return h->free_huge_pages - h->resv_huge_pages;
1206 }
1207 
1208 static struct page *dequeue_huge_page_vma(struct hstate *h,
1209 				struct vm_area_struct *vma,
1210 				unsigned long address, int avoid_reserve,
1211 				long chg)
1212 {
1213 	struct page *page = NULL;
1214 	struct mempolicy *mpol;
1215 	gfp_t gfp_mask;
1216 	nodemask_t *nodemask;
1217 	int nid;
1218 
1219 	/*
1220 	 * A child process with MAP_PRIVATE mappings created by their parent
1221 	 * have no page reserves. This check ensures that reservations are
1222 	 * not "stolen". The child may still get SIGKILLed
1223 	 */
1224 	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
1225 		goto err;
1226 
1227 	/* If reserves cannot be used, ensure enough pages are in the pool */
1228 	if (avoid_reserve && !available_huge_pages(h))
1229 		goto err;
1230 
1231 	gfp_mask = htlb_alloc_mask(h);
1232 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1233 
1234 	if (mpol_is_preferred_many(mpol)) {
1235 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1236 
1237 		/* Fallback to all nodes if page==NULL */
1238 		nodemask = NULL;
1239 	}
1240 
1241 	if (!page)
1242 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1243 
1244 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1245 		SetHPageRestoreReserve(page);
1246 		h->resv_huge_pages--;
1247 	}
1248 
1249 	mpol_cond_put(mpol);
1250 	return page;
1251 
1252 err:
1253 	return NULL;
1254 }
1255 
1256 /*
1257  * common helper functions for hstate_next_node_to_{alloc|free}.
1258  * We may have allocated or freed a huge page based on a different
1259  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1260  * be outside of *nodes_allowed.  Ensure that we use an allowed
1261  * node for alloc or free.
1262  */
1263 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1264 {
1265 	nid = next_node_in(nid, *nodes_allowed);
1266 	VM_BUG_ON(nid >= MAX_NUMNODES);
1267 
1268 	return nid;
1269 }
1270 
1271 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1272 {
1273 	if (!node_isset(nid, *nodes_allowed))
1274 		nid = next_node_allowed(nid, nodes_allowed);
1275 	return nid;
1276 }
1277 
1278 /*
1279  * returns the previously saved node ["this node"] from which to
1280  * allocate a persistent huge page for the pool and advance the
1281  * next node from which to allocate, handling wrap at end of node
1282  * mask.
1283  */
1284 static int hstate_next_node_to_alloc(struct hstate *h,
1285 					nodemask_t *nodes_allowed)
1286 {
1287 	int nid;
1288 
1289 	VM_BUG_ON(!nodes_allowed);
1290 
1291 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1292 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1293 
1294 	return nid;
1295 }
1296 
1297 /*
1298  * helper for remove_pool_huge_page() - return the previously saved
1299  * node ["this node"] from which to free a huge page.  Advance the
1300  * next node id whether or not we find a free huge page to free so
1301  * that the next attempt to free addresses the next node.
1302  */
1303 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1304 {
1305 	int nid;
1306 
1307 	VM_BUG_ON(!nodes_allowed);
1308 
1309 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1310 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1311 
1312 	return nid;
1313 }
1314 
1315 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
1316 	for (nr_nodes = nodes_weight(*mask);				\
1317 		nr_nodes > 0 &&						\
1318 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
1319 		nr_nodes--)
1320 
1321 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1322 	for (nr_nodes = nodes_weight(*mask);				\
1323 		nr_nodes > 0 &&						\
1324 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1325 		nr_nodes--)
1326 
1327 /* used to demote non-gigantic_huge pages as well */
1328 static void __destroy_compound_gigantic_page(struct page *page,
1329 					unsigned int order, bool demote)
1330 {
1331 	int i;
1332 	int nr_pages = 1 << order;
1333 	struct page *p;
1334 
1335 	atomic_set(compound_mapcount_ptr(page), 0);
1336 	atomic_set(subpages_mapcount_ptr(page), 0);
1337 	atomic_set(compound_pincount_ptr(page), 0);
1338 
1339 	for (i = 1; i < nr_pages; i++) {
1340 		p = nth_page(page, i);
1341 		p->mapping = NULL;
1342 		clear_compound_head(p);
1343 		if (!demote)
1344 			set_page_refcounted(p);
1345 	}
1346 
1347 	set_compound_order(page, 0);
1348 #ifdef CONFIG_64BIT
1349 	page[1].compound_nr = 0;
1350 #endif
1351 	__ClearPageHead(page);
1352 }
1353 
1354 static void destroy_compound_hugetlb_page_for_demote(struct page *page,
1355 					unsigned int order)
1356 {
1357 	__destroy_compound_gigantic_page(page, order, true);
1358 }
1359 
1360 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1361 static void destroy_compound_gigantic_page(struct page *page,
1362 					unsigned int order)
1363 {
1364 	__destroy_compound_gigantic_page(page, order, false);
1365 }
1366 
1367 static void free_gigantic_page(struct page *page, unsigned int order)
1368 {
1369 	/*
1370 	 * If the page isn't allocated using the cma allocator,
1371 	 * cma_release() returns false.
1372 	 */
1373 #ifdef CONFIG_CMA
1374 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1375 		return;
1376 #endif
1377 
1378 	free_contig_range(page_to_pfn(page), 1 << order);
1379 }
1380 
1381 #ifdef CONFIG_CONTIG_ALLOC
1382 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1383 		int nid, nodemask_t *nodemask)
1384 {
1385 	unsigned long nr_pages = pages_per_huge_page(h);
1386 	if (nid == NUMA_NO_NODE)
1387 		nid = numa_mem_id();
1388 
1389 #ifdef CONFIG_CMA
1390 	{
1391 		struct page *page;
1392 		int node;
1393 
1394 		if (hugetlb_cma[nid]) {
1395 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1396 					huge_page_order(h), true);
1397 			if (page)
1398 				return page;
1399 		}
1400 
1401 		if (!(gfp_mask & __GFP_THISNODE)) {
1402 			for_each_node_mask(node, *nodemask) {
1403 				if (node == nid || !hugetlb_cma[node])
1404 					continue;
1405 
1406 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1407 						huge_page_order(h), true);
1408 				if (page)
1409 					return page;
1410 			}
1411 		}
1412 	}
1413 #endif
1414 
1415 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1416 }
1417 
1418 #else /* !CONFIG_CONTIG_ALLOC */
1419 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1420 					int nid, nodemask_t *nodemask)
1421 {
1422 	return NULL;
1423 }
1424 #endif /* CONFIG_CONTIG_ALLOC */
1425 
1426 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1427 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1428 					int nid, nodemask_t *nodemask)
1429 {
1430 	return NULL;
1431 }
1432 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1433 static inline void destroy_compound_gigantic_page(struct page *page,
1434 						unsigned int order) { }
1435 #endif
1436 
1437 /*
1438  * Remove hugetlb page from lists, and update dtor so that page appears
1439  * as just a compound page.
1440  *
1441  * A reference is held on the page, except in the case of demote.
1442  *
1443  * Must be called with hugetlb lock held.
1444  */
1445 static void __remove_hugetlb_page(struct hstate *h, struct page *page,
1446 							bool adjust_surplus,
1447 							bool demote)
1448 {
1449 	int nid = page_to_nid(page);
1450 	struct folio *folio = page_folio(page);
1451 
1452 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1453 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1454 
1455 	lockdep_assert_held(&hugetlb_lock);
1456 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1457 		return;
1458 
1459 	list_del(&page->lru);
1460 
1461 	if (HPageFreed(page)) {
1462 		h->free_huge_pages--;
1463 		h->free_huge_pages_node[nid]--;
1464 	}
1465 	if (adjust_surplus) {
1466 		h->surplus_huge_pages--;
1467 		h->surplus_huge_pages_node[nid]--;
1468 	}
1469 
1470 	/*
1471 	 * Very subtle
1472 	 *
1473 	 * For non-gigantic pages set the destructor to the normal compound
1474 	 * page dtor.  This is needed in case someone takes an additional
1475 	 * temporary ref to the page, and freeing is delayed until they drop
1476 	 * their reference.
1477 	 *
1478 	 * For gigantic pages set the destructor to the null dtor.  This
1479 	 * destructor will never be called.  Before freeing the gigantic
1480 	 * page destroy_compound_gigantic_page will turn the compound page
1481 	 * into a simple group of pages.  After this the destructor does not
1482 	 * apply.
1483 	 *
1484 	 * This handles the case where more than one ref is held when and
1485 	 * after update_and_free_page is called.
1486 	 *
1487 	 * In the case of demote we do not ref count the page as it will soon
1488 	 * be turned into a page of smaller size.
1489 	 */
1490 	if (!demote)
1491 		set_page_refcounted(page);
1492 	if (hstate_is_gigantic(h))
1493 		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1494 	else
1495 		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
1496 
1497 	h->nr_huge_pages--;
1498 	h->nr_huge_pages_node[nid]--;
1499 }
1500 
1501 static void remove_hugetlb_page(struct hstate *h, struct page *page,
1502 							bool adjust_surplus)
1503 {
1504 	__remove_hugetlb_page(h, page, adjust_surplus, false);
1505 }
1506 
1507 static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
1508 							bool adjust_surplus)
1509 {
1510 	__remove_hugetlb_page(h, page, adjust_surplus, true);
1511 }
1512 
1513 static void add_hugetlb_page(struct hstate *h, struct page *page,
1514 			     bool adjust_surplus)
1515 {
1516 	int zeroed;
1517 	int nid = page_to_nid(page);
1518 
1519 	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1520 
1521 	lockdep_assert_held(&hugetlb_lock);
1522 
1523 	INIT_LIST_HEAD(&page->lru);
1524 	h->nr_huge_pages++;
1525 	h->nr_huge_pages_node[nid]++;
1526 
1527 	if (adjust_surplus) {
1528 		h->surplus_huge_pages++;
1529 		h->surplus_huge_pages_node[nid]++;
1530 	}
1531 
1532 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1533 	set_page_private(page, 0);
1534 	/*
1535 	 * We have to set HPageVmemmapOptimized again as above
1536 	 * set_page_private(page, 0) cleared it.
1537 	 */
1538 	SetHPageVmemmapOptimized(page);
1539 
1540 	/*
1541 	 * This page is about to be managed by the hugetlb allocator and
1542 	 * should have no users.  Drop our reference, and check for others
1543 	 * just in case.
1544 	 */
1545 	zeroed = put_page_testzero(page);
1546 	if (!zeroed)
1547 		/*
1548 		 * It is VERY unlikely soneone else has taken a ref on
1549 		 * the page.  In this case, we simply return as the
1550 		 * hugetlb destructor (free_huge_page) will be called
1551 		 * when this other ref is dropped.
1552 		 */
1553 		return;
1554 
1555 	arch_clear_hugepage_flags(page);
1556 	enqueue_huge_page(h, page);
1557 }
1558 
1559 static void __update_and_free_page(struct hstate *h, struct page *page)
1560 {
1561 	int i;
1562 	struct page *subpage;
1563 
1564 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1565 		return;
1566 
1567 	/*
1568 	 * If we don't know which subpages are hwpoisoned, we can't free
1569 	 * the hugepage, so it's leaked intentionally.
1570 	 */
1571 	if (HPageRawHwpUnreliable(page))
1572 		return;
1573 
1574 	if (hugetlb_vmemmap_restore(h, page)) {
1575 		spin_lock_irq(&hugetlb_lock);
1576 		/*
1577 		 * If we cannot allocate vmemmap pages, just refuse to free the
1578 		 * page and put the page back on the hugetlb free list and treat
1579 		 * as a surplus page.
1580 		 */
1581 		add_hugetlb_page(h, page, true);
1582 		spin_unlock_irq(&hugetlb_lock);
1583 		return;
1584 	}
1585 
1586 	/*
1587 	 * Move PageHWPoison flag from head page to the raw error pages,
1588 	 * which makes any healthy subpages reusable.
1589 	 */
1590 	if (unlikely(PageHWPoison(page)))
1591 		hugetlb_clear_page_hwpoison(page);
1592 
1593 	for (i = 0; i < pages_per_huge_page(h); i++) {
1594 		subpage = nth_page(page, i);
1595 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1596 				1 << PG_referenced | 1 << PG_dirty |
1597 				1 << PG_active | 1 << PG_private |
1598 				1 << PG_writeback);
1599 	}
1600 
1601 	/*
1602 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
1603 	 * need to be given back to CMA in free_gigantic_page.
1604 	 */
1605 	if (hstate_is_gigantic(h) ||
1606 	    hugetlb_cma_page(page, huge_page_order(h))) {
1607 		destroy_compound_gigantic_page(page, huge_page_order(h));
1608 		free_gigantic_page(page, huge_page_order(h));
1609 	} else {
1610 		__free_pages(page, huge_page_order(h));
1611 	}
1612 }
1613 
1614 /*
1615  * As update_and_free_page() can be called under any context, so we cannot
1616  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1617  * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
1618  * the vmemmap pages.
1619  *
1620  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1621  * freed and frees them one-by-one. As the page->mapping pointer is going
1622  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1623  * structure of a lockless linked list of huge pages to be freed.
1624  */
1625 static LLIST_HEAD(hpage_freelist);
1626 
1627 static void free_hpage_workfn(struct work_struct *work)
1628 {
1629 	struct llist_node *node;
1630 
1631 	node = llist_del_all(&hpage_freelist);
1632 
1633 	while (node) {
1634 		struct page *page;
1635 		struct hstate *h;
1636 
1637 		page = container_of((struct address_space **)node,
1638 				     struct page, mapping);
1639 		node = node->next;
1640 		page->mapping = NULL;
1641 		/*
1642 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1643 		 * is going to trigger because a previous call to
1644 		 * remove_hugetlb_page() will set_compound_page_dtor(page,
1645 		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1646 		 */
1647 		h = size_to_hstate(page_size(page));
1648 
1649 		__update_and_free_page(h, page);
1650 
1651 		cond_resched();
1652 	}
1653 }
1654 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1655 
1656 static inline void flush_free_hpage_work(struct hstate *h)
1657 {
1658 	if (hugetlb_vmemmap_optimizable(h))
1659 		flush_work(&free_hpage_work);
1660 }
1661 
1662 static void update_and_free_page(struct hstate *h, struct page *page,
1663 				 bool atomic)
1664 {
1665 	if (!HPageVmemmapOptimized(page) || !atomic) {
1666 		__update_and_free_page(h, page);
1667 		return;
1668 	}
1669 
1670 	/*
1671 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1672 	 *
1673 	 * Only call schedule_work() if hpage_freelist is previously
1674 	 * empty. Otherwise, schedule_work() had been called but the workfn
1675 	 * hasn't retrieved the list yet.
1676 	 */
1677 	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1678 		schedule_work(&free_hpage_work);
1679 }
1680 
1681 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1682 {
1683 	struct page *page, *t_page;
1684 
1685 	list_for_each_entry_safe(page, t_page, list, lru) {
1686 		update_and_free_page(h, page, false);
1687 		cond_resched();
1688 	}
1689 }
1690 
1691 struct hstate *size_to_hstate(unsigned long size)
1692 {
1693 	struct hstate *h;
1694 
1695 	for_each_hstate(h) {
1696 		if (huge_page_size(h) == size)
1697 			return h;
1698 	}
1699 	return NULL;
1700 }
1701 
1702 void free_huge_page(struct page *page)
1703 {
1704 	/*
1705 	 * Can't pass hstate in here because it is called from the
1706 	 * compound page destructor.
1707 	 */
1708 	struct folio *folio = page_folio(page);
1709 	struct hstate *h = folio_hstate(folio);
1710 	int nid = folio_nid(folio);
1711 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1712 	bool restore_reserve;
1713 	unsigned long flags;
1714 
1715 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1716 	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1717 
1718 	hugetlb_set_folio_subpool(folio, NULL);
1719 	if (folio_test_anon(folio))
1720 		__ClearPageAnonExclusive(&folio->page);
1721 	folio->mapping = NULL;
1722 	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1723 	folio_clear_hugetlb_restore_reserve(folio);
1724 
1725 	/*
1726 	 * If HPageRestoreReserve was set on page, page allocation consumed a
1727 	 * reservation.  If the page was associated with a subpool, there
1728 	 * would have been a page reserved in the subpool before allocation
1729 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1730 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1731 	 * remove the reserved page from the subpool.
1732 	 */
1733 	if (!restore_reserve) {
1734 		/*
1735 		 * A return code of zero implies that the subpool will be
1736 		 * under its minimum size if the reservation is not restored
1737 		 * after page is free.  Therefore, force restore_reserve
1738 		 * operation.
1739 		 */
1740 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1741 			restore_reserve = true;
1742 	}
1743 
1744 	spin_lock_irqsave(&hugetlb_lock, flags);
1745 	folio_clear_hugetlb_migratable(folio);
1746 	hugetlb_cgroup_uncharge_folio(hstate_index(h),
1747 				     pages_per_huge_page(h), folio);
1748 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1749 					  pages_per_huge_page(h), folio);
1750 	if (restore_reserve)
1751 		h->resv_huge_pages++;
1752 
1753 	if (folio_test_hugetlb_temporary(folio)) {
1754 		remove_hugetlb_page(h, page, false);
1755 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1756 		update_and_free_page(h, page, true);
1757 	} else if (h->surplus_huge_pages_node[nid]) {
1758 		/* remove the page from active list */
1759 		remove_hugetlb_page(h, page, true);
1760 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1761 		update_and_free_page(h, page, true);
1762 	} else {
1763 		arch_clear_hugepage_flags(page);
1764 		enqueue_huge_page(h, page);
1765 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1766 	}
1767 }
1768 
1769 /*
1770  * Must be called with the hugetlb lock held
1771  */
1772 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1773 {
1774 	lockdep_assert_held(&hugetlb_lock);
1775 	h->nr_huge_pages++;
1776 	h->nr_huge_pages_node[nid]++;
1777 }
1778 
1779 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1780 {
1781 	hugetlb_vmemmap_optimize(h, &folio->page);
1782 	INIT_LIST_HEAD(&folio->lru);
1783 	folio->_folio_dtor = HUGETLB_PAGE_DTOR;
1784 	hugetlb_set_folio_subpool(folio, NULL);
1785 	set_hugetlb_cgroup(folio, NULL);
1786 	set_hugetlb_cgroup_rsvd(folio, NULL);
1787 }
1788 
1789 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1790 {
1791 	struct folio *folio = page_folio(page);
1792 
1793 	__prep_new_hugetlb_folio(h, folio);
1794 	spin_lock_irq(&hugetlb_lock);
1795 	__prep_account_new_huge_page(h, nid);
1796 	spin_unlock_irq(&hugetlb_lock);
1797 }
1798 
1799 static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
1800 								bool demote)
1801 {
1802 	int i, j;
1803 	int nr_pages = 1 << order;
1804 	struct page *p;
1805 
1806 	/* we rely on prep_new_huge_page to set the destructor */
1807 	set_compound_order(page, order);
1808 	__ClearPageReserved(page);
1809 	__SetPageHead(page);
1810 	for (i = 0; i < nr_pages; i++) {
1811 		p = nth_page(page, i);
1812 
1813 		/*
1814 		 * For gigantic hugepages allocated through bootmem at
1815 		 * boot, it's safer to be consistent with the not-gigantic
1816 		 * hugepages and clear the PG_reserved bit from all tail pages
1817 		 * too.  Otherwise drivers using get_user_pages() to access tail
1818 		 * pages may get the reference counting wrong if they see
1819 		 * PG_reserved set on a tail page (despite the head page not
1820 		 * having PG_reserved set).  Enforcing this consistency between
1821 		 * head and tail pages allows drivers to optimize away a check
1822 		 * on the head page when they need know if put_page() is needed
1823 		 * after get_user_pages().
1824 		 */
1825 		if (i != 0)	/* head page cleared above */
1826 			__ClearPageReserved(p);
1827 		/*
1828 		 * Subtle and very unlikely
1829 		 *
1830 		 * Gigantic 'page allocators' such as memblock or cma will
1831 		 * return a set of pages with each page ref counted.  We need
1832 		 * to turn this set of pages into a compound page with tail
1833 		 * page ref counts set to zero.  Code such as speculative page
1834 		 * cache adding could take a ref on a 'to be' tail page.
1835 		 * We need to respect any increased ref count, and only set
1836 		 * the ref count to zero if count is currently 1.  If count
1837 		 * is not 1, we return an error.  An error return indicates
1838 		 * the set of pages can not be converted to a gigantic page.
1839 		 * The caller who allocated the pages should then discard the
1840 		 * pages using the appropriate free interface.
1841 		 *
1842 		 * In the case of demote, the ref count will be zero.
1843 		 */
1844 		if (!demote) {
1845 			if (!page_ref_freeze(p, 1)) {
1846 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1847 				goto out_error;
1848 			}
1849 		} else {
1850 			VM_BUG_ON_PAGE(page_count(p), p);
1851 		}
1852 		if (i != 0)
1853 			set_compound_head(p, page);
1854 	}
1855 	atomic_set(compound_mapcount_ptr(page), -1);
1856 	atomic_set(subpages_mapcount_ptr(page), 0);
1857 	atomic_set(compound_pincount_ptr(page), 0);
1858 	return true;
1859 
1860 out_error:
1861 	/* undo page modifications made above */
1862 	for (j = 0; j < i; j++) {
1863 		p = nth_page(page, j);
1864 		if (j != 0)
1865 			clear_compound_head(p);
1866 		set_page_refcounted(p);
1867 	}
1868 	/* need to clear PG_reserved on remaining tail pages  */
1869 	for (; j < nr_pages; j++) {
1870 		p = nth_page(page, j);
1871 		__ClearPageReserved(p);
1872 	}
1873 	set_compound_order(page, 0);
1874 #ifdef CONFIG_64BIT
1875 	page[1].compound_nr = 0;
1876 #endif
1877 	__ClearPageHead(page);
1878 	return false;
1879 }
1880 
1881 static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
1882 {
1883 	return __prep_compound_gigantic_page(page, order, false);
1884 }
1885 
1886 static bool prep_compound_gigantic_page_for_demote(struct page *page,
1887 							unsigned int order)
1888 {
1889 	return __prep_compound_gigantic_page(page, order, true);
1890 }
1891 
1892 /*
1893  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1894  * transparent huge pages.  See the PageTransHuge() documentation for more
1895  * details.
1896  */
1897 int PageHuge(struct page *page)
1898 {
1899 	if (!PageCompound(page))
1900 		return 0;
1901 
1902 	page = compound_head(page);
1903 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1904 }
1905 EXPORT_SYMBOL_GPL(PageHuge);
1906 
1907 /*
1908  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1909  * normal or transparent huge pages.
1910  */
1911 int PageHeadHuge(struct page *page_head)
1912 {
1913 	if (!PageHead(page_head))
1914 		return 0;
1915 
1916 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1917 }
1918 EXPORT_SYMBOL_GPL(PageHeadHuge);
1919 
1920 /*
1921  * Find and lock address space (mapping) in write mode.
1922  *
1923  * Upon entry, the page is locked which means that page_mapping() is
1924  * stable.  Due to locking order, we can only trylock_write.  If we can
1925  * not get the lock, simply return NULL to caller.
1926  */
1927 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1928 {
1929 	struct address_space *mapping = page_mapping(hpage);
1930 
1931 	if (!mapping)
1932 		return mapping;
1933 
1934 	if (i_mmap_trylock_write(mapping))
1935 		return mapping;
1936 
1937 	return NULL;
1938 }
1939 
1940 pgoff_t hugetlb_basepage_index(struct page *page)
1941 {
1942 	struct page *page_head = compound_head(page);
1943 	pgoff_t index = page_index(page_head);
1944 	unsigned long compound_idx;
1945 
1946 	if (compound_order(page_head) >= MAX_ORDER)
1947 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1948 	else
1949 		compound_idx = page - page_head;
1950 
1951 	return (index << compound_order(page_head)) + compound_idx;
1952 }
1953 
1954 static struct page *alloc_buddy_huge_page(struct hstate *h,
1955 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1956 		nodemask_t *node_alloc_noretry)
1957 {
1958 	int order = huge_page_order(h);
1959 	struct page *page;
1960 	bool alloc_try_hard = true;
1961 	bool retry = true;
1962 
1963 	/*
1964 	 * By default we always try hard to allocate the page with
1965 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1966 	 * a loop (to adjust global huge page counts) and previous allocation
1967 	 * failed, do not continue to try hard on the same node.  Use the
1968 	 * node_alloc_noretry bitmap to manage this state information.
1969 	 */
1970 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1971 		alloc_try_hard = false;
1972 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1973 	if (alloc_try_hard)
1974 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1975 	if (nid == NUMA_NO_NODE)
1976 		nid = numa_mem_id();
1977 retry:
1978 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1979 
1980 	/* Freeze head page */
1981 	if (page && !page_ref_freeze(page, 1)) {
1982 		__free_pages(page, order);
1983 		if (retry) {	/* retry once */
1984 			retry = false;
1985 			goto retry;
1986 		}
1987 		/* WOW!  twice in a row. */
1988 		pr_warn("HugeTLB head page unexpected inflated ref count\n");
1989 		page = NULL;
1990 	}
1991 
1992 	if (page)
1993 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1994 	else
1995 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1996 
1997 	/*
1998 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
1999 	 * indicates an overall state change.  Clear bit so that we resume
2000 	 * normal 'try hard' allocations.
2001 	 */
2002 	if (node_alloc_noretry && page && !alloc_try_hard)
2003 		node_clear(nid, *node_alloc_noretry);
2004 
2005 	/*
2006 	 * If we tried hard to get a page but failed, set bit so that
2007 	 * subsequent attempts will not try as hard until there is an
2008 	 * overall state change.
2009 	 */
2010 	if (node_alloc_noretry && !page && alloc_try_hard)
2011 		node_set(nid, *node_alloc_noretry);
2012 
2013 	return page;
2014 }
2015 
2016 /*
2017  * Common helper to allocate a fresh hugetlb page. All specific allocators
2018  * should use this function to get new hugetlb pages
2019  *
2020  * Note that returned page is 'frozen':  ref count of head page and all tail
2021  * pages is zero.
2022  */
2023 static struct page *alloc_fresh_huge_page(struct hstate *h,
2024 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
2025 		nodemask_t *node_alloc_noretry)
2026 {
2027 	struct page *page;
2028 	bool retry = false;
2029 
2030 retry:
2031 	if (hstate_is_gigantic(h))
2032 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
2033 	else
2034 		page = alloc_buddy_huge_page(h, gfp_mask,
2035 				nid, nmask, node_alloc_noretry);
2036 	if (!page)
2037 		return NULL;
2038 
2039 	if (hstate_is_gigantic(h)) {
2040 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
2041 			/*
2042 			 * Rare failure to convert pages to compound page.
2043 			 * Free pages and try again - ONCE!
2044 			 */
2045 			free_gigantic_page(page, huge_page_order(h));
2046 			if (!retry) {
2047 				retry = true;
2048 				goto retry;
2049 			}
2050 			return NULL;
2051 		}
2052 	}
2053 	prep_new_huge_page(h, page, page_to_nid(page));
2054 
2055 	return page;
2056 }
2057 
2058 /*
2059  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
2060  * manner.
2061  */
2062 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2063 				nodemask_t *node_alloc_noretry)
2064 {
2065 	struct page *page;
2066 	int nr_nodes, node;
2067 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2068 
2069 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2070 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
2071 						node_alloc_noretry);
2072 		if (page)
2073 			break;
2074 	}
2075 
2076 	if (!page)
2077 		return 0;
2078 
2079 	free_huge_page(page); /* free it into the hugepage allocator */
2080 
2081 	return 1;
2082 }
2083 
2084 /*
2085  * Remove huge page from pool from next node to free.  Attempt to keep
2086  * persistent huge pages more or less balanced over allowed nodes.
2087  * This routine only 'removes' the hugetlb page.  The caller must make
2088  * an additional call to free the page to low level allocators.
2089  * Called with hugetlb_lock locked.
2090  */
2091 static struct page *remove_pool_huge_page(struct hstate *h,
2092 						nodemask_t *nodes_allowed,
2093 						 bool acct_surplus)
2094 {
2095 	int nr_nodes, node;
2096 	struct page *page = NULL;
2097 
2098 	lockdep_assert_held(&hugetlb_lock);
2099 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2100 		/*
2101 		 * If we're returning unused surplus pages, only examine
2102 		 * nodes with surplus pages.
2103 		 */
2104 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2105 		    !list_empty(&h->hugepage_freelists[node])) {
2106 			page = list_entry(h->hugepage_freelists[node].next,
2107 					  struct page, lru);
2108 			remove_hugetlb_page(h, page, acct_surplus);
2109 			break;
2110 		}
2111 	}
2112 
2113 	return page;
2114 }
2115 
2116 /*
2117  * Dissolve a given free hugepage into free buddy pages. This function does
2118  * nothing for in-use hugepages and non-hugepages.
2119  * This function returns values like below:
2120  *
2121  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2122  *           when the system is under memory pressure and the feature of
2123  *           freeing unused vmemmap pages associated with each hugetlb page
2124  *           is enabled.
2125  *  -EBUSY:  failed to dissolved free hugepages or the hugepage is in-use
2126  *           (allocated or reserved.)
2127  *       0:  successfully dissolved free hugepages or the page is not a
2128  *           hugepage (considered as already dissolved)
2129  */
2130 int dissolve_free_huge_page(struct page *page)
2131 {
2132 	int rc = -EBUSY;
2133 
2134 retry:
2135 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2136 	if (!PageHuge(page))
2137 		return 0;
2138 
2139 	spin_lock_irq(&hugetlb_lock);
2140 	if (!PageHuge(page)) {
2141 		rc = 0;
2142 		goto out;
2143 	}
2144 
2145 	if (!page_count(page)) {
2146 		struct page *head = compound_head(page);
2147 		struct hstate *h = page_hstate(head);
2148 		if (!available_huge_pages(h))
2149 			goto out;
2150 
2151 		/*
2152 		 * We should make sure that the page is already on the free list
2153 		 * when it is dissolved.
2154 		 */
2155 		if (unlikely(!HPageFreed(head))) {
2156 			spin_unlock_irq(&hugetlb_lock);
2157 			cond_resched();
2158 
2159 			/*
2160 			 * Theoretically, we should return -EBUSY when we
2161 			 * encounter this race. In fact, we have a chance
2162 			 * to successfully dissolve the page if we do a
2163 			 * retry. Because the race window is quite small.
2164 			 * If we seize this opportunity, it is an optimization
2165 			 * for increasing the success rate of dissolving page.
2166 			 */
2167 			goto retry;
2168 		}
2169 
2170 		remove_hugetlb_page(h, head, false);
2171 		h->max_huge_pages--;
2172 		spin_unlock_irq(&hugetlb_lock);
2173 
2174 		/*
2175 		 * Normally update_and_free_page will allocate required vmemmmap
2176 		 * before freeing the page.  update_and_free_page will fail to
2177 		 * free the page if it can not allocate required vmemmap.  We
2178 		 * need to adjust max_huge_pages if the page is not freed.
2179 		 * Attempt to allocate vmemmmap here so that we can take
2180 		 * appropriate action on failure.
2181 		 */
2182 		rc = hugetlb_vmemmap_restore(h, head);
2183 		if (!rc) {
2184 			update_and_free_page(h, head, false);
2185 		} else {
2186 			spin_lock_irq(&hugetlb_lock);
2187 			add_hugetlb_page(h, head, false);
2188 			h->max_huge_pages++;
2189 			spin_unlock_irq(&hugetlb_lock);
2190 		}
2191 
2192 		return rc;
2193 	}
2194 out:
2195 	spin_unlock_irq(&hugetlb_lock);
2196 	return rc;
2197 }
2198 
2199 /*
2200  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2201  * make specified memory blocks removable from the system.
2202  * Note that this will dissolve a free gigantic hugepage completely, if any
2203  * part of it lies within the given range.
2204  * Also note that if dissolve_free_huge_page() returns with an error, all
2205  * free hugepages that were dissolved before that error are lost.
2206  */
2207 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2208 {
2209 	unsigned long pfn;
2210 	struct page *page;
2211 	int rc = 0;
2212 	unsigned int order;
2213 	struct hstate *h;
2214 
2215 	if (!hugepages_supported())
2216 		return rc;
2217 
2218 	order = huge_page_order(&default_hstate);
2219 	for_each_hstate(h)
2220 		order = min(order, huge_page_order(h));
2221 
2222 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2223 		page = pfn_to_page(pfn);
2224 		rc = dissolve_free_huge_page(page);
2225 		if (rc)
2226 			break;
2227 	}
2228 
2229 	return rc;
2230 }
2231 
2232 /*
2233  * Allocates a fresh surplus page from the page allocator.
2234  */
2235 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2236 						int nid, nodemask_t *nmask)
2237 {
2238 	struct page *page = NULL;
2239 
2240 	if (hstate_is_gigantic(h))
2241 		return NULL;
2242 
2243 	spin_lock_irq(&hugetlb_lock);
2244 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2245 		goto out_unlock;
2246 	spin_unlock_irq(&hugetlb_lock);
2247 
2248 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2249 	if (!page)
2250 		return NULL;
2251 
2252 	spin_lock_irq(&hugetlb_lock);
2253 	/*
2254 	 * We could have raced with the pool size change.
2255 	 * Double check that and simply deallocate the new page
2256 	 * if we would end up overcommiting the surpluses. Abuse
2257 	 * temporary page to workaround the nasty free_huge_page
2258 	 * codeflow
2259 	 */
2260 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2261 		SetHPageTemporary(page);
2262 		spin_unlock_irq(&hugetlb_lock);
2263 		free_huge_page(page);
2264 		return NULL;
2265 	}
2266 
2267 	h->surplus_huge_pages++;
2268 	h->surplus_huge_pages_node[page_to_nid(page)]++;
2269 
2270 out_unlock:
2271 	spin_unlock_irq(&hugetlb_lock);
2272 
2273 	return page;
2274 }
2275 
2276 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2277 				     int nid, nodemask_t *nmask)
2278 {
2279 	struct page *page;
2280 
2281 	if (hstate_is_gigantic(h))
2282 		return NULL;
2283 
2284 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2285 	if (!page)
2286 		return NULL;
2287 
2288 	/* fresh huge pages are frozen */
2289 	set_page_refcounted(page);
2290 
2291 	/*
2292 	 * We do not account these pages as surplus because they are only
2293 	 * temporary and will be released properly on the last reference
2294 	 */
2295 	SetHPageTemporary(page);
2296 
2297 	return page;
2298 }
2299 
2300 /*
2301  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2302  */
2303 static
2304 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2305 		struct vm_area_struct *vma, unsigned long addr)
2306 {
2307 	struct page *page = NULL;
2308 	struct mempolicy *mpol;
2309 	gfp_t gfp_mask = htlb_alloc_mask(h);
2310 	int nid;
2311 	nodemask_t *nodemask;
2312 
2313 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2314 	if (mpol_is_preferred_many(mpol)) {
2315 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2316 
2317 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2318 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
2319 
2320 		/* Fallback to all nodes if page==NULL */
2321 		nodemask = NULL;
2322 	}
2323 
2324 	if (!page)
2325 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
2326 	mpol_cond_put(mpol);
2327 	return page;
2328 }
2329 
2330 /* page migration callback function */
2331 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2332 		nodemask_t *nmask, gfp_t gfp_mask)
2333 {
2334 	spin_lock_irq(&hugetlb_lock);
2335 	if (available_huge_pages(h)) {
2336 		struct page *page;
2337 
2338 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
2339 		if (page) {
2340 			spin_unlock_irq(&hugetlb_lock);
2341 			return page;
2342 		}
2343 	}
2344 	spin_unlock_irq(&hugetlb_lock);
2345 
2346 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
2347 }
2348 
2349 /* mempolicy aware migration callback */
2350 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2351 		unsigned long address)
2352 {
2353 	struct mempolicy *mpol;
2354 	nodemask_t *nodemask;
2355 	struct page *page;
2356 	gfp_t gfp_mask;
2357 	int node;
2358 
2359 	gfp_mask = htlb_alloc_mask(h);
2360 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2361 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2362 	mpol_cond_put(mpol);
2363 
2364 	return page;
2365 }
2366 
2367 /*
2368  * Increase the hugetlb pool such that it can accommodate a reservation
2369  * of size 'delta'.
2370  */
2371 static int gather_surplus_pages(struct hstate *h, long delta)
2372 	__must_hold(&hugetlb_lock)
2373 {
2374 	LIST_HEAD(surplus_list);
2375 	struct page *page, *tmp;
2376 	int ret;
2377 	long i;
2378 	long needed, allocated;
2379 	bool alloc_ok = true;
2380 
2381 	lockdep_assert_held(&hugetlb_lock);
2382 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2383 	if (needed <= 0) {
2384 		h->resv_huge_pages += delta;
2385 		return 0;
2386 	}
2387 
2388 	allocated = 0;
2389 
2390 	ret = -ENOMEM;
2391 retry:
2392 	spin_unlock_irq(&hugetlb_lock);
2393 	for (i = 0; i < needed; i++) {
2394 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2395 				NUMA_NO_NODE, NULL);
2396 		if (!page) {
2397 			alloc_ok = false;
2398 			break;
2399 		}
2400 		list_add(&page->lru, &surplus_list);
2401 		cond_resched();
2402 	}
2403 	allocated += i;
2404 
2405 	/*
2406 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2407 	 * because either resv_huge_pages or free_huge_pages may have changed.
2408 	 */
2409 	spin_lock_irq(&hugetlb_lock);
2410 	needed = (h->resv_huge_pages + delta) -
2411 			(h->free_huge_pages + allocated);
2412 	if (needed > 0) {
2413 		if (alloc_ok)
2414 			goto retry;
2415 		/*
2416 		 * We were not able to allocate enough pages to
2417 		 * satisfy the entire reservation so we free what
2418 		 * we've allocated so far.
2419 		 */
2420 		goto free;
2421 	}
2422 	/*
2423 	 * The surplus_list now contains _at_least_ the number of extra pages
2424 	 * needed to accommodate the reservation.  Add the appropriate number
2425 	 * of pages to the hugetlb pool and free the extras back to the buddy
2426 	 * allocator.  Commit the entire reservation here to prevent another
2427 	 * process from stealing the pages as they are added to the pool but
2428 	 * before they are reserved.
2429 	 */
2430 	needed += allocated;
2431 	h->resv_huge_pages += delta;
2432 	ret = 0;
2433 
2434 	/* Free the needed pages to the hugetlb pool */
2435 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2436 		if ((--needed) < 0)
2437 			break;
2438 		/* Add the page to the hugetlb allocator */
2439 		enqueue_huge_page(h, page);
2440 	}
2441 free:
2442 	spin_unlock_irq(&hugetlb_lock);
2443 
2444 	/*
2445 	 * Free unnecessary surplus pages to the buddy allocator.
2446 	 * Pages have no ref count, call free_huge_page directly.
2447 	 */
2448 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2449 		free_huge_page(page);
2450 	spin_lock_irq(&hugetlb_lock);
2451 
2452 	return ret;
2453 }
2454 
2455 /*
2456  * This routine has two main purposes:
2457  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2458  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2459  *    to the associated reservation map.
2460  * 2) Free any unused surplus pages that may have been allocated to satisfy
2461  *    the reservation.  As many as unused_resv_pages may be freed.
2462  */
2463 static void return_unused_surplus_pages(struct hstate *h,
2464 					unsigned long unused_resv_pages)
2465 {
2466 	unsigned long nr_pages;
2467 	struct page *page;
2468 	LIST_HEAD(page_list);
2469 
2470 	lockdep_assert_held(&hugetlb_lock);
2471 	/* Uncommit the reservation */
2472 	h->resv_huge_pages -= unused_resv_pages;
2473 
2474 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2475 		goto out;
2476 
2477 	/*
2478 	 * Part (or even all) of the reservation could have been backed
2479 	 * by pre-allocated pages. Only free surplus pages.
2480 	 */
2481 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2482 
2483 	/*
2484 	 * We want to release as many surplus pages as possible, spread
2485 	 * evenly across all nodes with memory. Iterate across these nodes
2486 	 * until we can no longer free unreserved surplus pages. This occurs
2487 	 * when the nodes with surplus pages have no free pages.
2488 	 * remove_pool_huge_page() will balance the freed pages across the
2489 	 * on-line nodes with memory and will handle the hstate accounting.
2490 	 */
2491 	while (nr_pages--) {
2492 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2493 		if (!page)
2494 			goto out;
2495 
2496 		list_add(&page->lru, &page_list);
2497 	}
2498 
2499 out:
2500 	spin_unlock_irq(&hugetlb_lock);
2501 	update_and_free_pages_bulk(h, &page_list);
2502 	spin_lock_irq(&hugetlb_lock);
2503 }
2504 
2505 
2506 /*
2507  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2508  * are used by the huge page allocation routines to manage reservations.
2509  *
2510  * vma_needs_reservation is called to determine if the huge page at addr
2511  * within the vma has an associated reservation.  If a reservation is
2512  * needed, the value 1 is returned.  The caller is then responsible for
2513  * managing the global reservation and subpool usage counts.  After
2514  * the huge page has been allocated, vma_commit_reservation is called
2515  * to add the page to the reservation map.  If the page allocation fails,
2516  * the reservation must be ended instead of committed.  vma_end_reservation
2517  * is called in such cases.
2518  *
2519  * In the normal case, vma_commit_reservation returns the same value
2520  * as the preceding vma_needs_reservation call.  The only time this
2521  * is not the case is if a reserve map was changed between calls.  It
2522  * is the responsibility of the caller to notice the difference and
2523  * take appropriate action.
2524  *
2525  * vma_add_reservation is used in error paths where a reservation must
2526  * be restored when a newly allocated huge page must be freed.  It is
2527  * to be called after calling vma_needs_reservation to determine if a
2528  * reservation exists.
2529  *
2530  * vma_del_reservation is used in error paths where an entry in the reserve
2531  * map was created during huge page allocation and must be removed.  It is to
2532  * be called after calling vma_needs_reservation to determine if a reservation
2533  * exists.
2534  */
2535 enum vma_resv_mode {
2536 	VMA_NEEDS_RESV,
2537 	VMA_COMMIT_RESV,
2538 	VMA_END_RESV,
2539 	VMA_ADD_RESV,
2540 	VMA_DEL_RESV,
2541 };
2542 static long __vma_reservation_common(struct hstate *h,
2543 				struct vm_area_struct *vma, unsigned long addr,
2544 				enum vma_resv_mode mode)
2545 {
2546 	struct resv_map *resv;
2547 	pgoff_t idx;
2548 	long ret;
2549 	long dummy_out_regions_needed;
2550 
2551 	resv = vma_resv_map(vma);
2552 	if (!resv)
2553 		return 1;
2554 
2555 	idx = vma_hugecache_offset(h, vma, addr);
2556 	switch (mode) {
2557 	case VMA_NEEDS_RESV:
2558 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2559 		/* We assume that vma_reservation_* routines always operate on
2560 		 * 1 page, and that adding to resv map a 1 page entry can only
2561 		 * ever require 1 region.
2562 		 */
2563 		VM_BUG_ON(dummy_out_regions_needed != 1);
2564 		break;
2565 	case VMA_COMMIT_RESV:
2566 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2567 		/* region_add calls of range 1 should never fail. */
2568 		VM_BUG_ON(ret < 0);
2569 		break;
2570 	case VMA_END_RESV:
2571 		region_abort(resv, idx, idx + 1, 1);
2572 		ret = 0;
2573 		break;
2574 	case VMA_ADD_RESV:
2575 		if (vma->vm_flags & VM_MAYSHARE) {
2576 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2577 			/* region_add calls of range 1 should never fail. */
2578 			VM_BUG_ON(ret < 0);
2579 		} else {
2580 			region_abort(resv, idx, idx + 1, 1);
2581 			ret = region_del(resv, idx, idx + 1);
2582 		}
2583 		break;
2584 	case VMA_DEL_RESV:
2585 		if (vma->vm_flags & VM_MAYSHARE) {
2586 			region_abort(resv, idx, idx + 1, 1);
2587 			ret = region_del(resv, idx, idx + 1);
2588 		} else {
2589 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2590 			/* region_add calls of range 1 should never fail. */
2591 			VM_BUG_ON(ret < 0);
2592 		}
2593 		break;
2594 	default:
2595 		BUG();
2596 	}
2597 
2598 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2599 		return ret;
2600 	/*
2601 	 * We know private mapping must have HPAGE_RESV_OWNER set.
2602 	 *
2603 	 * In most cases, reserves always exist for private mappings.
2604 	 * However, a file associated with mapping could have been
2605 	 * hole punched or truncated after reserves were consumed.
2606 	 * As subsequent fault on such a range will not use reserves.
2607 	 * Subtle - The reserve map for private mappings has the
2608 	 * opposite meaning than that of shared mappings.  If NO
2609 	 * entry is in the reserve map, it means a reservation exists.
2610 	 * If an entry exists in the reserve map, it means the
2611 	 * reservation has already been consumed.  As a result, the
2612 	 * return value of this routine is the opposite of the
2613 	 * value returned from reserve map manipulation routines above.
2614 	 */
2615 	if (ret > 0)
2616 		return 0;
2617 	if (ret == 0)
2618 		return 1;
2619 	return ret;
2620 }
2621 
2622 static long vma_needs_reservation(struct hstate *h,
2623 			struct vm_area_struct *vma, unsigned long addr)
2624 {
2625 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2626 }
2627 
2628 static long vma_commit_reservation(struct hstate *h,
2629 			struct vm_area_struct *vma, unsigned long addr)
2630 {
2631 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2632 }
2633 
2634 static void vma_end_reservation(struct hstate *h,
2635 			struct vm_area_struct *vma, unsigned long addr)
2636 {
2637 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2638 }
2639 
2640 static long vma_add_reservation(struct hstate *h,
2641 			struct vm_area_struct *vma, unsigned long addr)
2642 {
2643 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2644 }
2645 
2646 static long vma_del_reservation(struct hstate *h,
2647 			struct vm_area_struct *vma, unsigned long addr)
2648 {
2649 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2650 }
2651 
2652 /*
2653  * This routine is called to restore reservation information on error paths.
2654  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2655  * the hugetlb mutex should remain held when calling this routine.
2656  *
2657  * It handles two specific cases:
2658  * 1) A reservation was in place and the page consumed the reservation.
2659  *    HPageRestoreReserve is set in the page.
2660  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2661  *    not set.  However, alloc_huge_page always updates the reserve map.
2662  *
2663  * In case 1, free_huge_page later in the error path will increment the
2664  * global reserve count.  But, free_huge_page does not have enough context
2665  * to adjust the reservation map.  This case deals primarily with private
2666  * mappings.  Adjust the reserve map here to be consistent with global
2667  * reserve count adjustments to be made by free_huge_page.  Make sure the
2668  * reserve map indicates there is a reservation present.
2669  *
2670  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2671  */
2672 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2673 			unsigned long address, struct page *page)
2674 {
2675 	long rc = vma_needs_reservation(h, vma, address);
2676 
2677 	if (HPageRestoreReserve(page)) {
2678 		if (unlikely(rc < 0))
2679 			/*
2680 			 * Rare out of memory condition in reserve map
2681 			 * manipulation.  Clear HPageRestoreReserve so that
2682 			 * global reserve count will not be incremented
2683 			 * by free_huge_page.  This will make it appear
2684 			 * as though the reservation for this page was
2685 			 * consumed.  This may prevent the task from
2686 			 * faulting in the page at a later time.  This
2687 			 * is better than inconsistent global huge page
2688 			 * accounting of reserve counts.
2689 			 */
2690 			ClearHPageRestoreReserve(page);
2691 		else if (rc)
2692 			(void)vma_add_reservation(h, vma, address);
2693 		else
2694 			vma_end_reservation(h, vma, address);
2695 	} else {
2696 		if (!rc) {
2697 			/*
2698 			 * This indicates there is an entry in the reserve map
2699 			 * not added by alloc_huge_page.  We know it was added
2700 			 * before the alloc_huge_page call, otherwise
2701 			 * HPageRestoreReserve would be set on the page.
2702 			 * Remove the entry so that a subsequent allocation
2703 			 * does not consume a reservation.
2704 			 */
2705 			rc = vma_del_reservation(h, vma, address);
2706 			if (rc < 0)
2707 				/*
2708 				 * VERY rare out of memory condition.  Since
2709 				 * we can not delete the entry, set
2710 				 * HPageRestoreReserve so that the reserve
2711 				 * count will be incremented when the page
2712 				 * is freed.  This reserve will be consumed
2713 				 * on a subsequent allocation.
2714 				 */
2715 				SetHPageRestoreReserve(page);
2716 		} else if (rc < 0) {
2717 			/*
2718 			 * Rare out of memory condition from
2719 			 * vma_needs_reservation call.  Memory allocation is
2720 			 * only attempted if a new entry is needed.  Therefore,
2721 			 * this implies there is not an entry in the
2722 			 * reserve map.
2723 			 *
2724 			 * For shared mappings, no entry in the map indicates
2725 			 * no reservation.  We are done.
2726 			 */
2727 			if (!(vma->vm_flags & VM_MAYSHARE))
2728 				/*
2729 				 * For private mappings, no entry indicates
2730 				 * a reservation is present.  Since we can
2731 				 * not add an entry, set SetHPageRestoreReserve
2732 				 * on the page so reserve count will be
2733 				 * incremented when freed.  This reserve will
2734 				 * be consumed on a subsequent allocation.
2735 				 */
2736 				SetHPageRestoreReserve(page);
2737 		} else
2738 			/*
2739 			 * No reservation present, do nothing
2740 			 */
2741 			 vma_end_reservation(h, vma, address);
2742 	}
2743 }
2744 
2745 /*
2746  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2747  * @h: struct hstate old page belongs to
2748  * @old_page: Old page to dissolve
2749  * @list: List to isolate the page in case we need to
2750  * Returns 0 on success, otherwise negated error.
2751  */
2752 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2753 					struct list_head *list)
2754 {
2755 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2756 	struct folio *old_folio = page_folio(old_page);
2757 	int nid = folio_nid(old_folio);
2758 	struct page *new_page;
2759 	struct folio *new_folio;
2760 	int ret = 0;
2761 
2762 	/*
2763 	 * Before dissolving the page, we need to allocate a new one for the
2764 	 * pool to remain stable.  Here, we allocate the page and 'prep' it
2765 	 * by doing everything but actually updating counters and adding to
2766 	 * the pool.  This simplifies and let us do most of the processing
2767 	 * under the lock.
2768 	 */
2769 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2770 	if (!new_page)
2771 		return -ENOMEM;
2772 	new_folio = page_folio(new_page);
2773 	__prep_new_hugetlb_folio(h, new_folio);
2774 
2775 retry:
2776 	spin_lock_irq(&hugetlb_lock);
2777 	if (!folio_test_hugetlb(old_folio)) {
2778 		/*
2779 		 * Freed from under us. Drop new_page too.
2780 		 */
2781 		goto free_new;
2782 	} else if (folio_ref_count(old_folio)) {
2783 		/*
2784 		 * Someone has grabbed the page, try to isolate it here.
2785 		 * Fail with -EBUSY if not possible.
2786 		 */
2787 		spin_unlock_irq(&hugetlb_lock);
2788 		ret = isolate_hugetlb(old_page, list);
2789 		spin_lock_irq(&hugetlb_lock);
2790 		goto free_new;
2791 	} else if (!folio_test_hugetlb_freed(old_folio)) {
2792 		/*
2793 		 * Page's refcount is 0 but it has not been enqueued in the
2794 		 * freelist yet. Race window is small, so we can succeed here if
2795 		 * we retry.
2796 		 */
2797 		spin_unlock_irq(&hugetlb_lock);
2798 		cond_resched();
2799 		goto retry;
2800 	} else {
2801 		/*
2802 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2803 		 * the freelist and decrease the counters. These will be
2804 		 * incremented again when calling __prep_account_new_huge_page()
2805 		 * and enqueue_huge_page() for new_page. The counters will remain
2806 		 * stable since this happens under the lock.
2807 		 */
2808 		remove_hugetlb_page(h, old_page, false);
2809 
2810 		/*
2811 		 * Ref count on new page is already zero as it was dropped
2812 		 * earlier.  It can be directly added to the pool free list.
2813 		 */
2814 		__prep_account_new_huge_page(h, nid);
2815 		enqueue_huge_page(h, new_page);
2816 
2817 		/*
2818 		 * Pages have been replaced, we can safely free the old one.
2819 		 */
2820 		spin_unlock_irq(&hugetlb_lock);
2821 		update_and_free_page(h, old_page, false);
2822 	}
2823 
2824 	return ret;
2825 
2826 free_new:
2827 	spin_unlock_irq(&hugetlb_lock);
2828 	/* Page has a zero ref count, but needs a ref to be freed */
2829 	folio_ref_unfreeze(new_folio, 1);
2830 	update_and_free_page(h, new_page, false);
2831 
2832 	return ret;
2833 }
2834 
2835 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2836 {
2837 	struct hstate *h;
2838 	struct folio *folio = page_folio(page);
2839 	int ret = -EBUSY;
2840 
2841 	/*
2842 	 * The page might have been dissolved from under our feet, so make sure
2843 	 * to carefully check the state under the lock.
2844 	 * Return success when racing as if we dissolved the page ourselves.
2845 	 */
2846 	spin_lock_irq(&hugetlb_lock);
2847 	if (folio_test_hugetlb(folio)) {
2848 		h = folio_hstate(folio);
2849 	} else {
2850 		spin_unlock_irq(&hugetlb_lock);
2851 		return 0;
2852 	}
2853 	spin_unlock_irq(&hugetlb_lock);
2854 
2855 	/*
2856 	 * Fence off gigantic pages as there is a cyclic dependency between
2857 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2858 	 * of bailing out right away without further retrying.
2859 	 */
2860 	if (hstate_is_gigantic(h))
2861 		return -ENOMEM;
2862 
2863 	if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
2864 		ret = 0;
2865 	else if (!folio_ref_count(folio))
2866 		ret = alloc_and_dissolve_huge_page(h, &folio->page, list);
2867 
2868 	return ret;
2869 }
2870 
2871 struct page *alloc_huge_page(struct vm_area_struct *vma,
2872 				    unsigned long addr, int avoid_reserve)
2873 {
2874 	struct hugepage_subpool *spool = subpool_vma(vma);
2875 	struct hstate *h = hstate_vma(vma);
2876 	struct page *page;
2877 	struct folio *folio;
2878 	long map_chg, map_commit;
2879 	long gbl_chg;
2880 	int ret, idx;
2881 	struct hugetlb_cgroup *h_cg;
2882 	bool deferred_reserve;
2883 
2884 	idx = hstate_index(h);
2885 	/*
2886 	 * Examine the region/reserve map to determine if the process
2887 	 * has a reservation for the page to be allocated.  A return
2888 	 * code of zero indicates a reservation exists (no change).
2889 	 */
2890 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2891 	if (map_chg < 0)
2892 		return ERR_PTR(-ENOMEM);
2893 
2894 	/*
2895 	 * Processes that did not create the mapping will have no
2896 	 * reserves as indicated by the region/reserve map. Check
2897 	 * that the allocation will not exceed the subpool limit.
2898 	 * Allocations for MAP_NORESERVE mappings also need to be
2899 	 * checked against any subpool limit.
2900 	 */
2901 	if (map_chg || avoid_reserve) {
2902 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2903 		if (gbl_chg < 0) {
2904 			vma_end_reservation(h, vma, addr);
2905 			return ERR_PTR(-ENOSPC);
2906 		}
2907 
2908 		/*
2909 		 * Even though there was no reservation in the region/reserve
2910 		 * map, there could be reservations associated with the
2911 		 * subpool that can be used.  This would be indicated if the
2912 		 * return value of hugepage_subpool_get_pages() is zero.
2913 		 * However, if avoid_reserve is specified we still avoid even
2914 		 * the subpool reservations.
2915 		 */
2916 		if (avoid_reserve)
2917 			gbl_chg = 1;
2918 	}
2919 
2920 	/* If this allocation is not consuming a reservation, charge it now.
2921 	 */
2922 	deferred_reserve = map_chg || avoid_reserve;
2923 	if (deferred_reserve) {
2924 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2925 			idx, pages_per_huge_page(h), &h_cg);
2926 		if (ret)
2927 			goto out_subpool_put;
2928 	}
2929 
2930 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2931 	if (ret)
2932 		goto out_uncharge_cgroup_reservation;
2933 
2934 	spin_lock_irq(&hugetlb_lock);
2935 	/*
2936 	 * glb_chg is passed to indicate whether or not a page must be taken
2937 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2938 	 * a reservation exists for the allocation.
2939 	 */
2940 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2941 
2942 	if (!page) {
2943 		spin_unlock_irq(&hugetlb_lock);
2944 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2945 		if (!page)
2946 			goto out_uncharge_cgroup;
2947 		spin_lock_irq(&hugetlb_lock);
2948 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2949 			SetHPageRestoreReserve(page);
2950 			h->resv_huge_pages--;
2951 		}
2952 		list_add(&page->lru, &h->hugepage_activelist);
2953 		set_page_refcounted(page);
2954 		/* Fall through */
2955 	}
2956 	folio = page_folio(page);
2957 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2958 	/* If allocation is not consuming a reservation, also store the
2959 	 * hugetlb_cgroup pointer on the page.
2960 	 */
2961 	if (deferred_reserve) {
2962 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2963 						  h_cg, page);
2964 	}
2965 
2966 	spin_unlock_irq(&hugetlb_lock);
2967 
2968 	hugetlb_set_page_subpool(page, spool);
2969 
2970 	map_commit = vma_commit_reservation(h, vma, addr);
2971 	if (unlikely(map_chg > map_commit)) {
2972 		/*
2973 		 * The page was added to the reservation map between
2974 		 * vma_needs_reservation and vma_commit_reservation.
2975 		 * This indicates a race with hugetlb_reserve_pages.
2976 		 * Adjust for the subpool count incremented above AND
2977 		 * in hugetlb_reserve_pages for the same page.  Also,
2978 		 * the reservation count added in hugetlb_reserve_pages
2979 		 * no longer applies.
2980 		 */
2981 		long rsv_adjust;
2982 
2983 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2984 		hugetlb_acct_memory(h, -rsv_adjust);
2985 		if (deferred_reserve)
2986 			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
2987 					pages_per_huge_page(h), folio);
2988 	}
2989 	return page;
2990 
2991 out_uncharge_cgroup:
2992 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2993 out_uncharge_cgroup_reservation:
2994 	if (deferred_reserve)
2995 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2996 						    h_cg);
2997 out_subpool_put:
2998 	if (map_chg || avoid_reserve)
2999 		hugepage_subpool_put_pages(spool, 1);
3000 	vma_end_reservation(h, vma, addr);
3001 	return ERR_PTR(-ENOSPC);
3002 }
3003 
3004 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3005 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3006 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3007 {
3008 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
3009 	int nr_nodes, node;
3010 
3011 	/* do node specific alloc */
3012 	if (nid != NUMA_NO_NODE) {
3013 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3014 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3015 		if (!m)
3016 			return 0;
3017 		goto found;
3018 	}
3019 	/* allocate from next node when distributing huge pages */
3020 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3021 		m = memblock_alloc_try_nid_raw(
3022 				huge_page_size(h), huge_page_size(h),
3023 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3024 		/*
3025 		 * Use the beginning of the huge page to store the
3026 		 * huge_bootmem_page struct (until gather_bootmem
3027 		 * puts them into the mem_map).
3028 		 */
3029 		if (!m)
3030 			return 0;
3031 		goto found;
3032 	}
3033 
3034 found:
3035 	/* Put them into a private list first because mem_map is not up yet */
3036 	INIT_LIST_HEAD(&m->list);
3037 	list_add(&m->list, &huge_boot_pages);
3038 	m->hstate = h;
3039 	return 1;
3040 }
3041 
3042 /*
3043  * Put bootmem huge pages into the standard lists after mem_map is up.
3044  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
3045  */
3046 static void __init gather_bootmem_prealloc(void)
3047 {
3048 	struct huge_bootmem_page *m;
3049 
3050 	list_for_each_entry(m, &huge_boot_pages, list) {
3051 		struct page *page = virt_to_page(m);
3052 		struct hstate *h = m->hstate;
3053 
3054 		VM_BUG_ON(!hstate_is_gigantic(h));
3055 		WARN_ON(page_count(page) != 1);
3056 		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3057 			WARN_ON(PageReserved(page));
3058 			prep_new_huge_page(h, page, page_to_nid(page));
3059 			free_huge_page(page); /* add to the hugepage allocator */
3060 		} else {
3061 			/* VERY unlikely inflated ref count on a tail page */
3062 			free_gigantic_page(page, huge_page_order(h));
3063 		}
3064 
3065 		/*
3066 		 * We need to restore the 'stolen' pages to totalram_pages
3067 		 * in order to fix confusing memory reports from free(1) and
3068 		 * other side-effects, like CommitLimit going negative.
3069 		 */
3070 		adjust_managed_page_count(page, pages_per_huge_page(h));
3071 		cond_resched();
3072 	}
3073 }
3074 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3075 {
3076 	unsigned long i;
3077 	char buf[32];
3078 
3079 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3080 		if (hstate_is_gigantic(h)) {
3081 			if (!alloc_bootmem_huge_page(h, nid))
3082 				break;
3083 		} else {
3084 			struct page *page;
3085 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3086 
3087 			page = alloc_fresh_huge_page(h, gfp_mask, nid,
3088 					&node_states[N_MEMORY], NULL);
3089 			if (!page)
3090 				break;
3091 			free_huge_page(page); /* free it into the hugepage allocator */
3092 		}
3093 		cond_resched();
3094 	}
3095 	if (i == h->max_huge_pages_node[nid])
3096 		return;
3097 
3098 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3099 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3100 		h->max_huge_pages_node[nid], buf, nid, i);
3101 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3102 	h->max_huge_pages_node[nid] = i;
3103 }
3104 
3105 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3106 {
3107 	unsigned long i;
3108 	nodemask_t *node_alloc_noretry;
3109 	bool node_specific_alloc = false;
3110 
3111 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3112 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3113 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3114 		return;
3115 	}
3116 
3117 	/* do node specific alloc */
3118 	for_each_online_node(i) {
3119 		if (h->max_huge_pages_node[i] > 0) {
3120 			hugetlb_hstate_alloc_pages_onenode(h, i);
3121 			node_specific_alloc = true;
3122 		}
3123 	}
3124 
3125 	if (node_specific_alloc)
3126 		return;
3127 
3128 	/* below will do all node balanced alloc */
3129 	if (!hstate_is_gigantic(h)) {
3130 		/*
3131 		 * Bit mask controlling how hard we retry per-node allocations.
3132 		 * Ignore errors as lower level routines can deal with
3133 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3134 		 * time, we are likely in bigger trouble.
3135 		 */
3136 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3137 						GFP_KERNEL);
3138 	} else {
3139 		/* allocations done at boot time */
3140 		node_alloc_noretry = NULL;
3141 	}
3142 
3143 	/* bit mask controlling how hard we retry per-node allocations */
3144 	if (node_alloc_noretry)
3145 		nodes_clear(*node_alloc_noretry);
3146 
3147 	for (i = 0; i < h->max_huge_pages; ++i) {
3148 		if (hstate_is_gigantic(h)) {
3149 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3150 				break;
3151 		} else if (!alloc_pool_huge_page(h,
3152 					 &node_states[N_MEMORY],
3153 					 node_alloc_noretry))
3154 			break;
3155 		cond_resched();
3156 	}
3157 	if (i < h->max_huge_pages) {
3158 		char buf[32];
3159 
3160 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3161 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3162 			h->max_huge_pages, buf, i);
3163 		h->max_huge_pages = i;
3164 	}
3165 	kfree(node_alloc_noretry);
3166 }
3167 
3168 static void __init hugetlb_init_hstates(void)
3169 {
3170 	struct hstate *h, *h2;
3171 
3172 	for_each_hstate(h) {
3173 		/* oversize hugepages were init'ed in early boot */
3174 		if (!hstate_is_gigantic(h))
3175 			hugetlb_hstate_alloc_pages(h);
3176 
3177 		/*
3178 		 * Set demote order for each hstate.  Note that
3179 		 * h->demote_order is initially 0.
3180 		 * - We can not demote gigantic pages if runtime freeing
3181 		 *   is not supported, so skip this.
3182 		 * - If CMA allocation is possible, we can not demote
3183 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3184 		 */
3185 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3186 			continue;
3187 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3188 			continue;
3189 		for_each_hstate(h2) {
3190 			if (h2 == h)
3191 				continue;
3192 			if (h2->order < h->order &&
3193 			    h2->order > h->demote_order)
3194 				h->demote_order = h2->order;
3195 		}
3196 	}
3197 }
3198 
3199 static void __init report_hugepages(void)
3200 {
3201 	struct hstate *h;
3202 
3203 	for_each_hstate(h) {
3204 		char buf[32];
3205 
3206 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3207 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3208 			buf, h->free_huge_pages);
3209 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3210 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3211 	}
3212 }
3213 
3214 #ifdef CONFIG_HIGHMEM
3215 static void try_to_free_low(struct hstate *h, unsigned long count,
3216 						nodemask_t *nodes_allowed)
3217 {
3218 	int i;
3219 	LIST_HEAD(page_list);
3220 
3221 	lockdep_assert_held(&hugetlb_lock);
3222 	if (hstate_is_gigantic(h))
3223 		return;
3224 
3225 	/*
3226 	 * Collect pages to be freed on a list, and free after dropping lock
3227 	 */
3228 	for_each_node_mask(i, *nodes_allowed) {
3229 		struct page *page, *next;
3230 		struct list_head *freel = &h->hugepage_freelists[i];
3231 		list_for_each_entry_safe(page, next, freel, lru) {
3232 			if (count >= h->nr_huge_pages)
3233 				goto out;
3234 			if (PageHighMem(page))
3235 				continue;
3236 			remove_hugetlb_page(h, page, false);
3237 			list_add(&page->lru, &page_list);
3238 		}
3239 	}
3240 
3241 out:
3242 	spin_unlock_irq(&hugetlb_lock);
3243 	update_and_free_pages_bulk(h, &page_list);
3244 	spin_lock_irq(&hugetlb_lock);
3245 }
3246 #else
3247 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3248 						nodemask_t *nodes_allowed)
3249 {
3250 }
3251 #endif
3252 
3253 /*
3254  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3255  * balanced by operating on them in a round-robin fashion.
3256  * Returns 1 if an adjustment was made.
3257  */
3258 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3259 				int delta)
3260 {
3261 	int nr_nodes, node;
3262 
3263 	lockdep_assert_held(&hugetlb_lock);
3264 	VM_BUG_ON(delta != -1 && delta != 1);
3265 
3266 	if (delta < 0) {
3267 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3268 			if (h->surplus_huge_pages_node[node])
3269 				goto found;
3270 		}
3271 	} else {
3272 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3273 			if (h->surplus_huge_pages_node[node] <
3274 					h->nr_huge_pages_node[node])
3275 				goto found;
3276 		}
3277 	}
3278 	return 0;
3279 
3280 found:
3281 	h->surplus_huge_pages += delta;
3282 	h->surplus_huge_pages_node[node] += delta;
3283 	return 1;
3284 }
3285 
3286 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3287 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3288 			      nodemask_t *nodes_allowed)
3289 {
3290 	unsigned long min_count, ret;
3291 	struct page *page;
3292 	LIST_HEAD(page_list);
3293 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3294 
3295 	/*
3296 	 * Bit mask controlling how hard we retry per-node allocations.
3297 	 * If we can not allocate the bit mask, do not attempt to allocate
3298 	 * the requested huge pages.
3299 	 */
3300 	if (node_alloc_noretry)
3301 		nodes_clear(*node_alloc_noretry);
3302 	else
3303 		return -ENOMEM;
3304 
3305 	/*
3306 	 * resize_lock mutex prevents concurrent adjustments to number of
3307 	 * pages in hstate via the proc/sysfs interfaces.
3308 	 */
3309 	mutex_lock(&h->resize_lock);
3310 	flush_free_hpage_work(h);
3311 	spin_lock_irq(&hugetlb_lock);
3312 
3313 	/*
3314 	 * Check for a node specific request.
3315 	 * Changing node specific huge page count may require a corresponding
3316 	 * change to the global count.  In any case, the passed node mask
3317 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3318 	 */
3319 	if (nid != NUMA_NO_NODE) {
3320 		unsigned long old_count = count;
3321 
3322 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3323 		/*
3324 		 * User may have specified a large count value which caused the
3325 		 * above calculation to overflow.  In this case, they wanted
3326 		 * to allocate as many huge pages as possible.  Set count to
3327 		 * largest possible value to align with their intention.
3328 		 */
3329 		if (count < old_count)
3330 			count = ULONG_MAX;
3331 	}
3332 
3333 	/*
3334 	 * Gigantic pages runtime allocation depend on the capability for large
3335 	 * page range allocation.
3336 	 * If the system does not provide this feature, return an error when
3337 	 * the user tries to allocate gigantic pages but let the user free the
3338 	 * boottime allocated gigantic pages.
3339 	 */
3340 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3341 		if (count > persistent_huge_pages(h)) {
3342 			spin_unlock_irq(&hugetlb_lock);
3343 			mutex_unlock(&h->resize_lock);
3344 			NODEMASK_FREE(node_alloc_noretry);
3345 			return -EINVAL;
3346 		}
3347 		/* Fall through to decrease pool */
3348 	}
3349 
3350 	/*
3351 	 * Increase the pool size
3352 	 * First take pages out of surplus state.  Then make up the
3353 	 * remaining difference by allocating fresh huge pages.
3354 	 *
3355 	 * We might race with alloc_surplus_huge_page() here and be unable
3356 	 * to convert a surplus huge page to a normal huge page. That is
3357 	 * not critical, though, it just means the overall size of the
3358 	 * pool might be one hugepage larger than it needs to be, but
3359 	 * within all the constraints specified by the sysctls.
3360 	 */
3361 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3362 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3363 			break;
3364 	}
3365 
3366 	while (count > persistent_huge_pages(h)) {
3367 		/*
3368 		 * If this allocation races such that we no longer need the
3369 		 * page, free_huge_page will handle it by freeing the page
3370 		 * and reducing the surplus.
3371 		 */
3372 		spin_unlock_irq(&hugetlb_lock);
3373 
3374 		/* yield cpu to avoid soft lockup */
3375 		cond_resched();
3376 
3377 		ret = alloc_pool_huge_page(h, nodes_allowed,
3378 						node_alloc_noretry);
3379 		spin_lock_irq(&hugetlb_lock);
3380 		if (!ret)
3381 			goto out;
3382 
3383 		/* Bail for signals. Probably ctrl-c from user */
3384 		if (signal_pending(current))
3385 			goto out;
3386 	}
3387 
3388 	/*
3389 	 * Decrease the pool size
3390 	 * First return free pages to the buddy allocator (being careful
3391 	 * to keep enough around to satisfy reservations).  Then place
3392 	 * pages into surplus state as needed so the pool will shrink
3393 	 * to the desired size as pages become free.
3394 	 *
3395 	 * By placing pages into the surplus state independent of the
3396 	 * overcommit value, we are allowing the surplus pool size to
3397 	 * exceed overcommit. There are few sane options here. Since
3398 	 * alloc_surplus_huge_page() is checking the global counter,
3399 	 * though, we'll note that we're not allowed to exceed surplus
3400 	 * and won't grow the pool anywhere else. Not until one of the
3401 	 * sysctls are changed, or the surplus pages go out of use.
3402 	 */
3403 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3404 	min_count = max(count, min_count);
3405 	try_to_free_low(h, min_count, nodes_allowed);
3406 
3407 	/*
3408 	 * Collect pages to be removed on list without dropping lock
3409 	 */
3410 	while (min_count < persistent_huge_pages(h)) {
3411 		page = remove_pool_huge_page(h, nodes_allowed, 0);
3412 		if (!page)
3413 			break;
3414 
3415 		list_add(&page->lru, &page_list);
3416 	}
3417 	/* free the pages after dropping lock */
3418 	spin_unlock_irq(&hugetlb_lock);
3419 	update_and_free_pages_bulk(h, &page_list);
3420 	flush_free_hpage_work(h);
3421 	spin_lock_irq(&hugetlb_lock);
3422 
3423 	while (count < persistent_huge_pages(h)) {
3424 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3425 			break;
3426 	}
3427 out:
3428 	h->max_huge_pages = persistent_huge_pages(h);
3429 	spin_unlock_irq(&hugetlb_lock);
3430 	mutex_unlock(&h->resize_lock);
3431 
3432 	NODEMASK_FREE(node_alloc_noretry);
3433 
3434 	return 0;
3435 }
3436 
3437 static int demote_free_huge_page(struct hstate *h, struct page *page)
3438 {
3439 	int i, nid = page_to_nid(page);
3440 	struct hstate *target_hstate;
3441 	struct page *subpage;
3442 	int rc = 0;
3443 
3444 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3445 
3446 	remove_hugetlb_page_for_demote(h, page, false);
3447 	spin_unlock_irq(&hugetlb_lock);
3448 
3449 	rc = hugetlb_vmemmap_restore(h, page);
3450 	if (rc) {
3451 		/* Allocation of vmemmmap failed, we can not demote page */
3452 		spin_lock_irq(&hugetlb_lock);
3453 		set_page_refcounted(page);
3454 		add_hugetlb_page(h, page, false);
3455 		return rc;
3456 	}
3457 
3458 	/*
3459 	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
3460 	 * sizes as it will not ref count pages.
3461 	 */
3462 	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
3463 
3464 	/*
3465 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3466 	 * Without the mutex, pages added to target hstate could be marked
3467 	 * as surplus.
3468 	 *
3469 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
3470 	 * use the convention of always taking larger size hstate mutex first.
3471 	 */
3472 	mutex_lock(&target_hstate->resize_lock);
3473 	for (i = 0; i < pages_per_huge_page(h);
3474 				i += pages_per_huge_page(target_hstate)) {
3475 		subpage = nth_page(page, i);
3476 		if (hstate_is_gigantic(target_hstate))
3477 			prep_compound_gigantic_page_for_demote(subpage,
3478 							target_hstate->order);
3479 		else
3480 			prep_compound_page(subpage, target_hstate->order);
3481 		set_page_private(subpage, 0);
3482 		prep_new_huge_page(target_hstate, subpage, nid);
3483 		free_huge_page(subpage);
3484 	}
3485 	mutex_unlock(&target_hstate->resize_lock);
3486 
3487 	spin_lock_irq(&hugetlb_lock);
3488 
3489 	/*
3490 	 * Not absolutely necessary, but for consistency update max_huge_pages
3491 	 * based on pool changes for the demoted page.
3492 	 */
3493 	h->max_huge_pages--;
3494 	target_hstate->max_huge_pages +=
3495 		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3496 
3497 	return rc;
3498 }
3499 
3500 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3501 	__must_hold(&hugetlb_lock)
3502 {
3503 	int nr_nodes, node;
3504 	struct page *page;
3505 
3506 	lockdep_assert_held(&hugetlb_lock);
3507 
3508 	/* We should never get here if no demote order */
3509 	if (!h->demote_order) {
3510 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3511 		return -EINVAL;		/* internal error */
3512 	}
3513 
3514 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3515 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
3516 			if (PageHWPoison(page))
3517 				continue;
3518 
3519 			return demote_free_huge_page(h, page);
3520 		}
3521 	}
3522 
3523 	/*
3524 	 * Only way to get here is if all pages on free lists are poisoned.
3525 	 * Return -EBUSY so that caller will not retry.
3526 	 */
3527 	return -EBUSY;
3528 }
3529 
3530 #define HSTATE_ATTR_RO(_name) \
3531 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3532 
3533 #define HSTATE_ATTR_WO(_name) \
3534 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3535 
3536 #define HSTATE_ATTR(_name) \
3537 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3538 
3539 static struct kobject *hugepages_kobj;
3540 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3541 
3542 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3543 
3544 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3545 {
3546 	int i;
3547 
3548 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
3549 		if (hstate_kobjs[i] == kobj) {
3550 			if (nidp)
3551 				*nidp = NUMA_NO_NODE;
3552 			return &hstates[i];
3553 		}
3554 
3555 	return kobj_to_node_hstate(kobj, nidp);
3556 }
3557 
3558 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3559 					struct kobj_attribute *attr, char *buf)
3560 {
3561 	struct hstate *h;
3562 	unsigned long nr_huge_pages;
3563 	int nid;
3564 
3565 	h = kobj_to_hstate(kobj, &nid);
3566 	if (nid == NUMA_NO_NODE)
3567 		nr_huge_pages = h->nr_huge_pages;
3568 	else
3569 		nr_huge_pages = h->nr_huge_pages_node[nid];
3570 
3571 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3572 }
3573 
3574 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3575 					   struct hstate *h, int nid,
3576 					   unsigned long count, size_t len)
3577 {
3578 	int err;
3579 	nodemask_t nodes_allowed, *n_mask;
3580 
3581 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3582 		return -EINVAL;
3583 
3584 	if (nid == NUMA_NO_NODE) {
3585 		/*
3586 		 * global hstate attribute
3587 		 */
3588 		if (!(obey_mempolicy &&
3589 				init_nodemask_of_mempolicy(&nodes_allowed)))
3590 			n_mask = &node_states[N_MEMORY];
3591 		else
3592 			n_mask = &nodes_allowed;
3593 	} else {
3594 		/*
3595 		 * Node specific request.  count adjustment happens in
3596 		 * set_max_huge_pages() after acquiring hugetlb_lock.
3597 		 */
3598 		init_nodemask_of_node(&nodes_allowed, nid);
3599 		n_mask = &nodes_allowed;
3600 	}
3601 
3602 	err = set_max_huge_pages(h, count, nid, n_mask);
3603 
3604 	return err ? err : len;
3605 }
3606 
3607 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3608 					 struct kobject *kobj, const char *buf,
3609 					 size_t len)
3610 {
3611 	struct hstate *h;
3612 	unsigned long count;
3613 	int nid;
3614 	int err;
3615 
3616 	err = kstrtoul(buf, 10, &count);
3617 	if (err)
3618 		return err;
3619 
3620 	h = kobj_to_hstate(kobj, &nid);
3621 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3622 }
3623 
3624 static ssize_t nr_hugepages_show(struct kobject *kobj,
3625 				       struct kobj_attribute *attr, char *buf)
3626 {
3627 	return nr_hugepages_show_common(kobj, attr, buf);
3628 }
3629 
3630 static ssize_t nr_hugepages_store(struct kobject *kobj,
3631 	       struct kobj_attribute *attr, const char *buf, size_t len)
3632 {
3633 	return nr_hugepages_store_common(false, kobj, buf, len);
3634 }
3635 HSTATE_ATTR(nr_hugepages);
3636 
3637 #ifdef CONFIG_NUMA
3638 
3639 /*
3640  * hstate attribute for optionally mempolicy-based constraint on persistent
3641  * huge page alloc/free.
3642  */
3643 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3644 					   struct kobj_attribute *attr,
3645 					   char *buf)
3646 {
3647 	return nr_hugepages_show_common(kobj, attr, buf);
3648 }
3649 
3650 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3651 	       struct kobj_attribute *attr, const char *buf, size_t len)
3652 {
3653 	return nr_hugepages_store_common(true, kobj, buf, len);
3654 }
3655 HSTATE_ATTR(nr_hugepages_mempolicy);
3656 #endif
3657 
3658 
3659 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3660 					struct kobj_attribute *attr, char *buf)
3661 {
3662 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3663 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3664 }
3665 
3666 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3667 		struct kobj_attribute *attr, const char *buf, size_t count)
3668 {
3669 	int err;
3670 	unsigned long input;
3671 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3672 
3673 	if (hstate_is_gigantic(h))
3674 		return -EINVAL;
3675 
3676 	err = kstrtoul(buf, 10, &input);
3677 	if (err)
3678 		return err;
3679 
3680 	spin_lock_irq(&hugetlb_lock);
3681 	h->nr_overcommit_huge_pages = input;
3682 	spin_unlock_irq(&hugetlb_lock);
3683 
3684 	return count;
3685 }
3686 HSTATE_ATTR(nr_overcommit_hugepages);
3687 
3688 static ssize_t free_hugepages_show(struct kobject *kobj,
3689 					struct kobj_attribute *attr, char *buf)
3690 {
3691 	struct hstate *h;
3692 	unsigned long free_huge_pages;
3693 	int nid;
3694 
3695 	h = kobj_to_hstate(kobj, &nid);
3696 	if (nid == NUMA_NO_NODE)
3697 		free_huge_pages = h->free_huge_pages;
3698 	else
3699 		free_huge_pages = h->free_huge_pages_node[nid];
3700 
3701 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3702 }
3703 HSTATE_ATTR_RO(free_hugepages);
3704 
3705 static ssize_t resv_hugepages_show(struct kobject *kobj,
3706 					struct kobj_attribute *attr, char *buf)
3707 {
3708 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3709 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3710 }
3711 HSTATE_ATTR_RO(resv_hugepages);
3712 
3713 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3714 					struct kobj_attribute *attr, char *buf)
3715 {
3716 	struct hstate *h;
3717 	unsigned long surplus_huge_pages;
3718 	int nid;
3719 
3720 	h = kobj_to_hstate(kobj, &nid);
3721 	if (nid == NUMA_NO_NODE)
3722 		surplus_huge_pages = h->surplus_huge_pages;
3723 	else
3724 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
3725 
3726 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3727 }
3728 HSTATE_ATTR_RO(surplus_hugepages);
3729 
3730 static ssize_t demote_store(struct kobject *kobj,
3731 	       struct kobj_attribute *attr, const char *buf, size_t len)
3732 {
3733 	unsigned long nr_demote;
3734 	unsigned long nr_available;
3735 	nodemask_t nodes_allowed, *n_mask;
3736 	struct hstate *h;
3737 	int err;
3738 	int nid;
3739 
3740 	err = kstrtoul(buf, 10, &nr_demote);
3741 	if (err)
3742 		return err;
3743 	h = kobj_to_hstate(kobj, &nid);
3744 
3745 	if (nid != NUMA_NO_NODE) {
3746 		init_nodemask_of_node(&nodes_allowed, nid);
3747 		n_mask = &nodes_allowed;
3748 	} else {
3749 		n_mask = &node_states[N_MEMORY];
3750 	}
3751 
3752 	/* Synchronize with other sysfs operations modifying huge pages */
3753 	mutex_lock(&h->resize_lock);
3754 	spin_lock_irq(&hugetlb_lock);
3755 
3756 	while (nr_demote) {
3757 		/*
3758 		 * Check for available pages to demote each time thorough the
3759 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
3760 		 */
3761 		if (nid != NUMA_NO_NODE)
3762 			nr_available = h->free_huge_pages_node[nid];
3763 		else
3764 			nr_available = h->free_huge_pages;
3765 		nr_available -= h->resv_huge_pages;
3766 		if (!nr_available)
3767 			break;
3768 
3769 		err = demote_pool_huge_page(h, n_mask);
3770 		if (err)
3771 			break;
3772 
3773 		nr_demote--;
3774 	}
3775 
3776 	spin_unlock_irq(&hugetlb_lock);
3777 	mutex_unlock(&h->resize_lock);
3778 
3779 	if (err)
3780 		return err;
3781 	return len;
3782 }
3783 HSTATE_ATTR_WO(demote);
3784 
3785 static ssize_t demote_size_show(struct kobject *kobj,
3786 					struct kobj_attribute *attr, char *buf)
3787 {
3788 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3789 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3790 
3791 	return sysfs_emit(buf, "%lukB\n", demote_size);
3792 }
3793 
3794 static ssize_t demote_size_store(struct kobject *kobj,
3795 					struct kobj_attribute *attr,
3796 					const char *buf, size_t count)
3797 {
3798 	struct hstate *h, *demote_hstate;
3799 	unsigned long demote_size;
3800 	unsigned int demote_order;
3801 
3802 	demote_size = (unsigned long)memparse(buf, NULL);
3803 
3804 	demote_hstate = size_to_hstate(demote_size);
3805 	if (!demote_hstate)
3806 		return -EINVAL;
3807 	demote_order = demote_hstate->order;
3808 	if (demote_order < HUGETLB_PAGE_ORDER)
3809 		return -EINVAL;
3810 
3811 	/* demote order must be smaller than hstate order */
3812 	h = kobj_to_hstate(kobj, NULL);
3813 	if (demote_order >= h->order)
3814 		return -EINVAL;
3815 
3816 	/* resize_lock synchronizes access to demote size and writes */
3817 	mutex_lock(&h->resize_lock);
3818 	h->demote_order = demote_order;
3819 	mutex_unlock(&h->resize_lock);
3820 
3821 	return count;
3822 }
3823 HSTATE_ATTR(demote_size);
3824 
3825 static struct attribute *hstate_attrs[] = {
3826 	&nr_hugepages_attr.attr,
3827 	&nr_overcommit_hugepages_attr.attr,
3828 	&free_hugepages_attr.attr,
3829 	&resv_hugepages_attr.attr,
3830 	&surplus_hugepages_attr.attr,
3831 #ifdef CONFIG_NUMA
3832 	&nr_hugepages_mempolicy_attr.attr,
3833 #endif
3834 	NULL,
3835 };
3836 
3837 static const struct attribute_group hstate_attr_group = {
3838 	.attrs = hstate_attrs,
3839 };
3840 
3841 static struct attribute *hstate_demote_attrs[] = {
3842 	&demote_size_attr.attr,
3843 	&demote_attr.attr,
3844 	NULL,
3845 };
3846 
3847 static const struct attribute_group hstate_demote_attr_group = {
3848 	.attrs = hstate_demote_attrs,
3849 };
3850 
3851 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3852 				    struct kobject **hstate_kobjs,
3853 				    const struct attribute_group *hstate_attr_group)
3854 {
3855 	int retval;
3856 	int hi = hstate_index(h);
3857 
3858 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3859 	if (!hstate_kobjs[hi])
3860 		return -ENOMEM;
3861 
3862 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3863 	if (retval) {
3864 		kobject_put(hstate_kobjs[hi]);
3865 		hstate_kobjs[hi] = NULL;
3866 		return retval;
3867 	}
3868 
3869 	if (h->demote_order) {
3870 		retval = sysfs_create_group(hstate_kobjs[hi],
3871 					    &hstate_demote_attr_group);
3872 		if (retval) {
3873 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
3874 			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
3875 			kobject_put(hstate_kobjs[hi]);
3876 			hstate_kobjs[hi] = NULL;
3877 			return retval;
3878 		}
3879 	}
3880 
3881 	return 0;
3882 }
3883 
3884 #ifdef CONFIG_NUMA
3885 static bool hugetlb_sysfs_initialized __ro_after_init;
3886 
3887 /*
3888  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3889  * with node devices in node_devices[] using a parallel array.  The array
3890  * index of a node device or _hstate == node id.
3891  * This is here to avoid any static dependency of the node device driver, in
3892  * the base kernel, on the hugetlb module.
3893  */
3894 struct node_hstate {
3895 	struct kobject		*hugepages_kobj;
3896 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
3897 };
3898 static struct node_hstate node_hstates[MAX_NUMNODES];
3899 
3900 /*
3901  * A subset of global hstate attributes for node devices
3902  */
3903 static struct attribute *per_node_hstate_attrs[] = {
3904 	&nr_hugepages_attr.attr,
3905 	&free_hugepages_attr.attr,
3906 	&surplus_hugepages_attr.attr,
3907 	NULL,
3908 };
3909 
3910 static const struct attribute_group per_node_hstate_attr_group = {
3911 	.attrs = per_node_hstate_attrs,
3912 };
3913 
3914 /*
3915  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3916  * Returns node id via non-NULL nidp.
3917  */
3918 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3919 {
3920 	int nid;
3921 
3922 	for (nid = 0; nid < nr_node_ids; nid++) {
3923 		struct node_hstate *nhs = &node_hstates[nid];
3924 		int i;
3925 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
3926 			if (nhs->hstate_kobjs[i] == kobj) {
3927 				if (nidp)
3928 					*nidp = nid;
3929 				return &hstates[i];
3930 			}
3931 	}
3932 
3933 	BUG();
3934 	return NULL;
3935 }
3936 
3937 /*
3938  * Unregister hstate attributes from a single node device.
3939  * No-op if no hstate attributes attached.
3940  */
3941 void hugetlb_unregister_node(struct node *node)
3942 {
3943 	struct hstate *h;
3944 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3945 
3946 	if (!nhs->hugepages_kobj)
3947 		return;		/* no hstate attributes */
3948 
3949 	for_each_hstate(h) {
3950 		int idx = hstate_index(h);
3951 		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
3952 
3953 		if (!hstate_kobj)
3954 			continue;
3955 		if (h->demote_order)
3956 			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
3957 		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
3958 		kobject_put(hstate_kobj);
3959 		nhs->hstate_kobjs[idx] = NULL;
3960 	}
3961 
3962 	kobject_put(nhs->hugepages_kobj);
3963 	nhs->hugepages_kobj = NULL;
3964 }
3965 
3966 
3967 /*
3968  * Register hstate attributes for a single node device.
3969  * No-op if attributes already registered.
3970  */
3971 void hugetlb_register_node(struct node *node)
3972 {
3973 	struct hstate *h;
3974 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3975 	int err;
3976 
3977 	if (!hugetlb_sysfs_initialized)
3978 		return;
3979 
3980 	if (nhs->hugepages_kobj)
3981 		return;		/* already allocated */
3982 
3983 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3984 							&node->dev.kobj);
3985 	if (!nhs->hugepages_kobj)
3986 		return;
3987 
3988 	for_each_hstate(h) {
3989 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3990 						nhs->hstate_kobjs,
3991 						&per_node_hstate_attr_group);
3992 		if (err) {
3993 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3994 				h->name, node->dev.id);
3995 			hugetlb_unregister_node(node);
3996 			break;
3997 		}
3998 	}
3999 }
4000 
4001 /*
4002  * hugetlb init time:  register hstate attributes for all registered node
4003  * devices of nodes that have memory.  All on-line nodes should have
4004  * registered their associated device by this time.
4005  */
4006 static void __init hugetlb_register_all_nodes(void)
4007 {
4008 	int nid;
4009 
4010 	for_each_online_node(nid)
4011 		hugetlb_register_node(node_devices[nid]);
4012 }
4013 #else	/* !CONFIG_NUMA */
4014 
4015 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4016 {
4017 	BUG();
4018 	if (nidp)
4019 		*nidp = -1;
4020 	return NULL;
4021 }
4022 
4023 static void hugetlb_register_all_nodes(void) { }
4024 
4025 #endif
4026 
4027 #ifdef CONFIG_CMA
4028 static void __init hugetlb_cma_check(void);
4029 #else
4030 static inline __init void hugetlb_cma_check(void)
4031 {
4032 }
4033 #endif
4034 
4035 static void __init hugetlb_sysfs_init(void)
4036 {
4037 	struct hstate *h;
4038 	int err;
4039 
4040 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4041 	if (!hugepages_kobj)
4042 		return;
4043 
4044 	for_each_hstate(h) {
4045 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4046 					 hstate_kobjs, &hstate_attr_group);
4047 		if (err)
4048 			pr_err("HugeTLB: Unable to add hstate %s", h->name);
4049 	}
4050 
4051 #ifdef CONFIG_NUMA
4052 	hugetlb_sysfs_initialized = true;
4053 #endif
4054 	hugetlb_register_all_nodes();
4055 }
4056 
4057 static int __init hugetlb_init(void)
4058 {
4059 	int i;
4060 
4061 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4062 			__NR_HPAGEFLAGS);
4063 
4064 	if (!hugepages_supported()) {
4065 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4066 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4067 		return 0;
4068 	}
4069 
4070 	/*
4071 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4072 	 * architectures depend on setup being done here.
4073 	 */
4074 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4075 	if (!parsed_default_hugepagesz) {
4076 		/*
4077 		 * If we did not parse a default huge page size, set
4078 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4079 		 * number of huge pages for this default size was implicitly
4080 		 * specified, set that here as well.
4081 		 * Note that the implicit setting will overwrite an explicit
4082 		 * setting.  A warning will be printed in this case.
4083 		 */
4084 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4085 		if (default_hstate_max_huge_pages) {
4086 			if (default_hstate.max_huge_pages) {
4087 				char buf[32];
4088 
4089 				string_get_size(huge_page_size(&default_hstate),
4090 					1, STRING_UNITS_2, buf, 32);
4091 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4092 					default_hstate.max_huge_pages, buf);
4093 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4094 					default_hstate_max_huge_pages);
4095 			}
4096 			default_hstate.max_huge_pages =
4097 				default_hstate_max_huge_pages;
4098 
4099 			for_each_online_node(i)
4100 				default_hstate.max_huge_pages_node[i] =
4101 					default_hugepages_in_node[i];
4102 		}
4103 	}
4104 
4105 	hugetlb_cma_check();
4106 	hugetlb_init_hstates();
4107 	gather_bootmem_prealloc();
4108 	report_hugepages();
4109 
4110 	hugetlb_sysfs_init();
4111 	hugetlb_cgroup_file_init();
4112 
4113 #ifdef CONFIG_SMP
4114 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4115 #else
4116 	num_fault_mutexes = 1;
4117 #endif
4118 	hugetlb_fault_mutex_table =
4119 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4120 			      GFP_KERNEL);
4121 	BUG_ON(!hugetlb_fault_mutex_table);
4122 
4123 	for (i = 0; i < num_fault_mutexes; i++)
4124 		mutex_init(&hugetlb_fault_mutex_table[i]);
4125 	return 0;
4126 }
4127 subsys_initcall(hugetlb_init);
4128 
4129 /* Overwritten by architectures with more huge page sizes */
4130 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4131 {
4132 	return size == HPAGE_SIZE;
4133 }
4134 
4135 void __init hugetlb_add_hstate(unsigned int order)
4136 {
4137 	struct hstate *h;
4138 	unsigned long i;
4139 
4140 	if (size_to_hstate(PAGE_SIZE << order)) {
4141 		return;
4142 	}
4143 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4144 	BUG_ON(order == 0);
4145 	h = &hstates[hugetlb_max_hstate++];
4146 	mutex_init(&h->resize_lock);
4147 	h->order = order;
4148 	h->mask = ~(huge_page_size(h) - 1);
4149 	for (i = 0; i < MAX_NUMNODES; ++i)
4150 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4151 	INIT_LIST_HEAD(&h->hugepage_activelist);
4152 	h->next_nid_to_alloc = first_memory_node;
4153 	h->next_nid_to_free = first_memory_node;
4154 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4155 					huge_page_size(h)/SZ_1K);
4156 
4157 	parsed_hstate = h;
4158 }
4159 
4160 bool __init __weak hugetlb_node_alloc_supported(void)
4161 {
4162 	return true;
4163 }
4164 
4165 static void __init hugepages_clear_pages_in_node(void)
4166 {
4167 	if (!hugetlb_max_hstate) {
4168 		default_hstate_max_huge_pages = 0;
4169 		memset(default_hugepages_in_node, 0,
4170 			sizeof(default_hugepages_in_node));
4171 	} else {
4172 		parsed_hstate->max_huge_pages = 0;
4173 		memset(parsed_hstate->max_huge_pages_node, 0,
4174 			sizeof(parsed_hstate->max_huge_pages_node));
4175 	}
4176 }
4177 
4178 /*
4179  * hugepages command line processing
4180  * hugepages normally follows a valid hugepagsz or default_hugepagsz
4181  * specification.  If not, ignore the hugepages value.  hugepages can also
4182  * be the first huge page command line  option in which case it implicitly
4183  * specifies the number of huge pages for the default size.
4184  */
4185 static int __init hugepages_setup(char *s)
4186 {
4187 	unsigned long *mhp;
4188 	static unsigned long *last_mhp;
4189 	int node = NUMA_NO_NODE;
4190 	int count;
4191 	unsigned long tmp;
4192 	char *p = s;
4193 
4194 	if (!parsed_valid_hugepagesz) {
4195 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4196 		parsed_valid_hugepagesz = true;
4197 		return 1;
4198 	}
4199 
4200 	/*
4201 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4202 	 * yet, so this hugepages= parameter goes to the "default hstate".
4203 	 * Otherwise, it goes with the previously parsed hugepagesz or
4204 	 * default_hugepagesz.
4205 	 */
4206 	else if (!hugetlb_max_hstate)
4207 		mhp = &default_hstate_max_huge_pages;
4208 	else
4209 		mhp = &parsed_hstate->max_huge_pages;
4210 
4211 	if (mhp == last_mhp) {
4212 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4213 		return 1;
4214 	}
4215 
4216 	while (*p) {
4217 		count = 0;
4218 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4219 			goto invalid;
4220 		/* Parameter is node format */
4221 		if (p[count] == ':') {
4222 			if (!hugetlb_node_alloc_supported()) {
4223 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4224 				return 1;
4225 			}
4226 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4227 				goto invalid;
4228 			node = array_index_nospec(tmp, MAX_NUMNODES);
4229 			p += count + 1;
4230 			/* Parse hugepages */
4231 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4232 				goto invalid;
4233 			if (!hugetlb_max_hstate)
4234 				default_hugepages_in_node[node] = tmp;
4235 			else
4236 				parsed_hstate->max_huge_pages_node[node] = tmp;
4237 			*mhp += tmp;
4238 			/* Go to parse next node*/
4239 			if (p[count] == ',')
4240 				p += count + 1;
4241 			else
4242 				break;
4243 		} else {
4244 			if (p != s)
4245 				goto invalid;
4246 			*mhp = tmp;
4247 			break;
4248 		}
4249 	}
4250 
4251 	/*
4252 	 * Global state is always initialized later in hugetlb_init.
4253 	 * But we need to allocate gigantic hstates here early to still
4254 	 * use the bootmem allocator.
4255 	 */
4256 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4257 		hugetlb_hstate_alloc_pages(parsed_hstate);
4258 
4259 	last_mhp = mhp;
4260 
4261 	return 1;
4262 
4263 invalid:
4264 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4265 	hugepages_clear_pages_in_node();
4266 	return 1;
4267 }
4268 __setup("hugepages=", hugepages_setup);
4269 
4270 /*
4271  * hugepagesz command line processing
4272  * A specific huge page size can only be specified once with hugepagesz.
4273  * hugepagesz is followed by hugepages on the command line.  The global
4274  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4275  * hugepagesz argument was valid.
4276  */
4277 static int __init hugepagesz_setup(char *s)
4278 {
4279 	unsigned long size;
4280 	struct hstate *h;
4281 
4282 	parsed_valid_hugepagesz = false;
4283 	size = (unsigned long)memparse(s, NULL);
4284 
4285 	if (!arch_hugetlb_valid_size(size)) {
4286 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4287 		return 1;
4288 	}
4289 
4290 	h = size_to_hstate(size);
4291 	if (h) {
4292 		/*
4293 		 * hstate for this size already exists.  This is normally
4294 		 * an error, but is allowed if the existing hstate is the
4295 		 * default hstate.  More specifically, it is only allowed if
4296 		 * the number of huge pages for the default hstate was not
4297 		 * previously specified.
4298 		 */
4299 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4300 		    default_hstate.max_huge_pages) {
4301 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4302 			return 1;
4303 		}
4304 
4305 		/*
4306 		 * No need to call hugetlb_add_hstate() as hstate already
4307 		 * exists.  But, do set parsed_hstate so that a following
4308 		 * hugepages= parameter will be applied to this hstate.
4309 		 */
4310 		parsed_hstate = h;
4311 		parsed_valid_hugepagesz = true;
4312 		return 1;
4313 	}
4314 
4315 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4316 	parsed_valid_hugepagesz = true;
4317 	return 1;
4318 }
4319 __setup("hugepagesz=", hugepagesz_setup);
4320 
4321 /*
4322  * default_hugepagesz command line input
4323  * Only one instance of default_hugepagesz allowed on command line.
4324  */
4325 static int __init default_hugepagesz_setup(char *s)
4326 {
4327 	unsigned long size;
4328 	int i;
4329 
4330 	parsed_valid_hugepagesz = false;
4331 	if (parsed_default_hugepagesz) {
4332 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4333 		return 1;
4334 	}
4335 
4336 	size = (unsigned long)memparse(s, NULL);
4337 
4338 	if (!arch_hugetlb_valid_size(size)) {
4339 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4340 		return 1;
4341 	}
4342 
4343 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4344 	parsed_valid_hugepagesz = true;
4345 	parsed_default_hugepagesz = true;
4346 	default_hstate_idx = hstate_index(size_to_hstate(size));
4347 
4348 	/*
4349 	 * The number of default huge pages (for this size) could have been
4350 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4351 	 * then default_hstate_max_huge_pages is set.  If the default huge
4352 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
4353 	 * allocated here from bootmem allocator.
4354 	 */
4355 	if (default_hstate_max_huge_pages) {
4356 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4357 		for_each_online_node(i)
4358 			default_hstate.max_huge_pages_node[i] =
4359 				default_hugepages_in_node[i];
4360 		if (hstate_is_gigantic(&default_hstate))
4361 			hugetlb_hstate_alloc_pages(&default_hstate);
4362 		default_hstate_max_huge_pages = 0;
4363 	}
4364 
4365 	return 1;
4366 }
4367 __setup("default_hugepagesz=", default_hugepagesz_setup);
4368 
4369 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4370 {
4371 #ifdef CONFIG_NUMA
4372 	struct mempolicy *mpol = get_task_policy(current);
4373 
4374 	/*
4375 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4376 	 * (from policy_nodemask) specifically for hugetlb case
4377 	 */
4378 	if (mpol->mode == MPOL_BIND &&
4379 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
4380 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4381 		return &mpol->nodes;
4382 #endif
4383 	return NULL;
4384 }
4385 
4386 static unsigned int allowed_mems_nr(struct hstate *h)
4387 {
4388 	int node;
4389 	unsigned int nr = 0;
4390 	nodemask_t *mbind_nodemask;
4391 	unsigned int *array = h->free_huge_pages_node;
4392 	gfp_t gfp_mask = htlb_alloc_mask(h);
4393 
4394 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4395 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4396 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4397 			nr += array[node];
4398 	}
4399 
4400 	return nr;
4401 }
4402 
4403 #ifdef CONFIG_SYSCTL
4404 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4405 					  void *buffer, size_t *length,
4406 					  loff_t *ppos, unsigned long *out)
4407 {
4408 	struct ctl_table dup_table;
4409 
4410 	/*
4411 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
4412 	 * can duplicate the @table and alter the duplicate of it.
4413 	 */
4414 	dup_table = *table;
4415 	dup_table.data = out;
4416 
4417 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4418 }
4419 
4420 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4421 			 struct ctl_table *table, int write,
4422 			 void *buffer, size_t *length, loff_t *ppos)
4423 {
4424 	struct hstate *h = &default_hstate;
4425 	unsigned long tmp = h->max_huge_pages;
4426 	int ret;
4427 
4428 	if (!hugepages_supported())
4429 		return -EOPNOTSUPP;
4430 
4431 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4432 					     &tmp);
4433 	if (ret)
4434 		goto out;
4435 
4436 	if (write)
4437 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4438 						  NUMA_NO_NODE, tmp, *length);
4439 out:
4440 	return ret;
4441 }
4442 
4443 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4444 			  void *buffer, size_t *length, loff_t *ppos)
4445 {
4446 
4447 	return hugetlb_sysctl_handler_common(false, table, write,
4448 							buffer, length, ppos);
4449 }
4450 
4451 #ifdef CONFIG_NUMA
4452 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4453 			  void *buffer, size_t *length, loff_t *ppos)
4454 {
4455 	return hugetlb_sysctl_handler_common(true, table, write,
4456 							buffer, length, ppos);
4457 }
4458 #endif /* CONFIG_NUMA */
4459 
4460 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4461 		void *buffer, size_t *length, loff_t *ppos)
4462 {
4463 	struct hstate *h = &default_hstate;
4464 	unsigned long tmp;
4465 	int ret;
4466 
4467 	if (!hugepages_supported())
4468 		return -EOPNOTSUPP;
4469 
4470 	tmp = h->nr_overcommit_huge_pages;
4471 
4472 	if (write && hstate_is_gigantic(h))
4473 		return -EINVAL;
4474 
4475 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4476 					     &tmp);
4477 	if (ret)
4478 		goto out;
4479 
4480 	if (write) {
4481 		spin_lock_irq(&hugetlb_lock);
4482 		h->nr_overcommit_huge_pages = tmp;
4483 		spin_unlock_irq(&hugetlb_lock);
4484 	}
4485 out:
4486 	return ret;
4487 }
4488 
4489 #endif /* CONFIG_SYSCTL */
4490 
4491 void hugetlb_report_meminfo(struct seq_file *m)
4492 {
4493 	struct hstate *h;
4494 	unsigned long total = 0;
4495 
4496 	if (!hugepages_supported())
4497 		return;
4498 
4499 	for_each_hstate(h) {
4500 		unsigned long count = h->nr_huge_pages;
4501 
4502 		total += huge_page_size(h) * count;
4503 
4504 		if (h == &default_hstate)
4505 			seq_printf(m,
4506 				   "HugePages_Total:   %5lu\n"
4507 				   "HugePages_Free:    %5lu\n"
4508 				   "HugePages_Rsvd:    %5lu\n"
4509 				   "HugePages_Surp:    %5lu\n"
4510 				   "Hugepagesize:   %8lu kB\n",
4511 				   count,
4512 				   h->free_huge_pages,
4513 				   h->resv_huge_pages,
4514 				   h->surplus_huge_pages,
4515 				   huge_page_size(h) / SZ_1K);
4516 	}
4517 
4518 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4519 }
4520 
4521 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4522 {
4523 	struct hstate *h = &default_hstate;
4524 
4525 	if (!hugepages_supported())
4526 		return 0;
4527 
4528 	return sysfs_emit_at(buf, len,
4529 			     "Node %d HugePages_Total: %5u\n"
4530 			     "Node %d HugePages_Free:  %5u\n"
4531 			     "Node %d HugePages_Surp:  %5u\n",
4532 			     nid, h->nr_huge_pages_node[nid],
4533 			     nid, h->free_huge_pages_node[nid],
4534 			     nid, h->surplus_huge_pages_node[nid]);
4535 }
4536 
4537 void hugetlb_show_meminfo_node(int nid)
4538 {
4539 	struct hstate *h;
4540 
4541 	if (!hugepages_supported())
4542 		return;
4543 
4544 	for_each_hstate(h)
4545 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4546 			nid,
4547 			h->nr_huge_pages_node[nid],
4548 			h->free_huge_pages_node[nid],
4549 			h->surplus_huge_pages_node[nid],
4550 			huge_page_size(h) / SZ_1K);
4551 }
4552 
4553 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4554 {
4555 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4556 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4557 }
4558 
4559 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
4560 unsigned long hugetlb_total_pages(void)
4561 {
4562 	struct hstate *h;
4563 	unsigned long nr_total_pages = 0;
4564 
4565 	for_each_hstate(h)
4566 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4567 	return nr_total_pages;
4568 }
4569 
4570 static int hugetlb_acct_memory(struct hstate *h, long delta)
4571 {
4572 	int ret = -ENOMEM;
4573 
4574 	if (!delta)
4575 		return 0;
4576 
4577 	spin_lock_irq(&hugetlb_lock);
4578 	/*
4579 	 * When cpuset is configured, it breaks the strict hugetlb page
4580 	 * reservation as the accounting is done on a global variable. Such
4581 	 * reservation is completely rubbish in the presence of cpuset because
4582 	 * the reservation is not checked against page availability for the
4583 	 * current cpuset. Application can still potentially OOM'ed by kernel
4584 	 * with lack of free htlb page in cpuset that the task is in.
4585 	 * Attempt to enforce strict accounting with cpuset is almost
4586 	 * impossible (or too ugly) because cpuset is too fluid that
4587 	 * task or memory node can be dynamically moved between cpusets.
4588 	 *
4589 	 * The change of semantics for shared hugetlb mapping with cpuset is
4590 	 * undesirable. However, in order to preserve some of the semantics,
4591 	 * we fall back to check against current free page availability as
4592 	 * a best attempt and hopefully to minimize the impact of changing
4593 	 * semantics that cpuset has.
4594 	 *
4595 	 * Apart from cpuset, we also have memory policy mechanism that
4596 	 * also determines from which node the kernel will allocate memory
4597 	 * in a NUMA system. So similar to cpuset, we also should consider
4598 	 * the memory policy of the current task. Similar to the description
4599 	 * above.
4600 	 */
4601 	if (delta > 0) {
4602 		if (gather_surplus_pages(h, delta) < 0)
4603 			goto out;
4604 
4605 		if (delta > allowed_mems_nr(h)) {
4606 			return_unused_surplus_pages(h, delta);
4607 			goto out;
4608 		}
4609 	}
4610 
4611 	ret = 0;
4612 	if (delta < 0)
4613 		return_unused_surplus_pages(h, (unsigned long) -delta);
4614 
4615 out:
4616 	spin_unlock_irq(&hugetlb_lock);
4617 	return ret;
4618 }
4619 
4620 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4621 {
4622 	struct resv_map *resv = vma_resv_map(vma);
4623 
4624 	/*
4625 	 * HPAGE_RESV_OWNER indicates a private mapping.
4626 	 * This new VMA should share its siblings reservation map if present.
4627 	 * The VMA will only ever have a valid reservation map pointer where
4628 	 * it is being copied for another still existing VMA.  As that VMA
4629 	 * has a reference to the reservation map it cannot disappear until
4630 	 * after this open call completes.  It is therefore safe to take a
4631 	 * new reference here without additional locking.
4632 	 */
4633 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4634 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4635 		kref_get(&resv->refs);
4636 	}
4637 
4638 	/*
4639 	 * vma_lock structure for sharable mappings is vma specific.
4640 	 * Clear old pointer (if copied via vm_area_dup) and allocate
4641 	 * new structure.  Before clearing, make sure vma_lock is not
4642 	 * for this vma.
4643 	 */
4644 	if (vma->vm_flags & VM_MAYSHARE) {
4645 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4646 
4647 		if (vma_lock) {
4648 			if (vma_lock->vma != vma) {
4649 				vma->vm_private_data = NULL;
4650 				hugetlb_vma_lock_alloc(vma);
4651 			} else
4652 				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4653 		} else
4654 			hugetlb_vma_lock_alloc(vma);
4655 	}
4656 }
4657 
4658 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4659 {
4660 	struct hstate *h = hstate_vma(vma);
4661 	struct resv_map *resv;
4662 	struct hugepage_subpool *spool = subpool_vma(vma);
4663 	unsigned long reserve, start, end;
4664 	long gbl_reserve;
4665 
4666 	hugetlb_vma_lock_free(vma);
4667 
4668 	resv = vma_resv_map(vma);
4669 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4670 		return;
4671 
4672 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4673 	end = vma_hugecache_offset(h, vma, vma->vm_end);
4674 
4675 	reserve = (end - start) - region_count(resv, start, end);
4676 	hugetlb_cgroup_uncharge_counter(resv, start, end);
4677 	if (reserve) {
4678 		/*
4679 		 * Decrement reserve counts.  The global reserve count may be
4680 		 * adjusted if the subpool has a minimum size.
4681 		 */
4682 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4683 		hugetlb_acct_memory(h, -gbl_reserve);
4684 	}
4685 
4686 	kref_put(&resv->refs, resv_map_release);
4687 }
4688 
4689 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4690 {
4691 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4692 		return -EINVAL;
4693 	return 0;
4694 }
4695 
4696 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4697 {
4698 	return huge_page_size(hstate_vma(vma));
4699 }
4700 
4701 /*
4702  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4703  * handle_mm_fault() to try to instantiate regular-sized pages in the
4704  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
4705  * this far.
4706  */
4707 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4708 {
4709 	BUG();
4710 	return 0;
4711 }
4712 
4713 /*
4714  * When a new function is introduced to vm_operations_struct and added
4715  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4716  * This is because under System V memory model, mappings created via
4717  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4718  * their original vm_ops are overwritten with shm_vm_ops.
4719  */
4720 const struct vm_operations_struct hugetlb_vm_ops = {
4721 	.fault = hugetlb_vm_op_fault,
4722 	.open = hugetlb_vm_op_open,
4723 	.close = hugetlb_vm_op_close,
4724 	.may_split = hugetlb_vm_op_split,
4725 	.pagesize = hugetlb_vm_op_pagesize,
4726 };
4727 
4728 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
4729 				int writable)
4730 {
4731 	pte_t entry;
4732 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4733 
4734 	if (writable) {
4735 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4736 					 vma->vm_page_prot)));
4737 	} else {
4738 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4739 					   vma->vm_page_prot));
4740 	}
4741 	entry = pte_mkyoung(entry);
4742 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4743 
4744 	return entry;
4745 }
4746 
4747 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4748 				   unsigned long address, pte_t *ptep)
4749 {
4750 	pte_t entry;
4751 
4752 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
4753 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4754 		update_mmu_cache(vma, address, ptep);
4755 }
4756 
4757 bool is_hugetlb_entry_migration(pte_t pte)
4758 {
4759 	swp_entry_t swp;
4760 
4761 	if (huge_pte_none(pte) || pte_present(pte))
4762 		return false;
4763 	swp = pte_to_swp_entry(pte);
4764 	if (is_migration_entry(swp))
4765 		return true;
4766 	else
4767 		return false;
4768 }
4769 
4770 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4771 {
4772 	swp_entry_t swp;
4773 
4774 	if (huge_pte_none(pte) || pte_present(pte))
4775 		return false;
4776 	swp = pte_to_swp_entry(pte);
4777 	if (is_hwpoison_entry(swp))
4778 		return true;
4779 	else
4780 		return false;
4781 }
4782 
4783 static void
4784 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4785 		     struct page *new_page)
4786 {
4787 	__SetPageUptodate(new_page);
4788 	hugepage_add_new_anon_rmap(new_page, vma, addr);
4789 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
4790 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4791 	SetHPageMigratable(new_page);
4792 }
4793 
4794 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4795 			    struct vm_area_struct *dst_vma,
4796 			    struct vm_area_struct *src_vma)
4797 {
4798 	pte_t *src_pte, *dst_pte, entry;
4799 	struct page *ptepage;
4800 	unsigned long addr;
4801 	bool cow = is_cow_mapping(src_vma->vm_flags);
4802 	struct hstate *h = hstate_vma(src_vma);
4803 	unsigned long sz = huge_page_size(h);
4804 	unsigned long npages = pages_per_huge_page(h);
4805 	struct mmu_notifier_range range;
4806 	unsigned long last_addr_mask;
4807 	int ret = 0;
4808 
4809 	if (cow) {
4810 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
4811 					src_vma->vm_start,
4812 					src_vma->vm_end);
4813 		mmu_notifier_invalidate_range_start(&range);
4814 		mmap_assert_write_locked(src);
4815 		raw_write_seqcount_begin(&src->write_protect_seq);
4816 	} else {
4817 		/*
4818 		 * For shared mappings the vma lock must be held before
4819 		 * calling huge_pte_offset in the src vma. Otherwise, the
4820 		 * returned ptep could go away if part of a shared pmd and
4821 		 * another thread calls huge_pmd_unshare.
4822 		 */
4823 		hugetlb_vma_lock_read(src_vma);
4824 	}
4825 
4826 	last_addr_mask = hugetlb_mask_last_page(h);
4827 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4828 		spinlock_t *src_ptl, *dst_ptl;
4829 		src_pte = huge_pte_offset(src, addr, sz);
4830 		if (!src_pte) {
4831 			addr |= last_addr_mask;
4832 			continue;
4833 		}
4834 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4835 		if (!dst_pte) {
4836 			ret = -ENOMEM;
4837 			break;
4838 		}
4839 
4840 		/*
4841 		 * If the pagetables are shared don't copy or take references.
4842 		 *
4843 		 * dst_pte == src_pte is the common case of src/dest sharing.
4844 		 * However, src could have 'unshared' and dst shares with
4845 		 * another vma. So page_count of ptep page is checked instead
4846 		 * to reliably determine whether pte is shared.
4847 		 */
4848 		if (page_count(virt_to_page(dst_pte)) > 1) {
4849 			addr |= last_addr_mask;
4850 			continue;
4851 		}
4852 
4853 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4854 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4855 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4856 		entry = huge_ptep_get(src_pte);
4857 again:
4858 		if (huge_pte_none(entry)) {
4859 			/*
4860 			 * Skip if src entry none.
4861 			 */
4862 			;
4863 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
4864 			bool uffd_wp = huge_pte_uffd_wp(entry);
4865 
4866 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4867 				entry = huge_pte_clear_uffd_wp(entry);
4868 			set_huge_pte_at(dst, addr, dst_pte, entry);
4869 		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
4870 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4871 			bool uffd_wp = huge_pte_uffd_wp(entry);
4872 
4873 			if (!is_readable_migration_entry(swp_entry) && cow) {
4874 				/*
4875 				 * COW mappings require pages in both
4876 				 * parent and child to be set to read.
4877 				 */
4878 				swp_entry = make_readable_migration_entry(
4879 							swp_offset(swp_entry));
4880 				entry = swp_entry_to_pte(swp_entry);
4881 				if (userfaultfd_wp(src_vma) && uffd_wp)
4882 					entry = huge_pte_mkuffd_wp(entry);
4883 				set_huge_pte_at(src, addr, src_pte, entry);
4884 			}
4885 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4886 				entry = huge_pte_clear_uffd_wp(entry);
4887 			set_huge_pte_at(dst, addr, dst_pte, entry);
4888 		} else if (unlikely(is_pte_marker(entry))) {
4889 			/*
4890 			 * We copy the pte marker only if the dst vma has
4891 			 * uffd-wp enabled.
4892 			 */
4893 			if (userfaultfd_wp(dst_vma))
4894 				set_huge_pte_at(dst, addr, dst_pte, entry);
4895 		} else {
4896 			entry = huge_ptep_get(src_pte);
4897 			ptepage = pte_page(entry);
4898 			get_page(ptepage);
4899 
4900 			/*
4901 			 * Failing to duplicate the anon rmap is a rare case
4902 			 * where we see pinned hugetlb pages while they're
4903 			 * prone to COW. We need to do the COW earlier during
4904 			 * fork.
4905 			 *
4906 			 * When pre-allocating the page or copying data, we
4907 			 * need to be without the pgtable locks since we could
4908 			 * sleep during the process.
4909 			 */
4910 			if (!PageAnon(ptepage)) {
4911 				page_dup_file_rmap(ptepage, true);
4912 			} else if (page_try_dup_anon_rmap(ptepage, true,
4913 							  src_vma)) {
4914 				pte_t src_pte_old = entry;
4915 				struct page *new;
4916 
4917 				spin_unlock(src_ptl);
4918 				spin_unlock(dst_ptl);
4919 				/* Do not use reserve as it's private owned */
4920 				new = alloc_huge_page(dst_vma, addr, 1);
4921 				if (IS_ERR(new)) {
4922 					put_page(ptepage);
4923 					ret = PTR_ERR(new);
4924 					break;
4925 				}
4926 				copy_user_huge_page(new, ptepage, addr, dst_vma,
4927 						    npages);
4928 				put_page(ptepage);
4929 
4930 				/* Install the new huge page if src pte stable */
4931 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
4932 				src_ptl = huge_pte_lockptr(h, src, src_pte);
4933 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4934 				entry = huge_ptep_get(src_pte);
4935 				if (!pte_same(src_pte_old, entry)) {
4936 					restore_reserve_on_error(h, dst_vma, addr,
4937 								new);
4938 					put_page(new);
4939 					/* huge_ptep of dst_pte won't change as in child */
4940 					goto again;
4941 				}
4942 				hugetlb_install_page(dst_vma, dst_pte, addr, new);
4943 				spin_unlock(src_ptl);
4944 				spin_unlock(dst_ptl);
4945 				continue;
4946 			}
4947 
4948 			if (cow) {
4949 				/*
4950 				 * No need to notify as we are downgrading page
4951 				 * table protection not changing it to point
4952 				 * to a new page.
4953 				 *
4954 				 * See Documentation/mm/mmu_notifier.rst
4955 				 */
4956 				huge_ptep_set_wrprotect(src, addr, src_pte);
4957 				entry = huge_pte_wrprotect(entry);
4958 			}
4959 
4960 			set_huge_pte_at(dst, addr, dst_pte, entry);
4961 			hugetlb_count_add(npages, dst);
4962 		}
4963 		spin_unlock(src_ptl);
4964 		spin_unlock(dst_ptl);
4965 	}
4966 
4967 	if (cow) {
4968 		raw_write_seqcount_end(&src->write_protect_seq);
4969 		mmu_notifier_invalidate_range_end(&range);
4970 	} else {
4971 		hugetlb_vma_unlock_read(src_vma);
4972 	}
4973 
4974 	return ret;
4975 }
4976 
4977 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4978 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4979 {
4980 	struct hstate *h = hstate_vma(vma);
4981 	struct mm_struct *mm = vma->vm_mm;
4982 	spinlock_t *src_ptl, *dst_ptl;
4983 	pte_t pte;
4984 
4985 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4986 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4987 
4988 	/*
4989 	 * We don't have to worry about the ordering of src and dst ptlocks
4990 	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4991 	 */
4992 	if (src_ptl != dst_ptl)
4993 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4994 
4995 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4996 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4997 
4998 	if (src_ptl != dst_ptl)
4999 		spin_unlock(src_ptl);
5000 	spin_unlock(dst_ptl);
5001 }
5002 
5003 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5004 			     struct vm_area_struct *new_vma,
5005 			     unsigned long old_addr, unsigned long new_addr,
5006 			     unsigned long len)
5007 {
5008 	struct hstate *h = hstate_vma(vma);
5009 	struct address_space *mapping = vma->vm_file->f_mapping;
5010 	unsigned long sz = huge_page_size(h);
5011 	struct mm_struct *mm = vma->vm_mm;
5012 	unsigned long old_end = old_addr + len;
5013 	unsigned long last_addr_mask;
5014 	pte_t *src_pte, *dst_pte;
5015 	struct mmu_notifier_range range;
5016 	bool shared_pmd = false;
5017 
5018 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
5019 				old_end);
5020 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5021 	/*
5022 	 * In case of shared PMDs, we should cover the maximum possible
5023 	 * range.
5024 	 */
5025 	flush_cache_range(vma, range.start, range.end);
5026 
5027 	mmu_notifier_invalidate_range_start(&range);
5028 	last_addr_mask = hugetlb_mask_last_page(h);
5029 	/* Prevent race with file truncation */
5030 	hugetlb_vma_lock_write(vma);
5031 	i_mmap_lock_write(mapping);
5032 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5033 		src_pte = huge_pte_offset(mm, old_addr, sz);
5034 		if (!src_pte) {
5035 			old_addr |= last_addr_mask;
5036 			new_addr |= last_addr_mask;
5037 			continue;
5038 		}
5039 		if (huge_pte_none(huge_ptep_get(src_pte)))
5040 			continue;
5041 
5042 		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5043 			shared_pmd = true;
5044 			old_addr |= last_addr_mask;
5045 			new_addr |= last_addr_mask;
5046 			continue;
5047 		}
5048 
5049 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5050 		if (!dst_pte)
5051 			break;
5052 
5053 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
5054 	}
5055 
5056 	if (shared_pmd)
5057 		flush_tlb_range(vma, range.start, range.end);
5058 	else
5059 		flush_tlb_range(vma, old_end - len, old_end);
5060 	mmu_notifier_invalidate_range_end(&range);
5061 	i_mmap_unlock_write(mapping);
5062 	hugetlb_vma_unlock_write(vma);
5063 
5064 	return len + old_addr - old_end;
5065 }
5066 
5067 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5068 				   unsigned long start, unsigned long end,
5069 				   struct page *ref_page, zap_flags_t zap_flags)
5070 {
5071 	struct mm_struct *mm = vma->vm_mm;
5072 	unsigned long address;
5073 	pte_t *ptep;
5074 	pte_t pte;
5075 	spinlock_t *ptl;
5076 	struct page *page;
5077 	struct hstate *h = hstate_vma(vma);
5078 	unsigned long sz = huge_page_size(h);
5079 	unsigned long last_addr_mask;
5080 	bool force_flush = false;
5081 
5082 	WARN_ON(!is_vm_hugetlb_page(vma));
5083 	BUG_ON(start & ~huge_page_mask(h));
5084 	BUG_ON(end & ~huge_page_mask(h));
5085 
5086 	/*
5087 	 * This is a hugetlb vma, all the pte entries should point
5088 	 * to huge page.
5089 	 */
5090 	tlb_change_page_size(tlb, sz);
5091 	tlb_start_vma(tlb, vma);
5092 
5093 	last_addr_mask = hugetlb_mask_last_page(h);
5094 	address = start;
5095 	for (; address < end; address += sz) {
5096 		ptep = huge_pte_offset(mm, address, sz);
5097 		if (!ptep) {
5098 			address |= last_addr_mask;
5099 			continue;
5100 		}
5101 
5102 		ptl = huge_pte_lock(h, mm, ptep);
5103 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
5104 			spin_unlock(ptl);
5105 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5106 			force_flush = true;
5107 			address |= last_addr_mask;
5108 			continue;
5109 		}
5110 
5111 		pte = huge_ptep_get(ptep);
5112 		if (huge_pte_none(pte)) {
5113 			spin_unlock(ptl);
5114 			continue;
5115 		}
5116 
5117 		/*
5118 		 * Migrating hugepage or HWPoisoned hugepage is already
5119 		 * unmapped and its refcount is dropped, so just clear pte here.
5120 		 */
5121 		if (unlikely(!pte_present(pte))) {
5122 			/*
5123 			 * If the pte was wr-protected by uffd-wp in any of the
5124 			 * swap forms, meanwhile the caller does not want to
5125 			 * drop the uffd-wp bit in this zap, then replace the
5126 			 * pte with a marker.
5127 			 */
5128 			if (pte_swp_uffd_wp_any(pte) &&
5129 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5130 				set_huge_pte_at(mm, address, ptep,
5131 						make_pte_marker(PTE_MARKER_UFFD_WP));
5132 			else
5133 				huge_pte_clear(mm, address, ptep, sz);
5134 			spin_unlock(ptl);
5135 			continue;
5136 		}
5137 
5138 		page = pte_page(pte);
5139 		/*
5140 		 * If a reference page is supplied, it is because a specific
5141 		 * page is being unmapped, not a range. Ensure the page we
5142 		 * are about to unmap is the actual page of interest.
5143 		 */
5144 		if (ref_page) {
5145 			if (page != ref_page) {
5146 				spin_unlock(ptl);
5147 				continue;
5148 			}
5149 			/*
5150 			 * Mark the VMA as having unmapped its page so that
5151 			 * future faults in this VMA will fail rather than
5152 			 * looking like data was lost
5153 			 */
5154 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5155 		}
5156 
5157 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5158 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5159 		if (huge_pte_dirty(pte))
5160 			set_page_dirty(page);
5161 		/* Leave a uffd-wp pte marker if needed */
5162 		if (huge_pte_uffd_wp(pte) &&
5163 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5164 			set_huge_pte_at(mm, address, ptep,
5165 					make_pte_marker(PTE_MARKER_UFFD_WP));
5166 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5167 		page_remove_rmap(page, vma, true);
5168 
5169 		spin_unlock(ptl);
5170 		tlb_remove_page_size(tlb, page, huge_page_size(h));
5171 		/*
5172 		 * Bail out after unmapping reference page if supplied
5173 		 */
5174 		if (ref_page)
5175 			break;
5176 	}
5177 	tlb_end_vma(tlb, vma);
5178 
5179 	/*
5180 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5181 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5182 	 * guaranteed that the last refernece would not be dropped. But we must
5183 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5184 	 * dropped and the last reference to the shared PMDs page might be
5185 	 * dropped as well.
5186 	 *
5187 	 * In theory we could defer the freeing of the PMD pages as well, but
5188 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5189 	 * detect sharing, so we cannot defer the release of the page either.
5190 	 * Instead, do flush now.
5191 	 */
5192 	if (force_flush)
5193 		tlb_flush_mmu_tlbonly(tlb);
5194 }
5195 
5196 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5197 			  struct vm_area_struct *vma, unsigned long start,
5198 			  unsigned long end, struct page *ref_page,
5199 			  zap_flags_t zap_flags)
5200 {
5201 	hugetlb_vma_lock_write(vma);
5202 	i_mmap_lock_write(vma->vm_file->f_mapping);
5203 
5204 	/* mmu notification performed in caller */
5205 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5206 
5207 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
5208 		/*
5209 		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5210 		 * When the vma_lock is freed, this makes the vma ineligible
5211 		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
5212 		 * pmd sharing.  This is important as page tables for this
5213 		 * unmapped range will be asynchrously deleted.  If the page
5214 		 * tables are shared, there will be issues when accessed by
5215 		 * someone else.
5216 		 */
5217 		__hugetlb_vma_unlock_write_free(vma);
5218 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5219 	} else {
5220 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5221 		hugetlb_vma_unlock_write(vma);
5222 	}
5223 }
5224 
5225 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5226 			  unsigned long end, struct page *ref_page,
5227 			  zap_flags_t zap_flags)
5228 {
5229 	struct mmu_notifier_range range;
5230 	struct mmu_gather tlb;
5231 
5232 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
5233 				start, end);
5234 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5235 	mmu_notifier_invalidate_range_start(&range);
5236 	tlb_gather_mmu(&tlb, vma->vm_mm);
5237 
5238 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5239 
5240 	mmu_notifier_invalidate_range_end(&range);
5241 	tlb_finish_mmu(&tlb);
5242 }
5243 
5244 /*
5245  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5246  * mapping it owns the reserve page for. The intention is to unmap the page
5247  * from other VMAs and let the children be SIGKILLed if they are faulting the
5248  * same region.
5249  */
5250 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5251 			      struct page *page, unsigned long address)
5252 {
5253 	struct hstate *h = hstate_vma(vma);
5254 	struct vm_area_struct *iter_vma;
5255 	struct address_space *mapping;
5256 	pgoff_t pgoff;
5257 
5258 	/*
5259 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5260 	 * from page cache lookup which is in HPAGE_SIZE units.
5261 	 */
5262 	address = address & huge_page_mask(h);
5263 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5264 			vma->vm_pgoff;
5265 	mapping = vma->vm_file->f_mapping;
5266 
5267 	/*
5268 	 * Take the mapping lock for the duration of the table walk. As
5269 	 * this mapping should be shared between all the VMAs,
5270 	 * __unmap_hugepage_range() is called as the lock is already held
5271 	 */
5272 	i_mmap_lock_write(mapping);
5273 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5274 		/* Do not unmap the current VMA */
5275 		if (iter_vma == vma)
5276 			continue;
5277 
5278 		/*
5279 		 * Shared VMAs have their own reserves and do not affect
5280 		 * MAP_PRIVATE accounting but it is possible that a shared
5281 		 * VMA is using the same page so check and skip such VMAs.
5282 		 */
5283 		if (iter_vma->vm_flags & VM_MAYSHARE)
5284 			continue;
5285 
5286 		/*
5287 		 * Unmap the page from other VMAs without their own reserves.
5288 		 * They get marked to be SIGKILLed if they fault in these
5289 		 * areas. This is because a future no-page fault on this VMA
5290 		 * could insert a zeroed page instead of the data existing
5291 		 * from the time of fork. This would look like data corruption
5292 		 */
5293 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5294 			unmap_hugepage_range(iter_vma, address,
5295 					     address + huge_page_size(h), page, 0);
5296 	}
5297 	i_mmap_unlock_write(mapping);
5298 }
5299 
5300 /*
5301  * hugetlb_wp() should be called with page lock of the original hugepage held.
5302  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5303  * cannot race with other handlers or page migration.
5304  * Keep the pte_same checks anyway to make transition from the mutex easier.
5305  */
5306 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5307 		       unsigned long address, pte_t *ptep, unsigned int flags,
5308 		       struct page *pagecache_page, spinlock_t *ptl)
5309 {
5310 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
5311 	pte_t pte;
5312 	struct hstate *h = hstate_vma(vma);
5313 	struct page *old_page, *new_page;
5314 	int outside_reserve = 0;
5315 	vm_fault_t ret = 0;
5316 	unsigned long haddr = address & huge_page_mask(h);
5317 	struct mmu_notifier_range range;
5318 
5319 	/*
5320 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
5321 	 * PTE mapped R/O such as maybe_mkwrite() would do.
5322 	 */
5323 	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5324 		return VM_FAULT_SIGSEGV;
5325 
5326 	/* Let's take out MAP_SHARED mappings first. */
5327 	if (vma->vm_flags & VM_MAYSHARE) {
5328 		set_huge_ptep_writable(vma, haddr, ptep);
5329 		return 0;
5330 	}
5331 
5332 	pte = huge_ptep_get(ptep);
5333 	old_page = pte_page(pte);
5334 
5335 	delayacct_wpcopy_start();
5336 
5337 retry_avoidcopy:
5338 	/*
5339 	 * If no-one else is actually using this page, we're the exclusive
5340 	 * owner and can reuse this page.
5341 	 */
5342 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5343 		if (!PageAnonExclusive(old_page))
5344 			page_move_anon_rmap(old_page, vma);
5345 		if (likely(!unshare))
5346 			set_huge_ptep_writable(vma, haddr, ptep);
5347 
5348 		delayacct_wpcopy_end();
5349 		return 0;
5350 	}
5351 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
5352 		       old_page);
5353 
5354 	/*
5355 	 * If the process that created a MAP_PRIVATE mapping is about to
5356 	 * perform a COW due to a shared page count, attempt to satisfy
5357 	 * the allocation without using the existing reserves. The pagecache
5358 	 * page is used to determine if the reserve at this address was
5359 	 * consumed or not. If reserves were used, a partial faulted mapping
5360 	 * at the time of fork() could consume its reserves on COW instead
5361 	 * of the full address range.
5362 	 */
5363 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5364 			old_page != pagecache_page)
5365 		outside_reserve = 1;
5366 
5367 	get_page(old_page);
5368 
5369 	/*
5370 	 * Drop page table lock as buddy allocator may be called. It will
5371 	 * be acquired again before returning to the caller, as expected.
5372 	 */
5373 	spin_unlock(ptl);
5374 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
5375 
5376 	if (IS_ERR(new_page)) {
5377 		/*
5378 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5379 		 * it is due to references held by a child and an insufficient
5380 		 * huge page pool. To guarantee the original mappers
5381 		 * reliability, unmap the page from child processes. The child
5382 		 * may get SIGKILLed if it later faults.
5383 		 */
5384 		if (outside_reserve) {
5385 			struct address_space *mapping = vma->vm_file->f_mapping;
5386 			pgoff_t idx;
5387 			u32 hash;
5388 
5389 			put_page(old_page);
5390 			/*
5391 			 * Drop hugetlb_fault_mutex and vma_lock before
5392 			 * unmapping.  unmapping needs to hold vma_lock
5393 			 * in write mode.  Dropping vma_lock in read mode
5394 			 * here is OK as COW mappings do not interact with
5395 			 * PMD sharing.
5396 			 *
5397 			 * Reacquire both after unmap operation.
5398 			 */
5399 			idx = vma_hugecache_offset(h, vma, haddr);
5400 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5401 			hugetlb_vma_unlock_read(vma);
5402 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5403 
5404 			unmap_ref_private(mm, vma, old_page, haddr);
5405 
5406 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5407 			hugetlb_vma_lock_read(vma);
5408 			spin_lock(ptl);
5409 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5410 			if (likely(ptep &&
5411 				   pte_same(huge_ptep_get(ptep), pte)))
5412 				goto retry_avoidcopy;
5413 			/*
5414 			 * race occurs while re-acquiring page table
5415 			 * lock, and our job is done.
5416 			 */
5417 			delayacct_wpcopy_end();
5418 			return 0;
5419 		}
5420 
5421 		ret = vmf_error(PTR_ERR(new_page));
5422 		goto out_release_old;
5423 	}
5424 
5425 	/*
5426 	 * When the original hugepage is shared one, it does not have
5427 	 * anon_vma prepared.
5428 	 */
5429 	if (unlikely(anon_vma_prepare(vma))) {
5430 		ret = VM_FAULT_OOM;
5431 		goto out_release_all;
5432 	}
5433 
5434 	copy_user_huge_page(new_page, old_page, address, vma,
5435 			    pages_per_huge_page(h));
5436 	__SetPageUptodate(new_page);
5437 
5438 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
5439 				haddr + huge_page_size(h));
5440 	mmu_notifier_invalidate_range_start(&range);
5441 
5442 	/*
5443 	 * Retake the page table lock to check for racing updates
5444 	 * before the page tables are altered
5445 	 */
5446 	spin_lock(ptl);
5447 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5448 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5449 		/* Break COW or unshare */
5450 		huge_ptep_clear_flush(vma, haddr, ptep);
5451 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5452 		page_remove_rmap(old_page, vma, true);
5453 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
5454 		set_huge_pte_at(mm, haddr, ptep,
5455 				make_huge_pte(vma, new_page, !unshare));
5456 		SetHPageMigratable(new_page);
5457 		/* Make the old page be freed below */
5458 		new_page = old_page;
5459 	}
5460 	spin_unlock(ptl);
5461 	mmu_notifier_invalidate_range_end(&range);
5462 out_release_all:
5463 	/*
5464 	 * No restore in case of successful pagetable update (Break COW or
5465 	 * unshare)
5466 	 */
5467 	if (new_page != old_page)
5468 		restore_reserve_on_error(h, vma, haddr, new_page);
5469 	put_page(new_page);
5470 out_release_old:
5471 	put_page(old_page);
5472 
5473 	spin_lock(ptl); /* Caller expects lock to be held */
5474 
5475 	delayacct_wpcopy_end();
5476 	return ret;
5477 }
5478 
5479 /*
5480  * Return whether there is a pagecache page to back given address within VMA.
5481  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
5482  */
5483 static bool hugetlbfs_pagecache_present(struct hstate *h,
5484 			struct vm_area_struct *vma, unsigned long address)
5485 {
5486 	struct address_space *mapping;
5487 	pgoff_t idx;
5488 	struct page *page;
5489 
5490 	mapping = vma->vm_file->f_mapping;
5491 	idx = vma_hugecache_offset(h, vma, address);
5492 
5493 	page = find_get_page(mapping, idx);
5494 	if (page)
5495 		put_page(page);
5496 	return page != NULL;
5497 }
5498 
5499 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
5500 			   pgoff_t idx)
5501 {
5502 	struct folio *folio = page_folio(page);
5503 	struct inode *inode = mapping->host;
5504 	struct hstate *h = hstate_inode(inode);
5505 	int err;
5506 
5507 	__folio_set_locked(folio);
5508 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5509 
5510 	if (unlikely(err)) {
5511 		__folio_clear_locked(folio);
5512 		return err;
5513 	}
5514 	ClearHPageRestoreReserve(page);
5515 
5516 	/*
5517 	 * mark folio dirty so that it will not be removed from cache/file
5518 	 * by non-hugetlbfs specific code paths.
5519 	 */
5520 	folio_mark_dirty(folio);
5521 
5522 	spin_lock(&inode->i_lock);
5523 	inode->i_blocks += blocks_per_huge_page(h);
5524 	spin_unlock(&inode->i_lock);
5525 	return 0;
5526 }
5527 
5528 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
5529 						  struct address_space *mapping,
5530 						  pgoff_t idx,
5531 						  unsigned int flags,
5532 						  unsigned long haddr,
5533 						  unsigned long addr,
5534 						  unsigned long reason)
5535 {
5536 	u32 hash;
5537 	struct vm_fault vmf = {
5538 		.vma = vma,
5539 		.address = haddr,
5540 		.real_address = addr,
5541 		.flags = flags,
5542 
5543 		/*
5544 		 * Hard to debug if it ends up being
5545 		 * used by a callee that assumes
5546 		 * something about the other
5547 		 * uninitialized fields... same as in
5548 		 * memory.c
5549 		 */
5550 	};
5551 
5552 	/*
5553 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5554 	 * userfault. Also mmap_lock could be dropped due to handling
5555 	 * userfault, any vma operation should be careful from here.
5556 	 */
5557 	hugetlb_vma_unlock_read(vma);
5558 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5559 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5560 	return handle_userfault(&vmf, reason);
5561 }
5562 
5563 /*
5564  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5565  * false if pte changed or is changing.
5566  */
5567 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
5568 			       pte_t *ptep, pte_t old_pte)
5569 {
5570 	spinlock_t *ptl;
5571 	bool same;
5572 
5573 	ptl = huge_pte_lock(h, mm, ptep);
5574 	same = pte_same(huge_ptep_get(ptep), old_pte);
5575 	spin_unlock(ptl);
5576 
5577 	return same;
5578 }
5579 
5580 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
5581 			struct vm_area_struct *vma,
5582 			struct address_space *mapping, pgoff_t idx,
5583 			unsigned long address, pte_t *ptep,
5584 			pte_t old_pte, unsigned int flags)
5585 {
5586 	struct hstate *h = hstate_vma(vma);
5587 	vm_fault_t ret = VM_FAULT_SIGBUS;
5588 	int anon_rmap = 0;
5589 	unsigned long size;
5590 	struct page *page;
5591 	pte_t new_pte;
5592 	spinlock_t *ptl;
5593 	unsigned long haddr = address & huge_page_mask(h);
5594 	bool new_page, new_pagecache_page = false;
5595 	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
5596 
5597 	/*
5598 	 * Currently, we are forced to kill the process in the event the
5599 	 * original mapper has unmapped pages from the child due to a failed
5600 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5601 	 * be obvious.
5602 	 */
5603 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5604 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5605 			   current->pid);
5606 		goto out;
5607 	}
5608 
5609 	/*
5610 	 * Use page lock to guard against racing truncation
5611 	 * before we get page_table_lock.
5612 	 */
5613 	new_page = false;
5614 	page = find_lock_page(mapping, idx);
5615 	if (!page) {
5616 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5617 		if (idx >= size)
5618 			goto out;
5619 		/* Check for page in userfault range */
5620 		if (userfaultfd_missing(vma)) {
5621 			/*
5622 			 * Since hugetlb_no_page() was examining pte
5623 			 * without pgtable lock, we need to re-test under
5624 			 * lock because the pte may not be stable and could
5625 			 * have changed from under us.  Try to detect
5626 			 * either changed or during-changing ptes and retry
5627 			 * properly when needed.
5628 			 *
5629 			 * Note that userfaultfd is actually fine with
5630 			 * false positives (e.g. caused by pte changed),
5631 			 * but not wrong logical events (e.g. caused by
5632 			 * reading a pte during changing).  The latter can
5633 			 * confuse the userspace, so the strictness is very
5634 			 * much preferred.  E.g., MISSING event should
5635 			 * never happen on the page after UFFDIO_COPY has
5636 			 * correctly installed the page and returned.
5637 			 */
5638 			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5639 				ret = 0;
5640 				goto out;
5641 			}
5642 
5643 			return hugetlb_handle_userfault(vma, mapping, idx, flags,
5644 							haddr, address,
5645 							VM_UFFD_MISSING);
5646 		}
5647 
5648 		page = alloc_huge_page(vma, haddr, 0);
5649 		if (IS_ERR(page)) {
5650 			/*
5651 			 * Returning error will result in faulting task being
5652 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
5653 			 * tasks from racing to fault in the same page which
5654 			 * could result in false unable to allocate errors.
5655 			 * Page migration does not take the fault mutex, but
5656 			 * does a clear then write of pte's under page table
5657 			 * lock.  Page fault code could race with migration,
5658 			 * notice the clear pte and try to allocate a page
5659 			 * here.  Before returning error, get ptl and make
5660 			 * sure there really is no pte entry.
5661 			 */
5662 			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5663 				ret = vmf_error(PTR_ERR(page));
5664 			else
5665 				ret = 0;
5666 			goto out;
5667 		}
5668 		clear_huge_page(page, address, pages_per_huge_page(h));
5669 		__SetPageUptodate(page);
5670 		new_page = true;
5671 
5672 		if (vma->vm_flags & VM_MAYSHARE) {
5673 			int err = hugetlb_add_to_page_cache(page, mapping, idx);
5674 			if (err) {
5675 				/*
5676 				 * err can't be -EEXIST which implies someone
5677 				 * else consumed the reservation since hugetlb
5678 				 * fault mutex is held when add a hugetlb page
5679 				 * to the page cache. So it's safe to call
5680 				 * restore_reserve_on_error() here.
5681 				 */
5682 				restore_reserve_on_error(h, vma, haddr, page);
5683 				put_page(page);
5684 				goto out;
5685 			}
5686 			new_pagecache_page = true;
5687 		} else {
5688 			lock_page(page);
5689 			if (unlikely(anon_vma_prepare(vma))) {
5690 				ret = VM_FAULT_OOM;
5691 				goto backout_unlocked;
5692 			}
5693 			anon_rmap = 1;
5694 		}
5695 	} else {
5696 		/*
5697 		 * If memory error occurs between mmap() and fault, some process
5698 		 * don't have hwpoisoned swap entry for errored virtual address.
5699 		 * So we need to block hugepage fault by PG_hwpoison bit check.
5700 		 */
5701 		if (unlikely(PageHWPoison(page))) {
5702 			ret = VM_FAULT_HWPOISON_LARGE |
5703 				VM_FAULT_SET_HINDEX(hstate_index(h));
5704 			goto backout_unlocked;
5705 		}
5706 
5707 		/* Check for page in userfault range. */
5708 		if (userfaultfd_minor(vma)) {
5709 			unlock_page(page);
5710 			put_page(page);
5711 			/* See comment in userfaultfd_missing() block above */
5712 			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5713 				ret = 0;
5714 				goto out;
5715 			}
5716 			return hugetlb_handle_userfault(vma, mapping, idx, flags,
5717 							haddr, address,
5718 							VM_UFFD_MINOR);
5719 		}
5720 	}
5721 
5722 	/*
5723 	 * If we are going to COW a private mapping later, we examine the
5724 	 * pending reservations for this page now. This will ensure that
5725 	 * any allocations necessary to record that reservation occur outside
5726 	 * the spinlock.
5727 	 */
5728 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5729 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5730 			ret = VM_FAULT_OOM;
5731 			goto backout_unlocked;
5732 		}
5733 		/* Just decrements count, does not deallocate */
5734 		vma_end_reservation(h, vma, haddr);
5735 	}
5736 
5737 	ptl = huge_pte_lock(h, mm, ptep);
5738 	ret = 0;
5739 	/* If pte changed from under us, retry */
5740 	if (!pte_same(huge_ptep_get(ptep), old_pte))
5741 		goto backout;
5742 
5743 	if (anon_rmap)
5744 		hugepage_add_new_anon_rmap(page, vma, haddr);
5745 	else
5746 		page_dup_file_rmap(page, true);
5747 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
5748 				&& (vma->vm_flags & VM_SHARED)));
5749 	/*
5750 	 * If this pte was previously wr-protected, keep it wr-protected even
5751 	 * if populated.
5752 	 */
5753 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5754 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5755 	set_huge_pte_at(mm, haddr, ptep, new_pte);
5756 
5757 	hugetlb_count_add(pages_per_huge_page(h), mm);
5758 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5759 		/* Optimization, do the COW without a second fault */
5760 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
5761 	}
5762 
5763 	spin_unlock(ptl);
5764 
5765 	/*
5766 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
5767 	 * found in the pagecache may not have HPageMigratableset if they have
5768 	 * been isolated for migration.
5769 	 */
5770 	if (new_page)
5771 		SetHPageMigratable(page);
5772 
5773 	unlock_page(page);
5774 out:
5775 	hugetlb_vma_unlock_read(vma);
5776 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5777 	return ret;
5778 
5779 backout:
5780 	spin_unlock(ptl);
5781 backout_unlocked:
5782 	if (new_page && !new_pagecache_page)
5783 		restore_reserve_on_error(h, vma, haddr, page);
5784 
5785 	unlock_page(page);
5786 	put_page(page);
5787 	goto out;
5788 }
5789 
5790 #ifdef CONFIG_SMP
5791 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5792 {
5793 	unsigned long key[2];
5794 	u32 hash;
5795 
5796 	key[0] = (unsigned long) mapping;
5797 	key[1] = idx;
5798 
5799 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5800 
5801 	return hash & (num_fault_mutexes - 1);
5802 }
5803 #else
5804 /*
5805  * For uniprocessor systems we always use a single mutex, so just
5806  * return 0 and avoid the hashing overhead.
5807  */
5808 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5809 {
5810 	return 0;
5811 }
5812 #endif
5813 
5814 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5815 			unsigned long address, unsigned int flags)
5816 {
5817 	pte_t *ptep, entry;
5818 	spinlock_t *ptl;
5819 	vm_fault_t ret;
5820 	u32 hash;
5821 	pgoff_t idx;
5822 	struct page *page = NULL;
5823 	struct page *pagecache_page = NULL;
5824 	struct hstate *h = hstate_vma(vma);
5825 	struct address_space *mapping;
5826 	int need_wait_lock = 0;
5827 	unsigned long haddr = address & huge_page_mask(h);
5828 
5829 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5830 	if (ptep) {
5831 		/*
5832 		 * Since we hold no locks, ptep could be stale.  That is
5833 		 * OK as we are only making decisions based on content and
5834 		 * not actually modifying content here.
5835 		 */
5836 		entry = huge_ptep_get(ptep);
5837 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5838 			migration_entry_wait_huge(vma, ptep);
5839 			return 0;
5840 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5841 			return VM_FAULT_HWPOISON_LARGE |
5842 				VM_FAULT_SET_HINDEX(hstate_index(h));
5843 	}
5844 
5845 	/*
5846 	 * Serialize hugepage allocation and instantiation, so that we don't
5847 	 * get spurious allocation failures if two CPUs race to instantiate
5848 	 * the same page in the page cache.
5849 	 */
5850 	mapping = vma->vm_file->f_mapping;
5851 	idx = vma_hugecache_offset(h, vma, haddr);
5852 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5853 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
5854 
5855 	/*
5856 	 * Acquire vma lock before calling huge_pte_alloc and hold
5857 	 * until finished with ptep.  This prevents huge_pmd_unshare from
5858 	 * being called elsewhere and making the ptep no longer valid.
5859 	 *
5860 	 * ptep could have already be assigned via huge_pte_offset.  That
5861 	 * is OK, as huge_pte_alloc will return the same value unless
5862 	 * something has changed.
5863 	 */
5864 	hugetlb_vma_lock_read(vma);
5865 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5866 	if (!ptep) {
5867 		hugetlb_vma_unlock_read(vma);
5868 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5869 		return VM_FAULT_OOM;
5870 	}
5871 
5872 	entry = huge_ptep_get(ptep);
5873 	/* PTE markers should be handled the same way as none pte */
5874 	if (huge_pte_none_mostly(entry))
5875 		/*
5876 		 * hugetlb_no_page will drop vma lock and hugetlb fault
5877 		 * mutex internally, which make us return immediately.
5878 		 */
5879 		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5880 				      entry, flags);
5881 
5882 	ret = 0;
5883 
5884 	/*
5885 	 * entry could be a migration/hwpoison entry at this point, so this
5886 	 * check prevents the kernel from going below assuming that we have
5887 	 * an active hugepage in pagecache. This goto expects the 2nd page
5888 	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
5889 	 * properly handle it.
5890 	 */
5891 	if (!pte_present(entry))
5892 		goto out_mutex;
5893 
5894 	/*
5895 	 * If we are going to COW/unshare the mapping later, we examine the
5896 	 * pending reservations for this page now. This will ensure that any
5897 	 * allocations necessary to record that reservation occur outside the
5898 	 * spinlock. Also lookup the pagecache page now as it is used to
5899 	 * determine if a reservation has been consumed.
5900 	 */
5901 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5902 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
5903 		if (vma_needs_reservation(h, vma, haddr) < 0) {
5904 			ret = VM_FAULT_OOM;
5905 			goto out_mutex;
5906 		}
5907 		/* Just decrements count, does not deallocate */
5908 		vma_end_reservation(h, vma, haddr);
5909 
5910 		pagecache_page = find_lock_page(mapping, idx);
5911 	}
5912 
5913 	ptl = huge_pte_lock(h, mm, ptep);
5914 
5915 	/* Check for a racing update before calling hugetlb_wp() */
5916 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5917 		goto out_ptl;
5918 
5919 	/* Handle userfault-wp first, before trying to lock more pages */
5920 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5921 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5922 		struct vm_fault vmf = {
5923 			.vma = vma,
5924 			.address = haddr,
5925 			.real_address = address,
5926 			.flags = flags,
5927 		};
5928 
5929 		spin_unlock(ptl);
5930 		if (pagecache_page) {
5931 			unlock_page(pagecache_page);
5932 			put_page(pagecache_page);
5933 		}
5934 		hugetlb_vma_unlock_read(vma);
5935 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5936 		return handle_userfault(&vmf, VM_UFFD_WP);
5937 	}
5938 
5939 	/*
5940 	 * hugetlb_wp() requires page locks of pte_page(entry) and
5941 	 * pagecache_page, so here we need take the former one
5942 	 * when page != pagecache_page or !pagecache_page.
5943 	 */
5944 	page = pte_page(entry);
5945 	if (page != pagecache_page)
5946 		if (!trylock_page(page)) {
5947 			need_wait_lock = 1;
5948 			goto out_ptl;
5949 		}
5950 
5951 	get_page(page);
5952 
5953 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5954 		if (!huge_pte_write(entry)) {
5955 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5956 					 pagecache_page, ptl);
5957 			goto out_put_page;
5958 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5959 			entry = huge_pte_mkdirty(entry);
5960 		}
5961 	}
5962 	entry = pte_mkyoung(entry);
5963 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5964 						flags & FAULT_FLAG_WRITE))
5965 		update_mmu_cache(vma, haddr, ptep);
5966 out_put_page:
5967 	if (page != pagecache_page)
5968 		unlock_page(page);
5969 	put_page(page);
5970 out_ptl:
5971 	spin_unlock(ptl);
5972 
5973 	if (pagecache_page) {
5974 		unlock_page(pagecache_page);
5975 		put_page(pagecache_page);
5976 	}
5977 out_mutex:
5978 	hugetlb_vma_unlock_read(vma);
5979 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5980 	/*
5981 	 * Generally it's safe to hold refcount during waiting page lock. But
5982 	 * here we just wait to defer the next page fault to avoid busy loop and
5983 	 * the page is not used after unlocked before returning from the current
5984 	 * page fault. So we are safe from accessing freed page, even if we wait
5985 	 * here without taking refcount.
5986 	 */
5987 	if (need_wait_lock)
5988 		wait_on_page_locked(page);
5989 	return ret;
5990 }
5991 
5992 #ifdef CONFIG_USERFAULTFD
5993 /*
5994  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
5995  * modifications for huge pages.
5996  */
5997 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
5998 			    pte_t *dst_pte,
5999 			    struct vm_area_struct *dst_vma,
6000 			    unsigned long dst_addr,
6001 			    unsigned long src_addr,
6002 			    enum mcopy_atomic_mode mode,
6003 			    struct page **pagep,
6004 			    bool wp_copy)
6005 {
6006 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
6007 	struct hstate *h = hstate_vma(dst_vma);
6008 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
6009 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6010 	unsigned long size;
6011 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
6012 	pte_t _dst_pte;
6013 	spinlock_t *ptl;
6014 	int ret = -ENOMEM;
6015 	struct page *page;
6016 	int writable;
6017 	bool page_in_pagecache = false;
6018 
6019 	if (is_continue) {
6020 		ret = -EFAULT;
6021 		page = find_lock_page(mapping, idx);
6022 		if (!page)
6023 			goto out;
6024 		page_in_pagecache = true;
6025 	} else if (!*pagep) {
6026 		/* If a page already exists, then it's UFFDIO_COPY for
6027 		 * a non-missing case. Return -EEXIST.
6028 		 */
6029 		if (vm_shared &&
6030 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6031 			ret = -EEXIST;
6032 			goto out;
6033 		}
6034 
6035 		page = alloc_huge_page(dst_vma, dst_addr, 0);
6036 		if (IS_ERR(page)) {
6037 			ret = -ENOMEM;
6038 			goto out;
6039 		}
6040 
6041 		ret = copy_huge_page_from_user(page,
6042 						(const void __user *) src_addr,
6043 						pages_per_huge_page(h), false);
6044 
6045 		/* fallback to copy_from_user outside mmap_lock */
6046 		if (unlikely(ret)) {
6047 			ret = -ENOENT;
6048 			/* Free the allocated page which may have
6049 			 * consumed a reservation.
6050 			 */
6051 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
6052 			put_page(page);
6053 
6054 			/* Allocate a temporary page to hold the copied
6055 			 * contents.
6056 			 */
6057 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
6058 			if (!page) {
6059 				ret = -ENOMEM;
6060 				goto out;
6061 			}
6062 			*pagep = page;
6063 			/* Set the outparam pagep and return to the caller to
6064 			 * copy the contents outside the lock. Don't free the
6065 			 * page.
6066 			 */
6067 			goto out;
6068 		}
6069 	} else {
6070 		if (vm_shared &&
6071 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6072 			put_page(*pagep);
6073 			ret = -EEXIST;
6074 			*pagep = NULL;
6075 			goto out;
6076 		}
6077 
6078 		page = alloc_huge_page(dst_vma, dst_addr, 0);
6079 		if (IS_ERR(page)) {
6080 			put_page(*pagep);
6081 			ret = -ENOMEM;
6082 			*pagep = NULL;
6083 			goto out;
6084 		}
6085 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
6086 				    pages_per_huge_page(h));
6087 		put_page(*pagep);
6088 		*pagep = NULL;
6089 	}
6090 
6091 	/*
6092 	 * The memory barrier inside __SetPageUptodate makes sure that
6093 	 * preceding stores to the page contents become visible before
6094 	 * the set_pte_at() write.
6095 	 */
6096 	__SetPageUptodate(page);
6097 
6098 	/* Add shared, newly allocated pages to the page cache. */
6099 	if (vm_shared && !is_continue) {
6100 		size = i_size_read(mapping->host) >> huge_page_shift(h);
6101 		ret = -EFAULT;
6102 		if (idx >= size)
6103 			goto out_release_nounlock;
6104 
6105 		/*
6106 		 * Serialization between remove_inode_hugepages() and
6107 		 * hugetlb_add_to_page_cache() below happens through the
6108 		 * hugetlb_fault_mutex_table that here must be hold by
6109 		 * the caller.
6110 		 */
6111 		ret = hugetlb_add_to_page_cache(page, mapping, idx);
6112 		if (ret)
6113 			goto out_release_nounlock;
6114 		page_in_pagecache = true;
6115 	}
6116 
6117 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
6118 
6119 	ret = -EIO;
6120 	if (PageHWPoison(page))
6121 		goto out_release_unlock;
6122 
6123 	/*
6124 	 * We allow to overwrite a pte marker: consider when both MISSING|WP
6125 	 * registered, we firstly wr-protect a none pte which has no page cache
6126 	 * page backing it, then access the page.
6127 	 */
6128 	ret = -EEXIST;
6129 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
6130 		goto out_release_unlock;
6131 
6132 	if (page_in_pagecache)
6133 		page_dup_file_rmap(page, true);
6134 	else
6135 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
6136 
6137 	/*
6138 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6139 	 * with wp flag set, don't set pte write bit.
6140 	 */
6141 	if (wp_copy || (is_continue && !vm_shared))
6142 		writable = 0;
6143 	else
6144 		writable = dst_vma->vm_flags & VM_WRITE;
6145 
6146 	_dst_pte = make_huge_pte(dst_vma, page, writable);
6147 	/*
6148 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6149 	 * extremely important for hugetlbfs for now since swapping is not
6150 	 * supported, but we should still be clear in that this page cannot be
6151 	 * thrown away at will, even if write bit not set.
6152 	 */
6153 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6154 	_dst_pte = pte_mkyoung(_dst_pte);
6155 
6156 	if (wp_copy)
6157 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6158 
6159 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
6160 
6161 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6162 
6163 	/* No need to invalidate - it was non-present before */
6164 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6165 
6166 	spin_unlock(ptl);
6167 	if (!is_continue)
6168 		SetHPageMigratable(page);
6169 	if (vm_shared || is_continue)
6170 		unlock_page(page);
6171 	ret = 0;
6172 out:
6173 	return ret;
6174 out_release_unlock:
6175 	spin_unlock(ptl);
6176 	if (vm_shared || is_continue)
6177 		unlock_page(page);
6178 out_release_nounlock:
6179 	if (!page_in_pagecache)
6180 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
6181 	put_page(page);
6182 	goto out;
6183 }
6184 #endif /* CONFIG_USERFAULTFD */
6185 
6186 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
6187 				 int refs, struct page **pages,
6188 				 struct vm_area_struct **vmas)
6189 {
6190 	int nr;
6191 
6192 	for (nr = 0; nr < refs; nr++) {
6193 		if (likely(pages))
6194 			pages[nr] = nth_page(page, nr);
6195 		if (vmas)
6196 			vmas[nr] = vma;
6197 	}
6198 }
6199 
6200 static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
6201 					       unsigned int flags, pte_t *pte,
6202 					       bool *unshare)
6203 {
6204 	pte_t pteval = huge_ptep_get(pte);
6205 
6206 	*unshare = false;
6207 	if (is_swap_pte(pteval))
6208 		return true;
6209 	if (huge_pte_write(pteval))
6210 		return false;
6211 	if (flags & FOLL_WRITE)
6212 		return true;
6213 	if (gup_must_unshare(vma, flags, pte_page(pteval))) {
6214 		*unshare = true;
6215 		return true;
6216 	}
6217 	return false;
6218 }
6219 
6220 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
6221 				unsigned long address, unsigned int flags)
6222 {
6223 	struct hstate *h = hstate_vma(vma);
6224 	struct mm_struct *mm = vma->vm_mm;
6225 	unsigned long haddr = address & huge_page_mask(h);
6226 	struct page *page = NULL;
6227 	spinlock_t *ptl;
6228 	pte_t *pte, entry;
6229 
6230 	/*
6231 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
6232 	 * follow_hugetlb_page().
6233 	 */
6234 	if (WARN_ON_ONCE(flags & FOLL_PIN))
6235 		return NULL;
6236 
6237 retry:
6238 	pte = huge_pte_offset(mm, haddr, huge_page_size(h));
6239 	if (!pte)
6240 		return NULL;
6241 
6242 	ptl = huge_pte_lock(h, mm, pte);
6243 	entry = huge_ptep_get(pte);
6244 	if (pte_present(entry)) {
6245 		page = pte_page(entry) +
6246 				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
6247 		/*
6248 		 * Note that page may be a sub-page, and with vmemmap
6249 		 * optimizations the page struct may be read only.
6250 		 * try_grab_page() will increase the ref count on the
6251 		 * head page, so this will be OK.
6252 		 *
6253 		 * try_grab_page() should always succeed here, because we hold
6254 		 * the ptl lock and have verified pte_present().
6255 		 */
6256 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
6257 			page = NULL;
6258 			goto out;
6259 		}
6260 	} else {
6261 		if (is_hugetlb_entry_migration(entry)) {
6262 			spin_unlock(ptl);
6263 			__migration_entry_wait_huge(pte, ptl);
6264 			goto retry;
6265 		}
6266 		/*
6267 		 * hwpoisoned entry is treated as no_page_table in
6268 		 * follow_page_mask().
6269 		 */
6270 	}
6271 out:
6272 	spin_unlock(ptl);
6273 	return page;
6274 }
6275 
6276 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
6277 			 struct page **pages, struct vm_area_struct **vmas,
6278 			 unsigned long *position, unsigned long *nr_pages,
6279 			 long i, unsigned int flags, int *locked)
6280 {
6281 	unsigned long pfn_offset;
6282 	unsigned long vaddr = *position;
6283 	unsigned long remainder = *nr_pages;
6284 	struct hstate *h = hstate_vma(vma);
6285 	int err = -EFAULT, refs;
6286 
6287 	while (vaddr < vma->vm_end && remainder) {
6288 		pte_t *pte;
6289 		spinlock_t *ptl = NULL;
6290 		bool unshare = false;
6291 		int absent;
6292 		struct page *page;
6293 
6294 		/*
6295 		 * If we have a pending SIGKILL, don't keep faulting pages and
6296 		 * potentially allocating memory.
6297 		 */
6298 		if (fatal_signal_pending(current)) {
6299 			remainder = 0;
6300 			break;
6301 		}
6302 
6303 		/*
6304 		 * Some archs (sparc64, sh*) have multiple pte_ts to
6305 		 * each hugepage.  We have to make sure we get the
6306 		 * first, for the page indexing below to work.
6307 		 *
6308 		 * Note that page table lock is not held when pte is null.
6309 		 */
6310 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
6311 				      huge_page_size(h));
6312 		if (pte)
6313 			ptl = huge_pte_lock(h, mm, pte);
6314 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
6315 
6316 		/*
6317 		 * When coredumping, it suits get_dump_page if we just return
6318 		 * an error where there's an empty slot with no huge pagecache
6319 		 * to back it.  This way, we avoid allocating a hugepage, and
6320 		 * the sparse dumpfile avoids allocating disk blocks, but its
6321 		 * huge holes still show up with zeroes where they need to be.
6322 		 */
6323 		if (absent && (flags & FOLL_DUMP) &&
6324 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6325 			if (pte)
6326 				spin_unlock(ptl);
6327 			remainder = 0;
6328 			break;
6329 		}
6330 
6331 		/*
6332 		 * We need call hugetlb_fault for both hugepages under migration
6333 		 * (in which case hugetlb_fault waits for the migration,) and
6334 		 * hwpoisoned hugepages (in which case we need to prevent the
6335 		 * caller from accessing to them.) In order to do this, we use
6336 		 * here is_swap_pte instead of is_hugetlb_entry_migration and
6337 		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
6338 		 * both cases, and because we can't follow correct pages
6339 		 * directly from any kind of swap entries.
6340 		 */
6341 		if (absent ||
6342 		    __follow_hugetlb_must_fault(vma, flags, pte, &unshare)) {
6343 			vm_fault_t ret;
6344 			unsigned int fault_flags = 0;
6345 
6346 			if (pte)
6347 				spin_unlock(ptl);
6348 			if (flags & FOLL_WRITE)
6349 				fault_flags |= FAULT_FLAG_WRITE;
6350 			else if (unshare)
6351 				fault_flags |= FAULT_FLAG_UNSHARE;
6352 			if (locked)
6353 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6354 					FAULT_FLAG_KILLABLE;
6355 			if (flags & FOLL_NOWAIT)
6356 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6357 					FAULT_FLAG_RETRY_NOWAIT;
6358 			if (flags & FOLL_TRIED) {
6359 				/*
6360 				 * Note: FAULT_FLAG_ALLOW_RETRY and
6361 				 * FAULT_FLAG_TRIED can co-exist
6362 				 */
6363 				fault_flags |= FAULT_FLAG_TRIED;
6364 			}
6365 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
6366 			if (ret & VM_FAULT_ERROR) {
6367 				err = vm_fault_to_errno(ret, flags);
6368 				remainder = 0;
6369 				break;
6370 			}
6371 			if (ret & VM_FAULT_RETRY) {
6372 				if (locked &&
6373 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
6374 					*locked = 0;
6375 				*nr_pages = 0;
6376 				/*
6377 				 * VM_FAULT_RETRY must not return an
6378 				 * error, it will return zero
6379 				 * instead.
6380 				 *
6381 				 * No need to update "position" as the
6382 				 * caller will not check it after
6383 				 * *nr_pages is set to 0.
6384 				 */
6385 				return i;
6386 			}
6387 			continue;
6388 		}
6389 
6390 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
6391 		page = pte_page(huge_ptep_get(pte));
6392 
6393 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6394 			       !PageAnonExclusive(page), page);
6395 
6396 		/*
6397 		 * If subpage information not requested, update counters
6398 		 * and skip the same_page loop below.
6399 		 */
6400 		if (!pages && !vmas && !pfn_offset &&
6401 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6402 		    (remainder >= pages_per_huge_page(h))) {
6403 			vaddr += huge_page_size(h);
6404 			remainder -= pages_per_huge_page(h);
6405 			i += pages_per_huge_page(h);
6406 			spin_unlock(ptl);
6407 			continue;
6408 		}
6409 
6410 		/* vaddr may not be aligned to PAGE_SIZE */
6411 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6412 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
6413 
6414 		if (pages || vmas)
6415 			record_subpages_vmas(nth_page(page, pfn_offset),
6416 					     vma, refs,
6417 					     likely(pages) ? pages + i : NULL,
6418 					     vmas ? vmas + i : NULL);
6419 
6420 		if (pages) {
6421 			/*
6422 			 * try_grab_folio() should always succeed here,
6423 			 * because: a) we hold the ptl lock, and b) we've just
6424 			 * checked that the huge page is present in the page
6425 			 * tables. If the huge page is present, then the tail
6426 			 * pages must also be present. The ptl prevents the
6427 			 * head page and tail pages from being rearranged in
6428 			 * any way. So this page must be available at this
6429 			 * point, unless the page refcount overflowed:
6430 			 */
6431 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
6432 							 flags))) {
6433 				spin_unlock(ptl);
6434 				remainder = 0;
6435 				err = -ENOMEM;
6436 				break;
6437 			}
6438 		}
6439 
6440 		vaddr += (refs << PAGE_SHIFT);
6441 		remainder -= refs;
6442 		i += refs;
6443 
6444 		spin_unlock(ptl);
6445 	}
6446 	*nr_pages = remainder;
6447 	/*
6448 	 * setting position is actually required only if remainder is
6449 	 * not zero but it's faster not to add a "if (remainder)"
6450 	 * branch.
6451 	 */
6452 	*position = vaddr;
6453 
6454 	return i ? i : err;
6455 }
6456 
6457 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
6458 		unsigned long address, unsigned long end,
6459 		pgprot_t newprot, unsigned long cp_flags)
6460 {
6461 	struct mm_struct *mm = vma->vm_mm;
6462 	unsigned long start = address;
6463 	pte_t *ptep;
6464 	pte_t pte;
6465 	struct hstate *h = hstate_vma(vma);
6466 	unsigned long pages = 0, psize = huge_page_size(h);
6467 	bool shared_pmd = false;
6468 	struct mmu_notifier_range range;
6469 	unsigned long last_addr_mask;
6470 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6471 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6472 
6473 	/*
6474 	 * In the case of shared PMDs, the area to flush could be beyond
6475 	 * start/end.  Set range.start/range.end to cover the maximum possible
6476 	 * range if PMD sharing is possible.
6477 	 */
6478 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6479 				0, vma, mm, start, end);
6480 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6481 
6482 	BUG_ON(address >= end);
6483 	flush_cache_range(vma, range.start, range.end);
6484 
6485 	mmu_notifier_invalidate_range_start(&range);
6486 	hugetlb_vma_lock_write(vma);
6487 	i_mmap_lock_write(vma->vm_file->f_mapping);
6488 	last_addr_mask = hugetlb_mask_last_page(h);
6489 	for (; address < end; address += psize) {
6490 		spinlock_t *ptl;
6491 		ptep = huge_pte_offset(mm, address, psize);
6492 		if (!ptep) {
6493 			address |= last_addr_mask;
6494 			continue;
6495 		}
6496 		ptl = huge_pte_lock(h, mm, ptep);
6497 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
6498 			/*
6499 			 * When uffd-wp is enabled on the vma, unshare
6500 			 * shouldn't happen at all.  Warn about it if it
6501 			 * happened due to some reason.
6502 			 */
6503 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6504 			pages++;
6505 			spin_unlock(ptl);
6506 			shared_pmd = true;
6507 			address |= last_addr_mask;
6508 			continue;
6509 		}
6510 		pte = huge_ptep_get(ptep);
6511 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6512 			spin_unlock(ptl);
6513 			continue;
6514 		}
6515 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6516 			swp_entry_t entry = pte_to_swp_entry(pte);
6517 			struct page *page = pfn_swap_entry_to_page(entry);
6518 
6519 			if (!is_readable_migration_entry(entry)) {
6520 				pte_t newpte;
6521 
6522 				if (PageAnon(page))
6523 					entry = make_readable_exclusive_migration_entry(
6524 								swp_offset(entry));
6525 				else
6526 					entry = make_readable_migration_entry(
6527 								swp_offset(entry));
6528 				newpte = swp_entry_to_pte(entry);
6529 				if (uffd_wp)
6530 					newpte = pte_swp_mkuffd_wp(newpte);
6531 				else if (uffd_wp_resolve)
6532 					newpte = pte_swp_clear_uffd_wp(newpte);
6533 				set_huge_pte_at(mm, address, ptep, newpte);
6534 				pages++;
6535 			}
6536 			spin_unlock(ptl);
6537 			continue;
6538 		}
6539 		if (unlikely(pte_marker_uffd_wp(pte))) {
6540 			/*
6541 			 * This is changing a non-present pte into a none pte,
6542 			 * no need for huge_ptep_modify_prot_start/commit().
6543 			 */
6544 			if (uffd_wp_resolve)
6545 				huge_pte_clear(mm, address, ptep, psize);
6546 		}
6547 		if (!huge_pte_none(pte)) {
6548 			pte_t old_pte;
6549 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6550 
6551 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6552 			pte = huge_pte_modify(old_pte, newprot);
6553 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6554 			if (uffd_wp)
6555 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
6556 			else if (uffd_wp_resolve)
6557 				pte = huge_pte_clear_uffd_wp(pte);
6558 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6559 			pages++;
6560 		} else {
6561 			/* None pte */
6562 			if (unlikely(uffd_wp))
6563 				/* Safe to modify directly (none->non-present). */
6564 				set_huge_pte_at(mm, address, ptep,
6565 						make_pte_marker(PTE_MARKER_UFFD_WP));
6566 		}
6567 		spin_unlock(ptl);
6568 	}
6569 	/*
6570 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6571 	 * may have cleared our pud entry and done put_page on the page table:
6572 	 * once we release i_mmap_rwsem, another task can do the final put_page
6573 	 * and that page table be reused and filled with junk.  If we actually
6574 	 * did unshare a page of pmds, flush the range corresponding to the pud.
6575 	 */
6576 	if (shared_pmd)
6577 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6578 	else
6579 		flush_hugetlb_tlb_range(vma, start, end);
6580 	/*
6581 	 * No need to call mmu_notifier_invalidate_range() we are downgrading
6582 	 * page table protection not changing it to point to a new page.
6583 	 *
6584 	 * See Documentation/mm/mmu_notifier.rst
6585 	 */
6586 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6587 	hugetlb_vma_unlock_write(vma);
6588 	mmu_notifier_invalidate_range_end(&range);
6589 
6590 	return pages << h->order;
6591 }
6592 
6593 /* Return true if reservation was successful, false otherwise.  */
6594 bool hugetlb_reserve_pages(struct inode *inode,
6595 					long from, long to,
6596 					struct vm_area_struct *vma,
6597 					vm_flags_t vm_flags)
6598 {
6599 	long chg, add = -1;
6600 	struct hstate *h = hstate_inode(inode);
6601 	struct hugepage_subpool *spool = subpool_inode(inode);
6602 	struct resv_map *resv_map;
6603 	struct hugetlb_cgroup *h_cg = NULL;
6604 	long gbl_reserve, regions_needed = 0;
6605 
6606 	/* This should never happen */
6607 	if (from > to) {
6608 		VM_WARN(1, "%s called with a negative range\n", __func__);
6609 		return false;
6610 	}
6611 
6612 	/*
6613 	 * vma specific semaphore used for pmd sharing synchronization
6614 	 */
6615 	hugetlb_vma_lock_alloc(vma);
6616 
6617 	/*
6618 	 * Only apply hugepage reservation if asked. At fault time, an
6619 	 * attempt will be made for VM_NORESERVE to allocate a page
6620 	 * without using reserves
6621 	 */
6622 	if (vm_flags & VM_NORESERVE)
6623 		return true;
6624 
6625 	/*
6626 	 * Shared mappings base their reservation on the number of pages that
6627 	 * are already allocated on behalf of the file. Private mappings need
6628 	 * to reserve the full area even if read-only as mprotect() may be
6629 	 * called to make the mapping read-write. Assume !vma is a shm mapping
6630 	 */
6631 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6632 		/*
6633 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6634 		 * called for inodes for which resv_maps were created (see
6635 		 * hugetlbfs_get_inode).
6636 		 */
6637 		resv_map = inode_resv_map(inode);
6638 
6639 		chg = region_chg(resv_map, from, to, &regions_needed);
6640 	} else {
6641 		/* Private mapping. */
6642 		resv_map = resv_map_alloc();
6643 		if (!resv_map)
6644 			goto out_err;
6645 
6646 		chg = to - from;
6647 
6648 		set_vma_resv_map(vma, resv_map);
6649 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6650 	}
6651 
6652 	if (chg < 0)
6653 		goto out_err;
6654 
6655 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6656 				chg * pages_per_huge_page(h), &h_cg) < 0)
6657 		goto out_err;
6658 
6659 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6660 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6661 		 * of the resv_map.
6662 		 */
6663 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6664 	}
6665 
6666 	/*
6667 	 * There must be enough pages in the subpool for the mapping. If
6668 	 * the subpool has a minimum size, there may be some global
6669 	 * reservations already in place (gbl_reserve).
6670 	 */
6671 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6672 	if (gbl_reserve < 0)
6673 		goto out_uncharge_cgroup;
6674 
6675 	/*
6676 	 * Check enough hugepages are available for the reservation.
6677 	 * Hand the pages back to the subpool if there are not
6678 	 */
6679 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6680 		goto out_put_pages;
6681 
6682 	/*
6683 	 * Account for the reservations made. Shared mappings record regions
6684 	 * that have reservations as they are shared by multiple VMAs.
6685 	 * When the last VMA disappears, the region map says how much
6686 	 * the reservation was and the page cache tells how much of
6687 	 * the reservation was consumed. Private mappings are per-VMA and
6688 	 * only the consumed reservations are tracked. When the VMA
6689 	 * disappears, the original reservation is the VMA size and the
6690 	 * consumed reservations are stored in the map. Hence, nothing
6691 	 * else has to be done for private mappings here
6692 	 */
6693 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6694 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6695 
6696 		if (unlikely(add < 0)) {
6697 			hugetlb_acct_memory(h, -gbl_reserve);
6698 			goto out_put_pages;
6699 		} else if (unlikely(chg > add)) {
6700 			/*
6701 			 * pages in this range were added to the reserve
6702 			 * map between region_chg and region_add.  This
6703 			 * indicates a race with alloc_huge_page.  Adjust
6704 			 * the subpool and reserve counts modified above
6705 			 * based on the difference.
6706 			 */
6707 			long rsv_adjust;
6708 
6709 			/*
6710 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6711 			 * reference to h_cg->css. See comment below for detail.
6712 			 */
6713 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6714 				hstate_index(h),
6715 				(chg - add) * pages_per_huge_page(h), h_cg);
6716 
6717 			rsv_adjust = hugepage_subpool_put_pages(spool,
6718 								chg - add);
6719 			hugetlb_acct_memory(h, -rsv_adjust);
6720 		} else if (h_cg) {
6721 			/*
6722 			 * The file_regions will hold their own reference to
6723 			 * h_cg->css. So we should release the reference held
6724 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6725 			 * done.
6726 			 */
6727 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6728 		}
6729 	}
6730 	return true;
6731 
6732 out_put_pages:
6733 	/* put back original number of pages, chg */
6734 	(void)hugepage_subpool_put_pages(spool, chg);
6735 out_uncharge_cgroup:
6736 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6737 					    chg * pages_per_huge_page(h), h_cg);
6738 out_err:
6739 	hugetlb_vma_lock_free(vma);
6740 	if (!vma || vma->vm_flags & VM_MAYSHARE)
6741 		/* Only call region_abort if the region_chg succeeded but the
6742 		 * region_add failed or didn't run.
6743 		 */
6744 		if (chg >= 0 && add < 0)
6745 			region_abort(resv_map, from, to, regions_needed);
6746 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6747 		kref_put(&resv_map->refs, resv_map_release);
6748 	return false;
6749 }
6750 
6751 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6752 								long freed)
6753 {
6754 	struct hstate *h = hstate_inode(inode);
6755 	struct resv_map *resv_map = inode_resv_map(inode);
6756 	long chg = 0;
6757 	struct hugepage_subpool *spool = subpool_inode(inode);
6758 	long gbl_reserve;
6759 
6760 	/*
6761 	 * Since this routine can be called in the evict inode path for all
6762 	 * hugetlbfs inodes, resv_map could be NULL.
6763 	 */
6764 	if (resv_map) {
6765 		chg = region_del(resv_map, start, end);
6766 		/*
6767 		 * region_del() can fail in the rare case where a region
6768 		 * must be split and another region descriptor can not be
6769 		 * allocated.  If end == LONG_MAX, it will not fail.
6770 		 */
6771 		if (chg < 0)
6772 			return chg;
6773 	}
6774 
6775 	spin_lock(&inode->i_lock);
6776 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6777 	spin_unlock(&inode->i_lock);
6778 
6779 	/*
6780 	 * If the subpool has a minimum size, the number of global
6781 	 * reservations to be released may be adjusted.
6782 	 *
6783 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6784 	 * won't go negative.
6785 	 */
6786 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6787 	hugetlb_acct_memory(h, -gbl_reserve);
6788 
6789 	return 0;
6790 }
6791 
6792 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
6793 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6794 				struct vm_area_struct *vma,
6795 				unsigned long addr, pgoff_t idx)
6796 {
6797 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6798 				svma->vm_start;
6799 	unsigned long sbase = saddr & PUD_MASK;
6800 	unsigned long s_end = sbase + PUD_SIZE;
6801 
6802 	/* Allow segments to share if only one is marked locked */
6803 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6804 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
6805 
6806 	/*
6807 	 * match the virtual addresses, permission and the alignment of the
6808 	 * page table page.
6809 	 *
6810 	 * Also, vma_lock (vm_private_data) is required for sharing.
6811 	 */
6812 	if (pmd_index(addr) != pmd_index(saddr) ||
6813 	    vm_flags != svm_flags ||
6814 	    !range_in_vma(svma, sbase, s_end) ||
6815 	    !svma->vm_private_data)
6816 		return 0;
6817 
6818 	return saddr;
6819 }
6820 
6821 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6822 {
6823 	unsigned long start = addr & PUD_MASK;
6824 	unsigned long end = start + PUD_SIZE;
6825 
6826 #ifdef CONFIG_USERFAULTFD
6827 	if (uffd_disable_huge_pmd_share(vma))
6828 		return false;
6829 #endif
6830 	/*
6831 	 * check on proper vm_flags and page table alignment
6832 	 */
6833 	if (!(vma->vm_flags & VM_MAYSHARE))
6834 		return false;
6835 	if (!vma->vm_private_data)	/* vma lock required for sharing */
6836 		return false;
6837 	if (!range_in_vma(vma, start, end))
6838 		return false;
6839 	return true;
6840 }
6841 
6842 /*
6843  * Determine if start,end range within vma could be mapped by shared pmd.
6844  * If yes, adjust start and end to cover range associated with possible
6845  * shared pmd mappings.
6846  */
6847 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6848 				unsigned long *start, unsigned long *end)
6849 {
6850 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6851 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6852 
6853 	/*
6854 	 * vma needs to span at least one aligned PUD size, and the range
6855 	 * must be at least partially within in.
6856 	 */
6857 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6858 		(*end <= v_start) || (*start >= v_end))
6859 		return;
6860 
6861 	/* Extend the range to be PUD aligned for a worst case scenario */
6862 	if (*start > v_start)
6863 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6864 
6865 	if (*end < v_end)
6866 		*end = ALIGN(*end, PUD_SIZE);
6867 }
6868 
6869 static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
6870 {
6871 	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
6872 		vma->vm_private_data;
6873 }
6874 
6875 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
6876 {
6877 	if (__vma_shareable_flags_pmd(vma)) {
6878 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6879 
6880 		down_read(&vma_lock->rw_sema);
6881 	}
6882 }
6883 
6884 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
6885 {
6886 	if (__vma_shareable_flags_pmd(vma)) {
6887 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6888 
6889 		up_read(&vma_lock->rw_sema);
6890 	}
6891 }
6892 
6893 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
6894 {
6895 	if (__vma_shareable_flags_pmd(vma)) {
6896 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6897 
6898 		down_write(&vma_lock->rw_sema);
6899 	}
6900 }
6901 
6902 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
6903 {
6904 	if (__vma_shareable_flags_pmd(vma)) {
6905 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6906 
6907 		up_write(&vma_lock->rw_sema);
6908 	}
6909 }
6910 
6911 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
6912 {
6913 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6914 
6915 	if (!__vma_shareable_flags_pmd(vma))
6916 		return 1;
6917 
6918 	return down_write_trylock(&vma_lock->rw_sema);
6919 }
6920 
6921 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
6922 {
6923 	if (__vma_shareable_flags_pmd(vma)) {
6924 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6925 
6926 		lockdep_assert_held(&vma_lock->rw_sema);
6927 	}
6928 }
6929 
6930 void hugetlb_vma_lock_release(struct kref *kref)
6931 {
6932 	struct hugetlb_vma_lock *vma_lock = container_of(kref,
6933 			struct hugetlb_vma_lock, refs);
6934 
6935 	kfree(vma_lock);
6936 }
6937 
6938 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
6939 {
6940 	struct vm_area_struct *vma = vma_lock->vma;
6941 
6942 	/*
6943 	 * vma_lock structure may or not be released as a result of put,
6944 	 * it certainly will no longer be attached to vma so clear pointer.
6945 	 * Semaphore synchronizes access to vma_lock->vma field.
6946 	 */
6947 	vma_lock->vma = NULL;
6948 	vma->vm_private_data = NULL;
6949 	up_write(&vma_lock->rw_sema);
6950 	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
6951 }
6952 
6953 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
6954 {
6955 	if (__vma_shareable_flags_pmd(vma)) {
6956 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6957 
6958 		__hugetlb_vma_unlock_write_put(vma_lock);
6959 	}
6960 }
6961 
6962 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
6963 {
6964 	/*
6965 	 * Only present in sharable vmas.
6966 	 */
6967 	if (!vma || !__vma_shareable_flags_pmd(vma))
6968 		return;
6969 
6970 	if (vma->vm_private_data) {
6971 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
6972 
6973 		down_write(&vma_lock->rw_sema);
6974 		__hugetlb_vma_unlock_write_put(vma_lock);
6975 	}
6976 }
6977 
6978 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
6979 {
6980 	struct hugetlb_vma_lock *vma_lock;
6981 
6982 	/* Only establish in (flags) sharable vmas */
6983 	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
6984 		return;
6985 
6986 	/* Should never get here with non-NULL vm_private_data */
6987 	if (vma->vm_private_data)
6988 		return;
6989 
6990 	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
6991 	if (!vma_lock) {
6992 		/*
6993 		 * If we can not allocate structure, then vma can not
6994 		 * participate in pmd sharing.  This is only a possible
6995 		 * performance enhancement and memory saving issue.
6996 		 * However, the lock is also used to synchronize page
6997 		 * faults with truncation.  If the lock is not present,
6998 		 * unlikely races could leave pages in a file past i_size
6999 		 * until the file is removed.  Warn in the unlikely case of
7000 		 * allocation failure.
7001 		 */
7002 		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
7003 		return;
7004 	}
7005 
7006 	kref_init(&vma_lock->refs);
7007 	init_rwsem(&vma_lock->rw_sema);
7008 	vma_lock->vma = vma;
7009 	vma->vm_private_data = vma_lock;
7010 }
7011 
7012 /*
7013  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7014  * and returns the corresponding pte. While this is not necessary for the
7015  * !shared pmd case because we can allocate the pmd later as well, it makes the
7016  * code much cleaner. pmd allocation is essential for the shared case because
7017  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7018  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7019  * bad pmd for sharing.
7020  */
7021 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7022 		      unsigned long addr, pud_t *pud)
7023 {
7024 	struct address_space *mapping = vma->vm_file->f_mapping;
7025 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7026 			vma->vm_pgoff;
7027 	struct vm_area_struct *svma;
7028 	unsigned long saddr;
7029 	pte_t *spte = NULL;
7030 	pte_t *pte;
7031 	spinlock_t *ptl;
7032 
7033 	i_mmap_lock_read(mapping);
7034 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7035 		if (svma == vma)
7036 			continue;
7037 
7038 		saddr = page_table_shareable(svma, vma, addr, idx);
7039 		if (saddr) {
7040 			spte = huge_pte_offset(svma->vm_mm, saddr,
7041 					       vma_mmu_pagesize(svma));
7042 			if (spte) {
7043 				get_page(virt_to_page(spte));
7044 				break;
7045 			}
7046 		}
7047 	}
7048 
7049 	if (!spte)
7050 		goto out;
7051 
7052 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
7053 	if (pud_none(*pud)) {
7054 		pud_populate(mm, pud,
7055 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
7056 		mm_inc_nr_pmds(mm);
7057 	} else {
7058 		put_page(virt_to_page(spte));
7059 	}
7060 	spin_unlock(ptl);
7061 out:
7062 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
7063 	i_mmap_unlock_read(mapping);
7064 	return pte;
7065 }
7066 
7067 /*
7068  * unmap huge page backed by shared pte.
7069  *
7070  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
7071  * indicated by page_count > 1, unmap is achieved by clearing pud and
7072  * decrementing the ref count. If count == 1, the pte page is not shared.
7073  *
7074  * Called with page table lock held.
7075  *
7076  * returns: 1 successfully unmapped a shared pte page
7077  *	    0 the underlying pte page is not shared, or it is the last user
7078  */
7079 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7080 					unsigned long addr, pte_t *ptep)
7081 {
7082 	pgd_t *pgd = pgd_offset(mm, addr);
7083 	p4d_t *p4d = p4d_offset(pgd, addr);
7084 	pud_t *pud = pud_offset(p4d, addr);
7085 
7086 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7087 	hugetlb_vma_assert_locked(vma);
7088 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
7089 	if (page_count(virt_to_page(ptep)) == 1)
7090 		return 0;
7091 
7092 	pud_clear(pud);
7093 	put_page(virt_to_page(ptep));
7094 	mm_dec_nr_pmds(mm);
7095 	return 1;
7096 }
7097 
7098 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7099 
7100 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
7101 {
7102 }
7103 
7104 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
7105 {
7106 }
7107 
7108 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
7109 {
7110 }
7111 
7112 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
7113 {
7114 }
7115 
7116 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
7117 {
7118 	return 1;
7119 }
7120 
7121 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
7122 {
7123 }
7124 
7125 void hugetlb_vma_lock_release(struct kref *kref)
7126 {
7127 }
7128 
7129 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
7130 {
7131 }
7132 
7133 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
7134 {
7135 }
7136 
7137 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
7138 {
7139 }
7140 
7141 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7142 		      unsigned long addr, pud_t *pud)
7143 {
7144 	return NULL;
7145 }
7146 
7147 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7148 				unsigned long addr, pte_t *ptep)
7149 {
7150 	return 0;
7151 }
7152 
7153 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7154 				unsigned long *start, unsigned long *end)
7155 {
7156 }
7157 
7158 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7159 {
7160 	return false;
7161 }
7162 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7163 
7164 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7165 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7166 			unsigned long addr, unsigned long sz)
7167 {
7168 	pgd_t *pgd;
7169 	p4d_t *p4d;
7170 	pud_t *pud;
7171 	pte_t *pte = NULL;
7172 
7173 	pgd = pgd_offset(mm, addr);
7174 	p4d = p4d_alloc(mm, pgd, addr);
7175 	if (!p4d)
7176 		return NULL;
7177 	pud = pud_alloc(mm, p4d, addr);
7178 	if (pud) {
7179 		if (sz == PUD_SIZE) {
7180 			pte = (pte_t *)pud;
7181 		} else {
7182 			BUG_ON(sz != PMD_SIZE);
7183 			if (want_pmd_share(vma, addr) && pud_none(*pud))
7184 				pte = huge_pmd_share(mm, vma, addr, pud);
7185 			else
7186 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
7187 		}
7188 	}
7189 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
7190 
7191 	return pte;
7192 }
7193 
7194 /*
7195  * huge_pte_offset() - Walk the page table to resolve the hugepage
7196  * entry at address @addr
7197  *
7198  * Return: Pointer to page table entry (PUD or PMD) for
7199  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7200  * size @sz doesn't match the hugepage size at this level of the page
7201  * table.
7202  */
7203 pte_t *huge_pte_offset(struct mm_struct *mm,
7204 		       unsigned long addr, unsigned long sz)
7205 {
7206 	pgd_t *pgd;
7207 	p4d_t *p4d;
7208 	pud_t *pud;
7209 	pmd_t *pmd;
7210 
7211 	pgd = pgd_offset(mm, addr);
7212 	if (!pgd_present(*pgd))
7213 		return NULL;
7214 	p4d = p4d_offset(pgd, addr);
7215 	if (!p4d_present(*p4d))
7216 		return NULL;
7217 
7218 	pud = pud_offset(p4d, addr);
7219 	if (sz == PUD_SIZE)
7220 		/* must be pud huge, non-present or none */
7221 		return (pte_t *)pud;
7222 	if (!pud_present(*pud))
7223 		return NULL;
7224 	/* must have a valid entry and size to go further */
7225 
7226 	pmd = pmd_offset(pud, addr);
7227 	/* must be pmd huge, non-present or none */
7228 	return (pte_t *)pmd;
7229 }
7230 
7231 /*
7232  * Return a mask that can be used to update an address to the last huge
7233  * page in a page table page mapping size.  Used to skip non-present
7234  * page table entries when linearly scanning address ranges.  Architectures
7235  * with unique huge page to page table relationships can define their own
7236  * version of this routine.
7237  */
7238 unsigned long hugetlb_mask_last_page(struct hstate *h)
7239 {
7240 	unsigned long hp_size = huge_page_size(h);
7241 
7242 	if (hp_size == PUD_SIZE)
7243 		return P4D_SIZE - PUD_SIZE;
7244 	else if (hp_size == PMD_SIZE)
7245 		return PUD_SIZE - PMD_SIZE;
7246 	else
7247 		return 0UL;
7248 }
7249 
7250 #else
7251 
7252 /* See description above.  Architectures can provide their own version. */
7253 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7254 {
7255 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7256 	if (huge_page_size(h) == PMD_SIZE)
7257 		return PUD_SIZE - PMD_SIZE;
7258 #endif
7259 	return 0UL;
7260 }
7261 
7262 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7263 
7264 /*
7265  * These functions are overwritable if your architecture needs its own
7266  * behavior.
7267  */
7268 int isolate_hugetlb(struct page *page, struct list_head *list)
7269 {
7270 	int ret = 0;
7271 
7272 	spin_lock_irq(&hugetlb_lock);
7273 	if (!PageHeadHuge(page) ||
7274 	    !HPageMigratable(page) ||
7275 	    !get_page_unless_zero(page)) {
7276 		ret = -EBUSY;
7277 		goto unlock;
7278 	}
7279 	ClearHPageMigratable(page);
7280 	list_move_tail(&page->lru, list);
7281 unlock:
7282 	spin_unlock_irq(&hugetlb_lock);
7283 	return ret;
7284 }
7285 
7286 int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
7287 {
7288 	int ret = 0;
7289 
7290 	*hugetlb = false;
7291 	spin_lock_irq(&hugetlb_lock);
7292 	if (PageHeadHuge(page)) {
7293 		*hugetlb = true;
7294 		if (HPageFreed(page))
7295 			ret = 0;
7296 		else if (HPageMigratable(page) || unpoison)
7297 			ret = get_page_unless_zero(page);
7298 		else
7299 			ret = -EBUSY;
7300 	}
7301 	spin_unlock_irq(&hugetlb_lock);
7302 	return ret;
7303 }
7304 
7305 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7306 				bool *migratable_cleared)
7307 {
7308 	int ret;
7309 
7310 	spin_lock_irq(&hugetlb_lock);
7311 	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7312 	spin_unlock_irq(&hugetlb_lock);
7313 	return ret;
7314 }
7315 
7316 void putback_active_hugepage(struct page *page)
7317 {
7318 	spin_lock_irq(&hugetlb_lock);
7319 	SetHPageMigratable(page);
7320 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
7321 	spin_unlock_irq(&hugetlb_lock);
7322 	put_page(page);
7323 }
7324 
7325 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7326 {
7327 	struct hstate *h = folio_hstate(old_folio);
7328 
7329 	hugetlb_cgroup_migrate(old_folio, new_folio);
7330 	set_page_owner_migrate_reason(&new_folio->page, reason);
7331 
7332 	/*
7333 	 * transfer temporary state of the new hugetlb folio. This is
7334 	 * reverse to other transitions because the newpage is going to
7335 	 * be final while the old one will be freed so it takes over
7336 	 * the temporary status.
7337 	 *
7338 	 * Also note that we have to transfer the per-node surplus state
7339 	 * here as well otherwise the global surplus count will not match
7340 	 * the per-node's.
7341 	 */
7342 	if (folio_test_hugetlb_temporary(new_folio)) {
7343 		int old_nid = folio_nid(old_folio);
7344 		int new_nid = folio_nid(new_folio);
7345 
7346 
7347 		folio_set_hugetlb_temporary(old_folio);
7348 		folio_clear_hugetlb_temporary(new_folio);
7349 
7350 
7351 		/*
7352 		 * There is no need to transfer the per-node surplus state
7353 		 * when we do not cross the node.
7354 		 */
7355 		if (new_nid == old_nid)
7356 			return;
7357 		spin_lock_irq(&hugetlb_lock);
7358 		if (h->surplus_huge_pages_node[old_nid]) {
7359 			h->surplus_huge_pages_node[old_nid]--;
7360 			h->surplus_huge_pages_node[new_nid]++;
7361 		}
7362 		spin_unlock_irq(&hugetlb_lock);
7363 	}
7364 }
7365 
7366 /*
7367  * This function will unconditionally remove all the shared pmd pgtable entries
7368  * within the specific vma for a hugetlbfs memory range.
7369  */
7370 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7371 {
7372 	struct hstate *h = hstate_vma(vma);
7373 	unsigned long sz = huge_page_size(h);
7374 	struct mm_struct *mm = vma->vm_mm;
7375 	struct mmu_notifier_range range;
7376 	unsigned long address, start, end;
7377 	spinlock_t *ptl;
7378 	pte_t *ptep;
7379 
7380 	if (!(vma->vm_flags & VM_MAYSHARE))
7381 		return;
7382 
7383 	start = ALIGN(vma->vm_start, PUD_SIZE);
7384 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7385 
7386 	if (start >= end)
7387 		return;
7388 
7389 	flush_cache_range(vma, start, end);
7390 	/*
7391 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
7392 	 * we have already done the PUD_SIZE alignment.
7393 	 */
7394 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
7395 				start, end);
7396 	mmu_notifier_invalidate_range_start(&range);
7397 	hugetlb_vma_lock_write(vma);
7398 	i_mmap_lock_write(vma->vm_file->f_mapping);
7399 	for (address = start; address < end; address += PUD_SIZE) {
7400 		ptep = huge_pte_offset(mm, address, sz);
7401 		if (!ptep)
7402 			continue;
7403 		ptl = huge_pte_lock(h, mm, ptep);
7404 		huge_pmd_unshare(mm, vma, address, ptep);
7405 		spin_unlock(ptl);
7406 	}
7407 	flush_hugetlb_tlb_range(vma, start, end);
7408 	i_mmap_unlock_write(vma->vm_file->f_mapping);
7409 	hugetlb_vma_unlock_write(vma);
7410 	/*
7411 	 * No need to call mmu_notifier_invalidate_range(), see
7412 	 * Documentation/mm/mmu_notifier.rst.
7413 	 */
7414 	mmu_notifier_invalidate_range_end(&range);
7415 }
7416 
7417 #ifdef CONFIG_CMA
7418 static bool cma_reserve_called __initdata;
7419 
7420 static int __init cmdline_parse_hugetlb_cma(char *p)
7421 {
7422 	int nid, count = 0;
7423 	unsigned long tmp;
7424 	char *s = p;
7425 
7426 	while (*s) {
7427 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7428 			break;
7429 
7430 		if (s[count] == ':') {
7431 			if (tmp >= MAX_NUMNODES)
7432 				break;
7433 			nid = array_index_nospec(tmp, MAX_NUMNODES);
7434 
7435 			s += count + 1;
7436 			tmp = memparse(s, &s);
7437 			hugetlb_cma_size_in_node[nid] = tmp;
7438 			hugetlb_cma_size += tmp;
7439 
7440 			/*
7441 			 * Skip the separator if have one, otherwise
7442 			 * break the parsing.
7443 			 */
7444 			if (*s == ',')
7445 				s++;
7446 			else
7447 				break;
7448 		} else {
7449 			hugetlb_cma_size = memparse(p, &p);
7450 			break;
7451 		}
7452 	}
7453 
7454 	return 0;
7455 }
7456 
7457 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7458 
7459 void __init hugetlb_cma_reserve(int order)
7460 {
7461 	unsigned long size, reserved, per_node;
7462 	bool node_specific_cma_alloc = false;
7463 	int nid;
7464 
7465 	cma_reserve_called = true;
7466 
7467 	if (!hugetlb_cma_size)
7468 		return;
7469 
7470 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
7471 		if (hugetlb_cma_size_in_node[nid] == 0)
7472 			continue;
7473 
7474 		if (!node_online(nid)) {
7475 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7476 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7477 			hugetlb_cma_size_in_node[nid] = 0;
7478 			continue;
7479 		}
7480 
7481 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7482 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7483 				nid, (PAGE_SIZE << order) / SZ_1M);
7484 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7485 			hugetlb_cma_size_in_node[nid] = 0;
7486 		} else {
7487 			node_specific_cma_alloc = true;
7488 		}
7489 	}
7490 
7491 	/* Validate the CMA size again in case some invalid nodes specified. */
7492 	if (!hugetlb_cma_size)
7493 		return;
7494 
7495 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7496 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7497 			(PAGE_SIZE << order) / SZ_1M);
7498 		hugetlb_cma_size = 0;
7499 		return;
7500 	}
7501 
7502 	if (!node_specific_cma_alloc) {
7503 		/*
7504 		 * If 3 GB area is requested on a machine with 4 numa nodes,
7505 		 * let's allocate 1 GB on first three nodes and ignore the last one.
7506 		 */
7507 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7508 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7509 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7510 	}
7511 
7512 	reserved = 0;
7513 	for_each_online_node(nid) {
7514 		int res;
7515 		char name[CMA_MAX_NAME];
7516 
7517 		if (node_specific_cma_alloc) {
7518 			if (hugetlb_cma_size_in_node[nid] == 0)
7519 				continue;
7520 
7521 			size = hugetlb_cma_size_in_node[nid];
7522 		} else {
7523 			size = min(per_node, hugetlb_cma_size - reserved);
7524 		}
7525 
7526 		size = round_up(size, PAGE_SIZE << order);
7527 
7528 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7529 		/*
7530 		 * Note that 'order per bit' is based on smallest size that
7531 		 * may be returned to CMA allocator in the case of
7532 		 * huge page demotion.
7533 		 */
7534 		res = cma_declare_contiguous_nid(0, size, 0,
7535 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
7536 						 0, false, name,
7537 						 &hugetlb_cma[nid], nid);
7538 		if (res) {
7539 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7540 				res, nid);
7541 			continue;
7542 		}
7543 
7544 		reserved += size;
7545 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7546 			size / SZ_1M, nid);
7547 
7548 		if (reserved >= hugetlb_cma_size)
7549 			break;
7550 	}
7551 
7552 	if (!reserved)
7553 		/*
7554 		 * hugetlb_cma_size is used to determine if allocations from
7555 		 * cma are possible.  Set to zero if no cma regions are set up.
7556 		 */
7557 		hugetlb_cma_size = 0;
7558 }
7559 
7560 static void __init hugetlb_cma_check(void)
7561 {
7562 	if (!hugetlb_cma_size || cma_reserve_called)
7563 		return;
7564 
7565 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7566 }
7567 
7568 #endif /* CONFIG_CMA */
7569